ARM64: Instruction simplification for array accesses.
HArrayGet and HArraySet with variable indexes generate a two-instruction
sequence on arm64, for example:

    add temp, obj, #data_offset
    ldr out, [temp, index, LSL #shift_amount]
When we have multiple accesses to the same array, the initial `add`
instruction is redundant.
This patch introduces the first simplification in the arm64-specific
instruction simplification pass. It splits HArrayGet and HArraySet
using the new arm64-specific IR node HArm64IntermediateAddress. After
that we run GVN again to merge the multiple occurrences of
HArm64IntermediateAddress.
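
For illustration, with two accesses to the same array, the code
generated before this patch looks like (register names are schematic):

    add temp1, obj, #data_offset
    ldr out1, [temp1, index1, LSL #shift_amount]
    add temp2, obj, #data_offset
    ldr out2, [temp2, index2, LSL #shift_amount]

After simplification and the second GVN pass, the redundant `add` is
gone:

    add temp, obj, #data_offset
    ldr out1, [temp, index1, LSL #shift_amount]
    ldr out2, [temp, index2, LSL #shift_amount]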
Change-Id: I2e3d12fbb07fed07b2cb2f3f47f99f5a032f8312
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index ffb9b79..5dda394 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1580,6 +1580,21 @@
HandleBinaryOp(instruction);
}
+void LocationsBuilderARM64::VisitArm64IntermediateAddress(HArm64IntermediateAddress* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->GetOffset(), instruction));
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitArm64IntermediateAddress(
+ HArm64IntermediateAddress* instruction) {
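+ // Generate a single `add` computing `base + offset`. The offset input is
+ // either a register or an add/sub-encodable immediate (see the use of
+ // `ARM64EncodableConstantOrRegister` in the locations builder above).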
+ __ Add(OutputRegister(instruction),
+ InputRegisterAt(instruction, 0),
+ Operand(InputOperandAt(instruction, 1)));
+}
+
void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
@@ -1593,14 +1608,16 @@
}
void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
- LocationSummary* locations = instruction->GetLocations();
Primitive::Type type = instruction->GetType();
Register obj = InputRegisterAt(instruction, 0);
- Location index = locations->InAt(1);
+ Location index = instruction->GetLocations()->InAt(1);
size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(type)).Uint32Value();
MemOperand source = HeapOperand(obj);
+ CPURegister dest = OutputCPURegister(instruction);
+
MacroAssembler* masm = GetVIXLAssembler();
UseScratchRegisterScope temps(masm);
+ // Block pools between `Load` and `MaybeRecordImplicitNullCheck`.
BlockPoolsScope block_pools(masm);
if (index.IsConstant()) {
@@ -1608,15 +1625,26 @@
source = HeapOperand(obj, offset);
} else {
Register temp = temps.AcquireSameSizeAs(obj);
- __ Add(temp, obj, offset);
+ if (instruction->GetArray()->IsArm64IntermediateAddress()) {
+ // We do not need to compute the intermediate address from the array: the
+ // input instruction has done it already. See the comment in
+ // `InstructionSimplifierArm64::TryExtractArrayAccessAddress()`.
+ if (kIsDebugBuild) {
+ HArm64IntermediateAddress* tmp = instruction->GetArray()->AsArm64IntermediateAddress();
+ DCHECK(tmp->GetOffset()->AsIntConstant()->GetValueAsUint64() == offset);
+ }
+ temp = obj;
+ } else {
+ __ Add(temp, obj, offset);
+ }
source = HeapOperand(temp, XRegisterFrom(index), LSL, Primitive::ComponentSizeShift(type));
}
- codegen_->Load(type, OutputCPURegister(instruction), source);
+ codegen_->Load(type, dest, source);
codegen_->MaybeRecordImplicitNullCheck(instruction);
- if (type == Primitive::kPrimNot) {
- GetAssembler()->MaybeUnpoisonHeapReference(OutputCPURegister(instruction).W());
+ if (instruction->GetType() == Primitive::kPrimNot) {
+ GetAssembler()->MaybeUnpoisonHeapReference(dest.W());
}
}
@@ -1670,7 +1698,18 @@
} else {
UseScratchRegisterScope temps(masm);
Register temp = temps.AcquireSameSizeAs(array);
- __ Add(temp, array, offset);
+ if (instruction->GetArray()->IsArm64IntermediateAddress()) {
+ // We do not need to compute the intermediate address from the array: the
+ // input instruction has done it already. See the comment in
+ // `InstructionSimplifierArm64::TryExtractArrayAccessAddress()`.
+ if (kIsDebugBuild) {
+ HArm64IntermediateAddress* tmp = instruction->GetArray()->AsArm64IntermediateAddress();
+ DCHECK(tmp->GetOffset()->AsIntConstant()->GetValueAsUint64() == offset);
+ }
+ temp = array;
+ } else {
+ __ Add(temp, array, offset);
+ }
destination = HeapOperand(temp,
XRegisterFrom(index),
LSL,
@@ -1680,6 +1719,7 @@
codegen_->MaybeRecordImplicitNullCheck(instruction);
} else {
DCHECK(needs_write_barrier);
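+ // The simplifier never extracts the intermediate address for reference array
+ // sets (see `TryExtractArrayAccessAddress()`): they may need a runtime call
+ // and the original array pointer.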
+ DCHECK(!instruction->GetArray()->IsArm64IntermediateAddress());
vixl::Label done;
SlowPathCodeARM64* slow_path = nullptr;
{
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index a068b48..799f1bd 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -382,7 +382,7 @@
uint32_t dex_pc,
SlowPathCode* slow_path);
- ParallelMoveResolverARM64* GetMoveResolver() { return &move_resolver_; }
+ ParallelMoveResolverARM64* GetMoveResolver() OVERRIDE { return &move_resolver_; }
bool NeedsTwoRegisters(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
return false;
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index 4abe5e9..e1a8c9c 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -203,19 +203,23 @@
int64_t value = CodeGenerator::GetInt64ValueOf(constant);
- if (instr->IsAdd() || instr->IsSub() || instr->IsCondition() ||
- instr->IsCompare() || instr->IsBoundsCheck()) {
+ if (instr->IsAnd() || instr->IsOr() || instr->IsXor()) {
+ // Uses logical operations.
+ return vixl::Assembler::IsImmLogical(value, vixl::kXRegSize);
+ } else if (instr->IsNeg()) {
+ // Uses mov -immediate.
+ return vixl::Assembler::IsImmMovn(value, vixl::kXRegSize);
+ } else {
+ DCHECK(instr->IsAdd() ||
+ instr->IsArm64IntermediateAddress() ||
+ instr->IsBoundsCheck() ||
+ instr->IsCompare() ||
+ instr->IsCondition() ||
+ instr->IsSub());
// Uses aliases of ADD/SUB instructions.
// If `value` does not fit but `-value` does, VIXL will automatically use
// the 'opposite' instruction.
return vixl::Assembler::IsImmAddSub(value) || vixl::Assembler::IsImmAddSub(-value);
- } else if (instr->IsAnd() || instr->IsOr() || instr->IsXor()) {
- // Uses logical operations.
- return vixl::Assembler::IsImmLogical(value, vixl::kXRegSize);
- } else {
- DCHECK(instr->IsNeg());
- // Uses mov -immediate.
- return vixl::Assembler::IsImmMovn(value, vixl::kXRegSize);
}
}
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index 4b2d36f..eb79f46 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -16,8 +16,65 @@
#include "instruction_simplifier_arm64.h"
+#include "mirror/array-inl.h"
+
namespace art {
namespace arm64 {
+void InstructionSimplifierArm64Visitor::TryExtractArrayAccessAddress(HInstruction* access,
+ HInstruction* array,
+ HInstruction* index,
+ int access_size) {
+ if (index->IsConstant() ||
+ (index->IsBoundsCheck() && index->AsBoundsCheck()->GetIndex()->IsConstant())) {
+ // When the index is a constant, all the addressing can be fitted in the
+ // memory access instruction, so do not split the access.
+ return;
+ }
+ if (access->IsArraySet() &&
+ access->AsArraySet()->GetValue()->GetType() == Primitive::kPrimNot) {
+ // The access may require a runtime call (for the type check) or the original array pointer.
+ return;
+ }
+
+ // Proceed to extract the base address computation.
+ ArenaAllocator* arena = GetGraph()->GetArena();
+
+ HIntConstant* offset =
+ GetGraph()->GetIntConstant(mirror::Array::DataOffset(access_size).Uint32Value());
+ HArm64IntermediateAddress* address =
+ new (arena) HArm64IntermediateAddress(array, offset, kNoDexPc);
+ access->GetBlock()->InsertInstructionBefore(address, access);
+ access->ReplaceInput(address, 0);
+ // Both instructions must depend on GC to prevent any instruction that can
+ // trigger GC to be inserted between the two.
+ access->AddSideEffects(SideEffects::DependsOnGC());
+ DCHECK(address->GetSideEffects().Includes(SideEffects::DependsOnGC()));
+ DCHECK(access->GetSideEffects().Includes(SideEffects::DependsOnGC()));
+ // TODO: Code generation for HArrayGet and HArraySet will check whether the input address
+ // is an HArm64IntermediateAddress and generate appropriate code.
+ // We would like to replace `HArrayGet` and `HArraySet` with custom instructions (maybe
+ // `HArm64Load` and `HArm64Store`). We defer these changes because the new instructions
+ // would not bring any advantage yet.
+ // Also see the comments in
+ // `InstructionCodeGeneratorARM64::VisitArrayGet()` and
+ // `InstructionCodeGeneratorARM64::VisitArraySet()`.
+ RecordSimplification();
+}
+
+void InstructionSimplifierArm64Visitor::VisitArrayGet(HArrayGet* instruction) {
+ TryExtractArrayAccessAddress(instruction,
+ instruction->GetArray(),
+ instruction->GetIndex(),
+ Primitive::ComponentSize(instruction->GetType()));
+}
+
+void InstructionSimplifierArm64Visitor::VisitArraySet(HArraySet* instruction) {
+ TryExtractArrayAccessAddress(instruction,
+ instruction->GetArray(),
+ instruction->GetIndex(),
+ Primitive::ComponentSize(instruction->GetComponentType()));
+}
+
} // namespace arm64
} // namespace art
diff --git a/compiler/optimizing/instruction_simplifier_arm64.h b/compiler/optimizing/instruction_simplifier_arm64.h
index d7f4eae..4b697db 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.h
+++ b/compiler/optimizing/instruction_simplifier_arm64.h
@@ -35,6 +35,14 @@
}
}
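+ // Tries to split an array access (HArrayGet or HArraySet) into its address
+ // computation (HArm64IntermediateAddress) and the memory access itself, so
+ // that GVN can remove duplicated address computations.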
+ void TryExtractArrayAccessAddress(HInstruction* access,
+ HInstruction* array,
+ HInstruction* index,
+ int access_size);
+
+ void VisitArrayGet(HArrayGet* instruction) OVERRIDE;
+ void VisitArraySet(HArraySet* instruction) OVERRIDE;
+
OptimizingCompilerStats* stats_;
};
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 939e62c..490bfc8 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1079,14 +1079,23 @@
#define FOR_EACH_CONCRETE_INSTRUCTION_ARM(M)
+#ifndef ART_ENABLE_CODEGEN_arm64
#define FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M)
+#else
+#define FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M) \
+ M(Arm64IntermediateAddress, Instruction)
+#endif
#define FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(M)
+#ifndef ART_ENABLE_CODEGEN_x86
+#define FOR_EACH_CONCRETE_INSTRUCTION_X86(M)
+#else
#define FOR_EACH_CONCRETE_INSTRUCTION_X86(M) \
M(X86ComputeBaseMethodAddress, Instruction) \
M(X86LoadFromConstantTable, Instruction) \
M(X86PackedSwitch, Instruction)
+#endif
#define FOR_EACH_CONCRETE_INSTRUCTION_X86_64(M)
@@ -1370,6 +1379,10 @@
return SideEffects(flags_ & ~other.flags_);
}
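+ // In-place union, unlike `Union()` which returns a new value; used by
+ // `HInstruction::AddSideEffects()` below.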
+ void Add(SideEffects other) {
+ flags_ |= other.flags_;
+ }
+
bool Includes(SideEffects other) const {
return (other.flags_ & flags_) == other.flags_;
}
@@ -1943,6 +1956,7 @@
}
SideEffects GetSideEffects() const { return side_effects_; }
+ void AddSideEffects(SideEffects other) { side_effects_.Add(other); }
size_t GetLifetimePosition() const { return lifetime_position_; }
void SetLifetimePosition(size_t position) { lifetime_position_ = position; }
@@ -2012,7 +2026,7 @@
// order of blocks where this instruction's live interval start.
size_t lifetime_position_;
- const SideEffects side_effects_;
+ SideEffects side_effects_;
// TODO: for primitive types this should be marked as invalid.
ReferenceTypeInfo reference_type_info_;
@@ -4424,8 +4438,11 @@
HArrayGet(HInstruction* array,
HInstruction* index,
Primitive::Type type,
- uint32_t dex_pc)
- : HExpression(type, SideEffects::ArrayReadOfType(type), dex_pc) {
+ uint32_t dex_pc,
+ SideEffects additional_side_effects = SideEffects::None())
+ : HExpression(type,
+ SideEffects::ArrayReadOfType(type).Union(additional_side_effects),
+ dex_pc) {
SetRawInputAt(0, array);
SetRawInputAt(1, index);
}
@@ -4460,10 +4477,13 @@
HInstruction* index,
HInstruction* value,
Primitive::Type expected_component_type,
- uint32_t dex_pc)
+ uint32_t dex_pc,
+ SideEffects additional_side_effects = SideEffects::None())
: HTemplateInstruction(
SideEffects::ArrayWriteOfType(expected_component_type).Union(
- SideEffectsForArchRuntimeCalls(value->GetType())), dex_pc),
+ SideEffectsForArchRuntimeCalls(value->GetType())).Union(
+ additional_side_effects),
+ dex_pc),
expected_component_type_(expected_component_type),
needs_type_check_(value->GetType() == Primitive::kPrimNot),
value_can_be_null_(true),
@@ -4518,6 +4538,10 @@
: expected_component_type_;
}
+ Primitive::Type GetRawExpectedComponentType() const {
+ return expected_component_type_;
+ }
+
static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type value_type) {
return (value_type == Primitive::kPrimNot) ? SideEffects::CanTriggerGC() : SideEffects::None();
}
@@ -4576,6 +4600,7 @@
bool CanThrow() const OVERRIDE { return true; }
+ HInstruction* GetIndex() const { return InputAt(0); }
DECLARE_INSTRUCTION(BoundsCheck);
@@ -5389,6 +5414,9 @@
} // namespace art
+#ifdef ART_ENABLE_CODEGEN_arm64
+#include "nodes_arm64.h"
+#endif
#ifdef ART_ENABLE_CODEGEN_x86
#include "nodes_x86.h"
#endif
diff --git a/compiler/optimizing/nodes_arm64.h b/compiler/optimizing/nodes_arm64.h
new file mode 100644
index 0000000..885d3a2
--- /dev/null
+++ b/compiler/optimizing/nodes_arm64.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_NODES_ARM64_H_
+#define ART_COMPILER_OPTIMIZING_NODES_ARM64_H_
+
+namespace art {
+
+// This instruction computes an intermediate address pointing in the 'middle' of an object. The
+// result pointer cannot be handled by the GC, so extra care is taken to make sure that this
+// value is never used across anything that can trigger GC.
+class HArm64IntermediateAddress : public HExpression<2> {
+ public:
+ HArm64IntermediateAddress(HInstruction* base_address, HInstruction* offset, uint32_t dex_pc)
+ : HExpression(Primitive::kPrimNot, SideEffects::DependsOnGC(), dex_pc) {
+ SetRawInputAt(0, base_address);
+ SetRawInputAt(1, offset);
+ }
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { return true; }
+
+ HInstruction* GetBaseAddress() const { return InputAt(0); }
+ HInstruction* GetOffset() const { return InputAt(1); }
+
+ DECLARE_INSTRUCTION(Arm64IntermediateAddress);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HArm64IntermediateAddress);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_NODES_ARM64_H_