Diffstat (limited to 'compiler/optimizing/nodes.h')
-rw-r--r-- | compiler/optimizing/nodes.h | 152
1 file changed, 134 insertions, 18 deletions
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 8a9e61875a..671f950aa6 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -323,6 +323,7 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
temporaries_vreg_slots_(0),
has_bounds_checks_(false),
has_try_catch_(false),
+ has_simd_(false),
has_loops_(false),
has_irreducible_loops_(false),
debuggable_(debuggable),
@@ -340,6 +341,7 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
cached_long_constants_(std::less<int64_t>(), arena->Adapter(kArenaAllocConstantsMap)),
cached_double_constants_(std::less<int64_t>(), arena->Adapter(kArenaAllocConstantsMap)),
cached_current_method_(nullptr),
+ art_method_(nullptr),
inexact_object_rti_(ReferenceTypeInfo::CreateInvalid()),
osr_(osr),
cha_single_implementation_list_(arena->Adapter(kArenaAllocCHA)) {
@@ -398,6 +400,12 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
// put deoptimization instructions, etc.
void TransformLoopHeaderForBCE(HBasicBlock* header);
+ // Adds a new loop directly after the loop with the given header and exit.
+ // Returns the new preheader.
+ HBasicBlock* TransformLoopForVectorization(HBasicBlock* header,
+ HBasicBlock* body,
+ HBasicBlock* exit);
+
// Removes `block` from the graph. Assumes `block` has been disconnected from
// other blocks and has no instructions or phis.
void DeleteDeadEmptyBlock(HBasicBlock* block);
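For illustration, a minimal sketch of a caller for the new helper; the wrapper function and variable names are assumptions for the example, not part of this change:

// Sketch only: the helper adds a new loop directly after the loop formed by
// `header`/`body`/`exit` and returns the preheader of that new loop. A pass
// could, for instance, use the returned block as the entry of a scalar
// cleanup loop while the original loop is rewritten with HVec* nodes.
static HBasicBlock* PrepareCleanupLoop(HGraph* graph,
                                       HBasicBlock* header,
                                       HBasicBlock* body,
                                       HBasicBlock* exit) {
  HBasicBlock* cleanup_preheader =
      graph->TransformLoopForVectorization(header, body, exit);
  // Record that SIMD values will appear, so code generators reserve wide
  // enough spill slots (see HasSIMD()/SetHasSIMD() further down in this diff).
  graph->SetHasSIMD(true);
  return cleanup_preheader;
}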
@@ -560,6 +568,9 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
bool HasTryCatch() const { return has_try_catch_; }
void SetHasTryCatch(bool value) { has_try_catch_ = value; }
+ bool HasSIMD() const { return has_simd_; }
+ void SetHasSIMD(bool value) { has_simd_ = value; }
+
bool HasLoops() const { return has_loops_; }
void SetHasLoops(bool value) { has_loops_ = value; }
@@ -652,6 +663,11 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
// false positives.
bool has_try_catch_;
+ // Flag whether SIMD instructions appear in the graph. If true, the
+ // code generators may have to be more careful spilling the wider
+ // contents of SIMD registers.
+ bool has_simd_;
+
// Flag whether there are any loops in the graph. We can skip loop
// optimization if it's false. It's only best effort to keep it up
// to date in the presence of code elimination so there might be false
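For illustration, the kind of decision the new has_simd_ comment alludes to; the helper and the spill-slot sizes below are assumptions for the example, not ART code:

// Sketch only: a code generator choosing spill-slot width. When the graph
// carries SIMD instructions, spilled vector registers need wider slots than
// ordinary core or floating-point registers.
static constexpr size_t kScalarSpillSlotSize = 8;   // assumed size in bytes
static constexpr size_t kVectorSpillSlotSize = 16;  // assumed 128-bit SIMD size

static size_t SpillSlotSizeFor(const HGraph* graph) {
  return graph->HasSIMD() ? kVectorSpillSlotSize : kScalarSpillSlotSize;
}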
@@ -1353,6 +1369,26 @@ class HLoopInformationOutwardIterator : public ValueObject {
M(TypeConversion, Instruction) \
M(UShr, BinaryOperation) \
M(Xor, BinaryOperation) \
+ M(VecReplicateScalar, VecUnaryOperation) \
+ M(VecSetScalars, VecUnaryOperation) \
+ M(VecSumReduce, VecUnaryOperation) \
+ M(VecCnv, VecUnaryOperation) \
+ M(VecNeg, VecUnaryOperation) \
+ M(VecAbs, VecUnaryOperation) \
+ M(VecNot, VecUnaryOperation) \
+ M(VecAdd, VecBinaryOperation) \
+ M(VecSub, VecBinaryOperation) \
+ M(VecMul, VecBinaryOperation) \
+ M(VecDiv, VecBinaryOperation) \
+ M(VecAnd, VecBinaryOperation) \
+ M(VecAndNot, VecBinaryOperation) \
+ M(VecOr, VecBinaryOperation) \
+ M(VecXor, VecBinaryOperation) \
+ M(VecShl, VecBinaryOperation) \
+ M(VecShr, VecBinaryOperation) \
+ M(VecUShr, VecBinaryOperation) \
+ M(VecLoad, VecMemoryOperation) \
+ M(VecStore, VecMemoryOperation) \
/*
* Instructions, shared across several (not all) architectures.
@@ -1414,7 +1450,11 @@ class HLoopInformationOutwardIterator : public ValueObject {
M(Constant, Instruction) \
M(UnaryOperation, Instruction) \
M(BinaryOperation, Instruction) \
- M(Invoke, Instruction)
+ M(Invoke, Instruction) \
+ M(VecOperation, Instruction) \
+ M(VecUnaryOperation, VecOperation) \
+ M(VecBinaryOperation, VecOperation) \
+ M(VecMemoryOperation, VecOperation)
#define FOR_EACH_INSTRUCTION(M) \
FOR_EACH_CONCRETE_INSTRUCTION(M) \
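For illustration, how an X-macro list like this is typically consumed, so each new M(Vec..., VecOperation) entry automatically gets, for example, a visitor hook; the simplified visitor below is a sketch, not the real HGraphVisitor:

// Sketch only: expanding the instruction list into per-node visit methods.
// Adding M(VecAdd, VecBinaryOperation) to the list yields VisitVecAdd(), etc.
class ExampleInstructionVisitor {
 public:
  virtual ~ExampleInstructionVisitor() {}
  virtual void VisitInstruction(HInstruction* instruction) {}

#define DECLARE_VISIT(type, super) \
  virtual void Visit##type(H##type* instruction) { VisitInstruction(instruction); }
  FOR_EACH_INSTRUCTION(DECLARE_VISIT)
#undef DECLARE_VISIT
};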
@@ -1734,11 +1774,11 @@ class SideEffects : public ValueObject {
// A HEnvironment object contains the values of virtual registers at a given location.
class HEnvironment : public ArenaObject<kArenaAllocEnvironment> {
public:
- HEnvironment(ArenaAllocator* arena,
- size_t number_of_vregs,
- ArtMethod* method,
- uint32_t dex_pc,
- HInstruction* holder)
+ ALWAYS_INLINE HEnvironment(ArenaAllocator* arena,
+ size_t number_of_vregs,
+ ArtMethod* method,
+ uint32_t dex_pc,
+ HInstruction* holder)
: vregs_(number_of_vregs, arena->Adapter(kArenaAllocEnvironmentVRegs)),
locations_(number_of_vregs, arena->Adapter(kArenaAllocEnvironmentLocations)),
parent_(nullptr),
@@ -1747,7 +1787,7 @@ class HEnvironment : public ArenaObject<kArenaAllocEnvironment> {
holder_(holder) {
}
- HEnvironment(ArenaAllocator* arena, const HEnvironment& to_copy, HInstruction* holder)
+ ALWAYS_INLINE HEnvironment(ArenaAllocator* arena, const HEnvironment& to_copy, HInstruction* holder)
: HEnvironment(arena,
to_copy.Size(),
to_copy.GetMethod(),
@@ -1914,6 +1954,9 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
virtual bool IsControlFlow() const { return false; }
+ // Can the instruction throw?
+ // TODO: We should rename to CanVisiblyThrow, as some instructions (like HNewInstance)
+ // could throw OOME, but it is still OK to remove them if they are unused.
virtual bool CanThrow() const { return false; }
bool CanThrowIntoCatchBlock() const { return CanThrow() && block_->IsTryBlock(); }
@@ -2068,6 +2111,7 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
void SetLocations(LocationSummary* locations) { locations_ = locations; }
void ReplaceWith(HInstruction* instruction);
+ void ReplaceUsesDominatedBy(HInstruction* dominator, HInstruction* replacement);
void ReplaceInput(HInstruction* replacement, size_t index);
// This is almost the same as doing `ReplaceWith()`. But in this helper, the
@@ -2931,28 +2975,97 @@ class HTryBoundary FINAL : public HTemplateInstruction<0> {
};
// Deoptimize to interpreter, upon checking a condition.
-class HDeoptimize FINAL : public HTemplateInstruction<1> {
+class HDeoptimize FINAL : public HVariableInputSizeInstruction {
public:
+ enum class Kind {
+ kBCE,
+ kInline,
+ kLast = kInline
+ };
+
+ // Use this constructor when the `HDeoptimize` acts as a barrier, where no code can move
+ // across.
+ HDeoptimize(ArenaAllocator* arena, HInstruction* cond, Kind kind, uint32_t dex_pc)
+ : HVariableInputSizeInstruction(
+ SideEffects::All(),
+ dex_pc,
+ arena,
+ /* number_of_inputs */ 1,
+ kArenaAllocMisc) {
+ SetPackedFlag<kFieldCanBeMoved>(false);
+ SetPackedField<DeoptimizeKindField>(kind);
+ SetRawInputAt(0, cond);
+ }
+
+ // Use this constructor when the `HDeoptimize` guards an instruction, and any user
+ // that relies on the deoptimization to pass should have its input be the `HDeoptimize`
+ // instead of `guard`.
// We set CanTriggerGC to prevent any intermediate address to be live
// at the point of the `HDeoptimize`.
- HDeoptimize(HInstruction* cond, uint32_t dex_pc)
- : HTemplateInstruction(SideEffects::CanTriggerGC(), dex_pc) {
+ HDeoptimize(ArenaAllocator* arena,
+ HInstruction* cond,
+ HInstruction* guard,
+ Kind kind,
+ uint32_t dex_pc)
+ : HVariableInputSizeInstruction(
+ SideEffects::CanTriggerGC(),
+ dex_pc,
+ arena,
+ /* number_of_inputs */ 2,
+ kArenaAllocMisc) {
+ SetPackedFlag<kFieldCanBeMoved>(true);
+ SetPackedField<DeoptimizeKindField>(kind);
SetRawInputAt(0, cond);
+ SetRawInputAt(1, guard);
}
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
- return true;
+ bool CanBeMoved() const OVERRIDE { return GetPackedFlag<kFieldCanBeMoved>(); }
+
+ bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ return (other->CanBeMoved() == CanBeMoved()) && (other->AsDeoptimize()->GetKind() == GetKind());
}
+
bool NeedsEnvironment() const OVERRIDE { return true; }
+
bool CanThrow() const OVERRIDE { return true; }
+ Kind GetKind() const { return GetPackedField<DeoptimizeKindField>(); }
+
+ Primitive::Type GetType() const OVERRIDE {
+ return GuardsAnInput() ? GuardedInput()->GetType() : Primitive::kPrimVoid;
+ }
+
+ bool GuardsAnInput() const {
+ return InputCount() == 2;
+ }
+
+ HInstruction* GuardedInput() const {
+ DCHECK(GuardsAnInput());
+ return InputAt(1);
+ }
+
+ void RemoveGuard() {
+ RemoveInputAt(1);
+ }
+
DECLARE_INSTRUCTION(Deoptimize);
private:
+ static constexpr size_t kFieldCanBeMoved = kNumberOfGenericPackedBits;
+ static constexpr size_t kFieldDeoptimizeKind = kNumberOfGenericPackedBits + 1;
+ static constexpr size_t kFieldDeoptimizeKindSize =
+ MinimumBitsToStore(static_cast<size_t>(Kind::kLast));
+ static constexpr size_t kNumberOfDeoptimizePackedBits =
+ kFieldDeoptimizeKind + kFieldDeoptimizeKindSize;
+ static_assert(kNumberOfDeoptimizePackedBits <= kMaxNumberOfPackedBits,
+ "Too many packed fields.");
+ using DeoptimizeKindField = BitField<Kind, kFieldDeoptimizeKind, kFieldDeoptimizeKindSize>;
+
DISALLOW_COPY_AND_ASSIGN(HDeoptimize);
};
+std::ostream& operator<<(std::ostream& os, const HDeoptimize::Kind& rhs);
+
// Represents a should_deoptimize flag. Currently used for CHA-based devirtualization.
// The compiled code checks this flag value in a guard before devirtualized call and
// if it's true, starts to do deoptimization.
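For illustration, how the two HDeoptimize constructor forms above differ at a use site; the surrounding variables (arena, condition, receiver, block, cursor, dex_pc) are assumptions for the example:

// Sketch only. Barrier form: one input, SideEffects::All(), cannot be moved.
HDeoptimize* barrier =
    new (arena) HDeoptimize(arena, condition, HDeoptimize::Kind::kBCE, dex_pc);

// Guarding form: two inputs; the node takes the type of the guarded value,
// and users that rely on the check should read the value through it.
HDeoptimize* guard_deopt =
    new (arena) HDeoptimize(arena, condition, receiver, HDeoptimize::Kind::kInline, dex_pc);
block->InsertInstructionBefore(guard_deopt, cursor);
// Rewire dominated uses of the guarded value to the deoptimization node
// (see ReplaceUsesDominatedBy() added earlier in this diff).
receiver->ReplaceUsesDominatedBy(guard_deopt, guard_deopt);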
@@ -3912,6 +4025,7 @@ class HInvoke : public HVariableInputSizeInstruction {
bool IsIntrinsic() const { return intrinsic_ != Intrinsics::kNone; }
ArtMethod* GetResolvedMethod() const { return resolved_method_; }
+ void SetResolvedMethod(ArtMethod* method) { resolved_method_ = method; }
DECLARE_ABSTRACT_INSTRUCTION(Invoke);
@@ -3954,7 +4068,7 @@ class HInvoke : public HVariableInputSizeInstruction {
}
uint32_t number_of_arguments_;
- ArtMethod* const resolved_method_;
+ ArtMethod* resolved_method_;
const uint32_t dex_method_index_;
Intrinsics intrinsic_;
@@ -4111,6 +4225,10 @@ class HInvokeStaticOrDirect FINAL : public HInvoke {
dispatch_info_ = dispatch_info;
}
+ DispatchInfo GetDispatchInfo() const {
+ return dispatch_info_;
+ }
+
void AddSpecialInput(HInstruction* input) {
// We allow only one special input.
DCHECK(!IsStringInit() && !HasCurrentMethodInput());
@@ -5541,8 +5659,6 @@ class HLoadClass FINAL : public HInstruction {
// Use a known boot image Class* address, embedded in the code by the codegen.
// Used for boot image classes referenced by apps in AOT- and JIT-compiled code.
- // Note: codegen needs to emit a linker patch if indicated by compiler options'
- // GetIncludePatchInformation().
kBootImageAddress,
// Load from an entry in the .bss section using a PC-relative load.
@@ -5746,8 +5862,6 @@ class HLoadString FINAL : public HInstruction {
// Use a known boot image String* address, embedded in the code by the codegen.
// Used for boot image strings referenced by apps in AOT- and JIT-compiled code.
- // Note: codegen needs to emit a linker patch if indicated by compiler options'
- // GetIncludePatchInformation().
kBootImageAddress,
// Load from an entry in the .bss section using a PC-relative load.
@@ -6609,6 +6723,8 @@ class HParallelMove FINAL : public HTemplateInstruction<0> {
} // namespace art
+#include "nodes_vector.h"
+
#if defined(ART_ENABLE_CODEGEN_arm) || defined(ART_ENABLE_CODEGEN_arm64)
#include "nodes_shared.h"
#endif