Diffstat (limited to 'compiler')
| -rw-r--r-- | compiler/image_test.cc                              |  5 |
| -rw-r--r-- | compiler/image_writer.cc                            | 23 |
| -rw-r--r-- | compiler/optimizing/code_generator_arm64.cc         | 30 |
| -rw-r--r-- | compiler/optimizing/graph_visualizer.cc             |  4 |
| -rw-r--r-- | compiler/optimizing/induction_var_analysis.cc       |  8 |
| -rw-r--r-- | compiler/optimizing/induction_var_range.cc          |  8 |
| -rw-r--r-- | compiler/optimizing/instruction_simplifier_arm64.cc | 55 |
| -rw-r--r-- | compiler/optimizing/instruction_simplifier_arm64.h  |  7 |
| -rw-r--r-- | compiler/optimizing/intrinsics_arm.cc               | 64 |
| -rw-r--r-- | compiler/optimizing/intrinsics_arm64.cc             | 64 |
| -rw-r--r-- | compiler/optimizing/nodes.h                         |  1 |
| -rw-r--r-- | compiler/optimizing/nodes_arm64.h                   | 60 |
12 files changed, 321 insertions, 8 deletions
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 992af29545..5763cec43f 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -289,6 +289,11 @@ TEST_F(ImageTest, WriteReadLZ4) {
   TestWriteRead(ImageHeader::kStorageModeLZ4);
 }
 
+TEST_F(ImageTest, WriteReadLZ4HC) {
+  TestWriteRead(ImageHeader::kStorageModeLZ4HC);
+}
+
+
 TEST_F(ImageTest, ImageHeaderIsValid) {
   uint32_t image_begin = ART_BASE_ADDRESS;
   uint32_t image_size_ = 16 * KB;
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 5eff8f37ec..871435b85f 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -18,6 +18,7 @@
 #include <sys/stat.h>
 
 #include <lz4.h>
+#include <lz4hc.h>
 
 #include <memory>
 #include <numeric>
@@ -224,18 +225,28 @@ bool ImageWriter::Write(int image_fd,
   char* image_data = reinterpret_cast<char*>(image_info.image_->Begin()) + sizeof(ImageHeader);
   size_t data_size;
   const char* image_data_to_write;
+  const uint64_t compress_start_time = NanoTime();
   CHECK_EQ(image_header->storage_mode_, image_storage_mode_);
   switch (image_storage_mode_) {
     case ImageHeader::kStorageModeLZ4: {
-      size_t compressed_max_size = LZ4_compressBound(image_data_size);
+      const size_t compressed_max_size = LZ4_compressBound(image_data_size);
       compressed_data.reset(new char[compressed_max_size]);
       data_size = LZ4_compress(
           reinterpret_cast<char*>(image_info.image_->Begin()) + sizeof(ImageHeader),
           &compressed_data[0],
           image_data_size);
-      image_data_to_write = &compressed_data[0];
-      VLOG(compiler) << "Compressed from " << image_data_size << " to " << data_size;
+
+      break;
+    }
+    case ImageHeader::kStorageModeLZ4HC: {
+      // Bound is same as non HC.
+      const size_t compressed_max_size = LZ4_compressBound(image_data_size);
+      compressed_data.reset(new char[compressed_max_size]);
+      data_size = LZ4_compressHC(
+          reinterpret_cast<char*>(image_info.image_->Begin()) + sizeof(ImageHeader),
+          &compressed_data[0],
+          image_data_size);
       break;
     }
     case ImageHeader::kStorageModeUncompressed: {
@@ -249,6 +260,12 @@ bool ImageWriter::Write(int image_fd,
     }
   }
 
+  if (compressed_data != nullptr) {
+    image_data_to_write = &compressed_data[0];
+    VLOG(compiler) << "Compressed from " << image_data_size << " to " << data_size << " in "
+                   << PrettyDuration(NanoTime() - compress_start_time);
+  }
+
   // Write header first, as uncompressed.
   image_header->data_size_ = data_size;
   if (!image_file->WriteFully(image_info.image_->Begin(), sizeof(ImageHeader))) {
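The new storage mode reuses LZ4's worst-case output bound and the existing decompression path; only the compressor call changes, trading compression time for a better ratio. A minimal standalone sketch of that trade, assuming only the public liblz4 API used in the hunk above (the helper name and use of std::vector are illustrative, not ART code):

    #include <lz4.h>
    #include <lz4hc.h>

    #include <vector>

    // Compress `size` bytes from `data`. LZ4 and LZ4HC share the same
    // worst-case bound (LZ4_compressBound) and the same decompressor;
    // HC only spends more compression time for a smaller output.
    std::vector<char> Compress(const char* data, int size, bool high_compression) {
      std::vector<char> out(LZ4_compressBound(size));
      const int compressed_size = high_compression
          ? LZ4_compressHC(data, out.data(), size)   // slower, better ratio
          : LZ4_compress(data, out.data(), size);    // fast path
      out.resize(compressed_size);
      return out;
    }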
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index c27209f0b1..985dc056f6 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1862,6 +1862,36 @@ void InstructionCodeGeneratorARM64::VisitAnd(HAnd* instruction) {
   HandleBinaryOp(instruction);
 }
 
+void LocationsBuilderARM64::VisitArm64BitwiseNegatedRight(HArm64BitwiseNegatedRight* instr) {
+  DCHECK(Primitive::IsIntegralType(instr->GetType())) << instr->GetType();
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
+  locations->SetInAt(0, Location::RequiresRegister());
+  // There is no immediate variant of negated bitwise instructions in AArch64.
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorARM64::VisitArm64BitwiseNegatedRight(
+    HArm64BitwiseNegatedRight* instr) {
+  Register dst = OutputRegister(instr);
+  Register lhs = InputRegisterAt(instr, 0);
+  Register rhs = InputRegisterAt(instr, 1);
+
+  switch (instr->GetOpKind()) {
+    case HInstruction::kAnd:
+      __ Bic(dst, lhs, rhs);
+      break;
+    case HInstruction::kOr:
+      __ Orn(dst, lhs, rhs);
+      break;
+    case HInstruction::kXor:
+      __ Eon(dst, lhs, rhs);
+      break;
+    default:
+      LOG(FATAL) << "Unreachable";
+  }
+}
+
 void LocationsBuilderARM64::VisitArm64DataProcWithShifterOp(
     HArm64DataProcWithShifterOp* instruction) {
   DCHECK(instruction->GetType() == Primitive::kPrimInt ||
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index a3d6bcf450..b9638f2027 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -443,6 +443,10 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
 #endif
 
 #ifdef ART_ENABLE_CODEGEN_arm64
+  void VisitArm64BitwiseNegatedRight(HArm64BitwiseNegatedRight* instruction) OVERRIDE {
+    StartAttributeStream("kind") << instruction->GetOpKind();
+  }
+
   void VisitArm64DataProcWithShifterOp(HArm64DataProcWithShifterOp* instruction) OVERRIDE {
     StartAttributeStream("kind") << instruction->GetInstrKind() << "+" << instruction->GetOpKind();
     if (HArm64DataProcWithShifterOp::IsShiftOp(instruction->GetOpKind())) {
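For reference, the three A64 instructions selected above each combine the left operand with the bitwise complement of the right one. A plain-C++ sketch of their semantics (illustrative only; not ART or VIXL code):

    #include <cstdint>

    // lhs OP ~rhs, for each of the negated bitwise instructions.
    constexpr uint64_t Bic(uint64_t lhs, uint64_t rhs) { return lhs & ~rhs; }  // AND-NOT
    constexpr uint64_t Orn(uint64_t lhs, uint64_t rhs) { return lhs | ~rhs; }  // ORR-NOT
    constexpr uint64_t Eon(uint64_t lhs, uint64_t rhs) { return lhs ^ ~rhs; }  // EOR-NOT

    static_assert(Bic(0b1100, 0b1010) == 0b0100, "BIC clears the bits set in rhs");
    static_assert(Orn(0b0000, ~0ull) == 0b0000, "ORN negates rhs before ORing");
    static_assert(Eon(0b1010, 0b1010) == ~0ull, "x EON x sets every bit");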
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index a1e1cde9df..82a898a9f1 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -552,9 +552,11 @@ void HInductionVarAnalysis::VisitCondition(HLoopInformation* loop,
   if (!IsExact(stride_expr, &stride_value)) {
     return;
   }
-  // Rewrite condition i != U into i < U or i > U if end condition is reached exactly.
-  if (cmp == kCondNE && ((stride_value == +1 && IsTaken(lower_expr, upper_expr, kCondLT)) ||
-                         (stride_value == -1 && IsTaken(lower_expr, upper_expr, kCondGT)))) {
+  // Rewrite condition i != U into strict end condition i < U or i > U if this end condition
+  // is reached exactly (tested by verifying that the loop has a unit stride and that the
+  // non-strict condition would always be taken).
+  if (cmp == kCondNE && ((stride_value == +1 && IsTaken(lower_expr, upper_expr, kCondLE)) ||
+                         (stride_value == -1 && IsTaken(lower_expr, upper_expr, kCondGE)))) {
     cmp = stride_value > 0 ? kCondLT : kCondGT;
   }
   // Normalize a linear loop control with a nonzero stride:
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index b162696a42..f9b6910acd 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -216,6 +216,14 @@ bool InductionVarRange::IsConstant(HInductionVarAnalysis::InductionInfo* info,
         }
       }
     } while (RefineOuter(&v_min, &v_max));
+    // Exploit array length + c >= c, with c <= 0 to avoid arithmetic wrap-around anomalies
+    // (e.g. array length == maxint and c == 1 would yield minint).
+    if (request == kAtLeast) {
+      if (v_min.a_constant == 1 && v_min.b_constant <= 0 && v_min.instruction->IsArrayLength()) {
+        *value = v_min.b_constant;
+        return true;
+      }
+    }
   }
   return false;
 }
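A sketch of what the condition rewrite buys, in illustrative C++ rather than compiler IR. With a unit stride, an `i != n` exit terminates exactly like a strict comparison, provided the corresponding non-strict test holds on entry; that is what the IsTaken(..., kCondLE/kCondGE) queries establish:

    // With stride +1, `i != n` exits exactly like `i < n` as long as the loop
    // is entered with lower <= n; if lower > n, the `!=` form would step past
    // n and never terminate, so the rewrite must not fire. The kAtLeast case
    // above guards against a related wrap-around: a lower bound of
    // array.length + c is only trusted for c <= 0, since e.g. length == maxint
    // with c == 1 would wrap to minint.
    int TripCount(int lower, int n) {
      int count = 0;
      for (int i = lower; i != n; ++i) {  // safely treated as i < n when lower <= n
        ++count;
      }
      return count;  // n - lower iterations when lower <= n
    }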
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index 83126a5c4d..c2bbdccc29 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -180,6 +180,53 @@ bool InstructionSimplifierArm64Visitor::TryMergeIntoUsersShifterOperand(HInstruc
   return true;
 }
 
+bool InstructionSimplifierArm64Visitor::TryMergeNegatedInput(HBinaryOperation* op) {
+  DCHECK(op->IsAnd() || op->IsOr() || op->IsXor()) << op->DebugName();
+  HInstruction* left = op->GetLeft();
+  HInstruction* right = op->GetRight();
+
+  // Only consider the case where there is exactly one Not; with two Nots,
+  // De Morgan's laws should be applied instead.
+  if (left->IsNot() ^ right->IsNot()) {
+    HInstruction* hnot = (left->IsNot() ? left : right);
+    HInstruction* hother = (left->IsNot() ? right : left);
+
+    // Only do the simplification if the Not has only one use and can thus be
+    // safely removed. Even though ARM64 negated bitwise operations do not have
+    // an immediate variant (only register), we still do the simplification when
+    // `hother` is a constant, because it removes an instruction if the constant
+    // cannot be encoded as an immediate:
+    //   mov r0, #large_constant
+    //   neg r2, r1
+    //   and r0, r0, r2
+    // becomes:
+    //   mov r0, #large_constant
+    //   bic r0, r0, r1
+    if (hnot->HasOnlyOneNonEnvironmentUse()) {
+      // Replace code looking like
+      //    NOT tmp, mask
+      //    AND dst, src, tmp   (respectively ORR, EOR)
+      // with
+      //    BIC dst, src, mask  (respectively ORN, EON)
+      HInstruction* src = hnot->AsNot()->GetInput();
+
+      HArm64BitwiseNegatedRight* neg_op = new (GetGraph()->GetArena())
+          HArm64BitwiseNegatedRight(op->GetType(), op->GetKind(), hother, src, op->GetDexPc());
+
+      op->GetBlock()->ReplaceAndRemoveInstructionWith(op, neg_op);
+      hnot->GetBlock()->RemoveInstruction(hnot);
+      RecordSimplification();
+      return true;
+    }
+  }
+
+  return false;
+}
+
+void InstructionSimplifierArm64Visitor::VisitAnd(HAnd* instruction) {
+  TryMergeNegatedInput(instruction);
+}
+
 void InstructionSimplifierArm64Visitor::VisitArrayGet(HArrayGet* instruction) {
   TryExtractArrayAccessAddress(instruction,
                                instruction->GetArray(),
@@ -200,6 +247,10 @@ void InstructionSimplifierArm64Visitor::VisitMul(HMul* instruction) {
   }
 }
 
+void InstructionSimplifierArm64Visitor::VisitOr(HOr* instruction) {
+  TryMergeNegatedInput(instruction);
+}
+
 void InstructionSimplifierArm64Visitor::VisitShl(HShl* instruction) {
   if (instruction->InputAt(1)->IsConstant()) {
     TryMergeIntoUsersShifterOperand(instruction);
@@ -232,5 +283,9 @@ void InstructionSimplifierArm64Visitor::VisitUShr(HUShr* instruction) {
   }
 }
 
+void InstructionSimplifierArm64Visitor::VisitXor(HXor* instruction) {
+  TryMergeNegatedInput(instruction);
+}
+
 }  // namespace arm64
 }  // namespace art
diff --git a/compiler/optimizing/instruction_simplifier_arm64.h b/compiler/optimizing/instruction_simplifier_arm64.h
index 37a34c0373..cf8458713f 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.h
+++ b/compiler/optimizing/instruction_simplifier_arm64.h
@@ -51,14 +51,21 @@ class InstructionSimplifierArm64Visitor : public HGraphVisitor {
     return TryMergeIntoShifterOperand(use, bitfield_op, true);
   }
 
+  // For bitwise operations (And/Or/Xor) with a negated input, try to use
+  // a negated bitwise instruction.
+  bool TryMergeNegatedInput(HBinaryOperation* op);
+
   // HInstruction visitors, sorted alphabetically.
+  void VisitAnd(HAnd* instruction) OVERRIDE;
   void VisitArrayGet(HArrayGet* instruction) OVERRIDE;
   void VisitArraySet(HArraySet* instruction) OVERRIDE;
   void VisitMul(HMul* instruction) OVERRIDE;
+  void VisitOr(HOr* instruction) OVERRIDE;
   void VisitShl(HShl* instruction) OVERRIDE;
   void VisitShr(HShr* instruction) OVERRIDE;
   void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE;
   void VisitUShr(HUShr* instruction) OVERRIDE;
+  void VisitXor(HXor* instruction) OVERRIDE;
 
   OptimizingCompilerStats* stats_;
 };
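A source-level pattern that trips the new simplification, shown here in C++ for illustration (the equivalent Java expression lowers to the same Not-feeding-And shape; any single-use Not feeding And/Or/Xor qualifies):

    #include <cstdint>

    // Lowers to NOT tmp, mask; AND dst, src, tmp. The pass above merges the
    // pair into one HArm64BitwiseNegatedRight node, emitted as BIC dst, src, mask.
    uint32_t ClearMaskedBits(uint32_t src, uint32_t mask) {
      return src & ~mask;
    }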
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 8cbdcbbcaf..8e22f8677d 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -1909,6 +1909,69 @@ void IntrinsicCodeGeneratorARM::VisitShortReverseBytes(HInvoke* invoke) {
   __ revsh(out, in);
 }
 
+void IntrinsicLocationsBuilderARM::VisitStringGetCharsNoCheck(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kNoCall,
+                                                            kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetInAt(2, Location::RequiresRegister());
+  locations->SetInAt(3, Location::RequiresRegister());
+  locations->SetInAt(4, Location::RequiresRegister());
+
+  locations->AddTemp(Location::RequiresRegister());
+  locations->AddTemp(Location::RequiresRegister());
+  locations->AddTemp(Location::RequiresRegister());
+  locations->AddTemp(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorARM::VisitStringGetCharsNoCheck(HInvoke* invoke) {
+  ArmAssembler* assembler = GetAssembler();
+  LocationSummary* locations = invoke->GetLocations();
+
+  // Check assumption that sizeof(Char) is 2 (used in scaling below).
+  const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar);
+  DCHECK_EQ(char_size, 2u);
+
+  // Location of data in char array buffer.
+  const uint32_t data_offset = mirror::Array::DataOffset(char_size).Uint32Value();
+
+  // Location of char array data in string.
+  const uint32_t value_offset = mirror::String::ValueOffset().Uint32Value();
+
+  // void getCharsNoCheck(int srcBegin, int srcEnd, char[] dst, int dstBegin);
+  // Since getChars() calls getCharsNoCheck() - we use registers rather than constants.
+  Register srcObj = locations->InAt(0).AsRegister<Register>();
+  Register srcBegin = locations->InAt(1).AsRegister<Register>();
+  Register srcEnd = locations->InAt(2).AsRegister<Register>();
+  Register dstObj = locations->InAt(3).AsRegister<Register>();
+  Register dstBegin = locations->InAt(4).AsRegister<Register>();
+
+  Register src_ptr = locations->GetTemp(0).AsRegister<Register>();
+  Register src_ptr_end = locations->GetTemp(1).AsRegister<Register>();
+  Register dst_ptr = locations->GetTemp(2).AsRegister<Register>();
+  Register tmp = locations->GetTemp(3).AsRegister<Register>();
+
+  // src range to copy.
+  __ add(src_ptr, srcObj, ShifterOperand(value_offset));
+  __ add(src_ptr_end, src_ptr, ShifterOperand(srcEnd, LSL, 1));
+  __ add(src_ptr, src_ptr, ShifterOperand(srcBegin, LSL, 1));
+
+  // dst to be copied.
+  __ add(dst_ptr, dstObj, ShifterOperand(data_offset));
+  __ add(dst_ptr, dst_ptr, ShifterOperand(dstBegin, LSL, 1));
+
+  // Do the copy.
+  Label loop, done;
+  __ Bind(&loop);
+  __ cmp(src_ptr, ShifterOperand(src_ptr_end));
+  __ b(&done, EQ);
+  __ ldrh(tmp, Address(src_ptr, char_size, Address::PostIndex));
+  __ strh(tmp, Address(dst_ptr, char_size, Address::PostIndex));
+  __ b(&loop);
+  __ Bind(&done);
+}
+
 // Unimplemented intrinsics.
 
 #define UNIMPLEMENTED_INTRINSIC(Name) \
@@ -1933,7 +1996,6 @@ UNIMPLEMENTED_INTRINSIC(MathRoundFloat)    // Could be done by changing rounding
 UNIMPLEMENTED_INTRINSIC(UnsafeCASLong)     // High register pressure.
 UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
 UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
-UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
 UNIMPLEMENTED_INTRINSIC(FloatIsInfinite)
 UNIMPLEMENTED_INTRINSIC(DoubleIsInfinite)
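The emitted loop in the ARM implementation above (and its ARM64 twin below) is a straight halfword copy over the range [srcBegin, srcEnd). A C++ equivalent of what the post-indexed ldrh/strh pair implements, with hypothetical parameter names:

    #include <cstdint>

    // Copy the half-open range [src_begin, src_end) of 16-bit chars from the
    // string's backing array into dst at dst_begin; one ldrh + one strh per
    // iteration, with both pointers post-incremented by sizeof(uint16_t).
    void GetCharsNoCheck(const uint16_t* src_value, int32_t src_begin, int32_t src_end,
                         uint16_t* dst_data, int32_t dst_begin) {
      const uint16_t* src_ptr = src_value + src_begin;
      const uint16_t* src_ptr_end = src_value + src_end;
      uint16_t* dst_ptr = dst_data + dst_begin;
      while (src_ptr != src_ptr_end) {
        *dst_ptr++ = *src_ptr++;
      }
    }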
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index b5f15fe22d..19ccb3d568 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1602,6 +1602,69 @@ void IntrinsicCodeGeneratorARM64::VisitMathNextAfter(HInvoke* invoke) {
   GenFPToFPCall(invoke, GetVIXLAssembler(), codegen_, kQuickNextAfter);
 }
 
+void IntrinsicLocationsBuilderARM64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kNoCall,
+                                                            kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetInAt(2, Location::RequiresRegister());
+  locations->SetInAt(3, Location::RequiresRegister());
+  locations->SetInAt(4, Location::RequiresRegister());
+
+  locations->AddTemp(Location::RequiresRegister());
+  locations->AddTemp(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorARM64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
+  vixl::MacroAssembler* masm = GetVIXLAssembler();
+  LocationSummary* locations = invoke->GetLocations();
+
+  // Check assumption that sizeof(Char) is 2 (used in scaling below).
+  const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar);
+  DCHECK_EQ(char_size, 2u);
+
+  // Location of data in char array buffer.
+  const uint32_t data_offset = mirror::Array::DataOffset(char_size).Uint32Value();
+
+  // Location of char array data in string.
+  const uint32_t value_offset = mirror::String::ValueOffset().Uint32Value();
+
+  // void getCharsNoCheck(int srcBegin, int srcEnd, char[] dst, int dstBegin);
+  // Since getChars() calls getCharsNoCheck() - we use registers rather than constants.
+  Register srcObj = XRegisterFrom(locations->InAt(0));
+  Register srcBegin = XRegisterFrom(locations->InAt(1));
+  Register srcEnd = XRegisterFrom(locations->InAt(2));
+  Register dstObj = XRegisterFrom(locations->InAt(3));
+  Register dstBegin = XRegisterFrom(locations->InAt(4));
+
+  Register src_ptr = XRegisterFrom(locations->GetTemp(0));
+  Register src_ptr_end = XRegisterFrom(locations->GetTemp(1));
+
+  UseScratchRegisterScope temps(masm);
+  Register dst_ptr = temps.AcquireX();
+  Register tmp = temps.AcquireW();
+
+  // src range to copy.
+  __ Add(src_ptr, srcObj, Operand(value_offset));
+  __ Add(src_ptr_end, src_ptr, Operand(srcEnd, LSL, 1));
+  __ Add(src_ptr, src_ptr, Operand(srcBegin, LSL, 1));
+
+  // dst to be copied.
+  __ Add(dst_ptr, dstObj, Operand(data_offset));
+  __ Add(dst_ptr, dst_ptr, Operand(dstBegin, LSL, 1));
+
+  // Do the copy.
+  vixl::Label loop, done;
+  __ Bind(&loop);
+  __ Cmp(src_ptr, src_ptr_end);
+  __ B(&done, eq);
+  __ Ldrh(tmp, MemOperand(src_ptr, char_size, vixl::PostIndex));
+  __ Strh(tmp, MemOperand(dst_ptr, char_size, vixl::PostIndex));
+  __ B(&loop);
+  __ Bind(&done);
+}
+
 // Unimplemented intrinsics.
 
 #define UNIMPLEMENTED_INTRINSIC(Name) \
@@ -1615,7 +1678,6 @@ UNIMPLEMENTED_INTRINSIC(LongBitCount)
 UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
 UNIMPLEMENTED_INTRINSIC(SystemArrayCopy)
 UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
-UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
 UNIMPLEMENTED_INTRINSIC(FloatIsInfinite)
 UNIMPLEMENTED_INTRINSIC(DoubleIsInfinite)
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 174b29a806..9eddfc7e0e 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1270,6 +1270,7 @@ class HLoopInformationOutwardIterator : public ValueObject {
 #define FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M)
 #else
 #define FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M) \
+  M(Arm64BitwiseNegatedRight, Instruction) \
   M(Arm64DataProcWithShifterOp, Instruction) \
   M(Arm64IntermediateAddress, Instruction)
 #endif
diff --git a/compiler/optimizing/nodes_arm64.h b/compiler/optimizing/nodes_arm64.h
index 173852a55d..75a71e78b8 100644
--- a/compiler/optimizing/nodes_arm64.h
+++ b/compiler/optimizing/nodes_arm64.h
@@ -118,6 +118,66 @@ class HArm64IntermediateAddress : public HExpression<2> {
   DISALLOW_COPY_AND_ASSIGN(HArm64IntermediateAddress);
 };
 
+class HArm64BitwiseNegatedRight : public HBinaryOperation {
+ public:
+  HArm64BitwiseNegatedRight(Primitive::Type result_type,
+                            InstructionKind op,
+                            HInstruction* left,
+                            HInstruction* right,
+                            uint32_t dex_pc = kNoDexPc)
+      : HBinaryOperation(result_type, left, right, SideEffects::None(), dex_pc),
+        op_kind_(op) {
+    DCHECK(op == HInstruction::kAnd || op == HInstruction::kOr || op == HInstruction::kXor) << op;
+  }
+
+  template <typename T, typename U>
+  auto Compute(T x, U y) const -> decltype(x & ~y) {
+    static_assert(std::is_same<decltype(x & ~y), decltype(x | ~y)>::value &&
+                  std::is_same<decltype(x & ~y), decltype(x ^ ~y)>::value,
+                  "Inconsistent negated bitwise types");
+    switch (op_kind_) {
+      case HInstruction::kAnd:
+        return x & ~y;
+      case HInstruction::kOr:
+        return x | ~y;
+      case HInstruction::kXor:
+        return x ^ ~y;
+      default:
+        LOG(FATAL) << "Unreachable";
+        UNREACHABLE();
+    }
+  }
+
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+    return GetBlock()->GetGraph()->GetIntConstant(
+        Compute(x->GetValue(), y->GetValue()), GetDexPc());
+  }
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+    return GetBlock()->GetGraph()->GetLongConstant(
+        Compute(x->GetValue(), y->GetValue()), GetDexPc());
+  }
+  HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
+                      HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+    LOG(FATAL) << DebugName() << " is not defined for float values";
+    UNREACHABLE();
+  }
+  HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
+                      HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+    LOG(FATAL) << DebugName() << " is not defined for double values";
+    UNREACHABLE();
+  }
+
+  InstructionKind GetOpKind() const { return op_kind_; }
+
+  DECLARE_INSTRUCTION(Arm64BitwiseNegatedRight);
+
+ private:
+  // Specifies the bitwise operation, which will then be negated.
+  const InstructionKind op_kind_;
+
+  DISALLOW_COPY_AND_ASSIGN(HArm64BitwiseNegatedRight);
+};
+
 }  // namespace art
 
 #endif  // ART_COMPILER_OPTIMIZING_NODES_ARM64_H_
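The two-parameter Compute template above relies on the usual arithmetic conversions to pick its result type, and its static_assert pins all three negated ops to the same type. A minimal standalone illustration of that decltype contract (not ART code; the helper name is hypothetical):

    #include <cstdint>
    #include <type_traits>

    // Mirrors the shape of Compute: the return type is whatever x & ~y
    // promotes to, and all three negated ops must agree on it.
    template <typename T, typename U>
    auto ComputeAndNot(T x, U y) -> decltype(x & ~y) {
      static_assert(std::is_same<decltype(x & ~y), decltype(x | ~y)>::value &&
                    std::is_same<decltype(x & ~y), decltype(x ^ ~y)>::value,
                    "Inconsistent negated bitwise types");
      return x & ~y;
    }

    static_assert(std::is_same<decltype(ComputeAndNot(int32_t{0}, int64_t{0})),
                               int64_t>::value,
                  "mixed 32/64-bit operands promote to the 64-bit type");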