author Vladimir Marko <vmarko@google.com> 2023-04-05 12:21:09 +0000
committer Vladimír Marko <vmarko@google.com> 2023-04-05 16:26:03 +0000
commit d2be01cdd96522828adcea8d245f48915dc7e507 (patch)
tree 477f2d6c20712e021f20a15d4ca9af913e662e93
parent 6df5f2da76cc1e9d1a7bee74da5cc13f5761701a (diff)
Clean up `IsZeroBitPattern(HInstruction*)` usage.
Remove duplicate helper function and use the existing function
from `nodes.h` more.

Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Change-Id: Ib0d3b9b12c6950f06843cd71039562c5d6e7e4cf
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc   5
-rw-r--r--  compiler/optimizing/common_arm64.h            6
-rw-r--r--  compiler/optimizing/inliner.cc                2
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc      17
-rw-r--r--  compiler/optimizing/intrinsics_arm_vixl.cc    4
5 files changed, 14 insertions, 20 deletions
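For reference, the arm64-specific helper removed below duplicated `IsZeroBitPattern()` from `nodes.h`; the two functions have identical bodies, so every call site can switch with no behavior change. A minimal sketch of the shared definition (the `nodes.h` declaration itself is inferred from the subject line and call sites, as it is not part of this diff):

inline bool IsZeroBitPattern(HInstruction* instruction) {
  // True for a constant whose bit representation is all zeros
  // (e.g. 0, 0L, 0.0f, 0.0, null). On arm64 such a value can be
  // stored straight from the zero register, so call sites test for
  // it before deciding how to allocate input locations.
  return instruction->IsConstant() && instruction->AsConstant()->IsZeroBitPattern();
}

On arm64 this pairs with `InputCPURegisterOrZeroRegAt()`, which returns `xzr`/`wzr` for such inputs instead of requiring a register, as the `common_arm64.h` hunk below shows.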
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 03b2f9eff2..de9ec296f9 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -77,7 +77,6 @@ using helpers::InputFPRegisterAt;
using helpers::InputOperandAt;
using helpers::InputRegisterAt;
using helpers::Int64FromLocation;
-using helpers::IsConstantZeroBitPattern;
using helpers::LocationFrom;
using helpers::OperandFromMemOperand;
using helpers::OutputCPURegister;
@@ -2247,7 +2246,7 @@ void LocationsBuilderARM64::HandleFieldSet(HInstruction* instruction) {
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
- if (IsConstantZeroBitPattern(instruction->InputAt(1))) {
+ if (IsZeroBitPattern(instruction->InputAt(1))) {
locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant()));
} else if (DataType::IsFloatingPointType(instruction->InputAt(1)->GetType())) {
locations->SetInAt(1, Location::RequiresFpuRegister());
@@ -2849,7 +2848,7 @@ void LocationsBuilderARM64::VisitArraySet(HArraySet* instruction) {
needs_type_check ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
- if (IsConstantZeroBitPattern(instruction->InputAt(2))) {
+ if (IsZeroBitPattern(instruction->InputAt(2))) {
locations->SetInAt(2, Location::ConstantLocation(instruction->InputAt(2)->AsConstant()));
} else if (DataType::IsFloatingPointType(value_type)) {
locations->SetInAt(2, Location::RequiresFpuRegister());
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index 18751c4efe..35abb82565 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -154,7 +154,7 @@ inline vixl::aarch64::CPURegister InputCPURegisterOrZeroRegAt(HInstruction* inst
int index) {
HInstruction* input = instr->InputAt(index);
DataType::Type input_type = input->GetType();
- if (input->IsConstant() && input->AsConstant()->IsZeroBitPattern()) {
+ if (IsZeroBitPattern(input)) {
return (DataType::Size(input_type) >= vixl::aarch64::kXRegSizeInBytes)
? vixl::aarch64::Register(vixl::aarch64::xzr)
: vixl::aarch64::Register(vixl::aarch64::wzr);
@@ -381,10 +381,6 @@ inline bool ShifterOperandSupportsExtension(HInstruction* instruction) {
return instruction->IsAdd() || instruction->IsSub();
}

-inline bool IsConstantZeroBitPattern(const HInstruction* instruction) {
- return instruction->IsConstant() && instruction->AsConstant()->IsZeroBitPattern();
-}
-
} // namespace helpers
} // namespace arm64
} // namespace art
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 41dc5eb206..da498d4277 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -1695,7 +1695,7 @@ bool HInliner::TryPatternSubstitution(HInvoke* invoke_instruction,
bool needs_constructor_barrier = false;
for (size_t i = 0; i != number_of_iputs; ++i) {
HInstruction* value = GetInvokeInputForArgVRegIndex(invoke_instruction, iput_args[i]);
- if (!value->IsConstant() || !value->AsConstant()->IsZeroBitPattern()) {
+ if (!IsZeroBitPattern(value)) {
uint16_t field_index = iput_field_indexes[i];
bool is_final;
HInstanceFieldSet* iput =
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index a1f79ed70b..a7a69ba3b0 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -55,7 +55,6 @@ using helpers::DRegisterFrom;
using helpers::HeapOperand;
using helpers::LocationFrom;
using helpers::InputCPURegisterOrZeroRegAt;
-using helpers::IsConstantZeroBitPattern;
using helpers::OperandFrom;
using helpers::RegisterFrom;
using helpers::SRegisterFrom;
@@ -4757,7 +4756,7 @@ static LocationSummary* CreateVarHandleCommonLocations(HInvoke* invoke) {
uint32_t number_of_arguments = invoke->GetNumberOfArguments();
for (size_t arg_index = arguments_start; arg_index != number_of_arguments; ++arg_index) {
HInstruction* arg = invoke->InputAt(arg_index);
- if (IsConstantZeroBitPattern(arg)) {
+ if (IsZeroBitPattern(arg)) {
locations->SetInAt(arg_index, Location::ConstantLocation(arg->AsConstant()));
} else if (DataType::IsFloatingPointType(arg->GetType())) {
locations->SetInAt(arg_index, Location::RequiresFpuRegister());
@@ -5069,16 +5068,16 @@ static void CreateVarHandleCompareAndSetOrExchangeLocations(HInvoke* invoke, boo
// Add a temporary for old value and exclusive store result if floating point
// `expected` and/or `new_value` take scratch registers.
size_t available_scratch_registers =
- (IsConstantZeroBitPattern(invoke->InputAt(number_of_arguments - 1u)) ? 1u : 0u) +
- (IsConstantZeroBitPattern(invoke->InputAt(number_of_arguments - 2u)) ? 1u : 0u);
+ (IsZeroBitPattern(invoke->InputAt(number_of_arguments - 1u)) ? 1u : 0u) +
+ (IsZeroBitPattern(invoke->InputAt(number_of_arguments - 2u)) ? 1u : 0u);
size_t temps_needed = /* pointer, old value, store result */ 3u - available_scratch_registers;
// We can reuse the declaring class (if present) and offset temporary.
if (temps_needed > old_temp_count) {
locations->AddRegisterTemps(temps_needed - old_temp_count);
}
} else if ((value_type != DataType::Type::kReference && DataType::Size(value_type) != 1u) &&
- !IsConstantZeroBitPattern(invoke->InputAt(number_of_arguments - 2u)) &&
- !IsConstantZeroBitPattern(invoke->InputAt(number_of_arguments - 1u)) &&
+ !IsZeroBitPattern(invoke->InputAt(number_of_arguments - 2u)) &&
+ !IsZeroBitPattern(invoke->InputAt(number_of_arguments - 1u)) &&
GetExpectedVarHandleCoordinatesCount(invoke) == 2u) {
// Allocate a normal temporary for store result in the non-native byte order path
// because scratch registers are used by the byte-swapped `expected` and `new_value`.
@@ -5400,7 +5399,7 @@ static void CreateVarHandleGetAndUpdateLocations(HInvoke* invoke,
DCHECK(get_and_update_op == GetAndUpdateOp::kSet);
// We can reuse the declaring class temporary if present.
if (old_temp_count == 1u &&
- !IsConstantZeroBitPattern(invoke->InputAt(invoke->GetNumberOfArguments() - 1u))) {
+ !IsZeroBitPattern(invoke->InputAt(invoke->GetNumberOfArguments() - 1u))) {
// Add a temporary for `old_value` if floating point `new_value` takes a scratch register.
locations->AddTemp(Location::RequiresRegister());
}
@@ -5411,7 +5410,7 @@ static void CreateVarHandleGetAndUpdateLocations(HInvoke* invoke,
if (old_temp_count == 1u &&
(get_and_update_op != GetAndUpdateOp::kSet && get_and_update_op != GetAndUpdateOp::kAdd) &&
GetExpectedVarHandleCoordinatesCount(invoke) == 2u &&
- !IsConstantZeroBitPattern(invoke->InputAt(invoke->GetNumberOfArguments() - 1u))) {
+ !IsZeroBitPattern(invoke->InputAt(invoke->GetNumberOfArguments() - 1u))) {
DataType::Type value_type =
GetVarHandleExpectedValueType(invoke, /*expected_coordinates_count=*/ 2u);
if (value_type != DataType::Type::kReference && DataType::Size(value_type) != 1u) {
@@ -5731,7 +5730,7 @@ void VarHandleSlowPathARM64::EmitByteArrayViewCode(CodeGenerator* codegen_in) {

// Byte order check. For native byte order return to the main path.
if (access_mode_template == mirror::VarHandle::AccessModeTemplate::kSet &&
- IsConstantZeroBitPattern(invoke->InputAt(invoke->GetNumberOfArguments() - 1u))) {
+ IsZeroBitPattern(invoke->InputAt(invoke->GetNumberOfArguments() - 1u))) {
// There is no reason to differentiate between native byte order and byte-swap
// for setting a zero bit pattern. Just return to the main path.
__ B(GetNativeByteOrderLabel());
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 4df1088e20..266b5bc799 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -4590,7 +4590,7 @@ static void CreateVarHandleSetLocations(HInvoke* invoke,
HInstruction* arg = invoke->InputAt(number_of_arguments - 1u);
bool has_reverse_bytes_slow_path =
(expected_coordinates_count == 2u) &&
- !(arg->IsConstant() && arg->AsConstant()->IsZeroBitPattern());
+ !IsZeroBitPattern(arg);
if (Use64BitExclusiveLoadStore(atomic, codegen)) {
// We need 4 temporaries in the byte array view slow path. Otherwise, we need
// 2 or 3 temporaries for GenerateIntrinsicSet() depending on the value type.
@@ -5517,7 +5517,7 @@ void VarHandleSlowPathARMVIXL::EmitByteArrayViewCode(CodeGenerator* codegen_in)
// Byte order check. For native byte order return to the main path.
if (access_mode_template == mirror::VarHandle::AccessModeTemplate::kSet) {
HInstruction* arg = invoke->InputAt(invoke->GetNumberOfArguments() - 1u);
- if (arg->IsConstant() && arg->AsConstant()->IsZeroBitPattern()) {
+ if (IsZeroBitPattern(arg)) {
// There is no reason to differentiate between native byte order and byte-swap
// for setting a zero bit pattern. Just return to the main path.
__ B(GetNativeByteOrderLabel());