Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc      73
-rw-r--r--  compiler/optimizing/common_arm.h                    18
-rw-r--r--  compiler/optimizing/graph_checker.cc                 5
-rw-r--r--  compiler/optimizing/graph_test.cc                    1
-rw-r--r--  compiler/optimizing/induction_var_analysis.cc       61
-rw-r--r--  compiler/optimizing/induction_var_analysis.h        13
-rw-r--r--  compiler/optimizing/induction_var_analysis_test.cc  41
-rw-r--r--  compiler/optimizing/induction_var_range.cc           6
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc        3
-rw-r--r--  compiler/optimizing/intrinsics_arm_vixl.cc           2
-rw-r--r--  compiler/optimizing/linearize_test.cc                1
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc           3
-rw-r--r--  compiler/optimizing/pretty_printer.h                 5
-rw-r--r--  compiler/optimizing/pretty_printer_test.cc           1
-rw-r--r--  compiler/optimizing/ssa_test.cc                      5
15 files changed, 162 insertions, 76 deletions
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 55f3c3ceef..1c5aec01c6 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -46,8 +46,10 @@ using helpers::InputOperandAt;
using helpers::InputRegister;
using helpers::InputRegisterAt;
using helpers::InputSRegisterAt;
+using helpers::InputVRegister;
using helpers::InputVRegisterAt;
using helpers::Int32ConstantFrom;
+using helpers::Int64ConstantFrom;
using helpers::LocationFrom;
using helpers::LowRegisterFrom;
using helpers::LowSRegisterFrom;
@@ -56,6 +58,7 @@ using helpers::OutputSRegister;
using helpers::OutputVRegister;
using helpers::RegisterFrom;
using helpers::SRegisterFrom;
+using helpers::Uint64ConstantFrom;
using vixl::ExactAssemblyScope;
using vixl::CodeBufferCheckScope;
@@ -1378,7 +1381,7 @@ void CodeGeneratorARMVIXL::GenerateFrameEntry() {
if (!skip_overflow_check) {
UseScratchRegisterScope temps(GetVIXLAssembler());
vixl32::Register temp = temps.Acquire();
- __ Sub(temp, sp, static_cast<int32_t>(GetStackOverflowReservedBytes(kArm)));
+ __ Sub(temp, sp, Operand::From(GetStackOverflowReservedBytes(kArm)));
// The load must immediately precede RecordPcInfo.
ExactAssemblyScope aas(GetVIXLAssembler(),
vixl32::kMaxInstructionSizeInBytes,
@@ -1795,7 +1798,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateLongComparesAndJumps(HCondition* c
break;
}
if (right.IsConstant()) {
- int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
+ int64_t value = Int64ConstantFrom(right);
int32_t val_low = Low32Bits(value);
int32_t val_high = High32Bits(value);
@@ -1880,7 +1883,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateTestAndBranch(HInstruction* instru
__ B(true_target);
}
} else {
- DCHECK(cond->AsIntConstant()->IsFalse()) << cond->AsIntConstant()->GetValue();
+ DCHECK(cond->AsIntConstant()->IsFalse()) << Int32ConstantFrom(cond);
if (false_target != nullptr) {
__ B(false_target);
}
@@ -2482,9 +2485,7 @@ void InstructionCodeGeneratorARMVIXL::VisitNeg(HNeg* neg) {
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
- // TODO(VIXL): Consider introducing an InputVRegister()
- // helper function (equivalent to InputRegister()).
- __ Vneg(OutputVRegister(neg), InputVRegisterAt(neg, 0));
+ __ Vneg(OutputVRegister(neg), InputVRegister(neg));
break;
default:
@@ -2774,8 +2775,8 @@ void InstructionCodeGeneratorARMVIXL::VisitTypeConversion(HTypeConversion* conve
} else {
DCHECK(in.IsConstant());
DCHECK(in.GetConstant()->IsLongConstant());
- int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
- __ Mov(OutputRegister(conversion), static_cast<int32_t>(value));
+ int32_t value = Int32ConstantFrom(in);
+ __ Mov(OutputRegister(conversion), value);
}
break;
@@ -3114,8 +3115,8 @@ void InstructionCodeGeneratorARMVIXL::VisitMul(HMul* mul) {
// Extra checks to protect caused by the existence of R1_R2.
// The algorithm is wrong if out.hi is either in1.lo or in2.lo:
// (e.g. in1=r0_r1, in2=r2_r3 and out=r1_r2);
- DCHECK_NE(out_hi.GetCode(), in1_lo.GetCode());
- DCHECK_NE(out_hi.GetCode(), in2_lo.GetCode());
+ DCHECK(!out_hi.Is(in1_lo));
+ DCHECK(!out_hi.Is(in2_lo));
// input: in1 - 64 bits, in2 - 64 bits
// output: out
@@ -3155,7 +3156,7 @@ void InstructionCodeGeneratorARMVIXL::DivRemOneOrMinusOne(HBinaryOperation* inst
vixl32::Register out = OutputRegister(instruction);
vixl32::Register dividend = InputRegisterAt(instruction, 0);
- int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
+ int32_t imm = Int32ConstantFrom(second);
DCHECK(imm == 1 || imm == -1);
if (instruction->IsRem()) {
@@ -3180,7 +3181,7 @@ void InstructionCodeGeneratorARMVIXL::DivRemByPowerOfTwo(HBinaryOperation* instr
vixl32::Register out = OutputRegister(instruction);
vixl32::Register dividend = InputRegisterAt(instruction, 0);
vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
- int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
+ int32_t imm = Int32ConstantFrom(second);
uint32_t abs_imm = static_cast<uint32_t>(AbsOrMin(imm));
int ctz_imm = CTZ(abs_imm);
@@ -3253,7 +3254,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateDivRemConstantIntegral(
Location second = instruction->GetLocations()->InAt(1);
DCHECK(second.IsConstant());
- int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
+ int32_t imm = Int32ConstantFrom(second);
if (imm == 0) {
// Do not generate anything. DivZeroCheck would prevent any code to be executed.
} else if (imm == 1 || imm == -1) {
@@ -3287,7 +3288,7 @@ void LocationsBuilderARMVIXL::VisitDiv(HDiv* div) {
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::ConstantLocation(div->InputAt(1)->AsConstant()));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
- int32_t value = div->InputAt(1)->AsIntConstant()->GetValue();
+ int32_t value = Int32ConstantFrom(div->InputAt(1));
if (value == 1 || value == 0 || value == -1) {
// No temp register required.
} else {
@@ -3400,7 +3401,7 @@ void LocationsBuilderARMVIXL::VisitRem(HRem* rem) {
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::ConstantLocation(rem->InputAt(1)->AsConstant()));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
- int32_t value = rem->InputAt(1)->AsIntConstant()->GetValue();
+ int32_t value = Int32ConstantFrom(rem->InputAt(1));
if (value == 1 || value == 0 || value == -1) {
// No temp register required.
} else {
@@ -3535,7 +3536,7 @@ void InstructionCodeGeneratorARMVIXL::VisitDivZeroCheck(HDivZeroCheck* instructi
__ CompareAndBranchIfZero(InputRegisterAt(instruction, 0), slow_path->GetEntryLabel());
} else {
DCHECK(value.IsConstant()) << value;
- if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
+ if (Int32ConstantFrom(value) == 0) {
__ B(slow_path->GetEntryLabel());
}
}
@@ -3549,7 +3550,7 @@ void InstructionCodeGeneratorARMVIXL::VisitDivZeroCheck(HDivZeroCheck* instructi
__ B(eq, slow_path->GetEntryLabel());
} else {
DCHECK(value.IsConstant()) << value;
- if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
+ if (Int64ConstantFrom(value) == 0) {
__ B(slow_path->GetEntryLabel());
}
}
@@ -3759,7 +3760,7 @@ void InstructionCodeGeneratorARMVIXL::HandleShift(HBinaryOperation* op) {
__ Lsr(out_reg, first_reg, out_reg);
}
} else {
- int32_t cst = second.GetConstant()->AsIntConstant()->GetValue();
+ int32_t cst = Int32ConstantFrom(second);
uint32_t shift_value = cst & kMaxIntShiftDistance;
if (shift_value == 0) { // ARM does not support shifting with 0 immediate.
__ Mov(out_reg, first_reg);
@@ -3844,7 +3845,7 @@ void InstructionCodeGeneratorARMVIXL::HandleShift(HBinaryOperation* op) {
// Register allocator doesn't create partial overlap.
DCHECK(!o_l.Is(high));
DCHECK(!o_h.Is(low));
- int32_t cst = second.GetConstant()->AsIntConstant()->GetValue();
+ int32_t cst = Int32ConstantFrom(second);
uint32_t shift_value = cst & kMaxLongShiftDistance;
if (shift_value > 32) {
if (op->IsShl()) {
@@ -4911,7 +4912,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) {
codegen_->MaybeRecordImplicitNullCheck(instruction);
}
if (index.IsConstant()) {
- int32_t const_index = index.GetConstant()->AsIntConstant()->GetValue();
+ int32_t const_index = Int32ConstantFrom(index);
if (maybe_compressed_char_at) {
vixl32::Label uncompressed_load, done;
__ Lsrs(length, length, 1u); // LSRS has a 16-bit encoding, TST (immediate) does not.
@@ -4945,7 +4946,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) {
// `TryExtractArrayAccessAddress()`.
if (kIsDebugBuild) {
HIntermediateAddress* tmp = array_instr->AsIntermediateAddress();
- DCHECK_EQ(tmp->GetOffset()->AsIntConstant()->GetValueAsUint64(), data_offset);
+ DCHECK_EQ(Uint64ConstantFrom(tmp->GetOffset()), data_offset);
}
temp = obj;
} else {
@@ -4990,7 +4991,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) {
vixl32::Register out = OutputRegister(instruction);
if (index.IsConstant()) {
size_t offset =
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ (Int32ConstantFrom(index) << TIMES_4) + data_offset;
GetAssembler()->LoadFromOffset(kLoadWord, out, obj, offset);
// TODO(VIXL): Here and for other calls to `MaybeRecordImplicitNullCheck` in this method,
// we should use a scope and the assembler to emit the load instruction to guarantee that
@@ -5012,7 +5013,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) {
// `TryExtractArrayAccessAddress()`.
if (kIsDebugBuild) {
HIntermediateAddress* tmp = array_instr->AsIntermediateAddress();
- DCHECK_EQ(tmp->GetOffset()->AsIntConstant()->GetValueAsUint64(), data_offset);
+ DCHECK_EQ(Uint64ConstantFrom(tmp->GetOffset()), data_offset);
}
temp = obj;
} else {
@@ -5037,7 +5038,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) {
case Primitive::kPrimLong: {
if (index.IsConstant()) {
size_t offset =
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+ (Int32ConstantFrom(index) << TIMES_8) + data_offset;
GetAssembler()->LoadFromOffset(kLoadWordPair, LowRegisterFrom(out_loc), obj, offset);
} else {
UseScratchRegisterScope temps(GetVIXLAssembler());
@@ -5051,7 +5052,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) {
case Primitive::kPrimFloat: {
vixl32::SRegister out = SRegisterFrom(out_loc);
if (index.IsConstant()) {
- size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ size_t offset = (Int32ConstantFrom(index) << TIMES_4) + data_offset;
GetAssembler()->LoadSFromOffset(out, obj, offset);
} else {
UseScratchRegisterScope temps(GetVIXLAssembler());
@@ -5064,7 +5065,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) {
case Primitive::kPrimDouble: {
if (index.IsConstant()) {
- size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+ size_t offset = (Int32ConstantFrom(index) << TIMES_8) + data_offset;
GetAssembler()->LoadDFromOffset(DRegisterFrom(out_loc), obj, offset);
} else {
UseScratchRegisterScope temps(GetVIXLAssembler());
@@ -5138,7 +5139,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimChar:
case Primitive::kPrimInt: {
if (index.IsConstant()) {
- int32_t const_index = index.GetConstant()->AsIntConstant()->GetValue();
+ int32_t const_index = Int32ConstantFrom(index);
uint32_t full_offset =
data_offset + (const_index << Primitive::ComponentSizeShift(value_type));
StoreOperandType store_type = GetStoreOperandType(value_type);
@@ -5153,7 +5154,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) {
// `TryExtractArrayAccessAddress()`.
if (kIsDebugBuild) {
HIntermediateAddress* tmp = array_instr->AsIntermediateAddress();
- DCHECK(tmp->GetOffset()->AsIntConstant()->GetValueAsUint64() == data_offset);
+ DCHECK_EQ(Uint64ConstantFrom(tmp->GetOffset()), data_offset);
}
temp = array;
} else {
@@ -5174,7 +5175,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) {
// Just setting null.
if (index.IsConstant()) {
size_t offset =
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ (Int32ConstantFrom(index) << TIMES_4) + data_offset;
GetAssembler()->StoreToOffset(kStoreWord, value, array, offset);
} else {
DCHECK(index.IsRegister()) << index;
@@ -5210,7 +5211,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) {
__ CompareAndBranchIfNonZero(value, &non_zero);
if (index.IsConstant()) {
size_t offset =
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ (Int32ConstantFrom(index) << TIMES_4) + data_offset;
GetAssembler()->StoreToOffset(kStoreWord, value, array, offset);
} else {
DCHECK(index.IsRegister()) << index;
@@ -5284,7 +5285,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) {
if (index.IsConstant()) {
size_t offset =
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ (Int32ConstantFrom(index) << TIMES_4) + data_offset;
GetAssembler()->StoreToOffset(kStoreWord, source, array, offset);
} else {
DCHECK(index.IsRegister()) << index;
@@ -5321,7 +5322,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) {
Location value = locations->InAt(2);
if (index.IsConstant()) {
size_t offset =
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+ (Int32ConstantFrom(index) << TIMES_8) + data_offset;
GetAssembler()->StoreToOffset(kStoreWordPair, LowRegisterFrom(value), array, offset);
} else {
UseScratchRegisterScope temps(GetVIXLAssembler());
@@ -5336,7 +5337,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) {
Location value = locations->InAt(2);
DCHECK(value.IsFpuRegister());
if (index.IsConstant()) {
- size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ size_t offset = (Int32ConstantFrom(index) << TIMES_4) + data_offset;
GetAssembler()->StoreSToOffset(SRegisterFrom(value), array, offset);
} else {
UseScratchRegisterScope temps(GetVIXLAssembler());
@@ -5351,7 +5352,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) {
Location value = locations->InAt(2);
DCHECK(value.IsFpuRegisterPair());
if (index.IsConstant()) {
- size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+ size_t offset = (Int32ConstantFrom(index) << TIMES_8) + data_offset;
GetAssembler()->StoreDToOffset(DRegisterFrom(value), array, offset);
} else {
UseScratchRegisterScope temps(GetVIXLAssembler());
@@ -5416,7 +5417,7 @@ void InstructionCodeGeneratorARMVIXL::VisitIntermediateAddress(HIntermediateAddr
if (second.IsRegister()) {
__ Add(out, first, RegisterFrom(second));
} else {
- __ Add(out, first, second.GetConstant()->AsIntConstant()->GetValue());
+ __ Add(out, first, Int32ConstantFrom(second));
}
}
@@ -5612,7 +5613,7 @@ void ParallelMoveResolverARMVIXL::EmitMove(size_t index) {
GetAssembler()->StoreToOffset(kStoreWord, temp, sp, destination.GetStackIndex());
}
} else if (constant->IsLongConstant()) {
- int64_t value = constant->AsLongConstant()->GetValue();
+ int64_t value = Int64ConstantFrom(source);
if (destination.IsRegisterPair()) {
__ Mov(LowRegisterFrom(destination), Low32Bits(value));
__ Mov(HighRegisterFrom(destination), High32Bits(value));
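Note on the EmitMove hunk above: materializing a 64-bit constant into a register pair relies on splitting the value into its low and high 32-bit words. A minimal sketch of that split, assuming the usual semantics of the Low32Bits/High32Bits helpers (hypothetical stand-alone definitions, not the tree's own):

    #include <cstdint>

    // Assumed semantics: the low word is the truncated 64-bit value, the high
    // word is the value shifted down by 32 bits before truncation.
    static inline uint32_t Low32Bits(uint64_t value) {
      return static_cast<uint32_t>(value);
    }
    static inline uint32_t High32Bits(uint64_t value) {
      return static_cast<uint32_t>(value >> 32);
    }

    int main() {
      uint64_t value = 0x1122334455667788u;
      return (Low32Bits(value) == 0x55667788u &&
              High32Bits(value) == 0x11223344u) ? 0 : 1;
    }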
diff --git a/compiler/optimizing/common_arm.h b/compiler/optimizing/common_arm.h
index eabdbad13c..21c3ae628a 100644
--- a/compiler/optimizing/common_arm.h
+++ b/compiler/optimizing/common_arm.h
@@ -122,10 +122,16 @@ inline vixl::aarch32::VRegister InputVRegisterAt(HInstruction* instr, int input_
if (type == Primitive::kPrimFloat) {
return InputSRegisterAt(instr, input_index);
} else {
+ DCHECK_EQ(type, Primitive::kPrimDouble);
return InputDRegisterAt(instr, input_index);
}
}
+inline vixl::aarch32::VRegister InputVRegister(HInstruction* instr) {
+ DCHECK_EQ(instr->InputCount(), 1u);
+ return InputVRegisterAt(instr, 0);
+}
+
inline vixl::aarch32::Register OutputRegister(HInstruction* instr) {
return RegisterFrom(instr->GetLocations()->Out(), instr->GetType());
}
@@ -140,8 +146,7 @@ inline vixl::aarch32::Register InputRegister(HInstruction* instr) {
return InputRegisterAt(instr, 0);
}
-inline int32_t Int32ConstantFrom(Location location) {
- HConstant* instr = location.GetConstant();
+inline int32_t Int32ConstantFrom(HInstruction* instr) {
if (instr->IsIntConstant()) {
return instr->AsIntConstant()->GetValue();
} else if (instr->IsNullConstant()) {
@@ -155,6 +160,10 @@ inline int32_t Int32ConstantFrom(Location location) {
}
}
+inline int32_t Int32ConstantFrom(Location location) {
+ return Int32ConstantFrom(location.GetConstant());
+}
+
inline int64_t Int64ConstantFrom(Location location) {
HConstant* instr = location.GetConstant();
if (instr->IsIntConstant()) {
@@ -167,6 +176,11 @@ inline int64_t Int64ConstantFrom(Location location) {
}
}
+inline uint64_t Uint64ConstantFrom(HInstruction* instr) {
+ DCHECK(instr->IsConstant()) << instr->DebugName();
+ return instr->AsConstant()->GetValueAsUint64();
+}
+
inline vixl::aarch32::Operand OperandFrom(Location location, Primitive::Type type) {
if (location.IsRegister()) {
return vixl::aarch32::Operand(RegisterFrom(location, type));
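The common_arm.h hunk above is plain overload forwarding: the Location overload of Int32ConstantFrom now unwraps the constant and defers to the new HInstruction* overload, so call sites in the code generator can pass either form. A self-contained sketch of that shape, with hypothetical stand-in types rather than the real HInstruction/Location classes:

    #include <cassert>
    #include <cstdint>

    // Hypothetical stand-ins for HConstant and Location.
    struct FakeConstant { int32_t value; };
    struct FakeLocation {
      FakeConstant* constant;
      FakeConstant* GetConstant() const { return constant; }
    };

    // Primary overload: extract the value from the instruction-like object.
    inline int32_t Int32ConstantFrom(FakeConstant* instr) {
      assert(instr != nullptr);
      return instr->value;
    }

    // Forwarding overload: unwrap the location, defer to the overload above.
    inline int32_t Int32ConstantFrom(const FakeLocation& location) {
      return Int32ConstantFrom(location.GetConstant());
    }

    int main() {
      FakeConstant c{42};
      FakeLocation loc{&c};
      return (Int32ConstantFrom(loc) == Int32ConstantFrom(&c)) ? 0 : 1;
    }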
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index 188ee3a8d1..34b52a87b5 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -20,12 +20,15 @@
#include <string>
#include <sstream>
+#include "android-base/stringprintf.h"
+
#include "base/arena_containers.h"
#include "base/bit_vector-inl.h"
-#include "base/stringprintf.h"
namespace art {
+using android::base::StringPrintf;
+
static bool IsAllowedToJumpToExitBlock(HInstruction* instruction) {
return instruction->IsThrow() || instruction->IsReturn() || instruction->IsReturnVoid();
}
diff --git a/compiler/optimizing/graph_test.cc b/compiler/optimizing/graph_test.cc
index d5305646a8..28ee3a5e8b 100644
--- a/compiler/optimizing/graph_test.cc
+++ b/compiler/optimizing/graph_test.cc
@@ -15,7 +15,6 @@
*/
#include "base/arena_allocator.h"
-#include "base/stringprintf.h"
#include "builder.h"
#include "nodes.h"
#include "optimizing_unit_test.h"
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index c240c67e79..b21bc09cbd 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -211,7 +211,7 @@ uint32_t HInductionVarAnalysis::VisitDescendant(HLoopInformation* loop, HInstruc
void HInductionVarAnalysis::ClassifyTrivial(HLoopInformation* loop, HInstruction* instruction) {
InductionInfo* info = nullptr;
if (instruction->IsPhi()) {
- info = TransferPhi(loop, instruction, /* input_index */ 0);
+ info = TransferPhi(loop, instruction, /*input_index*/ 0, /*adjust_input_size*/ 0);
} else if (instruction->IsAdd()) {
info = TransferAddSub(LookupInfo(loop, instruction->InputAt(0)),
LookupInfo(loop, instruction->InputAt(1)), kAdd);
@@ -224,11 +224,13 @@ void HInductionVarAnalysis::ClassifyTrivial(HLoopInformation* loop, HInstruction
info = TransferMul(LookupInfo(loop, instruction->InputAt(0)),
LookupInfo(loop, instruction->InputAt(1)));
} else if (instruction->IsShl()) {
- HInstruction* mulc = GetMultConstantForShift(loop, instruction);
+ HInstruction* mulc = GetShiftConstant(loop, instruction, /*initial*/ nullptr);
if (mulc != nullptr) {
info = TransferMul(LookupInfo(loop, instruction->InputAt(0)),
LookupInfo(loop, mulc));
}
+ } else if (instruction->IsSelect()) {
+ info = TransferPhi(loop, instruction, /*input_index*/ 0, /*adjust_input_size*/ 1);
} else if (instruction->IsTypeConversion()) {
info = TransferCnv(LookupInfo(loop, instruction->InputAt(0)),
instruction->AsTypeConversion()->GetInputType(),
@@ -270,7 +272,7 @@ void HInductionVarAnalysis::ClassifyNonTrivial(HLoopInformation* loop) {
// Singleton is wrap-around induction if all internal links have the same meaning.
if (size == 1) {
- InductionInfo* update = TransferPhi(loop, phi, /* input_index */ 1);
+ InductionInfo* update = TransferPhi(loop, phi, /*input_index*/ 1, /*adjust_input_size*/ 0);
if (update != nullptr) {
AssignInfo(loop, phi, CreateInduction(kWrapAround,
kNop,
@@ -305,10 +307,15 @@ void HInductionVarAnalysis::ClassifyNonTrivial(HLoopInformation* loop) {
update = SolveOp(
loop, phi, instruction, instruction->InputAt(0), instruction->InputAt(1), kRem);
} else if (instruction->IsShl()) {
- HInstruction* mulc = GetMultConstantForShift(loop, instruction);
+ HInstruction* mulc = GetShiftConstant(loop, instruction, /*initial*/ nullptr);
if (mulc != nullptr) {
update = SolveOp(loop, phi, instruction, instruction->InputAt(0), mulc, kMul);
}
+ } else if (instruction->IsShr() || instruction->IsUShr()) {
+ HInstruction* divc = GetShiftConstant(loop, instruction, initial);
+ if (divc != nullptr) {
+ update = SolveOp(loop, phi, instruction, instruction->InputAt(0), divc, kDiv);
+ }
} else if (instruction->IsXor()) {
update = SolveOp(
loop, phi, instruction, instruction->InputAt(0), instruction->InputAt(1), kXor);
@@ -316,6 +323,8 @@ void HInductionVarAnalysis::ClassifyNonTrivial(HLoopInformation* loop) {
update = SolveTest(loop, phi, instruction, 0);
} else if (instruction->IsNotEqual()) {
update = SolveTest(loop, phi, instruction, 1);
+ } else if (instruction->IsSelect()) {
+ update = SolvePhi(instruction, /*input_index*/ 0, /*adjust_input_size*/ 1); // acts like Phi
} else if (instruction->IsTypeConversion()) {
update = SolveCnv(instruction->AsTypeConversion());
}
@@ -326,7 +335,7 @@ void HInductionVarAnalysis::ClassifyNonTrivial(HLoopInformation* loop) {
}
// Success if all internal links received the same temporary meaning.
- InductionInfo* induction = SolvePhi(phi, /* input_index */ 1);
+ InductionInfo* induction = SolvePhi(phi, /*input_index*/ 1, /*adjust_input_size*/ 0);
if (induction != nullptr) {
switch (induction->induction_class) {
case kInvariant:
@@ -385,12 +394,13 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::RotatePeriodicInduc
HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::TransferPhi(HLoopInformation* loop,
HInstruction* phi,
- size_t input_index) {
+ size_t input_index,
+ size_t adjust_input_size) {
// Match all phi inputs from input_index onwards exactly.
HInputsRef inputs = phi->GetInputs();
DCHECK_LT(input_index, inputs.size());
InductionInfo* a = LookupInfo(loop, inputs[input_index]);
- for (size_t i = input_index + 1; i < inputs.size(); i++) {
+ for (size_t i = input_index + 1, n = inputs.size() - adjust_input_size; i < n; i++) {
InductionInfo* b = LookupInfo(loop, inputs[i]);
if (!InductionEqual(a, b)) {
return nullptr;
@@ -504,13 +514,14 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::TransferCnv(Inducti
}
HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::SolvePhi(HInstruction* phi,
- size_t input_index) {
+ size_t input_index,
+ size_t adjust_input_size) {
// Match all phi inputs from input_index onwards exactly.
HInputsRef inputs = phi->GetInputs();
DCHECK_LT(input_index, inputs.size());
auto ita = cycle_.find(inputs[input_index]);
if (ita != cycle_.end()) {
- for (size_t i = input_index + 1; i < inputs.size(); i++) {
+ for (size_t i = input_index + 1, n = inputs.size() - adjust_input_size; i < n; i++) {
auto itb = cycle_.find(inputs[i]);
if (itb == cycle_.end() ||
!HInductionVarAnalysis::InductionEqual(ita->second, itb->second)) {
@@ -527,7 +538,7 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::SolvePhiAllInputs(
HInstruction* entry_phi,
HInstruction* phi) {
// Match all phi inputs.
- InductionInfo* match = SolvePhi(phi, /* input_index */ 0);
+ InductionInfo* match = SolvePhi(phi, /*input_index*/ 0, /*adjust_input_size*/ 0);
if (match != nullptr) {
return match;
}
@@ -542,7 +553,7 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::SolvePhiAllInputs(
InductionInfo* initial = LookupInfo(loop, entry_phi->InputAt(0));
return CreateInduction(kPeriodic, kNop, a, initial, /*fetch*/ nullptr, type_);
}
- InductionInfo* b = SolvePhi(phi, /* input_index */ 1);
+ InductionInfo* b = SolvePhi(phi, /*input_index*/ 1, /*adjust_input_size*/ 0);
if (b != nullptr && b->induction_class == kPeriodic) {
return CreateInduction(kPeriodic, kNop, a, b, /*fetch*/ nullptr, type_);
}
@@ -574,14 +585,14 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::SolveAddSub(HLoopIn
return CreateInvariantOp(op, a, b);
}
}
- } else if (op == kAdd && b->induction_class == kLinear) {
+ } else if (b->induction_class == kLinear) {
// Solve within a tight cycle that adds a term that is already classified as a linear
// induction for a polynomial induction k = k + i (represented as sum over linear terms).
if (x == entry_phi && entry_phi->InputCount() == 2 && instruction == entry_phi->InputAt(1)) {
InductionInfo* initial = LookupInfo(loop, entry_phi->InputAt(0));
return CreateInduction(kPolynomial,
kNop,
- b,
+ op == kAdd ? b : TransferNeg(b),
initial,
/*fetch*/ nullptr,
type_);
@@ -1038,13 +1049,23 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::CreateSimplifiedInv
return new (graph_->GetArena()) InductionInfo(kInvariant, op, a, b, nullptr, b->type);
}
-HInstruction* HInductionVarAnalysis::GetMultConstantForShift(HLoopInformation* loop,
- HInstruction* instruction) {
- // Obtain the constant needed to treat shift as equivalent multiplication. This yields an
- // existing instruction if the constant is already there. Otherwise, this has a side effect
- // on the HIR. The restriction on the shift factor avoids generating a negative constant
- // (viz. 1 << 31 and 1L << 63 set the sign bit). The code assumes that generalization for
- // shift factors outside [0,32) and [0,64) ranges is done by earlier simplification.
+HInstruction* HInductionVarAnalysis::GetShiftConstant(HLoopInformation* loop,
+ HInstruction* instruction,
+ InductionInfo* initial) {
+ DCHECK(instruction->IsShl() || instruction->IsShr() || instruction->IsUShr());
+ // Shift-rights are only the same as division for non-negative initial inputs.
+ // Otherwise we would round incorrectly.
+ if (initial != nullptr) {
+ int64_t value = -1;
+ if (!IsAtLeast(initial, &value) || value < 0) {
+ return nullptr;
+ }
+ }
+ // Obtain the constant needed to treat shift as equivalent multiplication or division.
+ // This yields an existing instruction if the constant is already there. Otherwise, this
+ // has a side effect on the HIR. The restriction on the shift factor avoids generating a
+ // negative constant (viz. 1 << 31 and 1L << 63 set the sign bit). The code assumes that
+ // generalization for shift factors outside [0,32) and [0,64) ranges is done earlier.
InductionInfo* b = LookupInfo(loop, instruction->InputAt(1));
int64_t value = -1;
if (IsExact(b, &value)) {
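The non-negative restriction added to GetShiftConstant reflects the rounding mismatch the new comment mentions: arithmetic shift right rounds toward negative infinity, while integer division in Java (and C/C++) rounds toward zero, so k >> 1 only equals k / 2 for non-negative k. A small hedged check of those values (plain C++ on a two's-complement target, mirroring Java int semantics; not code from the tree):

    #include <cassert>

    int main() {
      // For non-negative values the shift and the division agree:
      assert((100 >> 1) == 100 / 2);   // 50 == 50
      // For negative values they differ, so k >> 1 is not a geometric (/ 2)
      // induction when the initial value may be negative:
      assert((-1 >> 1) == -1);         // arithmetic shift keeps the sign bit
      assert((-1 / 2) == 0);           // division rounds toward zero
      assert((-7 >> 1) == -4);         // shift rounds toward negative infinity
      assert((-7 / 2) == -3);          // division rounds toward zero
      return 0;
    }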
diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h
index 4720f2d61c..293aa70525 100644
--- a/compiler/optimizing/induction_var_analysis.h
+++ b/compiler/optimizing/induction_var_analysis.h
@@ -115,7 +115,7 @@ class HInductionVarAnalysis : public HOptimization {
InductionInfo* op_a;
InductionInfo* op_b;
HInstruction* fetch;
- Primitive::Type type; // precision of induction
+ Primitive::Type type; // precision of operation
};
bool IsVisitedNode(HInstruction* instruction) const {
@@ -160,14 +160,17 @@ class HInductionVarAnalysis : public HOptimization {
InductionInfo* RotatePeriodicInduction(InductionInfo* induction, InductionInfo* last);
// Transfer operations.
- InductionInfo* TransferPhi(HLoopInformation* loop, HInstruction* phi, size_t input_index);
+ InductionInfo* TransferPhi(HLoopInformation* loop,
+ HInstruction* phi,
+ size_t input_index,
+ size_t adjust_input_size);
InductionInfo* TransferAddSub(InductionInfo* a, InductionInfo* b, InductionOp op);
InductionInfo* TransferNeg(InductionInfo* a);
InductionInfo* TransferMul(InductionInfo* a, InductionInfo* b);
InductionInfo* TransferCnv(InductionInfo* a, Primitive::Type from, Primitive::Type to);
// Solvers.
- InductionInfo* SolvePhi(HInstruction* phi, size_t input_index);
+ InductionInfo* SolvePhi(HInstruction* phi, size_t input_index, size_t adjust_input_size);
InductionInfo* SolvePhiAllInputs(HLoopInformation* loop,
HInstruction* entry_phi,
HInstruction* phi);
@@ -220,7 +223,9 @@ class HInductionVarAnalysis : public HOptimization {
InductionInfo* LookupInfo(HLoopInformation* loop, HInstruction* instruction);
InductionInfo* CreateConstant(int64_t value, Primitive::Type type);
InductionInfo* CreateSimplifiedInvariant(InductionOp op, InductionInfo* a, InductionInfo* b);
- HInstruction* GetMultConstantForShift(HLoopInformation* loop, HInstruction* instruction);
+ HInstruction* GetShiftConstant(HLoopInformation* loop,
+ HInstruction* instruction,
+ InductionInfo* initial);
void AssignCycle(HPhi* phi);
ArenaSet<HInstruction*>* LookupCycle(HPhi* phi);
diff --git a/compiler/optimizing/induction_var_analysis_test.cc b/compiler/optimizing/induction_var_analysis_test.cc
index 2d182f6483..f52a1aad5a 100644
--- a/compiler/optimizing/induction_var_analysis_test.cc
+++ b/compiler/optimizing/induction_var_analysis_test.cc
@@ -87,6 +87,7 @@ class InductionVarAnalysisTest : public CommonCompilerTest {
constant2_ = graph_->GetIntConstant(2);
constant7_ = graph_->GetIntConstant(7);
constant100_ = graph_->GetIntConstant(100);
+ constantm1_ = graph_->GetIntConstant(-1);
float_constant0_ = graph_->GetFloatConstant(0.0f);
return_->AddInstruction(new (&allocator_) HReturnVoid());
exit_->AddInstruction(new (&allocator_) HExit());
@@ -196,6 +197,7 @@ class InductionVarAnalysisTest : public CommonCompilerTest {
HInstruction* constant2_;
HInstruction* constant7_;
HInstruction* constant100_;
+ HInstruction* constantm1_;
HInstruction* float_constant0_;
// Loop specifics.
@@ -612,6 +614,45 @@ TEST_F(InductionVarAnalysisTest, FindGeometricDivInductionAndDerived) {
EXPECT_STREQ("", GetInductionInfo(div, 0).c_str());
}
+TEST_F(InductionVarAnalysisTest, FindGeometricShrInduction) {
+ // Setup:
+ // k = 100;
+ // for (int i = 0; i < 100; i++) {
+ // k = k >> 1; // geometric (/ 2)
+ // }
+ BuildLoopNest(1);
+ HPhi* k_header = InsertLoopPhi(0, 0);
+ k_header->AddInput(constant100_);
+
+ HInstruction* shr = InsertInstruction(
+ new (&allocator_) HShr(Primitive::kPrimInt, k_header, constant1_), 0);
+ k_header->AddInput(shr);
+ PerformInductionVarAnalysis();
+
+ // Note, only the phi in the cycle is classified.
+ EXPECT_STREQ("geo((100) * 2 ^ -i + (0)):PrimInt", GetInductionInfo(k_header, 0).c_str());
+ EXPECT_STREQ("", GetInductionInfo(shr, 0).c_str());
+}
+
+TEST_F(InductionVarAnalysisTest, FindNotGeometricShrInduction) {
+ // Setup:
+ // k = -1;
+ // for (int i = 0; i < 100; i++) {
+ // k = k >> 1; // initial value is negative
+ // }
+ BuildLoopNest(1);
+ HPhi* k_header = InsertLoopPhi(0, 0);
+ k_header->AddInput(constantm1_);
+
+ HInstruction* shr = InsertInstruction(
+ new (&allocator_) HShr(Primitive::kPrimInt, k_header, constant1_), 0);
+ k_header->AddInput(shr);
+ PerformInductionVarAnalysis();
+
+ EXPECT_STREQ("", GetInductionInfo(k_header, 0).c_str());
+ EXPECT_STREQ("", GetInductionInfo(shr, 0).c_str());
+}
+
TEST_F(InductionVarAnalysisTest, FindRemWrapAroundInductionAndDerived) {
// Setup:
// k = 100;
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index e665551012..7bcc3845e7 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -983,10 +983,10 @@ bool InductionVarRange::GenerateLastValuePolynomial(HInductionVarAnalysis::Induc
int64_t a = 0;
int64_t b = 0;
int64_t m = 0;
- if (IsConstant(info->op_a->op_a, kExact, &a) && a >= 0 &&
- IsConstant(info->op_a->op_b, kExact, &b) && b >= 0 &&
+ if (IsConstant(info->op_a->op_a, kExact, &a) &&
+ IsConstant(info->op_a->op_b, kExact, &b) &&
IsConstant(trip->op_a, kExact, &m) && m >= 1) {
- // Evaluate bounds on sum_i=0^m-1(a * i + b) + c with a,b >= 0 for known
+ // Evaluate bounds on sum_i=0^m-1(a * i + b) + c for known
// maximum index value m as a * (m * (m-1)) / 2 + b * m + c.
// TODO: generalize
HInstruction* c_instr = nullptr;
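The closed form in the GenerateLastValuePolynomial comment, a * (m * (m - 1)) / 2 + b * m + c, is exact for any integer a and b because m * (m - 1) is always even. A hedged brute-force sanity check of the formula (stand-alone sketch, not a test from the tree), including the negative a/b cases the hunk above newly admits:

    #include <cassert>
    #include <cstdint>

    // Brute-force sum_{i=0}^{m-1} (a*i + b) + c, compared against the closed
    // form a * (m * (m - 1)) / 2 + b * m + c.
    static int64_t BruteForce(int64_t a, int64_t b, int64_t c, int64_t m) {
      int64_t sum = c;
      for (int64_t i = 0; i < m; ++i) sum += a * i + b;
      return sum;
    }

    static int64_t ClosedForm(int64_t a, int64_t b, int64_t c, int64_t m) {
      return a * (m * (m - 1)) / 2 + b * m + c;
    }

    int main() {
      const int64_t as[] = {-3, 0, 2};
      const int64_t bs[] = {-5, 0, 7};
      for (int64_t a : as) {
        for (int64_t b : bs) {
          for (int64_t m = 1; m <= 10; ++m) {
            assert(BruteForce(a, b, -4, m) == ClosedForm(a, b, -4, m));
          }
        }
      }
      return 0;
    }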
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index c615df1f1d..439e3b66db 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -1897,7 +1897,8 @@ void InstructionSimplifierVisitor::SimplifyReturnThis(HInvoke* invoke) {
static bool NoEscapeForStringBufferReference(HInstruction* reference, HInstruction* user) {
if (user->IsInvokeStaticOrDirect()) {
// Any constructor on StringBuffer is okay.
- return user->AsInvokeStaticOrDirect()->GetResolvedMethod()->IsConstructor() &&
+ return user->AsInvokeStaticOrDirect()->GetResolvedMethod() != nullptr &&
+ user->AsInvokeStaticOrDirect()->GetResolvedMethod()->IsConstructor() &&
user->InputAt(0) == reference;
} else if (user->IsInvokeVirtual()) {
switch (user->AsInvokeVirtual()->GetIntrinsic()) {
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 95551c8fd9..641a5c92ea 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -1509,7 +1509,7 @@ static void GenerateVisitStringIndexOf(HInvoke* invoke,
SlowPathCodeARMVIXL* slow_path = nullptr;
HInstruction* code_point = invoke->InputAt(1);
if (code_point->IsIntConstant()) {
- if (static_cast<uint32_t>(code_point->AsIntConstant()->GetValue()) >
+ if (static_cast<uint32_t>(Int32ConstantFrom(code_point)) >
std::numeric_limits<uint16_t>::max()) {
// Always needs the slow-path. We could directly dispatch to it, but this case should be
// rare, so for simplicity just put the full slow-path down and branch unconditionally.
diff --git a/compiler/optimizing/linearize_test.cc b/compiler/optimizing/linearize_test.cc
index 13e14c53b5..3831aa6c91 100644
--- a/compiler/optimizing/linearize_test.cc
+++ b/compiler/optimizing/linearize_test.cc
@@ -18,7 +18,6 @@
#include "arch/x86/instruction_set_features_x86.h"
#include "base/arena_allocator.h"
-#include "base/stringprintf.h"
#include "builder.h"
#include "code_generator.h"
#include "code_generator_x86.h"
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index ba7012ab1a..0d0f62a55c 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1193,7 +1193,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
}
uint8_t* stack_map_data = nullptr;
uint8_t* roots_data = nullptr;
- code_cache->ReserveData(
+ uint32_t data_size = code_cache->ReserveData(
self, stack_map_size, number_of_roots, method, &stack_map_data, &roots_data);
if (stack_map_data == nullptr || roots_data == nullptr) {
return false;
@@ -1212,6 +1212,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
codegen->GetFpuSpillMask(),
code_allocator.GetMemory().data(),
code_allocator.GetSize(),
+ data_size,
osr,
roots,
codegen->GetGraph()->HasShouldDeoptimizeFlag(),
diff --git a/compiler/optimizing/pretty_printer.h b/compiler/optimizing/pretty_printer.h
index 5891350894..c6579dc5e0 100644
--- a/compiler/optimizing/pretty_printer.h
+++ b/compiler/optimizing/pretty_printer.h
@@ -17,7 +17,8 @@
#ifndef ART_COMPILER_OPTIMIZING_PRETTY_PRINTER_H_
#define ART_COMPILER_OPTIMIZING_PRETTY_PRINTER_H_
-#include "base/stringprintf.h"
+#include "android-base/stringprintf.h"
+
#include "nodes.h"
namespace art {
@@ -108,7 +109,7 @@ class StringPrettyPrinter : public HPrettyPrinter {
: HPrettyPrinter(graph), str_(""), current_block_(nullptr) { }
void PrintInt(int value) OVERRIDE {
- str_ += StringPrintf("%d", value);
+ str_ += android::base::StringPrintf("%d", value);
}
void PrintString(const char* value) OVERRIDE {
diff --git a/compiler/optimizing/pretty_printer_test.cc b/compiler/optimizing/pretty_printer_test.cc
index 951cdfbd8b..1af94f3445 100644
--- a/compiler/optimizing/pretty_printer_test.cc
+++ b/compiler/optimizing/pretty_printer_test.cc
@@ -15,7 +15,6 @@
*/
#include "base/arena_allocator.h"
-#include "base/stringprintf.h"
#include "builder.h"
#include "dex_file.h"
#include "dex_instruction.h"
diff --git a/compiler/optimizing/ssa_test.cc b/compiler/optimizing/ssa_test.cc
index 429763423c..f69f417efc 100644
--- a/compiler/optimizing/ssa_test.cc
+++ b/compiler/optimizing/ssa_test.cc
@@ -14,8 +14,9 @@
* limitations under the License.
*/
+#include "android-base/stringprintf.h"
+
#include "base/arena_allocator.h"
-#include "base/stringprintf.h"
#include "builder.h"
#include "dex_file.h"
#include "dex_instruction.h"
@@ -35,7 +36,7 @@ class SsaPrettyPrinter : public HPrettyPrinter {
explicit SsaPrettyPrinter(HGraph* graph) : HPrettyPrinter(graph), str_("") {}
void PrintInt(int value) OVERRIDE {
- str_ += StringPrintf("%d", value);
+ str_ += android::base::StringPrintf("%d", value);
}
void PrintString(const char* value) OVERRIDE {