Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc             25
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc          30
-rw-r--r--  compiler/optimizing/code_generator_mips.cc              16
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc            14
-rw-r--r--  compiler/optimizing/code_generator_vector_arm64.cc      24
-rw-r--r--  compiler/optimizing/code_generator_vector_arm_vixl.cc   20
-rw-r--r--  compiler/optimizing/code_generator_vector_mips.cc       44
-rw-r--r--  compiler/optimizing/code_generator_vector_mips64.cc     44
-rw-r--r--  compiler/optimizing/code_generator_vector_x86.cc        24
-rw-r--r--  compiler/optimizing/code_generator_vector_x86_64.cc     24
-rw-r--r--  compiler/optimizing/code_generator_x86.cc               23
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc            23
-rw-r--r--  compiler/optimizing/data_type-inl.h                      2
-rw-r--r--  compiler/optimizing/data_type.cc                         2
-rw-r--r--  compiler/optimizing/data_type.h                         27
-rw-r--r--  compiler/optimizing/graph_visualizer.cc                 11
-rw-r--r--  compiler/optimizing/inliner.cc                           9
-rw-r--r--  compiler/optimizing/loop_optimization.cc                68
-rw-r--r--  compiler/optimizing/nodes_vector.h                      72
-rw-r--r--  compiler/optimizing/nodes_vector_test.cc               168
-rw-r--r--  compiler/optimizing/register_allocation_resolver.cc      2
-rw-r--r--  compiler/optimizing/register_allocator_graph_color.cc    2
-rw-r--r--  compiler/optimizing/register_allocator_linear_scan.cc    2
23 files changed, 315 insertions, 361 deletions
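Two themes run through this change: the backends learn to bump a per-method hotness counter in compiled code when CountHotnessInCompiledCode() is set, and kUint32/kUint64 become first-class DataType values, which lets the vector nodes drop their per-instruction IsUnsigned() flag. As a rough C++ model of what each counter sequence below computes — MethodCounters and CountHotness are illustrative names, not ART's, and the 16-bit width is inferred from the Ldrh/Strh and addw encodings:

    #include <cstdint>

    // Illustrative stand-in for the ArtMethod hotness field; the real field
    // and ArtMethod::HotnessCountOffset() live in the ART runtime, not here.
    struct MethodCounters {
      uint16_t hotness_count_;
    };

    // What the emitted load/add/store sequence computes: a plain, wrapping
    // 16-bit increment with no atomicity guarantees.
    inline void CountHotness(MethodCounters* method) {
      method->hotness_count_ = static_cast<uint16_t>(method->hotness_count_ + 1u);
    }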
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 13bbffa1e3..3fd88e3e18 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1488,6 +1488,14 @@ void CodeGeneratorARM64::GenerateFrameEntry() {
MacroAssembler* masm = GetVIXLAssembler();
__ Bind(&frame_entry_label_);
+ if (GetCompilerOptions().CountHotnessInCompiledCode()) {
+ UseScratchRegisterScope temps(masm);
+ Register temp = temps.AcquireX();
+ __ Ldrh(temp, MemOperand(kArtMethodRegister, ArtMethod::HotnessCountOffset().Int32Value()));
+ __ Add(temp, temp, 1);
+ __ Strh(temp, MemOperand(kArtMethodRegister, ArtMethod::HotnessCountOffset().Int32Value()));
+ }
+
bool do_overflow_check =
FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm64) || !IsLeafMethod();
if (do_overflow_check) {
@@ -1881,6 +1889,8 @@ void CodeGeneratorARM64::Load(DataType::Type type,
DCHECK_EQ(dst.Is64Bits(), DataType::Is64BitType(type));
__ Ldr(dst, src);
break;
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
}
@@ -1959,6 +1969,8 @@ void CodeGeneratorARM64::LoadAcquire(HInstruction* instruction,
__ Fmov(FPRegister(dst), temp);
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
}
@@ -1986,6 +1998,8 @@ void CodeGeneratorARM64::Store(DataType::Type type,
DCHECK_EQ(src.Is64Bits(), DataType::Is64BitType(type));
__ Str(src, dst);
break;
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
}
@@ -2063,6 +2077,8 @@ void CodeGeneratorARM64::StoreRelease(HInstruction* instruction,
}
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
}
@@ -3501,6 +3517,15 @@ void InstructionCodeGeneratorARM64::HandleGoto(HInstruction* got, HBasicBlock* s
HLoopInformation* info = block->GetLoopInformation();
if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
+ if (codegen_->GetCompilerOptions().CountHotnessInCompiledCode()) {
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ Register temp1 = temps.AcquireX();
+ Register temp2 = temps.AcquireX();
+ __ Ldr(temp1, MemOperand(sp, 0));
+ __ Ldrh(temp2, MemOperand(temp1, ArtMethod::HotnessCountOffset().Int32Value()));
+ __ Add(temp2, temp2, 1);
+ __ Strh(temp2, MemOperand(temp1, ArtMethod::HotnessCountOffset().Int32Value()));
+ }
GenerateSuspendCheck(info->GetSuspendCheck(), successor);
return;
}
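On back edges the method register may no longer hold the ArtMethod*, so the arm64 hunk above reloads it from the first stack slot of the managed frame before doing the same increment. A minimal sketch of that path, assuming the frame spills the method pointer at SP + 0 (as the Ldr from MemOperand(sp, 0) implies); CountBackEdge is an illustrative name:

    #include <cstddef>
    #include <cstdint>

    inline void CountBackEdge(void** managed_sp, size_t hotness_offset) {
      uint8_t* method = static_cast<uint8_t*>(managed_sp[0]);   // Ldr temp1, [sp, #0]
      auto* counter = reinterpret_cast<uint16_t*>(method + hotness_offset);
      *counter = static_cast<uint16_t>(*counter + 1u);          // Ldrh / Add / Strh
    }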
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 577fe00dcd..704a0d3b87 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2485,6 +2485,14 @@ void CodeGeneratorARMVIXL::GenerateFrameEntry() {
DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
__ Bind(&frame_entry_label_);
+ if (GetCompilerOptions().CountHotnessInCompiledCode()) {
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ __ Ldrh(temp, MemOperand(kMethodRegister, ArtMethod::HotnessCountOffset().Int32Value()));
+ __ Add(temp, temp, 1);
+ __ Strh(temp, MemOperand(kMethodRegister, ArtMethod::HotnessCountOffset().Int32Value()));
+ }
+
if (HasEmptyFrame()) {
return;
}
@@ -2642,6 +2650,8 @@ Location InvokeDexCallingConventionVisitorARMVIXL::GetNextLocation(DataType::Typ
}
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unexpected parameter type " << type;
break;
@@ -2657,6 +2667,7 @@ Location InvokeDexCallingConventionVisitorARMVIXL::GetReturnLocation(DataType::T
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
+ case DataType::Type::kUint32:
case DataType::Type::kInt32: {
return LocationFrom(r0);
}
@@ -2665,6 +2676,7 @@ Location InvokeDexCallingConventionVisitorARMVIXL::GetReturnLocation(DataType::T
return LocationFrom(s0);
}
+ case DataType::Type::kUint64:
case DataType::Type::kInt64: {
return LocationFrom(r0, r1);
}
@@ -2786,6 +2798,16 @@ void InstructionCodeGeneratorARMVIXL::HandleGoto(HInstruction* got, HBasicBlock*
HLoopInformation* info = block->GetLoopInformation();
if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
+ if (codegen_->GetCompilerOptions().CountHotnessInCompiledCode()) {
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ __ Push(vixl32::Register(kMethodRegister));
+ GetAssembler()->LoadFromOffset(kLoadWord, kMethodRegister, sp, kArmWordSize);
+ __ Ldrh(temp, MemOperand(kMethodRegister, ArtMethod::HotnessCountOffset().Int32Value()));
+ __ Add(temp, temp, 1);
+ __ Strh(temp, MemOperand(kMethodRegister, ArtMethod::HotnessCountOffset().Int32Value()));
+ __ Pop(vixl32::Register(kMethodRegister));
+ }
GenerateSuspendCheck(info->GetSuspendCheck(), successor);
return;
}
@@ -5494,6 +5516,8 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldSet(HInstruction* instruction,
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << field_type;
UNREACHABLE();
@@ -5738,6 +5762,8 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldGet(HInstruction* instruction,
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << load_type;
UNREACHABLE();
@@ -6230,6 +6256,8 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
UNREACHABLE();
@@ -6519,6 +6547,8 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << value_type;
UNREACHABLE();
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 5c8e46ed19..36c921986b 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -58,9 +58,11 @@ Location MipsReturnLocation(DataType::Type return_type) {
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
+ case DataType::Type::kUint32:
case DataType::Type::kInt32:
return Location::RegisterLocation(V0);
+ case DataType::Type::kUint64:
case DataType::Type::kInt64:
return Location::RegisterPairLocation(V0, V1);
@@ -140,6 +142,8 @@ Location InvokeDexCallingConventionVisitorMIPS::GetNextLocation(DataType::Type t
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unexpected parameter type " << type;
break;
@@ -1276,6 +1280,10 @@ static dwarf::Reg DWARFReg(Register reg) {
void CodeGeneratorMIPS::GenerateFrameEntry() {
__ Bind(&frame_entry_label_);
+ if (GetCompilerOptions().CountHotnessInCompiledCode()) {
+ LOG(WARNING) << "Unimplemented hotness update in mips backend";
+ }
+
bool do_overflow_check =
FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kMips) || !IsLeafMethod();
@@ -2817,6 +2825,8 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
@@ -3132,6 +3142,8 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
@@ -6316,6 +6328,8 @@ void InstructionCodeGeneratorMIPS::HandleFieldGet(HInstruction* instruction,
case DataType::Type::kFloat64:
load_type = kLoadDoubleword;
break;
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
UNREACHABLE();
@@ -6469,6 +6483,8 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction,
case DataType::Type::kFloat64:
store_type = kStoreDoubleword;
break;
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
UNREACHABLE();
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index bcfe051c90..6657582e2a 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -55,8 +55,10 @@ Location Mips64ReturnLocation(DataType::Type return_type) {
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
+ case DataType::Type::kUint32:
case DataType::Type::kInt32:
case DataType::Type::kReference:
+ case DataType::Type::kUint64:
case DataType::Type::kInt64:
return Location::RegisterLocation(V0);
@@ -1079,6 +1081,10 @@ static dwarf::Reg DWARFReg(FpuRegister reg) {
void CodeGeneratorMIPS64::GenerateFrameEntry() {
__ Bind(&frame_entry_label_);
+ if (GetCompilerOptions().CountHotnessInCompiledCode()) {
+ LOG(WARNING) << "Unimplemented hotness update in mips64 backend";
+ }
+
bool do_overflow_check =
FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kMips64) || !IsLeafMethod();
@@ -2404,6 +2410,8 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
@@ -2707,6 +2715,8 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
@@ -4794,6 +4804,8 @@ void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction,
case DataType::Type::kReference:
load_type = kLoadUnsignedWord;
break;
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
UNREACHABLE();
@@ -4887,6 +4899,8 @@ void InstructionCodeGeneratorMIPS64::HandleFieldSet(HInstruction* instruction,
case DataType::Type::kFloat64:
store_type = kStoreDoubleword;
break;
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
UNREACHABLE();
diff --git a/compiler/optimizing/code_generator_vector_arm64.cc b/compiler/optimizing/code_generator_vector_arm64.cc
index 152a59c208..174efdf115 100644
--- a/compiler/optimizing/code_generator_vector_arm64.cc
+++ b/compiler/optimizing/code_generator_vector_arm64.cc
@@ -606,22 +606,20 @@ void InstructionCodeGeneratorARM64::VisitVecMin(HVecMin* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Smin(dst.V8H(), lhs.V8H(), rhs.V8H());
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Umin(dst.V4S(), lhs.V4S(), rhs.V4S());
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Umin(dst.V4S(), lhs.V4S(), rhs.V4S());
- } else {
- __ Smin(dst.V4S(), lhs.V4S(), rhs.V4S());
- }
+ __ Smin(dst.V4S(), lhs.V4S(), rhs.V4S());
break;
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ Fmin(dst.V4S(), lhs.V4S(), rhs.V4S());
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ Fmin(dst.V2D(), lhs.V2D(), rhs.V2D());
break;
default:
@@ -656,22 +654,20 @@ void InstructionCodeGeneratorARM64::VisitVecMax(HVecMax* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Smax(dst.V8H(), lhs.V8H(), rhs.V8H());
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Umax(dst.V4S(), lhs.V4S(), rhs.V4S());
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Umax(dst.V4S(), lhs.V4S(), rhs.V4S());
- } else {
- __ Smax(dst.V4S(), lhs.V4S(), rhs.V4S());
- }
+ __ Smax(dst.V4S(), lhs.V4S(), rhs.V4S());
break;
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ Fmax(dst.V4S(), lhs.V4S(), rhs.V4S());
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ Fmax(dst.V2D(), lhs.V2D(), rhs.V2D());
break;
default:
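The rewritten min/max switches make the pattern of this change concrete: signedness now lives in the packed type itself, so kUint32 selects the unsigned instruction directly and the IsUnsigned() branch under kInt32 disappears. A compilable miniature of the new dispatch shape (Type and MinOpcodeFor are illustrative, not ART names):

    enum class Type { kUint32, kInt32 };

    const char* MinOpcodeFor(Type packed_type) {
      switch (packed_type) {
        case Type::kUint32: return "umin";  // was: IsUnsigned() ? Umin : Smin
        case Type::kInt32:  return "smin";
      }
      return nullptr;  // unreachable for the two types modeled here
    }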
diff --git a/compiler/optimizing/code_generator_vector_arm_vixl.cc b/compiler/optimizing/code_generator_vector_arm_vixl.cc
index cc470ddb2e..7c3155ab73 100644
--- a/compiler/optimizing/code_generator_vector_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_vector_arm_vixl.cc
@@ -431,13 +431,13 @@ void InstructionCodeGeneratorARMVIXL::VisitVecMin(HVecMin* instruction) {
DCHECK_EQ(4u, instruction->GetVectorLength());
__ Vmin(DataTypeValue::S16, dst, lhs, rhs);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Vmin(DataTypeValue::U32, dst, lhs, rhs);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(2u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Vmin(DataTypeValue::U32, dst, lhs, rhs);
- } else {
- __ Vmin(DataTypeValue::S32, dst, lhs, rhs);
- }
+ __ Vmin(DataTypeValue::S32, dst, lhs, rhs);
break;
default:
LOG(FATAL) << "Unsupported SIMD type";
@@ -471,13 +471,13 @@ void InstructionCodeGeneratorARMVIXL::VisitVecMax(HVecMax* instruction) {
DCHECK_EQ(4u, instruction->GetVectorLength());
__ Vmax(DataTypeValue::S16, dst, lhs, rhs);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Vmax(DataTypeValue::U32, dst, lhs, rhs);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(2u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Vmax(DataTypeValue::U32, dst, lhs, rhs);
- } else {
- __ Vmax(DataTypeValue::S32, dst, lhs, rhs);
- }
+ __ Vmax(DataTypeValue::S32, dst, lhs, rhs);
break;
default:
LOG(FATAL) << "Unsupported SIMD type";
diff --git a/compiler/optimizing/code_generator_vector_mips.cc b/compiler/optimizing/code_generator_vector_mips.cc
index 3cf150a6b8..ed9de96496 100644
--- a/compiler/optimizing/code_generator_vector_mips.cc
+++ b/compiler/optimizing/code_generator_vector_mips.cc
@@ -613,32 +613,30 @@ void InstructionCodeGeneratorMIPS::VisitVecMin(HVecMin* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Min_sH(dst, lhs, rhs);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Min_uW(dst, lhs, rhs);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Min_uW(dst, lhs, rhs);
- } else {
- __ Min_sW(dst, lhs, rhs);
- }
+ __ Min_sW(dst, lhs, rhs);
+ break;
+ case DataType::Type::kUint64:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Min_uD(dst, lhs, rhs);
break;
case DataType::Type::kInt64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Min_uD(dst, lhs, rhs);
- } else {
- __ Min_sD(dst, lhs, rhs);
- }
+ __ Min_sD(dst, lhs, rhs);
break;
// When one of the arguments is NaN, fmin.df returns the other argument, but Java expects a NaN value.
// TODO: Fix min(x, NaN) cases for float and double.
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ FminW(dst, lhs, rhs);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ FminD(dst, lhs, rhs);
break;
default:
@@ -673,32 +671,30 @@ void InstructionCodeGeneratorMIPS::VisitVecMax(HVecMax* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Max_sH(dst, lhs, rhs);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Max_uW(dst, lhs, rhs);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Max_uW(dst, lhs, rhs);
- } else {
- __ Max_sW(dst, lhs, rhs);
- }
+ __ Max_sW(dst, lhs, rhs);
+ break;
+ case DataType::Type::kUint64:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Max_uD(dst, lhs, rhs);
break;
case DataType::Type::kInt64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Max_uD(dst, lhs, rhs);
- } else {
- __ Max_sD(dst, lhs, rhs);
- }
+ __ Max_sD(dst, lhs, rhs);
break;
// When one of the arguments is NaN, fmax.df returns the other argument, but Java expects a NaN value.
// TODO: Fix max(x, NaN) cases for float and double.
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ FmaxW(dst, lhs, rhs);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ FmaxD(dst, lhs, rhs);
break;
default:
diff --git a/compiler/optimizing/code_generator_vector_mips64.cc b/compiler/optimizing/code_generator_vector_mips64.cc
index 2d69533f21..9ea55ec8d7 100644
--- a/compiler/optimizing/code_generator_vector_mips64.cc
+++ b/compiler/optimizing/code_generator_vector_mips64.cc
@@ -612,32 +612,30 @@ void InstructionCodeGeneratorMIPS64::VisitVecMin(HVecMin* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Min_sH(dst, lhs, rhs);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Min_uW(dst, lhs, rhs);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Min_uW(dst, lhs, rhs);
- } else {
- __ Min_sW(dst, lhs, rhs);
- }
+ __ Min_sW(dst, lhs, rhs);
+ break;
+ case DataType::Type::kUint64:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Min_uD(dst, lhs, rhs);
break;
case DataType::Type::kInt64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Min_uD(dst, lhs, rhs);
- } else {
- __ Min_sD(dst, lhs, rhs);
- }
+ __ Min_sD(dst, lhs, rhs);
break;
// When one of the arguments is NaN, fmin.df returns the other argument, but Java expects a NaN value.
// TODO: Fix min(x, NaN) cases for float and double.
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ FminW(dst, lhs, rhs);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ FminD(dst, lhs, rhs);
break;
default:
@@ -672,32 +670,30 @@ void InstructionCodeGeneratorMIPS64::VisitVecMax(HVecMax* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Max_sH(dst, lhs, rhs);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Max_uW(dst, lhs, rhs);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Max_uW(dst, lhs, rhs);
- } else {
- __ Max_sW(dst, lhs, rhs);
- }
+ __ Max_sW(dst, lhs, rhs);
+ break;
+ case DataType::Type::kUint64:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Max_uD(dst, lhs, rhs);
break;
case DataType::Type::kInt64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Max_uD(dst, lhs, rhs);
- } else {
- __ Max_sD(dst, lhs, rhs);
- }
+ __ Max_sD(dst, lhs, rhs);
break;
// When one of the arguments is NaN, fmax.df returns the other argument, but Java expects a NaN value.
// TODO: Fix max(x, NaN) cases for float and double.
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ FmaxW(dst, lhs, rhs);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ FmaxD(dst, lhs, rhs);
break;
default:
diff --git a/compiler/optimizing/code_generator_vector_x86.cc b/compiler/optimizing/code_generator_vector_x86.cc
index 7b4b85d2fe..f2ffccc887 100644
--- a/compiler/optimizing/code_generator_vector_x86.cc
+++ b/compiler/optimizing/code_generator_vector_x86.cc
@@ -640,23 +640,21 @@ void InstructionCodeGeneratorX86::VisitVecMin(HVecMin* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ pminsw(dst, src);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ pminud(dst, src);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ pminud(dst, src);
- } else {
- __ pminsd(dst, src);
- }
+ __ pminsd(dst, src);
break;
// Next cases are sloppy wrt 0.0 vs -0.0.
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ minps(dst, src);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ minpd(dst, src);
break;
default:
@@ -691,23 +689,21 @@ void InstructionCodeGeneratorX86::VisitVecMax(HVecMax* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ pmaxsw(dst, src);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ pmaxud(dst, src);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ pmaxud(dst, src);
- } else {
- __ pmaxsd(dst, src);
- }
+ __ pmaxsd(dst, src);
break;
// Next cases are sloppy wrt 0.0 vs -0.0.
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ maxps(dst, src);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ maxpd(dst, src);
break;
default:
diff --git a/compiler/optimizing/code_generator_vector_x86_64.cc b/compiler/optimizing/code_generator_vector_x86_64.cc
index 107030e6c2..e2b0485f89 100644
--- a/compiler/optimizing/code_generator_vector_x86_64.cc
+++ b/compiler/optimizing/code_generator_vector_x86_64.cc
@@ -623,23 +623,21 @@ void InstructionCodeGeneratorX86_64::VisitVecMin(HVecMin* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ pminsw(dst, src);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ pminud(dst, src);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ pminud(dst, src);
- } else {
- __ pminsd(dst, src);
- }
+ __ pminsd(dst, src);
break;
// Next cases are sloppy wrt 0.0 vs -0.0.
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ minps(dst, src);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ minpd(dst, src);
break;
default:
@@ -674,23 +672,21 @@ void InstructionCodeGeneratorX86_64::VisitVecMax(HVecMax* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ pmaxsw(dst, src);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ pmaxud(dst, src);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ pmaxud(dst, src);
- } else {
- __ pmaxsd(dst, src);
- }
+ __ pmaxsd(dst, src);
break;
// Next cases are sloppy wrt 0.0 vs -0.0.
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ maxps(dst, src);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ maxpd(dst, src);
break;
default:
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index cbe9e0a35c..5fede80bc7 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1061,6 +1061,11 @@ void CodeGeneratorX86::GenerateFrameEntry() {
IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kX86);
DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
+ if (GetCompilerOptions().CountHotnessInCompiledCode()) {
+ __ addw(Address(kMethodRegisterArgument, ArtMethod::HotnessCountOffset().Int32Value()),
+ Immediate(1));
+ }
+
if (!skip_overflow_check) {
size_t reserved_bytes = GetStackOverflowReservedBytes(InstructionSet::kX86);
__ testl(EAX, Address(ESP, -static_cast<int32_t>(reserved_bytes)));
@@ -1129,9 +1134,11 @@ Location InvokeDexCallingConventionVisitorX86::GetReturnLocation(DataType::Type
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
+ case DataType::Type::kUint32:
case DataType::Type::kInt32:
return Location::RegisterLocation(EAX);
+ case DataType::Type::kUint64:
case DataType::Type::kInt64:
return Location::RegisterPairLocation(EAX, EDX);
@@ -1201,6 +1208,8 @@ Location InvokeDexCallingConventionVisitorX86::GetNextLocation(DataType::Type ty
}
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unexpected parameter type " << type;
break;
@@ -1357,6 +1366,12 @@ void InstructionCodeGeneratorX86::HandleGoto(HInstruction* got, HBasicBlock* suc
HLoopInformation* info = block->GetLoopInformation();
if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
+ if (codegen_->GetCompilerOptions().CountHotnessInCompiledCode()) {
+ __ pushl(EAX);
+ __ movl(EAX, Address(ESP, kX86WordSize));
+ __ addw(Address(EAX, ArtMethod::HotnessCountOffset().Int32Value()), Immediate(1));
+ __ popl(EAX);
+ }
GenerateSuspendCheck(info->GetSuspendCheck(), successor);
return;
}
@@ -4833,6 +4848,8 @@ void InstructionCodeGeneratorX86::HandleFieldGet(HInstruction* instruction,
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << load_type;
UNREACHABLE();
@@ -5006,6 +5023,8 @@ void InstructionCodeGeneratorX86::HandleFieldSet(HInstruction* instruction,
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << field_type;
UNREACHABLE();
@@ -5309,6 +5328,8 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
UNREACHABLE();
@@ -5560,6 +5581,8 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
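One x86-specific wrinkle in the back-edge hunk above: with no free scratch register, EAX is pushed first, which moves the spilled ArtMethod* one word deeper — hence the reload from Address(ESP, kX86WordSize) rather than offset 0. A sketch of that layout arithmetic; the names and the 4-byte word size are assumptions matching 32-bit x86:

    #include <cstdint>

    constexpr int kWordSize = 4;  // 32-bit x86

    // After `pushl EAX`: [esp + 0] = saved EAX, [esp + kWordSize] = ArtMethod*.
    inline uint16_t* HotnessSlotAfterPush(uint8_t* esp_after_push, int hotness_offset) {
      uint8_t* method = *reinterpret_cast<uint8_t**>(esp_after_push + kWordSize);
      return reinterpret_cast<uint16_t*>(method + hotness_offset);
    }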
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 510eec4f30..ae35ab5983 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1268,6 +1268,12 @@ void CodeGeneratorX86_64::GenerateFrameEntry() {
&& !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kX86_64);
DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
+ if (GetCompilerOptions().CountHotnessInCompiledCode()) {
+ __ addw(Address(CpuRegister(kMethodRegisterArgument),
+ ArtMethod::HotnessCountOffset().Int32Value()),
+ Immediate(1));
+ }
+
if (!skip_overflow_check) {
size_t reserved_bytes = GetStackOverflowReservedBytes(InstructionSet::kX86_64);
__ testq(CpuRegister(RAX), Address(CpuRegister(RSP), -static_cast<int32_t>(reserved_bytes)));
@@ -1459,6 +1465,11 @@ void InstructionCodeGeneratorX86_64::HandleGoto(HInstruction* got, HBasicBlock*
HLoopInformation* info = block->GetLoopInformation();
if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
+ if (codegen_->GetCompilerOptions().CountHotnessInCompiledCode()) {
+ __ movq(CpuRegister(TMP), Address(CpuRegister(RSP), 0));
+ __ addw(Address(CpuRegister(TMP), ArtMethod::HotnessCountOffset().Int32Value()),
+ Immediate(1));
+ }
GenerateSuspendCheck(info->GetSuspendCheck(), successor);
return;
}
@@ -2262,7 +2273,9 @@ Location InvokeDexCallingConventionVisitorX86_64::GetReturnLocation(DataType::Ty
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
+ case DataType::Type::kUint32:
case DataType::Type::kInt32:
+ case DataType::Type::kUint64:
case DataType::Type::kInt64:
return Location::RegisterLocation(RAX);
@@ -2331,6 +2344,8 @@ Location InvokeDexCallingConventionVisitorX86_64::GetNextLocation(DataType::Type
}
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unexpected parameter type " << type;
break;
@@ -4296,6 +4311,8 @@ void InstructionCodeGeneratorX86_64::HandleFieldGet(HInstruction* instruction,
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << load_type;
UNREACHABLE();
@@ -4459,6 +4476,8 @@ void InstructionCodeGeneratorX86_64::HandleFieldSet(HInstruction* instruction,
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << field_type;
UNREACHABLE();
@@ -4752,6 +4771,8 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
UNREACHABLE();
@@ -4991,6 +5012,8 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
diff --git a/compiler/optimizing/data_type-inl.h b/compiler/optimizing/data_type-inl.h
index e389bad3ad..e2cf7a80fe 100644
--- a/compiler/optimizing/data_type-inl.h
+++ b/compiler/optimizing/data_type-inl.h
@@ -53,7 +53,9 @@ constexpr char DataType::TypeId(DataType::Type type) {
case DataType::Type::kInt8: return 'b'; // Java byte (B).
case DataType::Type::kUint16: return 'c'; // Java char (C).
case DataType::Type::kInt16: return 's'; // Java short (S).
+ case DataType::Type::kUint32: return 'u'; // Picked 'u' for unsigned.
case DataType::Type::kInt32: return 'i'; // Java int (I).
+ case DataType::Type::kUint64: return 'w'; // Picked 'w' for long unsigned.
case DataType::Type::kInt64: return 'j'; // Java long (J).
case DataType::Type::kFloat32: return 'f'; // Java float (F).
case DataType::Type::kFloat64: return 'd'; // Java double (D).
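Since TypeId() is constexpr (see the hunk header above), the two new identifiers can be checked at compile time; a throwaway probe, assuming ART's headers are on the include path:

    #include "data_type-inl.h"  // path relative to compiler/optimizing

    static_assert(art::DataType::TypeId(art::DataType::Type::kUint32) == 'u', "Uint32 id");
    static_assert(art::DataType::TypeId(art::DataType::Type::kUint64) == 'w', "Uint64 id");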
diff --git a/compiler/optimizing/data_type.cc b/compiler/optimizing/data_type.cc
index 3c99a76c17..cb354f46cc 100644
--- a/compiler/optimizing/data_type.cc
+++ b/compiler/optimizing/data_type.cc
@@ -25,7 +25,9 @@ static const char* kTypeNames[] = {
"Int8",
"Uint16",
"Int16",
+ "Uint32",
"Int32",
+ "Uint64",
"Int64",
"Float32",
"Float64",
diff --git a/compiler/optimizing/data_type.h b/compiler/optimizing/data_type.h
index 548fe28cee..4a6c91459f 100644
--- a/compiler/optimizing/data_type.h
+++ b/compiler/optimizing/data_type.h
@@ -34,7 +34,9 @@ class DataType {
kInt8,
kUint16,
kInt16,
+ kUint32,
kInt32,
+ kUint64,
kInt64,
kFloat32,
kFloat64,
@@ -55,9 +57,11 @@ class DataType {
case Type::kUint16:
case Type::kInt16:
return 1;
+ case Type::kUint32:
case Type::kInt32:
case Type::kFloat32:
return 2;
+ case Type::kUint64:
case Type::kInt64:
case Type::kFloat64:
return 3;
@@ -80,9 +84,11 @@ class DataType {
case Type::kUint16:
case Type::kInt16:
return 2;
+ case Type::kUint32:
case Type::kInt32:
case Type::kFloat32:
return 4;
+ case Type::kUint64:
case Type::kInt64:
case Type::kFloat64:
return 8;
@@ -107,7 +113,9 @@ class DataType {
case Type::kInt8:
case Type::kUint16:
case Type::kInt16:
+ case Type::kUint32:
case Type::kInt32:
+ case Type::kUint64:
case Type::kInt64:
return true;
default:
@@ -120,11 +128,12 @@ class DataType {
}
static bool Is64BitType(Type type) {
- return type == Type::kInt64 || type == Type::kFloat64;
+ return type == Type::kUint64 || type == Type::kInt64 || type == Type::kFloat64;
}
static bool IsUnsignedType(Type type) {
- return type == Type::kBool || type == Type::kUint8 || type == Type::kUint16;
+ return type == Type::kBool || type == Type::kUint8 || type == Type::kUint16 ||
+ type == Type::kUint32 || type == Type::kUint64;
}
// Return the general kind of `type`, fusing integer-like types as Type::kInt.
@@ -133,10 +142,14 @@ class DataType {
case Type::kBool:
case Type::kUint8:
case Type::kInt8:
- case Type::kInt16:
case Type::kUint16:
+ case Type::kInt16:
+ case Type::kUint32:
case Type::kInt32:
return Type::kInt32;
+ case Type::kUint64:
+ case Type::kInt64:
+ return Type::kInt64;
default:
return type;
}
@@ -154,8 +167,12 @@ class DataType {
return std::numeric_limits<uint16_t>::min();
case Type::kInt16:
return std::numeric_limits<int16_t>::min();
+ case Type::kUint32:
+ return std::numeric_limits<uint32_t>::min();
case Type::kInt32:
return std::numeric_limits<int32_t>::min();
+ case Type::kUint64:
+ return std::numeric_limits<uint64_t>::min();
case Type::kInt64:
return std::numeric_limits<int64_t>::min();
default:
@@ -176,8 +193,12 @@ class DataType {
return std::numeric_limits<uint16_t>::max();
case Type::kInt16:
return std::numeric_limits<int16_t>::max();
+ case Type::kUint32:
+ return std::numeric_limits<uint32_t>::max();
case Type::kInt32:
return std::numeric_limits<int32_t>::max();
+ case Type::kUint64:
+ return std::numeric_limits<uint64_t>::max();
case Type::kInt64:
return std::numeric_limits<int64_t>::max();
default:
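The net effect of the data_type.h changes, exercised directly — runtime asserts rather than static_assert, since these helpers are not shown to be constexpr, and the include path is an assumption:

    #include <cassert>
    #include "data_type.h"  // path relative to compiler/optimizing

    int main() {
      using art::DataType;
      assert(DataType::IsUnsignedType(DataType::Type::kUint32));
      assert(DataType::Is64BitType(DataType::Type::kUint64));
      // Kind() fuses same-size integral types into their signed carrier.
      assert(DataType::Kind(DataType::Type::kUint32) == DataType::Type::kInt32);
      assert(DataType::Kind(DataType::Type::kUint64) == DataType::Type::kInt64);
      return 0;
    }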
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 12c69889ab..6144162f68 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -533,20 +533,9 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
void VisitVecHalvingAdd(HVecHalvingAdd* hadd) OVERRIDE {
VisitVecBinaryOperation(hadd);
- StartAttributeStream("unsigned") << std::boolalpha << hadd->IsUnsigned() << std::noboolalpha;
StartAttributeStream("rounded") << std::boolalpha << hadd->IsRounded() << std::noboolalpha;
}
- void VisitVecMin(HVecMin* min) OVERRIDE {
- VisitVecBinaryOperation(min);
- StartAttributeStream("unsigned") << std::boolalpha << min->IsUnsigned() << std::noboolalpha;
- }
-
- void VisitVecMax(HVecMax* max) OVERRIDE {
- VisitVecBinaryOperation(max);
- StartAttributeStream("unsigned") << std::boolalpha << max->IsUnsigned() << std::noboolalpha;
- }
-
void VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) OVERRIDE {
VisitVecOperation(instruction);
StartAttributeStream("kind") << instruction->GetOpKind();
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 452be6feae..035e5ce3e1 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -392,8 +392,9 @@ ArtMethod* HInliner::TryCHADevirtualization(ArtMethod* resolved_method) {
return single_impl;
}
-static bool AlwaysThrows(ArtMethod* method) {
- CodeItemDataAccessor accessor(method);
+static bool AlwaysThrows(ArtMethod* method)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ CodeItemDataAccessor accessor(method->DexInstructionData());
// Skip native methods, methods with try blocks, and methods that are too large.
if (!accessor.HasCodeItem() ||
accessor.TriesSize() != 0 ||
@@ -1418,7 +1419,7 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
bool same_dex_file = IsSameDexFile(*outer_compilation_unit_.GetDexFile(), *method->GetDexFile());
- CodeItemDataAccessor accessor(method);
+ CodeItemDataAccessor accessor(method->DexInstructionData());
if (!accessor.HasCodeItem()) {
LOG_FAIL_NO_STAT()
@@ -1697,7 +1698,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
const DexFile::CodeItem* code_item = resolved_method->GetCodeItem();
const DexFile& callee_dex_file = *resolved_method->GetDexFile();
uint32_t method_index = resolved_method->GetDexMethodIndex();
- CodeItemDebugInfoAccessor code_item_accessor(resolved_method);
+ CodeItemDebugInfoAccessor code_item_accessor(resolved_method->DexInstructionDebugInfo());
ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
Handle<mirror::DexCache> dex_cache = NewHandleIfDifferent(resolved_method->GetDexCache(),
caller_compilation_unit_.GetDexCache(),
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 3dc1ef7534..899496328e 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -30,46 +30,6 @@
namespace art {
-// TODO: Clean up the packed type detection so that we have the right type straight away
-// and do not need to go through this normalization.
-static inline void NormalizePackedType(/* inout */ DataType::Type* type,
- /* inout */ bool* is_unsigned) {
- switch (*type) {
- case DataType::Type::kBool:
- DCHECK(!*is_unsigned);
- break;
- case DataType::Type::kUint8:
- case DataType::Type::kInt8:
- if (*is_unsigned) {
- *is_unsigned = false;
- *type = DataType::Type::kUint8;
- } else {
- *type = DataType::Type::kInt8;
- }
- break;
- case DataType::Type::kUint16:
- case DataType::Type::kInt16:
- if (*is_unsigned) {
- *is_unsigned = false;
- *type = DataType::Type::kUint16;
- } else {
- *type = DataType::Type::kInt16;
- }
- break;
- case DataType::Type::kInt32:
- case DataType::Type::kInt64:
- // We do not have kUint32 and kUint64 at the moment.
- break;
- case DataType::Type::kFloat32:
- case DataType::Type::kFloat64:
- DCHECK(!*is_unsigned);
- break;
- default:
- LOG(FATAL) << "Unexpected type " << *type;
- UNREACHABLE();
- }
-}
-
// Enables vectorization (SIMDization) in the loop optimizer.
static constexpr bool kEnableVectorization = true;
@@ -1362,8 +1322,10 @@ bool HLoopOptimization::VectorizeUse(LoopNode* node,
}
if (VectorizeUse(node, r, generate_code, type, restrictions)) {
if (generate_code) {
- NormalizePackedType(&type, &is_unsigned);
- GenerateVecOp(instruction, vector_map_->Get(r), nullptr, type);
+ GenerateVecOp(instruction,
+ vector_map_->Get(r),
+ nullptr,
+ HVecOperation::ToProperType(type, is_unsigned));
}
return true;
}
@@ -1865,18 +1827,26 @@ void HLoopOptimization::GenerateVecOp(HInstruction* org,
case Intrinsics::kMathMinLongLong:
case Intrinsics::kMathMinFloatFloat:
case Intrinsics::kMathMinDoubleDouble: {
- NormalizePackedType(&type, &is_unsigned);
vector = new (global_allocator_)
- HVecMin(global_allocator_, opa, opb, type, vector_length_, is_unsigned, dex_pc);
+ HVecMin(global_allocator_,
+ opa,
+ opb,
+ HVecOperation::ToProperType(type, is_unsigned),
+ vector_length_,
+ dex_pc);
break;
}
case Intrinsics::kMathMaxIntInt:
case Intrinsics::kMathMaxLongLong:
case Intrinsics::kMathMaxFloatFloat:
case Intrinsics::kMathMaxDoubleDouble: {
- NormalizePackedType(&type, &is_unsigned);
vector = new (global_allocator_)
- HVecMax(global_allocator_, opa, opb, type, vector_length_, is_unsigned, dex_pc);
+ HVecMax(global_allocator_,
+ opa,
+ opb,
+ HVecOperation::ToProperType(type, is_unsigned),
+ vector_length_,
+ dex_pc);
break;
}
default:
@@ -1987,15 +1957,13 @@ bool HLoopOptimization::VectorizeHalvingAddIdiom(LoopNode* node,
VectorizeUse(node, s, generate_code, type, restrictions)) {
if (generate_code) {
if (vector_mode_ == kVector) {
- NormalizePackedType(&type, &is_unsigned);
vector_map_->Put(instruction, new (global_allocator_) HVecHalvingAdd(
global_allocator_,
vector_map_->Get(r),
vector_map_->Get(s),
- type,
+ HVecOperation::ToProperType(type, is_unsigned),
vector_length_,
is_rounded,
- is_unsigned,
kNoDexPc));
MaybeRecordStat(stats_, MethodCompilationStat::kLoopVectorizedIdiom);
} else {
@@ -2086,7 +2054,7 @@ bool HLoopOptimization::VectorizeSADIdiom(LoopNode* node,
VectorizeUse(node, r, generate_code, sub_type, restrictions) &&
VectorizeUse(node, s, generate_code, sub_type, restrictions)) {
if (generate_code) {
- NormalizePackedType(&reduction_type, &is_unsigned);
+ reduction_type = HVecOperation::ToProperType(reduction_type, is_unsigned);
if (vector_mode_ == kVector) {
vector_map_->Put(instruction, new (global_allocator_) HVecSADAccumulate(
global_allocator_,
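The in/out NormalizePackedType() helper is gone; every call site now funnels through the pure mapping HVecOperation::ToProperType(type, is_unsigned). A standalone restatement of that mapping for the four types this change cares about (the enum and helpers here are illustrative, not ART's):

    enum class Type { kUint32, kInt32, kUint64, kInt64 };

    Type ToUnsigned(Type t) {
      return t == Type::kInt32 ? Type::kUint32
           : t == Type::kInt64 ? Type::kUint64 : t;
    }
    Type ToSigned(Type t) {
      return t == Type::kUint32 ? Type::kInt32
           : t == Type::kUint64 ? Type::kInt64 : t;
    }

    // Mirrors HVecOperation::ToProperType(type, is_unsigned).
    Type ToProperType(Type type, bool is_unsigned) {
      return is_unsigned ? ToUnsigned(type) : ToSigned(type);
    }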
diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h
index 87dff8403b..ecabdf3b76 100644
--- a/compiler/optimizing/nodes_vector.h
+++ b/compiler/optimizing/nodes_vector.h
@@ -131,8 +131,6 @@ class HVecOperation : public HVariableInputSizeInstruction {
}
// Maps an integral type to the same-size signed type and leaves other types alone.
- // Can be used to test relaxed type consistency in which packed same-size integral
- // types can co-exist, but other type mixes are an error.
static DataType::Type ToSignedType(DataType::Type type) {
switch (type) {
case DataType::Type::kBool: // 1-byte storage unit
@@ -160,6 +158,11 @@ class HVecOperation : public HVariableInputSizeInstruction {
}
}
+ // Maps an integral type to the same-size (un)signed type. Leaves other types alone.
+ static DataType::Type ToProperType(DataType::Type type, bool is_unsigned) {
+ return is_unsigned ? ToUnsignedType(type) : ToSignedType(type);
+ }
+
// Helper method to determine if an instruction returns a SIMD value.
// TODO: This method is needed until we introduce SIMD as proper type.
static bool ReturnsSIMDValue(HInstruction* instruction) {
@@ -286,6 +289,8 @@ class HVecMemoryOperation : public HVecOperation {
};
// Packed type consistency checker ("same vector length" integral types may mix freely).
+// Tests relaxed type consistency in which packed same-size integral types can co-exist,
+// but other type mixes are an error.
inline static bool HasConsistentPackedTypes(HInstruction* input, DataType::Type type) {
if (input->IsPhi()) {
return input->GetType() == HVecOperation::kSIMDType; // carries SIMD
@@ -518,7 +523,7 @@ class HVecAdd FINAL : public HVecBinaryOperation {
// Performs halving add on every component in the two vectors, viz.
// rounded [ x1, .. , xn ] hradd [ y1, .. , yn ] = [ (x1 + y1 + 1) >> 1, .. , (xn + yn + 1) >> 1 ]
// truncated [ x1, .. , xn ] hadd [ y1, .. , yn ] = [ (x1 + y1) >> 1, .. , (xn + yn ) >> 1 ]
-// for either both signed or both unsigned operands x, y.
+// for either both signed or both unsigned operands x, y (reflected in packed_type).
class HVecHalvingAdd FINAL : public HVecBinaryOperation {
public:
HVecHalvingAdd(ArenaAllocator* allocator,
@@ -527,21 +532,13 @@ class HVecHalvingAdd FINAL : public HVecBinaryOperation {
DataType::Type packed_type,
size_t vector_length,
bool is_rounded,
- bool is_unsigned,
uint32_t dex_pc)
: HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
- // The `is_unsigned` flag should be used exclusively with the Int32 or Int64.
- // This flag is a temporary measure while we do not have the Uint32 and Uint64 data types.
- DCHECK(!is_unsigned ||
- packed_type == DataType::Type::kInt32 ||
- packed_type == DataType::Type::kInt64) << packed_type;
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
- SetPackedFlag<kFieldHAddIsUnsigned>(is_unsigned);
SetPackedFlag<kFieldHAddIsRounded>(is_rounded);
}
- bool IsUnsigned() const { return GetPackedFlag<kFieldHAddIsUnsigned>(); }
bool IsRounded() const { return GetPackedFlag<kFieldHAddIsRounded>(); }
bool CanBeMoved() const OVERRIDE { return true; }
@@ -549,9 +546,7 @@ class HVecHalvingAdd FINAL : public HVecBinaryOperation {
bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
DCHECK(other->IsVecHalvingAdd());
const HVecHalvingAdd* o = other->AsVecHalvingAdd();
- return HVecOperation::InstructionDataEquals(o) &&
- IsUnsigned() == o->IsUnsigned() &&
- IsRounded() == o->IsRounded();
+ return HVecOperation::InstructionDataEquals(o) && IsRounded() == o->IsRounded();
}
DECLARE_INSTRUCTION(VecHalvingAdd);
@@ -561,8 +556,7 @@ class HVecHalvingAdd FINAL : public HVecBinaryOperation {
private:
// Additional packed bits.
- static constexpr size_t kFieldHAddIsUnsigned = HVecOperation::kNumberOfVectorOpPackedBits;
- static constexpr size_t kFieldHAddIsRounded = kFieldHAddIsUnsigned + 1;
+ static constexpr size_t kFieldHAddIsRounded = HVecOperation::kNumberOfVectorOpPackedBits;
static constexpr size_t kNumberOfHAddPackedBits = kFieldHAddIsRounded + 1;
static_assert(kNumberOfHAddPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
};
@@ -638,7 +632,7 @@ class HVecDiv FINAL : public HVecBinaryOperation {
// Takes minimum of every component in the two vectors,
// viz. MIN( [ x1, .. , xn ] , [ y1, .. , yn ]) = [ min(x1, y1), .. , min(xn, yn) ]
-// for either both signed or both unsigned operands x, y.
+// for either both signed or both unsigned operands x, y (reflected in packed_type).
class HVecMin FINAL : public HVecBinaryOperation {
public:
HVecMin(ArenaAllocator* allocator,
@@ -646,44 +640,23 @@ class HVecMin FINAL : public HVecBinaryOperation {
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
- bool is_unsigned,
uint32_t dex_pc)
: HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
- // The `is_unsigned` flag should be used exclusively with the Int32 or Int64.
- // This flag is a temporary measure while we do not have the Uint32 and Uint64 data types.
- DCHECK(!is_unsigned ||
- packed_type == DataType::Type::kInt32 ||
- packed_type == DataType::Type::kInt64) << packed_type;
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
- SetPackedFlag<kFieldMinOpIsUnsigned>(is_unsigned);
}
- bool IsUnsigned() const { return GetPackedFlag<kFieldMinOpIsUnsigned>(); }
-
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
- DCHECK(other->IsVecMin());
- const HVecMin* o = other->AsVecMin();
- return HVecOperation::InstructionDataEquals(o) && IsUnsigned() == o->IsUnsigned();
- }
-
DECLARE_INSTRUCTION(VecMin);
protected:
DEFAULT_COPY_CONSTRUCTOR(VecMin);
-
- private:
- // Additional packed bits.
- static constexpr size_t kFieldMinOpIsUnsigned = HVecOperation::kNumberOfVectorOpPackedBits;
- static constexpr size_t kNumberOfMinOpPackedBits = kFieldMinOpIsUnsigned + 1;
- static_assert(kNumberOfMinOpPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
};
// Takes maximum of every component in the two vectors,
// viz. MAX( [ x1, .. , xn ] , [ y1, .. , yn ]) = [ max(x1, y1), .. , max(xn, yn) ]
-// for either both signed or both unsigned operands x, y.
+// for either both signed or both unsigned operands x, y (reflected in packed_type).
class HVecMax FINAL : public HVecBinaryOperation {
public:
HVecMax(ArenaAllocator* allocator,
@@ -691,39 +664,18 @@ class HVecMax FINAL : public HVecBinaryOperation {
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
- bool is_unsigned,
uint32_t dex_pc)
: HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
- // The `is_unsigned` flag should be used exclusively with the Int32 or Int64.
- // This flag is a temporary measure while we do not have the Uint32 and Uint64 data types.
- DCHECK(!is_unsigned ||
- packed_type == DataType::Type::kInt32 ||
- packed_type == DataType::Type::kInt64) << packed_type;
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
- SetPackedFlag<kFieldMaxOpIsUnsigned>(is_unsigned);
}
- bool IsUnsigned() const { return GetPackedFlag<kFieldMaxOpIsUnsigned>(); }
-
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
- DCHECK(other->IsVecMax());
- const HVecMax* o = other->AsVecMax();
- return HVecOperation::InstructionDataEquals(o) && IsUnsigned() == o->IsUnsigned();
- }
-
DECLARE_INSTRUCTION(VecMax);
protected:
DEFAULT_COPY_CONSTRUCTOR(VecMax);
-
- private:
- // Additional packed bits.
- static constexpr size_t kFieldMaxOpIsUnsigned = HVecOperation::kNumberOfVectorOpPackedBits;
- static constexpr size_t kNumberOfMaxOpPackedBits = kFieldMaxOpIsUnsigned + 1;
- static_assert(kNumberOfMaxOpPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
};
// Bitwise-ands every component in the two vectors,
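With IsUnsigned() gone from HVecMin/HVecMax, node equality and the packed-type checker carry the signedness instead. The relaxed consistency rule that HasConsistentPackedTypes() enforces — same-size integral types may mix, other mixes are an error — reduces to comparing signed images, roughly as follows (illustrative names, not ART's):

    enum class Type { kUint8, kInt8, kUint16, kInt16, kUint32, kInt32, kFloat32 };

    Type ToSignedImage(Type t) {
      switch (t) {
        case Type::kUint8:  return Type::kInt8;
        case Type::kUint16: return Type::kInt16;
        case Type::kUint32: return Type::kInt32;
        default:            return t;  // signed and FP types map to themselves
      }
    }

    // e.g. kUint32 is consistent with kInt32, but not with kInt16 or kFloat32.
    bool ConsistentPackedTypes(Type a, Type b) {
      return ToSignedImage(a) == ToSignedImage(b);
    }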
diff --git a/compiler/optimizing/nodes_vector_test.cc b/compiler/optimizing/nodes_vector_test.cc
index ab9d7594d9..af13449646 100644
--- a/compiler/optimizing/nodes_vector_test.cc
+++ b/compiler/optimizing/nodes_vector_test.cc
@@ -282,143 +282,53 @@ TEST_F(NodesVectorTest, VectorAlignmentMattersOnStore) {
EXPECT_FALSE(v0->Equals(v1)); // no longer equal
}
-TEST_F(NodesVectorTest, VectorSignMattersOnMin) {
- HVecOperation* p0 = new (GetAllocator())
- HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
- HVecOperation* p1 = new (GetAllocator())
- HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc);
- HVecOperation* p2 = new (GetAllocator())
- HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
-
- HVecMin* v0 = new (GetAllocator()) HVecMin(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ true, kNoDexPc);
- HVecMin* v1 = new (GetAllocator()) HVecMin(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ false, kNoDexPc);
- HVecMin* v2 = new (GetAllocator()) HVecMin(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 2, /*is_unsigned*/ true, kNoDexPc);
- HVecMin* v3 = new (GetAllocator()) HVecMin(
- GetAllocator(), p1, p1, DataType::Type::kUint8, 16, /*is_unsigned*/ false, kNoDexPc);
- HVecMin* v4 = new (GetAllocator()) HVecMin(
- GetAllocator(), p1, p1, DataType::Type::kInt8, 16, /*is_unsigned*/ false, kNoDexPc);
- HVecMin* v5 = new (GetAllocator()) HVecMin(
- GetAllocator(), p2, p2, DataType::Type::kUint16, 8, /*is_unsigned*/ false, kNoDexPc);
- HVecMin* v6 = new (GetAllocator()) HVecMin(
- GetAllocator(), p2, p2, DataType::Type::kInt16, 8, /*is_unsigned*/ false, kNoDexPc);
- HVecMin* min_insns[] = { v0, v1, v2, v3, v4, v5, v6 };
-
- EXPECT_FALSE(p0->CanBeMoved());
- EXPECT_FALSE(p1->CanBeMoved());
- EXPECT_FALSE(p2->CanBeMoved());
-
- for (HVecMin* min_insn : min_insns) {
- EXPECT_TRUE(min_insn->CanBeMoved());
- }
-
- // Deprecated; IsUnsigned() should be removed with the introduction of Uint32 and Uint64.
- EXPECT_TRUE(v0->IsUnsigned());
- EXPECT_FALSE(v1->IsUnsigned());
- EXPECT_TRUE(v2->IsUnsigned());
-
- for (HVecMin* min_insn1 : min_insns) {
- for (HVecMin* min_insn2 : min_insns) {
- EXPECT_EQ(min_insn1 == min_insn2, min_insn1->Equals(min_insn2));
- }
- }
-}
-
-TEST_F(NodesVectorTest, VectorSignMattersOnMax) {
- HVecOperation* p0 = new (GetAllocator())
- HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
- HVecOperation* p1 = new (GetAllocator())
- HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc);
- HVecOperation* p2 = new (GetAllocator())
- HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
-
- HVecMax* v0 = new (GetAllocator()) HVecMax(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ true, kNoDexPc);
- HVecMax* v1 = new (GetAllocator()) HVecMax(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ false, kNoDexPc);
- HVecMax* v2 = new (GetAllocator()) HVecMax(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 2, /*is_unsigned*/ true, kNoDexPc);
- HVecMax* v3 = new (GetAllocator()) HVecMax(
- GetAllocator(), p1, p1, DataType::Type::kUint8, 16, /*is_unsigned*/ false, kNoDexPc);
- HVecMax* v4 = new (GetAllocator()) HVecMax(
- GetAllocator(), p1, p1, DataType::Type::kInt8, 16, /*is_unsigned*/ false, kNoDexPc);
- HVecMax* v5 = new (GetAllocator()) HVecMax(
- GetAllocator(), p2, p2, DataType::Type::kUint16, 8, /*is_unsigned*/ false, kNoDexPc);
- HVecMax* v6 = new (GetAllocator()) HVecMax(
- GetAllocator(), p2, p2, DataType::Type::kInt16, 8, /*is_unsigned*/ false, kNoDexPc);
- HVecMax* max_insns[] = { v0, v1, v2, v3, v4, v5, v6 };
-
- EXPECT_FALSE(p0->CanBeMoved());
- EXPECT_FALSE(p1->CanBeMoved());
- EXPECT_FALSE(p2->CanBeMoved());
-
- for (HVecMax* max_insn : max_insns) {
- EXPECT_TRUE(max_insn->CanBeMoved());
- }
-
- // Deprecated; IsUnsigned() should be removed with the introduction of Uint32 and Uint64.
- EXPECT_TRUE(v0->IsUnsigned());
- EXPECT_FALSE(v1->IsUnsigned());
- EXPECT_TRUE(v2->IsUnsigned());
-
- for (HVecMax* max_insn1 : max_insns) {
- for (HVecMax* max_insn2 : max_insns) {
- EXPECT_EQ(max_insn1 == max_insn2, max_insn1->Equals(max_insn2));
- }
- }
-}
-
TEST_F(NodesVectorTest, VectorAttributesMatterOnHalvingAdd) {
+ HVecOperation* u0 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kUint32, 4, kNoDexPc);
+ HVecOperation* u1 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kUint16, 8, kNoDexPc);
+ HVecOperation* u2 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kUint8, 16, kNoDexPc);
+
HVecOperation* p0 = new (GetAllocator())
HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
HVecOperation* p1 = new (GetAllocator())
- HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc);
+ HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kInt16, 8, kNoDexPc);
HVecOperation* p2 = new (GetAllocator())
- HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
+ HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kInt8, 16, kNoDexPc);
HVecHalvingAdd* v0 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 4,
- /*is_rounded*/ true, /*is_unsigned*/ true, kNoDexPc);
+ GetAllocator(), u0, u0, DataType::Type::kUint32, 4, /*is_rounded*/ true, kNoDexPc);
HVecHalvingAdd* v1 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 4,
- /*is_rounded*/ false, /*is_unsigned*/ true, kNoDexPc);
+ GetAllocator(), u0, u0, DataType::Type::kUint32, 4, /*is_rounded*/ false, kNoDexPc);
HVecHalvingAdd* v2 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 4,
- /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_rounded*/ true, kNoDexPc);
HVecHalvingAdd* v3 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 4,
- /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_rounded*/ false, kNoDexPc);
+
HVecHalvingAdd* v4 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 2,
- /*is_rounded*/ true, /*is_unsigned*/ true, kNoDexPc);
+ GetAllocator(), u1, u1, DataType::Type::kUint16, 8, /*is_rounded*/ true, kNoDexPc);
HVecHalvingAdd* v5 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p1, p1, DataType::Type::kUint8, 16,
- /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
+ GetAllocator(), u1, u1, DataType::Type::kUint16, 8, /*is_rounded*/ false, kNoDexPc);
HVecHalvingAdd* v6 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p1, p1, DataType::Type::kUint8, 16,
- /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
+ GetAllocator(), p1, p1, DataType::Type::kInt16, 8, /*is_rounded*/ true, kNoDexPc);
HVecHalvingAdd* v7 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p1, p1, DataType::Type::kInt8, 16,
- /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
+ GetAllocator(), p1, p1, DataType::Type::kInt16, 8, /*is_rounded*/ false, kNoDexPc);
+
HVecHalvingAdd* v8 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p1, p1, DataType::Type::kInt8, 16,
- /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
+ GetAllocator(), u2, u2, DataType::Type::kUint8, 16, /*is_rounded*/ true, kNoDexPc);
HVecHalvingAdd* v9 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p2, p2, DataType::Type::kUint16, 8,
- /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
+ GetAllocator(), u2, u2, DataType::Type::kUint8, 16, /*is_rounded*/ false, kNoDexPc);
HVecHalvingAdd* v10 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p2, p2, DataType::Type::kUint16, 8,
- /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
+ GetAllocator(), p2, p2, DataType::Type::kInt8, 16, /*is_rounded*/ true, kNoDexPc);
HVecHalvingAdd* v11 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p2, p2, DataType::Type::kInt16, 2,
- /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
- HVecHalvingAdd* v12 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p2, p2, DataType::Type::kInt16, 2,
- /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
- HVecHalvingAdd* hadd_insns[] = { v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12 };
+ GetAllocator(), p2, p2, DataType::Type::kInt8, 16, /*is_rounded*/ false, kNoDexPc);
+ HVecHalvingAdd* hadd_insns[] = { v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11 };
+
+ EXPECT_FALSE(u0->CanBeMoved());
+ EXPECT_FALSE(u1->CanBeMoved());
+ EXPECT_FALSE(u2->CanBeMoved());
EXPECT_FALSE(p0->CanBeMoved());
EXPECT_FALSE(p1->CanBeMoved());
EXPECT_FALSE(p2->CanBeMoved());
@@ -427,26 +337,18 @@ TEST_F(NodesVectorTest, VectorAttributesMatterOnHalvingAdd) {
EXPECT_TRUE(hadd_insn->CanBeMoved());
}
- // Deprecated; IsUnsigned() should be removed with the introduction of Uint32 and Uint64.
- EXPECT_TRUE(v0->IsUnsigned());
- EXPECT_TRUE(v1->IsUnsigned());
- EXPECT_TRUE(!v2->IsUnsigned());
- EXPECT_TRUE(!v3->IsUnsigned());
- EXPECT_TRUE(v4->IsUnsigned());
-
EXPECT_TRUE(v0->IsRounded());
EXPECT_TRUE(!v1->IsRounded());
EXPECT_TRUE(v2->IsRounded());
EXPECT_TRUE(!v3->IsRounded());
EXPECT_TRUE(v4->IsRounded());
- EXPECT_TRUE(v5->IsRounded());
- EXPECT_TRUE(!v6->IsRounded());
- EXPECT_TRUE(v7->IsRounded());
- EXPECT_TRUE(!v8->IsRounded());
- EXPECT_TRUE(v9->IsRounded());
- EXPECT_TRUE(!v10->IsRounded());
- EXPECT_TRUE(v11->IsRounded());
- EXPECT_TRUE(!v12->IsRounded());
+ EXPECT_TRUE(!v5->IsRounded());
+ EXPECT_TRUE(v6->IsRounded());
+ EXPECT_TRUE(!v7->IsRounded());
+ EXPECT_TRUE(v8->IsRounded());
+ EXPECT_TRUE(!v9->IsRounded());
+ EXPECT_TRUE(v10->IsRounded());
+ EXPECT_TRUE(!v11->IsRounded());
for (HVecHalvingAdd* hadd_insn1 : hadd_insns) {
for (HVecHalvingAdd* hadd_insn2 : hadd_insns) {
diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc
index 1d3fe0334d..27f9ac3990 100644
--- a/compiler/optimizing/register_allocation_resolver.cc
+++ b/compiler/optimizing/register_allocation_resolver.cc
@@ -103,6 +103,7 @@ void RegisterAllocationResolver::Resolve(ArrayRef<HInstruction* const> safepoint
case DataType::Type::kFloat64:
slot += long_spill_slots;
FALLTHROUGH_INTENDED;
+ case DataType::Type::kUint64:
case DataType::Type::kInt64:
slot += float_spill_slots;
FALLTHROUGH_INTENDED;
@@ -110,6 +111,7 @@ void RegisterAllocationResolver::Resolve(ArrayRef<HInstruction* const> safepoint
slot += int_spill_slots;
FALLTHROUGH_INTENDED;
case DataType::Type::kReference:
+ case DataType::Type::kUint32:
case DataType::Type::kInt32:
case DataType::Type::kUint16:
case DataType::Type::kUint8:
diff --git a/compiler/optimizing/register_allocator_graph_color.cc b/compiler/optimizing/register_allocator_graph_color.cc
index ad5248e982..fa7ad82316 100644
--- a/compiler/optimizing/register_allocator_graph_color.cc
+++ b/compiler/optimizing/register_allocator_graph_color.cc
@@ -1972,6 +1972,8 @@ void RegisterAllocatorGraphColor::AllocateSpillSlots(ArrayRef<InterferenceNode*
case DataType::Type::kInt16:
int_intervals.push_back(parent);
break;
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unexpected type for interval " << node->GetInterval()->GetType();
UNREACHABLE();
diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc
index cfe63bd758..216fb57a96 100644
--- a/compiler/optimizing/register_allocator_linear_scan.cc
+++ b/compiler/optimizing/register_allocator_linear_scan.cc
@@ -1131,6 +1131,8 @@ void RegisterAllocatorLinearScan::AllocateSpillSlotFor(LiveInterval* interval) {
case DataType::Type::kInt16:
spill_slots = &int_spill_slots_;
break;
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unexpected type for interval " << interval->GetType();
}