Clean up signed/unsigned in vectorizer.

Rationale:
Currently we have some remaining ugliness around signed and unsigned
SIMD operations due to the lack of kUint32 and kUint64 in the HIR. By
"softly" introducing these types, the ABS/MIN/MAX/HALVING_ADD/SAD_ACCUMULATE
operations can rely solely on the packed data types to distinguish
between signed and unsigned operations. This is cleaner, and it also
allows for some code removal in the current loop optimizer.

Bug: 72709770

Test: test-art-host test-art-target
Change-Id: I68e4cdfba325f622a7256adbe649735569cab2a3
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 1380596..3fd88e3 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1889,6 +1889,8 @@
       DCHECK_EQ(dst.Is64Bits(), DataType::Is64BitType(type));
       __ Ldr(dst, src);
       break;
+    case DataType::Type::kUint32:
+    case DataType::Type::kUint64:
     case DataType::Type::kVoid:
       LOG(FATAL) << "Unreachable type " << type;
   }
@@ -1967,6 +1969,8 @@
         __ Fmov(FPRegister(dst), temp);
         break;
       }
+      case DataType::Type::kUint32:
+      case DataType::Type::kUint64:
       case DataType::Type::kVoid:
         LOG(FATAL) << "Unreachable type " << type;
     }
@@ -1994,6 +1998,8 @@
       DCHECK_EQ(src.Is64Bits(), DataType::Is64BitType(type));
       __ Str(src, dst);
       break;
+    case DataType::Type::kUint32:
+    case DataType::Type::kUint64:
     case DataType::Type::kVoid:
       LOG(FATAL) << "Unreachable type " << type;
   }
@@ -2071,6 +2077,8 @@
       }
       break;
     }
+    case DataType::Type::kUint32:
+    case DataType::Type::kUint64:
     case DataType::Type::kVoid:
       LOG(FATAL) << "Unreachable type " << type;
   }
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 18e7d1c..704a0d3 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2650,6 +2650,8 @@
       }
     }
 
+    case DataType::Type::kUint32:
+    case DataType::Type::kUint64:
     case DataType::Type::kVoid:
       LOG(FATAL) << "Unexpected parameter type " << type;
       break;
@@ -2665,6 +2667,7 @@
     case DataType::Type::kInt8:
     case DataType::Type::kUint16:
     case DataType::Type::kInt16:
+    case DataType::Type::kUint32:
     case DataType::Type::kInt32: {
       return LocationFrom(r0);
     }
@@ -2673,6 +2676,7 @@
       return LocationFrom(s0);
     }
 
+    case DataType::Type::kUint64:
     case DataType::Type::kInt64: {
       return LocationFrom(r0, r1);
     }
@@ -5512,6 +5516,8 @@
       break;
     }
 
+    case DataType::Type::kUint32:
+    case DataType::Type::kUint64:
     case DataType::Type::kVoid:
       LOG(FATAL) << "Unreachable type " << field_type;
       UNREACHABLE();
@@ -5756,6 +5762,8 @@
       break;
     }
 
+    case DataType::Type::kUint32:
+    case DataType::Type::kUint64:
     case DataType::Type::kVoid:
       LOG(FATAL) << "Unreachable type " << load_type;
       UNREACHABLE();
@@ -6248,6 +6256,8 @@
       break;
     }
 
+    case DataType::Type::kUint32:
+    case DataType::Type::kUint64:
     case DataType::Type::kVoid:
       LOG(FATAL) << "Unreachable type " << type;
       UNREACHABLE();
@@ -6537,6 +6547,8 @@
       break;
     }
 
+    case DataType::Type::kUint32:
+    case DataType::Type::kUint64:
     case DataType::Type::kVoid:
       LOG(FATAL) << "Unreachable type " << value_type;
       UNREACHABLE();
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 51fb4da..36c9219 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -58,9 +58,11 @@
     case DataType::Type::kInt8:
     case DataType::Type::kUint16:
     case DataType::Type::kInt16:
+    case DataType::Type::kUint32:
     case DataType::Type::kInt32:
       return Location::RegisterLocation(V0);
 
+    case DataType::Type::kUint64:
     case DataType::Type::kInt64:
       return Location::RegisterPairLocation(V0, V1);
 
@@ -140,6 +142,8 @@
       break;
     }
 
+    case DataType::Type::kUint32:
+    case DataType::Type::kUint64:
     case DataType::Type::kVoid:
       LOG(FATAL) << "Unexpected parameter type " << type;
       break;
@@ -2821,6 +2825,8 @@
       break;
     }
 
+    case DataType::Type::kUint32:
+    case DataType::Type::kUint64:
     case DataType::Type::kVoid:
       LOG(FATAL) << "Unreachable type " << instruction->GetType();
       UNREACHABLE();
@@ -3136,6 +3142,8 @@
       break;
     }
 
+    case DataType::Type::kUint32:
+    case DataType::Type::kUint64:
     case DataType::Type::kVoid:
       LOG(FATAL) << "Unreachable type " << instruction->GetType();
       UNREACHABLE();
@@ -6320,6 +6328,8 @@
     case DataType::Type::kFloat64:
       load_type = kLoadDoubleword;
       break;
+    case DataType::Type::kUint32:
+    case DataType::Type::kUint64:
     case DataType::Type::kVoid:
       LOG(FATAL) << "Unreachable type " << type;
       UNREACHABLE();
@@ -6473,6 +6483,8 @@
     case DataType::Type::kFloat64:
       store_type = kStoreDoubleword;
       break;
+    case DataType::Type::kUint32:
+    case DataType::Type::kUint64:
     case DataType::Type::kVoid:
       LOG(FATAL) << "Unreachable type " << type;
       UNREACHABLE();
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 480b917..6657582 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -55,8 +55,10 @@
     case DataType::Type::kInt8:
     case DataType::Type::kUint16:
     case DataType::Type::kInt16:
+    case DataType::Type::kUint32:
     case DataType::Type::kInt32:
     case DataType::Type::kReference:
+    case DataType::Type::kUint64:
     case DataType::Type::kInt64:
       return Location::RegisterLocation(V0);
 
@@ -2408,6 +2410,8 @@
       break;
     }
 
+    case DataType::Type::kUint32:
+    case DataType::Type::kUint64:
     case DataType::Type::kVoid:
       LOG(FATAL) << "Unreachable type " << instruction->GetType();
       UNREACHABLE();
@@ -2711,6 +2715,8 @@
       break;
     }
 
+    case DataType::Type::kUint32:
+    case DataType::Type::kUint64:
     case DataType::Type::kVoid:
       LOG(FATAL) << "Unreachable type " << instruction->GetType();
       UNREACHABLE();
@@ -4798,6 +4804,8 @@
     case DataType::Type::kReference:
       load_type = kLoadUnsignedWord;
       break;
+    case DataType::Type::kUint32:
+    case DataType::Type::kUint64:
     case DataType::Type::kVoid:
       LOG(FATAL) << "Unreachable type " << type;
       UNREACHABLE();
@@ -4891,6 +4899,8 @@
     case DataType::Type::kFloat64:
       store_type = kStoreDoubleword;
       break;
+    case DataType::Type::kUint32:
+    case DataType::Type::kUint64:
     case DataType::Type::kVoid:
       LOG(FATAL) << "Unreachable type " << type;
       UNREACHABLE();
diff --git a/compiler/optimizing/code_generator_vector_arm64.cc b/compiler/optimizing/code_generator_vector_arm64.cc
index 152a59c..174efdf 100644
--- a/compiler/optimizing/code_generator_vector_arm64.cc
+++ b/compiler/optimizing/code_generator_vector_arm64.cc
@@ -606,22 +606,20 @@
       DCHECK_EQ(8u, instruction->GetVectorLength());
       __ Smin(dst.V8H(), lhs.V8H(), rhs.V8H());
       break;
+    case DataType::Type::kUint32:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ Umin(dst.V4S(), lhs.V4S(), rhs.V4S());
+      break;
     case DataType::Type::kInt32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      if (instruction->IsUnsigned()) {
-        __ Umin(dst.V4S(), lhs.V4S(), rhs.V4S());
-      } else {
-        __ Smin(dst.V4S(), lhs.V4S(), rhs.V4S());
-      }
+      __ Smin(dst.V4S(), lhs.V4S(), rhs.V4S());
       break;
     case DataType::Type::kFloat32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      DCHECK(!instruction->IsUnsigned());
       __ Fmin(dst.V4S(), lhs.V4S(), rhs.V4S());
       break;
     case DataType::Type::kFloat64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      DCHECK(!instruction->IsUnsigned());
       __ Fmin(dst.V2D(), lhs.V2D(), rhs.V2D());
       break;
     default:
@@ -656,22 +654,20 @@
       DCHECK_EQ(8u, instruction->GetVectorLength());
       __ Smax(dst.V8H(), lhs.V8H(), rhs.V8H());
       break;
+    case DataType::Type::kUint32:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ Umax(dst.V4S(), lhs.V4S(), rhs.V4S());
+      break;
     case DataType::Type::kInt32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      if (instruction->IsUnsigned()) {
-        __ Umax(dst.V4S(), lhs.V4S(), rhs.V4S());
-      } else {
-        __ Smax(dst.V4S(), lhs.V4S(), rhs.V4S());
-      }
+      __ Smax(dst.V4S(), lhs.V4S(), rhs.V4S());
       break;
     case DataType::Type::kFloat32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      DCHECK(!instruction->IsUnsigned());
       __ Fmax(dst.V4S(), lhs.V4S(), rhs.V4S());
       break;
     case DataType::Type::kFloat64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      DCHECK(!instruction->IsUnsigned());
       __ Fmax(dst.V2D(), lhs.V2D(), rhs.V2D());
       break;
     default:
diff --git a/compiler/optimizing/code_generator_vector_arm_vixl.cc b/compiler/optimizing/code_generator_vector_arm_vixl.cc
index cc470dd..7c3155a 100644
--- a/compiler/optimizing/code_generator_vector_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_vector_arm_vixl.cc
@@ -431,13 +431,13 @@
       DCHECK_EQ(4u, instruction->GetVectorLength());
       __ Vmin(DataTypeValue::S16, dst, lhs, rhs);
       break;
+    case DataType::Type::kUint32:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ Vmin(DataTypeValue::U32, dst, lhs, rhs);
+      break;
     case DataType::Type::kInt32:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      if (instruction->IsUnsigned()) {
-        __ Vmin(DataTypeValue::U32, dst, lhs, rhs);
-      } else {
-        __ Vmin(DataTypeValue::S32, dst, lhs, rhs);
-      }
+      __ Vmin(DataTypeValue::S32, dst, lhs, rhs);
       break;
     default:
       LOG(FATAL) << "Unsupported SIMD type";
@@ -471,13 +471,13 @@
       DCHECK_EQ(4u, instruction->GetVectorLength());
       __ Vmax(DataTypeValue::S16, dst, lhs, rhs);
       break;
+    case DataType::Type::kUint32:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ Vmax(DataTypeValue::U32, dst, lhs, rhs);
+      break;
     case DataType::Type::kInt32:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      if (instruction->IsUnsigned()) {
-        __ Vmax(DataTypeValue::U32, dst, lhs, rhs);
-      } else {
-        __ Vmax(DataTypeValue::S32, dst, lhs, rhs);
-      }
+      __ Vmax(DataTypeValue::S32, dst, lhs, rhs);
       break;
     default:
       LOG(FATAL) << "Unsupported SIMD type";
diff --git a/compiler/optimizing/code_generator_vector_mips.cc b/compiler/optimizing/code_generator_vector_mips.cc
index 3cf150a..ed9de96 100644
--- a/compiler/optimizing/code_generator_vector_mips.cc
+++ b/compiler/optimizing/code_generator_vector_mips.cc
@@ -613,32 +613,30 @@
       DCHECK_EQ(8u, instruction->GetVectorLength());
       __ Min_sH(dst, lhs, rhs);
       break;
+    case DataType::Type::kUint32:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ Min_uW(dst, lhs, rhs);
+      break;
     case DataType::Type::kInt32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      if (instruction->IsUnsigned()) {
-        __ Min_uW(dst, lhs, rhs);
-      } else {
-        __ Min_sW(dst, lhs, rhs);
-      }
+      __ Min_sW(dst, lhs, rhs);
+      break;
+    case DataType::Type::kUint64:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ Min_uD(dst, lhs, rhs);
       break;
     case DataType::Type::kInt64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      if (instruction->IsUnsigned()) {
-        __ Min_uD(dst, lhs, rhs);
-      } else {
-        __ Min_sD(dst, lhs, rhs);
-      }
+      __ Min_sD(dst, lhs, rhs);
       break;
     // When one of arguments is NaN, fmin.df returns other argument, but Java expects a NaN value.
     // TODO: Fix min(x, NaN) cases for float and double.
     case DataType::Type::kFloat32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      DCHECK(!instruction->IsUnsigned());
       __ FminW(dst, lhs, rhs);
       break;
     case DataType::Type::kFloat64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      DCHECK(!instruction->IsUnsigned());
       __ FminD(dst, lhs, rhs);
       break;
     default:
@@ -673,32 +671,30 @@
       DCHECK_EQ(8u, instruction->GetVectorLength());
       __ Max_sH(dst, lhs, rhs);
       break;
+    case DataType::Type::kUint32:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ Max_uW(dst, lhs, rhs);
+      break;
     case DataType::Type::kInt32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      if (instruction->IsUnsigned()) {
-        __ Max_uW(dst, lhs, rhs);
-      } else {
-        __ Max_sW(dst, lhs, rhs);
-      }
+      __ Max_sW(dst, lhs, rhs);
+      break;
+    case DataType::Type::kUint64:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ Max_uD(dst, lhs, rhs);
       break;
     case DataType::Type::kInt64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      if (instruction->IsUnsigned()) {
-        __ Max_uD(dst, lhs, rhs);
-      } else {
-        __ Max_sD(dst, lhs, rhs);
-      }
+      __ Max_sD(dst, lhs, rhs);
       break;
     // When one of arguments is NaN, fmax.df returns other argument, but Java expects a NaN value.
     // TODO: Fix max(x, NaN) cases for float and double.
     case DataType::Type::kFloat32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      DCHECK(!instruction->IsUnsigned());
       __ FmaxW(dst, lhs, rhs);
       break;
     case DataType::Type::kFloat64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      DCHECK(!instruction->IsUnsigned());
       __ FmaxD(dst, lhs, rhs);
       break;
     default:
diff --git a/compiler/optimizing/code_generator_vector_mips64.cc b/compiler/optimizing/code_generator_vector_mips64.cc
index 2d69533..9ea55ec 100644
--- a/compiler/optimizing/code_generator_vector_mips64.cc
+++ b/compiler/optimizing/code_generator_vector_mips64.cc
@@ -612,32 +612,30 @@
       DCHECK_EQ(8u, instruction->GetVectorLength());
       __ Min_sH(dst, lhs, rhs);
       break;
+    case DataType::Type::kUint32:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ Min_uW(dst, lhs, rhs);
+      break;
     case DataType::Type::kInt32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      if (instruction->IsUnsigned()) {
-        __ Min_uW(dst, lhs, rhs);
-      } else {
-        __ Min_sW(dst, lhs, rhs);
-      }
+      __ Min_sW(dst, lhs, rhs);
+      break;
+    case DataType::Type::kUint64:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ Min_uD(dst, lhs, rhs);
       break;
     case DataType::Type::kInt64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      if (instruction->IsUnsigned()) {
-        __ Min_uD(dst, lhs, rhs);
-      } else {
-        __ Min_sD(dst, lhs, rhs);
-      }
+      __ Min_sD(dst, lhs, rhs);
       break;
     // When one of arguments is NaN, fmin.df returns other argument, but Java expects a NaN value.
     // TODO: Fix min(x, NaN) cases for float and double.
     case DataType::Type::kFloat32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      DCHECK(!instruction->IsUnsigned());
       __ FminW(dst, lhs, rhs);
       break;
     case DataType::Type::kFloat64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      DCHECK(!instruction->IsUnsigned());
       __ FminD(dst, lhs, rhs);
       break;
     default:
@@ -672,32 +670,30 @@
       DCHECK_EQ(8u, instruction->GetVectorLength());
       __ Max_sH(dst, lhs, rhs);
       break;
+    case DataType::Type::kUint32:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ Max_uW(dst, lhs, rhs);
+      break;
     case DataType::Type::kInt32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      if (instruction->IsUnsigned()) {
-        __ Max_uW(dst, lhs, rhs);
-      } else {
-        __ Max_sW(dst, lhs, rhs);
-      }
+      __ Max_sW(dst, lhs, rhs);
+      break;
+    case DataType::Type::kUint64:
+      DCHECK_EQ(2u, instruction->GetVectorLength());
+      __ Max_uD(dst, lhs, rhs);
       break;
     case DataType::Type::kInt64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      if (instruction->IsUnsigned()) {
-        __ Max_uD(dst, lhs, rhs);
-      } else {
-        __ Max_sD(dst, lhs, rhs);
-      }
+      __ Max_sD(dst, lhs, rhs);
       break;
     // When one of arguments is NaN, fmax.df returns other argument, but Java expects a NaN value.
     // TODO: Fix max(x, NaN) cases for float and double.
     case DataType::Type::kFloat32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      DCHECK(!instruction->IsUnsigned());
       __ FmaxW(dst, lhs, rhs);
       break;
     case DataType::Type::kFloat64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      DCHECK(!instruction->IsUnsigned());
       __ FmaxD(dst, lhs, rhs);
       break;
     default:
diff --git a/compiler/optimizing/code_generator_vector_x86.cc b/compiler/optimizing/code_generator_vector_x86.cc
index 7b4b85d..f2ffccc 100644
--- a/compiler/optimizing/code_generator_vector_x86.cc
+++ b/compiler/optimizing/code_generator_vector_x86.cc
@@ -640,23 +640,21 @@
       DCHECK_EQ(8u, instruction->GetVectorLength());
       __ pminsw(dst, src);
       break;
+    case DataType::Type::kUint32:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ pminud(dst, src);
+      break;
     case DataType::Type::kInt32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      if (instruction->IsUnsigned()) {
-        __ pminud(dst, src);
-      } else {
-        __ pminsd(dst, src);
-      }
+      __ pminsd(dst, src);
       break;
     // Next cases are sloppy wrt 0.0 vs -0.0.
     case DataType::Type::kFloat32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      DCHECK(!instruction->IsUnsigned());
       __ minps(dst, src);
       break;
     case DataType::Type::kFloat64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      DCHECK(!instruction->IsUnsigned());
       __ minpd(dst, src);
       break;
     default:
@@ -691,23 +689,21 @@
       DCHECK_EQ(8u, instruction->GetVectorLength());
       __ pmaxsw(dst, src);
       break;
+    case DataType::Type::kUint32:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ pmaxud(dst, src);
+      break;
     case DataType::Type::kInt32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      if (instruction->IsUnsigned()) {
-        __ pmaxud(dst, src);
-      } else {
-        __ pmaxsd(dst, src);
-      }
+      __ pmaxsd(dst, src);
       break;
     // Next cases are sloppy wrt 0.0 vs -0.0.
     case DataType::Type::kFloat32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      DCHECK(!instruction->IsUnsigned());
       __ maxps(dst, src);
       break;
     case DataType::Type::kFloat64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      DCHECK(!instruction->IsUnsigned());
       __ maxpd(dst, src);
       break;
     default:
diff --git a/compiler/optimizing/code_generator_vector_x86_64.cc b/compiler/optimizing/code_generator_vector_x86_64.cc
index 107030e..e2b0485 100644
--- a/compiler/optimizing/code_generator_vector_x86_64.cc
+++ b/compiler/optimizing/code_generator_vector_x86_64.cc
@@ -623,23 +623,21 @@
       DCHECK_EQ(8u, instruction->GetVectorLength());
       __ pminsw(dst, src);
       break;
+    case DataType::Type::kUint32:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ pminud(dst, src);
+      break;
     case DataType::Type::kInt32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      if (instruction->IsUnsigned()) {
-        __ pminud(dst, src);
-      } else {
-        __ pminsd(dst, src);
-      }
+      __ pminsd(dst, src);
       break;
     // Next cases are sloppy wrt 0.0 vs -0.0.
     case DataType::Type::kFloat32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      DCHECK(!instruction->IsUnsigned());
       __ minps(dst, src);
       break;
     case DataType::Type::kFloat64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      DCHECK(!instruction->IsUnsigned());
       __ minpd(dst, src);
       break;
     default:
@@ -674,23 +672,21 @@
       DCHECK_EQ(8u, instruction->GetVectorLength());
       __ pmaxsw(dst, src);
       break;
+    case DataType::Type::kUint32:
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      __ pmaxud(dst, src);
+      break;
     case DataType::Type::kInt32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      if (instruction->IsUnsigned()) {
-        __ pmaxud(dst, src);
-      } else {
-        __ pmaxsd(dst, src);
-      }
+      __ pmaxsd(dst, src);
       break;
     // Next cases are sloppy wrt 0.0 vs -0.0.
     case DataType::Type::kFloat32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      DCHECK(!instruction->IsUnsigned());
       __ maxps(dst, src);
       break;
     case DataType::Type::kFloat64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      DCHECK(!instruction->IsUnsigned());
       __ maxpd(dst, src);
       break;
     default:
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index c52c7ff..5fede80 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1134,9 +1134,11 @@
     case DataType::Type::kInt8:
     case DataType::Type::kUint16:
     case DataType::Type::kInt16:
+    case DataType::Type::kUint32:
     case DataType::Type::kInt32:
       return Location::RegisterLocation(EAX);
 
+    case DataType::Type::kUint64:
     case DataType::Type::kInt64:
       return Location::RegisterPairLocation(EAX, EDX);
 
@@ -1206,6 +1208,8 @@
       }
     }
 
+    case DataType::Type::kUint32:
+    case DataType::Type::kUint64:
     case DataType::Type::kVoid:
       LOG(FATAL) << "Unexpected parameter type " << type;
       break;
@@ -4844,6 +4848,8 @@
       break;
     }
 
+    case DataType::Type::kUint32:
+    case DataType::Type::kUint64:
     case DataType::Type::kVoid:
       LOG(FATAL) << "Unreachable type " << load_type;
       UNREACHABLE();
@@ -5017,6 +5023,8 @@
       break;
     }
 
+    case DataType::Type::kUint32:
+    case DataType::Type::kUint64:
     case DataType::Type::kVoid:
       LOG(FATAL) << "Unreachable type " << field_type;
       UNREACHABLE();
@@ -5320,6 +5328,8 @@
       break;
     }
 
+    case DataType::Type::kUint32:
+    case DataType::Type::kUint64:
     case DataType::Type::kVoid:
       LOG(FATAL) << "Unreachable type " << type;
       UNREACHABLE();
@@ -5571,6 +5581,8 @@
       break;
     }
 
+    case DataType::Type::kUint32:
+    case DataType::Type::kUint64:
     case DataType::Type::kVoid:
       LOG(FATAL) << "Unreachable type " << instruction->GetType();
       UNREACHABLE();
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index ee5918d..ae35ab5 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -2273,7 +2273,9 @@
     case DataType::Type::kInt8:
     case DataType::Type::kUint16:
     case DataType::Type::kInt16:
+    case DataType::Type::kUint32:
     case DataType::Type::kInt32:
+    case DataType::Type::kUint64:
     case DataType::Type::kInt64:
       return Location::RegisterLocation(RAX);
 
@@ -2342,6 +2344,8 @@
       }
     }
 
+    case DataType::Type::kUint32:
+    case DataType::Type::kUint64:
     case DataType::Type::kVoid:
       LOG(FATAL) << "Unexpected parameter type " << type;
       break;
@@ -4307,6 +4311,8 @@
       break;
     }
 
+    case DataType::Type::kUint32:
+    case DataType::Type::kUint64:
     case DataType::Type::kVoid:
       LOG(FATAL) << "Unreachable type " << load_type;
       UNREACHABLE();
@@ -4470,6 +4476,8 @@
       break;
     }
 
+    case DataType::Type::kUint32:
+    case DataType::Type::kUint64:
     case DataType::Type::kVoid:
       LOG(FATAL) << "Unreachable type " << field_type;
       UNREACHABLE();
@@ -4763,6 +4771,8 @@
       break;
     }
 
+    case DataType::Type::kUint32:
+    case DataType::Type::kUint64:
     case DataType::Type::kVoid:
       LOG(FATAL) << "Unreachable type " << type;
       UNREACHABLE();
@@ -5002,6 +5012,8 @@
       break;
     }
 
+    case DataType::Type::kUint32:
+    case DataType::Type::kUint64:
     case DataType::Type::kVoid:
       LOG(FATAL) << "Unreachable type " << instruction->GetType();
       UNREACHABLE();
diff --git a/compiler/optimizing/data_type-inl.h b/compiler/optimizing/data_type-inl.h
index e389bad..e2cf7a8 100644
--- a/compiler/optimizing/data_type-inl.h
+++ b/compiler/optimizing/data_type-inl.h
@@ -53,7 +53,9 @@
     case DataType::Type::kInt8: return 'b';       // Java byte (B).
     case DataType::Type::kUint16: return 'c';     // Java char (C).
     case DataType::Type::kInt16: return 's';      // Java short (S).
+    case DataType::Type::kUint32: return 'u';     // Picked 'u' for unsigned.
     case DataType::Type::kInt32: return 'i';      // Java int (I).
+    case DataType::Type::kUint64: return 'w';     // Picked 'w' for long unsigned.
     case DataType::Type::kInt64: return 'j';      // Java long (J).
     case DataType::Type::kFloat32: return 'f';    // Java float (F).
     case DataType::Type::kFloat64: return 'd';    // Java double (D).
diff --git a/compiler/optimizing/data_type.cc b/compiler/optimizing/data_type.cc
index 3c99a76..cb354f4 100644
--- a/compiler/optimizing/data_type.cc
+++ b/compiler/optimizing/data_type.cc
@@ -25,7 +25,9 @@
     "Int8",
     "Uint16",
     "Int16",
+    "Uint32",
     "Int32",
+    "Uint64",
     "Int64",
     "Float32",
     "Float64",
diff --git a/compiler/optimizing/data_type.h b/compiler/optimizing/data_type.h
index 548fe28..4a6c914 100644
--- a/compiler/optimizing/data_type.h
+++ b/compiler/optimizing/data_type.h
@@ -34,7 +34,9 @@
     kInt8,
     kUint16,
     kInt16,
+    kUint32,
     kInt32,
+    kUint64,
     kInt64,
     kFloat32,
     kFloat64,
@@ -55,9 +57,11 @@
       case Type::kUint16:
       case Type::kInt16:
         return 1;
+      case Type::kUint32:
       case Type::kInt32:
       case Type::kFloat32:
         return 2;
+      case Type::kUint64:
       case Type::kInt64:
       case Type::kFloat64:
         return 3;
@@ -80,9 +84,11 @@
       case Type::kUint16:
       case Type::kInt16:
         return 2;
+      case Type::kUint32:
       case Type::kInt32:
       case Type::kFloat32:
         return 4;
+      case Type::kUint64:
       case Type::kInt64:
       case Type::kFloat64:
         return 8;
@@ -107,7 +113,9 @@
       case Type::kInt8:
       case Type::kUint16:
       case Type::kInt16:
+      case Type::kUint32:
       case Type::kInt32:
+      case Type::kUint64:
       case Type::kInt64:
         return true;
       default:
@@ -120,11 +128,12 @@
   }
 
   static bool Is64BitType(Type type) {
-    return type == Type::kInt64 || type == Type::kFloat64;
+    return type == Type::kUint64 || type == Type::kInt64 || type == Type::kFloat64;
   }
 
   static bool IsUnsignedType(Type type) {
-    return type == Type::kBool || type == Type::kUint8 || type == Type::kUint16;
+    return type == Type::kBool || type == Type::kUint8 || type == Type::kUint16 ||
+        type == Type::kUint32 || type == Type::kUint64;
   }
 
   // Return the general kind of `type`, fusing integer-like types as Type::kInt.
@@ -133,10 +142,14 @@
       case Type::kBool:
       case Type::kUint8:
       case Type::kInt8:
-      case Type::kInt16:
       case Type::kUint16:
+      case Type::kInt16:
+      case Type::kUint32:
       case Type::kInt32:
         return Type::kInt32;
+      case Type::kUint64:
+      case Type::kInt64:
+        return Type::kInt64;
       default:
         return type;
     }
@@ -154,8 +167,12 @@
         return std::numeric_limits<uint16_t>::min();
       case Type::kInt16:
         return std::numeric_limits<int16_t>::min();
+      case Type::kUint32:
+        return std::numeric_limits<uint32_t>::min();
       case Type::kInt32:
         return std::numeric_limits<int32_t>::min();
+      case Type::kUint64:
+        return std::numeric_limits<uint64_t>::min();
       case Type::kInt64:
         return std::numeric_limits<int64_t>::min();
       default:
@@ -176,8 +193,12 @@
         return std::numeric_limits<uint16_t>::max();
       case Type::kInt16:
         return std::numeric_limits<int16_t>::max();
+      case Type::kUint32:
+        return std::numeric_limits<uint32_t>::max();
       case Type::kInt32:
         return std::numeric_limits<int32_t>::max();
+      case Type::kUint64:
+        return std::numeric_limits<uint64_t>::max();
       case Type::kInt64:
         return std::numeric_limits<int64_t>::max();
       default:
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 12c6988..6144162 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -533,20 +533,9 @@
 
   void VisitVecHalvingAdd(HVecHalvingAdd* hadd) OVERRIDE {
     VisitVecBinaryOperation(hadd);
-    StartAttributeStream("unsigned") << std::boolalpha << hadd->IsUnsigned() << std::noboolalpha;
     StartAttributeStream("rounded") << std::boolalpha << hadd->IsRounded() << std::noboolalpha;
   }
 
-  void VisitVecMin(HVecMin* min) OVERRIDE {
-    VisitVecBinaryOperation(min);
-    StartAttributeStream("unsigned") << std::boolalpha << min->IsUnsigned() << std::noboolalpha;
-  }
-
-  void VisitVecMax(HVecMax* max) OVERRIDE {
-    VisitVecBinaryOperation(max);
-    StartAttributeStream("unsigned") << std::boolalpha << max->IsUnsigned() << std::noboolalpha;
-  }
-
   void VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) OVERRIDE {
     VisitVecOperation(instruction);
     StartAttributeStream("kind") << instruction->GetOpKind();
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 3dc1ef7..8994963 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -30,46 +30,6 @@
 
 namespace art {
 
-// TODO: Clean up the packed type detection so that we have the right type straight away
-// and do not need to go through this normalization.
-static inline void NormalizePackedType(/* inout */ DataType::Type* type,
-                                       /* inout */ bool* is_unsigned) {
-  switch (*type) {
-    case DataType::Type::kBool:
-      DCHECK(!*is_unsigned);
-      break;
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-      if (*is_unsigned) {
-        *is_unsigned = false;
-        *type = DataType::Type::kUint8;
-      } else {
-        *type = DataType::Type::kInt8;
-      }
-      break;
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-      if (*is_unsigned) {
-        *is_unsigned = false;
-        *type = DataType::Type::kUint16;
-      } else {
-        *type = DataType::Type::kInt16;
-      }
-      break;
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-      // We do not have kUint32 and kUint64 at the moment.
-      break;
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      DCHECK(!*is_unsigned);
-      break;
-    default:
-      LOG(FATAL) << "Unexpected type " << *type;
-      UNREACHABLE();
-  }
-}
-
 // Enables vectorization (SIMDization) in the loop optimizer.
 static constexpr bool kEnableVectorization = true;
 
@@ -1362,8 +1322,10 @@
         }
         if (VectorizeUse(node, r, generate_code, type, restrictions)) {
           if (generate_code) {
-            NormalizePackedType(&type, &is_unsigned);
-            GenerateVecOp(instruction, vector_map_->Get(r), nullptr, type);
+            GenerateVecOp(instruction,
+                          vector_map_->Get(r),
+                          nullptr,
+                          HVecOperation::ToProperType(type, is_unsigned));
           }
           return true;
         }
@@ -1865,18 +1827,26 @@
           case Intrinsics::kMathMinLongLong:
           case Intrinsics::kMathMinFloatFloat:
           case Intrinsics::kMathMinDoubleDouble: {
-            NormalizePackedType(&type, &is_unsigned);
             vector = new (global_allocator_)
-                HVecMin(global_allocator_, opa, opb, type, vector_length_, is_unsigned, dex_pc);
+                HVecMin(global_allocator_,
+                        opa,
+                        opb,
+                        HVecOperation::ToProperType(type, is_unsigned),
+                        vector_length_,
+                        dex_pc);
             break;
           }
           case Intrinsics::kMathMaxIntInt:
           case Intrinsics::kMathMaxLongLong:
           case Intrinsics::kMathMaxFloatFloat:
           case Intrinsics::kMathMaxDoubleDouble: {
-            NormalizePackedType(&type, &is_unsigned);
             vector = new (global_allocator_)
-                HVecMax(global_allocator_, opa, opb, type, vector_length_, is_unsigned, dex_pc);
+                HVecMax(global_allocator_,
+                        opa,
+                        opb,
+                        HVecOperation::ToProperType(type, is_unsigned),
+                        vector_length_,
+                        dex_pc);
             break;
           }
           default:
@@ -1987,15 +1957,13 @@
           VectorizeUse(node, s, generate_code, type, restrictions)) {
         if (generate_code) {
           if (vector_mode_ == kVector) {
-            NormalizePackedType(&type, &is_unsigned);
             vector_map_->Put(instruction, new (global_allocator_) HVecHalvingAdd(
                 global_allocator_,
                 vector_map_->Get(r),
                 vector_map_->Get(s),
-                type,
+                HVecOperation::ToProperType(type, is_unsigned),
                 vector_length_,
                 is_rounded,
-                is_unsigned,
                 kNoDexPc));
             MaybeRecordStat(stats_, MethodCompilationStat::kLoopVectorizedIdiom);
           } else {
@@ -2086,7 +2054,7 @@
       VectorizeUse(node, r, generate_code, sub_type, restrictions) &&
       VectorizeUse(node, s, generate_code, sub_type, restrictions)) {
     if (generate_code) {
-      NormalizePackedType(&reduction_type, &is_unsigned);
+      reduction_type = HVecOperation::ToProperType(reduction_type, is_unsigned);
       if (vector_mode_ == kVector) {
         vector_map_->Put(instruction, new (global_allocator_) HVecSADAccumulate(
             global_allocator_,
diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h
index 87dff84..ecabdf3 100644
--- a/compiler/optimizing/nodes_vector.h
+++ b/compiler/optimizing/nodes_vector.h
@@ -131,8 +131,6 @@
   }
 
   // Maps an integral type to the same-size signed type and leaves other types alone.
-  // Can be used to test relaxed type consistency in which packed same-size integral
-  // types can co-exist, but other type mixes are an error.
   static DataType::Type ToSignedType(DataType::Type type) {
     switch (type) {
       case DataType::Type::kBool:  // 1-byte storage unit
@@ -160,6 +158,11 @@
     }
   }
 
+  // Maps an integral type to the same-size (un)signed type. Leaves other types alone.
+  static DataType::Type ToProperType(DataType::Type type, bool is_unsigned) {
+    return is_unsigned ? ToUnsignedType(type) : ToSignedType(type);
+  }
+
   // Helper method to determine if an instruction returns a SIMD value.
   // TODO: This method is needed until we introduce SIMD as proper type.
   static bool ReturnsSIMDValue(HInstruction* instruction) {
@@ -286,6 +289,8 @@
 };
 
 // Packed type consistency checker ("same vector length" integral types may mix freely).
+// Tests relaxed type consistency in which packed same-size integral types can co-exist,
+// but other type mixes are an error.
 inline static bool HasConsistentPackedTypes(HInstruction* input, DataType::Type type) {
   if (input->IsPhi()) {
     return input->GetType() == HVecOperation::kSIMDType;  // carries SIMD
@@ -518,7 +523,7 @@
 // Performs halving add on every component in the two vectors, viz.
 // rounded   [ x1, .. , xn ] hradd [ y1, .. , yn ] = [ (x1 + y1 + 1) >> 1, .. , (xn + yn + 1) >> 1 ]
 // truncated [ x1, .. , xn ] hadd  [ y1, .. , yn ] = [ (x1 + y1)     >> 1, .. , (xn + yn )    >> 1 ]
-// for either both signed or both unsigned operands x, y.
+// for either both signed or both unsigned operands x, y (reflected in packed_type).
 class HVecHalvingAdd FINAL : public HVecBinaryOperation {
  public:
   HVecHalvingAdd(ArenaAllocator* allocator,
@@ -527,21 +532,13 @@
                  DataType::Type packed_type,
                  size_t vector_length,
                  bool is_rounded,
-                 bool is_unsigned,
                  uint32_t dex_pc)
       : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
-    // The `is_unsigned` flag should be used exclusively with the Int32 or Int64.
-    // This flag is a temporary measure while we do not have the Uint32 and Uint64 data types.
-    DCHECK(!is_unsigned ||
-           packed_type == DataType::Type::kInt32 ||
-           packed_type == DataType::Type::kInt64) << packed_type;
     DCHECK(HasConsistentPackedTypes(left, packed_type));
     DCHECK(HasConsistentPackedTypes(right, packed_type));
-    SetPackedFlag<kFieldHAddIsUnsigned>(is_unsigned);
     SetPackedFlag<kFieldHAddIsRounded>(is_rounded);
   }
 
-  bool IsUnsigned() const { return GetPackedFlag<kFieldHAddIsUnsigned>(); }
   bool IsRounded() const { return GetPackedFlag<kFieldHAddIsRounded>(); }
 
   bool CanBeMoved() const OVERRIDE { return true; }
@@ -549,9 +546,7 @@
   bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
     DCHECK(other->IsVecHalvingAdd());
     const HVecHalvingAdd* o = other->AsVecHalvingAdd();
-    return HVecOperation::InstructionDataEquals(o) &&
-        IsUnsigned() == o->IsUnsigned() &&
-        IsRounded() == o->IsRounded();
+    return HVecOperation::InstructionDataEquals(o) && IsRounded() == o->IsRounded();
   }
 
   DECLARE_INSTRUCTION(VecHalvingAdd);
@@ -561,8 +556,7 @@
 
  private:
   // Additional packed bits.
-  static constexpr size_t kFieldHAddIsUnsigned = HVecOperation::kNumberOfVectorOpPackedBits;
-  static constexpr size_t kFieldHAddIsRounded = kFieldHAddIsUnsigned + 1;
+  static constexpr size_t kFieldHAddIsRounded = HVecOperation::kNumberOfVectorOpPackedBits;
   static constexpr size_t kNumberOfHAddPackedBits = kFieldHAddIsRounded + 1;
   static_assert(kNumberOfHAddPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
 };
@@ -638,7 +632,7 @@
 
 // Takes minimum of every component in the two vectors,
 // viz. MIN( [ x1, .. , xn ] , [ y1, .. , yn ]) = [ min(x1, y1), .. , min(xn, yn) ]
-// for either both signed or both unsigned operands x, y.
+// for either both signed or both unsigned operands x, y (reflected in packed_type).
 class HVecMin FINAL : public HVecBinaryOperation {
  public:
   HVecMin(ArenaAllocator* allocator,
@@ -646,44 +640,23 @@
           HInstruction* right,
           DataType::Type packed_type,
           size_t vector_length,
-          bool is_unsigned,
           uint32_t dex_pc)
       : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
-    // The `is_unsigned` flag should be used exclusively with the Int32 or Int64.
-    // This flag is a temporary measure while we do not have the Uint32 and Uint64 data types.
-    DCHECK(!is_unsigned ||
-           packed_type == DataType::Type::kInt32 ||
-           packed_type == DataType::Type::kInt64) << packed_type;
     DCHECK(HasConsistentPackedTypes(left, packed_type));
     DCHECK(HasConsistentPackedTypes(right, packed_type));
-    SetPackedFlag<kFieldMinOpIsUnsigned>(is_unsigned);
   }
 
-  bool IsUnsigned() const { return GetPackedFlag<kFieldMinOpIsUnsigned>(); }
-
   bool CanBeMoved() const OVERRIDE { return true; }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
-    DCHECK(other->IsVecMin());
-    const HVecMin* o = other->AsVecMin();
-    return HVecOperation::InstructionDataEquals(o) && IsUnsigned() == o->IsUnsigned();
-  }
-
   DECLARE_INSTRUCTION(VecMin);
 
  protected:
   DEFAULT_COPY_CONSTRUCTOR(VecMin);
-
- private:
-  // Additional packed bits.
-  static constexpr size_t kFieldMinOpIsUnsigned = HVecOperation::kNumberOfVectorOpPackedBits;
-  static constexpr size_t kNumberOfMinOpPackedBits = kFieldMinOpIsUnsigned + 1;
-  static_assert(kNumberOfMinOpPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
 };
 
 // Takes maximum of every component in the two vectors,
 // viz. MAX( [ x1, .. , xn ] , [ y1, .. , yn ]) = [ max(x1, y1), .. , max(xn, yn) ]
-// for either both signed or both unsigned operands x, y.
+// for either both signed or both unsigned operands x, y (reflected in packed_type).
 class HVecMax FINAL : public HVecBinaryOperation {
  public:
   HVecMax(ArenaAllocator* allocator,
@@ -691,39 +664,18 @@
           HInstruction* right,
           DataType::Type packed_type,
           size_t vector_length,
-          bool is_unsigned,
           uint32_t dex_pc)
       : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
-    // The `is_unsigned` flag should be used exclusively with the Int32 or Int64.
-    // This flag is a temporary measure while we do not have the Uint32 and Uint64 data types.
-    DCHECK(!is_unsigned ||
-           packed_type == DataType::Type::kInt32 ||
-           packed_type == DataType::Type::kInt64) << packed_type;
     DCHECK(HasConsistentPackedTypes(left, packed_type));
     DCHECK(HasConsistentPackedTypes(right, packed_type));
-    SetPackedFlag<kFieldMaxOpIsUnsigned>(is_unsigned);
   }
 
-  bool IsUnsigned() const { return GetPackedFlag<kFieldMaxOpIsUnsigned>(); }
-
   bool CanBeMoved() const OVERRIDE { return true; }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
-    DCHECK(other->IsVecMax());
-    const HVecMax* o = other->AsVecMax();
-    return HVecOperation::InstructionDataEquals(o) && IsUnsigned() == o->IsUnsigned();
-  }
-
   DECLARE_INSTRUCTION(VecMax);
 
  protected:
   DEFAULT_COPY_CONSTRUCTOR(VecMax);
-
- private:
-  // Additional packed bits.
-  static constexpr size_t kFieldMaxOpIsUnsigned = HVecOperation::kNumberOfVectorOpPackedBits;
-  static constexpr size_t kNumberOfMaxOpPackedBits = kFieldMaxOpIsUnsigned + 1;
-  static_assert(kNumberOfMaxOpPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
 };
 
 // Bitwise-ands every component in the two vectors,
diff --git a/compiler/optimizing/nodes_vector_test.cc b/compiler/optimizing/nodes_vector_test.cc
index ab9d759..af13449 100644
--- a/compiler/optimizing/nodes_vector_test.cc
+++ b/compiler/optimizing/nodes_vector_test.cc
@@ -282,143 +282,53 @@
   EXPECT_FALSE(v0->Equals(v1));  // no longer equal
 }
 
-TEST_F(NodesVectorTest, VectorSignMattersOnMin) {
-  HVecOperation* p0 = new (GetAllocator())
-      HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
-  HVecOperation* p1 = new (GetAllocator())
-      HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc);
-  HVecOperation* p2 = new (GetAllocator())
-      HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
-
-  HVecMin* v0 = new (GetAllocator()) HVecMin(
-      GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ true, kNoDexPc);
-  HVecMin* v1 = new (GetAllocator()) HVecMin(
-      GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ false, kNoDexPc);
-  HVecMin* v2 = new (GetAllocator()) HVecMin(
-      GetAllocator(), p0, p0, DataType::Type::kInt32, 2, /*is_unsigned*/ true, kNoDexPc);
-  HVecMin* v3 = new (GetAllocator()) HVecMin(
-      GetAllocator(), p1, p1, DataType::Type::kUint8, 16, /*is_unsigned*/ false, kNoDexPc);
-  HVecMin* v4 = new (GetAllocator()) HVecMin(
-      GetAllocator(), p1, p1, DataType::Type::kInt8, 16, /*is_unsigned*/ false, kNoDexPc);
-  HVecMin* v5 = new (GetAllocator()) HVecMin(
-      GetAllocator(), p2, p2, DataType::Type::kUint16, 8, /*is_unsigned*/ false, kNoDexPc);
-  HVecMin* v6 = new (GetAllocator()) HVecMin(
-      GetAllocator(), p2, p2, DataType::Type::kInt16, 8, /*is_unsigned*/ false, kNoDexPc);
-  HVecMin* min_insns[] = { v0, v1, v2, v3, v4, v5, v6 };
-
-  EXPECT_FALSE(p0->CanBeMoved());
-  EXPECT_FALSE(p1->CanBeMoved());
-  EXPECT_FALSE(p2->CanBeMoved());
-
-  for (HVecMin* min_insn : min_insns) {
-    EXPECT_TRUE(min_insn->CanBeMoved());
-  }
-
-  // Deprecated; IsUnsigned() should be removed with the introduction of Uint32 and Uint64.
-  EXPECT_TRUE(v0->IsUnsigned());
-  EXPECT_FALSE(v1->IsUnsigned());
-  EXPECT_TRUE(v2->IsUnsigned());
-
-  for (HVecMin* min_insn1 : min_insns) {
-    for (HVecMin* min_insn2 : min_insns) {
-      EXPECT_EQ(min_insn1 == min_insn2, min_insn1->Equals(min_insn2));
-    }
-  }
-}
-
-TEST_F(NodesVectorTest, VectorSignMattersOnMax) {
-  HVecOperation* p0 = new (GetAllocator())
-      HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
-  HVecOperation* p1 = new (GetAllocator())
-      HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc);
-  HVecOperation* p2 = new (GetAllocator())
-      HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
-
-  HVecMax* v0 = new (GetAllocator()) HVecMax(
-      GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ true, kNoDexPc);
-  HVecMax* v1 = new (GetAllocator()) HVecMax(
-      GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ false, kNoDexPc);
-  HVecMax* v2 = new (GetAllocator()) HVecMax(
-      GetAllocator(), p0, p0, DataType::Type::kInt32, 2, /*is_unsigned*/ true, kNoDexPc);
-  HVecMax* v3 = new (GetAllocator()) HVecMax(
-      GetAllocator(), p1, p1, DataType::Type::kUint8, 16, /*is_unsigned*/ false, kNoDexPc);
-  HVecMax* v4 = new (GetAllocator()) HVecMax(
-      GetAllocator(), p1, p1, DataType::Type::kInt8, 16, /*is_unsigned*/ false, kNoDexPc);
-  HVecMax* v5 = new (GetAllocator()) HVecMax(
-      GetAllocator(), p2, p2, DataType::Type::kUint16, 8, /*is_unsigned*/ false, kNoDexPc);
-  HVecMax* v6 = new (GetAllocator()) HVecMax(
-      GetAllocator(), p2, p2, DataType::Type::kInt16, 8, /*is_unsigned*/ false, kNoDexPc);
-  HVecMax* max_insns[] = { v0, v1, v2, v3, v4, v5, v6 };
-
-  EXPECT_FALSE(p0->CanBeMoved());
-  EXPECT_FALSE(p1->CanBeMoved());
-  EXPECT_FALSE(p2->CanBeMoved());
-
-  for (HVecMax* max_insn : max_insns) {
-    EXPECT_TRUE(max_insn->CanBeMoved());
-  }
-
-  // Deprecated; IsUnsigned() should be removed with the introduction of Uint32 and Uint64.
-  EXPECT_TRUE(v0->IsUnsigned());
-  EXPECT_FALSE(v1->IsUnsigned());
-  EXPECT_TRUE(v2->IsUnsigned());
-
-  for (HVecMax* max_insn1 : max_insns) {
-    for (HVecMax* max_insn2 : max_insns) {
-      EXPECT_EQ(max_insn1 == max_insn2, max_insn1->Equals(max_insn2));
-    }
-  }
-}
-
 TEST_F(NodesVectorTest, VectorAttributesMatterOnHalvingAdd) {
+  HVecOperation* u0 = new (GetAllocator())
+      HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kUint32, 4, kNoDexPc);
+  HVecOperation* u1 = new (GetAllocator())
+      HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kUint16, 8, kNoDexPc);
+  HVecOperation* u2 = new (GetAllocator())
+      HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kUint8, 16, kNoDexPc);
+
   HVecOperation* p0 = new (GetAllocator())
       HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
   HVecOperation* p1 = new (GetAllocator())
-      HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc);
+      HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kInt16, 8, kNoDexPc);
   HVecOperation* p2 = new (GetAllocator())
-      HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
+      HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kInt8, 16, kNoDexPc);
 
   HVecHalvingAdd* v0 = new (GetAllocator()) HVecHalvingAdd(
-      GetAllocator(), p0, p0, DataType::Type::kInt32, 4,
-      /*is_rounded*/ true, /*is_unsigned*/ true, kNoDexPc);
+      GetAllocator(), u0, u0, DataType::Type::kUint32, 4, /*is_rounded*/ true, kNoDexPc);
   HVecHalvingAdd* v1 = new (GetAllocator()) HVecHalvingAdd(
-      GetAllocator(), p0, p0, DataType::Type::kInt32, 4,
-      /*is_rounded*/ false, /*is_unsigned*/ true, kNoDexPc);
+      GetAllocator(), u0, u0, DataType::Type::kUint32, 4, /*is_rounded*/ false, kNoDexPc);
   HVecHalvingAdd* v2 = new (GetAllocator()) HVecHalvingAdd(
-      GetAllocator(), p0, p0, DataType::Type::kInt32, 4,
-      /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
+      GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_rounded*/ true, kNoDexPc);
   HVecHalvingAdd* v3 = new (GetAllocator()) HVecHalvingAdd(
-      GetAllocator(), p0, p0, DataType::Type::kInt32, 4,
-      /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
-  HVecHalvingAdd* v4 = new (GetAllocator()) HVecHalvingAdd(
-      GetAllocator(), p0, p0, DataType::Type::kInt32, 2,
-      /*is_rounded*/ true, /*is_unsigned*/ true, kNoDexPc);
-  HVecHalvingAdd* v5 = new (GetAllocator()) HVecHalvingAdd(
-      GetAllocator(), p1, p1, DataType::Type::kUint8, 16,
-      /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
-  HVecHalvingAdd* v6 = new (GetAllocator()) HVecHalvingAdd(
-      GetAllocator(), p1, p1, DataType::Type::kUint8, 16,
-      /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
-  HVecHalvingAdd* v7 = new (GetAllocator()) HVecHalvingAdd(
-      GetAllocator(), p1, p1, DataType::Type::kInt8, 16,
-      /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
-  HVecHalvingAdd* v8 = new (GetAllocator()) HVecHalvingAdd(
-      GetAllocator(), p1, p1, DataType::Type::kInt8, 16,
-      /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
-  HVecHalvingAdd* v9 = new (GetAllocator()) HVecHalvingAdd(
-      GetAllocator(), p2, p2, DataType::Type::kUint16, 8,
-      /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
-  HVecHalvingAdd* v10 = new (GetAllocator()) HVecHalvingAdd(
-      GetAllocator(), p2, p2, DataType::Type::kUint16, 8,
-      /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
-  HVecHalvingAdd* v11 = new (GetAllocator()) HVecHalvingAdd(
-      GetAllocator(), p2, p2, DataType::Type::kInt16, 2,
-      /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
-  HVecHalvingAdd* v12 = new (GetAllocator()) HVecHalvingAdd(
-      GetAllocator(), p2, p2, DataType::Type::kInt16, 2,
-      /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
-  HVecHalvingAdd* hadd_insns[] = { v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12 };
+      GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_rounded*/ false, kNoDexPc);
 
+  HVecHalvingAdd* v4 = new (GetAllocator()) HVecHalvingAdd(
+      GetAllocator(), u1, u1, DataType::Type::kUint16, 8, /*is_rounded*/ true, kNoDexPc);
+  HVecHalvingAdd* v5 = new (GetAllocator()) HVecHalvingAdd(
+      GetAllocator(), u1, u1, DataType::Type::kUint16, 8, /*is_rounded*/ false, kNoDexPc);
+  HVecHalvingAdd* v6 = new (GetAllocator()) HVecHalvingAdd(
+      GetAllocator(), p1, p1, DataType::Type::kInt16, 8, /*is_rounded*/ true, kNoDexPc);
+  HVecHalvingAdd* v7 = new (GetAllocator()) HVecHalvingAdd(
+      GetAllocator(), p1, p1, DataType::Type::kInt16, 8, /*is_rounded*/ false, kNoDexPc);
+
+  HVecHalvingAdd* v8 = new (GetAllocator()) HVecHalvingAdd(
+      GetAllocator(), u2, u2, DataType::Type::kUint8, 16, /*is_rounded*/ true, kNoDexPc);
+  HVecHalvingAdd* v9 = new (GetAllocator()) HVecHalvingAdd(
+      GetAllocator(), u2, u2, DataType::Type::kUint8, 16, /*is_rounded*/ false, kNoDexPc);
+  HVecHalvingAdd* v10 = new (GetAllocator()) HVecHalvingAdd(
+      GetAllocator(), p2, p2, DataType::Type::kInt8, 16, /*is_rounded*/ true, kNoDexPc);
+  HVecHalvingAdd* v11 = new (GetAllocator()) HVecHalvingAdd(
+      GetAllocator(), p2, p2, DataType::Type::kInt8, 16, /*is_rounded*/ false, kNoDexPc);
+
+  HVecHalvingAdd* hadd_insns[] = { v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11 };
+
+  EXPECT_FALSE(u0->CanBeMoved());
+  EXPECT_FALSE(u1->CanBeMoved());
+  EXPECT_FALSE(u2->CanBeMoved());
   EXPECT_FALSE(p0->CanBeMoved());
   EXPECT_FALSE(p1->CanBeMoved());
   EXPECT_FALSE(p2->CanBeMoved());
@@ -427,26 +337,18 @@
     EXPECT_TRUE(hadd_insn->CanBeMoved());
   }
 
-  // Deprecated; IsUnsigned() should be removed with the introduction of Uint32 and Uint64.
-  EXPECT_TRUE(v0->IsUnsigned());
-  EXPECT_TRUE(v1->IsUnsigned());
-  EXPECT_TRUE(!v2->IsUnsigned());
-  EXPECT_TRUE(!v3->IsUnsigned());
-  EXPECT_TRUE(v4->IsUnsigned());
-
   EXPECT_TRUE(v0->IsRounded());
   EXPECT_TRUE(!v1->IsRounded());
   EXPECT_TRUE(v2->IsRounded());
   EXPECT_TRUE(!v3->IsRounded());
   EXPECT_TRUE(v4->IsRounded());
-  EXPECT_TRUE(v5->IsRounded());
-  EXPECT_TRUE(!v6->IsRounded());
-  EXPECT_TRUE(v7->IsRounded());
-  EXPECT_TRUE(!v8->IsRounded());
-  EXPECT_TRUE(v9->IsRounded());
-  EXPECT_TRUE(!v10->IsRounded());
-  EXPECT_TRUE(v11->IsRounded());
-  EXPECT_TRUE(!v12->IsRounded());
+  EXPECT_TRUE(!v5->IsRounded());
+  EXPECT_TRUE(v6->IsRounded());
+  EXPECT_TRUE(!v7->IsRounded());
+  EXPECT_TRUE(v8->IsRounded());
+  EXPECT_TRUE(!v9->IsRounded());
+  EXPECT_TRUE(v10->IsRounded());
+  EXPECT_TRUE(!v11->IsRounded());
 
   for (HVecHalvingAdd* hadd_insn1 : hadd_insns) {
     for (HVecHalvingAdd* hadd_insn2 : hadd_insns) {
diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc
index 1d3fe03..27f9ac3 100644
--- a/compiler/optimizing/register_allocation_resolver.cc
+++ b/compiler/optimizing/register_allocation_resolver.cc
@@ -103,6 +103,7 @@
         case DataType::Type::kFloat64:
           slot += long_spill_slots;
           FALLTHROUGH_INTENDED;
+        case DataType::Type::kUint64:
         case DataType::Type::kInt64:
           slot += float_spill_slots;
           FALLTHROUGH_INTENDED;
@@ -110,6 +111,7 @@
           slot += int_spill_slots;
           FALLTHROUGH_INTENDED;
         case DataType::Type::kReference:
+        case DataType::Type::kUint32:
         case DataType::Type::kInt32:
         case DataType::Type::kUint16:
         case DataType::Type::kUint8:
diff --git a/compiler/optimizing/register_allocator_graph_color.cc b/compiler/optimizing/register_allocator_graph_color.cc
index ad5248e..fa7ad82 100644
--- a/compiler/optimizing/register_allocator_graph_color.cc
+++ b/compiler/optimizing/register_allocator_graph_color.cc
@@ -1972,6 +1972,8 @@
         case DataType::Type::kInt16:
           int_intervals.push_back(parent);
           break;
+        case DataType::Type::kUint32:
+        case DataType::Type::kUint64:
         case DataType::Type::kVoid:
           LOG(FATAL) << "Unexpected type for interval " << node->GetInterval()->GetType();
           UNREACHABLE();
diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc
index cfe63bd..216fb57 100644
--- a/compiler/optimizing/register_allocator_linear_scan.cc
+++ b/compiler/optimizing/register_allocator_linear_scan.cc
@@ -1131,6 +1131,8 @@
     case DataType::Type::kInt16:
       spill_slots = &int_spill_slots_;
       break;
+    case DataType::Type::kUint32:
+    case DataType::Type::kUint64:
     case DataType::Type::kVoid:
       LOG(FATAL) << "Unexpected type for interval " << interval->GetType();
   }
diff --git a/test/651-checker-int-simd-minmax/src/Main.java b/test/651-checker-int-simd-minmax/src/Main.java
index 66343ad..cfa0ae7 100644
--- a/test/651-checker-int-simd-minmax/src/Main.java
+++ b/test/651-checker-int-simd-minmax/src/Main.java
@@ -27,10 +27,10 @@
   /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>>      outer_loop:none
   //
   /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.doitMin(int[], int[], int[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                              loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Min:d\d+>>  VecMin [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                                      loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                                      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Min:d\d+>>  VecMin [<<Get1>>,<<Get2>>] packed_type:Int32 loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<Min>>]         loop:<<Loop>>      outer_loop:none
   private static void doitMin(int[] x, int[] y, int[] z) {
     int min = Math.min(x.length, Math.min(y.length, z.length));
     for (int i = 0; i < min; i++) {
@@ -46,10 +46,10 @@
   /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>>      outer_loop:none
   //
   /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.doitMax(int[], int[], int[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                              loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Max:d\d+>>  VecMax [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                                      loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                                      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Max:d\d+>>  VecMax [<<Get1>>,<<Get2>>] packed_type:Int32 loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<Max>>]         loop:<<Loop>>      outer_loop:none
   private static void doitMax(int[] x, int[] y, int[] z) {
     int min = Math.min(x.length, Math.min(y.length, z.length));
     for (int i = 0; i < min; i++) {