Diffstat (limited to 'compiler')
-rw-r--r--  compiler/optimizing/code_generator_vector_arm64.cc    2
-rw-r--r--  compiler/optimizing/code_generator_vector_x86.cc      2
-rw-r--r--  compiler/optimizing/code_generator_vector_x86_64.cc   2
3 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/compiler/optimizing/code_generator_vector_arm64.cc b/compiler/optimizing/code_generator_vector_arm64.cc
index 6d135a9bfb..43169ba7eb 100644
--- a/compiler/optimizing/code_generator_vector_arm64.cc
+++ b/compiler/optimizing/code_generator_vector_arm64.cc
@@ -1354,6 +1354,7 @@ void InstructionCodeGeneratorARM64::VisitVecLoad(HVecLoad* instruction) {
Register scratch;
switch (instruction->GetPackedType()) {
+ case DataType::Type::kInt16: // (short) s.charAt(.) can yield HVecLoad/Int16/StringCharAt.
case DataType::Type::kUint16:
DCHECK_EQ(8u, instruction->GetVectorLength());
// Special handling of compressed/uncompressed string load.
@@ -1385,7 +1386,6 @@ void InstructionCodeGeneratorARM64::VisitVecLoad(HVecLoad* instruction) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
case DataType::Type::kInt8:
- case DataType::Type::kInt16:
case DataType::Type::kInt32:
case DataType::Type::kFloat32:
case DataType::Type::kInt64:
diff --git a/compiler/optimizing/code_generator_vector_x86.cc b/compiler/optimizing/code_generator_vector_x86.cc
index 086ae07a06..2502275b3a 100644
--- a/compiler/optimizing/code_generator_vector_x86.cc
+++ b/compiler/optimizing/code_generator_vector_x86.cc
@@ -1205,6 +1205,7 @@ void InstructionCodeGeneratorX86::VisitVecLoad(HVecLoad* instruction) {
XmmRegister reg = locations->Out().AsFpuRegister<XmmRegister>();
bool is_aligned16 = instruction->GetAlignment().IsAlignedAt(16);
switch (instruction->GetPackedType()) {
+ case DataType::Type::kInt16: // (short) s.charAt(.) can yield HVecLoad/Int16/StringCharAt.
case DataType::Type::kUint16:
DCHECK_EQ(8u, instruction->GetVectorLength());
// Special handling of compressed/uncompressed string load.
@@ -1232,7 +1233,6 @@ void InstructionCodeGeneratorX86::VisitVecLoad(HVecLoad* instruction) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
case DataType::Type::kInt8:
- case DataType::Type::kInt16:
case DataType::Type::kInt32:
case DataType::Type::kInt64:
DCHECK_LE(2u, instruction->GetVectorLength());
diff --git a/compiler/optimizing/code_generator_vector_x86_64.cc b/compiler/optimizing/code_generator_vector_x86_64.cc
index 4d31ab68d1..4a67dafd8a 100644
--- a/compiler/optimizing/code_generator_vector_x86_64.cc
+++ b/compiler/optimizing/code_generator_vector_x86_64.cc
@@ -1178,6 +1178,7 @@ void InstructionCodeGeneratorX86_64::VisitVecLoad(HVecLoad* instruction) {
XmmRegister reg = locations->Out().AsFpuRegister<XmmRegister>();
bool is_aligned16 = instruction->GetAlignment().IsAlignedAt(16);
switch (instruction->GetPackedType()) {
+ case DataType::Type::kInt16: // (short) s.charAt(.) can yield HVecLoad/Int16/StringCharAt.
case DataType::Type::kUint16:
DCHECK_EQ(8u, instruction->GetVectorLength());
// Special handling of compressed/uncompressed string load.
@@ -1205,7 +1206,6 @@ void InstructionCodeGeneratorX86_64::VisitVecLoad(HVecLoad* instruction) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
case DataType::Type::kInt8:
- case DataType::Type::kInt16:
case DataType::Type::kInt32:
case DataType::Type::kInt64:
DCHECK_LE(2u, instruction->GetVectorLength());
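
The comment added in each backend points at the source pattern behind this change: a `(short) s.charAt(.)` access can be auto-vectorized into an HVecLoad with packed type Int16 and the StringCharAt flag, so the Int16 case must share the compressed/uncompressed string handling with Uint16 instead of falling through to the generic array-load cases. Below is a minimal Java sketch of such a loop; it is a hypothetical illustration (class and method names are not from the patch), not code taken from the ART sources.

// Hypothetical illustration: a narrowing char-to-short loop of this shape is the
// kind of code the ART loop optimizer can vectorize into HVecLoad/Int16/StringCharAt.
class CharSum {
  static int sumShortChars(String s) {
    int sum = 0;
    for (int i = 0; i < s.length(); i++) {
      sum += (short) s.charAt(i);  // (short) s.charAt(.) -> Int16 vector load from a String
    }
    return sum;
  }

  public static void main(String[] args) {
    System.out.println(sumShortChars("hello world"));
  }
}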