Diffstat (limited to 'compiler')
 compiler/jit/jit_compiler.cc                           |   2
 compiler/optimizing/code_generator_arm64.cc            |   8
 compiler/optimizing/code_generator_arm_vixl.cc         |  29
 compiler/optimizing/code_generator_mips.cc             |  78
 compiler/optimizing/code_generator_mips64.cc           |  76
 compiler/optimizing/code_generator_vector_arm64.cc     |  24
 compiler/optimizing/code_generator_vector_arm_vixl.cc  |  20
 compiler/optimizing/code_generator_vector_mips.cc      |  44
 compiler/optimizing/code_generator_vector_mips64.cc    |  44
 compiler/optimizing/code_generator_vector_x86.cc       |  24
 compiler/optimizing/code_generator_vector_x86_64.cc    |  24
 compiler/optimizing/code_generator_x86.cc              |  12
 compiler/optimizing/code_generator_x86_64.cc           |  12
 compiler/optimizing/data_type-inl.h                    |   2
 compiler/optimizing/data_type.cc                       |   2
 compiler/optimizing/data_type.h                        |  27
 compiler/optimizing/graph_visualizer.cc                |  11
 compiler/optimizing/loop_optimization.cc               |  68
 compiler/optimizing/nodes_vector.h                     |  72
 compiler/optimizing/nodes_vector_test.cc               | 168
 compiler/optimizing/optimizing_compiler.cc             |   2
 compiler/optimizing/register_allocation_resolver.cc    |   2
 compiler/optimizing/register_allocator_graph_color.cc  |   2
 compiler/optimizing/register_allocator_linear_scan.cc  |   2
 24 files changed, 309 insertions(+), 446 deletions(-)
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 2c62095458..17b94d3bdf 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -76,7 +76,7 @@ extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t cou
const ArrayRef<mirror::Class*> types_array(types, count);
std::vector<uint8_t> elf_file = debug::WriteDebugElfFileForClasses(
kRuntimeISA, jit_compiler->GetCompilerDriver()->GetInstructionSetFeatures(), types_array);
- MutexLock mu(Thread::Current(), g_jit_debug_mutex);
+ MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
CreateJITCodeEntry(std::move(elf_file));
}
}
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 1380596ab2..3fd88e3e18 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1889,6 +1889,8 @@ void CodeGeneratorARM64::Load(DataType::Type type,
DCHECK_EQ(dst.Is64Bits(), DataType::Is64BitType(type));
__ Ldr(dst, src);
break;
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
}
@@ -1967,6 +1969,8 @@ void CodeGeneratorARM64::LoadAcquire(HInstruction* instruction,
__ Fmov(FPRegister(dst), temp);
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
}
@@ -1994,6 +1998,8 @@ void CodeGeneratorARM64::Store(DataType::Type type,
DCHECK_EQ(src.Is64Bits(), DataType::Is64BitType(type));
__ Str(src, dst);
break;
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
}
@@ -2071,6 +2077,8 @@ void CodeGeneratorARM64::StoreRelease(HInstruction* instruction,
}
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
}
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 18e7d1cc46..6d49b32dbc 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2498,8 +2498,23 @@ void CodeGeneratorARMVIXL::GenerateFrameEntry() {
}
if (!skip_overflow_check) {
+ // Using r4 instead of IP saves 2 bytes.
UseScratchRegisterScope temps(GetVIXLAssembler());
- vixl32::Register temp = temps.Acquire();
+ vixl32::Register temp;
+ // TODO: Remove this check when R4 is made a callee-save register
+ // in ART compiled code (b/72801708). Currently we need to make
+ // sure r4 is not blocked, e.g. in special purpose
+ // TestCodeGeneratorARMVIXL; also asserting that r4 is available
+ // here.
+ if (!blocked_core_registers_[R4]) {
+ for (vixl32::Register reg : kParameterCoreRegistersVIXL) {
+ DCHECK(!reg.Is(r4));
+ }
+ DCHECK(!kCoreCalleeSaves.Includes(r4));
+ temp = r4;
+ } else {
+ temp = temps.Acquire();
+ }
__ Sub(temp, sp, Operand::From(GetStackOverflowReservedBytes(InstructionSet::kArm)));
// The load must immediately precede RecordPcInfo.
ExactAssemblyScope aas(GetVIXLAssembler(),
@@ -2650,6 +2665,8 @@ Location InvokeDexCallingConventionVisitorARMVIXL::GetNextLocation(DataType::Typ
}
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unexpected parameter type " << type;
break;
@@ -2665,6 +2682,7 @@ Location InvokeDexCallingConventionVisitorARMVIXL::GetReturnLocation(DataType::T
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
+ case DataType::Type::kUint32:
case DataType::Type::kInt32: {
return LocationFrom(r0);
}
@@ -2673,6 +2691,7 @@ Location InvokeDexCallingConventionVisitorARMVIXL::GetReturnLocation(DataType::T
return LocationFrom(s0);
}
+ case DataType::Type::kUint64:
case DataType::Type::kInt64: {
return LocationFrom(r0, r1);
}
@@ -5512,6 +5531,8 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldSet(HInstruction* instruction,
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << field_type;
UNREACHABLE();
@@ -5756,6 +5777,8 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldGet(HInstruction* instruction,
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << load_type;
UNREACHABLE();
@@ -6248,6 +6271,8 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
UNREACHABLE();
@@ -6537,6 +6562,8 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << value_type;
UNREACHABLE();
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 51fb4dacfd..855da2b18f 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -58,9 +58,11 @@ Location MipsReturnLocation(DataType::Type return_type) {
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
+ case DataType::Type::kUint32:
case DataType::Type::kInt32:
return Location::RegisterLocation(V0);
+ case DataType::Type::kUint64:
case DataType::Type::kInt64:
return Location::RegisterPairLocation(V0, V1);
@@ -140,6 +142,8 @@ Location InvokeDexCallingConventionVisitorMIPS::GetNextLocation(DataType::Type t
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unexpected parameter type " << type;
break;
@@ -391,7 +395,7 @@ class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
__ Bind(GetEntryLabel());
- if (!is_fatal_) {
+ if (!is_fatal_ || instruction_->CanThrowIntoCatchBlock()) {
SaveLiveRegisters(codegen, locations);
}
@@ -2821,6 +2825,8 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
@@ -3136,6 +3142,8 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
@@ -3275,26 +3283,8 @@ static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) {
}
void LocationsBuilderMIPS::VisitCheckCast(HCheckCast* instruction) {
- LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
- bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
-
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
- switch (type_check_kind) {
- case TypeCheckKind::kExactCheck:
- case TypeCheckKind::kAbstractClassCheck:
- case TypeCheckKind::kClassHierarchyCheck:
- case TypeCheckKind::kArrayObjectCheck:
- call_kind = (throws_into_catch || kEmitCompilerReadBarrier)
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall; // In fact, call on a fatal (non-returning) slow path.
- break;
- case TypeCheckKind::kArrayCheck:
- case TypeCheckKind::kUnresolvedCheck:
- case TypeCheckKind::kInterfaceCheck:
- call_kind = LocationSummary::kCallOnSlowPath;
- break;
- }
-
+ LocationSummary::CallKind call_kind = CodeGenerator::GetCheckCastCallKind(instruction);
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
@@ -3323,18 +3313,7 @@ void InstructionCodeGeneratorMIPS::VisitCheckCast(HCheckCast* instruction) {
mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
MipsLabel done;
- // Always false for read barriers since we may need to go to the entrypoint for non-fatal cases
- // from false negatives. The false negatives may come from avoiding read barriers below. Avoiding
- // read barriers is done for performance and code size reasons.
- bool is_type_check_slow_path_fatal = false;
- if (!kEmitCompilerReadBarrier) {
- is_type_check_slow_path_fatal =
- (type_check_kind == TypeCheckKind::kExactCheck ||
- type_check_kind == TypeCheckKind::kAbstractClassCheck ||
- type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
- type_check_kind == TypeCheckKind::kArrayObjectCheck) &&
- !instruction->CanThrowIntoCatchBlock();
- }
+ bool is_type_check_slow_path_fatal = CodeGenerator::IsTypeCheckSlowPathFatal(instruction);
SlowPathCodeMIPS* slow_path =
new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
instruction, is_type_check_slow_path_fatal);
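Both MIPS back ends now defer this decision to shared CodeGenerator helpers. A minimal sketch of those helpers, reconstructed from the switch and flag logic deleted above (the actual declarations live in code_generator.h, outside this diff, and may differ in detail):

static bool IsTypeCheckSlowPathFatal(HCheckCast* check_cast) {
  // Never fatal with read barriers: the slow path may be entered on
  // non-fatal false negatives that come from skipping read barriers
  // for performance and code size.
  if (kEmitCompilerReadBarrier) {
    return false;
  }
  switch (check_cast->GetTypeCheckKind()) {
    case TypeCheckKind::kExactCheck:
    case TypeCheckKind::kAbstractClassCheck:
    case TypeCheckKind::kClassHierarchyCheck:
    case TypeCheckKind::kArrayObjectCheck:
      return !check_cast->CanThrowIntoCatchBlock();
    default:
      return false;  // kArrayCheck, kUnresolvedCheck, kInterfaceCheck.
  }
}

static LocationSummary::CallKind GetCheckCastCallKind(HCheckCast* check_cast) {
  return IsTypeCheckSlowPathFatal(check_cast)
      ? LocationSummary::kNoCall  // In fact, call on a fatal (non-returning) slow path.
      : LocationSummary::kCallOnSlowPath;
}

This also explains the TypeCheckSlowPathMIPS change above: a fatal slow path never returns, so live registers only need saving when the cast can still throw into a catch block.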
@@ -6320,6 +6299,8 @@ void InstructionCodeGeneratorMIPS::HandleFieldGet(HInstruction* instruction,
case DataType::Type::kFloat64:
load_type = kLoadDoubleword;
break;
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
UNREACHABLE();
@@ -6473,6 +6454,8 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction,
case DataType::Type::kFloat64:
store_type = kStoreDoubleword;
break;
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
UNREACHABLE();
@@ -7201,11 +7184,12 @@ void LocationsBuilderMIPS::VisitInstanceOf(HInstanceOf* instruction) {
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kAbstractClassCheck:
case TypeCheckKind::kClassHierarchyCheck:
- case TypeCheckKind::kArrayObjectCheck:
- call_kind =
- kEmitCompilerReadBarrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
- baker_read_barrier_slow_path = kUseBakerReadBarrier;
+ case TypeCheckKind::kArrayObjectCheck: {
+ bool needs_read_barrier = CodeGenerator::InstanceOfNeedsReadBarrier(instruction);
+ call_kind = needs_read_barrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
+ baker_read_barrier_slow_path = kUseBakerReadBarrier && needs_read_barrier;
break;
+ }
case TypeCheckKind::kArrayCheck:
case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
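The instanceof path gets the same treatment: instead of keying read-barrier decisions off the global kEmitCompilerReadBarrier directly, the call sites ask per-instruction helpers. A plausible sketch, inferred only from how the helpers are used in this diff (the actual bodies live in code_generator.h and may carry extra per-instruction exemptions):

static bool InstanceOfNeedsReadBarrier(HInstanceOf* instance_of ATTRIBUTE_UNUSED) {
  // Matches the old behavior when no exemption applies: a read barrier
  // is needed exactly when read barriers are compiled in at all.
  return kEmitCompilerReadBarrier;
}

static ReadBarrierOption ReadBarrierOptionForInstanceOf(HInstanceOf* instance_of) {
  return InstanceOfNeedsReadBarrier(instance_of) ? kWithReadBarrier
                                                 : kWithoutReadBarrier;
}

Note how baker_read_barrier_slow_path is now also gated on needs_read_barrier, so the Baker slow path is only requested when a barrier will actually be emitted.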
@@ -7253,13 +7237,15 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// Classes must be equal for the instanceof to succeed.
__ Xor(out, out, cls);
__ Sltiu(out, out, 1);
@@ -7267,13 +7253,15 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
}
case TypeCheckKind::kAbstractClassCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
MipsLabel loop;
@@ -7283,7 +7271,7 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
out_loc,
super_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// If `out` is null, we use it for the result, and jump to `done`.
__ Beqz(out, &done);
__ Bne(out, cls, &loop);
@@ -7292,13 +7280,15 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
}
case TypeCheckKind::kClassHierarchyCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// Walk over the class hierarchy to find a match.
MipsLabel loop, success;
__ Bind(&loop);
@@ -7308,7 +7298,7 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
out_loc,
super_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
__ Bnez(out, &loop);
// If `out` is null, we use it for the result, and jump to `done`.
__ B(&done);
@@ -7318,13 +7308,15 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
}
case TypeCheckKind::kArrayObjectCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// Do an exact check.
MipsLabel success;
__ Beq(out, cls, &success);
@@ -7334,7 +7326,7 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
out_loc,
component_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// If `out` is null, we use it for the result, and jump to `done`.
__ Beqz(out, &done);
__ LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 480b9178d2..8a06061c6a 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -55,8 +55,10 @@ Location Mips64ReturnLocation(DataType::Type return_type) {
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
+ case DataType::Type::kUint32:
case DataType::Type::kInt32:
case DataType::Type::kReference:
+ case DataType::Type::kUint64:
case DataType::Type::kInt64:
return Location::RegisterLocation(V0);
@@ -350,7 +352,7 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
- if (!is_fatal_) {
+ if (!is_fatal_ || instruction_->CanThrowIntoCatchBlock()) {
SaveLiveRegisters(codegen, locations);
}
@@ -2408,6 +2410,8 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
@@ -2711,6 +2715,8 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
@@ -2830,26 +2836,8 @@ static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) {
}
void LocationsBuilderMIPS64::VisitCheckCast(HCheckCast* instruction) {
- LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
- bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
-
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
- switch (type_check_kind) {
- case TypeCheckKind::kExactCheck:
- case TypeCheckKind::kAbstractClassCheck:
- case TypeCheckKind::kClassHierarchyCheck:
- case TypeCheckKind::kArrayObjectCheck:
- call_kind = (throws_into_catch || kEmitCompilerReadBarrier)
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall; // In fact, call on a fatal (non-returning) slow path.
- break;
- case TypeCheckKind::kArrayCheck:
- case TypeCheckKind::kUnresolvedCheck:
- case TypeCheckKind::kInterfaceCheck:
- call_kind = LocationSummary::kCallOnSlowPath;
- break;
- }
-
+ LocationSummary::CallKind call_kind = CodeGenerator::GetCheckCastCallKind(instruction);
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
@@ -2878,18 +2866,7 @@ void InstructionCodeGeneratorMIPS64::VisitCheckCast(HCheckCast* instruction) {
mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
Mips64Label done;
- // Always false for read barriers since we may need to go to the entrypoint for non-fatal cases
- // from false negatives. The false negatives may come from avoiding read barriers below. Avoiding
- // read barriers is done for performance and code size reasons.
- bool is_type_check_slow_path_fatal = false;
- if (!kEmitCompilerReadBarrier) {
- is_type_check_slow_path_fatal =
- (type_check_kind == TypeCheckKind::kExactCheck ||
- type_check_kind == TypeCheckKind::kAbstractClassCheck ||
- type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
- type_check_kind == TypeCheckKind::kArrayObjectCheck) &&
- !instruction->CanThrowIntoCatchBlock();
- }
+ bool is_type_check_slow_path_fatal = CodeGenerator::IsTypeCheckSlowPathFatal(instruction);
SlowPathCodeMIPS64* slow_path =
new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS64(
instruction, is_type_check_slow_path_fatal);
@@ -4798,6 +4775,8 @@ void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction,
case DataType::Type::kReference:
load_type = kLoadUnsignedWord;
break;
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
UNREACHABLE();
@@ -4891,6 +4870,8 @@ void InstructionCodeGeneratorMIPS64::HandleFieldSet(HInstruction* instruction,
case DataType::Type::kFloat64:
store_type = kStoreDoubleword;
break;
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
UNREACHABLE();
@@ -5518,11 +5499,12 @@ void LocationsBuilderMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kAbstractClassCheck:
case TypeCheckKind::kClassHierarchyCheck:
- case TypeCheckKind::kArrayObjectCheck:
- call_kind =
- kEmitCompilerReadBarrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
- baker_read_barrier_slow_path = kUseBakerReadBarrier;
+ case TypeCheckKind::kArrayObjectCheck: {
+ bool needs_read_barrier = CodeGenerator::InstanceOfNeedsReadBarrier(instruction);
+ call_kind = needs_read_barrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
+ baker_read_barrier_slow_path = kUseBakerReadBarrier && needs_read_barrier;
break;
+ }
case TypeCheckKind::kArrayCheck:
case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
@@ -5570,13 +5552,15 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// Classes must be equal for the instanceof to succeed.
__ Xor(out, out, cls);
__ Sltiu(out, out, 1);
@@ -5584,13 +5568,15 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
}
case TypeCheckKind::kAbstractClassCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
Mips64Label loop;
@@ -5600,7 +5586,7 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
out_loc,
super_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// If `out` is null, we use it for the result, and jump to `done`.
__ Beqzc(out, &done);
__ Bnec(out, cls, &loop);
@@ -5609,13 +5595,15 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
}
case TypeCheckKind::kClassHierarchyCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// Walk over the class hierarchy to find a match.
Mips64Label loop, success;
__ Bind(&loop);
@@ -5625,7 +5613,7 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
out_loc,
super_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
__ Bnezc(out, &loop);
// If `out` is null, we use it for the result, and jump to `done`.
__ Bc(&done);
@@ -5635,13 +5623,15 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
}
case TypeCheckKind::kArrayObjectCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// Do an exact check.
Mips64Label success;
__ Beqc(out, cls, &success);
@@ -5651,7 +5641,7 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
out_loc,
component_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// If `out` is null, we use it for the result, and jump to `done`.
__ Beqzc(out, &done);
__ LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
diff --git a/compiler/optimizing/code_generator_vector_arm64.cc b/compiler/optimizing/code_generator_vector_arm64.cc
index 152a59c208..174efdf115 100644
--- a/compiler/optimizing/code_generator_vector_arm64.cc
+++ b/compiler/optimizing/code_generator_vector_arm64.cc
@@ -606,22 +606,20 @@ void InstructionCodeGeneratorARM64::VisitVecMin(HVecMin* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Smin(dst.V8H(), lhs.V8H(), rhs.V8H());
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Umin(dst.V4S(), lhs.V4S(), rhs.V4S());
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Umin(dst.V4S(), lhs.V4S(), rhs.V4S());
- } else {
- __ Smin(dst.V4S(), lhs.V4S(), rhs.V4S());
- }
+ __ Smin(dst.V4S(), lhs.V4S(), rhs.V4S());
break;
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ Fmin(dst.V4S(), lhs.V4S(), rhs.V4S());
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ Fmin(dst.V2D(), lhs.V2D(), rhs.V2D());
break;
default:
@@ -656,22 +654,20 @@ void InstructionCodeGeneratorARM64::VisitVecMax(HVecMax* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Smax(dst.V8H(), lhs.V8H(), rhs.V8H());
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Umax(dst.V4S(), lhs.V4S(), rhs.V4S());
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Umax(dst.V4S(), lhs.V4S(), rhs.V4S());
- } else {
- __ Smax(dst.V4S(), lhs.V4S(), rhs.V4S());
- }
+ __ Smax(dst.V4S(), lhs.V4S(), rhs.V4S());
break;
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ Fmax(dst.V4S(), lhs.V4S(), rhs.V4S());
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ Fmax(dst.V2D(), lhs.V2D(), rhs.V2D());
break;
default:
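Folding the sign into the packed type works because signed and unsigned comparison order the same bits differently, so Umin/Smin (and friends) are genuinely different operations. A standalone illustration in plain C++ (not ART code):

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t bits = 0xFFFFFFFFu;  // UINT32_MAX when unsigned, -1 when signed.
  uint32_t umin = bits < 1u ? bits : 1u;                  // Unsigned: 1 wins.
  int32_t smin = static_cast<int32_t>(bits) < 1
                     ? static_cast<int32_t>(bits) : 1;    // Signed: -1 wins.
  std::printf("umin = %u, smin = %d\n", umin, smin);      // umin = 1, smin = -1
  return 0;
}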
diff --git a/compiler/optimizing/code_generator_vector_arm_vixl.cc b/compiler/optimizing/code_generator_vector_arm_vixl.cc
index cc470ddb2e..7c3155ab73 100644
--- a/compiler/optimizing/code_generator_vector_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_vector_arm_vixl.cc
@@ -431,13 +431,13 @@ void InstructionCodeGeneratorARMVIXL::VisitVecMin(HVecMin* instruction) {
DCHECK_EQ(4u, instruction->GetVectorLength());
__ Vmin(DataTypeValue::S16, dst, lhs, rhs);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Vmin(DataTypeValue::U32, dst, lhs, rhs);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(2u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Vmin(DataTypeValue::U32, dst, lhs, rhs);
- } else {
- __ Vmin(DataTypeValue::S32, dst, lhs, rhs);
- }
+ __ Vmin(DataTypeValue::S32, dst, lhs, rhs);
break;
default:
LOG(FATAL) << "Unsupported SIMD type";
@@ -471,13 +471,13 @@ void InstructionCodeGeneratorARMVIXL::VisitVecMax(HVecMax* instruction) {
DCHECK_EQ(4u, instruction->GetVectorLength());
__ Vmax(DataTypeValue::S16, dst, lhs, rhs);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Vmax(DataTypeValue::U32, dst, lhs, rhs);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(2u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Vmax(DataTypeValue::U32, dst, lhs, rhs);
- } else {
- __ Vmax(DataTypeValue::S32, dst, lhs, rhs);
- }
+ __ Vmax(DataTypeValue::S32, dst, lhs, rhs);
break;
default:
LOG(FATAL) << "Unsupported SIMD type";
diff --git a/compiler/optimizing/code_generator_vector_mips.cc b/compiler/optimizing/code_generator_vector_mips.cc
index 3cf150a6b8..ed9de96496 100644
--- a/compiler/optimizing/code_generator_vector_mips.cc
+++ b/compiler/optimizing/code_generator_vector_mips.cc
@@ -613,32 +613,30 @@ void InstructionCodeGeneratorMIPS::VisitVecMin(HVecMin* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Min_sH(dst, lhs, rhs);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Min_uW(dst, lhs, rhs);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Min_uW(dst, lhs, rhs);
- } else {
- __ Min_sW(dst, lhs, rhs);
- }
+ __ Min_sW(dst, lhs, rhs);
+ break;
+ case DataType::Type::kUint64:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Min_uD(dst, lhs, rhs);
break;
case DataType::Type::kInt64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Min_uD(dst, lhs, rhs);
- } else {
- __ Min_sD(dst, lhs, rhs);
- }
+ __ Min_sD(dst, lhs, rhs);
break;
// When one of the arguments is NaN, fmin.df returns the other argument, but Java expects a NaN value.
// TODO: Fix min(x, NaN) cases for float and double.
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ FminW(dst, lhs, rhs);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ FminD(dst, lhs, rhs);
break;
default:
@@ -673,32 +671,30 @@ void InstructionCodeGeneratorMIPS::VisitVecMax(HVecMax* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Max_sH(dst, lhs, rhs);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Max_uW(dst, lhs, rhs);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Max_uW(dst, lhs, rhs);
- } else {
- __ Max_sW(dst, lhs, rhs);
- }
+ __ Max_sW(dst, lhs, rhs);
+ break;
+ case DataType::Type::kUint64:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Max_uD(dst, lhs, rhs);
break;
case DataType::Type::kInt64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Max_uD(dst, lhs, rhs);
- } else {
- __ Max_sD(dst, lhs, rhs);
- }
+ __ Max_sD(dst, lhs, rhs);
break;
// When one of the arguments is NaN, fmax.df returns the other argument, but Java expects a NaN value.
// TODO: Fix max(x, NaN) cases for float and double.
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ FmaxW(dst, lhs, rhs);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ FmaxD(dst, lhs, rhs);
break;
default:
diff --git a/compiler/optimizing/code_generator_vector_mips64.cc b/compiler/optimizing/code_generator_vector_mips64.cc
index 2d69533f21..9ea55ec8d7 100644
--- a/compiler/optimizing/code_generator_vector_mips64.cc
+++ b/compiler/optimizing/code_generator_vector_mips64.cc
@@ -612,32 +612,30 @@ void InstructionCodeGeneratorMIPS64::VisitVecMin(HVecMin* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Min_sH(dst, lhs, rhs);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Min_uW(dst, lhs, rhs);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Min_uW(dst, lhs, rhs);
- } else {
- __ Min_sW(dst, lhs, rhs);
- }
+ __ Min_sW(dst, lhs, rhs);
+ break;
+ case DataType::Type::kUint64:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Min_uD(dst, lhs, rhs);
break;
case DataType::Type::kInt64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Min_uD(dst, lhs, rhs);
- } else {
- __ Min_sD(dst, lhs, rhs);
- }
+ __ Min_sD(dst, lhs, rhs);
break;
// When one of the arguments is NaN, fmin.df returns the other argument, but Java expects a NaN value.
// TODO: Fix min(x, NaN) cases for float and double.
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ FminW(dst, lhs, rhs);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ FminD(dst, lhs, rhs);
break;
default:
@@ -672,32 +670,30 @@ void InstructionCodeGeneratorMIPS64::VisitVecMax(HVecMax* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Max_sH(dst, lhs, rhs);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Max_uW(dst, lhs, rhs);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Max_uW(dst, lhs, rhs);
- } else {
- __ Max_sW(dst, lhs, rhs);
- }
+ __ Max_sW(dst, lhs, rhs);
+ break;
+ case DataType::Type::kUint64:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Max_uD(dst, lhs, rhs);
break;
case DataType::Type::kInt64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Max_uD(dst, lhs, rhs);
- } else {
- __ Max_sD(dst, lhs, rhs);
- }
+ __ Max_sD(dst, lhs, rhs);
break;
// When one of the arguments is NaN, fmax.df returns the other argument, but Java expects a NaN value.
// TODO: Fix max(x, NaN) cases for float and double.
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ FmaxW(dst, lhs, rhs);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ FmaxD(dst, lhs, rhs);
break;
default:
diff --git a/compiler/optimizing/code_generator_vector_x86.cc b/compiler/optimizing/code_generator_vector_x86.cc
index 7b4b85d2fe..f2ffccc887 100644
--- a/compiler/optimizing/code_generator_vector_x86.cc
+++ b/compiler/optimizing/code_generator_vector_x86.cc
@@ -640,23 +640,21 @@ void InstructionCodeGeneratorX86::VisitVecMin(HVecMin* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ pminsw(dst, src);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ pminud(dst, src);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ pminud(dst, src);
- } else {
- __ pminsd(dst, src);
- }
+ __ pminsd(dst, src);
break;
// Next cases are sloppy wrt 0.0 vs -0.0.
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ minps(dst, src);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ minpd(dst, src);
break;
default:
@@ -691,23 +689,21 @@ void InstructionCodeGeneratorX86::VisitVecMax(HVecMax* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ pmaxsw(dst, src);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ pmaxud(dst, src);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ pmaxud(dst, src);
- } else {
- __ pmaxsd(dst, src);
- }
+ __ pmaxsd(dst, src);
break;
// Next cases are sloppy wrt 0.0 vs -0.0.
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ maxps(dst, src);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ maxpd(dst, src);
break;
default:
diff --git a/compiler/optimizing/code_generator_vector_x86_64.cc b/compiler/optimizing/code_generator_vector_x86_64.cc
index 107030e6c2..e2b0485f89 100644
--- a/compiler/optimizing/code_generator_vector_x86_64.cc
+++ b/compiler/optimizing/code_generator_vector_x86_64.cc
@@ -623,23 +623,21 @@ void InstructionCodeGeneratorX86_64::VisitVecMin(HVecMin* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ pminsw(dst, src);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ pminud(dst, src);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ pminud(dst, src);
- } else {
- __ pminsd(dst, src);
- }
+ __ pminsd(dst, src);
break;
// Next cases are sloppy wrt 0.0 vs -0.0.
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ minps(dst, src);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ minpd(dst, src);
break;
default:
@@ -674,23 +672,21 @@ void InstructionCodeGeneratorX86_64::VisitVecMax(HVecMax* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ pmaxsw(dst, src);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ pmaxud(dst, src);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ pmaxud(dst, src);
- } else {
- __ pmaxsd(dst, src);
- }
+ __ pmaxsd(dst, src);
break;
// Next cases are sloppy wrt 0.0 vs -0.0.
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ maxps(dst, src);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ maxpd(dst, src);
break;
default:
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index c52c7ff7f1..5fede80bc7 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1134,9 +1134,11 @@ Location InvokeDexCallingConventionVisitorX86::GetReturnLocation(DataType::Type
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
+ case DataType::Type::kUint32:
case DataType::Type::kInt32:
return Location::RegisterLocation(EAX);
+ case DataType::Type::kUint64:
case DataType::Type::kInt64:
return Location::RegisterPairLocation(EAX, EDX);
@@ -1206,6 +1208,8 @@ Location InvokeDexCallingConventionVisitorX86::GetNextLocation(DataType::Type ty
}
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unexpected parameter type " << type;
break;
@@ -4844,6 +4848,8 @@ void InstructionCodeGeneratorX86::HandleFieldGet(HInstruction* instruction,
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << load_type;
UNREACHABLE();
@@ -5017,6 +5023,8 @@ void InstructionCodeGeneratorX86::HandleFieldSet(HInstruction* instruction,
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << field_type;
UNREACHABLE();
@@ -5320,6 +5328,8 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
UNREACHABLE();
@@ -5571,6 +5581,8 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index ee5918de71..ae35ab5983 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -2273,7 +2273,9 @@ Location InvokeDexCallingConventionVisitorX86_64::GetReturnLocation(DataType::Ty
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
+ case DataType::Type::kUint32:
case DataType::Type::kInt32:
+ case DataType::Type::kUint64:
case DataType::Type::kInt64:
return Location::RegisterLocation(RAX);
@@ -2342,6 +2344,8 @@ Location InvokeDexCallingConventionVisitorX86_64::GetNextLocation(DataType::Type
}
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unexpected parameter type " << type;
break;
@@ -4307,6 +4311,8 @@ void InstructionCodeGeneratorX86_64::HandleFieldGet(HInstruction* instruction,
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << load_type;
UNREACHABLE();
@@ -4470,6 +4476,8 @@ void InstructionCodeGeneratorX86_64::HandleFieldSet(HInstruction* instruction,
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << field_type;
UNREACHABLE();
@@ -4763,6 +4771,8 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
UNREACHABLE();
@@ -5002,6 +5012,8 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
diff --git a/compiler/optimizing/data_type-inl.h b/compiler/optimizing/data_type-inl.h
index e389bad3ad..e2cf7a80fe 100644
--- a/compiler/optimizing/data_type-inl.h
+++ b/compiler/optimizing/data_type-inl.h
@@ -53,7 +53,9 @@ constexpr char DataType::TypeId(DataType::Type type) {
case DataType::Type::kInt8: return 'b'; // Java byte (B).
case DataType::Type::kUint16: return 'c'; // Java char (C).
case DataType::Type::kInt16: return 's'; // Java short (S).
+ case DataType::Type::kUint32: return 'u'; // Picked 'u' for unsigned.
case DataType::Type::kInt32: return 'i'; // Java int (I).
+ case DataType::Type::kUint64: return 'w'; // Picked 'w' for long unsigned.
case DataType::Type::kInt64: return 'j'; // Java long (J).
case DataType::Type::kFloat32: return 'f'; // Java float (F).
case DataType::Type::kFloat64: return 'd'; // Java double (D).
diff --git a/compiler/optimizing/data_type.cc b/compiler/optimizing/data_type.cc
index 3c99a76c17..cb354f46cc 100644
--- a/compiler/optimizing/data_type.cc
+++ b/compiler/optimizing/data_type.cc
@@ -25,7 +25,9 @@ static const char* kTypeNames[] = {
"Int8",
"Uint16",
"Int16",
+ "Uint32",
"Int32",
+ "Uint64",
"Int64",
"Float32",
"Float64",
diff --git a/compiler/optimizing/data_type.h b/compiler/optimizing/data_type.h
index 548fe28cee..4a6c91459f 100644
--- a/compiler/optimizing/data_type.h
+++ b/compiler/optimizing/data_type.h
@@ -34,7 +34,9 @@ class DataType {
kInt8,
kUint16,
kInt16,
+ kUint32,
kInt32,
+ kUint64,
kInt64,
kFloat32,
kFloat64,
@@ -55,9 +57,11 @@ class DataType {
case Type::kUint16:
case Type::kInt16:
return 1;
+ case Type::kUint32:
case Type::kInt32:
case Type::kFloat32:
return 2;
+ case Type::kUint64:
case Type::kInt64:
case Type::kFloat64:
return 3;
@@ -80,9 +84,11 @@ class DataType {
case Type::kUint16:
case Type::kInt16:
return 2;
+ case Type::kUint32:
case Type::kInt32:
case Type::kFloat32:
return 4;
+ case Type::kUint64:
case Type::kInt64:
case Type::kFloat64:
return 8;
@@ -107,7 +113,9 @@ class DataType {
case Type::kInt8:
case Type::kUint16:
case Type::kInt16:
+ case Type::kUint32:
case Type::kInt32:
+ case Type::kUint64:
case Type::kInt64:
return true;
default:
@@ -120,11 +128,12 @@ class DataType {
}
static bool Is64BitType(Type type) {
- return type == Type::kInt64 || type == Type::kFloat64;
+ return type == Type::kUint64 || type == Type::kInt64 || type == Type::kFloat64;
}
static bool IsUnsignedType(Type type) {
- return type == Type::kBool || type == Type::kUint8 || type == Type::kUint16;
+ return type == Type::kBool || type == Type::kUint8 || type == Type::kUint16 ||
+ type == Type::kUint32 || type == Type::kUint64;
}
// Return the general kind of `type`, fusing integer-like types as Type::kInt.
@@ -133,10 +142,14 @@ class DataType {
case Type::kBool:
case Type::kUint8:
case Type::kInt8:
- case Type::kInt16:
case Type::kUint16:
+ case Type::kInt16:
+ case Type::kUint32:
case Type::kInt32:
return Type::kInt32;
+ case Type::kUint64:
+ case Type::kInt64:
+ return Type::kInt64;
default:
return type;
}
@@ -154,8 +167,12 @@ class DataType {
return std::numeric_limits<uint16_t>::min();
case Type::kInt16:
return std::numeric_limits<int16_t>::min();
+ case Type::kUint32:
+ return std::numeric_limits<uint32_t>::min();
case Type::kInt32:
return std::numeric_limits<int32_t>::min();
+ case Type::kUint64:
+ return std::numeric_limits<uint64_t>::min();
case Type::kInt64:
return std::numeric_limits<int64_t>::min();
default:
@@ -176,8 +193,12 @@ class DataType {
return std::numeric_limits<uint16_t>::max();
case Type::kInt16:
return std::numeric_limits<int16_t>::max();
+ case Type::kUint32:
+ return std::numeric_limits<uint32_t>::max();
case Type::kInt32:
return std::numeric_limits<int32_t>::max();
+ case Type::kUint64:
+ return std::numeric_limits<uint64_t>::max();
case Type::kInt64:
return std::numeric_limits<int64_t>::max();
default:
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 12c69889ab..6144162f68 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -533,20 +533,9 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
void VisitVecHalvingAdd(HVecHalvingAdd* hadd) OVERRIDE {
VisitVecBinaryOperation(hadd);
- StartAttributeStream("unsigned") << std::boolalpha << hadd->IsUnsigned() << std::noboolalpha;
StartAttributeStream("rounded") << std::boolalpha << hadd->IsRounded() << std::noboolalpha;
}
- void VisitVecMin(HVecMin* min) OVERRIDE {
- VisitVecBinaryOperation(min);
- StartAttributeStream("unsigned") << std::boolalpha << min->IsUnsigned() << std::noboolalpha;
- }
-
- void VisitVecMax(HVecMax* max) OVERRIDE {
- VisitVecBinaryOperation(max);
- StartAttributeStream("unsigned") << std::boolalpha << max->IsUnsigned() << std::noboolalpha;
- }
-
void VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) OVERRIDE {
VisitVecOperation(instruction);
StartAttributeStream("kind") << instruction->GetOpKind();
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 3dc1ef7534..899496328e 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -30,46 +30,6 @@
namespace art {
-// TODO: Clean up the packed type detection so that we have the right type straight away
-// and do not need to go through this normalization.
-static inline void NormalizePackedType(/* inout */ DataType::Type* type,
- /* inout */ bool* is_unsigned) {
- switch (*type) {
- case DataType::Type::kBool:
- DCHECK(!*is_unsigned);
- break;
- case DataType::Type::kUint8:
- case DataType::Type::kInt8:
- if (*is_unsigned) {
- *is_unsigned = false;
- *type = DataType::Type::kUint8;
- } else {
- *type = DataType::Type::kInt8;
- }
- break;
- case DataType::Type::kUint16:
- case DataType::Type::kInt16:
- if (*is_unsigned) {
- *is_unsigned = false;
- *type = DataType::Type::kUint16;
- } else {
- *type = DataType::Type::kInt16;
- }
- break;
- case DataType::Type::kInt32:
- case DataType::Type::kInt64:
- // We do not have kUint32 and kUint64 at the moment.
- break;
- case DataType::Type::kFloat32:
- case DataType::Type::kFloat64:
- DCHECK(!*is_unsigned);
- break;
- default:
- LOG(FATAL) << "Unexpected type " << *type;
- UNREACHABLE();
- }
-}
-
// Enables vectorization (SIMDization) in the loop optimizer.
static constexpr bool kEnableVectorization = true;
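With kUint32 and kUint64 in the type system, the in/out normalization above collapses into a pure function: pick the same-size signed or unsigned type up front. A toy, self-contained model of HVecOperation::ToProperType (added in nodes_vector.h later in this change; the real code operates on DataType::Type):

#include <cassert>

enum class Type { kUint32, kInt32, kUint64, kInt64 };

Type ToProperType(Type type, bool is_unsigned) {
  if (type == Type::kInt32 || type == Type::kUint32) {
    return is_unsigned ? Type::kUint32 : Type::kInt32;
  }
  return is_unsigned ? Type::kUint64 : Type::kInt64;
}

int main() {
  // The case NormalizePackedType could not express before: an unsigned
  // 32-bit packed type, previously approximated by kInt32 plus a flag.
  assert(ToProperType(Type::kInt32, /*is_unsigned=*/ true) == Type::kUint32);
  assert(ToProperType(Type::kInt64, /*is_unsigned=*/ false) == Type::kInt64);
  return 0;
}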
@@ -1362,8 +1322,10 @@ bool HLoopOptimization::VectorizeUse(LoopNode* node,
}
if (VectorizeUse(node, r, generate_code, type, restrictions)) {
if (generate_code) {
- NormalizePackedType(&type, &is_unsigned);
- GenerateVecOp(instruction, vector_map_->Get(r), nullptr, type);
+ GenerateVecOp(instruction,
+ vector_map_->Get(r),
+ nullptr,
+ HVecOperation::ToProperType(type, is_unsigned));
}
return true;
}
@@ -1865,18 +1827,26 @@ void HLoopOptimization::GenerateVecOp(HInstruction* org,
case Intrinsics::kMathMinLongLong:
case Intrinsics::kMathMinFloatFloat:
case Intrinsics::kMathMinDoubleDouble: {
- NormalizePackedType(&type, &is_unsigned);
vector = new (global_allocator_)
- HVecMin(global_allocator_, opa, opb, type, vector_length_, is_unsigned, dex_pc);
+ HVecMin(global_allocator_,
+ opa,
+ opb,
+ HVecOperation::ToProperType(type, is_unsigned),
+ vector_length_,
+ dex_pc);
break;
}
case Intrinsics::kMathMaxIntInt:
case Intrinsics::kMathMaxLongLong:
case Intrinsics::kMathMaxFloatFloat:
case Intrinsics::kMathMaxDoubleDouble: {
- NormalizePackedType(&type, &is_unsigned);
vector = new (global_allocator_)
- HVecMax(global_allocator_, opa, opb, type, vector_length_, is_unsigned, dex_pc);
+ HVecMax(global_allocator_,
+ opa,
+ opb,
+ HVecOperation::ToProperType(type, is_unsigned),
+ vector_length_,
+ dex_pc);
break;
}
default:
@@ -1987,15 +1957,13 @@ bool HLoopOptimization::VectorizeHalvingAddIdiom(LoopNode* node,
VectorizeUse(node, s, generate_code, type, restrictions)) {
if (generate_code) {
if (vector_mode_ == kVector) {
- NormalizePackedType(&type, &is_unsigned);
vector_map_->Put(instruction, new (global_allocator_) HVecHalvingAdd(
global_allocator_,
vector_map_->Get(r),
vector_map_->Get(s),
- type,
+ HVecOperation::ToProperType(type, is_unsigned),
vector_length_,
is_rounded,
- is_unsigned,
kNoDexPc));
MaybeRecordStat(stats_, MethodCompilationStat::kLoopVectorizedIdiom);
} else {
@@ -2086,7 +2054,7 @@ bool HLoopOptimization::VectorizeSADIdiom(LoopNode* node,
VectorizeUse(node, r, generate_code, sub_type, restrictions) &&
VectorizeUse(node, s, generate_code, sub_type, restrictions)) {
if (generate_code) {
- NormalizePackedType(&reduction_type, &is_unsigned);
+ reduction_type = HVecOperation::ToProperType(reduction_type, is_unsigned);
if (vector_mode_ == kVector) {
vector_map_->Put(instruction, new (global_allocator_) HVecSADAccumulate(
global_allocator_,
diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h
index 87dff8403b..ecabdf3b76 100644
--- a/compiler/optimizing/nodes_vector.h
+++ b/compiler/optimizing/nodes_vector.h
@@ -131,8 +131,6 @@ class HVecOperation : public HVariableInputSizeInstruction {
}
// Maps an integral type to the same-size signed type and leaves other types alone.
- // Can be used to test relaxed type consistency in which packed same-size integral
- // types can co-exist, but other type mixes are an error.
static DataType::Type ToSignedType(DataType::Type type) {
switch (type) {
case DataType::Type::kBool: // 1-byte storage unit
@@ -160,6 +158,11 @@ class HVecOperation : public HVariableInputSizeInstruction {
}
}
+ // Maps an integral type to the same-size (un)signed type. Leaves other types alone.
+ static DataType::Type ToProperType(DataType::Type type, bool is_unsigned) {
+ return is_unsigned ? ToUnsignedType(type) : ToSignedType(type);
+ }
+
// Helper method to determine if an instruction returns a SIMD value.
// TODO: This method is needed until we introduce SIMD as proper type.
static bool ReturnsSIMDValue(HInstruction* instruction) {
@@ -286,6 +289,8 @@ class HVecMemoryOperation : public HVecOperation {
};
// Packed type consistency checker ("same vector length" integral types may mix freely).
+// Tests relaxed type consistency in which packed same-size integral types can co-exist,
+// but other type mixes are an error.
inline static bool HasConsistentPackedTypes(HInstruction* input, DataType::Type type) {
if (input->IsPhi()) {
return input->GetType() == HVecOperation::kSIMDType; // carries SIMD
@@ -518,7 +523,7 @@ class HVecAdd FINAL : public HVecBinaryOperation {
// Performs halving add on every component in the two vectors, viz.
// rounded [ x1, .. , xn ] hradd [ y1, .. , yn ] = [ (x1 + y1 + 1) >> 1, .. , (xn + yn + 1) >> 1 ]
// truncated [ x1, .. , xn ] hadd [ y1, .. , yn ] = [ (x1 + y1) >> 1, .. , (xn + yn ) >> 1 ]
-// for either both signed or both unsigned operands x, y.
+// for either both signed or both unsigned operands x, y (reflected in packed_type).
class HVecHalvingAdd FINAL : public HVecBinaryOperation {
public:
HVecHalvingAdd(ArenaAllocator* allocator,
@@ -527,21 +532,13 @@ class HVecHalvingAdd FINAL : public HVecBinaryOperation {
DataType::Type packed_type,
size_t vector_length,
bool is_rounded,
- bool is_unsigned,
uint32_t dex_pc)
: HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
- // The `is_unsigned` flag should be used exclusively with the Int32 or Int64.
- // This flag is a temporary measure while we do not have the Uint32 and Uint64 data types.
- DCHECK(!is_unsigned ||
- packed_type == DataType::Type::kInt32 ||
- packed_type == DataType::Type::kInt64) << packed_type;
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
- SetPackedFlag<kFieldHAddIsUnsigned>(is_unsigned);
SetPackedFlag<kFieldHAddIsRounded>(is_rounded);
}
- bool IsUnsigned() const { return GetPackedFlag<kFieldHAddIsUnsigned>(); }
bool IsRounded() const { return GetPackedFlag<kFieldHAddIsRounded>(); }
bool CanBeMoved() const OVERRIDE { return true; }
@@ -549,9 +546,7 @@ class HVecHalvingAdd FINAL : public HVecBinaryOperation {
bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
DCHECK(other->IsVecHalvingAdd());
const HVecHalvingAdd* o = other->AsVecHalvingAdd();
- return HVecOperation::InstructionDataEquals(o) &&
- IsUnsigned() == o->IsUnsigned() &&
- IsRounded() == o->IsRounded();
+ return HVecOperation::InstructionDataEquals(o) && IsRounded() == o->IsRounded();
}
DECLARE_INSTRUCTION(VecHalvingAdd);
@@ -561,8 +556,7 @@ class HVecHalvingAdd FINAL : public HVecBinaryOperation {
private:
// Additional packed bits.
- static constexpr size_t kFieldHAddIsUnsigned = HVecOperation::kNumberOfVectorOpPackedBits;
- static constexpr size_t kFieldHAddIsRounded = kFieldHAddIsUnsigned + 1;
+ static constexpr size_t kFieldHAddIsRounded = HVecOperation::kNumberOfVectorOpPackedBits;
static constexpr size_t kNumberOfHAddPackedBits = kFieldHAddIsRounded + 1;
static_assert(kNumberOfHAddPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
};
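A scalar reference for the two flavors defined in the comment above HVecHalvingAdd (plain C++; the vector node applies this lane-wise):

#include <cassert>
#include <cstdint>

// Widen before adding so the intermediate sum cannot overflow the lane type.
uint8_t HAddRounded(uint8_t x, uint8_t y) {
  return static_cast<uint8_t>((uint16_t{x} + y + 1) >> 1);
}
uint8_t HAddTruncated(uint8_t x, uint8_t y) {
  return static_cast<uint8_t>((uint16_t{x} + y) >> 1);
}

int main() {
  assert(HAddRounded(3, 4) == 4);        // (3 + 4 + 1) >> 1
  assert(HAddTruncated(3, 4) == 3);      // (3 + 4) >> 1
  assert(HAddRounded(255, 255) == 255);  // Widening avoids overflow.
  return 0;
}

Since the sign of the operands is now reflected in packed_type, the node keeps only the rounding flag, which is why InstructionDataEquals below compares IsRounded() alone.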
@@ -638,7 +632,7 @@ class HVecDiv FINAL : public HVecBinaryOperation {
// Takes minimum of every component in the two vectors,
// viz. MIN( [ x1, .. , xn ] , [ y1, .. , yn ]) = [ min(x1, y1), .. , min(xn, yn) ]
-// for either both signed or both unsigned operands x, y.
+// for either both signed or both unsigned operands x, y (reflected in packed_type).
class HVecMin FINAL : public HVecBinaryOperation {
public:
HVecMin(ArenaAllocator* allocator,
@@ -646,44 +640,23 @@ class HVecMin FINAL : public HVecBinaryOperation {
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
- bool is_unsigned,
uint32_t dex_pc)
: HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
- // The `is_unsigned` flag should be used exclusively with the Int32 or Int64.
- // This flag is a temporary measure while we do not have the Uint32 and Uint64 data types.
- DCHECK(!is_unsigned ||
- packed_type == DataType::Type::kInt32 ||
- packed_type == DataType::Type::kInt64) << packed_type;
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
- SetPackedFlag<kFieldMinOpIsUnsigned>(is_unsigned);
}
- bool IsUnsigned() const { return GetPackedFlag<kFieldMinOpIsUnsigned>(); }
-
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
- DCHECK(other->IsVecMin());
- const HVecMin* o = other->AsVecMin();
- return HVecOperation::InstructionDataEquals(o) && IsUnsigned() == o->IsUnsigned();
- }
-
DECLARE_INSTRUCTION(VecMin);
protected:
DEFAULT_COPY_CONSTRUCTOR(VecMin);
-
- private:
- // Additional packed bits.
- static constexpr size_t kFieldMinOpIsUnsigned = HVecOperation::kNumberOfVectorOpPackedBits;
- static constexpr size_t kNumberOfMinOpPackedBits = kFieldMinOpIsUnsigned + 1;
- static_assert(kNumberOfMinOpPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
};
// Takes maximum of every component in the two vectors,
// viz. MAX( [ x1, .. , xn ] , [ y1, .. , yn ]) = [ max(x1, y1), .. , max(xn, yn) ]
-// for either both signed or both unsigned operands x, y.
+// for either both signed or both unsigned operands x, y (reflected in packed_type).
class HVecMax FINAL : public HVecBinaryOperation {
public:
HVecMax(ArenaAllocator* allocator,
@@ -691,39 +664,18 @@ class HVecMax FINAL : public HVecBinaryOperation {
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
- bool is_unsigned,
uint32_t dex_pc)
: HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
- // The `is_unsigned` flag should be used exclusively with the Int32 or Int64.
- // This flag is a temporary measure while we do not have the Uint32 and Uint64 data types.
- DCHECK(!is_unsigned ||
- packed_type == DataType::Type::kInt32 ||
- packed_type == DataType::Type::kInt64) << packed_type;
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
- SetPackedFlag<kFieldMaxOpIsUnsigned>(is_unsigned);
}
- bool IsUnsigned() const { return GetPackedFlag<kFieldMaxOpIsUnsigned>(); }
-
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
- DCHECK(other->IsVecMax());
- const HVecMax* o = other->AsVecMax();
- return HVecOperation::InstructionDataEquals(o) && IsUnsigned() == o->IsUnsigned();
- }
-
DECLARE_INSTRUCTION(VecMax);
protected:
DEFAULT_COPY_CONSTRUCTOR(VecMax);
-
- private:
- // Additional packed bits.
- static constexpr size_t kFieldMaxOpIsUnsigned = HVecOperation::kNumberOfVectorOpPackedBits;
- static constexpr size_t kNumberOfMaxOpPackedBits = kFieldMaxOpIsUnsigned + 1;
- static_assert(kNumberOfMaxOpPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
};
// Bitwise-ands every component in the two vectors,
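
[Editor's note] The substance of the HVecMin/HVecMax change above is that signedness now travels in the packed type (kUint32/kUint64 etc.) rather than in a separate is_unsigned flag. Why signedness must be encoded somewhere is easy to demonstrate: the same bit patterns order differently under signed and unsigned comparison. A standalone sketch (plain C++, not ART code):

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  uint32_t a = 0xFFFFFFFFu;  // -1 as int32_t, 4294967295 as uint32_t
  uint32_t b = 1u;
  int32_t signed_min = std::min(static_cast<int32_t>(a), static_cast<int32_t>(b));
  uint32_t unsigned_min = std::min(a, b);
  // Prints: signed min = -1, unsigned min = 1 -- the same lanes, two answers.
  std::printf("signed min = %d, unsigned min = %u\n", signed_min, unsigned_min);
  return 0;
}

With distinct kInt32/kUint32 packed types available, the answer is fully determined by the type, so the flag (and the custom InstructionDataEquals overrides that compared it) can go.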
diff --git a/compiler/optimizing/nodes_vector_test.cc b/compiler/optimizing/nodes_vector_test.cc
index ab9d7594d9..af13449646 100644
--- a/compiler/optimizing/nodes_vector_test.cc
+++ b/compiler/optimizing/nodes_vector_test.cc
@@ -282,143 +282,53 @@ TEST_F(NodesVectorTest, VectorAlignmentMattersOnStore) {
EXPECT_FALSE(v0->Equals(v1)); // no longer equal
}
-TEST_F(NodesVectorTest, VectorSignMattersOnMin) {
- HVecOperation* p0 = new (GetAllocator())
- HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
- HVecOperation* p1 = new (GetAllocator())
- HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc);
- HVecOperation* p2 = new (GetAllocator())
- HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
-
- HVecMin* v0 = new (GetAllocator()) HVecMin(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ true, kNoDexPc);
- HVecMin* v1 = new (GetAllocator()) HVecMin(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ false, kNoDexPc);
- HVecMin* v2 = new (GetAllocator()) HVecMin(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 2, /*is_unsigned*/ true, kNoDexPc);
- HVecMin* v3 = new (GetAllocator()) HVecMin(
- GetAllocator(), p1, p1, DataType::Type::kUint8, 16, /*is_unsigned*/ false, kNoDexPc);
- HVecMin* v4 = new (GetAllocator()) HVecMin(
- GetAllocator(), p1, p1, DataType::Type::kInt8, 16, /*is_unsigned*/ false, kNoDexPc);
- HVecMin* v5 = new (GetAllocator()) HVecMin(
- GetAllocator(), p2, p2, DataType::Type::kUint16, 8, /*is_unsigned*/ false, kNoDexPc);
- HVecMin* v6 = new (GetAllocator()) HVecMin(
- GetAllocator(), p2, p2, DataType::Type::kInt16, 8, /*is_unsigned*/ false, kNoDexPc);
- HVecMin* min_insns[] = { v0, v1, v2, v3, v4, v5, v6 };
-
- EXPECT_FALSE(p0->CanBeMoved());
- EXPECT_FALSE(p1->CanBeMoved());
- EXPECT_FALSE(p2->CanBeMoved());
-
- for (HVecMin* min_insn : min_insns) {
- EXPECT_TRUE(min_insn->CanBeMoved());
- }
-
- // Deprecated; IsUnsigned() should be removed with the introduction of Uint32 and Uint64.
- EXPECT_TRUE(v0->IsUnsigned());
- EXPECT_FALSE(v1->IsUnsigned());
- EXPECT_TRUE(v2->IsUnsigned());
-
- for (HVecMin* min_insn1 : min_insns) {
- for (HVecMin* min_insn2 : min_insns) {
- EXPECT_EQ(min_insn1 == min_insn2, min_insn1->Equals(min_insn2));
- }
- }
-}
-
-TEST_F(NodesVectorTest, VectorSignMattersOnMax) {
- HVecOperation* p0 = new (GetAllocator())
- HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
- HVecOperation* p1 = new (GetAllocator())
- HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc);
- HVecOperation* p2 = new (GetAllocator())
- HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
-
- HVecMax* v0 = new (GetAllocator()) HVecMax(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ true, kNoDexPc);
- HVecMax* v1 = new (GetAllocator()) HVecMax(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ false, kNoDexPc);
- HVecMax* v2 = new (GetAllocator()) HVecMax(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 2, /*is_unsigned*/ true, kNoDexPc);
- HVecMax* v3 = new (GetAllocator()) HVecMax(
- GetAllocator(), p1, p1, DataType::Type::kUint8, 16, /*is_unsigned*/ false, kNoDexPc);
- HVecMax* v4 = new (GetAllocator()) HVecMax(
- GetAllocator(), p1, p1, DataType::Type::kInt8, 16, /*is_unsigned*/ false, kNoDexPc);
- HVecMax* v5 = new (GetAllocator()) HVecMax(
- GetAllocator(), p2, p2, DataType::Type::kUint16, 8, /*is_unsigned*/ false, kNoDexPc);
- HVecMax* v6 = new (GetAllocator()) HVecMax(
- GetAllocator(), p2, p2, DataType::Type::kInt16, 8, /*is_unsigned*/ false, kNoDexPc);
- HVecMax* max_insns[] = { v0, v1, v2, v3, v4, v5, v6 };
-
- EXPECT_FALSE(p0->CanBeMoved());
- EXPECT_FALSE(p1->CanBeMoved());
- EXPECT_FALSE(p2->CanBeMoved());
-
- for (HVecMax* max_insn : max_insns) {
- EXPECT_TRUE(max_insn->CanBeMoved());
- }
-
- // Deprecated; IsUnsigned() should be removed with the introduction of Uint32 and Uint64.
- EXPECT_TRUE(v0->IsUnsigned());
- EXPECT_FALSE(v1->IsUnsigned());
- EXPECT_TRUE(v2->IsUnsigned());
-
- for (HVecMax* max_insn1 : max_insns) {
- for (HVecMax* max_insn2 : max_insns) {
- EXPECT_EQ(max_insn1 == max_insn2, max_insn1->Equals(max_insn2));
- }
- }
-}
-
TEST_F(NodesVectorTest, VectorAttributesMatterOnHalvingAdd) {
+ HVecOperation* u0 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kUint32, 4, kNoDexPc);
+ HVecOperation* u1 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kUint16, 8, kNoDexPc);
+ HVecOperation* u2 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kUint8, 16, kNoDexPc);
+
HVecOperation* p0 = new (GetAllocator())
HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
HVecOperation* p1 = new (GetAllocator())
- HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc);
+ HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kInt16, 8, kNoDexPc);
HVecOperation* p2 = new (GetAllocator())
- HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
+ HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kInt8, 16, kNoDexPc);
HVecHalvingAdd* v0 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 4,
- /*is_rounded*/ true, /*is_unsigned*/ true, kNoDexPc);
+ GetAllocator(), u0, u0, DataType::Type::kUint32, 4, /*is_rounded*/ true, kNoDexPc);
HVecHalvingAdd* v1 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 4,
- /*is_rounded*/ false, /*is_unsigned*/ true, kNoDexPc);
+ GetAllocator(), u0, u0, DataType::Type::kUint32, 4, /*is_rounded*/ false, kNoDexPc);
HVecHalvingAdd* v2 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 4,
- /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_rounded*/ true, kNoDexPc);
HVecHalvingAdd* v3 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 4,
- /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_rounded*/ false, kNoDexPc);
+
HVecHalvingAdd* v4 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 2,
- /*is_rounded*/ true, /*is_unsigned*/ true, kNoDexPc);
+ GetAllocator(), u1, u1, DataType::Type::kUint16, 8, /*is_rounded*/ true, kNoDexPc);
HVecHalvingAdd* v5 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p1, p1, DataType::Type::kUint8, 16,
- /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
+ GetAllocator(), u1, u1, DataType::Type::kUint16, 8, /*is_rounded*/ false, kNoDexPc);
HVecHalvingAdd* v6 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p1, p1, DataType::Type::kUint8, 16,
- /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
+ GetAllocator(), p1, p1, DataType::Type::kInt16, 8, /*is_rounded*/ true, kNoDexPc);
HVecHalvingAdd* v7 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p1, p1, DataType::Type::kInt8, 16,
- /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
+ GetAllocator(), p1, p1, DataType::Type::kInt16, 8, /*is_rounded*/ false, kNoDexPc);
+
HVecHalvingAdd* v8 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p1, p1, DataType::Type::kInt8, 16,
- /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
+ GetAllocator(), u2, u2, DataType::Type::kUint8, 16, /*is_rounded*/ true, kNoDexPc);
HVecHalvingAdd* v9 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p2, p2, DataType::Type::kUint16, 8,
- /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
+ GetAllocator(), u2, u2, DataType::Type::kUint8, 16, /*is_rounded*/ false, kNoDexPc);
HVecHalvingAdd* v10 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p2, p2, DataType::Type::kUint16, 8,
- /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
+ GetAllocator(), p2, p2, DataType::Type::kInt8, 16, /*is_rounded*/ true, kNoDexPc);
HVecHalvingAdd* v11 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p2, p2, DataType::Type::kInt16, 2,
- /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
- HVecHalvingAdd* v12 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p2, p2, DataType::Type::kInt16, 2,
- /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
- HVecHalvingAdd* hadd_insns[] = { v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12 };
+ GetAllocator(), p2, p2, DataType::Type::kInt8, 16, /*is_rounded*/ false, kNoDexPc);
+ HVecHalvingAdd* hadd_insns[] = { v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11 };
+
+ EXPECT_FALSE(u0->CanBeMoved());
+ EXPECT_FALSE(u1->CanBeMoved());
+ EXPECT_FALSE(u2->CanBeMoved());
EXPECT_FALSE(p0->CanBeMoved());
EXPECT_FALSE(p1->CanBeMoved());
EXPECT_FALSE(p2->CanBeMoved());
@@ -427,26 +337,18 @@ TEST_F(NodesVectorTest, VectorAttributesMatterOnHalvingAdd) {
EXPECT_TRUE(hadd_insn->CanBeMoved());
}
- // Deprecated; IsUnsigned() should be removed with the introduction of Uint32 and Uint64.
- EXPECT_TRUE(v0->IsUnsigned());
- EXPECT_TRUE(v1->IsUnsigned());
- EXPECT_TRUE(!v2->IsUnsigned());
- EXPECT_TRUE(!v3->IsUnsigned());
- EXPECT_TRUE(v4->IsUnsigned());
-
EXPECT_TRUE(v0->IsRounded());
EXPECT_TRUE(!v1->IsRounded());
EXPECT_TRUE(v2->IsRounded());
EXPECT_TRUE(!v3->IsRounded());
EXPECT_TRUE(v4->IsRounded());
- EXPECT_TRUE(v5->IsRounded());
- EXPECT_TRUE(!v6->IsRounded());
- EXPECT_TRUE(v7->IsRounded());
- EXPECT_TRUE(!v8->IsRounded());
- EXPECT_TRUE(v9->IsRounded());
- EXPECT_TRUE(!v10->IsRounded());
- EXPECT_TRUE(v11->IsRounded());
- EXPECT_TRUE(!v12->IsRounded());
+ EXPECT_TRUE(!v5->IsRounded());
+ EXPECT_TRUE(v6->IsRounded());
+ EXPECT_TRUE(!v7->IsRounded());
+ EXPECT_TRUE(v8->IsRounded());
+ EXPECT_TRUE(!v9->IsRounded());
+ EXPECT_TRUE(v10->IsRounded());
+ EXPECT_TRUE(!v11->IsRounded());
for (HVecHalvingAdd* hadd_insn1 : hadd_insns) {
for (HVecHalvingAdd* hadd_insn2 : hadd_insns) {
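
[Editor's note] The nested loop closing the test above uses a pairwise-equality idiom worth spelling out: every node in the array differs from every other in at least one attribute (packed type or rounding), so structural equality should hold exactly when the two pointers are the same node. A standalone sketch of the idiom with hypothetical stand-in types:

#include <cassert>

struct Node {
  int packed_type;   // stands in for DataType::Type
  bool is_rounded;   // stands in for the rounding flag
  bool Equals(const Node* other) const {
    return packed_type == other->packed_type && is_rounded == other->is_rounded;
  }
};

int main() {
  Node nodes[] = { {0, true}, {0, false}, {1, true}, {1, false} };
  for (Node& n1 : nodes) {
    for (Node& n2 : nodes) {
      // Identity and structural equality must agree for a well-formed set.
      assert((&n1 == &n2) == n1.Equals(&n2));
    }
  }
  return 0;
}

This is why the test dropped a vector (v12 duplicated v11's attributes once is_unsigned disappeared): a duplicate pair would make Equals true for distinct pointers and break the invariant.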
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 47ef194574..b3f23a0dcd 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1410,7 +1410,7 @@ void OptimizingCompiler::GenerateJitDebugInfo(ArtMethod* method, debug::MethodDe
GetCompilerDriver()->GetInstructionSetFeatures(),
mini_debug_info,
ArrayRef<const debug::MethodDebugInfo>(&info, 1));
- MutexLock mu(Thread::Current(), g_jit_debug_mutex);
+ MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
JITCodeEntry* entry = CreateJITCodeEntry(elf_file);
IncrementJITCodeEntryRefcount(entry, info.code_address);
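
[Editor's note] The two MutexLock changes in this commit (here and in jit_compiler.cc) replace a file-local mutex with the shared Locks::native_debug_interface_lock_, so every writer of JIT debug entries serializes on one process-wide lock. The pattern itself is plain RAII scoped locking; a sketch using std::mutex as a stand-in for ART's Mutex (the registry type here is hypothetical):

#include <cstdint>
#include <mutex>
#include <vector>

std::mutex g_native_debug_interface_lock;           // one shared lock...
std::vector<std::vector<uint8_t>> g_debug_entries;  // ...guards one registry

void RegisterDebugElfFile(std::vector<uint8_t> elf_file) {
  std::lock_guard<std::mutex> mu(g_native_debug_interface_lock);
  g_debug_entries.push_back(std::move(elf_file));  // mutation under the lock
}  // lock released when mu goes out of scope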
diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc
index 1d3fe0334d..27f9ac3990 100644
--- a/compiler/optimizing/register_allocation_resolver.cc
+++ b/compiler/optimizing/register_allocation_resolver.cc
@@ -103,6 +103,7 @@ void RegisterAllocationResolver::Resolve(ArrayRef<HInstruction* const> safepoint
case DataType::Type::kFloat64:
slot += long_spill_slots;
FALLTHROUGH_INTENDED;
+ case DataType::Type::kUint64:
case DataType::Type::kInt64:
slot += float_spill_slots;
FALLTHROUGH_INTENDED;
@@ -110,6 +111,7 @@ void RegisterAllocationResolver::Resolve(ArrayRef<HInstruction* const> safepoint
slot += int_spill_slots;
FALLTHROUGH_INTENDED;
case DataType::Type::kReference:
+ case DataType::Type::kUint32:
case DataType::Type::kInt32:
case DataType::Type::kUint16:
case DataType::Type::kUint8:
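
[Editor's note] The switch patched above computes a stack-slot base by deliberately falling through successive cases: each case adds the size of the spill pools laid out before its own, so adding kUint64 beside kInt64 (and kUint32 beside kInt32) folds the new types into the existing 64-bit and 32-bit buckets. A standalone sketch of the technique with hypothetical pool sizes ([[fallthrough]] stands in for ART's FALLTHROUGH_INTENDED):

#include <cstddef>
#include <cstdio>

enum class Type { kFloat64, kInt64, kUint64, kFloat32, kInt32, kUint32 };

std::size_t SlotBase(Type type, std::size_t longs, std::size_t floats,
                     std::size_t ints) {
  std::size_t slot = 0;
  switch (type) {
    case Type::kFloat64:
      slot += longs;        // doubles sit past the long pool...
      [[fallthrough]];
    case Type::kUint64:
    case Type::kInt64:
      slot += floats;       // ...longs past the float pool...
      [[fallthrough]];
    case Type::kFloat32:
      slot += ints;         // ...floats past the int pool.
      [[fallthrough]];
    case Type::kUint32:
    case Type::kInt32:
      break;                // ints start at the base.
  }
  return slot;
}

int main() {
  // With 2 long, 3 float, and 4 int slots, a double starts past all three pools.
  std::printf("%zu\n", SlotBase(Type::kFloat64, 2, 3, 4));  // prints 9
  return 0;
}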
diff --git a/compiler/optimizing/register_allocator_graph_color.cc b/compiler/optimizing/register_allocator_graph_color.cc
index ad5248e982..fa7ad82316 100644
--- a/compiler/optimizing/register_allocator_graph_color.cc
+++ b/compiler/optimizing/register_allocator_graph_color.cc
@@ -1972,6 +1972,8 @@ void RegisterAllocatorGraphColor::AllocateSpillSlots(ArrayRef<InterferenceNode*
case DataType::Type::kInt16:
int_intervals.push_back(parent);
break;
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unexpected type for interval " << node->GetInterval()->GetType();
UNREACHABLE();
diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc
index cfe63bd758..216fb57a96 100644
--- a/compiler/optimizing/register_allocator_linear_scan.cc
+++ b/compiler/optimizing/register_allocator_linear_scan.cc
@@ -1131,6 +1131,8 @@ void RegisterAllocatorLinearScan::AllocateSpillSlotFor(LiveInterval* interval) {
case DataType::Type::kInt16:
spill_slots = &int_spill_slots_;
break;
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unexpected type for interval " << interval->GetType();
}
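
[Editor's note] The last two hunks (graph-color and linear-scan allocators) add kUint32/kUint64 to the fatal branch rather than to a spill pool. The point of listing impossible cases explicitly is to keep the switch exhaustive, so a compiler warning on unhandled enumerators flags any future DataType::Type additions at build time. A standalone sketch of the pattern, with std::abort() standing in for LOG(FATAL) and UNREACHABLE():

#include <cstdlib>
#include <vector>

enum class Type { kInt32, kInt64, kUint32, kUint64, kVoid };

std::vector<int>* PickSpillPool(Type type,
                                std::vector<int>* int_pool,
                                std::vector<int>* long_pool) {
  switch (type) {
    case Type::kInt32: return int_pool;
    case Type::kInt64: return long_pool;
    case Type::kUint32:
    case Type::kUint64:
    case Type::kVoid:
      std::abort();  // unexpected type for a spill interval
  }
  std::abort();  // unreachable; also silences missing-return warnings
}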