Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator_mips.cc             16
-rw-r--r--  compiler/optimizing/data_type.h                         2
-rw-r--r--  compiler/optimizing/emit_swap_mips_test.cc             32
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc          12
-rw-r--r--  compiler/optimizing/loop_optimization.cc               53
-rw-r--r--  compiler/optimizing/nodes_vector.h                     16
-rw-r--r--  compiler/optimizing/optimizing_cfi_test_expected.inc   12
7 files changed, 79 insertions, 64 deletions
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 893692a04d..2f65e8c958 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -1300,7 +1300,7 @@ void ParallelMoveResolverMIPS::Exchange(int index1, int index2, bool double_slot
// automatically unspilled when the scratch scope object is destroyed).
ScratchRegisterScope ensure_scratch(this, TMP, V0, codegen_->GetNumberOfCoreRegisters());
// If V0 spills onto the stack, SP-relative offsets need to be adjusted.
- int stack_offset = ensure_scratch.IsSpilled() ? kMipsWordSize : 0;
+ int stack_offset = ensure_scratch.IsSpilled() ? kStackAlignment : 0;
for (int i = 0; i <= (double_slot ? 1 : 0); i++, stack_offset += kMipsWordSize) {
__ LoadFromOffset(kLoadWord,
Register(ensure_scratch.GetRegister()),
@@ -6244,8 +6244,11 @@ void InstructionCodeGeneratorMIPS::HandleFieldGet(HInstruction* instruction,
InvokeRuntimeCallingConvention calling_convention;
__ Addiu32(locations->GetTemp(0).AsRegister<Register>(), obj, offset);
// Do implicit Null check
- __ Lw(ZERO, locations->GetTemp(0).AsRegister<Register>(), 0);
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ __ LoadFromOffset(kLoadWord,
+ ZERO,
+ locations->GetTemp(0).AsRegister<Register>(),
+ 0,
+ null_checker);
codegen_->InvokeRuntime(kQuickA64Load, instruction, dex_pc);
CheckEntrypointTypes<kQuickA64Load, int64_t, volatile const int64_t*>();
if (type == DataType::Type::kFloat64) {
@@ -6398,8 +6401,11 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction,
InvokeRuntimeCallingConvention calling_convention;
__ Addiu32(locations->GetTemp(0).AsRegister<Register>(), obj, offset);
// Do implicit Null check.
- __ Lw(ZERO, locations->GetTemp(0).AsRegister<Register>(), 0);
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ __ LoadFromOffset(kLoadWord,
+ ZERO,
+ locations->GetTemp(0).AsRegister<Register>(),
+ 0,
+ null_checker);
if (type == DataType::Type::kFloat64) {
// Pass FP parameters in core registers.
if (value_location.IsFpuRegister()) {
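
The first hunk above is the core fix: when the scratch register V0 spills, the prologue now reserves kStackAlignment (16 bytes on MIPS) instead of kMipsWordSize (4), so SP-relative slot offsets must be rebased by the full reserve. A minimal standalone sketch of that arithmetic (plain C++, not ART code; the constants mirror the MIPS values):

#include <cstdio>

int main() {
  const int kMipsWordSize   = 4;   // old spill reserve: just the spilled word
  const int kStackAlignment = 16;  // new spill reserve: keeps SP 16-byte aligned
  const int slot = 48;             // a slot's offset from SP before the spill
  // Offsets are re-expressed against the adjusted SP, so they grow by the
  // amount actually reserved: 48 -> 52 before the fix, 48 -> 64 after it
  // (compare the emit_swap_mips_test expectations below).
  printf("old: %d\n", slot + kMipsWordSize);
  printf("new: %d\n", slot + kStackAlignment);
  return 0;
}

The two HandleField hunks are companion cleanup: the hand-written Lw plus RecordPcInfo pair becomes a LoadFromOffset overload that takes the null_checker callback, so the implicit null check records its PC the same way as the surrounding field accesses.
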
diff --git a/compiler/optimizing/data_type.h b/compiler/optimizing/data_type.h
index 3b67efe100..75a7fbe6ca 100644
--- a/compiler/optimizing/data_type.h
+++ b/compiler/optimizing/data_type.h
@@ -123,7 +123,7 @@ class DataType {
}
static bool IsUnsignedType(Type type) {
- return type == Type::kUint8 || type == Type::kUint16;
+ return type == Type::kBool || type == Type::kUint8 || type == Type::kUint16;
}
// Return the general kind of `type`, fusing integer-like types as Type::kInt.
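
Restating the updated predicate outside ART (a hedged sketch; Type stands in for DataType::Type): kBool joins the unsigned group because its 1-byte storage unit only ever holds 0 or 1, so widening it must zero-extend, exactly like kUint8.

enum class Type { kBool, kUint8, kInt8, kUint16, kInt16, kInt32, kInt64 };

static bool IsUnsignedType(Type type) {
  // A boolean's storage unit holds only 0 or 1, so it zero-extends like
  // kUint8; this classification feeds the AND simplification further down.
  return type == Type::kBool || type == Type::kUint8 || type == Type::kUint16;
}
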
diff --git a/compiler/optimizing/emit_swap_mips_test.cc b/compiler/optimizing/emit_swap_mips_test.cc
index 36e932c67a..b63914faf7 100644
--- a/compiler/optimizing/emit_swap_mips_test.cc
+++ b/compiler/optimizing/emit_swap_mips_test.cc
@@ -238,14 +238,14 @@ TEST_F(EmitSwapMipsTest, TwoStackSlots) {
DataType::Type::kInt32,
nullptr);
const char* expected =
- "addiu $sp, $sp, -4\n"
+ "addiu $sp, $sp, -16\n"
"sw $v0, 0($sp)\n"
- "lw $v0, 56($sp)\n"
- "lw $t8, 52($sp)\n"
- "sw $v0, 52($sp)\n"
- "sw $t8, 56($sp)\n"
+ "lw $v0, 68($sp)\n"
+ "lw $t8, 64($sp)\n"
+ "sw $v0, 64($sp)\n"
+ "sw $t8, 68($sp)\n"
"lw $v0, 0($sp)\n"
- "addiu $sp, $sp, 4\n";
+ "addiu $sp, $sp, 16\n";
DriverWrapper(moves_, expected, "TwoStackSlots");
}
@@ -261,18 +261,18 @@ TEST_F(EmitSwapMipsTest, TwoDoubleStackSlots) {
DataType::Type::kInt64,
nullptr);
const char* expected =
- "addiu $sp, $sp, -4\n"
+ "addiu $sp, $sp, -16\n"
"sw $v0, 0($sp)\n"
- "lw $v0, 60($sp)\n"
- "lw $t8, 52($sp)\n"
- "sw $v0, 52($sp)\n"
- "sw $t8, 60($sp)\n"
- "lw $v0, 64($sp)\n"
- "lw $t8, 56($sp)\n"
- "sw $v0, 56($sp)\n"
- "sw $t8, 64($sp)\n"
+ "lw $v0, 72($sp)\n"
+ "lw $t8, 64($sp)\n"
+ "sw $v0, 64($sp)\n"
+ "sw $t8, 72($sp)\n"
+ "lw $v0, 76($sp)\n"
+ "lw $t8, 68($sp)\n"
+ "sw $v0, 68($sp)\n"
+ "sw $t8, 76($sp)\n"
"lw $v0, 0($sp)\n"
- "addiu $sp, $sp, 4\n";
+ "addiu $sp, $sp, 16\n";
DriverWrapper(moves_, expected, "TwoDoubleStackSlots");
}
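
Both test updates are the same mechanical shift: every slot offset grows by the extra 12 bytes (16 - 4) the aligned spill area now reserves, and the addiu immediates change from +/-4 to +/-16. A quick standalone check of the rebasing (plain C++, not part of the test):

#include <cstdio>

int main() {
  const int old_reserve = 4;                   // old "addiu $sp, $sp, -4"
  const int new_reserve = 16;                  // new "addiu $sp, $sp, -16"
  const int old_offsets[] = {52, 56, 60, 64};  // slots from the old expectations
  for (int off : old_offsets) {
    // Prints 52 -> 64, 56 -> 68, 60 -> 72, 64 -> 76, matching the
    // updated expectations above.
    printf("%d -> %d\n", off, off + (new_reserve - old_reserve));
  }
  return 0;
}
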
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index d81a752853..189d5aea56 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -1044,12 +1044,14 @@ void InstructionSimplifierVisitor::VisitArraySet(HArraySet* instruction) {
}
static bool IsTypeConversionLossless(DataType::Type input_type, DataType::Type result_type) {
+ // Make sure all implicit conversions have been simplified and no new ones have been introduced.
+ DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type))
+ << input_type << "," << result_type;
// The conversion to a larger type is loss-less with the exception of two cases,
// - conversion to the unsigned type Uint16, where we may lose some bits, and
// - conversion from float to long, the only FP to integral conversion with smaller FP type.
// For integral to FP conversions this holds because the FP mantissa is large enough.
// Note: The size check excludes Uint8 as the result type.
- DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type));
return DataType::Size(result_type) > DataType::Size(input_type) &&
result_type != DataType::Type::kUint16 &&
!(result_type == DataType::Type::kInt64 && input_type == DataType::Type::kFloat32);
@@ -1253,7 +1255,10 @@ void InstructionSimplifierVisitor::VisitAnd(HAnd* instruction) {
if (input_cst != nullptr) {
int64_t value = Int64FromConstant(input_cst);
- if (value == -1) {
+ if (value == -1 ||
+ // Similar cases under zero extension.
+ (DataType::IsUnsignedType(input_other->GetType()) &&
+ ((DataType::MaxValueOfIntegralType(input_other->GetType()) & ~value) == 0))) {
// Replace code looking like
// AND dst, src, 0xFFF...FF
// with
@@ -1332,6 +1337,9 @@ void InstructionSimplifierVisitor::VisitAnd(HAnd* instruction) {
TryReplaceFieldOrArrayGetType(input_other, new_type)) {
instruction->ReplaceWith(input_other);
instruction->GetBlock()->RemoveInstruction(instruction);
+ } else if (DataType::IsTypeConversionImplicit(input_other->GetType(), new_type)) {
+ instruction->ReplaceWith(input_other);
+ instruction->GetBlock()->RemoveInstruction(instruction);
} else {
HTypeConversion* type_conversion = new (GetGraph()->GetAllocator()) HTypeConversion(
new_type, input_other, instruction->GetDexPc());
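
The widened AND rule in isolation (a hedged sketch, not ART code; max_value stands in for DataType::MaxValueOfIntegralType on the non-constant operand's type): the AND is a no-op either with an all-ones mask or, for a zero-extended unsigned operand, with any mask that keeps every bit the operand can actually set.

#include <cassert>
#include <cstdint>

static bool AndIsNoOp(int64_t mask, uint64_t max_value, bool is_unsigned) {
  return mask == -1 || (is_unsigned && ((max_value & ~mask) == 0));
}

int main() {
  assert(AndIsNoOp(-1, 0, false));       // AND with all-ones, any type
  assert(AndIsNoOp(0xFF, 0xFF, true));   // Uint8 & 0xFF
  assert(AndIsNoOp(0x1FF, 0xFF, true));  // a wider mask still covers Uint8
  assert(!AndIsNoOp(0x7F, 0xFF, true));  // drops bit 7: not a no-op
  return 0;
}

The last hunk is the matching catch-all: once a get has been retyped, a conversion to new_type that is already implicit needs no HTypeConversion node at all.
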
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 69c6b94c6b..6a4faaf438 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -123,7 +123,7 @@ static bool IsSignExtensionAndGet(HInstruction* instruction,
/*out*/ HInstruction** operand) {
// Accept any already wider constant that would be handled properly by sign
// extension when represented in the *width* of the given narrower data type
- // (the fact that Uint16 normally zero extends does not matter here).
+ // (the fact that Uint8/Uint16 normally zero extend does not matter here).
int64_t value = 0;
if (IsInt64AndGet(instruction, /*out*/ &value)) {
switch (type) {
@@ -221,31 +221,6 @@ static bool IsZeroExtensionAndGet(HInstruction* instruction,
return false;
}
}
- // A sign (or zero) extension followed by an explicit removal of just the
- // higher sign bits is equivalent to a zero extension of the underlying operand.
- //
- // TODO: move this into simplifier and use new type system instead.
- //
- if (instruction->IsAnd()) {
- int64_t mask = 0;
- HInstruction* a = instruction->InputAt(0);
- HInstruction* b = instruction->InputAt(1);
- // In (a & b) find (mask & b) or (a & mask) with sign or zero extension on the non-mask.
- if ((IsInt64AndGet(a, /*out*/ &mask) && (IsSignExtensionAndGet(b, type, /*out*/ operand) ||
- IsZeroExtensionAndGet(b, type, /*out*/ operand))) ||
- (IsInt64AndGet(b, /*out*/ &mask) && (IsSignExtensionAndGet(a, type, /*out*/ operand) ||
- IsZeroExtensionAndGet(a, type, /*out*/ operand)))) {
- switch ((*operand)->GetType()) {
- case DataType::Type::kUint8:
- case DataType::Type::kInt8:
- return mask == std::numeric_limits<uint8_t>::max();
- case DataType::Type::kUint16:
- case DataType::Type::kInt16:
- return mask == std::numeric_limits<uint16_t>::max();
- default: return false;
- }
- }
- }
// An explicit widening conversion of an unsigned expression zero-extends.
if (instruction->IsTypeConversion()) {
HInstruction* conv = instruction->InputAt(0);
@@ -277,10 +252,15 @@ static bool IsNarrowerOperands(HInstruction* a,
/*out*/ HInstruction** r,
/*out*/ HInstruction** s,
/*out*/ bool* is_unsigned) {
- if (IsSignExtensionAndGet(a, type, r) && IsSignExtensionAndGet(b, type, s)) {
+ // Look for a matching sign extension.
+ DataType::Type stype = HVecOperation::ToSignedType(type);
+ if (IsSignExtensionAndGet(a, stype, r) && IsSignExtensionAndGet(b, stype, s)) {
*is_unsigned = false;
return true;
- } else if (IsZeroExtensionAndGet(a, type, r) && IsZeroExtensionAndGet(b, type, s)) {
+ }
+ // Look for a matching zero extension.
+ DataType::Type utype = HVecOperation::ToUnsignedType(type);
+ if (IsZeroExtensionAndGet(a, utype, r) && IsZeroExtensionAndGet(b, utype, s)) {
*is_unsigned = true;
return true;
}
@@ -292,10 +272,15 @@ static bool IsNarrowerOperand(HInstruction* a,
DataType::Type type,
/*out*/ HInstruction** r,
/*out*/ bool* is_unsigned) {
- if (IsSignExtensionAndGet(a, type, r)) {
+ // Look for a matching sign extension.
+ DataType::Type stype = HVecOperation::ToSignedType(type);
+ if (IsSignExtensionAndGet(a, stype, r)) {
*is_unsigned = false;
return true;
- } else if (IsZeroExtensionAndGet(a, type, r)) {
+ }
+ // Look for a matching zero extension.
+ DataType::Type utype = HVecOperation::ToUnsignedType(type);
+ if (IsZeroExtensionAndGet(a, utype, r)) {
*is_unsigned = true;
return true;
}
@@ -1162,7 +1147,6 @@ bool HLoopOptimization::VectorizeUse(LoopNode* node,
size_t size_vec = DataType::Size(type);
size_t size_from = DataType::Size(from);
size_t size_to = DataType::Size(to);
- DataType::Type ctype = size_from == size_vec ? from : type;
// Accept an integral conversion
// (1a) narrowing into vector type, "wider" operations cannot bring in higher order bits, or
// (1b) widening from at least vector type, and
@@ -1172,7 +1156,7 @@ bool HLoopOptimization::VectorizeUse(LoopNode* node,
VectorizeUse(node, opa, generate_code, type, restrictions | kNoHiBits)) ||
(size_to >= size_from &&
size_from >= size_vec &&
- VectorizeUse(node, opa, generate_code, ctype, restrictions))) {
+ VectorizeUse(node, opa, generate_code, type, restrictions))) {
if (generate_code) {
if (vector_mode_ == kVector) {
vector_map_->Put(instruction, vector_map_->Get(opa)); // operand pass-through
@@ -1578,12 +1562,13 @@ void HLoopOptimization::GenerateVecMem(HInstruction* org,
// Scalar store or load.
DCHECK(vector_mode_ == kSequential);
if (opb != nullptr) {
+ DataType::Type component_type = org->AsArraySet()->GetComponentType();
vector = new (global_allocator_) HArraySet(
- org->InputAt(0), opa, opb, type, org->GetSideEffects(), dex_pc);
+ org->InputAt(0), opa, opb, component_type, org->GetSideEffects(), dex_pc);
} else {
bool is_string_char_at = org->AsArrayGet()->IsStringCharAt();
vector = new (global_allocator_) HArrayGet(
- org->InputAt(0), opa, type, org->GetSideEffects(), dex_pc, is_string_char_at);
+ org->InputAt(0), opa, org->GetType(), org->GetSideEffects(), dex_pc, is_string_char_at);
}
}
vector_map_->Put(org, vector);
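
Note the block removed from IsZeroExtensionAndGet: the "extension followed by a mask of the low bits" pattern it recognized is now covered by the simplifier's AND rule above, resolving the TODO. The IsNarrowerOperand(s) rewrite then makes both probes explicit: sign extensions are matched at the signed flavor of the packed type and zero extensions at the unsigned flavor, so one packed type accepts either idiom. A sketch of the two source patterns in question (plain C++ illustrations, not ART's IR):

#include <cassert>
#include <cstdint>

int32_t sign_extended(int32_t x) { return static_cast<int8_t>(x); }  // "(byte) x"
int32_t zero_extended(int32_t x) { return x & 0xFF; }                // "x & 0xFF"

int main() {
  // For a packed kUint8 the signed probe (at kInt8) accepts the first form
  // and the unsigned probe (at kUint8) the second; the is_unsigned out-flag
  // records which one matched so the right vector operation is generated.
  assert(sign_extended(0x80) == -128);
  assert(zero_extended(0x80) == 128);
  return 0;
}
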
diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h
index 4e78e4e6a2..17540b9770 100644
--- a/compiler/optimizing/nodes_vector.h
+++ b/compiler/optimizing/nodes_vector.h
@@ -136,6 +136,20 @@ class HVecOperation : public HVariableInputSizeInstruction {
}
}
+ // Maps an integral type to the same-size unsigned type and leaves other types alone.
+ static DataType::Type ToUnsignedType(DataType::Type type) {
+ switch (type) {
+ case DataType::Type::kBool: // 1-byte storage unit
+ case DataType::Type::kInt8:
+ return DataType::Type::kUint8;
+ case DataType::Type::kInt16:
+ return DataType::Type::kUint16;
+ default:
+ DCHECK(type != DataType::Type::kVoid && type != DataType::Type::kReference) << type;
+ return type;
+ }
+ }
+
DECLARE_ABSTRACT_INSTRUCTION(VecOperation);
protected:
@@ -254,6 +268,8 @@ inline static bool HasConsistentPackedTypes(HInstruction* input, DataType::Type
}
DCHECK(input->IsVecOperation());
DataType::Type input_type = input->AsVecOperation()->GetPackedType();
+ DCHECK_EQ(HVecOperation::ToUnsignedType(input_type) == HVecOperation::ToUnsignedType(type),
+ HVecOperation::ToSignedType(input_type) == HVecOperation::ToSignedType(type));
return HVecOperation::ToSignedType(input_type) == HVecOperation::ToSignedType(type);
}
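
The new DCHECK in HasConsistentPackedTypes leans on an invariant of the two mappings: ToSignedType and ToUnsignedType induce the same partition of the small integral types ({kBool, kUint8, kInt8} and {kUint16, kInt16}), so the signed and unsigned comparisons must always agree. A standalone check of that invariant under stand-in definitions:

#include <cassert>

enum Type { kBool, kUint8, kInt8, kUint16, kInt16, kInt32 };

Type ToSigned(Type t) {
  return (t == kBool || t == kUint8) ? kInt8 : (t == kUint16) ? kInt16 : t;
}
Type ToUnsigned(Type t) {
  return (t == kBool || t == kInt8) ? kUint8 : (t == kInt16) ? kUint16 : t;
}

int main() {
  const Type all[] = {kBool, kUint8, kInt8, kUint16, kInt16, kInt32};
  for (Type a : all) {
    for (Type b : all) {
      // Same verdict from both views, for every pair of packed types.
      assert((ToUnsigned(a) == ToUnsigned(b)) == (ToSigned(a) == ToSigned(b)));
    }
  }
  return 0;
}
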
diff --git a/compiler/optimizing/optimizing_cfi_test_expected.inc b/compiler/optimizing/optimizing_cfi_test_expected.inc
index fde55cb92f..1e82c4b0f7 100644
--- a/compiler/optimizing/optimizing_cfi_test_expected.inc
+++ b/compiler/optimizing/optimizing_cfi_test_expected.inc
@@ -330,10 +330,10 @@ static constexpr uint8_t expected_cfi_kThumb2_adjust[] = {
static constexpr uint8_t expected_asm_kMips_adjust_head[] = {
0xC0, 0xFF, 0xBD, 0x27, 0x3C, 0x00, 0xBF, 0xAF, 0x38, 0x00, 0xB1, 0xAF,
0x34, 0x00, 0xB0, 0xAF, 0x28, 0x00, 0xB6, 0xF7, 0x20, 0x00, 0xB4, 0xF7,
- 0x08, 0x00, 0x80, 0x14, 0xFC, 0xFF, 0xBD, 0x27,
+ 0x08, 0x00, 0x80, 0x14, 0xF0, 0xFF, 0xBD, 0x27,
0x00, 0x00, 0xBF, 0xAF, 0x00, 0x00, 0x10, 0x04, 0x02, 0x00, 0x01, 0x3C,
0x18, 0x00, 0x21, 0x34, 0x21, 0x08, 0x3F, 0x00, 0x00, 0x00, 0xBF, 0x8F,
- 0x09, 0x00, 0x20, 0x00, 0x04, 0x00, 0xBD, 0x27,
+ 0x09, 0x00, 0x20, 0x00, 0x10, 0x00, 0xBD, 0x27,
};
static constexpr uint8_t expected_asm_kMips_adjust_tail[] = {
0x3C, 0x00, 0xBF, 0x8F, 0x38, 0x00, 0xB1, 0x8F, 0x34, 0x00, 0xB0, 0x8F,
@@ -342,7 +342,7 @@ static constexpr uint8_t expected_asm_kMips_adjust_tail[] = {
};
static constexpr uint8_t expected_cfi_kMips_adjust[] = {
0x44, 0x0E, 0x40, 0x44, 0x9F, 0x01, 0x44, 0x91, 0x02, 0x44, 0x90, 0x03,
- 0x50, 0x0E, 0x44, 0x60, 0x0E, 0x40, 0x04, 0x04, 0x00, 0x02, 0x00, 0x0A,
+ 0x50, 0x0E, 0x50, 0x60, 0x0E, 0x40, 0x04, 0x04, 0x00, 0x02, 0x00, 0x0A,
0x44, 0xDF, 0x44, 0xD1, 0x44, 0xD0, 0x50, 0x0E, 0x00, 0x0B, 0x0E, 0x40,
};
// 0x00000000: addiu sp, sp, -64
@@ -356,8 +356,8 @@ static constexpr uint8_t expected_cfi_kMips_adjust[] = {
// 0x00000010: sdc1 f22, +40(sp)
// 0x00000014: sdc1 f20, +32(sp)
// 0x00000018: bnez a0, 0x0000003c ; +36
-// 0x0000001c: addiu sp, sp, -4
-// 0x00000020: .cfi_def_cfa_offset: 68
+// 0x0000001c: addiu sp, sp, -16
+// 0x00000020: .cfi_def_cfa_offset: 80
// 0x00000020: sw ra, +0(sp)
// 0x00000024: nal
// 0x00000028: lui at, 2
@@ -365,7 +365,7 @@ static constexpr uint8_t expected_cfi_kMips_adjust[] = {
// 0x00000030: addu at, at, ra
// 0x00000034: lw ra, +0(sp)
// 0x00000038: jr at
-// 0x0000003c: addiu sp, sp, 4
+// 0x0000003c: addiu sp, sp, 16
// 0x00000040: .cfi_def_cfa_offset: 64
// 0x00000040: nop
// ...
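
Decoding the CFI change: 0x0E is DW_CFA_def_cfa_offset and its ULEB128 operand moves from 0x44 (68) to 0x50 (80), because the branch-over path now adjusts SP by 16 instead of 4 on top of the 64-byte frame. A standalone sketch of the arithmetic:

#include <cstdio>

int main() {
  const int frame = 64;  // .cfi_def_cfa_offset after the main prologue
  // Old extra adjustment: 4 -> CFA offset 68 (0x44); new: 16 -> 80 (0x50).
  printf("old: %d (0x%x)\n", frame + 4, frame + 4);
  printf("new: %d (0x%x)\n", frame + 16, frame + 16);
  return 0;
}
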