Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/boolean_simplifier.cc | 31
-rw-r--r--  compiler/optimizing/bounds_check_elimination_test.cc | 27
-rw-r--r--  compiler/optimizing/builder.cc | 21
-rw-r--r--  compiler/optimizing/code_generator.cc | 15
-rw-r--r--  compiler/optimizing/code_generator_arm.cc | 136
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc | 59
-rw-r--r--  compiler/optimizing/code_generator_mips.cc | 4186
-rw-r--r--  compiler/optimizing/code_generator_mips.h | 362
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc | 110
-rw-r--r--  compiler/optimizing/code_generator_mips64.h | 3
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 366
-rw-r--r--  compiler/optimizing/code_generator_x86.h | 7
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 244
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h | 7
-rw-r--r--  compiler/optimizing/codegen_test.cc | 143
-rw-r--r--  compiler/optimizing/common_arm64.h | 4
-rw-r--r--  compiler/optimizing/constant_area_fixups_x86.cc | 132
-rw-r--r--  compiler/optimizing/constant_folding_test.cc | 2
-rw-r--r--  compiler/optimizing/dead_code_elimination_test.cc | 2
-rw-r--r--  compiler/optimizing/gvn_test.cc | 20
-rw-r--r--  compiler/optimizing/induction_var_analysis.cc | 7
-rw-r--r--  compiler/optimizing/induction_var_analysis_test.cc | 34
-rw-r--r--  compiler/optimizing/induction_var_range.cc | 167
-rw-r--r--  compiler/optimizing/induction_var_range.h | 48
-rw-r--r--  compiler/optimizing/induction_var_range_test.cc | 140
-rw-r--r--  compiler/optimizing/inliner.cc | 3
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc | 19
-rw-r--r--  compiler/optimizing/intrinsics_mips64.cc | 779
-rw-r--r--  compiler/optimizing/intrinsics_x86.cc | 8
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.cc | 8
-rw-r--r--  compiler/optimizing/licm_test.cc | 2
-rw-r--r--  compiler/optimizing/nodes.cc | 59
-rw-r--r--  compiler/optimizing/nodes.h | 251
-rw-r--r--  compiler/optimizing/nodes_test.cc | 15
-rw-r--r--  compiler/optimizing/nodes_x86.h | 39
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 30
-rw-r--r--  compiler/optimizing/reference_type_propagation.cc | 23
-rw-r--r--  compiler/optimizing/register_allocator.cc | 9
-rw-r--r--  compiler/optimizing/register_allocator_test.cc | 27
39 files changed, 6981 insertions(+), 564 deletions(-)
diff --git a/compiler/optimizing/boolean_simplifier.cc b/compiler/optimizing/boolean_simplifier.cc
index 5b346872b0..f985745e7a 100644
--- a/compiler/optimizing/boolean_simplifier.cc
+++ b/compiler/optimizing/boolean_simplifier.cc
@@ -69,19 +69,17 @@ static HInstruction* GetOppositeCondition(HInstruction* cond) {
if (cond->IsCondition()) {
HInstruction* lhs = cond->InputAt(0);
HInstruction* rhs = cond->InputAt(1);
- if (cond->IsEqual()) {
- return new (allocator) HNotEqual(lhs, rhs);
- } else if (cond->IsNotEqual()) {
- return new (allocator) HEqual(lhs, rhs);
- } else if (cond->IsLessThan()) {
- return new (allocator) HGreaterThanOrEqual(lhs, rhs);
- } else if (cond->IsLessThanOrEqual()) {
- return new (allocator) HGreaterThan(lhs, rhs);
- } else if (cond->IsGreaterThan()) {
- return new (allocator) HLessThanOrEqual(lhs, rhs);
- } else {
- DCHECK(cond->IsGreaterThanOrEqual());
- return new (allocator) HLessThan(lhs, rhs);
+ switch (cond->AsCondition()->GetOppositeCondition()) { // get *opposite*
+ case kCondEQ: return new (allocator) HEqual(lhs, rhs);
+ case kCondNE: return new (allocator) HNotEqual(lhs, rhs);
+ case kCondLT: return new (allocator) HLessThan(lhs, rhs);
+ case kCondLE: return new (allocator) HLessThanOrEqual(lhs, rhs);
+ case kCondGT: return new (allocator) HGreaterThan(lhs, rhs);
+ case kCondGE: return new (allocator) HGreaterThanOrEqual(lhs, rhs);
+ case kCondB: return new (allocator) HBelow(lhs, rhs);
+ case kCondBE: return new (allocator) HBelowOrEqual(lhs, rhs);
+ case kCondA: return new (allocator) HAbove(lhs, rhs);
+ case kCondAE: return new (allocator) HAboveOrEqual(lhs, rhs);
}
} else if (cond->IsIntConstant()) {
HIntConstant* int_const = cond->AsIntConstant();
@@ -91,11 +89,10 @@ static HInstruction* GetOppositeCondition(HInstruction* cond) {
DCHECK(int_const->IsOne());
return graph->GetIntConstant(0);
}
- } else {
- // General case when 'cond' is another instruction of type boolean,
- // as verified by SSAChecker.
- return new (allocator) HBooleanNot(cond);
}
+ // General case when 'cond' is another instruction of type boolean,
+ // as verified by SSAChecker.
+ return new (allocator) HBooleanNot(cond);
}
void HBooleanSimplifier::TryRemovingBooleanSelection(HBasicBlock* block) {
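
The rewritten GetOppositeCondition above keys the switch on the *opposite* of the current condition and then instantiates the HCondition node matching that opposite value, so a single table covers both the signed and the newly added unsigned comparisons. A minimal standalone sketch of the same negation rule, using a simplified enum rather than the real ART HInstruction classes (all names below are illustrative only):

#include <cassert>

enum IfCondition { kCondEQ, kCondNE, kCondLT, kCondLE, kCondGT, kCondGE,
                   kCondB, kCondBE, kCondA, kCondAE };

// Returns the condition that holds exactly when `cond` does not.
IfCondition Negate(IfCondition cond) {
  switch (cond) {
    case kCondEQ: return kCondNE;
    case kCondNE: return kCondEQ;
    case kCondLT: return kCondGE;   // signed <   ->  signed >=
    case kCondLE: return kCondGT;   // signed <=  ->  signed >
    case kCondGT: return kCondLE;
    case kCondGE: return kCondLT;
    case kCondB:  return kCondAE;   // unsigned <   ->  unsigned >=
    case kCondBE: return kCondA;    // unsigned <=  ->  unsigned >
    case kCondA:  return kCondBE;
    case kCondAE: return kCondB;
  }
  assert(false);
  return kCondEQ;
}
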
diff --git a/compiler/optimizing/bounds_check_elimination_test.cc b/compiler/optimizing/bounds_check_elimination_test.cc
index ce6dc75741..c9afdf2147 100644
--- a/compiler/optimizing/bounds_check_elimination_test.cc
+++ b/compiler/optimizing/bounds_check_elimination_test.cc
@@ -71,9 +71,9 @@ TEST_F(BoundsCheckEliminationTest, NarrowingRangeArrayBoundsElimination) {
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
HInstruction* parameter1 = new (&allocator_)
- HParameterValue(0, Primitive::kPrimNot); // array
+ HParameterValue(graph_->GetDexFile(), 0, 0, Primitive::kPrimNot); // array
HInstruction* parameter2 = new (&allocator_)
- HParameterValue(0, Primitive::kPrimInt); // i
+ HParameterValue(graph_->GetDexFile(), 0, 0, Primitive::kPrimInt); // i
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
@@ -168,9 +168,9 @@ TEST_F(BoundsCheckEliminationTest, OverflowArrayBoundsElimination) {
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
HInstruction* parameter1 = new (&allocator_)
- HParameterValue(0, Primitive::kPrimNot); // array
+ HParameterValue(graph_->GetDexFile(), 0, 0, Primitive::kPrimNot); // array
HInstruction* parameter2 = new (&allocator_)
- HParameterValue(0, Primitive::kPrimInt); // i
+ HParameterValue(graph_->GetDexFile(), 0, 0, Primitive::kPrimInt); // i
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
@@ -232,9 +232,9 @@ TEST_F(BoundsCheckEliminationTest, UnderflowArrayBoundsElimination) {
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
HInstruction* parameter1 = new (&allocator_)
- HParameterValue(0, Primitive::kPrimNot); // array
+ HParameterValue(graph_->GetDexFile(), 0, 0, Primitive::kPrimNot); // array
HInstruction* parameter2 = new (&allocator_)
- HParameterValue(0, Primitive::kPrimInt); // i
+ HParameterValue(graph_->GetDexFile(), 0, 0, Primitive::kPrimInt); // i
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
@@ -295,7 +295,8 @@ TEST_F(BoundsCheckEliminationTest, ConstantArrayBoundsElimination) {
HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator_) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (&allocator_) HParameterValue(
+ graph_->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry->AddInstruction(parameter);
HInstruction* constant_5 = graph_->GetIntConstant(5);
@@ -363,7 +364,8 @@ static HInstruction* BuildSSAGraph1(HGraph* graph,
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry->AddInstruction(parameter);
HInstruction* constant_initial = graph->GetIntConstant(initial);
@@ -477,7 +479,8 @@ static HInstruction* BuildSSAGraph2(HGraph *graph,
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry->AddInstruction(parameter);
HInstruction* constant_initial = graph->GetIntConstant(initial);
@@ -689,7 +692,8 @@ static HInstruction* BuildSSAGraph4(HGraph* graph,
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry->AddInstruction(parameter);
HInstruction* constant_initial = graph->GetIntConstant(initial);
@@ -791,7 +795,8 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) {
HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator_) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (&allocator_) HParameterValue(
+ graph_->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry->AddInstruction(parameter);
HInstruction* constant_0 = graph_->GetIntConstant(0);
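
All of these test updates follow the same mechanical pattern: HParameterValue now also receives the declaring dex file and a type index. A hedged sketch of the call shape, with the argument roles inferred from this diff rather than from the header (treat the parameter names as assumptions):

// Assumed roles, inferred from the builder.cc hunk below:
//   HParameterValue(dex_file, type_index, parameter_index, type, is_this = false)
HInstruction* parameter = new (&allocator_) HParameterValue(
    graph_->GetDexFile(), /* type_index */ 0, /* parameter_index */ 0, Primitive::kPrimNot);
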
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 21540e8ed7..5dd5be3259 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -159,9 +159,13 @@ void HGraphBuilder::InitializeParameters(uint16_t number_of_parameters) {
int locals_index = locals_.size() - number_of_parameters;
int parameter_index = 0;
+ const DexFile::MethodId& referrer_method_id =
+ dex_file_->GetMethodId(dex_compilation_unit_->GetDexMethodIndex());
if (!dex_compilation_unit_->IsStatic()) {
// Add the implicit 'this' argument, not expressed in the signature.
- HParameterValue* parameter = new (arena_) HParameterValue(parameter_index++,
+ HParameterValue* parameter = new (arena_) HParameterValue(*dex_file_,
+ referrer_method_id.class_idx_,
+ parameter_index++,
Primitive::kPrimNot,
true);
entry_block_->AddInstruction(parameter);
@@ -170,11 +174,16 @@ void HGraphBuilder::InitializeParameters(uint16_t number_of_parameters) {
number_of_parameters--;
}
- uint32_t pos = 1;
- for (int i = 0; i < number_of_parameters; i++) {
- HParameterValue* parameter = new (arena_) HParameterValue(parameter_index++,
- Primitive::GetType(shorty[pos++]),
- false);
+ const DexFile::ProtoId& proto = dex_file_->GetMethodPrototype(referrer_method_id);
+ const DexFile::TypeList* arg_types = dex_file_->GetProtoParameters(proto);
+ for (int i = 0, shorty_pos = 1; i < number_of_parameters; i++) {
+ HParameterValue* parameter = new (arena_) HParameterValue(
+ *dex_file_,
+ arg_types->GetTypeItem(shorty_pos - 1).type_idx_,
+ parameter_index++,
+ Primitive::GetType(shorty[shorty_pos]),
+ false);
+ ++shorty_pos;
entry_block_->AddInstruction(parameter);
HLocal* local = GetLocalAt(locals_index++);
// Store the parameter value in the local that the dex code will use
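
A worked example of the index bookkeeping in the loop above, relying on standard dex conventions (shorty position 0 is the return type, and neither the shorty parameters nor the proto type list include the implicit 'this'):

// Virtual method:  int foo(Object o, long l, float f)
//   shorty          = "ILJF"                  (shorty[0] = 'I' is the return type)
//   proto type list = { Object, long, float } (indices 0, 1, 2)
//
//   shorty_pos = 1  ('L')  ->  arg_types->GetTypeItem(0)  // Object
//   shorty_pos = 2  ('J')  ->  arg_types->GetTypeItem(1)  // long
//   shorty_pos = 3  ('F')  ->  arg_types->GetTypeItem(2)  // float
//
// The implicit 'this' is handled separately above, using the referrer's class_idx_.
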
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 6a743ebbc9..1c62dfa859 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -32,6 +32,10 @@
#include "code_generator_x86_64.h"
#endif
+#ifdef ART_ENABLE_CODEGEN_mips
+#include "code_generator_mips.h"
+#endif
+
#ifdef ART_ENABLE_CODEGEN_mips64
#include "code_generator_mips64.h"
#endif
@@ -742,11 +746,12 @@ CodeGenerator* CodeGenerator::Create(HGraph* graph,
}
#endif
#ifdef ART_ENABLE_CODEGEN_mips
- case kMips:
- UNUSED(compiler_options);
- UNUSED(graph);
- UNUSED(isa_features);
- return nullptr;
+ case kMips: {
+ return new mips::CodeGeneratorMIPS(graph,
+ *isa_features.AsMipsInstructionSetFeatures(),
+ compiler_options,
+ stats);
+ }
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64: {
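
Enabling a backend in CodeGenerator::Create takes two guarded touch points, both visible in this change; a trimmed sketch of the pattern (taken from the mips hunks above, bodies elided):

#ifdef ART_ENABLE_CODEGEN_mips
#include "code_generator_mips.h"
#endif

// ... inside the switch on the target instruction set in CodeGenerator::Create():
#ifdef ART_ENABLE_CODEGEN_mips
    case kMips: {
      return new mips::CodeGeneratorMIPS(graph,
                                         *isa_features.AsMipsInstructionSetFeatures(),
                                         compiler_options,
                                         stats);
    }
#endif
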
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 8c1820b6cd..92a5878476 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -409,7 +409,7 @@ class ArraySetSlowPathARM : public SlowPathCode {
#undef __
#define __ down_cast<ArmAssembler*>(GetAssembler())->
-inline Condition ARMSignedOrFPCondition(IfCondition cond) {
+inline Condition ARMCondition(IfCondition cond) {
switch (cond) {
case kCondEQ: return EQ;
case kCondNE: return NE;
@@ -417,19 +417,30 @@ inline Condition ARMSignedOrFPCondition(IfCondition cond) {
case kCondLE: return LE;
case kCondGT: return GT;
case kCondGE: return GE;
+ case kCondB: return LO;
+ case kCondBE: return LS;
+ case kCondA: return HI;
+ case kCondAE: return HS;
}
LOG(FATAL) << "Unreachable";
UNREACHABLE();
}
+// Maps signed condition to unsigned condition.
inline Condition ARMUnsignedCondition(IfCondition cond) {
switch (cond) {
case kCondEQ: return EQ;
case kCondNE: return NE;
+ // Signed to unsigned.
case kCondLT: return LO;
case kCondLE: return LS;
case kCondGT: return HI;
case kCondGE: return HS;
+ // Unsigned remain unchanged.
+ case kCondB: return LO;
+ case kCondBE: return LS;
+ case kCondA: return HI;
+ case kCondAE: return HS;
}
LOG(FATAL) << "Unreachable";
UNREACHABLE();
@@ -1130,8 +1141,7 @@ void LocationsBuilderARM::VisitExit(HExit* exit) {
exit->SetLocations(nullptr);
}
-void InstructionCodeGeneratorARM::VisitExit(HExit* exit) {
- UNUSED(exit);
+void InstructionCodeGeneratorARM::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}
void InstructionCodeGeneratorARM::GenerateCompareWithImmediate(Register left, int32_t right) {
@@ -1149,12 +1159,13 @@ void InstructionCodeGeneratorARM::GenerateFPJumps(HCondition* cond,
Label* true_label,
Label* false_label) {
__ vmstat(); // transfer FP status register to ARM APSR.
+ // TODO: merge into a single branch (except "equal or unordered" and "not equal")
if (cond->IsFPConditionTrueIfNaN()) {
__ b(true_label, VS); // VS for unordered.
} else if (cond->IsFPConditionFalseIfNaN()) {
__ b(false_label, VS); // VS for unordered.
}
- __ b(true_label, ARMSignedOrFPCondition(cond->GetCondition()));
+ __ b(true_label, ARMCondition(cond->GetCondition()));
}
void InstructionCodeGeneratorARM::GenerateLongComparesAndJumps(HCondition* cond,
@@ -1169,10 +1180,11 @@ void InstructionCodeGeneratorARM::GenerateLongComparesAndJumps(HCondition* cond,
Register left_low = left.AsRegisterPairLow<Register>();
IfCondition true_high_cond = if_cond;
IfCondition false_high_cond = cond->GetOppositeCondition();
- Condition final_condition = ARMUnsignedCondition(if_cond);
+ Condition final_condition = ARMUnsignedCondition(if_cond); // unsigned on lower part
// Set the conditions for the test, remembering that == needs to be
// decided using the low words.
+ // TODO: consider avoiding jumps with temporary and CMP low+SBC high
switch (if_cond) {
case kCondEQ:
case kCondNE:
@@ -1190,6 +1202,18 @@ void InstructionCodeGeneratorARM::GenerateLongComparesAndJumps(HCondition* cond,
case kCondGE:
true_high_cond = kCondGT;
break;
+ case kCondB:
+ false_high_cond = kCondA;
+ break;
+ case kCondBE:
+ true_high_cond = kCondB;
+ break;
+ case kCondA:
+ false_high_cond = kCondB;
+ break;
+ case kCondAE:
+ true_high_cond = kCondA;
+ break;
}
if (right.IsConstant()) {
int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
@@ -1198,12 +1222,12 @@ void InstructionCodeGeneratorARM::GenerateLongComparesAndJumps(HCondition* cond,
GenerateCompareWithImmediate(left_high, val_high);
if (if_cond == kCondNE) {
- __ b(true_label, ARMSignedOrFPCondition(true_high_cond));
+ __ b(true_label, ARMCondition(true_high_cond));
} else if (if_cond == kCondEQ) {
- __ b(false_label, ARMSignedOrFPCondition(false_high_cond));
+ __ b(false_label, ARMCondition(false_high_cond));
} else {
- __ b(true_label, ARMSignedOrFPCondition(true_high_cond));
- __ b(false_label, ARMSignedOrFPCondition(false_high_cond));
+ __ b(true_label, ARMCondition(true_high_cond));
+ __ b(false_label, ARMCondition(false_high_cond));
}
// Must be equal high, so compare the lows.
GenerateCompareWithImmediate(left_low, val_low);
@@ -1213,17 +1237,18 @@ void InstructionCodeGeneratorARM::GenerateLongComparesAndJumps(HCondition* cond,
__ cmp(left_high, ShifterOperand(right_high));
if (if_cond == kCondNE) {
- __ b(true_label, ARMSignedOrFPCondition(true_high_cond));
+ __ b(true_label, ARMCondition(true_high_cond));
} else if (if_cond == kCondEQ) {
- __ b(false_label, ARMSignedOrFPCondition(false_high_cond));
+ __ b(false_label, ARMCondition(false_high_cond));
} else {
- __ b(true_label, ARMSignedOrFPCondition(true_high_cond));
- __ b(false_label, ARMSignedOrFPCondition(false_high_cond));
+ __ b(true_label, ARMCondition(true_high_cond));
+ __ b(false_label, ARMCondition(false_high_cond));
}
// Must be equal high, so compare the lows.
__ cmp(left_low, ShifterOperand(right_low));
}
// The last comparison might be unsigned.
+ // TODO: optimize cases where this is always true/false
__ b(true_label, final_condition);
}
@@ -1315,7 +1340,7 @@ void InstructionCodeGeneratorARM::GenerateTestAndBranch(HInstruction* instructio
DCHECK(right.IsConstant());
GenerateCompareWithImmediate(left, CodeGenerator::GetInt32ValueOf(right.GetConstant()));
}
- __ b(true_target, ARMSignedOrFPCondition(cond->AsCondition()->GetCondition()));
+ __ b(true_target, ARMCondition(cond->AsCondition()->GetCondition()));
}
}
if (false_target != nullptr) {
@@ -1351,8 +1376,7 @@ void LocationsBuilderARM::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
HInstruction* cond = deoptimize->InputAt(0);
- DCHECK(cond->IsCondition());
- if (cond->AsCondition()->NeedsMaterialization()) {
+ if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
locations->SetInAt(0, Location::RequiresRegister());
}
}
@@ -1417,11 +1441,11 @@ void InstructionCodeGeneratorARM::VisitCondition(HCondition* cond) {
GenerateCompareWithImmediate(left.AsRegister<Register>(),
CodeGenerator::GetInt32ValueOf(right.GetConstant()));
}
- __ it(ARMSignedOrFPCondition(cond->GetCondition()), kItElse);
+ __ it(ARMCondition(cond->GetCondition()), kItElse);
__ mov(locations->Out().AsRegister<Register>(), ShifterOperand(1),
- ARMSignedOrFPCondition(cond->GetCondition()));
+ ARMCondition(cond->GetCondition()));
__ mov(locations->Out().AsRegister<Register>(), ShifterOperand(0),
- ARMSignedOrFPCondition(cond->GetOppositeCondition()));
+ ARMCondition(cond->GetOppositeCondition()));
return;
}
case Primitive::kPrimLong:
@@ -1500,6 +1524,38 @@ void InstructionCodeGeneratorARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* c
VisitCondition(comp);
}
+void LocationsBuilderARM::VisitBelow(HBelow* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorARM::VisitBelow(HBelow* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderARM::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorARM::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderARM::VisitAbove(HAbove* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorARM::VisitAbove(HAbove* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderARM::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorARM::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ VisitCondition(comp);
+}
+
void LocationsBuilderARM::VisitLocal(HLocal* local) {
local->SetLocations(nullptr);
}
@@ -1512,9 +1568,8 @@ void LocationsBuilderARM::VisitLoadLocal(HLoadLocal* load) {
load->SetLocations(nullptr);
}
-void InstructionCodeGeneratorARM::VisitLoadLocal(HLoadLocal* load) {
+void InstructionCodeGeneratorARM::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
- UNUSED(load);
}
void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
@@ -1541,8 +1596,7 @@ void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
}
}
-void InstructionCodeGeneratorARM::VisitStoreLocal(HStoreLocal* store) {
- UNUSED(store);
+void InstructionCodeGeneratorARM::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
}
void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
@@ -1551,9 +1605,8 @@ void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant) {
+void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderARM::VisitNullConstant(HNullConstant* constant) {
@@ -1562,9 +1615,8 @@ void LocationsBuilderARM::VisitNullConstant(HNullConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM::VisitNullConstant(HNullConstant* constant) {
+void InstructionCodeGeneratorARM::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
@@ -1573,9 +1625,8 @@ void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM::VisitLongConstant(HLongConstant* constant) {
+void InstructionCodeGeneratorARM::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderARM::VisitFloatConstant(HFloatConstant* constant) {
@@ -1584,9 +1635,8 @@ void LocationsBuilderARM::VisitFloatConstant(HFloatConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM::VisitFloatConstant(HFloatConstant* constant) {
+void InstructionCodeGeneratorARM::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderARM::VisitDoubleConstant(HDoubleConstant* constant) {
@@ -1595,9 +1645,8 @@ void LocationsBuilderARM::VisitDoubleConstant(HDoubleConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM::VisitDoubleConstant(HDoubleConstant* constant) {
+void InstructionCodeGeneratorARM::VisitDoubleConstant(HDoubleConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
@@ -1612,8 +1661,7 @@ void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) {
ret->SetLocations(nullptr);
}
-void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret) {
- UNUSED(ret);
+void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
codegen_->GenerateFrameExit();
}
@@ -1623,8 +1671,7 @@ void LocationsBuilderARM::VisitReturn(HReturn* ret) {
locations->SetInAt(0, parameter_visitor_.GetReturnLocation(ret->InputAt(0)->GetType()));
}
-void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret) {
- UNUSED(ret);
+void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
codegen_->GenerateFrameExit();
}
@@ -3270,8 +3317,7 @@ void LocationsBuilderARM::VisitPhi(HPhi* instruction) {
locations->SetOut(Location::Any());
}
-void InstructionCodeGeneratorARM::VisitPhi(HPhi* instruction) {
- UNUSED(instruction);
+void InstructionCodeGeneratorARM::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unreachable";
}
@@ -4232,13 +4278,11 @@ void LocationsBuilderARM::VisitTemporary(HTemporary* temp) {
temp->SetLocations(nullptr);
}
-void InstructionCodeGeneratorARM::VisitTemporary(HTemporary* temp) {
+void InstructionCodeGeneratorARM::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
- UNUSED(temp);
}
-void LocationsBuilderARM::VisitParallelMove(HParallelMove* instruction) {
- UNUSED(instruction);
+void LocationsBuilderARM::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unreachable";
}
@@ -5286,15 +5330,13 @@ Literal* CodeGeneratorARM::DeduplicateMethodCodeLiteral(MethodReference target_m
return DeduplicateMethodLiteral(target_method, &call_patches_);
}
-void LocationsBuilderARM::VisitBoundType(HBoundType* instruction) {
+void LocationsBuilderARM::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
- UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
-void InstructionCodeGeneratorARM::VisitBoundType(HBoundType* instruction) {
+void InstructionCodeGeneratorARM::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
- UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
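
GenerateLongComparesAndJumps above lowers a 64-bit comparison to 32-bit operations: the high words are compared first with the signedness of the original condition, and only when they are equal does the low-word comparison decide, always unsigned (hence final_condition = ARMUnsignedCondition(if_cond)). A portable sketch of that rule in plain C++, independent of the actual ARM emission:

#include <cstdint>

// True iff a < b for signed 64-bit values, using only 32-bit comparisons.
bool SignedLessThan64(int64_t a, int64_t b) {
  int32_t a_hi = static_cast<int32_t>(static_cast<uint64_t>(a) >> 32);
  int32_t b_hi = static_cast<int32_t>(static_cast<uint64_t>(b) >> 32);
  uint32_t a_lo = static_cast<uint32_t>(a);
  uint32_t b_lo = static_cast<uint32_t>(b);
  if (a_hi != b_hi) {
    return a_hi < b_hi;  // Different high words: the signed compare decides.
  }
  return a_lo < b_lo;    // Equal high words: compare the low words unsigned.
}

For an unsigned 64-bit comparison the high-word compare simply becomes unsigned as well, which is what the kCondB/kCondBE/kCondA/kCondAE cases added to the switch encode.
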
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index c94da86d2c..f68b11b504 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -77,6 +77,10 @@ inline Condition ARM64Condition(IfCondition cond) {
case kCondLE: return le;
case kCondGT: return gt;
case kCondGE: return ge;
+ case kCondB: return lo;
+ case kCondBE: return ls;
+ case kCondA: return hi;
+ case kCondAE: return hs;
}
LOG(FATAL) << "Unreachable";
UNREACHABLE();
@@ -1326,8 +1330,7 @@ enum UnimplementedInstructionBreakCode {
};
#define DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS(name) \
- void InstructionCodeGeneratorARM64::Visit##name(H##name* instr) { \
- UNUSED(instr); \
+ void InstructionCodeGeneratorARM64::Visit##name(H##name* instr ATTRIBUTE_UNUSED) { \
__ Brk(UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name)); \
} \
void LocationsBuilderARM64::Visit##name(H##name* instr) { \
@@ -1937,7 +1940,11 @@ void InstructionCodeGeneratorARM64::VisitCondition(HCondition* instruction) {
M(LessThan) \
M(LessThanOrEqual) \
M(GreaterThan) \
- M(GreaterThanOrEqual)
+ M(GreaterThanOrEqual) \
+ M(Below) \
+ M(BelowOrEqual) \
+ M(Above) \
+ M(AboveOrEqual)
#define DEFINE_CONDITION_VISITORS(Name) \
void LocationsBuilderARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); } \
void InstructionCodeGeneratorARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); }
@@ -2175,8 +2182,8 @@ void LocationsBuilderARM64::VisitDoubleConstant(HDoubleConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM64::VisitDoubleConstant(HDoubleConstant* constant) {
- UNUSED(constant);
+void InstructionCodeGeneratorARM64::VisitDoubleConstant(
+ HDoubleConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
}
@@ -2184,8 +2191,7 @@ void LocationsBuilderARM64::VisitExit(HExit* exit) {
exit->SetLocations(nullptr);
}
-void InstructionCodeGeneratorARM64::VisitExit(HExit* exit) {
- UNUSED(exit);
+void InstructionCodeGeneratorARM64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}
void LocationsBuilderARM64::VisitFloatConstant(HFloatConstant* constant) {
@@ -2194,8 +2200,7 @@ void LocationsBuilderARM64::VisitFloatConstant(HFloatConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM64::VisitFloatConstant(HFloatConstant* constant) {
- UNUSED(constant);
+void InstructionCodeGeneratorARM64::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
}
@@ -2348,8 +2353,7 @@ void LocationsBuilderARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
HInstruction* cond = deoptimize->InputAt(0);
- DCHECK(cond->IsCondition());
- if (cond->AsCondition()->NeedsMaterialization()) {
+ if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
locations->SetInAt(0, Location::RequiresRegister());
}
}
@@ -2682,9 +2686,8 @@ void LocationsBuilderARM64::VisitIntConstant(HIntConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant) {
+void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderARM64::VisitNullConstant(HNullConstant* constant) {
@@ -2692,9 +2695,8 @@ void LocationsBuilderARM64::VisitNullConstant(HNullConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM64::VisitNullConstant(HNullConstant* constant) {
+void InstructionCodeGeneratorARM64::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderARM64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
@@ -3085,9 +3087,8 @@ void LocationsBuilderARM64::VisitLoadLocal(HLoadLocal* load) {
load->SetLocations(nullptr);
}
-void InstructionCodeGeneratorARM64::VisitLoadLocal(HLoadLocal* load) {
+void InstructionCodeGeneratorARM64::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
- UNUSED(load);
}
void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
@@ -3124,9 +3125,8 @@ void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant) {
+void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
@@ -3393,8 +3393,7 @@ void LocationsBuilderARM64::VisitPhi(HPhi* instruction) {
locations->SetOut(Location::Any());
}
-void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction) {
- UNUSED(instruction);
+void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unreachable";
}
@@ -3464,8 +3463,7 @@ void LocationsBuilderARM64::VisitReturn(HReturn* instruction) {
locations->SetInAt(0, ARM64ReturnLocation(return_type));
}
-void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction) {
- UNUSED(instruction);
+void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction ATTRIBUTE_UNUSED) {
codegen_->GenerateFrameExit();
}
@@ -3473,8 +3471,7 @@ void LocationsBuilderARM64::VisitReturnVoid(HReturnVoid* instruction) {
instruction->SetLocations(nullptr);
}
-void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction) {
- UNUSED(instruction);
+void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction ATTRIBUTE_UNUSED) {
codegen_->GenerateFrameExit();
}
@@ -3518,8 +3515,7 @@ void LocationsBuilderARM64::VisitStoreLocal(HStoreLocal* store) {
}
}
-void InstructionCodeGeneratorARM64::VisitStoreLocal(HStoreLocal* store) {
- UNUSED(store);
+void InstructionCodeGeneratorARM64::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
}
void LocationsBuilderARM64::VisitSub(HSub* instruction) {
@@ -3636,9 +3632,8 @@ void LocationsBuilderARM64::VisitTemporary(HTemporary* temp) {
temp->SetLocations(nullptr);
}
-void InstructionCodeGeneratorARM64::VisitTemporary(HTemporary* temp) {
+void InstructionCodeGeneratorARM64::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
- UNUSED(temp);
}
void LocationsBuilderARM64::VisitThrow(HThrow* instruction) {
@@ -3737,15 +3732,13 @@ void InstructionCodeGeneratorARM64::VisitXor(HXor* instruction) {
HandleBinaryOp(instruction);
}
-void LocationsBuilderARM64::VisitBoundType(HBoundType* instruction) {
+void LocationsBuilderARM64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
- UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
-void InstructionCodeGeneratorARM64::VisitBoundType(HBoundType* instruction) {
+void InstructionCodeGeneratorARM64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
- UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
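
Both the ARM and ARM64 condition-mapping switches above gain cases for the new unsigned HIR comparisons; the corresponding condition codes test the carry and zero flags after a compare. A self-contained sketch of the mapping (the enum and string mnemonics here are illustrative, not the assembler's own constants):

enum class UnsignedCond { kBelow, kBelowOrEqual, kAbove, kAboveOrEqual };

// Condition mnemonic each new HIR condition lowers to on AArch32/AArch64
// (LO/LS/HI/HS, lowercase on ARM64).
const char* ToArmConditionMnemonic(UnsignedCond cond) {
  switch (cond) {
    case UnsignedCond::kBelow:        return "LO";  // unsigned <  : carry clear
    case UnsignedCond::kBelowOrEqual: return "LS";  // unsigned <= : carry clear or zero set
    case UnsignedCond::kAbove:        return "HI";  // unsigned >  : carry set and zero clear
    case UnsignedCond::kAboveOrEqual: return "HS";  // unsigned >= : carry set
  }
  return "";
}
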
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
new file mode 100644
index 0000000000..4404aa3289
--- /dev/null
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -0,0 +1,4186 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "code_generator_mips.h"
+
+#include "arch/mips/entrypoints_direct_mips.h"
+#include "arch/mips/instruction_set_features_mips.h"
+#include "art_method.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "entrypoints/quick/quick_entrypoints_enum.h"
+#include "gc/accounting/card_table.h"
+#include "intrinsics.h"
+#include "mirror/array-inl.h"
+#include "mirror/class-inl.h"
+#include "offsets.h"
+#include "thread.h"
+#include "utils/assembler.h"
+#include "utils/mips/assembler_mips.h"
+#include "utils/stack_checks.h"
+
+namespace art {
+namespace mips {
+
+static constexpr int kCurrentMethodStackOffset = 0;
+static constexpr Register kMethodRegisterArgument = A0;
+
+// We need extra temporary/scratch registers (in addition to AT) in some cases.
+static constexpr Register TMP = T8;
+static constexpr FRegister FTMP = F8;
+
+// ART Thread Register.
+static constexpr Register TR = S1;
+
+Location MipsReturnLocation(Primitive::Type return_type) {
+ switch (return_type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ return Location::RegisterLocation(V0);
+
+ case Primitive::kPrimLong:
+ return Location::RegisterPairLocation(V0, V1);
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ return Location::FpuRegisterLocation(F0);
+
+ case Primitive::kPrimVoid:
+ return Location();
+ }
+ UNREACHABLE();
+}
+
+Location InvokeDexCallingConventionVisitorMIPS::GetReturnLocation(Primitive::Type type) const {
+ return MipsReturnLocation(type);
+}
+
+Location InvokeDexCallingConventionVisitorMIPS::GetMethodLocation() const {
+ return Location::RegisterLocation(kMethodRegisterArgument);
+}
+
+Location InvokeDexCallingConventionVisitorMIPS::GetNextLocation(Primitive::Type type) {
+ Location next_location;
+
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ uint32_t gp_index = gp_index_++;
+ if (gp_index < calling_convention.GetNumberOfRegisters()) {
+ next_location = Location::RegisterLocation(calling_convention.GetRegisterAt(gp_index));
+ } else {
+ size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
+ next_location = Location::StackSlot(stack_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ uint32_t gp_index = gp_index_;
+ gp_index_ += 2;
+ if (gp_index + 1 < calling_convention.GetNumberOfRegisters()) {
+ if (calling_convention.GetRegisterAt(gp_index) == A1) {
+ gp_index_++; // Skip A1, and use A2_A3 instead.
+ gp_index++;
+ }
+ Register low_even = calling_convention.GetRegisterAt(gp_index);
+ Register high_odd = calling_convention.GetRegisterAt(gp_index + 1);
+ DCHECK_EQ(low_even + 1, high_odd);
+ next_location = Location::RegisterPairLocation(low_even, high_odd);
+ } else {
+ size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
+ next_location = Location::DoubleStackSlot(stack_offset);
+ }
+ break;
+ }
+
+ // Note: both float and double types are stored in even FPU registers. On 32 bit FPU, double
+ // will take up the even/odd pair, while floats are stored in even regs only.
+ // On 64 bit FPU, both double and float are stored in even registers only.
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ uint32_t float_index = float_index_++;
+ if (float_index < calling_convention.GetNumberOfFpuRegisters()) {
+ next_location = Location::FpuRegisterLocation(
+ calling_convention.GetFpuRegisterAt(float_index));
+ } else {
+ size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
+ next_location = Primitive::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
+ : Location::StackSlot(stack_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unexpected parameter type " << type;
+ break;
+ }
+
+ // Space on the stack is reserved for all arguments.
+ stack_index_ += Primitive::Is64BitType(type) ? 2 : 1;
+
+ return next_location;
+}
+
+Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type type) {
+ return MipsReturnLocation(type);
+}
+
+#define __ down_cast<CodeGeneratorMIPS*>(codegen)->GetAssembler()->
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, x).Int32Value()
+
+class BoundsCheckSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+ explicit BoundsCheckSlowPathMIPS(HBoundsCheck* instruction) : instruction_(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+ __ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
+ InvokeRuntimeCallingConvention calling_convention;
+ codegen->EmitParallelMoves(locations->InAt(0),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimInt,
+ locations->InAt(1),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimInt);
+ mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
+ instruction_,
+ instruction_->GetDexPc(),
+ this,
+ IsDirectEntrypoint(kQuickThrowArrayBounds));
+ CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
+ }
+
+ bool IsFatal() const OVERRIDE { return true; }
+
+ const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS"; }
+
+ private:
+ HBoundsCheck* const instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS);
+};
+
+class DivZeroCheckSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+ explicit DivZeroCheckSlowPathMIPS(HDivZeroCheck* instruction) : instruction_(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+ __ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
+ mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero),
+ instruction_,
+ instruction_->GetDexPc(),
+ this,
+ IsDirectEntrypoint(kQuickThrowDivZero));
+ CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
+ }
+
+ bool IsFatal() const OVERRIDE { return true; }
+
+ const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS"; }
+
+ private:
+ HDivZeroCheck* const instruction_;
+ DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS);
+};
+
+class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+ LoadClassSlowPathMIPS(HLoadClass* cls,
+ HInstruction* at,
+ uint32_t dex_pc,
+ bool do_clinit)
+ : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ DCHECK(at->IsLoadClass() || at->IsClinitCheck());
+ }
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = at_->GetLocations();
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ __ LoadConst32(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
+
+ int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
+ : QUICK_ENTRY_POINT(pInitializeType);
+ bool direct = do_clinit_ ? IsDirectEntrypoint(kQuickInitializeStaticStorage)
+ : IsDirectEntrypoint(kQuickInitializeType);
+
+ mips_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this, direct);
+ if (do_clinit_) {
+ CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
+ } else {
+ CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
+ }
+
+ // Move the class to the desired location.
+ Location out = locations->Out();
+ if (out.IsValid()) {
+ DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
+ Primitive::Type type = at_->GetType();
+ mips_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
+ }
+
+ RestoreLiveRegisters(codegen, locations);
+ __ B(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathMIPS"; }
+
+ private:
+ // The class this slow path will load.
+ HLoadClass* const cls_;
+
+ // The instruction where this slow path is happening.
+ // (Might be the load class or an initialization check).
+ HInstruction* const at_;
+
+ // The dex PC of `at_`.
+ const uint32_t dex_pc_;
+
+ // Whether to initialize the class.
+ const bool do_clinit_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS);
+};
+
+class LoadStringSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+ explicit LoadStringSlowPathMIPS(HLoadString* instruction) : instruction_(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ __ LoadConst32(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
+ mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
+ instruction_,
+ instruction_->GetDexPc(),
+ this,
+ IsDirectEntrypoint(kQuickResolveString));
+ CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
+ Primitive::Type type = instruction_->GetType();
+ mips_codegen->MoveLocation(locations->Out(),
+ calling_convention.GetReturnLocation(type),
+ type);
+
+ RestoreLiveRegisters(codegen, locations);
+ __ B(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS"; }
+
+ private:
+ HLoadString* const instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS);
+};
+
+class NullCheckSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+ explicit NullCheckSlowPathMIPS(HNullCheck* instr) : instruction_(instr) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+ __ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
+ mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
+ instruction_,
+ instruction_->GetDexPc(),
+ this,
+ IsDirectEntrypoint(kQuickThrowNullPointer));
+ CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
+ }
+
+ bool IsFatal() const OVERRIDE { return true; }
+
+ const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS"; }
+
+ private:
+ HNullCheck* const instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS);
+};
+
+class SuspendCheckSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+ SuspendCheckSlowPathMIPS(HSuspendCheck* instruction, HBasicBlock* successor)
+ : instruction_(instruction), successor_(successor) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
+ instruction_,
+ instruction_->GetDexPc(),
+ this,
+ IsDirectEntrypoint(kQuickTestSuspend));
+ CheckEntrypointTypes<kQuickTestSuspend, void, void>();
+ RestoreLiveRegisters(codegen, instruction_->GetLocations());
+ if (successor_ == nullptr) {
+ __ B(GetReturnLabel());
+ } else {
+ __ B(mips_codegen->GetLabelOf(successor_));
+ }
+ }
+
+ MipsLabel* GetReturnLabel() {
+ DCHECK(successor_ == nullptr);
+ return &return_label_;
+ }
+
+ const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS"; }
+
+ private:
+ HSuspendCheck* const instruction_;
+ // If not null, the block to branch to after the suspend check.
+ HBasicBlock* const successor_;
+
+ // If `successor_` is null, the label to branch to after the suspend check.
+ MipsLabel return_label_;
+
+ DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathMIPS);
+};
+
+class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+ explicit TypeCheckSlowPathMIPS(HInstruction* instruction) : instruction_(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0) : locations->Out();
+ uint32_t dex_pc = instruction_->GetDexPc();
+ DCHECK(instruction_->IsCheckCast()
+ || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations);
+
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
+ InvokeRuntimeCallingConvention calling_convention;
+ codegen->EmitParallelMoves(locations->InAt(1),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimNot,
+ object_class,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimNot);
+
+ if (instruction_->IsInstanceOf()) {
+ mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
+ instruction_,
+ dex_pc,
+ this,
+ IsDirectEntrypoint(kQuickInstanceofNonTrivial));
+ Primitive::Type ret_type = instruction_->GetType();
+ Location ret_loc = calling_convention.GetReturnLocation(ret_type);
+ mips_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
+ CheckEntrypointTypes<kQuickInstanceofNonTrivial,
+ uint32_t,
+ const mirror::Class*,
+ const mirror::Class*>();
+ } else {
+ DCHECK(instruction_->IsCheckCast());
+ mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
+ instruction_,
+ dex_pc,
+ this,
+ IsDirectEntrypoint(kQuickCheckCast));
+ CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
+ }
+
+ RestoreLiveRegisters(codegen, locations);
+ __ B(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS"; }
+
+ private:
+ HInstruction* const instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathMIPS);
+};
+
+class DeoptimizationSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+ explicit DeoptimizationSlowPathMIPS(HInstruction* instruction)
+ : instruction_(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ DCHECK(instruction_->IsDeoptimize());
+ HDeoptimize* deoptimize = instruction_->AsDeoptimize();
+ uint32_t dex_pc = deoptimize->GetDexPc();
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+ mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
+ instruction_,
+ dex_pc,
+ this,
+ IsDirectEntrypoint(kQuickDeoptimize));
+ }
+
+ const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS"; }
+
+ private:
+ HInstruction* const instruction_;
+ DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS);
+};
+
+CodeGeneratorMIPS::CodeGeneratorMIPS(HGraph* graph,
+ const MipsInstructionSetFeatures& isa_features,
+ const CompilerOptions& compiler_options,
+ OptimizingCompilerStats* stats)
+ : CodeGenerator(graph,
+ kNumberOfCoreRegisters,
+ kNumberOfFRegisters,
+ kNumberOfRegisterPairs,
+ ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
+ arraysize(kCoreCalleeSaves)),
+ ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
+ arraysize(kFpuCalleeSaves)),
+ compiler_options,
+ stats),
+ block_labels_(nullptr),
+ location_builder_(graph, this),
+ instruction_visitor_(graph, this),
+ move_resolver_(graph->GetArena(), this),
+ assembler_(&isa_features),
+ isa_features_(isa_features) {
+ // Save RA (containing the return address) to mimic Quick.
+ AddAllocatedRegister(Location::RegisterLocation(RA));
+}
+
+#undef __
+#define __ down_cast<MipsAssembler*>(GetAssembler())->
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, x).Int32Value()
+
+void CodeGeneratorMIPS::Finalize(CodeAllocator* allocator) {
+ // Ensure that we fix up branches.
+ __ FinalizeCode();
+
+ // Adjust native pc offsets in stack maps.
+ for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
+ uint32_t old_position = stack_map_stream_.GetStackMap(i).native_pc_offset;
+ uint32_t new_position = __ GetAdjustedPosition(old_position);
+ DCHECK_GE(new_position, old_position);
+ stack_map_stream_.SetStackMapNativePcOffset(i, new_position);
+ }
+
+ // Adjust pc offsets for the disassembly information.
+ if (disasm_info_ != nullptr) {
+ GeneratedCodeInterval* frame_entry_interval = disasm_info_->GetFrameEntryInterval();
+ frame_entry_interval->start = __ GetAdjustedPosition(frame_entry_interval->start);
+ frame_entry_interval->end = __ GetAdjustedPosition(frame_entry_interval->end);
+ for (auto& it : *disasm_info_->GetInstructionIntervals()) {
+ it.second.start = __ GetAdjustedPosition(it.second.start);
+ it.second.end = __ GetAdjustedPosition(it.second.end);
+ }
+ for (auto& it : *disasm_info_->GetSlowPathIntervals()) {
+ it.code_interval.start = __ GetAdjustedPosition(it.code_interval.start);
+ it.code_interval.end = __ GetAdjustedPosition(it.code_interval.end);
+ }
+ }
+
+ CodeGenerator::Finalize(allocator);
+}
+
+MipsAssembler* ParallelMoveResolverMIPS::GetAssembler() const {
+ return codegen_->GetAssembler();
+}
+
+void ParallelMoveResolverMIPS::EmitMove(size_t index) {
+ DCHECK_LT(index, moves_.size());
+ MoveOperands* move = moves_[index];
+ codegen_->MoveLocation(move->GetDestination(), move->GetSource(), move->GetType());
+}
+
+void ParallelMoveResolverMIPS::EmitSwap(size_t index) {
+ DCHECK_LT(index, moves_.size());
+ MoveOperands* move = moves_[index];
+ Primitive::Type type = move->GetType();
+ Location loc1 = move->GetDestination();
+ Location loc2 = move->GetSource();
+
+ DCHECK(!loc1.IsConstant());
+ DCHECK(!loc2.IsConstant());
+
+ if (loc1.Equals(loc2)) {
+ return;
+ }
+
+ if (loc1.IsRegister() && loc2.IsRegister()) {
+ // Swap 2 GPRs.
+ Register r1 = loc1.AsRegister<Register>();
+ Register r2 = loc2.AsRegister<Register>();
+ __ Move(TMP, r2);
+ __ Move(r2, r1);
+ __ Move(r1, TMP);
+ } else if (loc1.IsFpuRegister() && loc2.IsFpuRegister()) {
+ FRegister f1 = loc1.AsFpuRegister<FRegister>();
+ FRegister f2 = loc2.AsFpuRegister<FRegister>();
+ if (type == Primitive::kPrimFloat) {
+ __ MovS(FTMP, f2);
+ __ MovS(f2, f1);
+ __ MovS(f1, FTMP);
+ } else {
+ DCHECK_EQ(type, Primitive::kPrimDouble);
+ __ MovD(FTMP, f2);
+ __ MovD(f2, f1);
+ __ MovD(f1, FTMP);
+ }
+ } else if ((loc1.IsRegister() && loc2.IsFpuRegister()) ||
+ (loc1.IsFpuRegister() && loc2.IsRegister())) {
+ // Swap FPR and GPR.
+ DCHECK_EQ(type, Primitive::kPrimFloat); // Can only swap a float.
+ FRegister f1 = loc1.IsFpuRegister() ? loc1.AsFpuRegister<FRegister>()
+ : loc2.AsFpuRegister<FRegister>();
+ Register r2 = loc1.IsRegister() ? loc1.AsRegister<Register>()
+ : loc2.AsRegister<Register>();
+ __ Move(TMP, r2);
+ __ Mfc1(r2, f1);
+ __ Mtc1(TMP, f1);
+ } else if (loc1.IsRegisterPair() && loc2.IsRegisterPair()) {
+ // Swap 2 GPR register pairs.
+ Register r1 = loc1.AsRegisterPairLow<Register>();
+ Register r2 = loc2.AsRegisterPairLow<Register>();
+ __ Move(TMP, r2);
+ __ Move(r2, r1);
+ __ Move(r1, TMP);
+ r1 = loc1.AsRegisterPairHigh<Register>();
+ r2 = loc2.AsRegisterPairHigh<Register>();
+ __ Move(TMP, r2);
+ __ Move(r2, r1);
+ __ Move(r1, TMP);
+ } else if ((loc1.IsRegisterPair() && loc2.IsFpuRegister()) ||
+ (loc1.IsFpuRegister() && loc2.IsRegisterPair())) {
+ // Swap FPR and GPR register pair.
+ DCHECK_EQ(type, Primitive::kPrimDouble);
+ FRegister f1 = loc1.IsFpuRegister() ? loc1.AsFpuRegister<FRegister>()
+ : loc2.AsFpuRegister<FRegister>();
+ Register r2_l = loc1.IsRegisterPair() ? loc1.AsRegisterPairLow<Register>()
+ : loc2.AsRegisterPairLow<Register>();
+ Register r2_h = loc1.IsRegisterPair() ? loc1.AsRegisterPairHigh<Register>()
+ : loc2.AsRegisterPairHigh<Register>();
+ // Use 2 temporary registers because we can't first swap the low 32 bits of an FPR and
+ // then swap the high 32 bits of the same FPR. mtc1 makes the high 32 bits of an FPR
+ // unpredictable and the following mfch1 will fail.
+ __ Mfc1(TMP, f1);
+ __ Mfhc1(AT, f1);
+ __ Mtc1(r2_l, f1);
+ __ Mthc1(r2_h, f1);
+ __ Move(r2_l, TMP);
+ __ Move(r2_h, AT);
+ } else if (loc1.IsStackSlot() && loc2.IsStackSlot()) {
+ Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot */ false);
+ } else if (loc1.IsDoubleStackSlot() && loc2.IsDoubleStackSlot()) {
+ Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot */ true);
+ } else {
+ LOG(FATAL) << "Swap between " << loc1 << " and " << loc2 << " is unsupported";
+ }
+}
+
+void ParallelMoveResolverMIPS::RestoreScratch(int reg) {
+ __ Pop(static_cast<Register>(reg));
+}
+
+void ParallelMoveResolverMIPS::SpillScratch(int reg) {
+ __ Push(static_cast<Register>(reg));
+}
+
+void ParallelMoveResolverMIPS::Exchange(int index1, int index2, bool double_slot) {
+ // Allocate a scratch register other than TMP, if available.
+ // Else, spill V0 (arbitrary choice) and use it as a scratch register (it will be
+ // automatically unspilled when the scratch scope object is destroyed).
+ ScratchRegisterScope ensure_scratch(this, TMP, V0, codegen_->GetNumberOfCoreRegisters());
+ // If V0 spills onto the stack, SP-relative offsets need to be adjusted.
+ int stack_offset = ensure_scratch.IsSpilled() ? kMipsWordSize : 0;
+ for (int i = 0; i <= (double_slot ? 1 : 0); i++, stack_offset += kMipsWordSize) {
+ __ LoadFromOffset(kLoadWord,
+ Register(ensure_scratch.GetRegister()),
+ SP,
+ index1 + stack_offset);
+ __ LoadFromOffset(kLoadWord,
+ TMP,
+ SP,
+ index2 + stack_offset);
+ __ StoreToOffset(kStoreWord,
+ Register(ensure_scratch.GetRegister()),
+ SP,
+ index2 + stack_offset);
+ __ StoreToOffset(kStoreWord, TMP, SP, index1 + stack_offset);
+ }
+}
+
+static dwarf::Reg DWARFReg(Register reg) {
+ return dwarf::Reg::MipsCore(static_cast<int>(reg));
+}
+
+// TODO: mapping of floating-point registers to DWARF.
+
+void CodeGeneratorMIPS::GenerateFrameEntry() {
+ __ Bind(&frame_entry_label_);
+
+ bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kMips) || !IsLeafMethod();
+
+ if (do_overflow_check) {
+ __ LoadFromOffset(kLoadWord,
+ ZERO,
+ SP,
+ -static_cast<int32_t>(GetStackOverflowReservedBytes(kMips)));
+ RecordPcInfo(nullptr, 0);
+ }
+
+ if (HasEmptyFrame()) {
+ return;
+ }
+
+ // Make sure the frame size isn't unreasonably large.
+ if (GetFrameSize() > GetStackOverflowReservedBytes(kMips)) {
+ LOG(FATAL) << "Stack frame larger than " << GetStackOverflowReservedBytes(kMips) << " bytes";
+ }
+
+ // Spill callee-saved registers.
+ // Note that their cumulative size is small and they can be indexed using
+ // 16-bit offsets.
+
+ // TODO: increment/decrement SP in one step instead of two or remove this comment.
+
+ uint32_t ofs = FrameEntrySpillSize();
+ bool unaligned_float = ofs & 0x7;
+ bool fpu_32bit = isa_features_.Is32BitFloatingPoint();
+ __ IncreaseFrameSize(ofs);
+
+ for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) {
+ Register reg = kCoreCalleeSaves[i];
+ if (allocated_registers_.ContainsCoreRegister(reg)) {
+ ofs -= kMipsWordSize;
+ __ Sw(reg, SP, ofs);
+ __ cfi().RelOffset(DWARFReg(reg), ofs);
+ }
+ }
+
+ for (int i = arraysize(kFpuCalleeSaves) - 1; i >= 0; --i) {
+ FRegister reg = kFpuCalleeSaves[i];
+ if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
+ ofs -= kMipsDoublewordSize;
+ // TODO: Change the frame to avoid unaligned accesses for fpu registers.
+ if (unaligned_float) {
+ if (fpu_32bit) {
+ __ Swc1(reg, SP, ofs);
+ __ Swc1(static_cast<FRegister>(reg + 1), SP, ofs + 4);
+ } else {
+ __ Mfhc1(TMP, reg);
+ __ Swc1(reg, SP, ofs);
+ __ Sw(TMP, SP, ofs + 4);
+ }
+ } else {
+ __ Sdc1(reg, SP, ofs);
+ }
+ // TODO: __ cfi().RelOffset(DWARFReg(reg), ofs);
+ }
+ }
+
+ // Allocate the rest of the frame and store the current method pointer
+ // at its end.
+
+ __ IncreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());
+
+ static_assert(IsInt<16>(kCurrentMethodStackOffset),
+ "kCurrentMethodStackOffset must fit into int16_t");
+ __ Sw(kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
+}
+
+void CodeGeneratorMIPS::GenerateFrameExit() {
+ __ cfi().RememberState();
+
+ if (!HasEmptyFrame()) {
+ // Deallocate the rest of the frame.
+
+ __ DecreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());
+
+ // Restore callee-saved registers.
+ // Note that their cumulative size is small and they can be indexed using
+ // 16-bit offsets.
+
+ // TODO: increment/decrement SP in one step instead of two or remove this comment.
+
+ uint32_t ofs = 0;
+ bool unaligned_float = FrameEntrySpillSize() & 0x7;
+ bool fpu_32bit = isa_features_.Is32BitFloatingPoint();
+
+ for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
+ FRegister reg = kFpuCalleeSaves[i];
+ if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
+ if (unaligned_float) {
+ if (fpu_32bit) {
+ __ Lwc1(reg, SP, ofs);
+ __ Lwc1(static_cast<FRegister>(reg + 1), SP, ofs + 4);
+ } else {
+ __ Lwc1(reg, SP, ofs);
+ __ Lw(TMP, SP, ofs + 4);
+ __ Mthc1(TMP, reg);
+ }
+ } else {
+ __ Ldc1(reg, SP, ofs);
+ }
+ ofs += kMipsDoublewordSize;
+ // TODO: __ cfi().Restore(DWARFReg(reg));
+ }
+ }
+
+ for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
+ Register reg = kCoreCalleeSaves[i];
+ if (allocated_registers_.ContainsCoreRegister(reg)) {
+ __ Lw(reg, SP, ofs);
+ ofs += kMipsWordSize;
+ __ cfi().Restore(DWARFReg(reg));
+ }
+ }
+
+ DCHECK_EQ(ofs, FrameEntrySpillSize());
+ __ DecreaseFrameSize(ofs);
+ }
+
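+  // Return; the Nop fills the jump delay slot.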
+ __ Jr(RA);
+ __ Nop();
+
+ __ cfi().RestoreState();
+ __ cfi().DefCFAOffset(GetFrameSize());
+}
+
+void CodeGeneratorMIPS::Bind(HBasicBlock* block) {
+ __ Bind(GetLabelOf(block));
+}
+
+void CodeGeneratorMIPS::MoveLocation(Location dst, Location src, Primitive::Type dst_type) {
+ if (src.Equals(dst)) {
+ return;
+ }
+
+ if (src.IsConstant()) {
+ MoveConstant(dst, src.GetConstant());
+ } else {
+ if (Primitive::Is64BitType(dst_type)) {
+ Move64(dst, src);
+ } else {
+ Move32(dst, src);
+ }
+ }
+}
+
+void CodeGeneratorMIPS::Move32(Location destination, Location source) {
+ if (source.Equals(destination)) {
+ return;
+ }
+
+ if (destination.IsRegister()) {
+ if (source.IsRegister()) {
+ __ Move(destination.AsRegister<Register>(), source.AsRegister<Register>());
+ } else if (source.IsFpuRegister()) {
+ __ Mfc1(destination.AsRegister<Register>(), source.AsFpuRegister<FRegister>());
+ } else {
+ DCHECK(source.IsStackSlot()) << "Cannot move from " << source << " to " << destination;
+ __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(), SP, source.GetStackIndex());
+ }
+ } else if (destination.IsFpuRegister()) {
+ if (source.IsRegister()) {
+ __ Mtc1(source.AsRegister<Register>(), destination.AsFpuRegister<FRegister>());
+ } else if (source.IsFpuRegister()) {
+ __ MovS(destination.AsFpuRegister<FRegister>(), source.AsFpuRegister<FRegister>());
+ } else {
+ DCHECK(source.IsStackSlot()) << "Cannot move from " << source << " to " << destination;
+ __ LoadSFromOffset(destination.AsFpuRegister<FRegister>(), SP, source.GetStackIndex());
+ }
+ } else {
+ DCHECK(destination.IsStackSlot()) << destination;
+ if (source.IsRegister()) {
+ __ StoreToOffset(kStoreWord, source.AsRegister<Register>(), SP, destination.GetStackIndex());
+ } else if (source.IsFpuRegister()) {
+ __ StoreSToOffset(source.AsFpuRegister<FRegister>(), SP, destination.GetStackIndex());
+ } else {
+ DCHECK(source.IsStackSlot()) << "Cannot move from " << source << " to " << destination;
+ __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
+ __ StoreToOffset(kStoreWord, TMP, SP, destination.GetStackIndex());
+ }
+ }
+}
+
+void CodeGeneratorMIPS::Move64(Location destination, Location source) {
+ if (source.Equals(destination)) {
+ return;
+ }
+
+ if (destination.IsRegisterPair()) {
+ if (source.IsRegisterPair()) {
+ __ Move(destination.AsRegisterPairHigh<Register>(), source.AsRegisterPairHigh<Register>());
+ __ Move(destination.AsRegisterPairLow<Register>(), source.AsRegisterPairLow<Register>());
+ } else if (source.IsFpuRegister()) {
+ Register dst_high = destination.AsRegisterPairHigh<Register>();
+ Register dst_low = destination.AsRegisterPairLow<Register>();
+ FRegister src = source.AsFpuRegister<FRegister>();
+ __ Mfc1(dst_low, src);
+ __ Mfhc1(dst_high, src);
+ } else {
+ DCHECK(source.IsDoubleStackSlot()) << "Cannot move from " << source << " to " << destination;
+ int32_t off = source.GetStackIndex();
+ Register r = destination.AsRegisterPairLow<Register>();
+ __ LoadFromOffset(kLoadDoubleword, r, SP, off);
+ }
+ } else if (destination.IsFpuRegister()) {
+ if (source.IsRegisterPair()) {
+ FRegister dst = destination.AsFpuRegister<FRegister>();
+ Register src_high = source.AsRegisterPairHigh<Register>();
+ Register src_low = source.AsRegisterPairLow<Register>();
+ __ Mtc1(src_low, dst);
+ __ Mthc1(src_high, dst);
+ } else if (source.IsFpuRegister()) {
+ __ MovD(destination.AsFpuRegister<FRegister>(), source.AsFpuRegister<FRegister>());
+ } else {
+ DCHECK(source.IsDoubleStackSlot()) << "Cannot move from " << source << " to " << destination;
+ __ LoadDFromOffset(destination.AsFpuRegister<FRegister>(), SP, source.GetStackIndex());
+ }
+ } else {
+ DCHECK(destination.IsDoubleStackSlot()) << destination;
+ int32_t off = destination.GetStackIndex();
+ if (source.IsRegisterPair()) {
+ __ StoreToOffset(kStoreDoubleword, source.AsRegisterPairLow<Register>(), SP, off);
+ } else if (source.IsFpuRegister()) {
+ __ StoreDToOffset(source.AsFpuRegister<FRegister>(), SP, off);
+ } else {
+ DCHECK(source.IsDoubleStackSlot()) << "Cannot move from " << source << " to " << destination;
+ __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
+ __ StoreToOffset(kStoreWord, TMP, SP, off);
+ __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex() + 4);
+ __ StoreToOffset(kStoreWord, TMP, SP, off + 4);
+ }
+ }
+}
+
+void CodeGeneratorMIPS::MoveConstant(Location destination, HConstant* c) {
+ if (c->IsIntConstant() || c->IsNullConstant()) {
+ // Move 32 bit constant.
+ int32_t value = GetInt32ValueOf(c);
+ if (destination.IsRegister()) {
+ Register dst = destination.AsRegister<Register>();
+ __ LoadConst32(dst, value);
+ } else {
+ DCHECK(destination.IsStackSlot())
+ << "Cannot move " << c->DebugName() << " to " << destination;
+ __ StoreConst32ToOffset(value, SP, destination.GetStackIndex(), TMP);
+ }
+ } else if (c->IsLongConstant()) {
+ // Move 64 bit constant.
+ int64_t value = GetInt64ValueOf(c);
+ if (destination.IsRegisterPair()) {
+ Register r_h = destination.AsRegisterPairHigh<Register>();
+ Register r_l = destination.AsRegisterPairLow<Register>();
+ __ LoadConst64(r_h, r_l, value);
+ } else {
+ DCHECK(destination.IsDoubleStackSlot())
+ << "Cannot move " << c->DebugName() << " to " << destination;
+ __ StoreConst64ToOffset(value, SP, destination.GetStackIndex(), TMP);
+ }
+ } else if (c->IsFloatConstant()) {
+ // Move 32 bit float constant.
+ int32_t value = GetInt32ValueOf(c);
+ if (destination.IsFpuRegister()) {
+ __ LoadSConst32(destination.AsFpuRegister<FRegister>(), value, TMP);
+ } else {
+ DCHECK(destination.IsStackSlot())
+ << "Cannot move " << c->DebugName() << " to " << destination;
+ __ StoreConst32ToOffset(value, SP, destination.GetStackIndex(), TMP);
+ }
+ } else {
+ // Move 64 bit double constant.
+ DCHECK(c->IsDoubleConstant()) << c->DebugName();
+ int64_t value = GetInt64ValueOf(c);
+ if (destination.IsFpuRegister()) {
+ FRegister fd = destination.AsFpuRegister<FRegister>();
+ __ LoadDConst64(fd, value, TMP);
+ } else {
+ DCHECK(destination.IsDoubleStackSlot())
+ << "Cannot move " << c->DebugName() << " to " << destination;
+ __ StoreConst64ToOffset(value, SP, destination.GetStackIndex(), TMP);
+ }
+ }
+}
+
+void CodeGeneratorMIPS::MoveConstant(Location destination, int32_t value) {
+ DCHECK(destination.IsRegister());
+ Register dst = destination.AsRegister<Register>();
+ __ LoadConst32(dst, value);
+}
+
+void CodeGeneratorMIPS::Move(HInstruction* instruction,
+ Location location,
+ HInstruction* move_for) {
+ LocationSummary* locations = instruction->GetLocations();
+ Primitive::Type type = instruction->GetType();
+ DCHECK_NE(type, Primitive::kPrimVoid);
+
+ if (instruction->IsCurrentMethod()) {
+ Move32(location, Location::StackSlot(kCurrentMethodStackOffset));
+ } else if (locations != nullptr && locations->Out().Equals(location)) {
+ return;
+ } else if (instruction->IsIntConstant()
+ || instruction->IsLongConstant()
+ || instruction->IsNullConstant()) {
+ MoveConstant(location, instruction->AsConstant());
+ } else if (instruction->IsTemporary()) {
+ Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
+ if (temp_location.IsStackSlot()) {
+ Move32(location, temp_location);
+ } else {
+ DCHECK(temp_location.IsDoubleStackSlot());
+ Move64(location, temp_location);
+ }
+ } else if (instruction->IsLoadLocal()) {
+ uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
+ if (Primitive::Is64BitType(type)) {
+ Move64(location, Location::DoubleStackSlot(stack_slot));
+ } else {
+ Move32(location, Location::StackSlot(stack_slot));
+ }
+ } else {
+ DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
+ if (Primitive::Is64BitType(type)) {
+ Move64(location, locations->Out());
+ } else {
+ Move32(location, locations->Out());
+ }
+ }
+}
+
+void CodeGeneratorMIPS::AddLocationAsTemp(Location location, LocationSummary* locations) {
+ if (location.IsRegister()) {
+ locations->AddTemp(location);
+ } else {
+ UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
+ }
+}
+
+Location CodeGeneratorMIPS::GetStackLocation(HLoadLocal* load) const {
+ Primitive::Type type = load->GetType();
+
+ switch (type) {
+ case Primitive::kPrimNot:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ return Location::StackSlot(GetStackSlot(load->GetLocal()));
+
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
+
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unexpected type " << type;
+ }
+
+ LOG(FATAL) << "Unreachable";
+ return Location::NoLocation();
+}
+
+void CodeGeneratorMIPS::MarkGCCard(Register object, Register value) {
+ MipsLabel done;
+ Register card = AT;
+ Register temp = TMP;
+ __ Beqz(value, &done);
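+  // `card` holds the biased card table base kept in the thread; the card for `object`
+  // is at card + (object >> kCardShift), and storing the low byte of `card` there
+  // marks it dirty (non-zero).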
+ __ LoadFromOffset(kLoadWord,
+ card,
+ TR,
+ Thread::CardTableOffset<kMipsWordSize>().Int32Value());
+ __ Srl(temp, object, gc::accounting::CardTable::kCardShift);
+ __ Addu(temp, card, temp);
+ __ Sb(card, temp, 0);
+ __ Bind(&done);
+}
+
+void CodeGeneratorMIPS::SetupBlockedRegisters(bool is_baseline) const {
+  // Don't allocate the A1_A2 pair used for Dalvik-style register-pair argument passing.
+ blocked_register_pairs_[A1_A2] = true;
+
+ // ZERO, K0, K1, GP, SP, RA are always reserved and can't be allocated.
+ blocked_core_registers_[ZERO] = true;
+ blocked_core_registers_[K0] = true;
+ blocked_core_registers_[K1] = true;
+ blocked_core_registers_[GP] = true;
+ blocked_core_registers_[SP] = true;
+ blocked_core_registers_[RA] = true;
+
+ // AT and TMP(T8) are used as temporary/scratch registers
+ // (similar to how AT is used by MIPS assemblers).
+ blocked_core_registers_[AT] = true;
+ blocked_core_registers_[TMP] = true;
+ blocked_fpu_registers_[FTMP] = true;
+
+ // Reserve suspend and thread registers.
+ blocked_core_registers_[S0] = true;
+ blocked_core_registers_[TR] = true;
+
+  // Reserve T9 for function calls.
+ blocked_core_registers_[T9] = true;
+
+ // Reserve odd-numbered FPU registers.
+ for (size_t i = 1; i < kNumberOfFRegisters; i += 2) {
+ blocked_fpu_registers_[i] = true;
+ }
+
+ if (is_baseline) {
+ for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
+ blocked_core_registers_[kCoreCalleeSaves[i]] = true;
+ }
+
+ for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
+ blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
+ }
+ }
+
+ UpdateBlockedPairRegisters();
+}
+
+void CodeGeneratorMIPS::UpdateBlockedPairRegisters() const {
+ for (int i = 0; i < kNumberOfRegisterPairs; i++) {
+ MipsManagedRegister current =
+ MipsManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
+ if (blocked_core_registers_[current.AsRegisterPairLow()]
+ || blocked_core_registers_[current.AsRegisterPairHigh()]) {
+ blocked_register_pairs_[i] = true;
+ }
+ }
+}
+
+Location CodeGeneratorMIPS::AllocateFreeRegister(Primitive::Type type) const {
+ switch (type) {
+ case Primitive::kPrimLong: {
+ size_t reg = FindFreeEntry(blocked_register_pairs_, kNumberOfRegisterPairs);
+ MipsManagedRegister pair =
+ MipsManagedRegister::FromRegisterPair(static_cast<RegisterPair>(reg));
+ DCHECK(!blocked_core_registers_[pair.AsRegisterPairLow()]);
+ DCHECK(!blocked_core_registers_[pair.AsRegisterPairHigh()]);
+
+ blocked_core_registers_[pair.AsRegisterPairLow()] = true;
+ blocked_core_registers_[pair.AsRegisterPairHigh()] = true;
+ UpdateBlockedPairRegisters();
+ return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
+ }
+
+ case Primitive::kPrimByte:
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ int reg = FindFreeEntry(blocked_core_registers_, kNumberOfCoreRegisters);
+ // Block all register pairs that contain `reg`.
+ for (int i = 0; i < kNumberOfRegisterPairs; i++) {
+ MipsManagedRegister current =
+ MipsManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
+ if (current.AsRegisterPairLow() == reg || current.AsRegisterPairHigh() == reg) {
+ blocked_register_pairs_[i] = true;
+ }
+ }
+ return Location::RegisterLocation(reg);
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ int reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfFRegisters);
+ return Location::FpuRegisterLocation(reg);
+ }
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << type;
+ }
+
+ UNREACHABLE();
+}
+
+size_t CodeGeneratorMIPS::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
+ __ StoreToOffset(kStoreWord, Register(reg_id), SP, stack_index);
+ return kMipsWordSize;
+}
+
+size_t CodeGeneratorMIPS::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
+ __ LoadFromOffset(kLoadWord, Register(reg_id), SP, stack_index);
+ return kMipsWordSize;
+}
+
+size_t CodeGeneratorMIPS::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
+ __ StoreDToOffset(FRegister(reg_id), SP, stack_index);
+ return kMipsDoublewordSize;
+}
+
+size_t CodeGeneratorMIPS::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
+ __ LoadDFromOffset(FRegister(reg_id), SP, stack_index);
+ return kMipsDoublewordSize;
+}
+
+void CodeGeneratorMIPS::DumpCoreRegister(std::ostream& stream, int reg) const {
+ stream << MipsManagedRegister::FromCoreRegister(Register(reg));
+}
+
+void CodeGeneratorMIPS::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
+ stream << MipsManagedRegister::FromFRegister(FRegister(reg));
+}
+
+void CodeGeneratorMIPS::InvokeRuntime(QuickEntrypointEnum entrypoint,
+ HInstruction* instruction,
+ uint32_t dex_pc,
+ SlowPathCode* slow_path) {
+ InvokeRuntime(GetThreadOffset<kMipsWordSize>(entrypoint).Int32Value(),
+ instruction,
+ dex_pc,
+ slow_path,
+ IsDirectEntrypoint(entrypoint));
+}
+
+constexpr size_t kMipsDirectEntrypointRuntimeOffset = 16;
+
+void CodeGeneratorMIPS::InvokeRuntime(int32_t entry_point_offset,
+ HInstruction* instruction,
+ uint32_t dex_pc,
+ SlowPathCode* slow_path,
+ bool is_direct_entrypoint) {
+ if (is_direct_entrypoint) {
+    // Reserve argument space on the stack (for $a0-$a3) for entrypoints
+    // that directly reference native implementations. The called function
+    // may use this space to store the $a0-$a3 registers.
+ __ IncreaseFrameSize(kMipsDirectEntrypointRuntimeOffset);
+ }
+ __ LoadFromOffset(kLoadWord, T9, TR, entry_point_offset);
+ __ Jalr(T9);
+ __ Nop();
+ if (is_direct_entrypoint) {
+ __ DecreaseFrameSize(kMipsDirectEntrypointRuntimeOffset);
+ }
+ RecordPcInfo(instruction, dex_pc, slow_path);
+}
+
+void InstructionCodeGeneratorMIPS::GenerateClassInitializationCheck(SlowPathCodeMIPS* slow_path,
+ Register class_reg) {
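+  // Branch to the slow path unless the class status is at least kStatusInitialized.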
+ __ LoadFromOffset(kLoadWord, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
+ __ LoadConst32(AT, mirror::Class::kStatusInitialized);
+ __ Blt(TMP, AT, slow_path->GetEntryLabel());
+ // Even if the initialized flag is set, we need to ensure consistent memory ordering.
+ __ Sync(0);
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void InstructionCodeGeneratorMIPS::GenerateMemoryBarrier(MemBarrierKind kind ATTRIBUTE_UNUSED) {
+ __ Sync(0); // Only stype 0 is supported.
+}
+
+void InstructionCodeGeneratorMIPS::GenerateSuspendCheck(HSuspendCheck* instruction,
+ HBasicBlock* successor) {
+ SuspendCheckSlowPathMIPS* slow_path =
+ new (GetGraph()->GetArena()) SuspendCheckSlowPathMIPS(instruction, successor);
+ codegen_->AddSlowPath(slow_path);
+
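+  // The thread flags halfword is non-zero when a suspend or checkpoint request is pending.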
+ __ LoadFromOffset(kLoadUnsignedHalfword,
+ TMP,
+ TR,
+ Thread::ThreadFlagsOffset<kMipsWordSize>().Int32Value());
+ if (successor == nullptr) {
+ __ Bnez(TMP, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetReturnLabel());
+ } else {
+ __ Beqz(TMP, codegen_->GetLabelOf(successor));
+ __ B(slow_path->GetEntryLabel());
+ // slow_path will return to GetLabelOf(successor).
+ }
+}
+
+InstructionCodeGeneratorMIPS::InstructionCodeGeneratorMIPS(HGraph* graph,
+ CodeGeneratorMIPS* codegen)
+ : HGraphVisitor(graph),
+ assembler_(codegen->GetAssembler()),
+ codegen_(codegen) {}
+
+void LocationsBuilderMIPS::HandleBinaryOp(HBinaryOperation* instruction) {
+ DCHECK_EQ(instruction->InputCount(), 2U);
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ Primitive::Type type = instruction->GetResultType();
+ switch (type) {
+ case Primitive::kPrimInt: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ HInstruction* right = instruction->InputAt(1);
+ bool can_use_imm = false;
+ if (right->IsConstant()) {
+ int32_t imm = CodeGenerator::GetInt32ValueOf(right->AsConstant());
+ if (instruction->IsAnd() || instruction->IsOr() || instruction->IsXor()) {
+ can_use_imm = IsUint<16>(imm);
+ } else if (instruction->IsAdd()) {
+ can_use_imm = IsInt<16>(imm);
+ } else {
+ DCHECK(instruction->IsSub());
+ can_use_imm = IsInt<16>(-imm);
+ }
+ }
+ if (can_use_imm)
+ locations->SetInAt(1, Location::ConstantLocation(right->AsConstant()));
+ else
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ // TODO: can 2nd param be const?
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ if (instruction->IsAdd() || instruction->IsSub()) {
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ } else {
+ DCHECK(instruction->IsAnd() || instruction->IsOr() || instruction->IsXor());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ }
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ DCHECK(instruction->IsAdd() || instruction->IsSub());
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected " << instruction->DebugName() << " type " << type;
+ }
+}
+
+void InstructionCodeGeneratorMIPS::HandleBinaryOp(HBinaryOperation* instruction) {
+ Primitive::Type type = instruction->GetType();
+ LocationSummary* locations = instruction->GetLocations();
+
+ switch (type) {
+ case Primitive::kPrimInt: {
+ Register dst = locations->Out().AsRegister<Register>();
+ Register lhs = locations->InAt(0).AsRegister<Register>();
+ Location rhs_location = locations->InAt(1);
+
+ Register rhs_reg = ZERO;
+ int32_t rhs_imm = 0;
+ bool use_imm = rhs_location.IsConstant();
+ if (use_imm) {
+ rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
+ } else {
+ rhs_reg = rhs_location.AsRegister<Register>();
+ }
+
+ if (instruction->IsAnd()) {
+ if (use_imm)
+ __ Andi(dst, lhs, rhs_imm);
+ else
+ __ And(dst, lhs, rhs_reg);
+ } else if (instruction->IsOr()) {
+ if (use_imm)
+ __ Ori(dst, lhs, rhs_imm);
+ else
+ __ Or(dst, lhs, rhs_reg);
+ } else if (instruction->IsXor()) {
+ if (use_imm)
+ __ Xori(dst, lhs, rhs_imm);
+ else
+ __ Xor(dst, lhs, rhs_reg);
+ } else if (instruction->IsAdd()) {
+ if (use_imm)
+ __ Addiu(dst, lhs, rhs_imm);
+ else
+ __ Addu(dst, lhs, rhs_reg);
+ } else {
+ DCHECK(instruction->IsSub());
+ if (use_imm)
+ __ Addiu(dst, lhs, -rhs_imm);
+ else
+ __ Subu(dst, lhs, rhs_reg);
+ }
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ // TODO: can 2nd param be const?
+ Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
+ Register dst_low = locations->Out().AsRegisterPairLow<Register>();
+ Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
+ Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
+ Register rhs_high = locations->InAt(1).AsRegisterPairHigh<Register>();
+ Register rhs_low = locations->InAt(1).AsRegisterPairLow<Register>();
+
+ if (instruction->IsAnd()) {
+ __ And(dst_low, lhs_low, rhs_low);
+ __ And(dst_high, lhs_high, rhs_high);
+ } else if (instruction->IsOr()) {
+ __ Or(dst_low, lhs_low, rhs_low);
+ __ Or(dst_high, lhs_high, rhs_high);
+ } else if (instruction->IsXor()) {
+ __ Xor(dst_low, lhs_low, rhs_low);
+ __ Xor(dst_high, lhs_high, rhs_high);
+ } else if (instruction->IsAdd()) {
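+        // 64-bit add: TMP is the carry out of the low word (dst_low < lhs_low unsigned
+        // iff the low-word addition wrapped), folded into the high word.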
+ __ Addu(dst_low, lhs_low, rhs_low);
+ __ Sltu(TMP, dst_low, lhs_low);
+ __ Addu(dst_high, lhs_high, rhs_high);
+ __ Addu(dst_high, dst_high, TMP);
+ } else {
+ DCHECK(instruction->IsSub());
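+        // 64-bit sub: TMP is the borrow from the low word (lhs_low < dst_low unsigned
+        // iff the low-word subtraction wrapped), subtracted from the high word.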
+ __ Subu(dst_low, lhs_low, rhs_low);
+ __ Sltu(TMP, lhs_low, dst_low);
+ __ Subu(dst_high, lhs_high, rhs_high);
+ __ Subu(dst_high, dst_high, TMP);
+ }
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ FRegister dst = locations->Out().AsFpuRegister<FRegister>();
+ FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
+ FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
+ if (instruction->IsAdd()) {
+ if (type == Primitive::kPrimFloat) {
+ __ AddS(dst, lhs, rhs);
+ } else {
+ __ AddD(dst, lhs, rhs);
+ }
+ } else {
+ DCHECK(instruction->IsSub());
+ if (type == Primitive::kPrimFloat) {
+ __ SubS(dst, lhs, rhs);
+ } else {
+ __ SubD(dst, lhs, rhs);
+ }
+ }
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected binary operation type " << type;
+ }
+}
+
+void LocationsBuilderMIPS::HandleShift(HBinaryOperation* instr) {
+ DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
+
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
+ Primitive::Type type = instr->GetResultType();
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister());
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected shift type " << type;
+ }
+}
+
+static constexpr size_t kMipsBitsPerWord = kMipsWordSize * kBitsPerByte;
+
+void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
+ DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
+ LocationSummary* locations = instr->GetLocations();
+ Primitive::Type type = instr->GetType();
+
+ Location rhs_location = locations->InAt(1);
+ bool use_imm = rhs_location.IsConstant();
+ Register rhs_reg = use_imm ? ZERO : rhs_location.AsRegister<Register>();
+ int64_t rhs_imm = use_imm ? CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant()) : 0;
+ uint32_t shift_mask = (type == Primitive::kPrimInt) ? kMaxIntShiftValue : kMaxLongShiftValue;
+ uint32_t shift_value = rhs_imm & shift_mask;
+
+ switch (type) {
+ case Primitive::kPrimInt: {
+ Register dst = locations->Out().AsRegister<Register>();
+ Register lhs = locations->InAt(0).AsRegister<Register>();
+ if (use_imm) {
+ if (instr->IsShl()) {
+ __ Sll(dst, lhs, shift_value);
+ } else if (instr->IsShr()) {
+ __ Sra(dst, lhs, shift_value);
+ } else {
+ __ Srl(dst, lhs, shift_value);
+ }
+ } else {
+ if (instr->IsShl()) {
+ __ Sllv(dst, lhs, rhs_reg);
+ } else if (instr->IsShr()) {
+ __ Srav(dst, lhs, rhs_reg);
+ } else {
+ __ Srlv(dst, lhs, rhs_reg);
+ }
+ }
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
+ Register dst_low = locations->Out().AsRegisterPairLow<Register>();
+ Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
+ Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
+ if (use_imm) {
+ if (shift_value == 0) {
+ codegen_->Move64(locations->Out(), locations->InAt(0));
+ } else if (shift_value < kMipsBitsPerWord) {
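+          // Shift amount in [1, 31]: combine bits from both source words.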
+ if (instr->IsShl()) {
+ __ Sll(dst_low, lhs_low, shift_value);
+ __ Srl(TMP, lhs_low, kMipsBitsPerWord - shift_value);
+ __ Sll(dst_high, lhs_high, shift_value);
+ __ Or(dst_high, dst_high, TMP);
+ } else if (instr->IsShr()) {
+ __ Sra(dst_high, lhs_high, shift_value);
+ __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value);
+ __ Srl(dst_low, lhs_low, shift_value);
+ __ Or(dst_low, dst_low, TMP);
+ } else {
+ __ Srl(dst_high, lhs_high, shift_value);
+ __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value);
+ __ Srl(dst_low, lhs_low, shift_value);
+ __ Or(dst_low, dst_low, TMP);
+ }
+ } else {
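+          // Shift amount in [32, 63]: only one source word contributes; the other
+          // half is zero- or sign-filled.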
+ shift_value -= kMipsBitsPerWord;
+ if (instr->IsShl()) {
+ __ Sll(dst_high, lhs_low, shift_value);
+ __ Move(dst_low, ZERO);
+ } else if (instr->IsShr()) {
+ __ Sra(dst_low, lhs_high, shift_value);
+ __ Sra(dst_high, dst_low, kMipsBitsPerWord - 1);
+ } else {
+ __ Srl(dst_low, lhs_high, shift_value);
+ __ Move(dst_high, ZERO);
+ }
+ }
+ } else {
+ MipsLabel done;
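+        // Variable shift amount: compute the result for (rhs & 31), carrying the bits
+        // that cross the 32-bit boundary through TMP, then test bit 5 of rhs (Andi with
+        // kMipsBitsPerWord == 32); if it is set the real shift is >= 32 and the halves
+        // are fixed up after the Beqz.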
+ if (instr->IsShl()) {
+ __ Sllv(dst_low, lhs_low, rhs_reg);
+ __ Nor(AT, ZERO, rhs_reg);
+ __ Srl(TMP, lhs_low, 1);
+ __ Srlv(TMP, TMP, AT);
+ __ Sllv(dst_high, lhs_high, rhs_reg);
+ __ Or(dst_high, dst_high, TMP);
+ __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
+ __ Beqz(TMP, &done);
+ __ Move(dst_high, dst_low);
+ __ Move(dst_low, ZERO);
+ } else if (instr->IsShr()) {
+ __ Srav(dst_high, lhs_high, rhs_reg);
+ __ Nor(AT, ZERO, rhs_reg);
+ __ Sll(TMP, lhs_high, 1);
+ __ Sllv(TMP, TMP, AT);
+ __ Srlv(dst_low, lhs_low, rhs_reg);
+ __ Or(dst_low, dst_low, TMP);
+ __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
+ __ Beqz(TMP, &done);
+ __ Move(dst_low, dst_high);
+ __ Sra(dst_high, dst_high, 31);
+ } else {
+ __ Srlv(dst_high, lhs_high, rhs_reg);
+ __ Nor(AT, ZERO, rhs_reg);
+ __ Sll(TMP, lhs_high, 1);
+ __ Sllv(TMP, TMP, AT);
+ __ Srlv(dst_low, lhs_low, rhs_reg);
+ __ Or(dst_low, dst_low, TMP);
+ __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
+ __ Beqz(TMP, &done);
+ __ Move(dst_low, dst_high);
+ __ Move(dst_high, ZERO);
+ }
+ __ Bind(&done);
+ }
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected shift operation type " << type;
+ }
+}
+
+void LocationsBuilderMIPS::VisitAdd(HAdd* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitAdd(HAdd* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void LocationsBuilderMIPS::VisitAnd(HAnd* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitAnd(HAnd* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void LocationsBuilderMIPS::VisitArrayGet(HArrayGet* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ if (Primitive::IsFloatingPointType(instruction->GetType())) {
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ } else {
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ Location index = locations->InAt(1);
+ Primitive::Type type = instruction->GetType();
+
+ switch (type) {
+ case Primitive::kPrimBoolean: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
+ Register out = locations->Out().AsRegister<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
+ __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
+ } else {
+ __ Addu(TMP, obj, index.AsRegister<Register>());
+ __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimByte: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
+ Register out = locations->Out().AsRegister<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
+ __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
+ } else {
+ __ Addu(TMP, obj, index.AsRegister<Register>());
+ __ LoadFromOffset(kLoadSignedByte, out, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimShort: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
+ Register out = locations->Out().AsRegister<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
+ __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
+ } else {
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_2);
+ __ Addu(TMP, obj, TMP);
+ __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimChar: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
+ Register out = locations->Out().AsRegister<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
+ __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
+ } else {
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_2);
+ __ Addu(TMP, obj, TMP);
+ __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ Register out = locations->Out().AsRegister<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ __ LoadFromOffset(kLoadWord, out, obj, offset);
+ } else {
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_4);
+ __ Addu(TMP, obj, TMP);
+ __ LoadFromOffset(kLoadWord, out, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
+ Register out = locations->Out().AsRegisterPairLow<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+ __ LoadFromOffset(kLoadDoubleword, out, obj, offset);
+ } else {
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_8);
+ __ Addu(TMP, obj, TMP);
+ __ LoadFromOffset(kLoadDoubleword, out, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimFloat: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
+ FRegister out = locations->Out().AsFpuRegister<FRegister>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ __ LoadSFromOffset(out, obj, offset);
+ } else {
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_4);
+ __ Addu(TMP, obj, TMP);
+ __ LoadSFromOffset(out, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
+ FRegister out = locations->Out().AsFpuRegister<FRegister>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+ __ LoadDFromOffset(out, obj, offset);
+ } else {
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_8);
+ __ Addu(TMP, obj, TMP);
+ __ LoadDFromOffset(out, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ UNREACHABLE();
+ }
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+}
+
+void LocationsBuilderMIPS::VisitArrayLength(HArrayLength* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorMIPS::VisitArrayLength(HArrayLength* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ Register out = locations->Out().AsRegister<Register>();
+ __ LoadFromOffset(kLoadWord, out, obj, offset);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+}
+
+void LocationsBuilderMIPS::VisitArraySet(HArraySet* instruction) {
+ Primitive::Type value_type = instruction->GetComponentType();
+ bool is_object = value_type == Primitive::kPrimNot;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction,
+ is_object ? LocationSummary::kCall : LocationSummary::kNoCall);
+ if (is_object) {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ } else {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
+ locations->SetInAt(2, Location::RequiresFpuRegister());
+ } else {
+ locations->SetInAt(2, Location::RequiresRegister());
+ }
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ Location index = locations->InAt(1);
+ Primitive::Type value_type = instruction->GetComponentType();
+ bool needs_runtime_call = locations->WillCall();
+ bool needs_write_barrier =
+ CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
+
+ switch (value_type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
+ Register value = locations->InAt(2).AsRegister<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
+ __ StoreToOffset(kStoreByte, value, obj, offset);
+ } else {
+ __ Addu(TMP, obj, index.AsRegister<Register>());
+ __ StoreToOffset(kStoreByte, value, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimShort:
+ case Primitive::kPrimChar: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
+ Register value = locations->InAt(2).AsRegister<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
+ __ StoreToOffset(kStoreHalfword, value, obj, offset);
+ } else {
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_2);
+ __ Addu(TMP, obj, TMP);
+ __ StoreToOffset(kStoreHalfword, value, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ if (!needs_runtime_call) {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ Register value = locations->InAt(2).AsRegister<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ __ StoreToOffset(kStoreWord, value, obj, offset);
+ } else {
+ DCHECK(index.IsRegister()) << index;
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_4);
+ __ Addu(TMP, obj, TMP);
+ __ StoreToOffset(kStoreWord, value, TMP, data_offset);
+ }
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ if (needs_write_barrier) {
+ DCHECK_EQ(value_type, Primitive::kPrimNot);
+ codegen_->MarkGCCard(obj, value);
+ }
+ } else {
+ DCHECK_EQ(value_type, Primitive::kPrimNot);
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr,
+ IsDirectEntrypoint(kQuickAputObject));
+ CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
+ }
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
+ Register value = locations->InAt(2).AsRegisterPairLow<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+ __ StoreToOffset(kStoreDoubleword, value, obj, offset);
+ } else {
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_8);
+ __ Addu(TMP, obj, TMP);
+ __ StoreToOffset(kStoreDoubleword, value, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimFloat: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
+ FRegister value = locations->InAt(2).AsFpuRegister<FRegister>();
+ DCHECK(locations->InAt(2).IsFpuRegister());
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ __ StoreSToOffset(value, obj, offset);
+ } else {
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_4);
+ __ Addu(TMP, obj, TMP);
+ __ StoreSToOffset(value, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
+ FRegister value = locations->InAt(2).AsFpuRegister<FRegister>();
+ DCHECK(locations->InAt(2).IsFpuRegister());
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+ __ StoreDToOffset(value, obj, offset);
+ } else {
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_8);
+ __ Addu(TMP, obj, TMP);
+ __ StoreDToOffset(value, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ UNREACHABLE();
+ }
+
+ // Ints and objects are handled in the switch.
+ if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
+}
+
+void LocationsBuilderMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ BoundsCheckSlowPathMIPS* slow_path =
+ new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS(instruction);
+ codegen_->AddSlowPath(slow_path);
+
+ Register index = locations->InAt(0).AsRegister<Register>();
+ Register length = locations->InAt(1).AsRegister<Register>();
+
+ // length is limited by the maximum positive signed 32-bit integer.
+ // Unsigned comparison of length and index checks for index < 0
+ // and for length <= index simultaneously.
+ __ Bgeu(index, length, slow_path->GetEntryLabel());
+}
+
+void LocationsBuilderMIPS::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction,
+ LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ // Note that TypeCheckSlowPathMIPS uses this register too.
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorMIPS::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ Register cls = locations->InAt(1).AsRegister<Register>();
+ Register obj_cls = locations->GetTemp(0).AsRegister<Register>();
+
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS(instruction);
+ codegen_->AddSlowPath(slow_path);
+
+ // TODO: avoid this check if we know obj is not null.
+ __ Beqz(obj, slow_path->GetExitLabel());
+ // Compare the class of `obj` with `cls`.
+ __ LoadFromOffset(kLoadWord, obj_cls, obj, mirror::Object::ClassOffset().Int32Value());
+ __ Bne(obj_cls, cls, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void LocationsBuilderMIPS::VisitClinitCheck(HClinitCheck* check) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (check->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitClinitCheck(HClinitCheck* check) {
+ // We assume the class is not null.
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS(
+ check->GetLoadClass(),
+ check,
+ check->GetDexPc(),
+ true);
+ codegen_->AddSlowPath(slow_path);
+ GenerateClassInitializationCheck(slow_path,
+ check->GetLocations()->InAt(0).AsRegister<Register>());
+}
+
+void LocationsBuilderMIPS::VisitCompare(HCompare* compare) {
+ Primitive::Type in_type = compare->InputAt(0)->GetType();
+
+ LocationSummary::CallKind call_kind = Primitive::IsFloatingPointType(in_type)
+ ? LocationSummary::kCall
+ : LocationSummary::kNoCall;
+
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(compare, call_kind);
+
+ switch (in_type) {
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ // Output overlaps because it is written before doing the low comparison.
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
+ locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
+ locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimInt));
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected type for compare operation " << in_type;
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitCompare(HCompare* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Primitive::Type in_type = instruction->InputAt(0)->GetType();
+
+ // 0 if: left == right
+ // 1 if: left > right
+ // -1 if: left < right
+ switch (in_type) {
+ case Primitive::kPrimLong: {
+ MipsLabel done;
+ Register res = locations->Out().AsRegister<Register>();
+ Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
+ Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
+ Register rhs_high = locations->InAt(1).AsRegisterPairHigh<Register>();
+ Register rhs_low = locations->InAt(1).AsRegisterPairLow<Register>();
+ // TODO: more efficient (direct) comparison with a constant.
+ __ Slt(TMP, lhs_high, rhs_high);
+ __ Slt(AT, rhs_high, lhs_high); // Inverted: is actually gt.
+ __ Subu(res, AT, TMP); // Result -1:1:0 for [ <, >, == ].
+ __ Bnez(res, &done); // If we compared ==, check if lower bits are also equal.
+ __ Sltu(TMP, lhs_low, rhs_low);
+ __ Sltu(AT, rhs_low, lhs_low); // Inverted: is actually gt.
+ __ Subu(res, AT, TMP); // Result -1:1:0 for [ <, >, == ].
+ __ Bind(&done);
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ int32_t entry_point_offset;
+ bool direct;
+ if (in_type == Primitive::kPrimFloat) {
+ if (instruction->IsGtBias()) {
+ entry_point_offset = QUICK_ENTRY_POINT(pCmpgFloat);
+ direct = IsDirectEntrypoint(kQuickCmpgFloat);
+ } else {
+ entry_point_offset = QUICK_ENTRY_POINT(pCmplFloat);
+ direct = IsDirectEntrypoint(kQuickCmplFloat);
+ }
+ } else {
+ if (instruction->IsGtBias()) {
+ entry_point_offset = QUICK_ENTRY_POINT(pCmpgDouble);
+ direct = IsDirectEntrypoint(kQuickCmpgDouble);
+ } else {
+ entry_point_offset = QUICK_ENTRY_POINT(pCmplDouble);
+ direct = IsDirectEntrypoint(kQuickCmplDouble);
+ }
+ }
+ codegen_->InvokeRuntime(entry_point_offset,
+ instruction,
+ instruction->GetDexPc(),
+ nullptr,
+ direct);
+ if (in_type == Primitive::kPrimFloat) {
+ if (instruction->IsGtBias()) {
+ CheckEntrypointTypes<kQuickCmpgFloat, int32_t, float, float>();
+ } else {
+ CheckEntrypointTypes<kQuickCmplFloat, int32_t, float, float>();
+ }
+ } else {
+ if (instruction->IsGtBias()) {
+ CheckEntrypointTypes<kQuickCmpgDouble, int32_t, double, double>();
+ } else {
+ CheckEntrypointTypes<kQuickCmplDouble, int32_t, double, double>();
+ }
+ }
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unimplemented compare type " << in_type;
+ }
+}
+
+void LocationsBuilderMIPS::VisitCondition(HCondition* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ if (instruction->NeedsMaterialization()) {
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitCondition(HCondition* instruction) {
+ if (!instruction->NeedsMaterialization()) {
+ return;
+ }
+ // TODO: generalize to long
+ DCHECK_NE(instruction->InputAt(0)->GetType(), Primitive::kPrimLong);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Register dst = locations->Out().AsRegister<Register>();
+
+ Register lhs = locations->InAt(0).AsRegister<Register>();
+ Location rhs_location = locations->InAt(1);
+
+ Register rhs_reg = ZERO;
+ int64_t rhs_imm = 0;
+ bool use_imm = rhs_location.IsConstant();
+ if (use_imm) {
+ rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
+ } else {
+ rhs_reg = rhs_location.AsRegister<Register>();
+ }
+
+ IfCondition if_cond = instruction->GetCondition();
+
+ switch (if_cond) {
+ case kCondEQ:
+ case kCondNE:
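+      // EQ/NE: XOR the operands, then test the result against zero
+      // (Sltiu dst, dst, 1 yields 1 iff dst == 0; Sltu dst, ZERO, dst yields 1 iff dst != 0).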
+ if (use_imm && IsUint<16>(rhs_imm)) {
+ __ Xori(dst, lhs, rhs_imm);
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ __ Xor(dst, lhs, rhs_reg);
+ }
+ if (if_cond == kCondEQ) {
+ __ Sltiu(dst, dst, 1);
+ } else {
+ __ Sltu(dst, ZERO, dst);
+ }
+ break;
+
+ case kCondLT:
+ case kCondGE:
+ if (use_imm && IsInt<16>(rhs_imm)) {
+ __ Slti(dst, lhs, rhs_imm);
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ __ Slt(dst, lhs, rhs_reg);
+ }
+ if (if_cond == kCondGE) {
+ // Simulate lhs >= rhs via !(lhs < rhs) since there's
+ // only the slt instruction but no sge.
+ __ Xori(dst, dst, 1);
+ }
+ break;
+
+ case kCondLE:
+ case kCondGT:
+ if (use_imm && IsInt<16>(rhs_imm + 1)) {
+ // Simulate lhs <= rhs via lhs < rhs + 1.
+ __ Slti(dst, lhs, rhs_imm + 1);
+ if (if_cond == kCondGT) {
+ // Simulate lhs > rhs via !(lhs <= rhs) since there's
+ // only the slti instruction but no sgti.
+ __ Xori(dst, dst, 1);
+ }
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ __ Slt(dst, rhs_reg, lhs);
+ if (if_cond == kCondLE) {
+ // Simulate lhs <= rhs via !(rhs < lhs) since there's
+ // only the slt instruction but no sle.
+ __ Xori(dst, dst, 1);
+ }
+ }
+ break;
+
+ case kCondB:
+ case kCondAE:
+ // Use sltiu instruction if rhs_imm is in range [0, 32767] or in
+ // [max_unsigned - 32767 = 0xffff8000, max_unsigned = 0xffffffff].
+ if (use_imm &&
+ (IsUint<15>(rhs_imm) ||
+ IsUint<15>(rhs_imm - (MaxInt<uint64_t>(32) - MaxInt<uint64_t>(15))))) {
+ if (IsUint<15>(rhs_imm)) {
+ __ Sltiu(dst, lhs, rhs_imm);
+ } else {
+ // 16-bit value (in range [0x8000, 0xffff]) passed to sltiu is sign-extended
+ // and then used as unsigned integer (range [0xffff8000, 0xffffffff]).
+ __ Sltiu(dst, lhs, rhs_imm - (MaxInt<uint64_t>(32) - MaxInt<uint64_t>(16)));
+ }
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ __ Sltu(dst, lhs, rhs_reg);
+ }
+ if (if_cond == kCondAE) {
+ // Simulate lhs >= rhs via !(lhs < rhs) since there's
+ // only the sltu instruction but no sgeu.
+ __ Xori(dst, dst, 1);
+ }
+ break;
+
+ case kCondBE:
+ case kCondA:
+ // Use sltiu instruction if rhs_imm is in range [0, 32766] or in
+ // [max_unsigned - 32767 - 1 = 0xffff7fff, max_unsigned - 1 = 0xfffffffe].
+ // lhs <= rhs is simulated via lhs < rhs + 1.
+ if (use_imm && (rhs_imm != -1) &&
+ (IsUint<15>(rhs_imm + 1) ||
+ IsUint<15>(rhs_imm + 1 - (MaxInt<uint64_t>(32) - MaxInt<uint64_t>(15))))) {
+ if (IsUint<15>(rhs_imm + 1)) {
+ // Simulate lhs <= rhs via lhs < rhs + 1.
+ __ Sltiu(dst, lhs, rhs_imm + 1);
+ } else {
+ // 16-bit value (in range [0x8000, 0xffff]) passed to sltiu is sign-extended
+ // and then used as unsigned integer (range [0xffff8000, 0xffffffff] where rhs_imm
+ // is in range [0xffff7fff, 0xfffffffe] since lhs <= rhs is simulated via lhs < rhs + 1).
+ __ Sltiu(dst, lhs, rhs_imm + 1 - (MaxInt<uint64_t>(32) - MaxInt<uint64_t>(16)));
+ }
+ if (if_cond == kCondA) {
+ // Simulate lhs > rhs via !(lhs <= rhs) since there's
+ // only the sltiu instruction but no sgtiu.
+ __ Xori(dst, dst, 1);
+ }
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ __ Sltu(dst, rhs_reg, lhs);
+ if (if_cond == kCondBE) {
+ // Simulate lhs <= rhs via !(rhs < lhs) since there's
+ // only the sltu instruction but no sleu.
+ __ Xori(dst, dst, 1);
+ }
+ }
+ break;
+ }
+}
+
+void LocationsBuilderMIPS::VisitDiv(HDiv* div) {
+ Primitive::Type type = div->GetResultType();
+ LocationSummary::CallKind call_kind = (type == Primitive::kPrimLong)
+ ? LocationSummary::kCall
+ : LocationSummary::kNoCall;
+
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
+
+ switch (type) {
+ case Primitive::kPrimInt:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimLong: {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(1, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
+ locations->SetOut(calling_convention.GetReturnLocation(type));
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected div type " << type;
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitDiv(HDiv* instruction) {
+ Primitive::Type type = instruction->GetType();
+ LocationSummary* locations = instruction->GetLocations();
+ bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
+
+ switch (type) {
+ case Primitive::kPrimInt: {
+ Register dst = locations->Out().AsRegister<Register>();
+ Register lhs = locations->InAt(0).AsRegister<Register>();
+ Register rhs = locations->InAt(1).AsRegister<Register>();
+ if (isR6) {
+ __ DivR6(dst, lhs, rhs);
+ } else {
+ __ DivR2(dst, lhs, rhs);
+ }
+ break;
+ }
+ case Primitive::kPrimLong: {
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLdiv),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr,
+ IsDirectEntrypoint(kQuickLdiv));
+ CheckEntrypointTypes<kQuickLdiv, int64_t, int64_t, int64_t>();
+ break;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ FRegister dst = locations->Out().AsFpuRegister<FRegister>();
+ FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
+ FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
+ if (type == Primitive::kPrimFloat) {
+ __ DivS(dst, lhs, rhs);
+ } else {
+ __ DivD(dst, lhs, rhs);
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected div type " << type;
+ }
+}
+
+void LocationsBuilderMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathMIPS(instruction);
+ codegen_->AddSlowPath(slow_path);
+ Location value = instruction->GetLocations()->InAt(0);
+ Primitive::Type type = instruction->GetType();
+
+ switch (type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt: {
+ if (value.IsConstant()) {
+ if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
+ __ B(slow_path->GetEntryLabel());
+ } else {
+          // A division by a non-zero constant is valid. We don't need to perform
+ // any check, so simply fall through.
+ }
+ } else {
+ DCHECK(value.IsRegister()) << value;
+ __ Beqz(value.AsRegister<Register>(), slow_path->GetEntryLabel());
+ }
+ break;
+ }
+ case Primitive::kPrimLong: {
+ if (value.IsConstant()) {
+ if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
+ __ B(slow_path->GetEntryLabel());
+ } else {
+          // A division by a non-zero constant is valid. We don't need to perform
+ // any check, so simply fall through.
+ }
+ } else {
+ DCHECK(value.IsRegisterPair()) << value;
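+          // The 64-bit value is zero iff the OR of its halves is zero.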
+ __ Or(TMP, value.AsRegisterPairHigh<Register>(), value.AsRegisterPairLow<Register>());
+ __ Beqz(TMP, slow_path->GetEntryLabel());
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
+ }
+}
+
+void LocationsBuilderMIPS::VisitDoubleConstant(HDoubleConstant* constant) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorMIPS::VisitDoubleConstant(HDoubleConstant* cst ATTRIBUTE_UNUSED) {
+ // Will be generated at use site.
+}
+
+void LocationsBuilderMIPS::VisitExit(HExit* exit) {
+ exit->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
+}
+
+void LocationsBuilderMIPS::VisitFloatConstant(HFloatConstant* constant) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorMIPS::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
+ // Will be generated at use site.
+}
+
+void LocationsBuilderMIPS::VisitGoto(HGoto* got) {
+ got->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS::HandleGoto(HInstruction* got, HBasicBlock* successor) {
+ DCHECK(!successor->IsExitBlock());
+ HBasicBlock* block = got->GetBlock();
+ HInstruction* previous = got->GetPrevious();
+ HLoopInformation* info = block->GetLoopInformation();
+
+ if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
+ codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
+ GenerateSuspendCheck(info->GetSuspendCheck(), successor);
+ return;
+ }
+ if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
+ GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
+ }
+ if (!codegen_->GoesToNextBlock(block, successor)) {
+ __ B(codegen_->GetLabelOf(successor));
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitGoto(HGoto* got) {
+ HandleGoto(got, got->GetSuccessor());
+}
+
+void LocationsBuilderMIPS::VisitTryBoundary(HTryBoundary* try_boundary) {
+ try_boundary->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitTryBoundary(HTryBoundary* try_boundary) {
+ HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
+ if (!successor->IsExitBlock()) {
+ HandleGoto(try_boundary, successor);
+ }
+}
+
+void InstructionCodeGeneratorMIPS::GenerateTestAndBranch(HInstruction* instruction,
+ MipsLabel* true_target,
+ MipsLabel* false_target,
+ MipsLabel* always_true_target) {
+ HInstruction* cond = instruction->InputAt(0);
+ HCondition* condition = cond->AsCondition();
+
+ if (cond->IsIntConstant()) {
+ int32_t cond_value = cond->AsIntConstant()->GetValue();
+ if (cond_value == 1) {
+ if (always_true_target != nullptr) {
+ __ B(always_true_target);
+ }
+ return;
+ } else {
+ DCHECK_EQ(cond_value, 0);
+ }
+ } else if (!cond->IsCondition() || condition->NeedsMaterialization()) {
+ // The condition instruction has been materialized, compare the output to 0.
+ Location cond_val = instruction->GetLocations()->InAt(0);
+ DCHECK(cond_val.IsRegister());
+ __ Bnez(cond_val.AsRegister<Register>(), true_target);
+ } else {
+ // The condition instruction has not been materialized, use its inputs as
+ // the comparison and its condition as the branch condition.
+ Register lhs = condition->GetLocations()->InAt(0).AsRegister<Register>();
+ Location rhs_location = condition->GetLocations()->InAt(1);
+ Register rhs_reg = ZERO;
+ int32_t rhs_imm = 0;
+ bool use_imm = rhs_location.IsConstant();
+ if (use_imm) {
+ rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
+ } else {
+ rhs_reg = rhs_location.AsRegister<Register>();
+ }
+
+ IfCondition if_cond = condition->GetCondition();
+ if (use_imm && rhs_imm == 0) {
+ switch (if_cond) {
+ case kCondEQ:
+ __ Beqz(lhs, true_target);
+ break;
+ case kCondNE:
+ __ Bnez(lhs, true_target);
+ break;
+ case kCondLT:
+ __ Bltz(lhs, true_target);
+ break;
+ case kCondGE:
+ __ Bgez(lhs, true_target);
+ break;
+ case kCondLE:
+ __ Blez(lhs, true_target);
+ break;
+ case kCondGT:
+ __ Bgtz(lhs, true_target);
+ break;
+        case kCondB:
+          break;  // "Below zero" is never true for unsigned values.
+        case kCondBE:
+          __ Beqz(lhs, true_target);  // Unsigned "<= 0" holds only for zero.
+          break;
+        case kCondA:
+          __ Bnez(lhs, true_target);  // Unsigned "> 0" holds for any non-zero value.
+          break;
+        case kCondAE:
+          __ B(true_target);  // Unsigned ">= 0" is always true.
+          break;
+ }
+ } else {
+ if (use_imm) {
+ // TODO: more efficient comparison with 16-bit constants without loading them into TMP.
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ switch (if_cond) {
+ case kCondEQ:
+ __ Beq(lhs, rhs_reg, true_target);
+ break;
+ case kCondNE:
+ __ Bne(lhs, rhs_reg, true_target);
+ break;
+ case kCondLT:
+ __ Blt(lhs, rhs_reg, true_target);
+ break;
+ case kCondGE:
+ __ Bge(lhs, rhs_reg, true_target);
+ break;
+ case kCondLE:
+ __ Bge(rhs_reg, lhs, true_target);
+ break;
+ case kCondGT:
+ __ Blt(rhs_reg, lhs, true_target);
+ break;
+ case kCondB:
+ __ Bltu(lhs, rhs_reg, true_target);
+ break;
+ case kCondAE:
+ __ Bgeu(lhs, rhs_reg, true_target);
+ break;
+ case kCondBE:
+ __ Bgeu(rhs_reg, lhs, true_target);
+ break;
+ case kCondA:
+ __ Bltu(rhs_reg, lhs, true_target);
+ break;
+ }
+ }
+ }
+ if (false_target != nullptr) {
+ __ B(false_target);
+ }
+}
+
+void LocationsBuilderMIPS::VisitIf(HIf* if_instr) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
+ HInstruction* cond = if_instr->InputAt(0);
+ if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitIf(HIf* if_instr) {
+ MipsLabel* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
+ MipsLabel* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
+ MipsLabel* always_true_target = true_target;
+ if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
+ if_instr->IfTrueSuccessor())) {
+ always_true_target = nullptr;
+ }
+ if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
+ if_instr->IfFalseSuccessor())) {
+ false_target = nullptr;
+ }
+ GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
+}
+
+void LocationsBuilderMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
+ LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
+ HInstruction* cond = deoptimize->InputAt(0);
+ if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena())
+ DeoptimizationSlowPathMIPS(deoptimize);
+ codegen_->AddSlowPath(slow_path);
+ MipsLabel* slow_path_entry = slow_path->GetEntryLabel();
+ GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
+}
+
+void LocationsBuilderMIPS::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) {
+ Primitive::Type field_type = field_info.GetFieldType();
+ bool is_wide = (field_type == Primitive::kPrimLong) || (field_type == Primitive::kPrimDouble);
+ bool generate_volatile = field_info.IsVolatile() && is_wide;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction, generate_volatile ? LocationSummary::kCall : LocationSummary::kNoCall);
+
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (generate_volatile) {
+ InvokeRuntimeCallingConvention calling_convention;
+    // Need A0 to hold base + offset.
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ if (field_type == Primitive::kPrimLong) {
+ locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimLong));
+ } else {
+ locations->SetOut(Location::RequiresFpuRegister());
+ // Need some temp core regs since FP results are returned in core registers
+ Location reg = calling_convention.GetReturnLocation(Primitive::kPrimLong);
+ locations->AddTemp(Location::RegisterLocation(reg.AsRegisterPairLow<Register>()));
+ locations->AddTemp(Location::RegisterLocation(reg.AsRegisterPairHigh<Register>()));
+ }
+ } else {
+ if (Primitive::IsFloatingPointType(instruction->GetType())) {
+ locations->SetOut(Location::RequiresFpuRegister());
+ } else {
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ }
+ }
+}
+
+void InstructionCodeGeneratorMIPS::HandleFieldGet(HInstruction* instruction,
+ const FieldInfo& field_info,
+ uint32_t dex_pc) {
+ Primitive::Type type = field_info.GetFieldType();
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ LoadOperandType load_type = kLoadUnsignedByte;
+ bool is_volatile = field_info.IsVolatile();
+
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ load_type = kLoadUnsignedByte;
+ break;
+ case Primitive::kPrimByte:
+ load_type = kLoadSignedByte;
+ break;
+ case Primitive::kPrimShort:
+ load_type = kLoadSignedHalfword;
+ break;
+ case Primitive::kPrimChar:
+ load_type = kLoadUnsignedHalfword;
+ break;
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimNot:
+ load_type = kLoadWord;
+ break;
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ load_type = kLoadDoubleword;
+ break;
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << type;
+ UNREACHABLE();
+ }
+
+ if (is_volatile && load_type == kLoadDoubleword) {
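+    // A volatile 64-bit load cannot be performed with plain MIPS32 word loads;
+    // it is routed through the quick A64Load entrypoint, which is assumed here
+    // to provide a single atomic 64-bit load.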
+ InvokeRuntimeCallingConvention calling_convention;
+ __ Addiu32(locations->GetTemp(0).AsRegister<Register>(),
+ obj, field_info.GetFieldOffset().Uint32Value());
+    // Do an implicit null check.
+ __ Lw(ZERO, locations->GetTemp(0).AsRegister<Register>(), 0);
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pA64Load),
+ instruction,
+ dex_pc,
+ nullptr,
+ IsDirectEntrypoint(kQuickA64Load));
+ CheckEntrypointTypes<kQuickA64Load, int64_t, volatile const int64_t*>();
+ if (type == Primitive::kPrimDouble) {
+ // Need to move to FP regs since FP results are returned in core registers.
+ __ Mtc1(locations->GetTemp(1).AsRegister<Register>(),
+ locations->Out().AsFpuRegister<FRegister>());
+ __ Mthc1(locations->GetTemp(2).AsRegister<Register>(),
+ locations->Out().AsFpuRegister<FRegister>());
+ }
+ } else {
+ if (!Primitive::IsFloatingPointType(type)) {
+ Register dst;
+ if (type == Primitive::kPrimLong) {
+ DCHECK(locations->Out().IsRegisterPair());
+ dst = locations->Out().AsRegisterPairLow<Register>();
+ } else {
+ DCHECK(locations->Out().IsRegister());
+ dst = locations->Out().AsRegister<Register>();
+ }
+ __ LoadFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value());
+ } else {
+ DCHECK(locations->Out().IsFpuRegister());
+ FRegister dst = locations->Out().AsFpuRegister<FRegister>();
+ if (type == Primitive::kPrimFloat) {
+ __ LoadSFromOffset(dst, obj, field_info.GetFieldOffset().Uint32Value());
+ } else {
+ __ LoadDFromOffset(dst, obj, field_info.GetFieldOffset().Uint32Value());
+ }
+ }
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
+
+ if (is_volatile) {
+ GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
+ }
+}
+
+void LocationsBuilderMIPS::HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info) {
+ Primitive::Type field_type = field_info.GetFieldType();
+ bool is_wide = (field_type == Primitive::kPrimLong) || (field_type == Primitive::kPrimDouble);
+ bool generate_volatile = field_info.IsVolatile() && is_wide;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction, generate_volatile ? LocationSummary::kCall : LocationSummary::kNoCall);
+
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (generate_volatile) {
+ InvokeRuntimeCallingConvention calling_convention;
+    // Need A0 to hold base + offset.
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ if (field_type == Primitive::kPrimLong) {
+ locations->SetInAt(1, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
+ } else {
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ // Pass FP parameters in core registers.
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(3)));
+ }
+ } else {
+ if (Primitive::IsFloatingPointType(field_type)) {
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ } else {
+ locations->SetInAt(1, Location::RequiresRegister());
+ }
+ }
+}
+
+void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction,
+ const FieldInfo& field_info,
+ uint32_t dex_pc) {
+ Primitive::Type type = field_info.GetFieldType();
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ StoreOperandType store_type = kStoreByte;
+ bool is_volatile = field_info.IsVolatile();
+
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ store_type = kStoreByte;
+ break;
+ case Primitive::kPrimShort:
+ case Primitive::kPrimChar:
+ store_type = kStoreHalfword;
+ break;
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimNot:
+ store_type = kStoreWord;
+ break;
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ store_type = kStoreDoubleword;
+ break;
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << type;
+ UNREACHABLE();
+ }
+
+ if (is_volatile) {
+ GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
+ }
+
+ if (is_volatile && store_type == kStoreDoubleword) {
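+    // As with volatile 64-bit loads, the store is routed through the quick
+    // A64Store entrypoint, assumed here to provide a single atomic 64-bit store.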
+ InvokeRuntimeCallingConvention calling_convention;
+ __ Addiu32(locations->GetTemp(0).AsRegister<Register>(),
+ obj, field_info.GetFieldOffset().Uint32Value());
+    // Do an implicit null check.
+ __ Lw(ZERO, locations->GetTemp(0).AsRegister<Register>(), 0);
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ if (type == Primitive::kPrimDouble) {
+ // Pass FP parameters in core registers.
+ __ Mfc1(locations->GetTemp(1).AsRegister<Register>(),
+ locations->InAt(1).AsFpuRegister<FRegister>());
+ __ Mfhc1(locations->GetTemp(2).AsRegister<Register>(),
+ locations->InAt(1).AsFpuRegister<FRegister>());
+ }
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pA64Store),
+ instruction,
+ dex_pc,
+ nullptr,
+ IsDirectEntrypoint(kQuickA64Store));
+ CheckEntrypointTypes<kQuickA64Store, void, volatile int64_t *, int64_t>();
+ } else {
+ if (!Primitive::IsFloatingPointType(type)) {
+ Register src;
+ if (type == Primitive::kPrimLong) {
+ DCHECK(locations->InAt(1).IsRegisterPair());
+ src = locations->InAt(1).AsRegisterPairLow<Register>();
+ } else {
+ DCHECK(locations->InAt(1).IsRegister());
+ src = locations->InAt(1).AsRegister<Register>();
+ }
+ __ StoreToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value());
+ } else {
+ DCHECK(locations->InAt(1).IsFpuRegister());
+ FRegister src = locations->InAt(1).AsFpuRegister<FRegister>();
+ if (type == Primitive::kPrimFloat) {
+ __ StoreSToOffset(src, obj, field_info.GetFieldOffset().Uint32Value());
+ } else {
+ __ StoreDToOffset(src, obj, field_info.GetFieldOffset().Uint32Value());
+ }
+ }
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
+
+ // TODO: memory barriers?
+ if (CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1))) {
+ DCHECK(locations->InAt(1).IsRegister());
+ Register src = locations->InAt(1).AsRegister<Register>();
+ codegen_->MarkGCCard(obj, src);
+ }
+
+ if (is_volatile) {
+ GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
+ }
+}
+
+void LocationsBuilderMIPS::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
+ HandleFieldGet(instruction, instruction->GetFieldInfo());
+}
+
+void InstructionCodeGeneratorMIPS::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
+ HandleFieldGet(instruction, instruction->GetFieldInfo(), instruction->GetDexPc());
+}
+
+void LocationsBuilderMIPS::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
+ HandleFieldSet(instruction, instruction->GetFieldInfo());
+}
+
+void InstructionCodeGeneratorMIPS::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
+ HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetDexPc());
+}
+
+void LocationsBuilderMIPS::VisitInstanceOf(HInstanceOf* instruction) {
+ LocationSummary::CallKind call_kind =
+ instruction->IsExactCheck() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ // The output does overlap inputs.
+ // Note that TypeCheckSlowPathMIPS uses this register too.
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ Register cls = locations->InAt(1).AsRegister<Register>();
+ Register out = locations->Out().AsRegister<Register>();
+
+ MipsLabel done;
+
+ // Return 0 if `obj` is null.
+ // TODO: Avoid this check if we know `obj` is not null.
+ __ Move(out, ZERO);
+ __ Beqz(obj, &done);
+
+ // Compare the class of `obj` with `cls`.
+ __ LoadFromOffset(kLoadWord, out, obj, mirror::Object::ClassOffset().Int32Value());
+ if (instruction->IsExactCheck()) {
+ // Classes must be equal for the instanceof to succeed.
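+    // Xor leaves zero in `out` iff the two class pointers are equal; Sltiu then
+    // turns that into a boolean 1 (equal) or 0 (not equal).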
+ __ Xor(out, out, cls);
+ __ Sltiu(out, out, 1);
+ } else {
+ // If the classes are not equal, we go into a slow path.
+ DCHECK(locations->OnlyCallsOnSlowPath());
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS(instruction);
+ codegen_->AddSlowPath(slow_path);
+ __ Bne(out, cls, slow_path->GetEntryLabel());
+ __ LoadConst32(out, 1);
+ __ Bind(slow_path->GetExitLabel());
+ }
+
+ __ Bind(&done);
+}
+
+void LocationsBuilderMIPS::VisitIntConstant(HIntConstant* constant) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorMIPS::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
+ // Will be generated at use site.
+}
+
+void LocationsBuilderMIPS::VisitNullConstant(HNullConstant* constant) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorMIPS::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
+ // Will be generated at use site.
+}
+
+void LocationsBuilderMIPS::HandleInvoke(HInvoke* invoke) {
+ InvokeDexCallingConventionVisitorMIPS calling_convention_visitor;
+ CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
+}
+
+void LocationsBuilderMIPS::VisitInvokeInterface(HInvokeInterface* invoke) {
+ HandleInvoke(invoke);
+ // The register T0 is required to be used for the hidden argument in
+ // art_quick_imt_conflict_trampoline, so add the hidden argument.
+ invoke->GetLocations()->AddTemp(Location::RegisterLocation(T0));
+}
+
+void InstructionCodeGeneratorMIPS::VisitInvokeInterface(HInvokeInterface* invoke) {
+ // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
+ Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
+ uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+ invoke->GetImtIndex() % mirror::Class::kImtSize, kMipsPointerSize).Uint32Value();
+ Location receiver = invoke->GetLocations()->InAt(0);
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsWordSize);
+
+ // Set the hidden argument.
+ __ LoadConst32(invoke->GetLocations()->GetTemp(1).AsRegister<Register>(),
+ invoke->GetDexMethodIndex());
+
+ // temp = object->GetClass();
+ if (receiver.IsStackSlot()) {
+ __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
+ __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
+ } else {
+ __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
+ }
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
+ // temp = temp->GetImtEntryAt(method_offset);
+ __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
+ // T9 = temp->GetEntryPoint();
+ __ LoadFromOffset(kLoadWord, T9, temp, entry_point.Int32Value());
+ // T9();
+ __ Jalr(T9);
+ __ Nop();
+ DCHECK(!codegen_->IsLeafMethod());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void LocationsBuilderMIPS::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+ // TODO: intrinsic function.
+ HandleInvoke(invoke);
+}
+
+void LocationsBuilderMIPS::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
+ // When we do not run baseline, explicit clinit checks triggered by static
+ // invokes must have been pruned by art::PrepareForRegisterAllocation.
+ DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+
+ // TODO: intrinsic function.
+ HandleInvoke(invoke);
+}
+
+static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorMIPS* codegen ATTRIBUTE_UNUSED) {
+ if (invoke->GetLocations()->Intrinsified()) {
+ // TODO: intrinsic function.
+ return true;
+ }
+ return false;
+}
+
+void CodeGeneratorMIPS::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
+ // All registers are assumed to be correctly set up per the calling convention.
+
+ Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
+ switch (invoke->GetMethodLoadKind()) {
+ case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
+ // temp = thread->string_init_entrypoint
+ __ LoadFromOffset(kLoadWord,
+ temp.AsRegister<Register>(),
+ TR,
+ invoke->GetStringInitOffset());
+ break;
+ case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
+ callee_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
+ break;
+ case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
+ __ LoadConst32(temp.AsRegister<Register>(), invoke->GetMethodAddress());
+ break;
+ case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
+ // TODO: Implement this type. (Needs literal support.) At the moment, the
+ // CompilerDriver will not direct the backend to use this type for MIPS.
+ LOG(FATAL) << "Unsupported!";
+ UNREACHABLE();
+ case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
+ // TODO: Implement this type. For the moment, we fall back to kDexCacheViaMethod.
+ FALLTHROUGH_INTENDED;
+ case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
+ Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
+ Register reg = temp.AsRegister<Register>();
+ Register method_reg;
+ if (current_method.IsRegister()) {
+ method_reg = current_method.AsRegister<Register>();
+ } else {
+ // TODO: use the appropriate DCHECK() here if possible.
+ // DCHECK(invoke->GetLocations()->Intrinsified());
+ DCHECK(!current_method.IsValid());
+ method_reg = reg;
+ __ Lw(reg, SP, kCurrentMethodStackOffset);
+ }
+
+ // temp = temp->dex_cache_resolved_methods_;
+ __ LoadFromOffset(kLoadWord,
+ reg,
+ method_reg,
+ ArtMethod::DexCacheResolvedMethodsOffset(kMipsPointerSize).Int32Value());
+ // temp = temp[index_in_cache]
+ uint32_t index_in_cache = invoke->GetTargetMethod().dex_method_index;
+ __ LoadFromOffset(kLoadWord,
+ reg,
+ reg,
+ CodeGenerator::GetCachePointerOffset(index_in_cache));
+ break;
+ }
+ }
+
+ switch (invoke->GetCodePtrLocation()) {
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
+ __ Jalr(&frame_entry_label_, T9);
+ break;
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
+      // T9 = invoke->GetDirectCodePtr();
+      __ LoadConst32(T9, invoke->GetDirectCodePtr());
+      // T9()
+ __ Jalr(T9);
+ __ Nop();
+ break;
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
+ // TODO: Implement kCallPCRelative. For the moment, we fall back to kMethodCode.
+ FALLTHROUGH_INTENDED;
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
+ // TODO: Implement kDirectCodeFixup. For the moment, we fall back to kMethodCode.
+ FALLTHROUGH_INTENDED;
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
+ // T9 = callee_method->entry_point_from_quick_compiled_code_;
+      __ LoadFromOffset(kLoadWord,
+ T9,
+ callee_method.AsRegister<Register>(),
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kMipsWordSize).Int32Value());
+ // T9()
+ __ Jalr(T9);
+ __ Nop();
+ break;
+ }
+ DCHECK(!IsLeafMethod());
+}
+
+void InstructionCodeGeneratorMIPS::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
+ // When we do not run baseline, explicit clinit checks triggered by static
+ // invokes must have been pruned by art::PrepareForRegisterAllocation.
+ DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+
+ if (TryGenerateIntrinsicCode(invoke, codegen_)) {
+ return;
+ }
+
+ LocationSummary* locations = invoke->GetLocations();
+ codegen_->GenerateStaticOrDirectCall(invoke,
+ locations->HasTemps()
+ ? locations->GetTemp(0)
+ : Location::NoLocation());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void InstructionCodeGeneratorMIPS::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+ // TODO: Try to generate intrinsics code.
+ LocationSummary* locations = invoke->GetLocations();
+ Location receiver = locations->InAt(0);
+ Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
+ size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
+ invoke->GetVTableIndex(), kMipsPointerSize).SizeValue();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsWordSize);
+
+ // temp = object->GetClass();
+ if (receiver.IsStackSlot()) {
+ __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
+ __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
+ } else {
+ DCHECK(receiver.IsRegister());
+ __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
+ }
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
+ // temp = temp->GetMethodAt(method_offset);
+ __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
+ // T9 = temp->GetEntryPoint();
+ __ LoadFromOffset(kLoadWord, T9, temp, entry_point.Int32Value());
+ // T9();
+ __ Jalr(T9);
+ __ Nop();
+ DCHECK(!codegen_->IsLeafMethod());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void LocationsBuilderMIPS::VisitLoadClass(HLoadClass* cls) {
+ LocationSummary::CallKind call_kind = cls->CanCallRuntime() ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) {
+ LocationSummary* locations = cls->GetLocations();
+ Register out = locations->Out().AsRegister<Register>();
+ Register current_method = locations->InAt(0).AsRegister<Register>();
+ if (cls->IsReferrersClass()) {
+ DCHECK(!cls->CanCallRuntime());
+ DCHECK(!cls->MustGenerateClinitCheck());
+ __ LoadFromOffset(kLoadWord, out, current_method,
+ ArtMethod::DeclaringClassOffset().Int32Value());
+ } else {
+ DCHECK(cls->CanCallRuntime());
+ __ LoadFromOffset(kLoadWord, out, current_method,
+ ArtMethod::DexCacheResolvedTypesOffset(kMipsPointerSize).Int32Value());
+ __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS(
+ cls,
+ cls,
+ cls->GetDexPc(),
+ cls->MustGenerateClinitCheck());
+ codegen_->AddSlowPath(slow_path);
+ __ Beqz(out, slow_path->GetEntryLabel());
+ if (cls->MustGenerateClinitCheck()) {
+ GenerateClassInitializationCheck(slow_path, out);
+ } else {
+ __ Bind(slow_path->GetExitLabel());
+ }
+ }
+}
+
+static int32_t GetExceptionTlsOffset() {
+ return Thread::ExceptionOffset<kMipsWordSize>().Int32Value();
+}
+
+void LocationsBuilderMIPS::VisitLoadException(HLoadException* load) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorMIPS::VisitLoadException(HLoadException* load) {
+ Register out = load->GetLocations()->Out().AsRegister<Register>();
+ __ LoadFromOffset(kLoadWord, out, TR, GetExceptionTlsOffset());
+}
+
+void LocationsBuilderMIPS::VisitClearException(HClearException* clear) {
+ new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+}
+
+void InstructionCodeGeneratorMIPS::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
+ __ StoreToOffset(kStoreWord, ZERO, TR, GetExceptionTlsOffset());
+}
+
+void LocationsBuilderMIPS::VisitLoadLocal(HLoadLocal* load) {
+ load->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
+ // Nothing to do, this is driven by the code generator.
+}
+
+void LocationsBuilderMIPS::VisitLoadString(HLoadString* load) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) {
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS(load);
+ codegen_->AddSlowPath(slow_path);
+
+ LocationSummary* locations = load->GetLocations();
+ Register out = locations->Out().AsRegister<Register>();
+ Register current_method = locations->InAt(0).AsRegister<Register>();
+ __ LoadFromOffset(kLoadWord, out, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
+ __ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
+ __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
+ __ Beqz(out, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void LocationsBuilderMIPS::VisitLocal(HLocal* local) {
+ local->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitLocal(HLocal* local) {
+ DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
+}
+
+void LocationsBuilderMIPS::VisitLongConstant(HLongConstant* constant) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorMIPS::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
+ // Will be generated at use site.
+}
+
+void LocationsBuilderMIPS::VisitMonitorOperation(HMonitorOperation* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorMIPS::VisitMonitorOperation(HMonitorOperation* instruction) {
+ if (instruction->IsEnter()) {
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLockObject),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr,
+ IsDirectEntrypoint(kQuickLockObject));
+ CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
+ } else {
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pUnlockObject),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr,
+ IsDirectEntrypoint(kQuickUnlockObject));
+    CheckEntrypointTypes<kQuickUnlockObject, void, mirror::Object*>();
+  }
+}
+
+void LocationsBuilderMIPS::VisitMul(HMul* mul) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
+ switch (mul->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitMul(HMul* instruction) {
+ Primitive::Type type = instruction->GetType();
+ LocationSummary* locations = instruction->GetLocations();
+ bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
+
+ switch (type) {
+ case Primitive::kPrimInt: {
+ Register dst = locations->Out().AsRegister<Register>();
+ Register lhs = locations->InAt(0).AsRegister<Register>();
+ Register rhs = locations->InAt(1).AsRegister<Register>();
+
+ if (isR6) {
+ __ MulR6(dst, lhs, rhs);
+ } else {
+ __ MulR2(dst, lhs, rhs);
+ }
+ break;
+ }
+ case Primitive::kPrimLong: {
+ Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
+ Register dst_low = locations->Out().AsRegisterPairLow<Register>();
+ Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
+ Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
+ Register rhs_high = locations->InAt(1).AsRegisterPairHigh<Register>();
+ Register rhs_low = locations->InAt(1).AsRegisterPairLow<Register>();
+
+      // Extra checks are needed because of the existence of the A1_A2 register pair.
+      // The algorithm below is wrong if dst_high is either lhs_low or rhs_low
+      // (e.g. lhs=a0_a1, rhs=a2_a3 and dst=a1_a2).
+ DCHECK_NE(dst_high, lhs_low);
+ DCHECK_NE(dst_high, rhs_low);
+
+      // A_B * C_D, where A = lhs_high, B = lhs_low, C = rhs_high, D = rhs_low.
+      // dst_hi: [ low(A*D) + low(B*C) + hi(B*D) ]
+      // dst_lo: [ low(B*D) ]
+      // Note: R2 and R6 MUL produce the low 32 bits of the multiplication result.
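+      // Illustrative example (made-up values): for lhs = 0x00000001_80000000 and
+      // rhs = 0x00000000_00000002, low(B*D) = 0, hi(B*D) = 1, low(A*D) = 2 and
+      // low(B*C) = 0, giving dst = 0x00000003_00000000 = lhs * rhs.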
+
+ if (isR6) {
+ __ MulR6(TMP, lhs_high, rhs_low);
+ __ MulR6(dst_high, lhs_low, rhs_high);
+ __ Addu(dst_high, dst_high, TMP);
+ __ MuhuR6(TMP, lhs_low, rhs_low);
+ __ Addu(dst_high, dst_high, TMP);
+ __ MulR6(dst_low, lhs_low, rhs_low);
+ } else {
+ __ MulR2(TMP, lhs_high, rhs_low);
+ __ MulR2(dst_high, lhs_low, rhs_high);
+ __ Addu(dst_high, dst_high, TMP);
+ __ MultuR2(lhs_low, rhs_low);
+ __ Mfhi(TMP);
+ __ Addu(dst_high, dst_high, TMP);
+ __ Mflo(dst_low);
+ }
+ break;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ FRegister dst = locations->Out().AsFpuRegister<FRegister>();
+ FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
+ FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
+ if (type == Primitive::kPrimFloat) {
+ __ MulS(dst, lhs, rhs);
+ } else {
+ __ MulD(dst, lhs, rhs);
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected mul type " << type;
+ }
+}
+
+void LocationsBuilderMIPS::VisitNeg(HNeg* neg) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+ switch (neg->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitNeg(HNeg* instruction) {
+ Primitive::Type type = instruction->GetType();
+ LocationSummary* locations = instruction->GetLocations();
+
+ switch (type) {
+ case Primitive::kPrimInt: {
+ Register dst = locations->Out().AsRegister<Register>();
+ Register src = locations->InAt(0).AsRegister<Register>();
+ __ Subu(dst, ZERO, src);
+ break;
+ }
+ case Primitive::kPrimLong: {
+ Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
+ Register dst_low = locations->Out().AsRegisterPairLow<Register>();
+ Register src_high = locations->InAt(0).AsRegisterPairHigh<Register>();
+ Register src_low = locations->InAt(0).AsRegisterPairLow<Register>();
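+      // Compute 0 - src over the register pair: negate the low word, derive the
+      // borrow with Sltu (1 iff the negated low word is non-zero), and subtract
+      // the borrow from the negated high word.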
+ __ Subu(dst_low, ZERO, src_low);
+ __ Sltu(TMP, ZERO, dst_low);
+ __ Subu(dst_high, ZERO, src_high);
+ __ Subu(dst_high, dst_high, TMP);
+ break;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ FRegister dst = locations->Out().AsFpuRegister<FRegister>();
+ FRegister src = locations->InAt(0).AsFpuRegister<FRegister>();
+ if (type == Primitive::kPrimFloat) {
+ __ NegS(dst, src);
+ } else {
+ __ NegD(dst, src);
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected neg type " << type;
+ }
+}
+
+void LocationsBuilderMIPS::VisitNewArray(HNewArray* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+}
+
+void InstructionCodeGeneratorMIPS::VisitNewArray(HNewArray* instruction) {
+ InvokeRuntimeCallingConvention calling_convention;
+ Register current_method_register = calling_convention.GetRegisterAt(2);
+ __ Lw(current_method_register, SP, kCurrentMethodStackOffset);
+  // Move a uint16_t value to a register.
+ __ LoadConst32(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
+ codegen_->InvokeRuntime(
+ GetThreadOffset<kMipsWordSize>(instruction->GetEntrypoint()).Int32Value(),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr,
+ IsDirectEntrypoint(kQuickAllocArrayWithAccessCheck));
+ CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck,
+ void*, uint32_t, int32_t, ArtMethod*>();
+}
+
+void LocationsBuilderMIPS::VisitNewInstance(HNewInstance* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
+}
+
+void InstructionCodeGeneratorMIPS::VisitNewInstance(HNewInstance* instruction) {
+ InvokeRuntimeCallingConvention calling_convention;
+ Register current_method_register = calling_convention.GetRegisterAt(1);
+ __ Lw(current_method_register, SP, kCurrentMethodStackOffset);
+  // Move a uint16_t value to a register.
+ __ LoadConst32(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
+ codegen_->InvokeRuntime(
+ GetThreadOffset<kMipsWordSize>(instruction->GetEntrypoint()).Int32Value(),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr,
+ IsDirectEntrypoint(kQuickAllocObjectWithAccessCheck));
+ CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+}
+
+void LocationsBuilderMIPS::VisitNot(HNot* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorMIPS::VisitNot(HNot* instruction) {
+ Primitive::Type type = instruction->GetType();
+ LocationSummary* locations = instruction->GetLocations();
+
+ switch (type) {
+ case Primitive::kPrimInt: {
+ Register dst = locations->Out().AsRegister<Register>();
+ Register src = locations->InAt(0).AsRegister<Register>();
+ __ Nor(dst, src, ZERO);
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
+ Register dst_low = locations->Out().AsRegisterPairLow<Register>();
+ Register src_high = locations->InAt(0).AsRegisterPairHigh<Register>();
+ Register src_low = locations->InAt(0).AsRegisterPairLow<Register>();
+ __ Nor(dst_high, src_high, ZERO);
+ __ Nor(dst_low, src_low, ZERO);
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
+ }
+}
+
+void LocationsBuilderMIPS::VisitBooleanNot(HBooleanNot* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorMIPS::VisitBooleanNot(HBooleanNot* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ __ Xori(locations->Out().AsRegister<Register>(),
+ locations->InAt(0).AsRegister<Register>(),
+ 1);
+}
+
+void LocationsBuilderMIPS::VisitNullCheck(HNullCheck* instruction) {
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorMIPS::GenerateImplicitNullCheck(HNullCheck* instruction) {
+ if (codegen_->CanMoveNullCheckToUser(instruction)) {
+ return;
+ }
+ Location obj = instruction->GetLocations()->InAt(0);
+
+ __ Lw(ZERO, obj.AsRegister<Register>(), 0);
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+}
+
+void InstructionCodeGeneratorMIPS::GenerateExplicitNullCheck(HNullCheck* instruction) {
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathMIPS(instruction);
+ codegen_->AddSlowPath(slow_path);
+
+ Location obj = instruction->GetLocations()->InAt(0);
+
+ __ Beqz(obj.AsRegister<Register>(), slow_path->GetEntryLabel());
+}
+
+void InstructionCodeGeneratorMIPS::VisitNullCheck(HNullCheck* instruction) {
+ if (codegen_->IsImplicitNullCheckAllowed(instruction)) {
+ GenerateImplicitNullCheck(instruction);
+ } else {
+ GenerateExplicitNullCheck(instruction);
+ }
+}
+
+void LocationsBuilderMIPS::VisitOr(HOr* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitOr(HOr* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void LocationsBuilderMIPS::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unreachable";
+}
+
+void InstructionCodeGeneratorMIPS::VisitParallelMove(HParallelMove* instruction) {
+ codegen_->GetMoveResolver()->EmitNativeCode(instruction);
+}
+
+void LocationsBuilderMIPS::VisitParameterValue(HParameterValue* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
+ if (location.IsStackSlot()) {
+ location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
+ } else if (location.IsDoubleStackSlot()) {
+ location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
+ }
+ locations->SetOut(location);
+}
+
+void InstructionCodeGeneratorMIPS::VisitParameterValue(HParameterValue* instruction
+ ATTRIBUTE_UNUSED) {
+ // Nothing to do, the parameter is already at its location.
+}
+
+void LocationsBuilderMIPS::VisitCurrentMethod(HCurrentMethod* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
+}
+
+void InstructionCodeGeneratorMIPS::VisitCurrentMethod(HCurrentMethod* instruction
+ ATTRIBUTE_UNUSED) {
+ // Nothing to do, the method is already at its location.
+}
+
+void LocationsBuilderMIPS::VisitPhi(HPhi* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
+ locations->SetInAt(i, Location::Any());
+ }
+ locations->SetOut(Location::Any());
+}
+
+void InstructionCodeGeneratorMIPS::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unreachable";
+}
+
+void LocationsBuilderMIPS::VisitRem(HRem* rem) {
+ Primitive::Type type = rem->GetResultType();
+ LocationSummary::CallKind call_kind =
+ (type == Primitive::kPrimInt) ? LocationSummary::kNoCall : LocationSummary::kCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
+
+ switch (type) {
+ case Primitive::kPrimInt:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimLong: {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(1, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
+ locations->SetOut(calling_convention.GetReturnLocation(type));
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
+ locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
+ locations->SetOut(calling_convention.GetReturnLocation(type));
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected rem type " << type;
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitRem(HRem* instruction) {
+ Primitive::Type type = instruction->GetType();
+ LocationSummary* locations = instruction->GetLocations();
+ bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
+
+ switch (type) {
+ case Primitive::kPrimInt: {
+ Register dst = locations->Out().AsRegister<Register>();
+ Register lhs = locations->InAt(0).AsRegister<Register>();
+ Register rhs = locations->InAt(1).AsRegister<Register>();
+ if (isR6) {
+ __ ModR6(dst, lhs, rhs);
+ } else {
+ __ ModR2(dst, lhs, rhs);
+ }
+ break;
+ }
+ case Primitive::kPrimLong: {
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLmod),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr,
+ IsDirectEntrypoint(kQuickLmod));
+ CheckEntrypointTypes<kQuickLmod, int64_t, int64_t, int64_t>();
+ break;
+ }
+ case Primitive::kPrimFloat: {
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmodf),
+ instruction, instruction->GetDexPc(),
+ nullptr,
+ IsDirectEntrypoint(kQuickFmodf));
+      CheckEntrypointTypes<kQuickFmodf, float, float, float>();
+ break;
+ }
+ case Primitive::kPrimDouble: {
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmod),
+ instruction, instruction->GetDexPc(),
+ nullptr,
+ IsDirectEntrypoint(kQuickFmod));
+      CheckEntrypointTypes<kQuickFmod, double, double, double>();
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected rem type " << type;
+ }
+}
+
+void LocationsBuilderMIPS::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
+ memory_barrier->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
+ GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
+}
+
+void LocationsBuilderMIPS::VisitReturn(HReturn* ret) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret);
+ Primitive::Type return_type = ret->InputAt(0)->GetType();
+ locations->SetInAt(0, MipsReturnLocation(return_type));
+}
+
+void InstructionCodeGeneratorMIPS::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
+ codegen_->GenerateFrameExit();
+}
+
+void LocationsBuilderMIPS::VisitReturnVoid(HReturnVoid* ret) {
+ ret->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
+ codegen_->GenerateFrameExit();
+}
+
+void LocationsBuilderMIPS::VisitShl(HShl* shl) {
+ HandleShift(shl);
+}
+
+void InstructionCodeGeneratorMIPS::VisitShl(HShl* shl) {
+ HandleShift(shl);
+}
+
+void LocationsBuilderMIPS::VisitShr(HShr* shr) {
+ HandleShift(shr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitShr(HShr* shr) {
+ HandleShift(shr);
+}
+
+void LocationsBuilderMIPS::VisitStoreLocal(HStoreLocal* store) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
+ Primitive::Type field_type = store->InputAt(1)->GetType();
+ switch (field_type) {
+ case Primitive::kPrimNot:
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
+ break;
+
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented local type " << field_type;
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
+}
+
+void LocationsBuilderMIPS::VisitSub(HSub* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitSub(HSub* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void LocationsBuilderMIPS::VisitStaticFieldGet(HStaticFieldGet* instruction) {
+ HandleFieldGet(instruction, instruction->GetFieldInfo());
+}
+
+void InstructionCodeGeneratorMIPS::VisitStaticFieldGet(HStaticFieldGet* instruction) {
+ HandleFieldGet(instruction, instruction->GetFieldInfo(), instruction->GetDexPc());
+}
+
+void LocationsBuilderMIPS::VisitStaticFieldSet(HStaticFieldSet* instruction) {
+ HandleFieldSet(instruction, instruction->GetFieldInfo());
+}
+
+void InstructionCodeGeneratorMIPS::VisitStaticFieldSet(HStaticFieldSet* instruction) {
+ HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetDexPc());
+}
+
+void LocationsBuilderMIPS::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionMIPS calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(instruction,
+ instruction->GetFieldType(),
+ calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionMIPS calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderMIPS::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionMIPS calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(instruction,
+ instruction->GetFieldType(),
+ calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionMIPS calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderMIPS::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionMIPS calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(instruction,
+ instruction->GetFieldType(),
+ calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionMIPS calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderMIPS::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionMIPS calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(instruction,
+ instruction->GetFieldType(),
+ calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionMIPS calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderMIPS::VisitSuspendCheck(HSuspendCheck* instruction) {
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+}
+
+void InstructionCodeGeneratorMIPS::VisitSuspendCheck(HSuspendCheck* instruction) {
+ HBasicBlock* block = instruction->GetBlock();
+ if (block->GetLoopInformation() != nullptr) {
+ DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
+ // The back edge will generate the suspend check.
+ return;
+ }
+ if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
+ // The goto will generate the suspend check.
+ return;
+ }
+ GenerateSuspendCheck(instruction, nullptr);
+}
+
+void LocationsBuilderMIPS::VisitTemporary(HTemporary* temp) {
+ temp->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
+ // Nothing to do, this is driven by the code generator.
+}
+
+void LocationsBuilderMIPS::VisitThrow(HThrow* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorMIPS::VisitThrow(HThrow* instruction) {
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pDeliverException),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr,
+ IsDirectEntrypoint(kQuickDeliverException));
+ CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
+}
+
+void LocationsBuilderMIPS::VisitTypeConversion(HTypeConversion* conversion) {
+ Primitive::Type input_type = conversion->GetInputType();
+ Primitive::Type result_type = conversion->GetResultType();
+ DCHECK_NE(input_type, result_type);
+
+ if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
+ (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
+ LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
+ }
+
+ LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
+ if ((Primitive::IsFloatingPointType(result_type) && input_type == Primitive::kPrimLong) ||
+ (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type))) {
+ call_kind = LocationSummary::kCall;
+ }
+
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
+
+ if (call_kind == LocationSummary::kNoCall) {
+ if (Primitive::IsFloatingPointType(input_type)) {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ } else {
+ locations->SetInAt(0, Location::RequiresRegister());
+ }
+
+ if (Primitive::IsFloatingPointType(result_type)) {
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ } else {
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ }
+ } else {
+ InvokeRuntimeCallingConvention calling_convention;
+
+ if (Primitive::IsFloatingPointType(input_type)) {
+ locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
+ } else {
+ DCHECK_EQ(input_type, Primitive::kPrimLong);
+ locations->SetInAt(0, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
+ }
+
+ locations->SetOut(calling_convention.GetReturnLocation(result_type));
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitTypeConversion(HTypeConversion* conversion) {
+ LocationSummary* locations = conversion->GetLocations();
+ Primitive::Type result_type = conversion->GetResultType();
+ Primitive::Type input_type = conversion->GetInputType();
+ bool has_sign_extension = codegen_->GetInstructionSetFeatures().IsMipsIsaRevGreaterThanEqual2();
+
+ DCHECK_NE(input_type, result_type);
+
+ if (result_type == Primitive::kPrimLong && Primitive::IsIntegralType(input_type)) {
+ Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
+ Register dst_low = locations->Out().AsRegisterPairLow<Register>();
+ Register src = locations->InAt(0).AsRegister<Register>();
+
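+    // Sign-extend the 32-bit value into the register pair: copy it into the low
+    // word and fill the high word with the sign bit (arithmetic shift by 31).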
+ __ Move(dst_low, src);
+ __ Sra(dst_high, src, 31);
+ } else if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) {
+ Register dst = locations->Out().AsRegister<Register>();
+ Register src = (input_type == Primitive::kPrimLong)
+ ? locations->InAt(0).AsRegisterPairLow<Register>()
+ : locations->InAt(0).AsRegister<Register>();
+
+ switch (result_type) {
+ case Primitive::kPrimChar:
+ __ Andi(dst, src, 0xFFFF);
+ break;
+ case Primitive::kPrimByte:
+ if (has_sign_extension) {
+ __ Seb(dst, src);
+ } else {
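+            // Pre-R2 CPUs have no Seb: shift the byte to the top of the register
+            // and arithmetic-shift it back down to sign-extend.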
+ __ Sll(dst, src, 24);
+ __ Sra(dst, dst, 24);
+ }
+ break;
+ case Primitive::kPrimShort:
+ if (has_sign_extension) {
+ __ Seh(dst, src);
+ } else {
+ __ Sll(dst, src, 16);
+ __ Sra(dst, dst, 16);
+ }
+ break;
+ case Primitive::kPrimInt:
+ __ Move(dst, src);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ } else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) {
+ if (input_type != Primitive::kPrimLong) {
+ Register src = locations->InAt(0).AsRegister<Register>();
+ FRegister dst = locations->Out().AsFpuRegister<FRegister>();
+ __ Mtc1(src, FTMP);
+ if (result_type == Primitive::kPrimFloat) {
+ __ Cvtsw(dst, FTMP);
+ } else {
+ __ Cvtdw(dst, FTMP);
+ }
+ } else {
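+      // There is no direct long-to-FP conversion here; use the pL2f/pL2d
+      // runtime entrypoints.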
+ int32_t entry_offset = (result_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pL2f)
+ : QUICK_ENTRY_POINT(pL2d);
+ bool direct = (result_type == Primitive::kPrimFloat) ? IsDirectEntrypoint(kQuickL2f)
+ : IsDirectEntrypoint(kQuickL2d);
+ codegen_->InvokeRuntime(entry_offset,
+ conversion,
+ conversion->GetDexPc(),
+ nullptr,
+ direct);
+ if (result_type == Primitive::kPrimFloat) {
+ CheckEntrypointTypes<kQuickL2f, float, int64_t>();
+ } else {
+ CheckEntrypointTypes<kQuickL2d, double, int64_t>();
+ }
+ }
+ } else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) {
+ CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
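+    // FP-to-integral conversions are done via runtime entrypoints; the *z
+    // variants truncate toward zero.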
+ int32_t entry_offset;
+ bool direct;
+ if (result_type != Primitive::kPrimLong) {
+ entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2iz)
+ : QUICK_ENTRY_POINT(pD2iz);
+      direct = (input_type == Primitive::kPrimFloat) ? IsDirectEntrypoint(kQuickF2iz)
+                                                     : IsDirectEntrypoint(kQuickD2iz);
+ } else {
+ entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2l)
+ : QUICK_ENTRY_POINT(pD2l);
+      direct = (input_type == Primitive::kPrimFloat) ? IsDirectEntrypoint(kQuickF2l)
+                                                     : IsDirectEntrypoint(kQuickD2l);
+ }
+ codegen_->InvokeRuntime(entry_offset,
+ conversion,
+ conversion->GetDexPc(),
+ nullptr,
+ direct);
+ if (result_type != Primitive::kPrimLong) {
+ if (input_type == Primitive::kPrimFloat) {
+ CheckEntrypointTypes<kQuickF2iz, int32_t, float>();
+ } else {
+ CheckEntrypointTypes<kQuickD2iz, int32_t, double>();
+ }
+ } else {
+ if (input_type == Primitive::kPrimFloat) {
+ CheckEntrypointTypes<kQuickF2l, int64_t, float>();
+ } else {
+ CheckEntrypointTypes<kQuickD2l, int64_t, double>();
+ }
+ }
+ } else if (Primitive::IsFloatingPointType(result_type) &&
+ Primitive::IsFloatingPointType(input_type)) {
+ FRegister dst = locations->Out().AsFpuRegister<FRegister>();
+ FRegister src = locations->InAt(0).AsFpuRegister<FRegister>();
+ if (result_type == Primitive::kPrimFloat) {
+ __ Cvtsd(dst, src);
+ } else {
+ __ Cvtds(dst, src);
+ }
+ } else {
+ LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
+ << " to " << result_type;
+ }
+}
+
+void LocationsBuilderMIPS::VisitUShr(HUShr* ushr) {
+ HandleShift(ushr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitUShr(HUShr* ushr) {
+ HandleShift(ushr);
+}
+
+void LocationsBuilderMIPS::VisitXor(HXor* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitXor(HXor* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void LocationsBuilderMIPS::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
+ // Nothing to do, this should be removed during prepare for register allocator.
+ LOG(FATAL) << "Unreachable";
+}
+
+void InstructionCodeGeneratorMIPS::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
+ // Nothing to do, this should be removed during prepare for register allocator.
+ LOG(FATAL) << "Unreachable";
+}
+
+void LocationsBuilderMIPS::VisitEqual(HEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitEqual(HEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitNotEqual(HNotEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitNotEqual(HNotEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitLessThan(HLessThan* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitLessThan(HLessThan* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitGreaterThan(HGreaterThan* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitGreaterThan(HGreaterThan* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitBelow(HBelow* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitBelow(HBelow* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitAbove(HAbove* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitAbove(HAbove* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitFakeString(HFakeString* instruction) {
+ DCHECK(codegen_->IsBaseline());
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant()));
+}
+
+void InstructionCodeGeneratorMIPS::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) {
+ DCHECK(codegen_->IsBaseline());
+ // Will be generated at use site.
+}
+
+void LocationsBuilderMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr) {
+ int32_t lower_bound = switch_instr->GetStartValue();
+ int32_t num_entries = switch_instr->GetNumEntries();
+ LocationSummary* locations = switch_instr->GetLocations();
+ Register value_reg = locations->InAt(0).AsRegister<Register>();
+ HBasicBlock* default_block = switch_instr->GetDefaultBlock();
+
+ // Create a set of compare/jumps.
+ const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
+ for (int32_t i = 0; i < num_entries; ++i) {
+ int32_t case_value = lower_bound + i;
+ MipsLabel* successor_label = codegen_->GetLabelOf(successors[i]);
+ if (case_value == 0) {
+ __ Beqz(value_reg, successor_label);
+ } else {
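+      // MIPS has no compare-with-immediate branch; materialize the case
+      // value in TMP first.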
+ __ LoadConst32(TMP, case_value);
+ __ Beq(value_reg, TMP, successor_label);
+ }
+ }
+
+ // Insert the default branch for every other value.
+ if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) {
+ __ B(codegen_->GetLabelOf(default_block));
+ }
+}
+
+void LocationsBuilderMIPS::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
+  // The trampoline uses the same calling convention as the dex calling convention,
+  // except that instead of loading arg0/A0 with the target Method*, arg0/A0 will
+  // contain the method_idx.
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorMIPS::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
+ codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
+}
+
+#undef __
+#undef QUICK_ENTRY_POINT
+
+} // namespace mips
+} // namespace art
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
new file mode 100644
index 0000000000..a571e76933
--- /dev/null
+++ b/compiler/optimizing/code_generator_mips.h
@@ -0,0 +1,362 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS_H_
+#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS_H_
+
+#include "code_generator.h"
+#include "dex/compiler_enums.h"
+#include "driver/compiler_options.h"
+#include "nodes.h"
+#include "parallel_move_resolver.h"
+#include "utils/mips/assembler_mips.h"
+
+namespace art {
+namespace mips {
+
+// InvokeDexCallingConvention registers
+
+static constexpr Register kParameterCoreRegisters[] =
+ { A1, A2, A3 };
+static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
+
+static constexpr FRegister kParameterFpuRegisters[] =
+ { F12, F14 };
+static constexpr size_t kParameterFpuRegistersLength = arraysize(kParameterFpuRegisters);
+
+
+// InvokeRuntimeCallingConvention registers
+
+static constexpr Register kRuntimeParameterCoreRegisters[] =
+ { A0, A1, A2, A3 };
+static constexpr size_t kRuntimeParameterCoreRegistersLength =
+ arraysize(kRuntimeParameterCoreRegisters);
+
+static constexpr FRegister kRuntimeParameterFpuRegisters[] =
+    { F12, F14 };
+static constexpr size_t kRuntimeParameterFpuRegistersLength =
+ arraysize(kRuntimeParameterFpuRegisters);
+
+
+static constexpr Register kCoreCalleeSaves[] =
+ { S0, S1, S2, S3, S4, S5, S6, S7, FP, RA };
+static constexpr FRegister kFpuCalleeSaves[] =
+ { F20, F22, F24, F26, F28, F30 };
+
+
+class CodeGeneratorMIPS;
+
+class InvokeDexCallingConvention : public CallingConvention<Register, FRegister> {
+ public:
+ InvokeDexCallingConvention()
+ : CallingConvention(kParameterCoreRegisters,
+ kParameterCoreRegistersLength,
+ kParameterFpuRegisters,
+ kParameterFpuRegistersLength,
+ kMipsPointerSize) {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
+};
+
+class InvokeDexCallingConventionVisitorMIPS : public InvokeDexCallingConventionVisitor {
+ public:
+ InvokeDexCallingConventionVisitorMIPS() {}
+ virtual ~InvokeDexCallingConventionVisitorMIPS() {}
+
+ Location GetNextLocation(Primitive::Type type) OVERRIDE;
+ Location GetReturnLocation(Primitive::Type type) const OVERRIDE;
+ Location GetMethodLocation() const OVERRIDE;
+
+ private:
+ InvokeDexCallingConvention calling_convention;
+
+ DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorMIPS);
+};
+
+class InvokeRuntimeCallingConvention : public CallingConvention<Register, FRegister> {
+ public:
+ InvokeRuntimeCallingConvention()
+ : CallingConvention(kRuntimeParameterCoreRegisters,
+ kRuntimeParameterCoreRegistersLength,
+ kRuntimeParameterFpuRegisters,
+ kRuntimeParameterFpuRegistersLength,
+ kMipsPointerSize) {}
+
+ Location GetReturnLocation(Primitive::Type return_type);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
+};
+
+class FieldAccessCallingConventionMIPS : public FieldAccessCallingConvention {
+ public:
+ FieldAccessCallingConventionMIPS() {}
+
+ Location GetObjectLocation() const OVERRIDE {
+ return Location::RegisterLocation(A1);
+ }
+ Location GetFieldIndexLocation() const OVERRIDE {
+ return Location::RegisterLocation(A0);
+ }
+ Location GetReturnLocation(Primitive::Type type) const OVERRIDE {
+ return Primitive::Is64BitType(type)
+ ? Location::RegisterPairLocation(V0, V1)
+ : Location::RegisterLocation(V0);
+ }
+ Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
+ return Primitive::Is64BitType(type)
+ ? Location::RegisterPairLocation(A2, A3)
+ : (is_instance ? Location::RegisterLocation(A2) : Location::RegisterLocation(A1));
+ }
+ Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ return Location::FpuRegisterLocation(F0);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionMIPS);
+};
+
+class ParallelMoveResolverMIPS : public ParallelMoveResolverWithSwap {
+ public:
+ ParallelMoveResolverMIPS(ArenaAllocator* allocator, CodeGeneratorMIPS* codegen)
+ : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
+
+ void EmitMove(size_t index) OVERRIDE;
+ void EmitSwap(size_t index) OVERRIDE;
+ void SpillScratch(int reg) OVERRIDE;
+ void RestoreScratch(int reg) OVERRIDE;
+
+ void Exchange(int index1, int index2, bool double_slot);
+
+ MipsAssembler* GetAssembler() const;
+
+ private:
+ CodeGeneratorMIPS* const codegen_;
+
+ DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverMIPS);
+};
+
+class SlowPathCodeMIPS : public SlowPathCode {
+ public:
+ SlowPathCodeMIPS() : entry_label_(), exit_label_() {}
+
+ MipsLabel* GetEntryLabel() { return &entry_label_; }
+ MipsLabel* GetExitLabel() { return &exit_label_; }
+
+ private:
+ MipsLabel entry_label_;
+ MipsLabel exit_label_;
+
+ DISALLOW_COPY_AND_ASSIGN(SlowPathCodeMIPS);
+};
+
+class LocationsBuilderMIPS : public HGraphVisitor {
+ public:
+ LocationsBuilderMIPS(HGraph* graph, CodeGeneratorMIPS* codegen)
+ : HGraphVisitor(graph), codegen_(codegen) {}
+
+#define DECLARE_VISIT_INSTRUCTION(name, super) \
+ void Visit##name(H##name* instr) OVERRIDE;
+
+ FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
+ FOR_EACH_CONCRETE_INSTRUCTION_MIPS(DECLARE_VISIT_INSTRUCTION)
+
+#undef DECLARE_VISIT_INSTRUCTION
+
+ void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
+ << " (id " << instruction->GetId() << ")";
+ }
+
+ private:
+ void HandleInvoke(HInvoke* invoke);
+ void HandleBinaryOp(HBinaryOperation* operation);
+ void HandleShift(HBinaryOperation* operation);
+ void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
+ void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
+
+ InvokeDexCallingConventionVisitorMIPS parameter_visitor_;
+
+ CodeGeneratorMIPS* const codegen_;
+
+ DISALLOW_COPY_AND_ASSIGN(LocationsBuilderMIPS);
+};
+
+class InstructionCodeGeneratorMIPS : public HGraphVisitor {
+ public:
+ InstructionCodeGeneratorMIPS(HGraph* graph, CodeGeneratorMIPS* codegen);
+
+#define DECLARE_VISIT_INSTRUCTION(name, super) \
+ void Visit##name(H##name* instr) OVERRIDE;
+
+ FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
+ FOR_EACH_CONCRETE_INSTRUCTION_MIPS(DECLARE_VISIT_INSTRUCTION)
+
+#undef DECLARE_VISIT_INSTRUCTION
+
+ void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
+ << " (id " << instruction->GetId() << ")";
+ }
+
+ MipsAssembler* GetAssembler() const { return assembler_; }
+
+ private:
+ void GenerateClassInitializationCheck(SlowPathCodeMIPS* slow_path, Register class_reg);
+ void GenerateMemoryBarrier(MemBarrierKind kind);
+ void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
+ void HandleBinaryOp(HBinaryOperation* operation);
+ void HandleShift(HBinaryOperation* operation);
+ void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info, uint32_t dex_pc);
+ void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info, uint32_t dex_pc);
+ void GenerateImplicitNullCheck(HNullCheck* instruction);
+ void GenerateExplicitNullCheck(HNullCheck* instruction);
+ void GenerateTestAndBranch(HInstruction* instruction,
+ MipsLabel* true_target,
+ MipsLabel* false_target,
+ MipsLabel* always_true_target);
+ void HandleGoto(HInstruction* got, HBasicBlock* successor);
+
+ MipsAssembler* const assembler_;
+ CodeGeneratorMIPS* const codegen_;
+
+ DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorMIPS);
+};
+
+class CodeGeneratorMIPS : public CodeGenerator {
+ public:
+ CodeGeneratorMIPS(HGraph* graph,
+ const MipsInstructionSetFeatures& isa_features,
+ const CompilerOptions& compiler_options,
+ OptimizingCompilerStats* stats = nullptr);
+ virtual ~CodeGeneratorMIPS() {}
+
+ void GenerateFrameEntry() OVERRIDE;
+ void GenerateFrameExit() OVERRIDE;
+
+ void Bind(HBasicBlock* block) OVERRIDE;
+
+ void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
+ void Move32(Location destination, Location source);
+ void Move64(Location destination, Location source);
+ void MoveConstant(Location location, HConstant* c);
+
+ size_t GetWordSize() const OVERRIDE { return kMipsWordSize; }
+
+ size_t GetFloatingPointSpillSlotSize() const OVERRIDE { return kMipsDoublewordSize; }
+
+ uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+ return assembler_.GetLabelLocation(GetLabelOf(block));
+ }
+
+ HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
+ HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
+ MipsAssembler* GetAssembler() OVERRIDE { return &assembler_; }
+ const MipsAssembler& GetAssembler() const OVERRIDE { return assembler_; }
+
+ void MarkGCCard(Register object, Register value);
+
+ // Register allocation.
+
+ void SetupBlockedRegisters(bool is_baseline) const OVERRIDE;
+ // AllocateFreeRegister() is only used when allocating registers locally
+ // during CompileBaseline().
+ Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
+
+ Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
+
+ size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id);
+ size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id);
+ size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id);
+ size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id);
+
+ void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
+ void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+
+ // Blocks all register pairs made out of blocked core registers.
+ void UpdateBlockedPairRegisters() const;
+
+ InstructionSet GetInstructionSet() const OVERRIDE { return InstructionSet::kMips; }
+
+ const MipsInstructionSetFeatures& GetInstructionSetFeatures() const {
+ return isa_features_;
+ }
+
+ MipsLabel* GetLabelOf(HBasicBlock* block) const {
+ return CommonGetLabelOf<MipsLabel>(block_labels_, block);
+ }
+
+ void Initialize() OVERRIDE {
+ block_labels_ = CommonInitializeLabels<MipsLabel>();
+ }
+
+ void Finalize(CodeAllocator* allocator) OVERRIDE;
+
+ // Code generation helpers.
+
+ void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
+
+ void MoveConstant(Location destination, int32_t value);
+
+ void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+
+ // Generate code to invoke a runtime entry point.
+ void InvokeRuntime(QuickEntrypointEnum entrypoint,
+ HInstruction* instruction,
+ uint32_t dex_pc,
+ SlowPathCode* slow_path) OVERRIDE;
+
+ void InvokeRuntime(int32_t offset,
+ HInstruction* instruction,
+ uint32_t dex_pc,
+ SlowPathCode* slow_path,
+ bool is_direct_entrypoint);
+
+ ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }
+
+ bool NeedsTwoRegisters(Primitive::Type type) const {
+ return type == Primitive::kPrimLong;
+ }
+
+ void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
+ void GenerateVirtualCall(HInvokeVirtual* invoke ATTRIBUTE_UNUSED,
+ Location temp ATTRIBUTE_UNUSED) OVERRIDE {
+ UNIMPLEMENTED(FATAL) << "Not implemented on MIPS";
+ }
+
+ void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED,
+ Primitive::Type type ATTRIBUTE_UNUSED) OVERRIDE {
+ UNIMPLEMENTED(FATAL) << "Not implemented on MIPS";
+ }
+
+ private:
+ // Labels for each block that will be compiled.
+ MipsLabel* block_labels_;
+ MipsLabel frame_entry_label_;
+ LocationsBuilderMIPS location_builder_;
+ InstructionCodeGeneratorMIPS instruction_visitor_;
+ ParallelMoveResolverMIPS move_resolver_;
+ MipsAssembler assembler_;
+ const MipsInstructionSetFeatures& isa_features_;
+
+ DISALLOW_COPY_AND_ASSIGN(CodeGeneratorMIPS);
+};
+
+} // namespace mips
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS_H_
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 1a08503cf9..5f78285b69 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -342,8 +342,7 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
- : locations->Out();
+ Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0) : locations->Out();
uint32_t dex_pc = instruction_->GetDexPc();
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -1778,6 +1777,9 @@ void InstructionCodeGeneratorMIPS64::VisitCondition(HCondition* instruction) {
return;
}
+ // TODO: generalize to long
+ DCHECK_NE(instruction->InputAt(0)->GetType(), Primitive::kPrimLong);
+
LocationSummary* locations = instruction->GetLocations();
GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
@@ -1855,6 +1857,48 @@ void InstructionCodeGeneratorMIPS64::VisitCondition(HCondition* instruction) {
}
}
break;
+
+ case kCondB:
+ case kCondAE:
+ if (use_imm && 0 <= rhs_imm && rhs_imm <= 0x7fff) {
+ __ Sltiu(dst, lhs, rhs_imm);
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ __ Sltu(dst, lhs, rhs_reg);
+ }
+ if (if_cond == kCondAE) {
+ // Simulate lhs >= rhs via !(lhs < rhs) since there's
+ // only the sltu instruction but no sgeu.
+ __ Xori(dst, dst, 1);
+ }
+ break;
+
+ case kCondBE:
+ case kCondA:
+ if (use_imm && 0 <= rhs_imm && rhs_imm <= 0x7ffe) {
+ // Simulate lhs <= rhs via lhs < rhs + 1.
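+        // The 0x7ffe bound keeps rhs_imm + 1 within sltiu's non-negative
+        // immediate range.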
+ __ Sltiu(dst, lhs, rhs_imm + 1);
+ if (if_cond == kCondA) {
+ // Simulate lhs > rhs via !(lhs <= rhs) since there's
+ // only the sltiu instruction but no sgtiu.
+ __ Xori(dst, dst, 1);
+ }
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ __ Sltu(dst, rhs_reg, lhs);
+ if (if_cond == kCondBE) {
+ // Simulate lhs <= rhs via !(rhs < lhs) since there's
+ // only the sltu instruction but no sleu.
+ __ Xori(dst, dst, 1);
+ }
+ }
+ break;
}
}
@@ -2072,6 +2116,17 @@ void InstructionCodeGeneratorMIPS64::GenerateTestAndBranch(HInstruction* instruc
case kCondGT:
__ Bgtzc(lhs, true_target);
break;
+ case kCondB:
+ break; // always false
+ case kCondBE:
+ __ Beqzc(lhs, true_target); // <= 0 if zero
+ break;
+ case kCondA:
+ __ Bnezc(lhs, true_target); // > 0 if non-zero
+ break;
+ case kCondAE:
+ __ B(true_target); // always true
+ break;
}
} else {
if (use_imm) {
@@ -2086,12 +2141,16 @@ void InstructionCodeGeneratorMIPS64::GenerateTestAndBranch(HInstruction* instruc
case kCondEQ:
case kCondGE:
case kCondLE:
+ case kCondBE:
+ case kCondAE:
// if lhs == rhs for a positive condition, then it is a branch
__ B(true_target);
break;
case kCondNE:
case kCondLT:
case kCondGT:
+ case kCondB:
+ case kCondA:
// if lhs == rhs for a negative condition, then it is a NOP
break;
}
@@ -2115,6 +2174,18 @@ void InstructionCodeGeneratorMIPS64::GenerateTestAndBranch(HInstruction* instruc
case kCondGT:
__ Bltc(rhs_reg, lhs, true_target);
break;
+ case kCondB:
+ __ Bltuc(lhs, rhs_reg, true_target);
+ break;
+ case kCondAE:
+ __ Bgeuc(lhs, rhs_reg, true_target);
+ break;
+ case kCondBE:
+ __ Bgeuc(rhs_reg, lhs, true_target);
+ break;
+ case kCondA:
+ __ Bltuc(rhs_reg, lhs, true_target);
+ break;
}
}
}
@@ -2151,8 +2222,7 @@ void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
HInstruction* cond = deoptimize->InputAt(0);
- DCHECK(cond->IsCondition());
- if (cond->AsCondition()->NeedsMaterialization()) {
+ if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
locations->SetInAt(0, Location::RequiresRegister());
}
}
@@ -3462,6 +3532,38 @@ void InstructionCodeGeneratorMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual
VisitCondition(comp);
}
+void LocationsBuilderMIPS64::VisitBelow(HBelow* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitBelow(HBelow* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS64::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS64::VisitAbove(HAbove* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitAbove(HAbove* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS64::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ VisitCondition(comp);
+}
+
void LocationsBuilderMIPS64::VisitFakeString(HFakeString* instruction) {
DCHECK(codegen_->IsBaseline());
LocationSummary* locations =
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 7799437235..df3fc0d1e9 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -217,9 +217,6 @@ class InstructionCodeGeneratorMIPS64 : public HGraphVisitor {
Mips64Assembler* GetAssembler() const { return assembler_; }
private:
- // Generate code for the given suspend check. If not null, `successor`
- // is the block to branch to if the suspend check is not needed, and after
- // the suspend call.
void GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path, GpuRegister class_reg);
void GenerateMemoryBarrier(MemBarrierKind kind);
void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index f8be21a06e..963eec2529 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -428,7 +428,7 @@ class ArraySetSlowPathX86 : public SlowPathCode {
#undef __
#define __ down_cast<X86Assembler*>(GetAssembler())->
-inline Condition X86SignedCondition(IfCondition cond) {
+inline Condition X86Condition(IfCondition cond) {
switch (cond) {
case kCondEQ: return kEqual;
case kCondNE: return kNotEqual;
@@ -436,19 +436,30 @@ inline Condition X86SignedCondition(IfCondition cond) {
case kCondLE: return kLessEqual;
case kCondGT: return kGreater;
case kCondGE: return kGreaterEqual;
+ case kCondB: return kBelow;
+ case kCondBE: return kBelowEqual;
+ case kCondA: return kAbove;
+ case kCondAE: return kAboveEqual;
}
LOG(FATAL) << "Unreachable";
UNREACHABLE();
}
+// Maps signed condition to unsigned condition and FP condition to x86 name.
inline Condition X86UnsignedOrFPCondition(IfCondition cond) {
switch (cond) {
case kCondEQ: return kEqual;
case kCondNE: return kNotEqual;
+ // Signed to unsigned, and FP to x86 name.
case kCondLT: return kBelow;
case kCondLE: return kBelowEqual;
case kCondGT: return kAbove;
case kCondGE: return kAboveEqual;
+    // Unsigned conditions remain unchanged.
+ case kCondB: return kBelow;
+ case kCondBE: return kBelowEqual;
+ case kCondA: return kAbove;
+ case kCondAE: return kAboveEqual;
}
LOG(FATAL) << "Unreachable";
UNREACHABLE();
@@ -521,7 +532,8 @@ CodeGeneratorX86::CodeGeneratorX86(HGraph* graph,
move_resolver_(graph->GetArena(), this),
isa_features_(isa_features),
method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
// Use a fake return address register to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
}
@@ -1040,8 +1052,7 @@ void LocationsBuilderX86::VisitExit(HExit* exit) {
exit->SetLocations(nullptr);
}
-void InstructionCodeGeneratorX86::VisitExit(HExit* exit) {
- UNUSED(exit);
+void InstructionCodeGeneratorX86::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}
void InstructionCodeGeneratorX86::GenerateFPJumps(HCondition* cond,
@@ -1067,7 +1078,7 @@ void InstructionCodeGeneratorX86::GenerateLongComparesAndJumps(HCondition* cond,
Register left_low = left.AsRegisterPairLow<Register>();
IfCondition true_high_cond = if_cond;
IfCondition false_high_cond = cond->GetOppositeCondition();
- Condition final_condition = X86UnsignedOrFPCondition(if_cond);
+ Condition final_condition = X86UnsignedOrFPCondition(if_cond); // unsigned on lower part
// Set the conditions for the test, remembering that == needs to be
// decided using the low words.
@@ -1088,6 +1099,18 @@ void InstructionCodeGeneratorX86::GenerateLongComparesAndJumps(HCondition* cond,
case kCondGE:
true_high_cond = kCondGT;
break;
+ case kCondB:
+ false_high_cond = kCondA;
+ break;
+ case kCondBE:
+ true_high_cond = kCondB;
+ break;
+ case kCondA:
+ false_high_cond = kCondB;
+ break;
+ case kCondAE:
+ true_high_cond = kCondA;
+ break;
}
if (right.IsConstant()) {
@@ -1101,12 +1124,12 @@ void InstructionCodeGeneratorX86::GenerateLongComparesAndJumps(HCondition* cond,
__ cmpl(left_high, Immediate(val_high));
}
if (if_cond == kCondNE) {
- __ j(X86SignedCondition(true_high_cond), true_label);
+ __ j(X86Condition(true_high_cond), true_label);
} else if (if_cond == kCondEQ) {
- __ j(X86SignedCondition(false_high_cond), false_label);
+ __ j(X86Condition(false_high_cond), false_label);
} else {
- __ j(X86SignedCondition(true_high_cond), true_label);
- __ j(X86SignedCondition(false_high_cond), false_label);
+ __ j(X86Condition(true_high_cond), true_label);
+ __ j(X86Condition(false_high_cond), false_label);
}
// Must be equal high, so compare the lows.
if (val_low == 0) {
@@ -1120,12 +1143,12 @@ void InstructionCodeGeneratorX86::GenerateLongComparesAndJumps(HCondition* cond,
__ cmpl(left_high, right_high);
if (if_cond == kCondNE) {
- __ j(X86SignedCondition(true_high_cond), true_label);
+ __ j(X86Condition(true_high_cond), true_label);
} else if (if_cond == kCondEQ) {
- __ j(X86SignedCondition(false_high_cond), false_label);
+ __ j(X86Condition(false_high_cond), false_label);
} else {
- __ j(X86SignedCondition(true_high_cond), true_label);
- __ j(X86SignedCondition(false_high_cond), false_label);
+ __ j(X86Condition(true_high_cond), true_label);
+ __ j(X86Condition(false_high_cond), false_label);
}
// Must be equal high, so compare the lows.
__ cmpl(left_low, right_low);
@@ -1214,7 +1237,7 @@ void InstructionCodeGeneratorX86::GenerateTestAndBranch(HInstruction* instructio
}
__ j(kNotEqual, true_target);
} else {
- __ j(X86SignedCondition(cond->AsCondition()->GetCondition()), true_target);
+ __ j(X86Condition(cond->AsCondition()->GetCondition()), true_target);
}
} else {
// Condition has not been materialized, use its inputs as the
@@ -1247,7 +1270,7 @@ void InstructionCodeGeneratorX86::GenerateTestAndBranch(HInstruction* instructio
} else {
__ cmpl(lhs.AsRegister<Register>(), Address(ESP, rhs.GetStackIndex()));
}
- __ j(X86SignedCondition(cond->AsCondition()->GetCondition()), true_target);
+ __ j(X86Condition(cond->AsCondition()->GetCondition()), true_target);
}
}
if (false_target != nullptr) {
@@ -1283,8 +1306,7 @@ void LocationsBuilderX86::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
HInstruction* cond = deoptimize->InputAt(0);
- DCHECK(cond->IsCondition());
- if (cond->AsCondition()->NeedsMaterialization()) {
+ if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
locations->SetInAt(0, Location::Any());
}
}
@@ -1309,9 +1331,8 @@ void LocationsBuilderX86::VisitLoadLocal(HLoadLocal* local) {
local->SetLocations(nullptr);
}
-void InstructionCodeGeneratorX86::VisitLoadLocal(HLoadLocal* load) {
+void InstructionCodeGeneratorX86::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
- UNUSED(load);
}
void LocationsBuilderX86::VisitStoreLocal(HStoreLocal* store) {
@@ -1338,8 +1359,7 @@ void LocationsBuilderX86::VisitStoreLocal(HStoreLocal* store) {
}
}
-void InstructionCodeGeneratorX86::VisitStoreLocal(HStoreLocal* store) {
- UNUSED(store);
+void InstructionCodeGeneratorX86::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
}
void LocationsBuilderX86::VisitCondition(HCondition* cond) {
@@ -1405,7 +1425,7 @@ void InstructionCodeGeneratorX86::VisitCondition(HCondition* cond) {
} else {
__ cmpl(lhs.AsRegister<Register>(), Address(ESP, rhs.GetStackIndex()));
}
- __ setb(X86SignedCondition(cond->GetCondition()), reg);
+ __ setb(X86Condition(cond->GetCondition()), reg);
return;
}
case Primitive::kPrimLong:
@@ -1483,15 +1503,46 @@ void InstructionCodeGeneratorX86::VisitGreaterThanOrEqual(HGreaterThanOrEqual* c
VisitCondition(comp);
}
+void LocationsBuilderX86::VisitBelow(HBelow* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorX86::VisitBelow(HBelow* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderX86::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorX86::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderX86::VisitAbove(HAbove* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorX86::VisitAbove(HAbove* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderX86::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorX86::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ VisitCondition(comp);
+}
+
void LocationsBuilderX86::VisitIntConstant(HIntConstant* constant) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86::VisitIntConstant(HIntConstant* constant) {
+void InstructionCodeGeneratorX86::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86::VisitNullConstant(HNullConstant* constant) {
@@ -1500,9 +1551,8 @@ void LocationsBuilderX86::VisitNullConstant(HNullConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86::VisitNullConstant(HNullConstant* constant) {
+void InstructionCodeGeneratorX86::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86::VisitLongConstant(HLongConstant* constant) {
@@ -1511,9 +1561,8 @@ void LocationsBuilderX86::VisitLongConstant(HLongConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86::VisitLongConstant(HLongConstant* constant) {
+void InstructionCodeGeneratorX86::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86::VisitFloatConstant(HFloatConstant* constant) {
@@ -1522,9 +1571,8 @@ void LocationsBuilderX86::VisitFloatConstant(HFloatConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86::VisitFloatConstant(HFloatConstant* constant) {
+void InstructionCodeGeneratorX86::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86::VisitDoubleConstant(HDoubleConstant* constant) {
@@ -1533,9 +1581,8 @@ void LocationsBuilderX86::VisitDoubleConstant(HDoubleConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86::VisitDoubleConstant(HDoubleConstant* constant) {
+void InstructionCodeGeneratorX86::VisitDoubleConstant(HDoubleConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
@@ -1550,8 +1597,7 @@ void LocationsBuilderX86::VisitReturnVoid(HReturnVoid* ret) {
ret->SetLocations(nullptr);
}
-void InstructionCodeGeneratorX86::VisitReturnVoid(HReturnVoid* ret) {
- UNUSED(ret);
+void InstructionCodeGeneratorX86::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
codegen_->GenerateFrameExit();
}
@@ -3685,8 +3731,7 @@ void LocationsBuilderX86::VisitPhi(HPhi* instruction) {
locations->SetOut(Location::Any());
}
-void InstructionCodeGeneratorX86::VisitPhi(HPhi* instruction) {
- UNUSED(instruction);
+void InstructionCodeGeneratorX86::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unreachable";
}
@@ -4684,13 +4729,11 @@ void LocationsBuilderX86::VisitTemporary(HTemporary* temp) {
temp->SetLocations(nullptr);
}
-void InstructionCodeGeneratorX86::VisitTemporary(HTemporary* temp) {
+void InstructionCodeGeneratorX86::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
- UNUSED(temp);
}
-void LocationsBuilderX86::VisitParallelMove(HParallelMove* instruction) {
- UNUSED(instruction);
+void LocationsBuilderX86::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unreachable";
}
@@ -5613,15 +5656,13 @@ void InstructionCodeGeneratorX86::HandleBitwiseOperation(HBinaryOperation* instr
}
}
-void LocationsBuilderX86::VisitBoundType(HBoundType* instruction) {
+void LocationsBuilderX86::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
- UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
-void InstructionCodeGeneratorX86::VisitBoundType(HBoundType* instruction) {
+void InstructionCodeGeneratorX86::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
- UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
@@ -5669,6 +5710,51 @@ void InstructionCodeGeneratorX86::VisitPackedSwitch(HPackedSwitch* switch_instr)
}
}
+void LocationsBuilderX86::VisitX86PackedSwitch(HX86PackedSwitch* switch_instr) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+
+ // Constant area pointer.
+ locations->SetInAt(1, Location::RequiresRegister());
+
+ // And the temporary we need.
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86::VisitX86PackedSwitch(HX86PackedSwitch* switch_instr) {
+ int32_t lower_bound = switch_instr->GetStartValue();
+ int32_t num_entries = switch_instr->GetNumEntries();
+ LocationSummary* locations = switch_instr->GetLocations();
+ Register value_reg = locations->InAt(0).AsRegister<Register>();
+ HBasicBlock* default_block = switch_instr->GetDefaultBlock();
+
+  // The jump table is emitted in the constant area, addressed via the base register.
+ Register temp_reg = locations->GetTemp(0).AsRegister<Register>();
+ Register constant_area = locations->InAt(1).AsRegister<Register>();
+
+ // Remove the bias, if needed.
+ if (lower_bound != 0) {
+ __ leal(temp_reg, Address(value_reg, -lower_bound));
+ value_reg = temp_reg;
+ }
+
+ // Is the value in range?
+ DCHECK_GE(num_entries, 1);
+ __ cmpl(value_reg, Immediate(num_entries - 1));
+ __ j(kAbove, codegen_->GetLabelOf(default_block));
+
+ // We are in the range of the table.
+ // Load (target-constant_area) from the jump table, indexing by the value.
+ __ movl(temp_reg, codegen_->LiteralCaseTable(switch_instr, constant_area, value_reg));
+
+ // Compute the actual target address by adding in constant_area.
+ __ addl(temp_reg, constant_area);
+
+ // And jump.
+ __ jmp(temp_reg);
+}
+
void LocationsBuilderX86::VisitX86ComputeBaseMethodAddress(
HX86ComputeBaseMethodAddress* insn) {
LocationSummary* locations =
@@ -5752,28 +5838,18 @@ void InstructionCodeGeneratorX86::VisitX86LoadFromConstantTable(HX86LoadFromCons
}
}
-void CodeGeneratorX86::Finalize(CodeAllocator* allocator) {
- // Generate the constant area if needed.
- X86Assembler* assembler = GetAssembler();
- if (!assembler->IsConstantAreaEmpty()) {
- // Align to 4 byte boundary to reduce cache misses, as the data is 4 and 8
- // byte values.
- assembler->Align(4, 0);
- constant_area_start_ = assembler->CodeSize();
- assembler->AddConstantArea();
- }
-
- // And finish up.
- CodeGenerator::Finalize(allocator);
-}
-
/**
* Class to handle late fixup of offsets into constant area.
*/
class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocCodeGenerator> {
public:
- RIPFixup(const CodeGeneratorX86& codegen, int offset)
- : codegen_(codegen), offset_into_constant_area_(offset) {}
+ RIPFixup(CodeGeneratorX86& codegen, size_t offset)
+ : codegen_(&codegen), offset_into_constant_area_(offset) {}
+
+ protected:
+ void SetOffset(size_t offset) { offset_into_constant_area_ = offset; }
+
+ CodeGeneratorX86* codegen_;
private:
void Process(const MemoryRegion& region, int pos) OVERRIDE {
@@ -5781,19 +5857,77 @@ class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocCodeGenera
// last 4 bytes of the instruction.
// The value to patch is the distance from the offset in the constant area
// from the address computed by the HX86ComputeBaseMethodAddress instruction.
- int32_t constant_offset = codegen_.ConstantAreaStart() + offset_into_constant_area_;
- int32_t relative_position = constant_offset - codegen_.GetMethodAddressOffset();;
+ int32_t constant_offset = codegen_->ConstantAreaStart() + offset_into_constant_area_;
+    int32_t relative_position = constant_offset - codegen_->GetMethodAddressOffset();
// Patch in the right value.
region.StoreUnaligned<int32_t>(pos - 4, relative_position);
}
- const CodeGeneratorX86& codegen_;
-
// Location in constant area that the fixup refers to.
- int offset_into_constant_area_;
+ int32_t offset_into_constant_area_;
};
+/**
+ * Class to handle late fixup of offsets to a jump table that will be created in the
+ * constant area.
+ */
+class JumpTableRIPFixup : public RIPFixup {
+ public:
+ JumpTableRIPFixup(CodeGeneratorX86& codegen, HX86PackedSwitch* switch_instr)
+ : RIPFixup(codegen, static_cast<size_t>(-1)), switch_instr_(switch_instr) {}
+
+ void CreateJumpTable() {
+ X86Assembler* assembler = codegen_->GetAssembler();
+
+ // Ensure that the reference to the jump table has the correct offset.
+ const int32_t offset_in_constant_table = assembler->ConstantAreaSize();
+ SetOffset(offset_in_constant_table);
+
+ // The label values in the jump table are computed relative to the
+ // instruction addressing the constant area.
+ const int32_t relative_offset = codegen_->GetMethodAddressOffset();
+
+    // Populate the jump table with the offsets to the case targets.
+ int32_t num_entries = switch_instr_->GetNumEntries();
+ HBasicBlock* block = switch_instr_->GetBlock();
+ const ArenaVector<HBasicBlock*>& successors = block->GetSuccessors();
+    // Each entry stores the target's offset from the method address anchor.
+ for (int32_t i = 0; i < num_entries; i++) {
+ HBasicBlock* b = successors[i];
+ Label* l = codegen_->GetLabelOf(b);
+ DCHECK(l->IsBound());
+ int32_t offset_to_block = l->Position() - relative_offset;
+ assembler->AppendInt32(offset_to_block);
+ }
+ }
+
+ private:
+ const HX86PackedSwitch* switch_instr_;
+};
+
+void CodeGeneratorX86::Finalize(CodeAllocator* allocator) {
+ // Generate the constant area if needed.
+ X86Assembler* assembler = GetAssembler();
+ if (!assembler->IsConstantAreaEmpty() || !fixups_to_jump_tables_.empty()) {
+ // Align to 4 byte boundary to reduce cache misses, as the data is 4 and 8
+ // byte values.
+ assembler->Align(4, 0);
+ constant_area_start_ = assembler->CodeSize();
+
+ // Populate any jump tables.
+ for (auto jump_table : fixups_to_jump_tables_) {
+ jump_table->CreateJumpTable();
+ }
+
+ // And now add the constant area to the generated code.
+ assembler->AddConstantArea();
+ }
+
+ // And finish up.
+ CodeGenerator::Finalize(allocator);
+}
+
Address CodeGeneratorX86::LiteralDoubleAddress(double v, Register reg) {
AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddDouble(v));
return Address(reg, kDummy32BitOffset, fixup);
@@ -5814,98 +5948,18 @@ Address CodeGeneratorX86::LiteralInt64Address(int64_t v, Register reg) {
return Address(reg, kDummy32BitOffset, fixup);
}
-/**
- * Finds instructions that need the constant area base as an input.
- */
-class ConstantHandlerVisitor : public HGraphVisitor {
- public:
- explicit ConstantHandlerVisitor(HGraph* graph) : HGraphVisitor(graph), base_(nullptr) {}
-
- private:
- void VisitAdd(HAdd* add) OVERRIDE {
- BinaryFP(add);
- }
-
- void VisitSub(HSub* sub) OVERRIDE {
- BinaryFP(sub);
- }
-
- void VisitMul(HMul* mul) OVERRIDE {
- BinaryFP(mul);
- }
-
- void VisitDiv(HDiv* div) OVERRIDE {
- BinaryFP(div);
- }
+Address CodeGeneratorX86::LiteralCaseTable(HX86PackedSwitch* switch_instr,
+ Register reg,
+ Register value) {
+ // Create a fixup to be used to create and address the jump table.
+ JumpTableRIPFixup* table_fixup =
+ new (GetGraph()->GetArena()) JumpTableRIPFixup(*this, switch_instr);
- void VisitReturn(HReturn* ret) OVERRIDE {
- HConstant* value = ret->InputAt(0)->AsConstant();
- if ((value != nullptr && Primitive::IsFloatingPointType(value->GetType()))) {
- ReplaceInput(ret, value, 0, true);
- }
- }
-
- void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
- HandleInvoke(invoke);
- }
-
- void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE {
- HandleInvoke(invoke);
- }
-
- void VisitInvokeInterface(HInvokeInterface* invoke) OVERRIDE {
- HandleInvoke(invoke);
- }
-
- void BinaryFP(HBinaryOperation* bin) {
- HConstant* rhs = bin->InputAt(1)->AsConstant();
- if (rhs != nullptr && Primitive::IsFloatingPointType(bin->GetResultType())) {
- ReplaceInput(bin, rhs, 1, false);
- }
- }
-
- void InitializeConstantAreaPointer(HInstruction* user) {
- // Ensure we only initialize the pointer once.
- if (base_ != nullptr) {
- return;
- }
-
- HGraph* graph = GetGraph();
- HBasicBlock* entry = graph->GetEntryBlock();
- base_ = new (graph->GetArena()) HX86ComputeBaseMethodAddress();
- HInstruction* insert_pos = (user->GetBlock() == entry) ? user : entry->GetLastInstruction();
- entry->InsertInstructionBefore(base_, insert_pos);
- DCHECK(base_ != nullptr);
- }
-
- void ReplaceInput(HInstruction* insn, HConstant* value, int input_index, bool materialize) {
- InitializeConstantAreaPointer(insn);
- HGraph* graph = GetGraph();
- HBasicBlock* block = insn->GetBlock();
- HX86LoadFromConstantTable* load_constant =
- new (graph->GetArena()) HX86LoadFromConstantTable(base_, value, materialize);
- block->InsertInstructionBefore(load_constant, insn);
- insn->ReplaceInput(load_constant, input_index);
- }
-
- void HandleInvoke(HInvoke* invoke) {
- // Ensure that we can load FP arguments from the constant area.
- for (size_t i = 0, e = invoke->InputCount(); i < e; i++) {
- HConstant* input = invoke->InputAt(i)->AsConstant();
- if (input != nullptr && Primitive::IsFloatingPointType(input->GetType())) {
- ReplaceInput(invoke, input, i, true);
- }
- }
- }
-
- // The generated HX86ComputeBaseMethodAddress in the entry block needed as an
- // input to the HX86LoadFromConstantTable instructions.
- HX86ComputeBaseMethodAddress* base_;
-};
+  // Remember the fixup so the jump table gets populated during Finalize().
+ fixups_to_jump_tables_.push_back(table_fixup);
-void ConstantAreaFixups::Run() {
- ConstantHandlerVisitor visitor(graph_);
- visitor.VisitInsertionOrder();
+ // We want a scaled address, as we are extracting the correct offset from the table.
+ return Address(reg, value, TIMES_4, kDummy32BitOffset, table_fixup);
}
// TODO: target as memory.
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index ae2d84f945..fdfc5ab69b 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -245,6 +245,8 @@ class InstructionCodeGeneratorX86 : public HGraphVisitor {
DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorX86);
};
+class JumpTableRIPFixup;
+
class CodeGeneratorX86 : public CodeGenerator {
public:
CodeGeneratorX86(HGraph* graph,
@@ -385,6 +387,8 @@ class CodeGeneratorX86 : public CodeGenerator {
Address LiteralInt32Address(int32_t v, Register reg);
Address LiteralInt64Address(int64_t v, Register reg);
+ Address LiteralCaseTable(HX86PackedSwitch* switch_instr, Register reg, Register value);
+
void Finalize(CodeAllocator* allocator) OVERRIDE;
private:
@@ -405,6 +409,9 @@ class CodeGeneratorX86 : public CodeGenerator {
// Used for fixups to the constant area.
int32_t constant_area_start_;
+ // Fixups for jump tables that need to be patched after the constant table is generated.
+ ArenaVector<JumpTableRIPFixup*> fixups_to_jump_tables_;
+
// If there is a HX86ComputeBaseMethodAddress instruction in the graph
// (which shall be the sole instruction of this kind), subtracting this offset
// from the value contained in the out register of this HX86ComputeBaseMethodAddress
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 21120a0c80..ed2e4ca87c 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -449,11 +449,16 @@ inline Condition X86_64IntegerCondition(IfCondition cond) {
case kCondLE: return kLessEqual;
case kCondGT: return kGreater;
case kCondGE: return kGreaterEqual;
+ case kCondB: return kBelow;
+ case kCondBE: return kBelowEqual;
+ case kCondA: return kAbove;
+ case kCondAE: return kAboveEqual;
}
LOG(FATAL) << "Unreachable";
UNREACHABLE();
}
+// Maps FP condition to x86_64 name.
inline Condition X86_64FPCondition(IfCondition cond) {
switch (cond) {
case kCondEQ: return kEqual;
@@ -462,6 +467,7 @@ inline Condition X86_64FPCondition(IfCondition cond) {
case kCondLE: return kBelowEqual;
case kCondGT: return kAbove;
case kCondGE: return kAboveEqual;
+ default: break; // should not happen
};
LOG(FATAL) << "Unreachable";
UNREACHABLE();
@@ -670,7 +676,8 @@ CodeGeneratorX86_64::CodeGeneratorX86_64(HGraph* graph,
constant_area_start_(0),
method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_rel_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ pc_rel_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
}
@@ -1043,8 +1050,7 @@ void LocationsBuilderX86_64::VisitExit(HExit* exit) {
exit->SetLocations(nullptr);
}
-void InstructionCodeGeneratorX86_64::VisitExit(HExit* exit) {
- UNUSED(exit);
+void InstructionCodeGeneratorX86_64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}
void InstructionCodeGeneratorX86_64::GenerateFPJumps(HCondition* cond,
@@ -1246,8 +1252,7 @@ void LocationsBuilderX86_64::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
HInstruction* cond = deoptimize->InputAt(0);
- DCHECK(cond->IsCondition());
- if (cond->AsCondition()->NeedsMaterialization()) {
+ if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
locations->SetInAt(0, Location::Any());
}
}
@@ -1272,9 +1277,8 @@ void LocationsBuilderX86_64::VisitLoadLocal(HLoadLocal* local) {
local->SetLocations(nullptr);
}
-void InstructionCodeGeneratorX86_64::VisitLoadLocal(HLoadLocal* load) {
+void InstructionCodeGeneratorX86_64::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
- UNUSED(load);
}
void LocationsBuilderX86_64::VisitStoreLocal(HStoreLocal* store) {
@@ -1301,8 +1305,7 @@ void LocationsBuilderX86_64::VisitStoreLocal(HStoreLocal* store) {
}
}
-void InstructionCodeGeneratorX86_64::VisitStoreLocal(HStoreLocal* store) {
- UNUSED(store);
+void InstructionCodeGeneratorX86_64::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
}
void LocationsBuilderX86_64::VisitCondition(HCondition* cond) {
@@ -1474,6 +1477,38 @@ void InstructionCodeGeneratorX86_64::VisitGreaterThanOrEqual(HGreaterThanOrEqual
VisitCondition(comp);
}
+void LocationsBuilderX86_64::VisitBelow(HBelow* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorX86_64::VisitBelow(HBelow* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderX86_64::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorX86_64::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderX86_64::VisitAbove(HAbove* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorX86_64::VisitAbove(HAbove* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderX86_64::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorX86_64::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ VisitCondition(comp);
+}
+
void LocationsBuilderX86_64::VisitCompare(HCompare* compare) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
@@ -1575,9 +1610,8 @@ void LocationsBuilderX86_64::VisitIntConstant(HIntConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86_64::VisitIntConstant(HIntConstant* constant) {
+void InstructionCodeGeneratorX86_64::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86_64::VisitNullConstant(HNullConstant* constant) {
@@ -1586,9 +1620,8 @@ void LocationsBuilderX86_64::VisitNullConstant(HNullConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86_64::VisitNullConstant(HNullConstant* constant) {
+void InstructionCodeGeneratorX86_64::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86_64::VisitLongConstant(HLongConstant* constant) {
@@ -1597,9 +1630,8 @@ void LocationsBuilderX86_64::VisitLongConstant(HLongConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86_64::VisitLongConstant(HLongConstant* constant) {
+void InstructionCodeGeneratorX86_64::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86_64::VisitFloatConstant(HFloatConstant* constant) {
@@ -1608,9 +1640,8 @@ void LocationsBuilderX86_64::VisitFloatConstant(HFloatConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86_64::VisitFloatConstant(HFloatConstant* constant) {
+void InstructionCodeGeneratorX86_64::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86_64::VisitDoubleConstant(HDoubleConstant* constant) {
@@ -1619,9 +1650,9 @@ void LocationsBuilderX86_64::VisitDoubleConstant(HDoubleConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86_64::VisitDoubleConstant(HDoubleConstant* constant) {
+void InstructionCodeGeneratorX86_64::VisitDoubleConstant(
+ HDoubleConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86_64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
@@ -1636,8 +1667,7 @@ void LocationsBuilderX86_64::VisitReturnVoid(HReturnVoid* ret) {
ret->SetLocations(nullptr);
}
-void InstructionCodeGeneratorX86_64::VisitReturnVoid(HReturnVoid* ret) {
- UNUSED(ret);
+void InstructionCodeGeneratorX86_64::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
codegen_->GenerateFrameExit();
}
@@ -3591,8 +3621,7 @@ void LocationsBuilderX86_64::VisitPhi(HPhi* instruction) {
locations->SetOut(Location::Any());
}
-void InstructionCodeGeneratorX86_64::VisitPhi(HPhi* instruction) {
- UNUSED(instruction);
+void InstructionCodeGeneratorX86_64::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unimplemented";
}
@@ -4412,13 +4441,11 @@ void LocationsBuilderX86_64::VisitTemporary(HTemporary* temp) {
temp->SetLocations(nullptr);
}
-void InstructionCodeGeneratorX86_64::VisitTemporary(HTemporary* temp) {
+void InstructionCodeGeneratorX86_64::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
- UNUSED(temp);
}
-void LocationsBuilderX86_64::VisitParallelMove(HParallelMove* instruction) {
- UNUSED(instruction);
+void LocationsBuilderX86_64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unimplemented";
}
@@ -5293,15 +5320,13 @@ void InstructionCodeGeneratorX86_64::HandleBitwiseOperation(HBinaryOperation* in
}
}
-void LocationsBuilderX86_64::VisitBoundType(HBoundType* instruction) {
+void LocationsBuilderX86_64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
- UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
-void InstructionCodeGeneratorX86_64::VisitBoundType(HBoundType* instruction) {
+void InstructionCodeGeneratorX86_64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
- UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
@@ -5322,31 +5347,43 @@ void LocationsBuilderX86_64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
}
void InstructionCodeGeneratorX86_64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
int32_t lower_bound = switch_instr->GetStartValue();
int32_t num_entries = switch_instr->GetNumEntries();
LocationSummary* locations = switch_instr->GetLocations();
- CpuRegister value_reg = locations->InAt(0).AsRegister<CpuRegister>();
- HBasicBlock* default_block = switch_instr->GetDefaultBlock();
+ CpuRegister value_reg_in = locations->InAt(0).AsRegister<CpuRegister>();
+ CpuRegister temp_reg = locations->GetTemp(0).AsRegister<CpuRegister>();
+ CpuRegister base_reg = locations->GetTemp(1).AsRegister<CpuRegister>();
- // Create a series of compare/jumps.
- const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
- for (int i = 0; i < num_entries; i++) {
- int32_t case_value = lower_bound + i;
- if (case_value == 0) {
- __ testl(value_reg, value_reg);
- } else {
- __ cmpl(value_reg, Immediate(case_value));
- }
- __ j(kEqual, codegen_->GetLabelOf(successors[i]));
+ // Remove the bias, if needed.
+ Register value_reg_out = value_reg_in.AsRegister();
+ if (lower_bound != 0) {
+ __ leal(temp_reg, Address(value_reg_in, -lower_bound));
+ value_reg_out = temp_reg.AsRegister();
}
+ CpuRegister value_reg(value_reg_out);
- // And the default for any other value.
- if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) {
- __ jmp(codegen_->GetLabelOf(default_block));
- }
+ // Is the value in range?
+ HBasicBlock* default_block = switch_instr->GetDefaultBlock();
+ __ cmpl(value_reg, Immediate(num_entries - 1));
+ __ j(kAbove, codegen_->GetLabelOf(default_block));
+
+ // We are in the range of the table.
+ // Load the address of the jump table in the constant area.
+ __ leaq(base_reg, codegen_->LiteralCaseTable(switch_instr));
+
+ // Load the (signed) offset from the jump table.
+ __ movsxd(temp_reg, Address(base_reg, value_reg, TIMES_4, 0));
+
+ // Add the offset to the address of the table base.
+ __ addq(temp_reg, base_reg);
+
+ // And jump.
+ __ jmp(temp_reg);
}
void CodeGeneratorX86_64::Load64BitValue(CpuRegister dest, int64_t value) {
@@ -5372,15 +5409,85 @@ void CodeGeneratorX86_64::Store64BitValueToStack(Location dest, int64_t value) {
}
}
+/**
+ * Class to handle late fixup of offsets into constant area.
+ */
+class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocCodeGenerator> {
+ public:
+ RIPFixup(CodeGeneratorX86_64& codegen, size_t offset)
+ : codegen_(&codegen), offset_into_constant_area_(offset) {}
+
+ protected:
+ void SetOffset(size_t offset) { offset_into_constant_area_ = offset; }
+
+ CodeGeneratorX86_64* codegen_;
+
+ private:
+ void Process(const MemoryRegion& region, int pos) OVERRIDE {
+ // Patch the correct offset for the instruction. We use the address of the
+ // 'next' instruction, which is 'pos' (patch the 4 bytes before).
+ int32_t constant_offset = codegen_->ConstantAreaStart() + offset_into_constant_area_;
+ int32_t relative_position = constant_offset - pos;
+
+ // Patch in the right value.
+ region.StoreUnaligned<int32_t>(pos - 4, relative_position);
+ }
+
+ // Location in constant area that the fixup refers to.
+ size_t offset_into_constant_area_;
+};
+
+/**
+ * Class to handle late fixup of offsets to a jump table that will be created in the
+ * constant area.
+ */
+class JumpTableRIPFixup : public RIPFixup {
+ public:
+ JumpTableRIPFixup(CodeGeneratorX86_64& codegen, HPackedSwitch* switch_instr)
+ : RIPFixup(codegen, -1), switch_instr_(switch_instr) {}
+
+ void CreateJumpTable() {
+ X86_64Assembler* assembler = codegen_->GetAssembler();
+
+ // Ensure that the reference to the jump table has the correct offset.
+ const int32_t offset_in_constant_table = assembler->ConstantAreaSize();
+ SetOffset(offset_in_constant_table);
+
+ // Compute the offset from the start of the function to this jump table.
+ const int32_t current_table_offset = assembler->CodeSize() + offset_in_constant_table;
+
+ // Populate the jump table with the correct values.
+ int32_t num_entries = switch_instr_->GetNumEntries();
+ HBasicBlock* block = switch_instr_->GetBlock();
+ const ArenaVector<HBasicBlock*>& successors = block->GetSuccessors();
+ // The value that we want is the target offset - the position of the table.
+ for (int32_t i = 0; i < num_entries; i++) {
+ HBasicBlock* b = successors[i];
+ Label* l = codegen_->GetLabelOf(b);
+ DCHECK(l->IsBound());
+ int32_t offset_to_block = l->Position() - current_table_offset;
+ assembler->AppendInt32(offset_to_block);
+ }
+ }
+
+ private:
+ const HPackedSwitch* switch_instr_;
+};
+
void CodeGeneratorX86_64::Finalize(CodeAllocator* allocator) {
// Generate the constant area if needed.
X86_64Assembler* assembler = GetAssembler();
- if (!assembler->IsConstantAreaEmpty()) {
- // Align to 4 byte boundary to reduce cache misses, as the data is 4 and 8
- // byte values. If used for vectors at a later time, this will need to be
- // updated to 16 bytes with the appropriate offset.
+ if (!assembler->IsConstantAreaEmpty() || !fixups_to_jump_tables_.empty()) {
+ // Align to 4 byte boundary to reduce cache misses, as the data is 4 and 8 byte values.
assembler->Align(4, 0);
constant_area_start_ = assembler->CodeSize();
+
+ // Populate any jump tables.
+ for (auto jump_table : fixups_to_jump_tables_) {
+ jump_table->CreateJumpTable();
+ }
+
+ // And now add the constant area to the generated code.
assembler->AddConstantArea();
}
@@ -5388,31 +5495,6 @@ void CodeGeneratorX86_64::Finalize(CodeAllocator* allocator) {
CodeGenerator::Finalize(allocator);
}
-/**
- * Class to handle late fixup of offsets into constant area.
- */
-class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocCodeGenerator> {
- public:
- RIPFixup(const CodeGeneratorX86_64& codegen, int offset)
- : codegen_(codegen), offset_into_constant_area_(offset) {}
-
- private:
- void Process(const MemoryRegion& region, int pos) OVERRIDE {
- // Patch the correct offset for the instruction. We use the address of the
- // 'next' instruction, which is 'pos' (patch the 4 bytes before).
- int constant_offset = codegen_.ConstantAreaStart() + offset_into_constant_area_;
- int relative_position = constant_offset - pos;
-
- // Patch in the right value.
- region.StoreUnaligned<int32_t>(pos - 4, relative_position);
- }
-
- const CodeGeneratorX86_64& codegen_;
-
- // Location in constant area that the fixup refers to.
- int offset_into_constant_area_;
-};
-
Address CodeGeneratorX86_64::LiteralDoubleAddress(double v) {
AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddDouble(v));
return Address::RIP(fixup);
@@ -5453,6 +5535,16 @@ void CodeGeneratorX86_64::MoveFromReturnRegister(Location trg, Primitive::Type t
GetMoveResolver()->EmitNativeCode(&parallel_move);
}
+Address CodeGeneratorX86_64::LiteralCaseTable(HPackedSwitch* switch_instr) {
+ // Create a fixup to be used to create and address the jump table.
+ JumpTableRIPFixup* table_fixup =
+ new (GetGraph()->GetArena()) JumpTableRIPFixup(*this, switch_instr);
+
+ // We have to populate the jump tables.
+ fixups_to_jump_tables_.push_back(table_fixup);
+ return Address::RIP(table_fixup);
+}
+
#undef __
} // namespace x86_64
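
A minimal sketch (not part of the patch; all names below are illustrative) of what the new
VisitPackedSwitch dispatch and the RIPFixup displacement compute, written as plain C++ instead
of the emitted leal/cmpl/leaq/movsxd/addq/jmp sequence:

#include <cstdint>

// Each jump-table entry is a 32-bit offset from the start of the table to its case
// block, exactly what JumpTableRIPFixup::CreateJumpTable appends to the constant area.
static const void* ResolveSwitchTarget(const int32_t* table,
                                       int32_t value,
                                       int32_t lower_bound,
                                       int32_t num_entries,
                                       const void* default_target) {
  // Remove the bias (the 'leal' with displacement -lower_bound).
  uint32_t index = static_cast<uint32_t>(value - lower_bound);
  // One unsigned compare doubles as the range check ('cmpl' + 'j kAbove').
  if (index > static_cast<uint32_t>(num_entries - 1)) {
    return default_target;
  }
  // 'movsxd' loads the signed offset, 'addq' adds the table base, 'jmp' transfers control.
  return reinterpret_cast<const uint8_t*>(table) + table[index];
}

// Displacement patched by RIPFixup::Process: 'pos' is the address of the next
// instruction, and the 4 bytes just before it hold the RIP-relative displacement
// to the referenced constant-area entry.
static int32_t RipDisplacement(int32_t constant_area_start, int32_t offset_in_area, int32_t pos) {
  return (constant_area_start + offset_in_area) - pos;
}
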
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index d6a6a7e760..dc86a48ce7 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -234,6 +234,9 @@ class InstructionCodeGeneratorX86_64 : public HGraphVisitor {
DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorX86_64);
};
+// Class for fixups to jump tables.
+class JumpTableRIPFixup;
+
class CodeGeneratorX86_64 : public CodeGenerator {
public:
CodeGeneratorX86_64(HGraph* graph,
@@ -354,6 +357,7 @@ class CodeGeneratorX86_64 : public CodeGenerator {
// Load a 64 bit value into a register in the most efficient manner.
void Load64BitValue(CpuRegister dest, int64_t value);
+ Address LiteralCaseTable(HPackedSwitch* switch_instr);
// Store a 64 bit value into a DoubleStackSlot in the most efficient manner.
void Store64BitValueToStack(Location dest, int64_t value);
@@ -391,6 +395,9 @@ class CodeGeneratorX86_64 : public CodeGenerator {
// We will fix this up in the linker later to have the right value.
static constexpr int32_t kDummy32BitOffset = 256;
+ // Fixups for jump tables need to be handled specially.
+ ArenaVector<JumpTableRIPFixup*> fixups_to_jump_tables_;
+
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorX86_64);
};
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 22f227c56a..57de41f557 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -20,6 +20,8 @@
#include "arch/arm/instruction_set_features_arm.h"
#include "arch/arm/registers_arm.h"
#include "arch/arm64/instruction_set_features_arm64.h"
+#include "arch/mips/instruction_set_features_mips.h"
+#include "arch/mips/registers_mips.h"
#include "arch/mips64/instruction_set_features_mips64.h"
#include "arch/mips64/registers_mips64.h"
#include "arch/x86/instruction_set_features_x86.h"
@@ -29,6 +31,7 @@
#include "builder.h"
#include "code_generator_arm.h"
#include "code_generator_arm64.h"
+#include "code_generator_mips.h"
#include "code_generator_mips64.h"
#include "code_generator_x86.h"
#include "code_generator_x86_64.h"
@@ -43,6 +46,7 @@
#include "ssa_liveness_analysis.h"
#include "utils.h"
#include "utils/arm/managed_register_arm.h"
+#include "utils/mips/managed_register_mips.h"
#include "utils/mips64/managed_register_mips64.h"
#include "utils/x86/managed_register_x86.h"
@@ -177,6 +181,14 @@ static void RunCodeBaseline(HGraph* graph, bool has_result, Expected expected) {
Run(allocator, codegenARM64, has_result, expected);
}
+ std::unique_ptr<const MipsInstructionSetFeatures> features_mips(
+ MipsInstructionSetFeatures::FromCppDefines());
+ mips::CodeGeneratorMIPS codegenMIPS(graph, *features_mips.get(), compiler_options);
+ codegenMIPS.CompileBaseline(&allocator, true);
+ if (kRuntimeISA == kMips) {
+ Run(allocator, codegenMIPS, has_result, expected);
+ }
+
std::unique_ptr<const Mips64InstructionSetFeatures> features_mips64(
Mips64InstructionSetFeatures::FromCppDefines());
mips64::CodeGeneratorMIPS64 codegenMIPS64(graph, *features_mips64.get(), compiler_options);
@@ -234,6 +246,11 @@ static void RunCodeOptimized(HGraph* graph,
X86_64InstructionSetFeatures::FromCppDefines());
x86_64::CodeGeneratorX86_64 codegenX86_64(graph, *features_x86_64.get(), compiler_options);
RunCodeOptimized(&codegenX86_64, graph, hook_before_codegen, has_result, expected);
+ } else if (kRuntimeISA == kMips) {
+ std::unique_ptr<const MipsInstructionSetFeatures> features_mips(
+ MipsInstructionSetFeatures::FromCppDefines());
+ mips::CodeGeneratorMIPS codegenMIPS(graph, *features_mips.get(), compiler_options);
+ RunCodeOptimized(&codegenMIPS, graph, hook_before_codegen, has_result, expected);
} else if (kRuntimeISA == kMips64) {
std::unique_ptr<const Mips64InstructionSetFeatures> features_mips64(
Mips64InstructionSetFeatures::FromCppDefines());
@@ -761,4 +778,130 @@ TEST(CodegenTest, ReturnDivInt2Addr) {
TestCode(data, true, 2);
}
+// Helper method.
+static void TestComparison(IfCondition condition, int64_t i, int64_t j, Primitive::Type type) {
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+ HGraph* graph = CreateGraph(&allocator);
+
+ HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(entry_block);
+ graph->SetEntryBlock(entry_block);
+ entry_block->AddInstruction(new (&allocator) HGoto());
+
+ HBasicBlock* block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(block);
+
+ HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(exit_block);
+ graph->SetExitBlock(exit_block);
+ exit_block->AddInstruction(new (&allocator) HExit());
+
+ entry_block->AddSuccessor(block);
+ block->AddSuccessor(exit_block);
+
+ HInstruction* op1;
+ HInstruction* op2;
+ if (type == Primitive::kPrimInt) {
+ op1 = graph->GetIntConstant(i);
+ op2 = graph->GetIntConstant(j);
+ } else {
+ DCHECK_EQ(type, Primitive::kPrimLong);
+ op1 = graph->GetLongConstant(i);
+ op2 = graph->GetLongConstant(j);
+ }
+
+ HInstruction* comparison = nullptr;
+ bool expected_result = false;
+ const uint64_t x = i;
+ const uint64_t y = j;
+ switch (condition) {
+ case kCondEQ:
+ comparison = new (&allocator) HEqual(op1, op2);
+ expected_result = (i == j);
+ break;
+ case kCondNE:
+ comparison = new (&allocator) HNotEqual(op1, op2);
+ expected_result = (i != j);
+ break;
+ case kCondLT:
+ comparison = new (&allocator) HLessThan(op1, op2);
+ expected_result = (i < j);
+ break;
+ case kCondLE:
+ comparison = new (&allocator) HLessThanOrEqual(op1, op2);
+ expected_result = (i <= j);
+ break;
+ case kCondGT:
+ comparison = new (&allocator) HGreaterThan(op1, op2);
+ expected_result = (i > j);
+ break;
+ case kCondGE:
+ comparison = new (&allocator) HGreaterThanOrEqual(op1, op2);
+ expected_result = (i >= j);
+ break;
+ case kCondB:
+ comparison = new (&allocator) HBelow(op1, op2);
+ expected_result = (x < y);
+ break;
+ case kCondBE:
+ comparison = new (&allocator) HBelowOrEqual(op1, op2);
+ expected_result = (x <= y);
+ break;
+ case kCondA:
+ comparison = new (&allocator) HAbove(op1, op2);
+ expected_result = (x > y);
+ break;
+ case kCondAE:
+ comparison = new (&allocator) HAboveOrEqual(op1, op2);
+ expected_result = (x >= y);
+ break;
+ }
+ block->AddInstruction(comparison);
+ block->AddInstruction(new (&allocator) HReturn(comparison));
+
+ auto hook_before_codegen = [](HGraph*) {
+ };
+ RunCodeOptimized(graph, hook_before_codegen, true, expected_result);
+}
+
+TEST(CodegenTest, ComparisonsInt) {
+ for (int64_t i = -1; i <= 1; i++) {
+ for (int64_t j = -1; j <= 1; j++) {
+ TestComparison(kCondEQ, i, j, Primitive::kPrimInt);
+ TestComparison(kCondNE, i, j, Primitive::kPrimInt);
+ TestComparison(kCondLT, i, j, Primitive::kPrimInt);
+ TestComparison(kCondLE, i, j, Primitive::kPrimInt);
+ TestComparison(kCondGT, i, j, Primitive::kPrimInt);
+ TestComparison(kCondGE, i, j, Primitive::kPrimInt);
+ TestComparison(kCondB, i, j, Primitive::kPrimInt);
+ TestComparison(kCondBE, i, j, Primitive::kPrimInt);
+ TestComparison(kCondA, i, j, Primitive::kPrimInt);
+ TestComparison(kCondAE, i, j, Primitive::kPrimInt);
+ }
+ }
+}
+
+TEST(CodegenTest, ComparisonsLong) {
+ // TODO: make MIPS work for long
+ if (kRuntimeISA == kMips || kRuntimeISA == kMips64) {
+ return;
+ }
+
+ for (int64_t i = -1; i <= 1; i++) {
+ for (int64_t j = -1; j <= 1; j++) {
+ TestComparison(kCondEQ, i, j, Primitive::kPrimLong);
+ TestComparison(kCondNE, i, j, Primitive::kPrimLong);
+ TestComparison(kCondLT, i, j, Primitive::kPrimLong);
+ TestComparison(kCondLE, i, j, Primitive::kPrimLong);
+ TestComparison(kCondGT, i, j, Primitive::kPrimLong);
+ TestComparison(kCondGE, i, j, Primitive::kPrimLong);
+ TestComparison(kCondB, i, j, Primitive::kPrimLong);
+ TestComparison(kCondBE, i, j, Primitive::kPrimLong);
+ TestComparison(kCondA, i, j, Primitive::kPrimLong);
+ TestComparison(kCondAE, i, j, Primitive::kPrimLong);
+ }
+ }
+}
+
} // namespace art
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index f54547534f..4abe5e953c 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -206,7 +206,9 @@ static bool CanEncodeConstantAsImmediate(HConstant* constant, HInstruction* inst
if (instr->IsAdd() || instr->IsSub() || instr->IsCondition() ||
instr->IsCompare() || instr->IsBoundsCheck()) {
// Uses aliases of ADD/SUB instructions.
- return vixl::Assembler::IsImmAddSub(value);
+ // If `value` does not fit but `-value` does, VIXL will automatically use
+ // the 'opposite' instruction.
+ return vixl::Assembler::IsImmAddSub(value) || vixl::Assembler::IsImmAddSub(-value);
} else if (instr->IsAnd() || instr->IsOr() || instr->IsXor()) {
// Uses logical operations.
return vixl::Assembler::IsImmLogical(value, vixl::kXRegSize);
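
A small model (not VIXL's actual implementation; the encoding is simplified) of why also
checking -value is sufficient in the ADD/SUB case above:

#include <cstdint>

// Simplified model of an ADD/SUB immediate: a 12-bit unsigned value, optionally
// shifted left by 12 (this mirrors, but is not, vixl::Assembler::IsImmAddSub).
static bool IsImmAddSubModel(int64_t v) {
  return v >= 0 &&
         ((v & ~int64_t{0xfff}) == 0 || (v & ~(int64_t{0xfff} << 12)) == 0);
}

// 'add rd, rn, #-v' can be emitted as 'sub rd, rn, #v', so the negated value is
// also acceptable, e.g. value = -4095 fails the first test but passes the second.
static bool CanEncodeAsAddOrSub(int64_t value) {
  return IsImmAddSubModel(value) || IsImmAddSubModel(-value);
}
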
diff --git a/compiler/optimizing/constant_area_fixups_x86.cc b/compiler/optimizing/constant_area_fixups_x86.cc
new file mode 100644
index 0000000000..c3470002c5
--- /dev/null
+++ b/compiler/optimizing/constant_area_fixups_x86.cc
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "constant_area_fixups_x86.h"
+
+namespace art {
+namespace x86 {
+
+/**
+ * Finds instructions that need the constant area base as an input.
+ */
+class ConstantHandlerVisitor : public HGraphVisitor {
+ public:
+ explicit ConstantHandlerVisitor(HGraph* graph) : HGraphVisitor(graph), base_(nullptr) {}
+
+ private:
+ void VisitAdd(HAdd* add) OVERRIDE {
+ BinaryFP(add);
+ }
+
+ void VisitSub(HSub* sub) OVERRIDE {
+ BinaryFP(sub);
+ }
+
+ void VisitMul(HMul* mul) OVERRIDE {
+ BinaryFP(mul);
+ }
+
+ void VisitDiv(HDiv* div) OVERRIDE {
+ BinaryFP(div);
+ }
+
+ void VisitReturn(HReturn* ret) OVERRIDE {
+ HConstant* value = ret->InputAt(0)->AsConstant();
+ if ((value != nullptr && Primitive::IsFloatingPointType(value->GetType()))) {
+ ReplaceInput(ret, value, 0, true);
+ }
+ }
+
+ void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
+ HandleInvoke(invoke);
+ }
+
+ void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE {
+ HandleInvoke(invoke);
+ }
+
+ void VisitInvokeInterface(HInvokeInterface* invoke) OVERRIDE {
+ HandleInvoke(invoke);
+ }
+
+ void BinaryFP(HBinaryOperation* bin) {
+ HConstant* rhs = bin->InputAt(1)->AsConstant();
+ if (rhs != nullptr && Primitive::IsFloatingPointType(bin->GetResultType())) {
+ ReplaceInput(bin, rhs, 1, false);
+ }
+ }
+
+ void VisitPackedSwitch(HPackedSwitch* switch_insn) OVERRIDE {
+ // We need to replace the HPackedSwitch with a HX86PackedSwitch in order to
+ // address the constant area.
+ InitializeConstantAreaPointer(switch_insn);
+ HGraph* graph = GetGraph();
+ HBasicBlock* block = switch_insn->GetBlock();
+ HX86PackedSwitch* x86_switch = new (graph->GetArena()) HX86PackedSwitch(
+ switch_insn->GetStartValue(),
+ switch_insn->GetNumEntries(),
+ switch_insn->InputAt(0),
+ base_,
+ switch_insn->GetDexPc());
+ block->ReplaceAndRemoveInstructionWith(switch_insn, x86_switch);
+ }
+
+ void InitializeConstantAreaPointer(HInstruction* user) {
+ // Ensure we only initialize the pointer once.
+ if (base_ != nullptr) {
+ return;
+ }
+
+ HGraph* graph = GetGraph();
+ HBasicBlock* entry = graph->GetEntryBlock();
+ base_ = new (graph->GetArena()) HX86ComputeBaseMethodAddress();
+ HInstruction* insert_pos = (user->GetBlock() == entry) ? user : entry->GetLastInstruction();
+ entry->InsertInstructionBefore(base_, insert_pos);
+ DCHECK(base_ != nullptr);
+ }
+
+ void ReplaceInput(HInstruction* insn, HConstant* value, int input_index, bool materialize) {
+ InitializeConstantAreaPointer(insn);
+ HGraph* graph = GetGraph();
+ HBasicBlock* block = insn->GetBlock();
+ HX86LoadFromConstantTable* load_constant =
+ new (graph->GetArena()) HX86LoadFromConstantTable(base_, value, materialize);
+ block->InsertInstructionBefore(load_constant, insn);
+ insn->ReplaceInput(load_constant, input_index);
+ }
+
+ void HandleInvoke(HInvoke* invoke) {
+ // Ensure that we can load FP arguments from the constant area.
+ for (size_t i = 0, e = invoke->InputCount(); i < e; i++) {
+ HConstant* input = invoke->InputAt(i)->AsConstant();
+ if (input != nullptr && Primitive::IsFloatingPointType(input->GetType())) {
+ ReplaceInput(invoke, input, i, true);
+ }
+ }
+ }
+
+ // The generated HX86ComputeBaseMethodAddress in the entry block needed as an
+ // input to the HX86LoadFromConstantTable instructions.
+ HX86ComputeBaseMethodAddress* base_;
+};
+
+void ConstantAreaFixups::Run() {
+ ConstantHandlerVisitor visitor(graph_);
+ visitor.VisitInsertionOrder();
+}
+
+} // namespace x86
+} // namespace art
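
A rough model (not ART code; the enum and helpers are invented for illustration) of which
operands the visitor above redirects through the constant area, and whether the load is
materialized into a register:

// Invented for illustration only.
enum class OperandKind { kFpConstant, kIntConstant, kOther };

// Binary FP ops (VisitAdd/Sub/Mul/Div -> BinaryFP): only a constant FP right-hand
// side is redirected, with materialize = false, so the operation can use the
// constant-area entry as a memory operand directly.
static bool RedirectsBinaryRhs(OperandKind rhs, bool result_is_fp) {
  return result_is_fp && rhs == OperandKind::kFpConstant;
}

// Returns and invoke arguments (VisitReturn/HandleInvoke): FP constants are
// redirected with materialize = true, since the value must end up in an FP register.
static bool RedirectsReturnOrArgument(OperandKind value) {
  return value == OperandKind::kFpConstant;
}
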
diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc
index b2e222f1a9..2feb75cc9f 100644
--- a/compiler/optimizing/constant_folding_test.cc
+++ b/compiler/optimizing/constant_folding_test.cc
@@ -569,7 +569,7 @@ TEST(ConstantFolding, IntConstantFoldingAndJumps) {
Instruction::ADD_INT_LIT16 | 1 << 8 | 0 << 12, 5,
Instruction::GOTO | 4 << 8,
Instruction::ADD_INT_LIT16 | 0 << 8 | 2 << 12, 4,
- static_cast<uint16_t>(Instruction::GOTO | -5 << 8),
+ static_cast<uint16_t>(Instruction::GOTO | 0xFFFFFFFB << 8),
Instruction::ADD_INT_LIT16 | 2 << 8 | 1 << 12, 8,
Instruction::RETURN | 2 << 8);
diff --git a/compiler/optimizing/dead_code_elimination_test.cc b/compiler/optimizing/dead_code_elimination_test.cc
index cf0a4acd4a..2c6a1ef63d 100644
--- a/compiler/optimizing/dead_code_elimination_test.cc
+++ b/compiler/optimizing/dead_code_elimination_test.cc
@@ -140,7 +140,7 @@ TEST(DeadCodeElimination, AdditionsAndInconditionalJumps) {
Instruction::ADD_INT_LIT16 | 1 << 8 | 0 << 12, 3,
Instruction::GOTO | 4 << 8,
Instruction::ADD_INT_LIT16 | 0 << 8 | 2 << 12, 2,
- static_cast<uint16_t>(Instruction::GOTO | -5 << 8),
+ static_cast<uint16_t>(Instruction::GOTO | 0xFFFFFFFB << 8),
Instruction::ADD_INT_LIT16 | 2 << 8 | 1 << 12, 4,
Instruction::RETURN_VOID);
diff --git a/compiler/optimizing/gvn_test.cc b/compiler/optimizing/gvn_test.cc
index 56f2718264..aa375f697b 100644
--- a/compiler/optimizing/gvn_test.cc
+++ b/compiler/optimizing/gvn_test.cc
@@ -34,7 +34,10 @@ TEST(GVNTest, LocalFieldElimination) {
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (&allocator) HParameterValue(graph->GetDexFile(),
+ 0,
+ 0,
+ Primitive::kPrimNot);
entry->AddInstruction(parameter);
HBasicBlock* block = new (&allocator) HBasicBlock(graph);
@@ -111,7 +114,10 @@ TEST(GVNTest, GlobalFieldElimination) {
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (&allocator) HParameterValue(graph->GetDexFile(),
+ 0,
+ 0,
+ Primitive::kPrimNot);
entry->AddInstruction(parameter);
HBasicBlock* block = new (&allocator) HBasicBlock(graph);
@@ -188,7 +194,10 @@ TEST(GVNTest, LoopFieldElimination) {
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (&allocator) HParameterValue(graph->GetDexFile(),
+ 0,
+ 0,
+ Primitive::kPrimNot);
entry->AddInstruction(parameter);
HBasicBlock* block = new (&allocator) HBasicBlock(graph);
@@ -328,7 +337,10 @@ TEST(GVNTest, LoopSideEffects) {
inner_loop_body->AddSuccessor(inner_loop_header);
inner_loop_exit->AddSuccessor(outer_loop_header);
- HInstruction* parameter = new (&allocator) HParameterValue(0, Primitive::kPrimBoolean);
+ HInstruction* parameter = new (&allocator) HParameterValue(graph->GetDexFile(),
+ 0,
+ 0,
+ Primitive::kPrimBoolean);
entry->AddInstruction(parameter);
entry->AddInstruction(new (&allocator) HGoto());
outer_loop_header->AddInstruction(new (&allocator) HIf(parameter));
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index cf0f3493fd..8968a44da8 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -650,8 +650,7 @@ bool HInductionVarAnalysis::IsTaken(InductionInfo* lower_expr,
case kCondLE: return lower_value <= upper_value;
case kCondGT: return lower_value > upper_value;
case kCondGE: return lower_value >= upper_value;
- case kCondEQ:
- case kCondNE: LOG(FATAL) << "CONDITION UNREACHABLE";
+ default: LOG(FATAL) << "CONDITION UNREACHABLE";
}
}
return false; // not certain, may be untaken
@@ -680,8 +679,8 @@ bool HInductionVarAnalysis::IsFinite(InductionInfo* upper_expr,
(IsIntAndGet(upper_expr, &value) && value >= (min - stride_value - 1));
case kCondGE:
return (IsIntAndGet(upper_expr, &value) && value >= (min - stride_value));
- case kCondEQ:
- case kCondNE: LOG(FATAL) << "CONDITION UNREACHABLE";
+ default:
+ LOG(FATAL) << "CONDITION UNREACHABLE";
}
return false; // not certain, may be infinite
}
diff --git a/compiler/optimizing/induction_var_analysis_test.cc b/compiler/optimizing/induction_var_analysis_test.cc
index 20492e7152..f16da2a3f7 100644
--- a/compiler/optimizing/induction_var_analysis_test.cc
+++ b/compiler/optimizing/induction_var_analysis_test.cc
@@ -20,7 +20,6 @@
#include "builder.h"
#include "gtest/gtest.h"
#include "induction_var_analysis.h"
-#include "induction_var_range.h"
#include "nodes.h"
#include "optimizing_unit_test.h"
@@ -78,7 +77,8 @@ class InductionVarAnalysisTest : public testing::Test {
graph_->SetExitBlock(exit_);
// Provide entry and exit instructions.
- parameter_ = new (&allocator_) HParameterValue(0, Primitive::kPrimNot, true);
+ parameter_ = new (&allocator_) HParameterValue(
+ graph_->GetDexFile(), 0, 0, Primitive::kPrimNot, true);
entry_->AddInstruction(parameter_);
constant0_ = graph_->GetIntConstant(0);
constant1_ = graph_->GetIntConstant(1);
@@ -522,36 +522,6 @@ TEST_F(InductionVarAnalysisTest, FindDerivedPeriodicInduction) {
EXPECT_STREQ("periodic(( - (1)), (0))", GetInductionInfo(neg, 0).c_str());
}
-TEST_F(InductionVarAnalysisTest, FindRange) {
- // Setup:
- // for (int i = 0; i < 100; i++) {
- // k = i << 1;
- // k = k + 1;
- // a[k] = 0;
- // }
- BuildLoopNest(1);
- HInstruction *shl = InsertInstruction(
- new (&allocator_) HShl(Primitive::kPrimInt, InsertLocalLoad(basic_[0], 0), constant1_), 0);
- InsertLocalStore(induc_, shl, 0);
- HInstruction *add = InsertInstruction(
- new (&allocator_) HAdd(Primitive::kPrimInt, InsertLocalLoad(induc_, 0), constant1_), 0);
- InsertLocalStore(induc_, add, 0);
- HInstruction* store = InsertArrayStore(induc_, 0);
- PerformInductionVarAnalysis();
-
- EXPECT_STREQ("((2) * i + (1))", GetInductionInfo(store->InputAt(1), 0).c_str());
-
- InductionVarRange range(iva_);
- InductionVarRange::Value v_min = range.GetMinInduction(store, store->InputAt(1));
- InductionVarRange::Value v_max = range.GetMaxInduction(store, store->InputAt(1));
- ASSERT_TRUE(v_min.is_known);
- EXPECT_EQ(0, v_min.a_constant);
- EXPECT_EQ(1, v_min.b_constant);
- ASSERT_TRUE(v_max.is_known);
- EXPECT_EQ(0, v_max.a_constant);
- EXPECT_EQ(199, v_max.b_constant);
-}
-
TEST_F(InductionVarAnalysisTest, FindDeepLoopInduction) {
// Setup:
// k = 0;
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index db12819060..f4842f9696 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -75,6 +75,13 @@ static InductionVarRange::Value SimplifyMax(InductionVarRange::Value v) {
return v;
}
+static HInstruction* Insert(HBasicBlock* preheader, HInstruction* instruction) {
+ DCHECK(preheader != nullptr);
+ DCHECK(instruction != nullptr);
+ preheader->InsertInstructionBefore(instruction, preheader->GetLastInstruction());
+ return instruction;
+}
+
//
// Public class methods.
//
@@ -94,6 +101,21 @@ InductionVarRange::Value InductionVarRange::GetMaxInduction(HInstruction* contex
return SimplifyMax(GetInduction(context, instruction, /* is_min */ false));
}
+bool InductionVarRange::CanGenerateCode(HInstruction* context,
+ HInstruction* instruction,
+ /*out*/bool* top_test) {
+ return GenerateCode(context, instruction, nullptr, nullptr, nullptr, nullptr, top_test);
+}
+
+bool InductionVarRange::GenerateCode(HInstruction* context,
+ HInstruction* instruction,
+ HGraph* graph,
+ HBasicBlock* block,
+ /*out*/HInstruction** lower,
+ /*out*/HInstruction** upper) {
+ return GenerateCode(context, instruction, graph, block, lower, upper, nullptr);
+}
+
//
// Private class methods.
//
@@ -162,15 +184,15 @@ InductionVarRange::Value InductionVarRange::GetVal(HInductionVarAnalysis::Induct
case HInductionVarAnalysis::kFetch:
return GetFetch(info->fetch, trip, in_body, is_min);
case HInductionVarAnalysis::kTripCountInLoop:
- if (!in_body) {
- return is_min ? Value(0)
- : GetVal(info->op_b, trip, in_body, is_min); // one extra!
+ if (!in_body && !is_min) { // one extra!
+ return GetVal(info->op_b, trip, in_body, is_min);
}
FALLTHROUGH_INTENDED;
case HInductionVarAnalysis::kTripCountInBody:
- if (in_body) {
- return is_min ? Value(0)
- : SubValue(GetVal(info->op_b, trip, in_body, is_min), Value(1));
+ if (is_min) {
+ return Value(0);
+ } else if (in_body) {
+ return SubValue(GetVal(info->op_b, trip, in_body, is_min), Value(1));
}
break;
default:
@@ -256,9 +278,11 @@ InductionVarRange::Value InductionVarRange::GetDiv(HInductionVarAnalysis::Induct
bool InductionVarRange::GetConstant(HInductionVarAnalysis::InductionInfo* info, int32_t *value) {
Value v_min = GetVal(info, nullptr, false, /* is_min */ true);
Value v_max = GetVal(info, nullptr, false, /* is_min */ false);
- if (v_min.a_constant == 0 && v_max.a_constant == 0 && v_min.b_constant == v_max.b_constant) {
- *value = v_min.b_constant;
- return true;
+ if (v_min.is_known && v_max.is_known) {
+ if (v_min.a_constant == 0 && v_max.a_constant == 0 && v_min.b_constant == v_max.b_constant) {
+ *value = v_min.b_constant;
+ return true;
+ }
}
return false;
}
@@ -326,4 +350,129 @@ InductionVarRange::Value InductionVarRange::MergeVal(Value v1, Value v2, bool is
return Value();
}
+bool InductionVarRange::GenerateCode(HInstruction* context,
+ HInstruction* instruction,
+ HGraph* graph,
+ HBasicBlock* block,
+ /*out*/HInstruction** lower,
+ /*out*/HInstruction** upper,
+ /*out*/bool* top_test) {
+ HLoopInformation* loop = context->GetBlock()->GetLoopInformation(); // closest enveloping loop
+ if (loop != nullptr) {
+ HBasicBlock* header = loop->GetHeader();
+ bool in_body = context->GetBlock() != header;
+ HInductionVarAnalysis::InductionInfo* info = induction_analysis_->LookupInfo(loop, instruction);
+ HInductionVarAnalysis::InductionInfo* trip =
+ induction_analysis_->LookupInfo(loop, header->GetLastInstruction());
+ if (info != nullptr && trip != nullptr) {
+ if (top_test != nullptr) {
+ *top_test = trip->operation != HInductionVarAnalysis::kTripCountInLoop;
+ }
+ return
+ // Success on lower if invariant (not set), or code can be generated.
+ ((info->induction_class == HInductionVarAnalysis::kInvariant) ||
+ GenerateCode(info, trip, graph, block, lower, in_body, /* is_min */ true)) &&
+ // And success on upper.
+ GenerateCode(info, trip, graph, block, upper, in_body, /* is_min */ false);
+ }
+ }
+ return false;
+}
+
+bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info,
+ HInductionVarAnalysis::InductionInfo* trip,
+ HGraph* graph, // when set, code is generated
+ HBasicBlock* block,
+ /*out*/HInstruction** result,
+ bool in_body,
+ bool is_min) {
+ if (info != nullptr) {
+ Primitive::Type type = Primitive::kPrimInt;
+ HInstruction* opa = nullptr;
+ HInstruction* opb = nullptr;
+ int32_t value = 0;
+ switch (info->induction_class) {
+ case HInductionVarAnalysis::kInvariant:
+ // Invariants.
+ switch (info->operation) {
+ case HInductionVarAnalysis::kAdd:
+ if (GenerateCode(info->op_a, trip, graph, block, &opa, in_body, is_min) &&
+ GenerateCode(info->op_b, trip, graph, block, &opb, in_body, is_min)) {
+ if (graph != nullptr) {
+ *result = Insert(block, new (graph->GetArena()) HAdd(type, opa, opb));
+ }
+ return true;
+ }
+ break;
+ case HInductionVarAnalysis::kSub: // second reversed!
+ if (GenerateCode(info->op_a, trip, graph, block, &opa, in_body, is_min) &&
+ GenerateCode(info->op_b, trip, graph, block, &opb, in_body, !is_min)) {
+ if (graph != nullptr) {
+ *result = Insert(block, new (graph->GetArena()) HSub(type, opa, opb));
+ }
+ return true;
+ }
+ break;
+ case HInductionVarAnalysis::kNeg: // reversed!
+ if (GenerateCode(info->op_b, trip, graph, block, &opb, in_body, !is_min)) {
+ if (graph != nullptr) {
+ *result = Insert(block, new (graph->GetArena()) HNeg(type, opb));
+ }
+ return true;
+ }
+ break;
+ case HInductionVarAnalysis::kFetch:
+ if (graph != nullptr) {
+ *result = info->fetch; // already in HIR
+ }
+ return true;
+ case HInductionVarAnalysis::kTripCountInLoop:
+ if (!in_body && !is_min) { // one extra!
+ return GenerateCode(info->op_b, trip, graph, block, result, in_body, is_min);
+ }
+ FALLTHROUGH_INTENDED;
+ case HInductionVarAnalysis::kTripCountInBody:
+ if (is_min) {
+ if (graph != nullptr) {
+ *result = graph->GetIntConstant(0);
+ }
+ return true;
+ } else if (in_body) {
+ if (GenerateCode(info->op_b, trip, graph, block, &opb, in_body, is_min)) {
+ if (graph != nullptr) {
+ *result = Insert(block,
+ new (graph->GetArena())
+ HSub(type, opb, graph->GetIntConstant(1)));
+ }
+ return true;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ case HInductionVarAnalysis::kLinear:
+ // Linear induction a * i + b, for normalized 0 <= i < TC. Restrict to unit stride only
+ // to avoid arithmetic wrap-around situations that are hard to guard against.
+ if (GetConstant(info->op_a, &value)) {
+ if (value == 1 || value == -1) {
+ const bool is_min_a = value == 1 ? is_min : !is_min;
+ if (GenerateCode(trip, trip, graph, block, &opa, in_body, is_min_a) &&
+ GenerateCode(info->op_b, trip, graph, block, &opb, in_body, is_min)) {
+ if (graph != nullptr) {
+ *result = Insert(block, new (graph->GetArena()) HAdd(type, opa, opb));
+ }
+ return true;
+ }
+ }
+ }
+ break;
+ default: // TODO(ajcbik): add more cases
+ break;
+ }
+ }
+ return false;
+}
+
} // namespace art
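
A worked example (not from the patch) of the bound selection in the linear case above: for a
unit stride of -1 the roles of i's minimum and maximum swap, which is what the is_min_a flip
encodes.

#include <cstdint>

// Illustrative only: bound of a linear induction a*i + b with |a| == 1 and the
// normalized i ranging over [0, tc - 1]; mirrors the is_min_a selection above.
static int32_t LinearBound(int32_t a, int32_t b, int32_t tc, bool is_min) {
  const bool is_min_a = (a == 1) ? is_min : !is_min;  // for a == -1, take the other end of i
  const int32_t i = is_min_a ? 0 : tc - 1;
  return a * i + b;
}

// E.g. a = -1, b = 10, tc = 5: minimum = -(5 - 1) + 10 = 6, maximum = -0 + 10 = 10.
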
diff --git a/compiler/optimizing/induction_var_range.h b/compiler/optimizing/induction_var_range.h
index dbdd2eedac..7fa5a26dce 100644
--- a/compiler/optimizing/induction_var_range.h
+++ b/compiler/optimizing/induction_var_range.h
@@ -68,6 +68,33 @@ class InductionVarRange {
*/
Value GetMaxInduction(HInstruction* context, HInstruction* instruction);
+ /**
+ * Returns true if range analysis is able to generate code for the lower and upper bound
+ * expressions on the instruction in the given context. Output parameter top_test denotes
+ * whether a top test is needed to protect the trip-count expression evaluation.
+ */
+ bool CanGenerateCode(HInstruction* context, HInstruction* instruction, /*out*/bool* top_test);
+
+ /**
+ * Generates the actual code in the HIR for the lower and upper bound expressions on the
+ * instruction in the given context. Code for the lower and upper bound expressions is
+ * generated in the given block and graph and is returned in lower and upper, respectively.
+ * For a loop invariant, lower is not set.
+ *
+ * For example, given expression x+i with range [0, 5] for i, calling this method
+ * will generate the following sequence:
+ *
+ * block:
+ * lower: add x, 0
+ * upper: add x, 5
+ */
+ bool GenerateCode(HInstruction* context,
+ HInstruction* instruction,
+ HGraph* graph,
+ HBasicBlock* block,
+ /*out*/HInstruction** lower,
+ /*out*/HInstruction** upper);
+
private:
//
// Private helper methods.
@@ -102,6 +129,27 @@ class InductionVarRange {
static Value DivValue(Value v1, Value v2);
static Value MergeVal(Value v1, Value v2, bool is_min);
+ /**
+ * Generates code for lower/upper expression in the HIR. Returns true on success.
+ * With graph == nullptr, the method can be used to determine if code generation
+ * would be successful without generating actual code yet.
+ */
+ bool GenerateCode(HInstruction* context,
+ HInstruction* instruction,
+ HGraph* graph,
+ HBasicBlock* block,
+ /*out*/HInstruction** lower,
+ /*out*/HInstruction** upper,
+ bool* top_test);
+
+ static bool GenerateCode(HInductionVarAnalysis::InductionInfo* info,
+ HInductionVarAnalysis::InductionInfo* trip,
+ HGraph* graph,
+ HBasicBlock* block,
+ /*out*/HInstruction** result,
+ bool in_body,
+ bool is_min);
+
/** Results of prior induction variable analysis. */
HInductionVarAnalysis *induction_analysis_;
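
A minimal usage sketch of the new API (the caller shown here is hypothetical; an actual client
such as bounds check elimination is not part of this change, and the parameter roles are
assumptions):

#include "induction_var_range.h"

// Hypothetical helper wrapping the two-phase query/generate protocol above.
static bool TryGenerateBounds(InductionVarRange* range,
                              HInstruction* context,    // instruction inside the loop
                              HInstruction* index,      // value whose range is needed
                              HGraph* graph,
                              HBasicBlock* preheader,   // where the new HIR is inserted
                              HInstruction** lower,
                              HInstruction** upper,
                              bool* needs_top_test) {
  if (!range->CanGenerateCode(context, index, needs_top_test)) {
    return false;  // induction analysis cannot express the bounds
  }
  // 'lower' may stay null when the bound is a loop invariant; 'upper' receives new HIR.
  return range->GenerateCode(context, index, graph, preheader, lower, upper);
}
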
diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc
index 4497a884d9..8fbc59fb4a 100644
--- a/compiler/optimizing/induction_var_range_test.cc
+++ b/compiler/optimizing/induction_var_range_test.cc
@@ -49,12 +49,52 @@ class InductionVarRangeTest : public testing::Test {
/** Constructs bare minimum graph. */
void BuildGraph() {
graph_->SetNumberOfVRegs(1);
- HBasicBlock* entry_block = new (&allocator_) HBasicBlock(graph_);
- HBasicBlock* exit_block = new (&allocator_) HBasicBlock(graph_);
- graph_->AddBlock(entry_block);
- graph_->AddBlock(exit_block);
- graph_->SetEntryBlock(entry_block);
- graph_->SetExitBlock(exit_block);
+ entry_block_ = new (&allocator_) HBasicBlock(graph_);
+ exit_block_ = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(entry_block_);
+ graph_->AddBlock(exit_block_);
+ graph_->SetEntryBlock(entry_block_);
+ graph_->SetExitBlock(exit_block_);
+ }
+
+ /** Constructs loop with given upper bound. */
+ void BuildLoop(HInstruction* upper) {
+ // Control flow.
+ loop_preheader_ = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(loop_preheader_);
+ HBasicBlock* loop_header = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(loop_header);
+ HBasicBlock* loop_body = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(loop_body);
+ entry_block_->AddSuccessor(loop_preheader_);
+ loop_preheader_->AddSuccessor(loop_header);
+ loop_header->AddSuccessor(loop_body);
+ loop_header->AddSuccessor(exit_block_);
+ loop_body->AddSuccessor(loop_header);
+ // Instructions.
+ HLocal* induc = new (&allocator_) HLocal(0);
+ entry_block_->AddInstruction(induc);
+ loop_preheader_->AddInstruction(
+ new (&allocator_) HStoreLocal(induc, graph_->GetIntConstant(0))); // i = 0
+ loop_preheader_->AddInstruction(new (&allocator_) HGoto());
+ HInstruction* load = new (&allocator_) HLoadLocal(induc, Primitive::kPrimInt);
+ loop_header->AddInstruction(load);
+ condition_ = new (&allocator_) HLessThan(load, upper);
+ loop_header->AddInstruction(condition_);
+ loop_header->AddInstruction(new (&allocator_) HIf(condition_)); // i < u
+ load = new (&allocator_) HLoadLocal(induc, Primitive::kPrimInt);
+ loop_body->AddInstruction(load);
+ increment_ = new (&allocator_) HAdd(Primitive::kPrimInt, load, graph_->GetIntConstant(1));
+ loop_body->AddInstruction(increment_);
+ loop_body->AddInstruction(new (&allocator_) HStoreLocal(induc, increment_)); // i++
+ loop_body->AddInstruction(new (&allocator_) HGoto());
+ exit_block_->AddInstruction(new (&allocator_) HReturnVoid());
+ }
+
+ /** Performs induction variable analysis. */
+ void PerformInductionVarAnalysis() {
+ ASSERT_TRUE(graph_->TryBuildingSsa());
+ iva_->Run();
}
/** Constructs an invariant. */
@@ -146,15 +186,20 @@ class InductionVarRangeTest : public testing::Test {
ArenaPool pool_;
ArenaAllocator allocator_;
HGraph* graph_;
+ HBasicBlock* entry_block_;
+ HBasicBlock* exit_block_;
+ HBasicBlock* loop_preheader_;
HInductionVarAnalysis* iva_;
- // Two dummy instructions.
+ // Instructions.
+ HInstruction* condition_;
+ HInstruction* increment_;
HReturnVoid x_;
HReturnVoid y_;
};
//
-// The actual InductionVarRange tests.
+// Tests on static methods.
//
TEST_F(InductionVarRangeTest, GetMinMaxNull) {
@@ -349,4 +394,83 @@ TEST_F(InductionVarRangeTest, MaxValue) {
ExpectEqual(Value(), MaxValue(Value(55), Value(&y_, 1, -50)));
}
+//
+// Tests on instance methods.
+//
+
+TEST_F(InductionVarRangeTest, FindRangeConstantTripCount) {
+ BuildLoop(graph_->GetIntConstant(1000));
+ PerformInductionVarAnalysis();
+ InductionVarRange range(iva_);
+
+ // In context of header: known.
+ ExpectEqual(Value(0), range.GetMinInduction(condition_, condition_->InputAt(0)));
+ ExpectEqual(Value(1000), range.GetMaxInduction(condition_, condition_->InputAt(0)));
+
+ // In context of loop-body: known.
+ ExpectEqual(Value(0), range.GetMinInduction(increment_, condition_->InputAt(0)));
+ ExpectEqual(Value(999), range.GetMaxInduction(increment_, condition_->InputAt(0)));
+ ExpectEqual(Value(1), range.GetMinInduction(increment_, increment_));
+ ExpectEqual(Value(1000), range.GetMaxInduction(increment_, increment_));
+}
+
+TEST_F(InductionVarRangeTest, FindRangeSymbolicTripCount) {
+ HInstruction* parameter = new (&allocator_) HParameterValue(
+ graph_->GetDexFile(), 0, 0, Primitive::kPrimInt);
+ entry_block_->AddInstruction(parameter);
+ BuildLoop(parameter);
+ PerformInductionVarAnalysis();
+ InductionVarRange range(iva_);
+
+ // In context of header: lower bound known, upper bound unknown.
+ ExpectEqual(Value(0), range.GetMinInduction(condition_, condition_->InputAt(0)));
+ ExpectEqual(Value(), range.GetMaxInduction(condition_, condition_->InputAt(0)));
+
+ // In context of loop-body: known.
+ ExpectEqual(Value(0), range.GetMinInduction(increment_, condition_->InputAt(0)));
+ ExpectEqual(Value(parameter, 1, -1), range.GetMaxInduction(increment_, condition_->InputAt(0)));
+ ExpectEqual(Value(1), range.GetMinInduction(increment_, increment_));
+ ExpectEqual(Value(parameter, 1, 0), range.GetMaxInduction(increment_, increment_));
+}
+
+TEST_F(InductionVarRangeTest, CodeGeneration) {
+ HInstruction* parameter = new (&allocator_) HParameterValue(
+ graph_->GetDexFile(), 0, 0, Primitive::kPrimInt);
+ entry_block_->AddInstruction(parameter);
+ BuildLoop(parameter);
+ PerformInductionVarAnalysis();
+ InductionVarRange range(iva_);
+
+ HInstruction* lower = nullptr;
+ HInstruction* upper = nullptr;
+ bool top_test = false;
+
+ // Can generate code in context of loop-body only.
+ EXPECT_FALSE(range.CanGenerateCode(condition_, condition_->InputAt(0), &top_test));
+ ASSERT_TRUE(range.CanGenerateCode(increment_, condition_->InputAt(0), &top_test));
+ EXPECT_TRUE(top_test);
+
+ // Generates code.
+ EXPECT_TRUE(range.GenerateCode(
+ increment_, condition_->InputAt(0), graph_, loop_preheader_, &lower, &upper));
+
+ // Verify lower is 0+0.
+ ASSERT_TRUE(lower != nullptr);
+ ASSERT_TRUE(lower->IsAdd());
+ ASSERT_TRUE(lower->InputAt(0)->IsIntConstant());
+ EXPECT_EQ(0, lower->InputAt(0)->AsIntConstant()->GetValue());
+ ASSERT_TRUE(lower->InputAt(1)->IsIntConstant());
+ EXPECT_EQ(0, lower->InputAt(1)->AsIntConstant()->GetValue());
+
+ // Verify upper is (V-1)+0
+ ASSERT_TRUE(upper != nullptr);
+ ASSERT_TRUE(upper->IsAdd());
+ ASSERT_TRUE(upper->InputAt(0)->IsSub());
+ EXPECT_TRUE(upper->InputAt(0)->InputAt(0)->IsParameterValue());
+ ASSERT_TRUE(upper->InputAt(0)->InputAt(1)->IsIntConstant());
+ EXPECT_EQ(1, upper->InputAt(0)->InputAt(1)->AsIntConstant()->GetValue());
+ ASSERT_TRUE(upper->InputAt(1)->IsIntConstant());
+ EXPECT_EQ(0, upper->InputAt(1)->AsIntConstant()->GetValue());
+}
+
} // namespace art
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index f3b5f08c7e..e2aca3091f 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -495,6 +495,9 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
number_of_inlined_instructions_ += number_of_instructions;
HInstruction* return_replacement = callee_graph->InlineInto(graph_, invoke_instruction);
+ if (return_replacement != nullptr) {
+ DCHECK_EQ(graph_, return_replacement->GetBlock()->GetGraph());
+ }
// When merging the graph we might create a new NullConstant in the caller graph which does
// not have the chance to be typed. We assign the correct type here so that we can keep the
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 839cf44632..b97dc1a511 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -73,6 +73,7 @@ class InstructionSimplifierVisitor : public HGraphDelegateVisitor {
void VisitInstanceOf(HInstanceOf* instruction) OVERRIDE;
void VisitFakeString(HFakeString* fake_string) OVERRIDE;
void VisitInvoke(HInvoke* invoke) OVERRIDE;
+ void VisitDeoptimize(HDeoptimize* deoptimize) OVERRIDE;
bool CanEnsureNotNullAt(HInstruction* instr, HInstruction* at) const;
@@ -618,13 +619,15 @@ void InstructionSimplifierVisitor::VisitLessThanOrEqual(HLessThanOrEqual* condit
VisitCondition(condition);
}
+// TODO: unsigned comparisons too?
+
void InstructionSimplifierVisitor::VisitCondition(HCondition* condition) {
// Try to fold an HCompare into this HCondition.
// This simplification is currently supported on x86, x86_64, ARM and ARM64.
- // TODO: Implement it for MIPS64.
+ // TODO: Implement it for MIPS and MIPS64.
InstructionSet instruction_set = GetGraph()->GetInstructionSet();
- if (instruction_set == kMips64) {
+ if (instruction_set == kMips || instruction_set == kMips64) {
return;
}
@@ -1149,4 +1152,16 @@ void InstructionSimplifierVisitor::VisitInvoke(HInvoke* instruction) {
}
}
+void InstructionSimplifierVisitor::VisitDeoptimize(HDeoptimize* deoptimize) {
+ HInstruction* cond = deoptimize->InputAt(0);
+ if (cond->IsConstant()) {
+ if (cond->AsIntConstant()->IsZero()) {
+ // Never deopt: instruction can be removed.
+ deoptimize->GetBlock()->RemoveInstruction(deoptimize);
+ } else {
+ // Always deopt.
+ }
+ }
+}
+
} // namespace art
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 764a11475f..56c4177b29 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -43,6 +43,93 @@ ArenaAllocator* IntrinsicCodeGeneratorMIPS64::GetAllocator() {
return codegen_->GetGraph()->GetArena();
}
+#define __ codegen->GetAssembler()->
+
+static void MoveFromReturnRegister(Location trg,
+ Primitive::Type type,
+ CodeGeneratorMIPS64* codegen) {
+ if (!trg.IsValid()) {
+ DCHECK_EQ(type, Primitive::kPrimVoid);
+ return;
+ }
+
+ DCHECK_NE(type, Primitive::kPrimVoid);
+
+ if (Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) {
+ GpuRegister trg_reg = trg.AsRegister<GpuRegister>();
+ if (trg_reg != V0) {
+ __ Move(V0, trg_reg);
+ }
+ } else {
+ FpuRegister trg_reg = trg.AsFpuRegister<FpuRegister>();
+ if (trg_reg != F0) {
+ if (type == Primitive::kPrimFloat) {
+ __ MovS(F0, trg_reg);
+ } else {
+ __ MovD(F0, trg_reg);
+ }
+ }
+ }
+}
+
+static void MoveArguments(HInvoke* invoke, CodeGeneratorMIPS64* codegen) {
+ InvokeDexCallingConventionVisitorMIPS64 calling_convention_visitor;
+ IntrinsicVisitor::MoveArguments(invoke, codegen, &calling_convention_visitor);
+}
+
+// Slow-path for fallback (calling the managed code to handle the
+// intrinsic) in an intrinsified call. This will copy the arguments
+// into the positions for a regular call.
+//
+// Note: The actual parameters are required to be in the locations
+// given by the invoke's location summary. If an intrinsic
+// modifies those locations before a slowpath call, they must be
+// restored!
+class IntrinsicSlowPathMIPS64 : public SlowPathCodeMIPS64 {
+ public:
+ explicit IntrinsicSlowPathMIPS64(HInvoke* invoke) : invoke_(invoke) { }
+
+ void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
+ CodeGeneratorMIPS64* codegen = down_cast<CodeGeneratorMIPS64*>(codegen_in);
+
+ __ Bind(GetEntryLabel());
+
+ SaveLiveRegisters(codegen, invoke_->GetLocations());
+
+ MoveArguments(invoke_, codegen);
+
+ if (invoke_->IsInvokeStaticOrDirect()) {
+ codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(),
+ Location::RegisterLocation(A0));
+ codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
+ } else {
+ UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
+ UNREACHABLE();
+ }
+
+ // Copy the result back to the expected output.
+ Location out = invoke_->GetLocations()->Out();
+ if (out.IsValid()) {
+ DCHECK(out.IsRegister()); // TODO: Replace this when we support output in memory.
+ DCHECK(!invoke_->GetLocations()->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
+ MoveFromReturnRegister(out, invoke_->GetType(), codegen);
+ }
+
+ RestoreLiveRegisters(codegen, invoke_->GetLocations());
+ __ B(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPathMIPS64"; }
+
+ private:
+ // The instruction where this slow path is happening.
+ HInvoke* const invoke_;
+
+ DISALLOW_COPY_AND_ASSIGN(IntrinsicSlowPathMIPS64);
+};
+
+#undef __
+
bool IntrinsicLocationsBuilderMIPS64::TryDispatch(HInvoke* invoke) {
Dispatch(invoke);
LocationSummary* res = invoke->GetLocations();
@@ -185,7 +272,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitShortReverseBytes(HInvoke* invoke) {
GenReverseBytes(invoke->GetLocations(), Primitive::kPrimShort, GetAssembler());
}
-static void GenCountZeroes(LocationSummary* locations, bool is64bit, Mips64Assembler* assembler) {
+static void GenNumberOfLeadingZeroes(
+ LocationSummary* locations, bool is64bit, Mips64Assembler* assembler) {
GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>();
GpuRegister out = locations->Out().AsRegister<GpuRegister>();
@@ -202,7 +289,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitIntegerNumberOfLeadingZeros(HInvoke*
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- GenCountZeroes(invoke->GetLocations(), false, GetAssembler());
+ GenNumberOfLeadingZeroes(invoke->GetLocations(), false, GetAssembler());
}
// int java.lang.Long.numberOfLeadingZeros(long i)
@@ -211,7 +298,166 @@ void IntrinsicLocationsBuilderMIPS64::VisitLongNumberOfLeadingZeros(HInvoke* inv
}
void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- GenCountZeroes(invoke->GetLocations(), true, GetAssembler());
+ GenNumberOfLeadingZeroes(invoke->GetLocations(), true, GetAssembler());
+}
+
+static void GenNumberOfTrailingZeroes(
+ LocationSummary* locations, bool is64bit, Mips64Assembler* assembler) {
+ Location in = locations->InAt(0);
+ Location out = locations->Out();
+
+ if (is64bit) {
+ __ Dsbh(out.AsRegister<GpuRegister>(), in.AsRegister<GpuRegister>());
+ __ Dshd(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>());
+ __ Dbitswap(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>());
+ __ Dclz(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>());
+ } else {
+ __ Rotr(out.AsRegister<GpuRegister>(), in.AsRegister<GpuRegister>(), 16);
+ __ Wsbh(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>());
+ __ Bitswap(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>());
+ __ Clz(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>());
+ }
+}
+
+// int java.lang.Integer.numberOfTrailingZeros(int i)
+void IntrinsicLocationsBuilderMIPS64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
+ CreateIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
+ GenNumberOfTrailingZeroes(invoke->GetLocations(), false, GetAssembler());
+}
+
+// int java.lang.Long.numberOfTrailingZeros(long i)
+void IntrinsicLocationsBuilderMIPS64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
+ CreateIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
+ GenNumberOfTrailingZeroes(invoke->GetLocations(), true, GetAssembler());
+}
+
+static void GenRotateRight(HInvoke* invoke,
+ Primitive::Type type,
+ Mips64Assembler* assembler) {
+ DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
+
+ LocationSummary* locations = invoke->GetLocations();
+ GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>();
+ GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+
+ if (invoke->InputAt(1)->IsIntConstant()) {
+ uint32_t shift = static_cast<uint32_t>(invoke->InputAt(1)->AsIntConstant()->GetValue());
+ if (type == Primitive::kPrimInt) {
+ shift &= 0x1f;
+ __ Rotr(out, in, shift);
+ } else {
+ shift &= 0x3f;
+ if (shift < 32) {
+ __ Drotr(out, in, shift);
+ } else {
+ shift &= 0x1f;
+ __ Drotr32(out, in, shift);
+ }
+ }
+ } else {
+ GpuRegister shamt = locations->InAt(1).AsRegister<GpuRegister>();
+ if (type == Primitive::kPrimInt) {
+ __ Rotrv(out, in, shamt);
+ } else {
+ __ Drotrv(out, in, shamt);
+ }
+ }
+}
+
+// int java.lang.Integer.rotateRight(int i, int distance)
+void IntrinsicLocationsBuilderMIPS64::VisitIntegerRotateRight(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitIntegerRotateRight(HInvoke* invoke) {
+ GenRotateRight(invoke, Primitive::kPrimInt, GetAssembler());
+}
+
+// int java.lang.Long.rotateRight(long i, int distance)
+void IntrinsicLocationsBuilderMIPS64::VisitLongRotateRight(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitLongRotateRight(HInvoke* invoke) {
+ GenRotateRight(invoke, Primitive::kPrimLong, GetAssembler());
+}
+
+static void GenRotateLeft(HInvoke* invoke,
+ Primitive::Type type,
+ Mips64Assembler* assembler) {
+ DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
+
+ LocationSummary* locations = invoke->GetLocations();
+ GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>();
+ GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+
+ if (invoke->InputAt(1)->IsIntConstant()) {
+ int32_t shift = -static_cast<int32_t>(invoke->InputAt(1)->AsIntConstant()->GetValue());
+ if (type == Primitive::kPrimInt) {
+ shift &= 0x1f;
+ __ Rotr(out, in, shift);
+ } else {
+ shift &= 0x3f;
+ if (shift < 32) {
+ __ Drotr(out, in, shift);
+ } else {
+ shift &= 0x1f;
+ __ Drotr32(out, in, shift);
+ }
+ }
+ } else {
+ GpuRegister shamt = locations->InAt(1).AsRegister<GpuRegister>();
+ if (type == Primitive::kPrimInt) {
+ __ Subu(TMP, ZERO, shamt);
+ __ Rotrv(out, in, TMP);
+ } else {
+ __ Dsubu(TMP, ZERO, shamt);
+ __ Drotrv(out, in, TMP);
+ }
+ }
+}
+
+// int java.lang.Integer.rotateLeft(int i, int distance)
+void IntrinsicLocationsBuilderMIPS64::VisitIntegerRotateLeft(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitIntegerRotateLeft(HInvoke* invoke) {
+ GenRotateLeft(invoke, Primitive::kPrimInt, GetAssembler());
+}
+
+// int java.lang.Long.rotateLeft(long i, int distance)
+void IntrinsicLocationsBuilderMIPS64::VisitLongRotateLeft(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitLongRotateLeft(HInvoke* invoke) {
+ GenRotateLeft(invoke, Primitive::kPrimLong, GetAssembler());
}
static void GenReverse(LocationSummary* locations,
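
A small check (not from the patch) of the identity GenRotateLeft relies on: rotating left by n
equals rotating right by the negated amount, which is why the shift amount is negated
(constant case, and Subu/Dsubu in the register case) before reusing the rotate-right
instructions.

#include <cstdint>

// Illustrative only: 32-bit rotate helpers written without undefined behaviour for n == 0.
static uint32_t Rotl32(uint32_t x, uint32_t n) {
  n &= 31;
  return (x << n) | (x >> ((32 - n) & 31));
}

static uint32_t Rotr32(uint32_t x, uint32_t n) {
  n &= 31;
  return (x >> n) | (x << ((32 - n) & 31));
}

// For every x and n: Rotl32(x, n) == Rotr32(x, (0u - n) & 31), i.e. a rotate right by -n,
// which matches the negate-then-mask handling above. The 64-bit variant is analogous
// with a 63 mask and the Drotr/Drotr32/Drotrv forms.
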
@@ -765,6 +1011,505 @@ void IntrinsicCodeGeneratorMIPS64::VisitThreadCurrentThread(HInvoke* invoke) {
Thread::PeerOffset<kMips64PointerSize>().Int32Value());
}
+static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
+ LocationSummary* locations = new (arena) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(2, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+static void GenUnsafeGet(HInvoke* invoke,
+ Primitive::Type type,
+ bool is_volatile,
+ CodeGeneratorMIPS64* codegen) {
+ LocationSummary* locations = invoke->GetLocations();
+ DCHECK((type == Primitive::kPrimInt) ||
+ (type == Primitive::kPrimLong) ||
+ (type == Primitive::kPrimNot));
+ Mips64Assembler* assembler = codegen->GetAssembler();
+ // Object pointer.
+ GpuRegister base = locations->InAt(1).AsRegister<GpuRegister>();
+ // Long offset.
+ GpuRegister offset = locations->InAt(2).AsRegister<GpuRegister>();
+ GpuRegister trg = locations->Out().AsRegister<GpuRegister>();
+
+ __ Daddu(TMP, base, offset);
+ if (is_volatile) {
+ __ Sync(0);
+ }
+ switch (type) {
+ case Primitive::kPrimInt:
+ __ Lw(trg, TMP, 0);
+ break;
+
+ case Primitive::kPrimNot:
+ __ Lwu(trg, TMP, 0);
+ break;
+
+ case Primitive::kPrimLong:
+ __ Ld(trg, TMP, 0);
+ break;
+
+ default:
+ LOG(FATAL) << "Unsupported op size " << type;
+ UNREACHABLE();
+ }
+}
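For the volatile flavour, GenUnsafeGet emits Sync(0), a full MIPS memory barrier, before the load. A rough, illustrative C++ analogue of the ordering being aimed for (this models the barrier placement, it is not the emitted code):

#include <atomic>
#include <cstdint>

// Barrier-then-load, mirroring Sync(0) followed by Lw/Lwu/Ld above.
int32_t VolatileStyleGetInt(const std::atomic<int32_t>* addr) {
  std::atomic_thread_fence(std::memory_order_seq_cst);  // ~ Sync(0)
  return addr->load(std::memory_order_relaxed);          // ~ Lw trg, TMP, 0
}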
+
+// int sun.misc.Unsafe.getInt(Object o, long offset)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGet(HInvoke* invoke) {
+ CreateIntIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGet(HInvoke* invoke) {
+ GenUnsafeGet(invoke, Primitive::kPrimInt, false, codegen_);
+}
+
+// int sun.misc.Unsafe.getIntVolatile(Object o, long offset)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) {
+ CreateIntIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) {
+ GenUnsafeGet(invoke, Primitive::kPrimInt, true, codegen_);
+}
+
+// long sun.misc.Unsafe.getLong(Object o, long offset)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetLong(HInvoke* invoke) {
+ CreateIntIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLong(HInvoke* invoke) {
+ GenUnsafeGet(invoke, Primitive::kPrimLong, false, codegen_);
+}
+
+// long sun.misc.Unsafe.getLongVolatile(Object o, long offset)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
+ CreateIntIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
+ GenUnsafeGet(invoke, Primitive::kPrimLong, true, codegen_);
+}
+
+// Object sun.misc.Unsafe.getObject(Object o, long offset)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetObject(HInvoke* invoke) {
+ CreateIntIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObject(HInvoke* invoke) {
+ GenUnsafeGet(invoke, Primitive::kPrimNot, false, codegen_);
+}
+
+// Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
+ CreateIntIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
+ GenUnsafeGet(invoke, Primitive::kPrimNot, true, codegen_);
+}
+
+static void CreateIntIntIntIntToVoid(ArenaAllocator* arena, HInvoke* invoke) {
+ LocationSummary* locations = new (arena) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(2, Location::RequiresRegister());
+ locations->SetInAt(3, Location::RequiresRegister());
+}
+
+static void GenUnsafePut(LocationSummary* locations,
+ Primitive::Type type,
+ bool is_volatile,
+ bool is_ordered,
+ CodeGeneratorMIPS64* codegen) {
+ DCHECK((type == Primitive::kPrimInt) ||
+ (type == Primitive::kPrimLong) ||
+ (type == Primitive::kPrimNot));
+ Mips64Assembler* assembler = codegen->GetAssembler();
+ // Object pointer.
+ GpuRegister base = locations->InAt(1).AsRegister<GpuRegister>();
+ // Long offset.
+ GpuRegister offset = locations->InAt(2).AsRegister<GpuRegister>();
+ GpuRegister value = locations->InAt(3).AsRegister<GpuRegister>();
+
+ __ Daddu(TMP, base, offset);
+ if (is_volatile || is_ordered) {
+ __ Sync(0);
+ }
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ __ Sw(value, TMP, 0);
+ break;
+
+ case Primitive::kPrimLong:
+ __ Sd(value, TMP, 0);
+ break;
+
+ default:
+ LOG(FATAL) << "Unsupported op size " << type;
+ UNREACHABLE();
+ }
+ if (is_volatile) {
+ __ Sync(0);
+ }
+
+ if (type == Primitive::kPrimNot) {
+ codegen->MarkGCCard(base, value);
+ }
+}
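GenUnsafePut brackets volatile stores with a barrier on each side, while ordered (lazySet-style) stores get only the leading barrier; reference stores additionally mark the GC card of the holding object. An illustrative C++ analogue of the barrier placement (a model of the ordering under those assumptions, not the emitted MIPS code):

#include <atomic>
#include <cstdint>

void VolatileStyleStore(std::atomic<int32_t>* addr, int32_t value) {
  std::atomic_thread_fence(std::memory_order_seq_cst);  // ~ leading Sync(0)
  addr->store(value, std::memory_order_relaxed);         // ~ Sw/Sd
  std::atomic_thread_fence(std::memory_order_seq_cst);  // ~ trailing Sync(0)
}

void OrderedStyleStore(std::atomic<int32_t>* addr, int32_t value) {
  std::atomic_thread_fence(std::memory_order_seq_cst);  // ordered put: leading barrier only
  addr->store(value, std::memory_order_relaxed);
}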
+
+// void sun.misc.Unsafe.putInt(Object o, long offset, int x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafePut(HInvoke* invoke) {
+ CreateIntIntIntIntToVoid(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafePut(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimInt, false, false, codegen_);
+}
+
+// void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) {
+ CreateIntIntIntIntToVoid(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimInt, false, true, codegen_);
+}
+
+// void sun.misc.Unsafe.putIntVolatile(Object o, long offset, int x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) {
+ CreateIntIntIntIntToVoid(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimInt, true, false, codegen_);
+}
+
+// void sun.misc.Unsafe.putObject(Object o, long offset, Object x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObject(HInvoke* invoke) {
+ CreateIntIntIntIntToVoid(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObject(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimNot, false, false, codegen_);
+}
+
+// void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
+ CreateIntIntIntIntToVoid(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimNot, false, true, codegen_);
+}
+
+// void sun.misc.Unsafe.putObjectVolatile(Object o, long offset, Object x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
+ CreateIntIntIntIntToVoid(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimNot, true, false, codegen_);
+}
+
+// void sun.misc.Unsafe.putLong(Object o, long offset, long x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLong(HInvoke* invoke) {
+ CreateIntIntIntIntToVoid(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLong(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, false, false, codegen_);
+}
+
+// void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
+ CreateIntIntIntIntToVoid(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, false, true, codegen_);
+}
+
+// void sun.misc.Unsafe.putLongVolatile(Object o, long offset, long x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
+ CreateIntIntIntIntToVoid(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, true, false, codegen_);
+}
+
+// char java.lang.String.charAt(int index)
+void IntrinsicLocationsBuilderMIPS64::VisitStringCharAt(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCallOnSlowPath,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitStringCharAt(HInvoke* invoke) {
+ LocationSummary* locations = invoke->GetLocations();
+ Mips64Assembler* assembler = GetAssembler();
+
+ // Location of reference to data array
+ const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
+ // Location of count
+ const int32_t count_offset = mirror::String::CountOffset().Int32Value();
+
+ GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
+ GpuRegister idx = locations->InAt(1).AsRegister<GpuRegister>();
+ GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+
+  // TODO: Maybe we can support range check elimination. Overall,
+  // though, it is probably not worth the cost.
+  // TODO: For simplicity, the index parameter is requested in a
+  // register, so unlike Quick we do not optimize the code for
+  // constants (which would save a register).
+
+ SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
+ codegen_->AddSlowPath(slow_path);
+
+ // Load the string size
+ __ Lw(TMP, obj, count_offset);
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
+  // Branch to the slow path if idx is too large or negative.
+ __ Bgeuc(idx, TMP, slow_path->GetEntryLabel());
+
+ // out = obj[2*idx].
+ __ Sll(TMP, idx, 1); // idx * 2
+ __ Daddu(TMP, TMP, obj); // Address of char at location idx
+ __ Lhu(out, TMP, value_offset); // Load char at location idx
+
+ __ Bind(slow_path->GetExitLabel());
+}
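VisitStringCharAt does the bounds check with a single unsigned comparison: Bgeuc branches to the slow path when idx, viewed as unsigned, is greater than or equal to the count, which also catches negative indices. A small standalone sketch of the trick (assuming count is non-negative, as string lengths are):

#include <cstdint>

// One unsigned compare covers both idx < 0 and idx >= count,
// because a negative idx reinterpreted as uint32_t is >= 2^31.
bool CharAtNeedsSlowPath(int32_t idx, int32_t count) {
  return static_cast<uint32_t>(idx) >= static_cast<uint32_t>(count);
}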
+
+// int java.lang.String.compareTo(String anotherString)
+void IntrinsicLocationsBuilderMIPS64::VisitStringCompareTo(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
+ locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitStringCompareTo(HInvoke* invoke) {
+ Mips64Assembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ // Note that the null check must have been done earlier.
+ DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
+
+ GpuRegister argument = locations->InAt(1).AsRegister<GpuRegister>();
+ SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ Beqzc(argument, slow_path->GetEntryLabel());
+
+ __ LoadFromOffset(kLoadDoubleword,
+ TMP,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kMips64WordSize,
+ pStringCompareTo).Int32Value());
+ __ Jalr(TMP);
+ __ Nop();
+ __ Bind(slow_path->GetExitLabel());
+}
+
+static void GenerateStringIndexOf(HInvoke* invoke,
+ Mips64Assembler* assembler,
+ CodeGeneratorMIPS64* codegen,
+ ArenaAllocator* allocator,
+ bool start_at_zero) {
+ LocationSummary* locations = invoke->GetLocations();
+ GpuRegister tmp_reg = start_at_zero ? locations->GetTemp(0).AsRegister<GpuRegister>() : TMP;
+
+ // Note that the null check must have been done earlier.
+ DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
+
+ // Check for code points > 0xFFFF. Either a slow-path check when we
+ // don't know statically, or directly dispatch if we have a constant.
+ SlowPathCodeMIPS64* slow_path = nullptr;
+ if (invoke->InputAt(1)->IsIntConstant()) {
+ if (!IsUint<16>(invoke->InputAt(1)->AsIntConstant()->GetValue())) {
+ // Always needs the slow-path. We could directly dispatch to it,
+ // but this case should be rare, so for simplicity just put the
+ // full slow-path down and branch unconditionally.
+ slow_path = new (allocator) IntrinsicSlowPathMIPS64(invoke);
+ codegen->AddSlowPath(slow_path);
+ __ B(slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+ return;
+ }
+ } else {
+ GpuRegister char_reg = locations->InAt(1).AsRegister<GpuRegister>();
+ __ LoadConst32(tmp_reg, std::numeric_limits<uint16_t>::max());
+ slow_path = new (allocator) IntrinsicSlowPathMIPS64(invoke);
+ codegen->AddSlowPath(slow_path);
+ __ Bltuc(tmp_reg, char_reg, slow_path->GetEntryLabel()); // UTF-16 required
+ }
+
+ if (start_at_zero) {
+ DCHECK_EQ(tmp_reg, A2);
+ // Start-index = 0.
+ __ Clear(tmp_reg);
+ } else {
+    __ Slt(TMP, A2, ZERO);   // TMP = (fromIndex < 0)
+    __ Seleqz(A2, A2, TMP);  // fromIndex = (fromIndex < 0) ? 0 : fromIndex
+ }
+
+ __ LoadFromOffset(kLoadDoubleword,
+ TMP,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, pIndexOf).Int32Value());
+ __ Jalr(TMP);
+ __ Nop();
+
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetExitLabel());
+ }
+}
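When a start index is supplied, GenerateStringIndexOf clamps a negative fromIndex to zero without branching: Slt sets TMP when fromIndex is negative, and Seleqz keeps A2 only when TMP is clear. A standalone sketch of that clamp (illustrative names, not the emitted code):

#include <cstdint>

// Branchless max(fromIndex, 0), mirroring the Slt/Seleqz pair above.
int32_t ClampFromIndex(int32_t from_index) {
  int32_t is_negative = (from_index < 0) ? 1 : 0;  // ~ Slt TMP, A2, ZERO
  return (is_negative != 0) ? 0 : from_index;      // ~ Seleqz A2, A2, TMP
}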
+
+// int java.lang.String.indexOf(int ch)
+void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOf(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ // We have a hand-crafted assembly stub that follows the runtime
+ // calling convention. So it's best to align the inputs accordingly.
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
+ locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
+
+ // Need a temp for slow-path codepoint compare, and need to send start-index=0.
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOf(HInvoke* invoke) {
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, GetAllocator(), true);
+}
+
+// int java.lang.String.indexOf(int ch, int fromIndex)
+void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ // We have a hand-crafted assembly stub that follows the runtime
+ // calling convention. So it's best to align the inputs accordingly.
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
+ locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) {
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, GetAllocator(), false);
+}
+
+// java.lang.String.String(byte[] bytes)
+void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromBytes(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ locations->SetInAt(3, Location::RegisterLocation(calling_convention.GetRegisterAt(3)));
+ Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
+ locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromBytes(HInvoke* invoke) {
+ Mips64Assembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ GpuRegister byte_array = locations->InAt(0).AsRegister<GpuRegister>();
+ SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ Beqzc(byte_array, slow_path->GetEntryLabel());
+
+ __ LoadFromOffset(kLoadDoubleword,
+ TMP,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, pAllocStringFromBytes).Int32Value());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ Jalr(TMP);
+ __ Nop();
+ __ Bind(slow_path->GetExitLabel());
+}
+
+// java.lang.String.String(char[] value)
+void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromChars(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
+ locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromChars(HInvoke* invoke) {
+ Mips64Assembler* assembler = GetAssembler();
+
+ __ LoadFromOffset(kLoadDoubleword,
+ TMP,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, pAllocStringFromChars).Int32Value());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ Jalr(TMP);
+ __ Nop();
+}
+
+// java.lang.String.String(String original)
+void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromString(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
+ locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromString(HInvoke* invoke) {
+ Mips64Assembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ GpuRegister string_to_copy = locations->InAt(0).AsRegister<GpuRegister>();
+ SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ Beqzc(string_to_copy, slow_path->GetEntryLabel());
+
+ __ LoadFromOffset(kLoadDoubleword,
+ TMP,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, pAllocStringFromString).Int32Value());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ Jalr(TMP);
+ __ Nop();
+ __ Bind(slow_path->GetExitLabel());
+}
+
// Unimplemented intrinsics.
#define UNIMPLEMENTED_INTRINSIC(Name) \
@@ -776,38 +1521,10 @@ void IntrinsicCodeGeneratorMIPS64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSE
UNIMPLEMENTED_INTRINSIC(MathRoundDouble)
UNIMPLEMENTED_INTRINSIC(MathRoundFloat)
-UNIMPLEMENTED_INTRINSIC(UnsafeGet)
-UNIMPLEMENTED_INTRINSIC(UnsafeGetVolatile)
-UNIMPLEMENTED_INTRINSIC(UnsafeGetLong)
-UNIMPLEMENTED_INTRINSIC(UnsafeGetLongVolatile)
-UNIMPLEMENTED_INTRINSIC(UnsafeGetObject)
-UNIMPLEMENTED_INTRINSIC(UnsafeGetObjectVolatile)
-UNIMPLEMENTED_INTRINSIC(UnsafePut)
-UNIMPLEMENTED_INTRINSIC(UnsafePutOrdered)
-UNIMPLEMENTED_INTRINSIC(UnsafePutVolatile)
-UNIMPLEMENTED_INTRINSIC(UnsafePutObject)
-UNIMPLEMENTED_INTRINSIC(UnsafePutObjectOrdered)
-UNIMPLEMENTED_INTRINSIC(UnsafePutObjectVolatile)
-UNIMPLEMENTED_INTRINSIC(UnsafePutLong)
-UNIMPLEMENTED_INTRINSIC(UnsafePutLongOrdered)
-UNIMPLEMENTED_INTRINSIC(UnsafePutLongVolatile)
UNIMPLEMENTED_INTRINSIC(UnsafeCASInt)
UNIMPLEMENTED_INTRINSIC(UnsafeCASLong)
UNIMPLEMENTED_INTRINSIC(UnsafeCASObject)
-UNIMPLEMENTED_INTRINSIC(StringCharAt)
-UNIMPLEMENTED_INTRINSIC(StringCompareTo)
UNIMPLEMENTED_INTRINSIC(StringEquals)
-UNIMPLEMENTED_INTRINSIC(StringIndexOf)
-UNIMPLEMENTED_INTRINSIC(StringIndexOfAfter)
-UNIMPLEMENTED_INTRINSIC(StringNewStringFromBytes)
-UNIMPLEMENTED_INTRINSIC(StringNewStringFromChars)
-UNIMPLEMENTED_INTRINSIC(StringNewStringFromString)
-UNIMPLEMENTED_INTRINSIC(LongRotateLeft)
-UNIMPLEMENTED_INTRINSIC(LongRotateRight)
-UNIMPLEMENTED_INTRINSIC(LongNumberOfTrailingZeros)
-UNIMPLEMENTED_INTRINSIC(IntegerRotateLeft)
-UNIMPLEMENTED_INTRINSIC(IntegerRotateRight)
-UNIMPLEMENTED_INTRINSIC(IntegerNumberOfTrailingZeros)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index e83aebb5be..8a7aded935 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -1837,6 +1837,14 @@ void IntrinsicLocationsBuilderX86::VisitUnsafeCASLong(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitUnsafeCASObject(HInvoke* invoke) {
+ // The UnsafeCASObject intrinsic does not always work when heap
+ // poisoning is enabled (it breaks several libcore tests); turn it
+ // off temporarily as a quick fix.
+ // TODO(rpl): Fix it and turn it back on.
+ if (kPoisonHeapReferences) {
+ return;
+ }
+
CreateIntIntIntIntIntToInt(arena_, Primitive::kPrimNot, invoke);
}
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index e0d88a91d3..7a1d92d2fe 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -1909,6 +1909,14 @@ void IntrinsicLocationsBuilderX86_64::VisitUnsafeCASLong(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeCASObject(HInvoke* invoke) {
+ // The UnsafeCASObject intrinsic does not always work when heap
+ // poisoning is enabled (it breaks several libcore tests); turn it
+ // off temporarily as a quick fix.
+ // TODO(rpl): Fix it and turn it back on.
+ if (kPoisonHeapReferences) {
+ return;
+ }
+
CreateIntIntIntIntIntToInt(arena_, Primitive::kPrimNot, invoke);
}
diff --git a/compiler/optimizing/licm_test.cc b/compiler/optimizing/licm_test.cc
index 558892d01c..a036bd5aa9 100644
--- a/compiler/optimizing/licm_test.cc
+++ b/compiler/optimizing/licm_test.cc
@@ -61,7 +61,7 @@ class LICMTest : public testing::Test {
loop_body_->AddSuccessor(loop_header_);
// Provide boiler-plate instructions.
- parameter_ = new (&allocator_) HParameterValue(0, Primitive::kPrimNot);
+ parameter_ = new (&allocator_) HParameterValue(graph_->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry_->AddInstruction(parameter_);
constant_ = graph_->GetIntConstant(42);
loop_preheader_->AddInstruction(new (&allocator_) HGoto());
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 24a89bca4e..98c3096cae 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -606,8 +606,23 @@ static void UpdateInputsUsers(HInstruction* instruction) {
void HBasicBlock::ReplaceAndRemoveInstructionWith(HInstruction* initial,
HInstruction* replacement) {
DCHECK(initial->GetBlock() == this);
- InsertInstructionBefore(replacement, initial);
- initial->ReplaceWith(replacement);
+ if (initial->IsControlFlow()) {
+ // We can only replace a control flow instruction with another control flow instruction.
+ DCHECK(replacement->IsControlFlow());
+ DCHECK_EQ(replacement->GetId(), -1);
+ DCHECK_EQ(replacement->GetType(), Primitive::kPrimVoid);
+ DCHECK_EQ(initial->GetBlock(), this);
+ DCHECK_EQ(initial->GetType(), Primitive::kPrimVoid);
+ DCHECK(initial->GetUses().IsEmpty());
+ DCHECK(initial->GetEnvUses().IsEmpty());
+ replacement->SetBlock(this);
+ replacement->SetId(GetGraph()->GetNextInstructionId());
+ instructions_.InsertInstructionBefore(replacement, initial);
+ UpdateInputsUsers(replacement);
+ } else {
+ InsertInstructionBefore(replacement, initial);
+ initial->ReplaceWith(replacement);
+ }
RemoveInstruction(initial);
}
@@ -1576,7 +1591,6 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
// Replace the invoke with the return value of the inlined graph.
if (last->IsReturn()) {
return_value = last->InputAt(0);
- invoke->ReplaceWith(return_value);
} else {
DCHECK(last->IsReturnVoid());
}
@@ -1624,10 +1638,6 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
}
}
- if (return_value != nullptr) {
- invoke->ReplaceWith(return_value);
- }
-
// Update the meta information surrounding blocks:
// (1) the graph they are now in,
// (2) the reverse post order of that graph,
@@ -1697,20 +1707,21 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
size_t parameter_index = 0;
for (HInstructionIterator it(entry_block_->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* current = it.Current();
+ HInstruction* replacement = nullptr;
if (current->IsNullConstant()) {
- current->ReplaceWith(outer_graph->GetNullConstant(current->GetDexPc()));
+ replacement = outer_graph->GetNullConstant(current->GetDexPc());
} else if (current->IsIntConstant()) {
- current->ReplaceWith(outer_graph->GetIntConstant(
- current->AsIntConstant()->GetValue(), current->GetDexPc()));
+ replacement = outer_graph->GetIntConstant(
+ current->AsIntConstant()->GetValue(), current->GetDexPc());
} else if (current->IsLongConstant()) {
- current->ReplaceWith(outer_graph->GetLongConstant(
- current->AsLongConstant()->GetValue(), current->GetDexPc()));
+ replacement = outer_graph->GetLongConstant(
+ current->AsLongConstant()->GetValue(), current->GetDexPc());
} else if (current->IsFloatConstant()) {
- current->ReplaceWith(outer_graph->GetFloatConstant(
- current->AsFloatConstant()->GetValue(), current->GetDexPc()));
+ replacement = outer_graph->GetFloatConstant(
+ current->AsFloatConstant()->GetValue(), current->GetDexPc());
} else if (current->IsDoubleConstant()) {
- current->ReplaceWith(outer_graph->GetDoubleConstant(
- current->AsDoubleConstant()->GetValue(), current->GetDexPc()));
+ replacement = outer_graph->GetDoubleConstant(
+ current->AsDoubleConstant()->GetValue(), current->GetDexPc());
} else if (current->IsParameterValue()) {
if (kIsDebugBuild
&& invoke->IsInvokeStaticOrDirect()
@@ -1720,13 +1731,25 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
size_t last_input_index = invoke->InputCount() - 1;
DCHECK(parameter_index != last_input_index);
}
- current->ReplaceWith(invoke->InputAt(parameter_index++));
+ replacement = invoke->InputAt(parameter_index++);
} else if (current->IsCurrentMethod()) {
- current->ReplaceWith(outer_graph->GetCurrentMethod());
+ replacement = outer_graph->GetCurrentMethod();
} else {
DCHECK(current->IsGoto() || current->IsSuspendCheck());
entry_block_->RemoveInstruction(current);
}
+ if (replacement != nullptr) {
+ current->ReplaceWith(replacement);
+      // If current is the return value, also update return_value to point to the replacement.
+ if (current == return_value) {
+ DCHECK_EQ(entry_block_, return_value->GetBlock());
+ return_value = replacement;
+ }
+ }
+ }
+
+ if (return_value != nullptr) {
+ invoke->ReplaceWith(return_value);
}
// Finally remove the invoke from the caller.
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 82909c41b6..7cf6339b6e 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -81,12 +81,19 @@ static constexpr InvokeType kInvalidInvokeType = static_cast<InvokeType>(-1);
static constexpr uint32_t kNoDexPc = -1;
enum IfCondition {
- kCondEQ,
- kCondNE,
- kCondLT,
- kCondLE,
- kCondGT,
- kCondGE,
+ // All types.
+ kCondEQ, // ==
+ kCondNE, // !=
+ // Signed integers and floating-point numbers.
+ kCondLT, // <
+ kCondLE, // <=
+ kCondGT, // >
+ kCondGE, // >=
+ // Unsigned integers.
+ kCondB, // <
+ kCondBE, // <=
+ kCondA, // >
+ kCondAE, // >=
};
class HInstructionList : public ValueObject {
@@ -988,11 +995,15 @@ class HLoopInformationOutwardIterator : public ValueObject {
};
#define FOR_EACH_CONCRETE_INSTRUCTION_COMMON(M) \
+ M(Above, Condition) \
+ M(AboveOrEqual, Condition) \
M(Add, BinaryOperation) \
M(And, BinaryOperation) \
M(ArrayGet, Instruction) \
M(ArrayLength, Instruction) \
M(ArraySet, Instruction) \
+ M(Below, Condition) \
+ M(BelowOrEqual, Condition) \
M(BooleanNot, UnaryOperation) \
M(BoundsCheck, Instruction) \
M(BoundType, Instruction) \
@@ -1070,11 +1081,14 @@ class HLoopInformationOutwardIterator : public ValueObject {
#define FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M)
+#define FOR_EACH_CONCRETE_INSTRUCTION_MIPS(M)
+
#define FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(M)
#define FOR_EACH_CONCRETE_INSTRUCTION_X86(M) \
M(X86ComputeBaseMethodAddress, Instruction) \
- M(X86LoadFromConstantTable, Instruction)
+ M(X86LoadFromConstantTable, Instruction) \
+ M(X86PackedSwitch, Instruction)
#define FOR_EACH_CONCRETE_INSTRUCTION_X86_64(M)
@@ -1082,6 +1096,7 @@ class HLoopInformationOutwardIterator : public ValueObject {
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(M) \
FOR_EACH_CONCRETE_INSTRUCTION_ARM(M) \
FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M) \
+ FOR_EACH_CONCRETE_INSTRUCTION_MIPS(M) \
FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(M) \
FOR_EACH_CONCRETE_INSTRUCTION_X86(M) \
FOR_EACH_CONCRETE_INSTRUCTION_X86_64(M)
@@ -1787,8 +1802,7 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
return true;
}
- virtual bool CanDoImplicitNullCheckOn(HInstruction* obj) const {
- UNUSED(obj);
+ virtual bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const {
return false;
}
@@ -1905,16 +1919,14 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
virtual bool CanBeMoved() const { return false; }
// Returns whether the two instructions are of the same kind.
- virtual bool InstructionTypeEquals(HInstruction* other) const {
- UNUSED(other);
+ virtual bool InstructionTypeEquals(HInstruction* other ATTRIBUTE_UNUSED) const {
return false;
}
// Returns whether any data encoded in the two instructions is equal.
// This method does not look at the inputs. Both instructions must be
// of the same type, otherwise the method has undefined behavior.
- virtual bool InstructionDataEquals(HInstruction* other) const {
- UNUSED(other);
+ virtual bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const {
return false;
}
@@ -2477,8 +2489,7 @@ class HUnaryOperation : public HExpression<1> {
Primitive::Type GetResultType() const { return GetType(); }
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -2548,8 +2559,7 @@ class HBinaryOperation : public HExpression<2> {
}
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -2653,8 +2663,6 @@ class HEqual : public HCondition {
bool IsCommutative() const OVERRIDE { return true; }
- template <typename T> bool Compute(T x, T y) const { return x == y; }
-
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
@@ -2675,6 +2683,8 @@ class HEqual : public HCondition {
}
private:
+ template <typename T> bool Compute(T x, T y) const { return x == y; }
+
DISALLOW_COPY_AND_ASSIGN(HEqual);
};
@@ -2685,8 +2695,6 @@ class HNotEqual : public HCondition {
bool IsCommutative() const OVERRIDE { return true; }
- template <typename T> bool Compute(T x, T y) const { return x != y; }
-
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
@@ -2707,6 +2715,8 @@ class HNotEqual : public HCondition {
}
private:
+ template <typename T> bool Compute(T x, T y) const { return x != y; }
+
DISALLOW_COPY_AND_ASSIGN(HNotEqual);
};
@@ -2715,8 +2725,6 @@ class HLessThan : public HCondition {
HLessThan(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(first, second, dex_pc) {}
- template <typename T> bool Compute(T x, T y) const { return x < y; }
-
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
@@ -2737,6 +2745,8 @@ class HLessThan : public HCondition {
}
private:
+ template <typename T> bool Compute(T x, T y) const { return x < y; }
+
DISALLOW_COPY_AND_ASSIGN(HLessThan);
};
@@ -2745,8 +2755,6 @@ class HLessThanOrEqual : public HCondition {
HLessThanOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(first, second, dex_pc) {}
- template <typename T> bool Compute(T x, T y) const { return x <= y; }
-
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
@@ -2767,6 +2775,8 @@ class HLessThanOrEqual : public HCondition {
}
private:
+ template <typename T> bool Compute(T x, T y) const { return x <= y; }
+
DISALLOW_COPY_AND_ASSIGN(HLessThanOrEqual);
};
@@ -2775,8 +2785,6 @@ class HGreaterThan : public HCondition {
HGreaterThan(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(first, second, dex_pc) {}
- template <typename T> bool Compute(T x, T y) const { return x > y; }
-
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
@@ -2797,6 +2805,8 @@ class HGreaterThan : public HCondition {
}
private:
+ template <typename T> bool Compute(T x, T y) const { return x > y; }
+
DISALLOW_COPY_AND_ASSIGN(HGreaterThan);
};
@@ -2805,8 +2815,6 @@ class HGreaterThanOrEqual : public HCondition {
HGreaterThanOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(first, second, dex_pc) {}
- template <typename T> bool Compute(T x, T y) const { return x >= y; }
-
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
@@ -2827,9 +2835,138 @@ class HGreaterThanOrEqual : public HCondition {
}
private:
+ template <typename T> bool Compute(T x, T y) const { return x >= y; }
+
DISALLOW_COPY_AND_ASSIGN(HGreaterThanOrEqual);
};
+class HBelow : public HCondition {
+ public:
+ HBelow(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
+ : HCondition(first, second, dex_pc) {}
+
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(static_cast<uint32_t>(x->GetValue()),
+ static_cast<uint32_t>(y->GetValue())), GetDexPc());
+ }
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(static_cast<uint64_t>(x->GetValue()),
+ static_cast<uint64_t>(y->GetValue())), GetDexPc());
+ }
+
+ DECLARE_INSTRUCTION(Below);
+
+ IfCondition GetCondition() const OVERRIDE {
+ return kCondB;
+ }
+
+ IfCondition GetOppositeCondition() const OVERRIDE {
+ return kCondAE;
+ }
+
+ private:
+ template <typename T> bool Compute(T x, T y) const { return x < y; }
+
+ DISALLOW_COPY_AND_ASSIGN(HBelow);
+};
+
+class HBelowOrEqual : public HCondition {
+ public:
+ HBelowOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
+ : HCondition(first, second, dex_pc) {}
+
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(static_cast<uint32_t>(x->GetValue()),
+ static_cast<uint32_t>(y->GetValue())), GetDexPc());
+ }
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(static_cast<uint64_t>(x->GetValue()),
+ static_cast<uint64_t>(y->GetValue())), GetDexPc());
+ }
+
+ DECLARE_INSTRUCTION(BelowOrEqual);
+
+ IfCondition GetCondition() const OVERRIDE {
+ return kCondBE;
+ }
+
+ IfCondition GetOppositeCondition() const OVERRIDE {
+ return kCondA;
+ }
+
+ private:
+ template <typename T> bool Compute(T x, T y) const { return x <= y; }
+
+ DISALLOW_COPY_AND_ASSIGN(HBelowOrEqual);
+};
+
+class HAbove : public HCondition {
+ public:
+ HAbove(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
+ : HCondition(first, second, dex_pc) {}
+
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(static_cast<uint32_t>(x->GetValue()),
+ static_cast<uint32_t>(y->GetValue())), GetDexPc());
+ }
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(static_cast<uint64_t>(x->GetValue()),
+ static_cast<uint64_t>(y->GetValue())), GetDexPc());
+ }
+
+ DECLARE_INSTRUCTION(Above);
+
+ IfCondition GetCondition() const OVERRIDE {
+ return kCondA;
+ }
+
+ IfCondition GetOppositeCondition() const OVERRIDE {
+ return kCondBE;
+ }
+
+ private:
+ template <typename T> bool Compute(T x, T y) const { return x > y; }
+
+ DISALLOW_COPY_AND_ASSIGN(HAbove);
+};
+
+class HAboveOrEqual : public HCondition {
+ public:
+ HAboveOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
+ : HCondition(first, second, dex_pc) {}
+
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(static_cast<uint32_t>(x->GetValue()),
+ static_cast<uint32_t>(y->GetValue())), GetDexPc());
+ }
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(static_cast<uint64_t>(x->GetValue()),
+ static_cast<uint64_t>(y->GetValue())), GetDexPc());
+ }
+
+ DECLARE_INSTRUCTION(AboveOrEqual);
+
+ IfCondition GetCondition() const OVERRIDE {
+ return kCondAE;
+ }
+
+ IfCondition GetOppositeCondition() const OVERRIDE {
+ return kCondB;
+ }
+
+ private:
+ template <typename T> bool Compute(T x, T y) const { return x >= y; }
+
+ DISALLOW_COPY_AND_ASSIGN(HAboveOrEqual);
+};
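The new unsigned conditions fold constants by reinterpreting both operands as unsigned before comparing, so their ordering differs from the signed conditions for negative values. A quick standalone check of that behaviour (illustrative only):

#include <cassert>
#include <cstdint>

int main() {
  int32_t x = -1;
  int32_t y = 1;
  // Signed: -1 < 1, but unsigned (HBelow): 0xFFFFFFFF < 1 is false.
  assert(x < y);
  assert(!(static_cast<uint32_t>(x) < static_cast<uint32_t>(y)));
  // HAboveOrEqual(-1, 1) therefore evaluates to true.
  assert(static_cast<uint32_t>(x) >= static_cast<uint32_t>(y));
  return 0;
}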
// Instruction to check how two inputs compare to each other.
// Result is 0 if input0 == input1, 1 if input0 > input1, or -1 if input0 < input1.
@@ -3253,8 +3390,7 @@ class HInvokeStaticOrDirect : public HInvoke {
target_method_(target_method),
dispatch_info_(dispatch_info) {}
- bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
- UNUSED(obj);
+ bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE {
// We access the method via the dex cache so we can't do an implicit null check.
// TODO: for intrinsics we can generate implicit null checks.
return false;
@@ -3692,8 +3828,7 @@ class HDivZeroCheck : public HExpression<1> {
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -3927,24 +4062,31 @@ class HXor : public HBinaryOperation {
// the calling convention.
class HParameterValue : public HExpression<0> {
public:
- HParameterValue(uint8_t index,
+ HParameterValue(const DexFile& dex_file,
+ uint16_t type_index,
+ uint8_t index,
Primitive::Type parameter_type,
bool is_this = false)
: HExpression(parameter_type, SideEffects::None(), kNoDexPc),
+ dex_file_(dex_file),
+ type_index_(type_index),
index_(index),
is_this_(is_this),
can_be_null_(!is_this) {}
+ const DexFile& GetDexFile() const { return dex_file_; }
+ uint16_t GetTypeIndex() const { return type_index_; }
uint8_t GetIndex() const { return index_; }
+ bool IsThis() const { return is_this_; }
bool CanBeNull() const OVERRIDE { return can_be_null_; }
void SetCanBeNull(bool can_be_null) { can_be_null_ = can_be_null; }
- bool IsThis() const { return is_this_; }
-
DECLARE_INSTRUCTION(ParameterValue);
private:
+ const DexFile& dex_file_;
+ const uint16_t type_index_;
// The index of this parameter in the parameters list. Must be less
// than HGraph::number_of_in_vregs_.
const uint8_t index_;
@@ -3963,8 +4105,7 @@ class HNot : public HUnaryOperation {
: HUnaryOperation(result_type, input, dex_pc) {}
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -3989,8 +4130,7 @@ class HBooleanNot : public HUnaryOperation {
: HUnaryOperation(Primitive::Type::kPrimBoolean, input, dex_pc) {}
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -4028,7 +4168,7 @@ class HTypeConversion : public HExpression<1> {
Primitive::Type GetInputType() const { return GetInput()->GetType(); }
Primitive::Type GetResultType() const { return GetType(); }
- // Required by the x86 and ARM code generators when producing calls
+ // Required by the x86, ARM, MIPS and MIPS64 code generators when producing calls
// to the runtime.
bool CanBeMoved() const OVERRIDE { return true; }
@@ -4156,8 +4296,7 @@ class HNullCheck : public HExpression<1> {
}
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -4302,12 +4441,10 @@ class HArrayGet : public HExpression<2> {
}
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
- bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
- UNUSED(obj);
+ bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE {
// TODO: We can be smarter here.
// Currently, the array access is always preceded by an ArrayLength or a NullCheck
// which generates the implicit null check. There are cases when these can be removed
@@ -4355,8 +4492,7 @@ class HArraySet : public HTemplateInstruction<3> {
// Can throw ArrayStoreException.
bool CanThrow() const OVERRIDE { return needs_type_check_; }
- bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
- UNUSED(obj);
+ bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE {
// TODO: Same as for ArrayGet.
return false;
}
@@ -4419,8 +4555,7 @@ class HArrayLength : public HExpression<1> {
}
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
@@ -4443,8 +4578,7 @@ class HBoundsCheck : public HExpression<2> {
}
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -4658,8 +4792,7 @@ class HClinitCheck : public HExpression<1> {
}
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -4689,7 +4822,8 @@ class HStaticFieldGet : public HExpression<1> {
uint32_t dex_pc)
: HExpression(
field_type,
- SideEffects::FieldReadOfType(field_type, is_volatile), dex_pc),
+ SideEffects::FieldReadOfType(field_type, is_volatile),
+ dex_pc),
field_info_(field_offset, field_type, is_volatile, field_idx, dex_file, dex_cache) {
SetRawInputAt(0, cls);
}
@@ -4731,7 +4865,8 @@ class HStaticFieldSet : public HTemplateInstruction<2> {
Handle<mirror::DexCache> dex_cache,
uint32_t dex_pc)
: HTemplateInstruction(
- SideEffects::FieldWriteOfType(field_type, is_volatile), dex_pc),
+ SideEffects::FieldWriteOfType(field_type, is_volatile),
+ dex_pc),
field_info_(field_offset, field_type, is_volatile, field_idx, dex_file, dex_cache),
value_can_be_null_(true) {
SetRawInputAt(0, cls);
@@ -5277,7 +5412,7 @@ class HGraphVisitor : public ValueObject {
explicit HGraphVisitor(HGraph* graph) : graph_(graph) {}
virtual ~HGraphVisitor() {}
- virtual void VisitInstruction(HInstruction* instruction) { UNUSED(instruction); }
+ virtual void VisitInstruction(HInstruction* instruction ATTRIBUTE_UNUSED) {}
virtual void VisitBasicBlock(HBasicBlock* block);
// Visit the graph following basic block insertion order.
diff --git a/compiler/optimizing/nodes_test.cc b/compiler/optimizing/nodes_test.cc
index 8eeac56ceb..764f5fec5b 100644
--- a/compiler/optimizing/nodes_test.cc
+++ b/compiler/optimizing/nodes_test.cc
@@ -34,7 +34,8 @@ TEST(Node, RemoveInstruction) {
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (&allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry->AddInstruction(parameter);
entry->AddInstruction(new (&allocator) HGoto());
@@ -76,8 +77,10 @@ TEST(Node, InsertInstruction) {
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter1 = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
- HInstruction* parameter2 = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter1 = new (&allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimNot);
+ HInstruction* parameter2 = new (&allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
entry->AddInstruction(new (&allocator) HExit());
@@ -102,7 +105,8 @@ TEST(Node, AddInstruction) {
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (&allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry->AddInstruction(parameter);
ASSERT_FALSE(parameter->HasUses());
@@ -122,7 +126,8 @@ TEST(Node, ParentEnvironment) {
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter1 = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter1 = new (&allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimNot);
HInstruction* with_environment = new (&allocator) HNullCheck(parameter1, 0);
entry->AddInstruction(parameter1);
entry->AddInstruction(with_environment);
diff --git a/compiler/optimizing/nodes_x86.h b/compiler/optimizing/nodes_x86.h
index f7cc872419..556217bf74 100644
--- a/compiler/optimizing/nodes_x86.h
+++ b/compiler/optimizing/nodes_x86.h
@@ -62,6 +62,45 @@ class HX86LoadFromConstantTable : public HExpression<2> {
DISALLOW_COPY_AND_ASSIGN(HX86LoadFromConstantTable);
};
+// X86 version of HPackedSwitch that holds a pointer to the base method address.
+class HX86PackedSwitch : public HTemplateInstruction<2> {
+ public:
+ HX86PackedSwitch(int32_t start_value,
+ int32_t num_entries,
+ HInstruction* input,
+ HX86ComputeBaseMethodAddress* method_base,
+ uint32_t dex_pc)
+ : HTemplateInstruction(SideEffects::None(), dex_pc),
+ start_value_(start_value),
+ num_entries_(num_entries) {
+ SetRawInputAt(0, input);
+ SetRawInputAt(1, method_base);
+ }
+
+ bool IsControlFlow() const OVERRIDE { return true; }
+
+ int32_t GetStartValue() const { return start_value_; }
+
+ int32_t GetNumEntries() const { return num_entries_; }
+
+ HX86ComputeBaseMethodAddress* GetBaseMethodAddress() const {
+ return InputAt(1)->AsX86ComputeBaseMethodAddress();
+ }
+
+ HBasicBlock* GetDefaultBlock() const {
+ // Last entry is the default block.
+ return GetBlock()->GetSuccessors()[num_entries_];
+ }
+
+ DECLARE_INSTRUCTION(X86PackedSwitch);
+
+ private:
+ const int32_t start_value_;
+ const int32_t num_entries_;
+
+ DISALLOW_COPY_AND_ASSIGN(HX86PackedSwitch);
+};
+
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_NODES_X86_H_
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index c7f08066d4..8d7b8a94b7 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -361,6 +361,7 @@ static bool IsInstructionSetSupported(InstructionSet instruction_set) {
return (instruction_set == kArm && !kArm32QuickCodeUseSoftFloat)
|| instruction_set == kArm64
|| (instruction_set == kThumb2 && !kArm32QuickCodeUseSoftFloat)
+ || instruction_set == kMips
|| instruction_set == kMips64
|| instruction_set == kX86
|| instruction_set == kX86_64;
@@ -666,7 +667,6 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
jobject class_loader,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache) const {
- UNUSED(invoke_type);
std::string method_name = PrettyMethod(method_idx, dex_file);
MaybeRecordStat(MethodCompilationStat::kAttemptCompilation);
CompilerDriver* compiler_driver = GetCompilerDriver();
@@ -839,18 +839,26 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
Handle<mirror::DexCache> dex_cache) const {
CompilerDriver* compiler_driver = GetCompilerDriver();
CompiledMethod* method = nullptr;
- const VerifiedMethod* verified_method = compiler_driver->GetVerifiedMethod(&dex_file, method_idx);
- DCHECK(!verified_method->HasRuntimeThrow());
- if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file)
- || CanHandleVerificationFailure(verified_method)) {
- method = TryCompile(code_item, access_flags, invoke_type, class_def_idx,
- method_idx, jclass_loader, dex_file, dex_cache);
- } else {
- if (compiler_driver->GetCompilerOptions().VerifyAtRuntime()) {
- MaybeRecordStat(MethodCompilationStat::kNotCompiledVerifyAtRuntime);
+ if (Runtime::Current()->IsAotCompiler()) {
+ const VerifiedMethod* verified_method = compiler_driver->GetVerifiedMethod(&dex_file, method_idx);
+ DCHECK(!verified_method->HasRuntimeThrow());
+ if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file)
+ || CanHandleVerificationFailure(verified_method)) {
+ method = TryCompile(code_item, access_flags, invoke_type, class_def_idx,
+ method_idx, jclass_loader, dex_file, dex_cache);
} else {
- MaybeRecordStat(MethodCompilationStat::kNotCompiledClassNotVerified);
+ if (compiler_driver->GetCompilerOptions().VerifyAtRuntime()) {
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledVerifyAtRuntime);
+ } else {
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledClassNotVerified);
+ }
}
+ } else {
+ // This is for the JIT compiler, which has already ensured the class is verified.
+ // We can go straight to compiling.
+ DCHECK(Runtime::Current()->UseJit());
+ method = TryCompile(code_item, access_flags, invoke_type, class_def_idx,
+ method_idx, jclass_loader, dex_file, dex_cache);
}
if (kIsDebugBuild &&
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index f7a7e420bb..26a05da4cb 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -428,12 +428,21 @@ void RTPVisitor::VisitNewArray(HNewArray* instr) {
UpdateReferenceTypeInfo(instr, instr->GetTypeIndex(), instr->GetDexFile(), /* is_exact */ true);
}
+static mirror::Class* GetClassFromDexCache(Thread* self, const DexFile& dex_file, uint16_t type_idx)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::DexCache* dex_cache =
+ Runtime::Current()->GetClassLinker()->FindDexCache(self, dex_file, false);
+ // Get type from dex cache assuming it was populated by the verifier.
+ return dex_cache->GetResolvedType(type_idx);
+}
+
void RTPVisitor::VisitParameterValue(HParameterValue* instr) {
ScopedObjectAccess soa(Thread::Current());
// We check if the existing type is valid: the inliner may have set it.
if (instr->GetType() == Primitive::kPrimNot && !instr->GetReferenceTypeInfo().IsValid()) {
- // TODO: parse the signature and add precise types for the parameters.
- SetClassAsTypeInfo(instr, nullptr, /* is_exact */ false);
+ mirror::Class* resolved_class =
+ GetClassFromDexCache(soa.Self(), instr->GetDexFile(), instr->GetTypeIndex());
+ SetClassAsTypeInfo(instr, resolved_class, /* is_exact */ false);
}
}
@@ -479,11 +488,9 @@ void RTPVisitor::VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instr)
void RTPVisitor::VisitLoadClass(HLoadClass* instr) {
ScopedObjectAccess soa(Thread::Current());
- mirror::DexCache* dex_cache =
- Runtime::Current()->GetClassLinker()->FindDexCache(soa.Self(), instr->GetDexFile(), false);
// Get type from dex cache assuming it was populated by the verifier.
- mirror::Class* resolved_class = dex_cache->GetResolvedType(instr->GetTypeIndex());
- // TODO: investigating why we are still getting unresolved classes: b/22821472.
+ mirror::Class* resolved_class =
+ GetClassFromDexCache(soa.Self(), instr->GetDexFile(), instr->GetTypeIndex());
if (resolved_class != nullptr) {
instr->SetLoadedClassRTI(ReferenceTypeInfo::Create(
handles_->NewHandle(resolved_class), /* is_exact */ true));
@@ -756,7 +763,9 @@ void ReferenceTypePropagation::ProcessWorklist() {
while (!worklist_.empty()) {
HInstruction* instruction = worklist_.back();
worklist_.pop_back();
- if (UpdateNullability(instruction) || UpdateReferenceTypeInfo(instruction)) {
+ bool updated_nullability = UpdateNullability(instruction);
+ bool updated_reference_type = UpdateReferenceTypeInfo(instruction);
+ if (updated_nullability || updated_reference_type) {
AddDependentInstructionsToWorklist(instruction);
}
}
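Splitting the condition into two named locals matters because || short-circuits: in the old form, UpdateReferenceTypeInfo was skipped whenever UpdateNullability already returned true, so a type update could be missed. A minimal standalone illustration of the difference (hypothetical helper, not ART code):

#include <cassert>

static int g_calls = 0;
static bool UpdateSomething() { ++g_calls; return true; }

int main() {
  g_calls = 0;
  bool short_circuited = UpdateSomething() || UpdateSomething();  // second call skipped
  assert(short_circuited && g_calls == 1);

  g_calls = 0;
  bool first = UpdateSomething();
  bool second = UpdateSomething();  // both calls always run
  assert((first || second) && g_calls == 2);
  return 0;
}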
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 6fc77721e7..ef22c816a0 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -85,12 +85,13 @@ RegisterAllocator::RegisterAllocator(ArenaAllocator* allocator,
bool RegisterAllocator::CanAllocateRegistersFor(const HGraph& graph ATTRIBUTE_UNUSED,
InstructionSet instruction_set) {
- return instruction_set == kArm64
- || instruction_set == kX86_64
+ return instruction_set == kArm
+ || instruction_set == kArm64
+ || instruction_set == kMips
|| instruction_set == kMips64
- || instruction_set == kArm
+ || instruction_set == kThumb2
|| instruction_set == kX86
- || instruction_set == kThumb2;
+ || instruction_set == kX86_64;
}
static bool ShouldProcess(bool processing_core_registers, LiveInterval* interval) {
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 1511606950..ed5419ee49 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -475,7 +475,8 @@ static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator,
NullHandle<mirror::DexCache> dex_cache;
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry->AddInstruction(parameter);
HBasicBlock* block = new (allocator) HBasicBlock(graph);
@@ -624,7 +625,8 @@ static HGraph* BuildFieldReturn(ArenaAllocator* allocator,
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry->AddInstruction(parameter);
HBasicBlock* block = new (allocator) HBasicBlock(graph);
@@ -698,7 +700,8 @@ static HGraph* BuildTwoSubs(ArenaAllocator* allocator,
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (allocator) HParameterValue(0, Primitive::kPrimInt);
+ HInstruction* parameter = new (allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimInt);
entry->AddInstruction(parameter);
HInstruction* constant1 = graph->GetIntConstant(1);
@@ -768,8 +771,10 @@ static HGraph* BuildDiv(ArenaAllocator* allocator,
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* first = new (allocator) HParameterValue(0, Primitive::kPrimInt);
- HInstruction* second = new (allocator) HParameterValue(0, Primitive::kPrimInt);
+ HInstruction* first = new (allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimInt);
+ HInstruction* second = new (allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimInt);
entry->AddInstruction(first);
entry->AddInstruction(second);
@@ -820,10 +825,14 @@ TEST(RegisterAllocatorTest, SpillInactive) {
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* one = new (&allocator) HParameterValue(0, Primitive::kPrimInt);
- HInstruction* two = new (&allocator) HParameterValue(0, Primitive::kPrimInt);
- HInstruction* three = new (&allocator) HParameterValue(0, Primitive::kPrimInt);
- HInstruction* four = new (&allocator) HParameterValue(0, Primitive::kPrimInt);
+ HInstruction* one = new (&allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimInt);
+ HInstruction* two = new (&allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimInt);
+ HInstruction* three = new (&allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimInt);
+ HInstruction* four = new (&allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimInt);
entry->AddInstruction(one);
entry->AddInstruction(two);
entry->AddInstruction(three);