author | 2017-09-21 22:50:39 +0100
---|---
committer | 2017-09-25 15:45:01 +0100
commit | 0ebe0d83138bba1996e9c8007969b5381d972b32 (patch)
tree | a5ee66ebc5b587ade97e56ac8fc7d832fbbed4af
parent | e1e347dace0ded83774999bb26c37527dcdb1d5a (diff)
ART: Introduce compiler data type.
Replace most uses of the runtime's Primitive class in the
compiler with a new class, DataType. This prepares for
introducing new types, such as Uint8, that the runtime does
not need to know about.
Test: m test-art-host-gtest
Test: testrunner.py --host
Bug: 23964345
Change-Id: Iec2ad82454eec678fffcd8279a9746b90feb9b0c
106 files changed, 6207 insertions, 5874 deletions
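For orientation before reading the diff, the sketch below approximates the new DataType API as it is exercised in this change. It is an illustrative reconstruction, not the actual contents of compiler/optimizing/data_type.h: the member and helper names (kReference, kInt32, IsFloatingPointType, FromShorty, and so on) are taken from their uses in the hunks below, and the real header added by this CL may declare additional members and helpers (the commit message anticipates types such as Uint8).

```cpp
// Illustrative sketch only; not the data_type.h added by this change.
// Member and helper names are taken from their uses in the diff below.
#include <cstdint>

class DataType {
 public:
  enum class Type : uint8_t {
    kReference,  // was Primitive::kPrimNot
    kBool,       // was Primitive::kPrimBoolean
    kInt8,       // was Primitive::kPrimByte
    kUint16,     // was Primitive::kPrimChar
    kInt16,      // was Primitive::kPrimShort
    kInt32,      // was Primitive::kPrimInt
    kInt64,      // was Primitive::kPrimLong
    kFloat32,    // was Primitive::kPrimFloat
    kFloat64,    // was Primitive::kPrimDouble
    kVoid,       // was Primitive::kPrimVoid
  };

  static bool IsFloatingPointType(Type type) {
    return type == Type::kFloat32 || type == Type::kFloat64;
  }

  static bool IsIntegralType(Type type) {
    // Booleans are carried in core registers like the other integral types.
    return type == Type::kBool || type == Type::kInt8 || type == Type::kUint16 ||
           type == Type::kInt16 || type == Type::kInt32 || type == Type::kInt64;
  }

  static bool Is64BitType(Type type) {
    return type == Type::kInt64 || type == Type::kFloat64;
  }

  // Shorty-character mapping implied by the builder.cc hunk, where
  // Primitive::GetType(shorty[0]) becomes DataType::FromShorty(shorty[0]).
  static Type FromShorty(char shorty) {
    switch (shorty) {
      case 'V': return Type::kVoid;
      case 'Z': return Type::kBool;
      case 'B': return Type::kInt8;
      case 'C': return Type::kUint16;
      case 'S': return Type::kInt16;
      case 'I': return Type::kInt32;
      case 'J': return Type::kInt64;
      case 'F': return Type::kFloat32;
      case 'D': return Type::kFloat64;
      case 'L': return Type::kReference;
      default:  return Type::kVoid;  // not reached for well-formed shorties
    }
  }
};
```

Read with that in mind, the substitutions throughout the diff (Primitive::kPrimInt to DataType::Type::kInt32, Primitive::ComponentSize to DataType::Size, Primitive::ComponentSizeShift to DataType::SizeShift, and similar) are essentially type-for-type renames rather than behavioral changes.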
diff --git a/compiler/Android.bp b/compiler/Android.bp index c50c1978ac..b1bef79736 100644 --- a/compiler/Android.bp +++ b/compiler/Android.bp @@ -54,6 +54,7 @@ art_cc_defaults { "optimizing/code_sinking.cc", "optimizing/constant_folding.cc", "optimizing/constructor_fence_redundancy_elimination.cc", + "optimizing/data_type.cc", "optimizing/dead_code_elimination.cc", "optimizing/escape.cc", "optimizing/graph_checker.cc", @@ -321,6 +322,7 @@ art_cc_test { "linker/method_bss_mapping_encoder_test.cc", "linker/output_stream_test.cc", "optimizing/bounds_check_elimination_test.cc", + "optimizing/data_type_test.cc", "optimizing/dominator_test.cc", "optimizing/find_loops_test.cc", "optimizing/graph_checker_test.cc", diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc index a170734ff2..a7f7bce07a 100644 --- a/compiler/optimizing/bounds_check_elimination.cc +++ b/compiler/optimizing/bounds_check_elimination.cc @@ -927,7 +927,7 @@ class BCEVisitor : public HGraphVisitor { void VisitPhi(HPhi* phi) OVERRIDE { if (phi->IsLoopHeaderPhi() - && (phi->GetType() == Primitive::kPrimInt) + && (phi->GetType() == DataType::Type::kInt32) && HasSameInputAtBackEdges(phi)) { HInstruction* instruction = phi->InputAt(1); HInstruction *left; @@ -1261,8 +1261,8 @@ class BCEVisitor : public HGraphVisitor { DCHECK_GE(min_c, 0); } else { HInstruction* lower = new (GetGraph()->GetArena()) - HAdd(Primitive::kPrimInt, base, GetGraph()->GetIntConstant(min_c)); - upper = new (GetGraph()->GetArena()) HAdd(Primitive::kPrimInt, base, upper); + HAdd(DataType::Type::kInt32, base, GetGraph()->GetIntConstant(min_c)); + upper = new (GetGraph()->GetArena()) HAdd(DataType::Type::kInt32, base, upper); block->InsertInstructionBefore(lower, bounds_check); block->InsertInstructionBefore(upper, bounds_check); InsertDeoptInBlock(bounds_check, new (GetGraph()->GetArena()) HAbove(lower, upper)); @@ -1801,7 +1801,7 @@ class BCEVisitor : public HGraphVisitor { // Scan all instructions in a new deoptimization block. for (HInstructionIterator it(true_block->GetInstructions()); !it.Done(); it.Advance()) { HInstruction* instruction = it.Current(); - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); HPhi* phi = nullptr; // Scan all uses of an instruction and replace each later use with a phi node. 
const HUseList<HInstruction*>& uses = instruction->GetUses(); @@ -1844,20 +1844,20 @@ class BCEVisitor : public HGraphVisitor { */ HPhi* NewPhi(HBasicBlock* new_preheader, HInstruction* instruction, - Primitive::Type type) { + DataType::Type type) { HGraph* graph = GetGraph(); HInstruction* zero; switch (type) { - case Primitive::kPrimNot: zero = graph->GetNullConstant(); break; - case Primitive::kPrimFloat: zero = graph->GetFloatConstant(0); break; - case Primitive::kPrimDouble: zero = graph->GetDoubleConstant(0); break; + case DataType::Type::kReference: zero = graph->GetNullConstant(); break; + case DataType::Type::kFloat32: zero = graph->GetFloatConstant(0); break; + case DataType::Type::kFloat64: zero = graph->GetDoubleConstant(0); break; default: zero = graph->GetConstant(type, 0); break; } HPhi* phi = new (graph->GetArena()) HPhi(graph->GetArena(), kNoRegNumber, /*number_of_inputs*/ 2, HPhi::ToPhiType(type)); phi->SetRawInputAt(0, instruction); phi->SetRawInputAt(1, zero); - if (type == Primitive::kPrimNot) { + if (type == DataType::Type::kReference) { phi->SetReferenceTypeInfo(instruction->GetReferenceTypeInfo()); } new_preheader->AddPhi(phi); diff --git a/compiler/optimizing/bounds_check_elimination_test.cc b/compiler/optimizing/bounds_check_elimination_test.cc index 2aaf05833c..851838c4b8 100644 --- a/compiler/optimizing/bounds_check_elimination_test.cc +++ b/compiler/optimizing/bounds_check_elimination_test.cc @@ -70,10 +70,10 @@ TEST_F(BoundsCheckEliminationTest, NarrowingRangeArrayBoundsElimination) { HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_); graph_->AddBlock(entry); graph_->SetEntryBlock(entry); - HInstruction* parameter1 = new (&allocator_) - HParameterValue(graph_->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimNot); // array - HInstruction* parameter2 = new (&allocator_) - HParameterValue(graph_->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimInt); // i + HInstruction* parameter1 = new (&allocator_) HParameterValue( + graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); // array + HInstruction* parameter2 = new (&allocator_) HParameterValue( + graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); // i entry->AddInstruction(parameter1); entry->AddInstruction(parameter2); @@ -95,7 +95,7 @@ TEST_F(BoundsCheckEliminationTest, NarrowingRangeArrayBoundsElimination) { HBoundsCheck* bounds_check2 = new (&allocator_) HBoundsCheck(parameter2, array_length, 0); HArraySet* array_set = new (&allocator_) HArraySet( - null_check, bounds_check2, constant_1, Primitive::kPrimInt, 0); + null_check, bounds_check2, constant_1, DataType::Type::kInt32, 0); block2->AddInstruction(null_check); block2->AddInstruction(array_length); block2->AddInstruction(bounds_check2); @@ -119,7 +119,7 @@ TEST_F(BoundsCheckEliminationTest, NarrowingRangeArrayBoundsElimination) { HBoundsCheck* bounds_check4 = new (&allocator_) HBoundsCheck(parameter2, array_length, 0); array_set = new (&allocator_) HArraySet( - null_check, bounds_check4, constant_1, Primitive::kPrimInt, 0); + null_check, bounds_check4, constant_1, DataType::Type::kInt32, 0); block4->AddInstruction(null_check); block4->AddInstruction(array_length); block4->AddInstruction(bounds_check4); @@ -132,7 +132,7 @@ TEST_F(BoundsCheckEliminationTest, NarrowingRangeArrayBoundsElimination) { HBoundsCheck* bounds_check5 = new (&allocator_) HBoundsCheck(parameter2, array_length, 0); array_set = new (&allocator_) HArraySet( - null_check, bounds_check5, constant_1, Primitive::kPrimInt, 0); + null_check, 
bounds_check5, constant_1, DataType::Type::kInt32, 0); block5->AddInstruction(null_check); block5->AddInstruction(array_length); block5->AddInstruction(bounds_check5); @@ -167,10 +167,10 @@ TEST_F(BoundsCheckEliminationTest, OverflowArrayBoundsElimination) { HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_); graph_->AddBlock(entry); graph_->SetEntryBlock(entry); - HInstruction* parameter1 = new (&allocator_) - HParameterValue(graph_->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimNot); // array - HInstruction* parameter2 = new (&allocator_) - HParameterValue(graph_->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimInt); // i + HInstruction* parameter1 = new (&allocator_) HParameterValue( + graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); // array + HInstruction* parameter2 = new (&allocator_) HParameterValue( + graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); // i entry->AddInstruction(parameter1); entry->AddInstruction(parameter2); @@ -188,7 +188,7 @@ TEST_F(BoundsCheckEliminationTest, OverflowArrayBoundsElimination) { HBasicBlock* block2 = new (&allocator_) HBasicBlock(graph_); graph_->AddBlock(block2); - HInstruction* add = new (&allocator_) HAdd(Primitive::kPrimInt, parameter2, constant_max_int); + HInstruction* add = new (&allocator_) HAdd(DataType::Type::kInt32, parameter2, constant_max_int); HNullCheck* null_check = new (&allocator_) HNullCheck(parameter1, 0); HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0); HInstruction* cmp2 = new (&allocator_) HGreaterThanOrEqual(add, array_length); @@ -204,7 +204,7 @@ TEST_F(BoundsCheckEliminationTest, OverflowArrayBoundsElimination) { HBoundsCheck* bounds_check = new (&allocator_) HBoundsCheck(add, array_length, 0); HArraySet* array_set = new (&allocator_) HArraySet( - null_check, bounds_check, constant_1, Primitive::kPrimInt, 0); + null_check, bounds_check, constant_1, DataType::Type::kInt32, 0); block3->AddInstruction(bounds_check); block3->AddInstruction(array_set); @@ -231,10 +231,10 @@ TEST_F(BoundsCheckEliminationTest, UnderflowArrayBoundsElimination) { HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_); graph_->AddBlock(entry); graph_->SetEntryBlock(entry); - HInstruction* parameter1 = new (&allocator_) - HParameterValue(graph_->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimNot); // array - HInstruction* parameter2 = new (&allocator_) - HParameterValue(graph_->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimInt); // i + HInstruction* parameter1 = new (&allocator_) HParameterValue( + graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); // array + HInstruction* parameter2 = new (&allocator_) HParameterValue( + graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); // i entry->AddInstruction(parameter1); entry->AddInstruction(parameter2); @@ -256,8 +256,8 @@ TEST_F(BoundsCheckEliminationTest, UnderflowArrayBoundsElimination) { HBasicBlock* block2 = new (&allocator_) HBasicBlock(graph_); graph_->AddBlock(block2); - HInstruction* sub1 = new (&allocator_) HSub(Primitive::kPrimInt, parameter2, constant_max_int); - HInstruction* sub2 = new (&allocator_) HSub(Primitive::kPrimInt, sub1, constant_max_int); + HInstruction* sub1 = new (&allocator_) HSub(DataType::Type::kInt32, parameter2, constant_max_int); + HInstruction* sub2 = new (&allocator_) HSub(DataType::Type::kInt32, sub1, constant_max_int); HInstruction* cmp2 = new (&allocator_) HLessThanOrEqual(sub2, constant_0); if_inst = new (&allocator_) HIf(cmp2); 
block2->AddInstruction(sub1); @@ -270,7 +270,7 @@ TEST_F(BoundsCheckEliminationTest, UnderflowArrayBoundsElimination) { HBoundsCheck* bounds_check = new (&allocator_) HBoundsCheck(sub2, array_length, 0); HArraySet* array_set = new (&allocator_) HArraySet( - null_check, bounds_check, constant_1, Primitive::kPrimInt, 0); + null_check, bounds_check, constant_1, DataType::Type::kInt32, 0); block3->AddInstruction(bounds_check); block3->AddInstruction(array_set); @@ -296,7 +296,7 @@ TEST_F(BoundsCheckEliminationTest, ConstantArrayBoundsElimination) { graph_->AddBlock(entry); graph_->SetEntryBlock(entry); HInstruction* parameter = new (&allocator_) HParameterValue( - graph_->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimNot); + graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); entry->AddInstruction(parameter); HInstruction* constant_5 = graph_->GetIntConstant(5); @@ -313,7 +313,7 @@ TEST_F(BoundsCheckEliminationTest, ConstantArrayBoundsElimination) { HBoundsCheck* bounds_check6 = new (&allocator_) HBoundsCheck(constant_6, array_length, 0); HInstruction* array_set = new (&allocator_) HArraySet( - null_check, bounds_check6, constant_1, Primitive::kPrimInt, 0); + null_check, bounds_check6, constant_1, DataType::Type::kInt32, 0); block->AddInstruction(null_check); block->AddInstruction(array_length); block->AddInstruction(bounds_check6); @@ -324,7 +324,7 @@ TEST_F(BoundsCheckEliminationTest, ConstantArrayBoundsElimination) { HBoundsCheck* bounds_check5 = new (&allocator_) HBoundsCheck(constant_5, array_length, 0); array_set = new (&allocator_) HArraySet( - null_check, bounds_check5, constant_1, Primitive::kPrimInt, 0); + null_check, bounds_check5, constant_1, DataType::Type::kInt32, 0); block->AddInstruction(null_check); block->AddInstruction(array_length); block->AddInstruction(bounds_check5); @@ -335,7 +335,7 @@ TEST_F(BoundsCheckEliminationTest, ConstantArrayBoundsElimination) { HBoundsCheck* bounds_check4 = new (&allocator_) HBoundsCheck(constant_4, array_length, 0); array_set = new (&allocator_) HArraySet( - null_check, bounds_check4, constant_1, Primitive::kPrimInt, 0); + null_check, bounds_check4, constant_1, DataType::Type::kInt32, 0); block->AddInstruction(null_check); block->AddInstruction(array_length); block->AddInstruction(bounds_check4); @@ -365,7 +365,7 @@ static HInstruction* BuildSSAGraph1(HGraph* graph, graph->AddBlock(entry); graph->SetEntryBlock(entry); HInstruction* parameter = new (allocator) HParameterValue( - graph->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimNot); + graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); entry->AddInstruction(parameter); HInstruction* constant_initial = graph->GetIntConstant(initial); @@ -389,7 +389,7 @@ static HInstruction* BuildSSAGraph1(HGraph* graph, loop_header->AddSuccessor(loop_body); // false successor loop_body->AddSuccessor(loop_header); - HPhi* phi = new (allocator) HPhi(allocator, 0, 0, Primitive::kPrimInt); + HPhi* phi = new (allocator) HPhi(allocator, 0, 0, DataType::Type::kInt32); HInstruction* null_check = new (allocator) HNullCheck(parameter, 0); HInstruction* array_length = new (allocator) HArrayLength(null_check, 0); HInstruction* cmp = nullptr; @@ -411,9 +411,9 @@ static HInstruction* BuildSSAGraph1(HGraph* graph, array_length = new (allocator) HArrayLength(null_check, 0); HInstruction* bounds_check = new (allocator) HBoundsCheck(phi, array_length, 0); HInstruction* array_set = new (allocator) HArraySet( - null_check, bounds_check, constant_10, Primitive::kPrimInt, 0); + 
null_check, bounds_check, constant_10, DataType::Type::kInt32, 0); - HInstruction* add = new (allocator) HAdd(Primitive::kPrimInt, phi, constant_increment); + HInstruction* add = new (allocator) HAdd(DataType::Type::kInt32, phi, constant_increment); loop_body->AddInstruction(null_check); loop_body->AddInstruction(array_length); loop_body->AddInstruction(bounds_check); @@ -480,7 +480,7 @@ static HInstruction* BuildSSAGraph2(HGraph *graph, graph->AddBlock(entry); graph->SetEntryBlock(entry); HInstruction* parameter = new (allocator) HParameterValue( - graph->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimNot); + graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); entry->AddInstruction(parameter); HInstruction* constant_initial = graph->GetIntConstant(initial); @@ -509,7 +509,7 @@ static HInstruction* BuildSSAGraph2(HGraph *graph, loop_header->AddSuccessor(loop_body); // false successor loop_body->AddSuccessor(loop_header); - HPhi* phi = new (allocator) HPhi(allocator, 0, 0, Primitive::kPrimInt); + HPhi* phi = new (allocator) HPhi(allocator, 0, 0, DataType::Type::kInt32); HInstruction* cmp = nullptr; if (cond == kCondLE) { cmp = new (allocator) HLessThanOrEqual(phi, constant_initial); @@ -523,13 +523,13 @@ static HInstruction* BuildSSAGraph2(HGraph *graph, loop_header->AddInstruction(if_inst); phi->AddInput(array_length); - HInstruction* add = new (allocator) HAdd(Primitive::kPrimInt, phi, constant_minus_1); + HInstruction* add = new (allocator) HAdd(DataType::Type::kInt32, phi, constant_minus_1); null_check = new (allocator) HNullCheck(parameter, 0); array_length = new (allocator) HArrayLength(null_check, 0); HInstruction* bounds_check = new (allocator) HBoundsCheck(add, array_length, 0); HInstruction* array_set = new (allocator) HArraySet( - null_check, bounds_check, constant_10, Primitive::kPrimInt, 0); - HInstruction* add_phi = new (allocator) HAdd(Primitive::kPrimInt, phi, constant_increment); + null_check, bounds_check, constant_10, DataType::Type::kInt32, 0); + HInstruction* add_phi = new (allocator) HAdd(DataType::Type::kInt32, phi, constant_increment); loop_body->AddInstruction(add); loop_body->AddInstruction(null_check); loop_body->AddInstruction(array_length); @@ -617,7 +617,7 @@ static HInstruction* BuildSSAGraph3(HGraph* graph, loop_header->AddSuccessor(loop_body); // false successor loop_body->AddSuccessor(loop_header); - HPhi* phi = new (allocator) HPhi(allocator, 0, 0, Primitive::kPrimInt); + HPhi* phi = new (allocator) HPhi(allocator, 0, 0, DataType::Type::kInt32); HInstruction* cmp = nullptr; if (cond == kCondGE) { cmp = new (allocator) HGreaterThanOrEqual(phi, constant_10); @@ -635,8 +635,8 @@ static HInstruction* BuildSSAGraph3(HGraph* graph, HArrayLength* array_length = new (allocator) HArrayLength(null_check, 0); HInstruction* bounds_check = new (allocator) HBoundsCheck(phi, array_length, 0); HInstruction* array_set = new (allocator) HArraySet( - null_check, bounds_check, constant_10, Primitive::kPrimInt, 0); - HInstruction* add = new (allocator) HAdd(Primitive::kPrimInt, phi, constant_increment); + null_check, bounds_check, constant_10, DataType::Type::kInt32, 0); + HInstruction* add = new (allocator) HAdd(DataType::Type::kInt32, phi, constant_increment); loop_body->AddInstruction(null_check); loop_body->AddInstruction(array_length); loop_body->AddInstruction(bounds_check); @@ -691,7 +691,7 @@ static HInstruction* BuildSSAGraph4(HGraph* graph, graph->AddBlock(entry); graph->SetEntryBlock(entry); HInstruction* parameter = new (allocator) 
HParameterValue( - graph->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimNot); + graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); entry->AddInstruction(parameter); HInstruction* constant_initial = graph->GetIntConstant(initial); @@ -716,7 +716,7 @@ static HInstruction* BuildSSAGraph4(HGraph* graph, loop_header->AddSuccessor(loop_body); // false successor loop_body->AddSuccessor(loop_header); - HPhi* phi = new (allocator) HPhi(allocator, 0, 0, Primitive::kPrimInt); + HPhi* phi = new (allocator) HPhi(allocator, 0, 0, DataType::Type::kInt32); HInstruction* null_check = new (allocator) HNullCheck(parameter, 0); HInstruction* array_length = new (allocator) HArrayLength(null_check, 0); HInstruction* cmp = nullptr; @@ -735,13 +735,13 @@ static HInstruction* BuildSSAGraph4(HGraph* graph, null_check = new (allocator) HNullCheck(parameter, 0); array_length = new (allocator) HArrayLength(null_check, 0); - HInstruction* sub = new (allocator) HSub(Primitive::kPrimInt, array_length, phi); + HInstruction* sub = new (allocator) HSub(DataType::Type::kInt32, array_length, phi); HInstruction* add_minus_1 = new (allocator) - HAdd(Primitive::kPrimInt, sub, constant_minus_1); + HAdd(DataType::Type::kInt32, sub, constant_minus_1); HInstruction* bounds_check = new (allocator) HBoundsCheck(add_minus_1, array_length, 0); HInstruction* array_set = new (allocator) HArraySet( - null_check, bounds_check, constant_10, Primitive::kPrimInt, 0); - HInstruction* add = new (allocator) HAdd(Primitive::kPrimInt, phi, constant_1); + null_check, bounds_check, constant_10, DataType::Type::kInt32, 0); + HInstruction* add = new (allocator) HAdd(DataType::Type::kInt32, phi, constant_1); loop_body->AddInstruction(null_check); loop_body->AddInstruction(array_length); loop_body->AddInstruction(sub); @@ -794,7 +794,7 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) { graph_->AddBlock(entry); graph_->SetEntryBlock(entry); HInstruction* parameter = new (&allocator_) HParameterValue( - graph_->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimNot); + graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); entry->AddInstruction(parameter); HInstruction* constant_0 = graph_->GetIntConstant(0); @@ -812,10 +812,10 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) { HBasicBlock* outer_header = new (&allocator_) HBasicBlock(graph_); graph_->AddBlock(outer_header); - HPhi* phi_i = new (&allocator_) HPhi(&allocator_, 0, 0, Primitive::kPrimInt); + HPhi* phi_i = new (&allocator_) HPhi(&allocator_, 0, 0, DataType::Type::kInt32); HNullCheck* null_check = new (&allocator_) HNullCheck(parameter, 0); HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0); - HAdd* add = new (&allocator_) HAdd(Primitive::kPrimInt, array_length, constant_minus_1); + HAdd* add = new (&allocator_) HAdd(DataType::Type::kInt32, array_length, constant_minus_1); HInstruction* cmp = new (&allocator_) HGreaterThanOrEqual(phi_i, add); HIf* if_inst = new (&allocator_) HIf(cmp); outer_header->AddPhi(phi_i); @@ -828,11 +828,11 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) { HBasicBlock* inner_header = new (&allocator_) HBasicBlock(graph_); graph_->AddBlock(inner_header); - HPhi* phi_j = new (&allocator_) HPhi(&allocator_, 0, 0, Primitive::kPrimInt); + HPhi* phi_j = new (&allocator_) HPhi(&allocator_, 0, 0, DataType::Type::kInt32); null_check = new (&allocator_) HNullCheck(parameter, 0); array_length = new (&allocator_) HArrayLength(null_check, 0); - 
HSub* sub = new (&allocator_) HSub(Primitive::kPrimInt, array_length, phi_i); - add = new (&allocator_) HAdd(Primitive::kPrimInt, sub, constant_minus_1); + HSub* sub = new (&allocator_) HSub(DataType::Type::kInt32, array_length, phi_i); + add = new (&allocator_) HAdd(DataType::Type::kInt32, sub, constant_minus_1); cmp = new (&allocator_) HGreaterThanOrEqual(phi_j, add); if_inst = new (&allocator_) HIf(cmp); inner_header->AddPhi(phi_j); @@ -850,17 +850,17 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) { array_length = new (&allocator_) HArrayLength(null_check, 0); HBoundsCheck* bounds_check1 = new (&allocator_) HBoundsCheck(phi_j, array_length, 0); HArrayGet* array_get_j = new (&allocator_) - HArrayGet(null_check, bounds_check1, Primitive::kPrimInt, 0); + HArrayGet(null_check, bounds_check1, DataType::Type::kInt32, 0); inner_body_compare->AddInstruction(null_check); inner_body_compare->AddInstruction(array_length); inner_body_compare->AddInstruction(bounds_check1); inner_body_compare->AddInstruction(array_get_j); - HInstruction* j_plus_1 = new (&allocator_) HAdd(Primitive::kPrimInt, phi_j, constant_1); + HInstruction* j_plus_1 = new (&allocator_) HAdd(DataType::Type::kInt32, phi_j, constant_1); null_check = new (&allocator_) HNullCheck(parameter, 0); array_length = new (&allocator_) HArrayLength(null_check, 0); HBoundsCheck* bounds_check2 = new (&allocator_) HBoundsCheck(j_plus_1, array_length, 0); HArrayGet* array_get_j_plus_1 = new (&allocator_) - HArrayGet(null_check, bounds_check2, Primitive::kPrimInt, 0); + HArrayGet(null_check, bounds_check2, DataType::Type::kInt32, 0); cmp = new (&allocator_) HGreaterThanOrEqual(array_get_j, array_get_j_plus_1); if_inst = new (&allocator_) HIf(cmp); inner_body_compare->AddInstruction(j_plus_1); @@ -873,13 +873,13 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) { HBasicBlock* inner_body_swap = new (&allocator_) HBasicBlock(graph_); graph_->AddBlock(inner_body_swap); - j_plus_1 = new (&allocator_) HAdd(Primitive::kPrimInt, phi_j, constant_1); + j_plus_1 = new (&allocator_) HAdd(DataType::Type::kInt32, phi_j, constant_1); // temp = array[j+1] null_check = new (&allocator_) HNullCheck(parameter, 0); array_length = new (&allocator_) HArrayLength(null_check, 0); HInstruction* bounds_check3 = new (&allocator_) HBoundsCheck(j_plus_1, array_length, 0); array_get_j_plus_1 = new (&allocator_) - HArrayGet(null_check, bounds_check3, Primitive::kPrimInt, 0); + HArrayGet(null_check, bounds_check3, DataType::Type::kInt32, 0); inner_body_swap->AddInstruction(j_plus_1); inner_body_swap->AddInstruction(null_check); inner_body_swap->AddInstruction(array_length); @@ -890,7 +890,7 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) { array_length = new (&allocator_) HArrayLength(null_check, 0); HInstruction* bounds_check4 = new (&allocator_) HBoundsCheck(phi_j, array_length, 0); array_get_j = new (&allocator_) - HArrayGet(null_check, bounds_check4, Primitive::kPrimInt, 0); + HArrayGet(null_check, bounds_check4, DataType::Type::kInt32, 0); inner_body_swap->AddInstruction(null_check); inner_body_swap->AddInstruction(array_length); inner_body_swap->AddInstruction(bounds_check4); @@ -899,7 +899,7 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) { array_length = new (&allocator_) HArrayLength(null_check, 0); HInstruction* bounds_check5 = new (&allocator_) HBoundsCheck(j_plus_1, array_length, 0); HArraySet* array_set_j_plus_1 = new (&allocator_) - HArraySet(null_check, bounds_check5, 
array_get_j, Primitive::kPrimInt, 0); + HArraySet(null_check, bounds_check5, array_get_j, DataType::Type::kInt32, 0); inner_body_swap->AddInstruction(null_check); inner_body_swap->AddInstruction(array_length); inner_body_swap->AddInstruction(bounds_check5); @@ -909,7 +909,7 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) { array_length = new (&allocator_) HArrayLength(null_check, 0); HInstruction* bounds_check6 = new (&allocator_) HBoundsCheck(phi_j, array_length, 0); HArraySet* array_set_j = new (&allocator_) - HArraySet(null_check, bounds_check6, array_get_j_plus_1, Primitive::kPrimInt, 0); + HArraySet(null_check, bounds_check6, array_get_j_plus_1, DataType::Type::kInt32, 0); inner_body_swap->AddInstruction(null_check); inner_body_swap->AddInstruction(array_length); inner_body_swap->AddInstruction(bounds_check6); @@ -918,14 +918,14 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) { HBasicBlock* inner_body_add = new (&allocator_) HBasicBlock(graph_); graph_->AddBlock(inner_body_add); - add = new (&allocator_) HAdd(Primitive::kPrimInt, phi_j, constant_1); + add = new (&allocator_) HAdd(DataType::Type::kInt32, phi_j, constant_1); inner_body_add->AddInstruction(add); inner_body_add->AddInstruction(new (&allocator_) HGoto()); phi_j->AddInput(add); HBasicBlock* outer_body_add = new (&allocator_) HBasicBlock(graph_); graph_->AddBlock(outer_body_add); - add = new (&allocator_) HAdd(Primitive::kPrimInt, phi_i, constant_1); + add = new (&allocator_) HAdd(DataType::Type::kInt32, phi_i, constant_1); outer_body_add->AddInstruction(add); outer_body_add->AddInstruction(new (&allocator_) HGoto()); phi_i->AddInput(add); @@ -965,7 +965,7 @@ TEST_F(BoundsCheckEliminationTest, ModArrayBoundsElimination) { graph_->AddBlock(entry); graph_->SetEntryBlock(entry); HInstruction* param_i = new (&allocator_) - HParameterValue(graph_->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimInt); + HParameterValue(graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); entry->AddInstruction(param_i); HInstruction* constant_0 = graph_->GetIntConstant(0); @@ -994,7 +994,7 @@ TEST_F(BoundsCheckEliminationTest, ModArrayBoundsElimination) { loop_header->AddSuccessor(loop_body); // false successor loop_body->AddSuccessor(loop_header); - HPhi* phi = new (&allocator_) HPhi(&allocator_, 0, 0, Primitive::kPrimInt); + HPhi* phi = new (&allocator_) HPhi(&allocator_, 0, 0, DataType::Type::kInt32); HInstruction* cmp = new (&allocator_) HGreaterThanOrEqual(phi, constant_200); HInstruction* if_inst = new (&allocator_) HIf(cmp); loop_header->AddPhi(phi); @@ -1005,38 +1005,38 @@ TEST_F(BoundsCheckEliminationTest, ModArrayBoundsElimination) { ////////////////////////////////////////////////////////////////////////////////// // LOOP BODY: // array[i % 10] = 10; - HRem* i_mod_10 = new (&allocator_) HRem(Primitive::kPrimInt, phi, constant_10, 0); + HRem* i_mod_10 = new (&allocator_) HRem(DataType::Type::kInt32, phi, constant_10, 0); HBoundsCheck* bounds_check_i_mod_10 = new (&allocator_) HBoundsCheck(i_mod_10, constant_10, 0); HInstruction* array_set = new (&allocator_) HArraySet( - new_array, bounds_check_i_mod_10, constant_10, Primitive::kPrimInt, 0); + new_array, bounds_check_i_mod_10, constant_10, DataType::Type::kInt32, 0); loop_body->AddInstruction(i_mod_10); loop_body->AddInstruction(bounds_check_i_mod_10); loop_body->AddInstruction(array_set); // array[i % 1] = 10; - HRem* i_mod_1 = new (&allocator_) HRem(Primitive::kPrimInt, phi, constant_1, 0); + HRem* i_mod_1 = new 
(&allocator_) HRem(DataType::Type::kInt32, phi, constant_1, 0); HBoundsCheck* bounds_check_i_mod_1 = new (&allocator_) HBoundsCheck(i_mod_1, constant_10, 0); array_set = new (&allocator_) HArraySet( - new_array, bounds_check_i_mod_1, constant_10, Primitive::kPrimInt, 0); + new_array, bounds_check_i_mod_1, constant_10, DataType::Type::kInt32, 0); loop_body->AddInstruction(i_mod_1); loop_body->AddInstruction(bounds_check_i_mod_1); loop_body->AddInstruction(array_set); // array[i % 200] = 10; - HRem* i_mod_200 = new (&allocator_) HRem(Primitive::kPrimInt, phi, constant_1, 0); + HRem* i_mod_200 = new (&allocator_) HRem(DataType::Type::kInt32, phi, constant_1, 0); HBoundsCheck* bounds_check_i_mod_200 = new (&allocator_) HBoundsCheck(i_mod_200, constant_10, 0); array_set = new (&allocator_) HArraySet( - new_array, bounds_check_i_mod_200, constant_10, Primitive::kPrimInt, 0); + new_array, bounds_check_i_mod_200, constant_10, DataType::Type::kInt32, 0); loop_body->AddInstruction(i_mod_200); loop_body->AddInstruction(bounds_check_i_mod_200); loop_body->AddInstruction(array_set); // array[i % -10] = 10; - HRem* i_mod_minus_10 = new (&allocator_) HRem(Primitive::kPrimInt, phi, constant_minus_10, 0); + HRem* i_mod_minus_10 = new (&allocator_) HRem(DataType::Type::kInt32, phi, constant_minus_10, 0); HBoundsCheck* bounds_check_i_mod_minus_10 = new (&allocator_) HBoundsCheck( i_mod_minus_10, constant_10, 0); array_set = new (&allocator_) HArraySet( - new_array, bounds_check_i_mod_minus_10, constant_10, Primitive::kPrimInt, 0); + new_array, bounds_check_i_mod_minus_10, constant_10, DataType::Type::kInt32, 0); loop_body->AddInstruction(i_mod_minus_10); loop_body->AddInstruction(bounds_check_i_mod_minus_10); loop_body->AddInstruction(array_set); @@ -1044,11 +1044,11 @@ TEST_F(BoundsCheckEliminationTest, ModArrayBoundsElimination) { // array[i%array.length] = 10; HNullCheck* null_check = new (&allocator_) HNullCheck(new_array, 0); HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0); - HRem* i_mod_array_length = new (&allocator_) HRem(Primitive::kPrimInt, phi, array_length, 0); + HRem* i_mod_array_length = new (&allocator_) HRem(DataType::Type::kInt32, phi, array_length, 0); HBoundsCheck* bounds_check_i_mod_array_len = new (&allocator_) HBoundsCheck( i_mod_array_length, array_length, 0); array_set = new (&allocator_) HArraySet( - null_check, bounds_check_i_mod_array_len, constant_10, Primitive::kPrimInt, 0); + null_check, bounds_check_i_mod_array_len, constant_10, DataType::Type::kInt32, 0); loop_body->AddInstruction(null_check); loop_body->AddInstruction(array_length); loop_body->AddInstruction(i_mod_array_length); @@ -1056,11 +1056,11 @@ TEST_F(BoundsCheckEliminationTest, ModArrayBoundsElimination) { loop_body->AddInstruction(array_set); // array[param_i % 10] = 10; - HRem* param_i_mod_10 = new (&allocator_) HRem(Primitive::kPrimInt, param_i, constant_10, 0); + HRem* param_i_mod_10 = new (&allocator_) HRem(DataType::Type::kInt32, param_i, constant_10, 0); HBoundsCheck* bounds_check_param_i_mod_10 = new (&allocator_) HBoundsCheck( param_i_mod_10, constant_10, 0); array_set = new (&allocator_) HArraySet( - new_array, bounds_check_param_i_mod_10, constant_10, Primitive::kPrimInt, 0); + new_array, bounds_check_param_i_mod_10, constant_10, DataType::Type::kInt32, 0); loop_body->AddInstruction(param_i_mod_10); loop_body->AddInstruction(bounds_check_param_i_mod_10); loop_body->AddInstruction(array_set); @@ -1069,11 +1069,11 @@ TEST_F(BoundsCheckEliminationTest, ModArrayBoundsElimination) { 
null_check = new (&allocator_) HNullCheck(new_array, 0); array_length = new (&allocator_) HArrayLength(null_check, 0); HRem* param_i_mod_array_length = new (&allocator_) HRem( - Primitive::kPrimInt, param_i, array_length, 0); + DataType::Type::kInt32, param_i, array_length, 0); HBoundsCheck* bounds_check_param_i_mod_array_len = new (&allocator_) HBoundsCheck( param_i_mod_array_length, array_length, 0); array_set = new (&allocator_) HArraySet( - null_check, bounds_check_param_i_mod_array_len, constant_10, Primitive::kPrimInt, 0); + null_check, bounds_check_param_i_mod_array_len, constant_10, DataType::Type::kInt32, 0); loop_body->AddInstruction(null_check); loop_body->AddInstruction(array_length); loop_body->AddInstruction(param_i_mod_array_length); @@ -1081,7 +1081,7 @@ TEST_F(BoundsCheckEliminationTest, ModArrayBoundsElimination) { loop_body->AddInstruction(array_set); // i++; - HInstruction* add = new (&allocator_) HAdd(Primitive::kPrimInt, phi, constant_1); + HInstruction* add = new (&allocator_) HAdd(DataType::Type::kInt32, phi, constant_1); loop_body->AddInstruction(add); loop_body->AddInstruction(new (&allocator_) HGoto()); phi->AddInput(add); diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc index 0d9d3d4c92..0e708ed408 100644 --- a/compiler/optimizing/builder.cc +++ b/compiler/optimizing/builder.cc @@ -20,17 +20,52 @@ #include "base/arena_bit_vector.h" #include "base/bit_vector-inl.h" #include "base/logging.h" +#include "data_type-inl.h" #include "dex/verified_method.h" #include "driver/compiler_options.h" #include "mirror/class_loader.h" #include "mirror/dex_cache.h" #include "nodes.h" -#include "primitive.h" #include "thread.h" #include "utils/dex_cache_arrays_layout-inl.h" namespace art { +HGraphBuilder::HGraphBuilder(HGraph* graph, + DexCompilationUnit* dex_compilation_unit, + const DexCompilationUnit* const outer_compilation_unit, + CompilerDriver* driver, + CodeGenerator* code_generator, + OptimizingCompilerStats* compiler_stats, + const uint8_t* interpreter_metadata, + Handle<mirror::DexCache> dex_cache, + VariableSizedHandleScope* handles) + : graph_(graph), + dex_file_(&graph->GetDexFile()), + code_item_(*dex_compilation_unit->GetCodeItem()), + dex_compilation_unit_(dex_compilation_unit), + compiler_driver_(driver), + compilation_stats_(compiler_stats), + block_builder_(graph, dex_file_, code_item_), + ssa_builder_(graph, + dex_compilation_unit->GetClassLoader(), + dex_compilation_unit->GetDexCache(), + handles), + instruction_builder_(graph, + &block_builder_, + &ssa_builder_, + dex_file_, + code_item_, + DataType::FromShorty(dex_compilation_unit_->GetShorty()[0]), + dex_compilation_unit, + outer_compilation_unit, + driver, + code_generator, + interpreter_metadata, + compiler_stats, + dex_cache, + handles) {} + bool HGraphBuilder::SkipCompilation(size_t number_of_branches) { if (compiler_driver_ == nullptr) { // Note that the compiler driver is null when unit testing. 
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h index 2c9a9efadf..9524fe2534 100644 --- a/compiler/optimizing/builder.h +++ b/compiler/optimizing/builder.h @@ -27,7 +27,6 @@ #include "instruction_builder.h" #include "nodes.h" #include "optimizing_compiler_stats.h" -#include "primitive.h" #include "ssa_builder.h" namespace art { @@ -39,45 +38,18 @@ class HGraphBuilder : public ValueObject { HGraphBuilder(HGraph* graph, DexCompilationUnit* dex_compilation_unit, const DexCompilationUnit* const outer_compilation_unit, - const DexFile* dex_file, - const DexFile::CodeItem& code_item, CompilerDriver* driver, CodeGenerator* code_generator, OptimizingCompilerStats* compiler_stats, const uint8_t* interpreter_metadata, Handle<mirror::DexCache> dex_cache, - VariableSizedHandleScope* handles) - : graph_(graph), - dex_file_(dex_file), - code_item_(code_item), - dex_compilation_unit_(dex_compilation_unit), - compiler_driver_(driver), - compilation_stats_(compiler_stats), - block_builder_(graph, dex_file, code_item), - ssa_builder_(graph, - dex_compilation_unit->GetClassLoader(), - dex_compilation_unit->GetDexCache(), - handles), - instruction_builder_(graph, - &block_builder_, - &ssa_builder_, - dex_file, - code_item_, - Primitive::GetType(dex_compilation_unit_->GetShorty()[0]), - dex_compilation_unit, - outer_compilation_unit, - driver, - code_generator, - interpreter_metadata, - compiler_stats, - dex_cache, - handles) {} + VariableSizedHandleScope* handles); // Only for unit testing. HGraphBuilder(HGraph* graph, const DexFile::CodeItem& code_item, VariableSizedHandleScope* handles, - Primitive::Type return_type = Primitive::kPrimInt) + DataType::Type return_type = DataType::Type::kInt32) : graph_(graph), dex_file_(nullptr), code_item_(code_item), diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc index 6533e2b9f7..b8e4f326c8 100644 --- a/compiler/optimizing/code_generator.cc +++ b/compiler/optimizing/code_generator.cc @@ -68,35 +68,35 @@ namespace art { static constexpr bool kEnableDexLayoutOptimizations = false; // Return whether a location is consistent with a type. 
-static bool CheckType(Primitive::Type type, Location location) { +static bool CheckType(DataType::Type type, Location location) { if (location.IsFpuRegister() || (location.IsUnallocated() && (location.GetPolicy() == Location::kRequiresFpuRegister))) { - return (type == Primitive::kPrimFloat) || (type == Primitive::kPrimDouble); + return (type == DataType::Type::kFloat32) || (type == DataType::Type::kFloat64); } else if (location.IsRegister() || (location.IsUnallocated() && (location.GetPolicy() == Location::kRequiresRegister))) { - return Primitive::IsIntegralType(type) || (type == Primitive::kPrimNot); + return DataType::IsIntegralType(type) || (type == DataType::Type::kReference); } else if (location.IsRegisterPair()) { - return type == Primitive::kPrimLong; + return type == DataType::Type::kInt64; } else if (location.IsFpuRegisterPair()) { - return type == Primitive::kPrimDouble; + return type == DataType::Type::kFloat64; } else if (location.IsStackSlot()) { - return (Primitive::IsIntegralType(type) && type != Primitive::kPrimLong) - || (type == Primitive::kPrimFloat) - || (type == Primitive::kPrimNot); + return (DataType::IsIntegralType(type) && type != DataType::Type::kInt64) + || (type == DataType::Type::kFloat32) + || (type == DataType::Type::kReference); } else if (location.IsDoubleStackSlot()) { - return (type == Primitive::kPrimLong) || (type == Primitive::kPrimDouble); + return (type == DataType::Type::kInt64) || (type == DataType::Type::kFloat64); } else if (location.IsConstant()) { if (location.GetConstant()->IsIntConstant()) { - return Primitive::IsIntegralType(type) && (type != Primitive::kPrimLong); + return DataType::IsIntegralType(type) && (type != DataType::Type::kInt64); } else if (location.GetConstant()->IsNullConstant()) { - return type == Primitive::kPrimNot; + return type == DataType::Type::kReference; } else if (location.GetConstant()->IsLongConstant()) { - return type == Primitive::kPrimLong; + return type == DataType::Type::kInt64; } else if (location.GetConstant()->IsFloatConstant()) { - return type == Primitive::kPrimFloat; + return type == DataType::Type::kFloat32; } else { return location.GetConstant()->IsDoubleConstant() - && (type == Primitive::kPrimDouble); + && (type == DataType::Type::kFloat64); } } else { return location.IsInvalid() || (location.GetPolicy() == Location::kAny); @@ -130,7 +130,7 @@ static bool CheckTypeConsistency(HInstruction* instruction) { HEnvironment* environment = instruction->GetEnvironment(); for (size_t i = 0; i < instruction->EnvironmentSize(); ++i) { if (environment->GetInstructionAt(i) != nullptr) { - Primitive::Type type = environment->GetInstructionAt(i)->GetType(); + DataType::Type type = environment->GetInstructionAt(i)->GetType(); DCHECK(CheckType(type, environment->GetLocationAt(i))) << type << " " << environment->GetLocationAt(i); } else { @@ -157,10 +157,10 @@ uint32_t CodeGenerator::GetArrayLengthOffset(HArrayLength* array_length) { } uint32_t CodeGenerator::GetArrayDataOffset(HArrayGet* array_get) { - DCHECK(array_get->GetType() == Primitive::kPrimChar || !array_get->IsStringCharAt()); + DCHECK(array_get->GetType() == DataType::Type::kUint16 || !array_get->IsStringCharAt()); return array_get->IsStringCharAt() ? 
mirror::String::ValueOffset().Uint32Value() - : mirror::Array::DataOffset(Primitive::ComponentSize(array_get->GetType())).Uint32Value(); + : mirror::Array::DataOffset(DataType::Size(array_get->GetType())).Uint32Value(); } bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const { @@ -413,7 +413,7 @@ void CodeGenerator::GenerateInvokePolymorphicCall(HInvokePolymorphic* invoke) { void CodeGenerator::CreateUnresolvedFieldLocationSummary( HInstruction* field_access, - Primitive::Type field_type, + DataType::Type field_type, const FieldAccessCallingConvention& calling_convention) { bool is_instance = field_access->IsUnresolvedInstanceFieldGet() || field_access->IsUnresolvedInstanceFieldSet(); @@ -435,7 +435,7 @@ void CodeGenerator::CreateUnresolvedFieldLocationSummary( // regardless of the the type. Because of that we forced to special case // the access to floating point values. if (is_get) { - if (Primitive::IsFloatingPointType(field_type)) { + if (DataType::IsFloatingPointType(field_type)) { // The return value will be stored in regular registers while register // allocator expects it in a floating point register. // Note We don't need to request additional temps because the return @@ -448,7 +448,7 @@ void CodeGenerator::CreateUnresolvedFieldLocationSummary( } } else { size_t set_index = is_instance ? 1 : 0; - if (Primitive::IsFloatingPointType(field_type)) { + if (DataType::IsFloatingPointType(field_type)) { // The set value comes from a float location while the calling convention // expects it in a regular register location. Allocate a temp for it and // make the transfer at codegen. @@ -463,7 +463,7 @@ void CodeGenerator::CreateUnresolvedFieldLocationSummary( void CodeGenerator::GenerateUnresolvedFieldAccess( HInstruction* field_access, - Primitive::Type field_type, + DataType::Type field_type, uint32_t field_index, uint32_t dex_pc, const FieldAccessCallingConvention& calling_convention) { @@ -476,51 +476,52 @@ void CodeGenerator::GenerateUnresolvedFieldAccess( bool is_get = field_access->IsUnresolvedInstanceFieldGet() || field_access->IsUnresolvedStaticFieldGet(); - if (!is_get && Primitive::IsFloatingPointType(field_type)) { + if (!is_get && DataType::IsFloatingPointType(field_type)) { // Copy the float value to be set into the calling convention register. // Note that using directly the temp location is problematic as we don't // support temp register pairs. To avoid boilerplate conversion code, use // the location from the calling convention. MoveLocation(calling_convention.GetSetValueLocation(field_type, is_instance), locations->InAt(is_instance ? 1 : 0), - (Primitive::Is64BitType(field_type) ? Primitive::kPrimLong : Primitive::kPrimInt)); + (DataType::Is64BitType(field_type) ? DataType::Type::kInt64 + : DataType::Type::kInt32)); } QuickEntrypointEnum entrypoint = kQuickSet8Static; // Initialize to anything to avoid warnings. switch (field_type) { - case Primitive::kPrimBoolean: + case DataType::Type::kBool: entrypoint = is_instance ? (is_get ? kQuickGetBooleanInstance : kQuickSet8Instance) : (is_get ? kQuickGetBooleanStatic : kQuickSet8Static); break; - case Primitive::kPrimByte: + case DataType::Type::kInt8: entrypoint = is_instance ? (is_get ? kQuickGetByteInstance : kQuickSet8Instance) : (is_get ? kQuickGetByteStatic : kQuickSet8Static); break; - case Primitive::kPrimShort: + case DataType::Type::kInt16: entrypoint = is_instance ? (is_get ? kQuickGetShortInstance : kQuickSet16Instance) : (is_get ? 
kQuickGetShortStatic : kQuickSet16Static); break; - case Primitive::kPrimChar: + case DataType::Type::kUint16: entrypoint = is_instance ? (is_get ? kQuickGetCharInstance : kQuickSet16Instance) : (is_get ? kQuickGetCharStatic : kQuickSet16Static); break; - case Primitive::kPrimInt: - case Primitive::kPrimFloat: + case DataType::Type::kInt32: + case DataType::Type::kFloat32: entrypoint = is_instance ? (is_get ? kQuickGet32Instance : kQuickSet32Instance) : (is_get ? kQuickGet32Static : kQuickSet32Static); break; - case Primitive::kPrimNot: + case DataType::Type::kReference: entrypoint = is_instance ? (is_get ? kQuickGetObjInstance : kQuickSetObjInstance) : (is_get ? kQuickGetObjStatic : kQuickSetObjStatic); break; - case Primitive::kPrimLong: - case Primitive::kPrimDouble: + case DataType::Type::kInt64: + case DataType::Type::kFloat64: entrypoint = is_instance ? (is_get ? kQuickGet64Instance : kQuickSet64Instance) : (is_get ? kQuickGet64Static : kQuickSet64Static); @@ -530,7 +531,7 @@ void CodeGenerator::GenerateUnresolvedFieldAccess( } InvokeRuntime(entrypoint, field_access, dex_pc, nullptr); - if (is_get && Primitive::IsFloatingPointType(field_type)) { + if (is_get && DataType::IsFloatingPointType(field_type)) { MoveLocation(locations->Out(), calling_convention.GetReturnLocation(field_type), field_type); } } @@ -780,8 +781,8 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction, return; } if (instruction->IsRem()) { - Primitive::Type type = instruction->AsRem()->GetResultType(); - if ((type == Primitive::kPrimFloat) || (type == Primitive::kPrimDouble)) { + DataType::Type type = instruction->AsRem()->GetResultType(); + if ((type == DataType::Type::kFloat32) || (type == DataType::Type::kFloat64)) { return; } } @@ -1052,7 +1053,7 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(id)) { uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(id); stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset); - if (current->GetType() == Primitive::kPrimLong) { + if (current->GetType() == DataType::Type::kInt64) { stack_map_stream_.AddDexRegisterEntry( DexRegisterLocation::Kind::kInStack, offset + kVRegSize); ++i; @@ -1060,7 +1061,7 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo } } else { stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, id); - if (current->GetType() == Primitive::kPrimLong) { + if (current->GetType() == DataType::Type::kInt64) { stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegisterHigh, id); ++i; DCHECK_LT(i, environment_size); @@ -1074,7 +1075,7 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(id)) { uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(id); stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset); - if (current->GetType() == Primitive::kPrimDouble) { + if (current->GetType() == DataType::Type::kFloat64) { stack_map_stream_.AddDexRegisterEntry( DexRegisterLocation::Kind::kInStack, offset + kVRegSize); ++i; @@ -1082,7 +1083,7 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo } } else { stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, id); - if (current->GetType() == Primitive::kPrimDouble) { + if (current->GetType() == DataType::Type::kFloat64) { 
stack_map_stream_.AddDexRegisterEntry( DexRegisterLocation::Kind::kInFpuRegisterHigh, id); ++i; @@ -1226,7 +1227,7 @@ void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend LiveInterval* interval = current->GetLiveInterval(); // We only need to clear bits of loop phis containing objects and allocated in register. // Loop phis allocated on stack already have the object in the stack. - if (current->GetType() == Primitive::kPrimNot + if (current->GetType() == DataType::Type::kReference && interval->HasRegister() && interval->HasSpillSlot()) { locations->ClearStackBit(interval->GetSpillSlot() / kVRegSize); @@ -1236,10 +1237,10 @@ void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend void CodeGenerator::EmitParallelMoves(Location from1, Location to1, - Primitive::Type type1, + DataType::Type type1, Location from2, Location to2, - Primitive::Type type2) { + DataType::Type type2) { HParallelMove parallel_move(GetGraph()->GetArena()); parallel_move.AddMove(from1, to1, type1, nullptr); parallel_move.AddMove(from2, to2, type2, nullptr); diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h index 4b4abdfaa3..ac3c8394e6 100644 --- a/compiler/optimizing/code_generator.h +++ b/compiler/optimizing/code_generator.h @@ -146,8 +146,8 @@ class SlowPathCode : public DeletableArenaObject<kArenaAllocSlowPaths> { class InvokeDexCallingConventionVisitor { public: - virtual Location GetNextLocation(Primitive::Type type) = 0; - virtual Location GetReturnLocation(Primitive::Type type) const = 0; + virtual Location GetNextLocation(DataType::Type type) = 0; + virtual Location GetReturnLocation(DataType::Type type) const = 0; virtual Location GetMethodLocation() const = 0; protected: @@ -169,9 +169,9 @@ class FieldAccessCallingConvention { public: virtual Location GetObjectLocation() const = 0; virtual Location GetFieldIndexLocation() const = 0; - virtual Location GetReturnLocation(Primitive::Type type) const = 0; - virtual Location GetSetValueLocation(Primitive::Type type, bool is_instance) const = 0; - virtual Location GetFpuLocation(Primitive::Type type) const = 0; + virtual Location GetReturnLocation(DataType::Type type) const = 0; + virtual Location GetSetValueLocation(DataType::Type type, bool is_instance) const = 0; + virtual Location GetFpuLocation(DataType::Type type) const = 0; virtual ~FieldAccessCallingConvention() {} protected: @@ -213,7 +213,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> { virtual void GenerateFrameExit() = 0; virtual void Bind(HBasicBlock* block) = 0; virtual void MoveConstant(Location destination, int32_t value) = 0; - virtual void MoveLocation(Location dst, Location src, Primitive::Type dst_type) = 0; + virtual void MoveLocation(Location dst, Location src, DataType::Type dst_type) = 0; virtual void AddLocationAsTemp(Location location, LocationSummary* locations) = 0; virtual Assembler* GetAssembler() = 0; @@ -265,7 +265,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> { virtual size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) = 0; virtual size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) = 0; - virtual bool NeedsTwoRegisters(Primitive::Type type) const = 0; + virtual bool NeedsTwoRegisters(DataType::Type type) const = 0; // Returns whether we should split long moves in parallel moves. 
virtual bool ShouldSplitLongMoves() const { return false; } @@ -407,15 +407,15 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> { void EmitParallelMoves(Location from1, Location to1, - Primitive::Type type1, + DataType::Type type1, Location from2, Location to2, - Primitive::Type type2); + DataType::Type type2); - static bool StoreNeedsWriteBarrier(Primitive::Type type, HInstruction* value) { + static bool StoreNeedsWriteBarrier(DataType::Type type, HInstruction* value) { // Check that null value is not represented as an integer constant. - DCHECK(type != Primitive::kPrimNot || !value->IsIntConstant()); - return type == Primitive::kPrimNot && !value->IsNullConstant(); + DCHECK(type != DataType::Type::kReference || !value->IsIntConstant()); + return type == DataType::Type::kReference && !value->IsNullConstant(); } @@ -504,12 +504,12 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> { void CreateUnresolvedFieldLocationSummary( HInstruction* field_access, - Primitive::Type field_type, + DataType::Type field_type, const FieldAccessCallingConvention& calling_convention); void GenerateUnresolvedFieldAccess( HInstruction* field_access, - Primitive::Type field_type, + DataType::Type field_type, uint32_t field_index, uint32_t dex_pc, const FieldAccessCallingConvention& calling_convention); @@ -573,7 +573,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> { HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) = 0; // Copy the result of a call into the given target. - virtual void MoveFromReturnRegister(Location trg, Primitive::Type type) = 0; + virtual void MoveFromReturnRegister(Location trg, DataType::Type type) = 0; virtual void GenerateNop() = 0; diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc index aaea7c1025..42e9f68a76 100644 --- a/compiler/optimizing/code_generator_arm64.cc +++ b/compiler/optimizing/code_generator_arm64.cc @@ -144,24 +144,24 @@ inline Condition ARM64FPCondition(IfCondition cond, bool gt_bias) { } } -Location ARM64ReturnLocation(Primitive::Type return_type) { +Location ARM64ReturnLocation(DataType::Type return_type) { // Note that in practice, `LocationFrom(x0)` and `LocationFrom(w0)` create the // same Location object, and so do `LocationFrom(d0)` and `LocationFrom(s0)`, // but we use the exact registers for clarity. - if (return_type == Primitive::kPrimFloat) { + if (return_type == DataType::Type::kFloat32) { return LocationFrom(s0); - } else if (return_type == Primitive::kPrimDouble) { + } else if (return_type == DataType::Type::kFloat64) { return LocationFrom(d0); - } else if (return_type == Primitive::kPrimLong) { + } else if (return_type == DataType::Type::kInt64) { return LocationFrom(x0); - } else if (return_type == Primitive::kPrimVoid) { + } else if (return_type == DataType::Type::kVoid) { return Location::NoLocation(); } else { return LocationFrom(w0); } } -Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type return_type) { +Location InvokeRuntimeCallingConvention::GetReturnLocation(DataType::Type return_type) { return ARM64ReturnLocation(return_type); } @@ -265,9 +265,12 @@ class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 { // We're moving two locations to locations that could overlap, so we need a parallel // move resolver. 
InvokeRuntimeCallingConvention calling_convention; - codegen->EmitParallelMoves( - locations->InAt(0), LocationFrom(calling_convention.GetRegisterAt(0)), Primitive::kPrimInt, - locations->InAt(1), LocationFrom(calling_convention.GetRegisterAt(1)), Primitive::kPrimInt); + codegen->EmitParallelMoves(locations->InAt(0), + LocationFrom(calling_convention.GetRegisterAt(0)), + DataType::Type::kInt32, + locations->InAt(1), + LocationFrom(calling_convention.GetRegisterAt(1)), + DataType::Type::kInt32); QuickEntrypointEnum entrypoint = instruction_->AsBoundsCheck()->IsStringCharAt() ? kQuickThrowStringBounds : kQuickThrowArrayBounds; @@ -356,7 +359,7 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 { // Move the class to the desired location. if (out.IsValid()) { DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg())); - Primitive::Type type = instruction_->GetType(); + DataType::Type type = instruction_->GetType(); arm64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type); } RestoreLiveRegisters(codegen, locations); @@ -376,7 +379,7 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 { { SingleEmissionCheckScope guard(arm64_codegen->GetVIXLAssembler()); __ Bind(strp_label); - __ str(RegisterFrom(locations->Out(), Primitive::kPrimNot), + __ str(RegisterFrom(locations->Out(), DataType::Type::kReference), MemOperand(bss_entry_temp_, /* offset placeholder */ 0)); } } @@ -427,7 +430,7 @@ class LoadStringSlowPathARM64 : public SlowPathCodeARM64 { __ Mov(calling_convention.GetRegisterAt(0).W(), string_index.index_); arm64_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>(); - Primitive::Type type = instruction_->GetType(); + DataType::Type type = instruction_->GetType(); arm64_codegen->MoveLocation(locations->Out(), calling_convention.GetReturnLocation(type), type); RestoreLiveRegisters(codegen, locations); @@ -446,7 +449,7 @@ class LoadStringSlowPathARM64 : public SlowPathCodeARM64 { { SingleEmissionCheckScope guard(arm64_codegen->GetVIXLAssembler()); __ Bind(strp_label); - __ str(RegisterFrom(locations->Out(), Primitive::kPrimNot), + __ str(RegisterFrom(locations->Out(), DataType::Type::kReference), MemOperand(temp_, /* offset placeholder */ 0)); } @@ -553,14 +556,14 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 { InvokeRuntimeCallingConvention calling_convention; codegen->EmitParallelMoves(locations->InAt(0), LocationFrom(calling_convention.GetRegisterAt(0)), - Primitive::kPrimNot, + DataType::Type::kReference, locations->InAt(1), LocationFrom(calling_convention.GetRegisterAt(1)), - Primitive::kPrimNot); + DataType::Type::kReference); if (instruction_->IsInstanceOf()) { arm64_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this); CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>(); - Primitive::Type ret_type = instruction_->GetType(); + DataType::Type ret_type = instruction_->GetType(); Location ret_loc = calling_convention.GetReturnLocation(ret_type); arm64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type); } else { @@ -621,17 +624,17 @@ class ArraySetSlowPathARM64 : public SlowPathCodeARM64 { parallel_move.AddMove( locations->InAt(0), LocationFrom(calling_convention.GetRegisterAt(0)), - Primitive::kPrimNot, + DataType::Type::kReference, nullptr); parallel_move.AddMove( locations->InAt(1), LocationFrom(calling_convention.GetRegisterAt(1)), 
- Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); parallel_move.AddMove( locations->InAt(2), LocationFrom(calling_convention.GetRegisterAt(2)), - Primitive::kPrimNot, + DataType::Type::kReference, nullptr); codegen->GetMoveResolver()->EmitNativeCode(&parallel_move); @@ -1200,7 +1203,7 @@ class ReadBarrierForHeapReferenceSlowPathARM64 : public SlowPathCodeARM64 { void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen); LocationSummary* locations = instruction_->GetLocations(); - Primitive::Type type = Primitive::kPrimNot; + DataType::Type type = DataType::Type::kReference; DCHECK(locations->CanCall()); DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(out_.reg())); DCHECK(instruction_->IsInstanceFieldGet() || @@ -1229,7 +1232,7 @@ class ReadBarrierForHeapReferenceSlowPathARM64 : public SlowPathCodeARM64 { // Handle `index_` for HArrayGet and UnsafeGetObject/UnsafeGetObjectVolatile intrinsics. if (instruction_->IsArrayGet()) { // Compute the actual memory offset and store it in `index`. - Register index_reg = RegisterFrom(index_, Primitive::kPrimInt); + Register index_reg = RegisterFrom(index_, DataType::Type::kInt32); DCHECK(locations->GetLiveRegisters()->ContainsCoreRegister(index_.reg())); if (codegen->IsCoreCalleeSaveRegister(index_.reg())) { // We are about to change the value of `index_reg` (see the @@ -1268,7 +1271,7 @@ class ReadBarrierForHeapReferenceSlowPathARM64 : public SlowPathCodeARM64 { // factor (2) cannot overflow in practice, as the runtime is // unable to allocate object arrays with a size larger than // 2^26 - 1 (that is, 2^28 - 4 bytes). - __ Lsl(index_reg, index_reg, Primitive::ComponentSizeShift(type)); + __ Lsl(index_reg, index_reg, DataType::SizeShift(type)); static_assert( sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t), "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes."); @@ -1303,7 +1306,7 @@ class ReadBarrierForHeapReferenceSlowPathARM64 : public SlowPathCodeARM64 { if (index.IsValid()) { parallel_move.AddMove(index, LocationFrom(calling_convention.GetRegisterAt(2)), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); codegen->GetMoveResolver()->EmitNativeCode(&parallel_move); } else { @@ -1365,7 +1368,7 @@ class ReadBarrierForRootSlowPathARM64 : public SlowPathCodeARM64 { void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { LocationSummary* locations = instruction_->GetLocations(); - Primitive::Type type = Primitive::kPrimNot; + DataType::Type type = DataType::Type::kReference; DCHECK(locations->CanCall()); DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(out_.reg())); DCHECK(instruction_->IsLoadClass() || instruction_->IsLoadString()) @@ -1387,7 +1390,7 @@ class ReadBarrierForRootSlowPathARM64 : public SlowPathCodeARM64 { // type); // // which would emit a 32-bit move, as `type` is a (32-bit wide) - // reference type (`Primitive::kPrimNot`). + // reference type (`DataType::Type::kReference`).
__ Mov(calling_convention.GetRegisterAt(0), XRegisterFrom(out_)); arm64_codegen->InvokeRuntime(kQuickReadBarrierForRootSlow, instruction_, @@ -1411,26 +1414,26 @@ class ReadBarrierForRootSlowPathARM64 : public SlowPathCodeARM64 { #undef __ -Location InvokeDexCallingConventionVisitorARM64::GetNextLocation(Primitive::Type type) { +Location InvokeDexCallingConventionVisitorARM64::GetNextLocation(DataType::Type type) { Location next_location; - if (type == Primitive::kPrimVoid) { + if (type == DataType::Type::kVoid) { LOG(FATAL) << "Unreachable type " << type; } - if (Primitive::IsFloatingPointType(type) && + if (DataType::IsFloatingPointType(type) && (float_index_ < calling_convention.GetNumberOfFpuRegisters())) { next_location = LocationFrom(calling_convention.GetFpuRegisterAt(float_index_++)); - } else if (!Primitive::IsFloatingPointType(type) && + } else if (!DataType::IsFloatingPointType(type) && (gp_index_ < calling_convention.GetNumberOfRegisters())) { next_location = LocationFrom(calling_convention.GetRegisterAt(gp_index_++)); } else { size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_); - next_location = Primitive::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset) - : Location::StackSlot(stack_offset); + next_location = DataType::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset) + : Location::StackSlot(stack_offset); } // Space on the stack is reserved for all arguments. - stack_index_ += Primitive::Is64BitType(type) ? 2 : 1; + stack_index_ += DataType::Is64BitType(type) ? 2 : 1; return next_location; } @@ -1547,7 +1550,7 @@ void ParallelMoveResolverARM64::FreeScratchLocation(Location loc) { void ParallelMoveResolverARM64::EmitMove(size_t index) { MoveOperands* move = moves_[index]; - codegen_->MoveLocation(move->GetDestination(), move->GetSource(), Primitive::kPrimVoid); + codegen_->MoveLocation(move->GetDestination(), move->GetSource(), DataType::Type::kVoid); } void CodeGeneratorARM64::GenerateFrameEntry() { @@ -1638,7 +1641,7 @@ void CodeGeneratorARM64::Bind(HBasicBlock* block) { void CodeGeneratorARM64::MoveConstant(Location location, int32_t value) { DCHECK(location.IsRegister()); - __ Mov(RegisterFrom(location, Primitive::kPrimInt), value); + __ Mov(RegisterFrom(location, DataType::Type::kInt32), value); } void CodeGeneratorARM64::AddLocationAsTemp(Location location, LocationSummary* locations) { @@ -1745,15 +1748,15 @@ void CodeGeneratorARM64::MoveConstant(CPURegister destination, HConstant* consta } -static bool CoherentConstantAndType(Location constant, Primitive::Type type) { +static bool CoherentConstantAndType(Location constant, DataType::Type type) { DCHECK(constant.IsConstant()); HConstant* cst = constant.GetConstant(); - return (cst->IsIntConstant() && type == Primitive::kPrimInt) || + return (cst->IsIntConstant() && type == DataType::Type::kInt32) || // Null is mapped to a core W register, which we associate with kPrimInt. 
- (cst->IsNullConstant() && type == Primitive::kPrimInt) || - (cst->IsLongConstant() && type == Primitive::kPrimLong) || - (cst->IsFloatConstant() && type == Primitive::kPrimFloat) || - (cst->IsDoubleConstant() && type == Primitive::kPrimDouble); + (cst->IsNullConstant() && type == DataType::Type::kInt32) || + (cst->IsLongConstant() && type == DataType::Type::kInt64) || + (cst->IsFloatConstant() && type == DataType::Type::kFloat32) || + (cst->IsDoubleConstant() && type == DataType::Type::kFloat64); } // Allocate a scratch register from the VIXL pool, querying first @@ -1771,7 +1774,7 @@ static CPURegister AcquireFPOrCoreCPURegisterOfSize(vixl::aarch64::MacroAssemble void CodeGeneratorARM64::MoveLocation(Location destination, Location source, - Primitive::Type dst_type) { + DataType::Type dst_type) { if (source.Equals(destination)) { return; } @@ -1780,7 +1783,7 @@ void CodeGeneratorARM64::MoveLocation(Location destination, // locations. When moving from and to a register, the argument type can be // used to generate 32bit instead of 64bit moves. In debug mode we also // checks the coherency of the locations and the type. - bool unspecified_type = (dst_type == Primitive::kPrimVoid); + bool unspecified_type = (dst_type == DataType::Type::kVoid); if (destination.IsRegister() || destination.IsFpuRegister()) { if (unspecified_type) { @@ -1790,17 +1793,17 @@ void CodeGeneratorARM64::MoveLocation(Location destination, || src_cst->IsFloatConstant() || src_cst->IsNullConstant()))) { // For stack slots and 32bit constants, a 64bit type is appropriate. - dst_type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat; + dst_type = destination.IsRegister() ? DataType::Type::kInt32 : DataType::Type::kFloat32; } else { // If the source is a double stack slot or a 64bit constant, a 64bit // type is appropriate. Else the source is a register, and since the // type has not been specified, we chose a 64bit type to force a 64bit // move. - dst_type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble; + dst_type = destination.IsRegister() ? DataType::Type::kInt64 : DataType::Type::kFloat64; } } - DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(dst_type)) || - (destination.IsRegister() && !Primitive::IsFloatingPointType(dst_type))); + DCHECK((destination.IsFpuRegister() && DataType::IsFloatingPointType(dst_type)) || + (destination.IsRegister() && !DataType::IsFloatingPointType(dst_type))); CPURegister dst = CPURegisterFrom(destination, dst_type); if (source.IsStackSlot() || source.IsDoubleStackSlot()) { DCHECK(dst.Is64Bits() == source.IsDoubleStackSlot()); @@ -1815,17 +1818,17 @@ void CodeGeneratorARM64::MoveLocation(Location destination, __ Mov(Register(dst), RegisterFrom(source, dst_type)); } else { DCHECK(destination.IsFpuRegister()); - Primitive::Type source_type = Primitive::Is64BitType(dst_type) - ? Primitive::kPrimLong - : Primitive::kPrimInt; + DataType::Type source_type = DataType::Is64BitType(dst_type) + ? DataType::Type::kInt64 + : DataType::Type::kInt32; __ Fmov(FPRegisterFrom(destination, dst_type), RegisterFrom(source, source_type)); } } else { DCHECK(source.IsFpuRegister()); if (destination.IsRegister()) { - Primitive::Type source_type = Primitive::Is64BitType(dst_type) - ? Primitive::kPrimDouble - : Primitive::kPrimFloat; + DataType::Type source_type = DataType::Is64BitType(dst_type) + ? 
DataType::Type::kFloat64 + : DataType::Type::kFloat32; __ Fmov(RegisterFrom(destination, dst_type), FPRegisterFrom(source, source_type)); } else { DCHECK(destination.IsFpuRegister()); @@ -1859,13 +1862,14 @@ void CodeGeneratorARM64::MoveLocation(Location destination, if (source.IsRegister() || source.IsFpuRegister()) { if (unspecified_type) { if (source.IsRegister()) { - dst_type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong; + dst_type = destination.IsStackSlot() ? DataType::Type::kInt32 : DataType::Type::kInt64; } else { - dst_type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble; + dst_type = + destination.IsStackSlot() ? DataType::Type::kFloat32 : DataType::Type::kFloat64; } } - DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(dst_type)) && - (source.IsFpuRegister() == Primitive::IsFloatingPointType(dst_type))); + DCHECK((destination.IsDoubleStackSlot() == DataType::Is64BitType(dst_type)) && + (source.IsFpuRegister() == DataType::IsFloatingPointType(dst_type))); __ Str(CPURegisterFrom(source, dst_type), StackOperandFrom(destination)); } else if (source.IsConstant()) { DCHECK(unspecified_type || CoherentConstantAndType(source, dst_type)) @@ -1920,31 +1924,31 @@ void CodeGeneratorARM64::MoveLocation(Location destination, } } -void CodeGeneratorARM64::Load(Primitive::Type type, +void CodeGeneratorARM64::Load(DataType::Type type, CPURegister dst, const MemOperand& src) { switch (type) { - case Primitive::kPrimBoolean: + case DataType::Type::kBool: __ Ldrb(Register(dst), src); break; - case Primitive::kPrimByte: + case DataType::Type::kInt8: __ Ldrsb(Register(dst), src); break; - case Primitive::kPrimShort: + case DataType::Type::kInt16: __ Ldrsh(Register(dst), src); break; - case Primitive::kPrimChar: + case DataType::Type::kUint16: __ Ldrh(Register(dst), src); break; - case Primitive::kPrimInt: - case Primitive::kPrimNot: - case Primitive::kPrimLong: - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: - DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type)); + case DataType::Type::kInt32: + case DataType::Type::kReference: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + DCHECK_EQ(dst.Is64Bits(), DataType::Is64BitType(type)); __ Ldr(dst, src); break; - case Primitive::kPrimVoid: + case DataType::Type::kVoid: LOG(FATAL) << "Unreachable type " << type; } } @@ -1956,7 +1960,7 @@ void CodeGeneratorARM64::LoadAcquire(HInstruction* instruction, MacroAssembler* masm = GetVIXLAssembler(); UseScratchRegisterScope temps(masm); Register temp_base = temps.AcquireX(); - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); DCHECK(!src.IsPreIndex()); DCHECK(!src.IsPostIndex()); @@ -1967,7 +1971,7 @@ void CodeGeneratorARM64::LoadAcquire(HInstruction* instruction, // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted. 
MemOperand base = MemOperand(temp_base); switch (type) { - case Primitive::kPrimBoolean: + case DataType::Type::kBool: { ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize); __ ldarb(Register(dst), base); @@ -1976,7 +1980,7 @@ void CodeGeneratorARM64::LoadAcquire(HInstruction* instruction, } } break; - case Primitive::kPrimByte: + case DataType::Type::kInt8: { ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize); __ ldarb(Register(dst), base); @@ -1984,9 +1988,9 @@ void CodeGeneratorARM64::LoadAcquire(HInstruction* instruction, MaybeRecordImplicitNullCheck(instruction); } } - __ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte); + __ Sbfx(Register(dst), Register(dst), 0, DataType::Size(type) * kBitsPerByte); break; - case Primitive::kPrimChar: + case DataType::Type::kUint16: { ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize); __ ldarh(Register(dst), base); @@ -1995,7 +1999,7 @@ void CodeGeneratorARM64::LoadAcquire(HInstruction* instruction, } } break; - case Primitive::kPrimShort: + case DataType::Type::kInt16: { ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize); __ ldarh(Register(dst), base); @@ -2003,12 +2007,12 @@ void CodeGeneratorARM64::LoadAcquire(HInstruction* instruction, MaybeRecordImplicitNullCheck(instruction); } } - __ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte); + __ Sbfx(Register(dst), Register(dst), 0, DataType::Size(type) * kBitsPerByte); break; - case Primitive::kPrimInt: - case Primitive::kPrimNot: - case Primitive::kPrimLong: - DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type)); + case DataType::Type::kInt32: + case DataType::Type::kReference: + case DataType::Type::kInt64: + DCHECK_EQ(dst.Is64Bits(), DataType::Is64BitType(type)); { ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize); __ ldar(Register(dst), base); @@ -2017,10 +2021,10 @@ void CodeGeneratorARM64::LoadAcquire(HInstruction* instruction, } } break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { DCHECK(dst.IsFPRegister()); - DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type)); + DCHECK_EQ(dst.Is64Bits(), DataType::Is64BitType(type)); Register temp = dst.Is64Bits() ? 
temps.AcquireX() : temps.AcquireW(); { @@ -2033,39 +2037,39 @@ void CodeGeneratorARM64::LoadAcquire(HInstruction* instruction, __ Fmov(FPRegister(dst), temp); break; } - case Primitive::kPrimVoid: + case DataType::Type::kVoid: LOG(FATAL) << "Unreachable type " << type; } } } -void CodeGeneratorARM64::Store(Primitive::Type type, +void CodeGeneratorARM64::Store(DataType::Type type, CPURegister src, const MemOperand& dst) { switch (type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: + case DataType::Type::kBool: + case DataType::Type::kInt8: __ Strb(Register(src), dst); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: __ Strh(Register(src), dst); break; - case Primitive::kPrimInt: - case Primitive::kPrimNot: - case Primitive::kPrimLong: - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: - DCHECK_EQ(src.Is64Bits(), Primitive::Is64BitType(type)); + case DataType::Type::kInt32: + case DataType::Type::kReference: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + DCHECK_EQ(src.Is64Bits(), DataType::Is64BitType(type)); __ Str(src, dst); break; - case Primitive::kPrimVoid: + case DataType::Type::kVoid: LOG(FATAL) << "Unreachable type " << type; } } void CodeGeneratorARM64::StoreRelease(HInstruction* instruction, - Primitive::Type type, + DataType::Type type, CPURegister src, const MemOperand& dst, bool needs_null_check) { @@ -2082,8 +2086,8 @@ void CodeGeneratorARM64::StoreRelease(HInstruction* instruction, MemOperand base = MemOperand(temp_base); // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted. switch (type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: + case DataType::Type::kBool: + case DataType::Type::kInt8: { ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize); __ stlrb(Register(src), base); @@ -2092,8 +2096,8 @@ void CodeGeneratorARM64::StoreRelease(HInstruction* instruction, } } break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: { ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize); __ stlrh(Register(src), base); @@ -2102,10 +2106,10 @@ void CodeGeneratorARM64::StoreRelease(HInstruction* instruction, } } break; - case Primitive::kPrimInt: - case Primitive::kPrimNot: - case Primitive::kPrimLong: - DCHECK_EQ(src.Is64Bits(), Primitive::Is64BitType(type)); + case DataType::Type::kInt32: + case DataType::Type::kReference: + case DataType::Type::kInt64: + DCHECK_EQ(src.Is64Bits(), DataType::Is64BitType(type)); { ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize); __ stlr(Register(src), base); @@ -2114,9 +2118,9 @@ void CodeGeneratorARM64::StoreRelease(HInstruction* instruction, } } break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { - DCHECK_EQ(src.Is64Bits(), Primitive::Is64BitType(type)); + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { + DCHECK_EQ(src.Is64Bits(), DataType::Is64BitType(type)); Register temp_src; if (src.IsZero()) { // The zero register is used to avoid synthesizing zero constants. 
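The Load/LoadAcquire/Store/StoreRelease switches above dispatch on the new DataType::Type exactly as they previously dispatched on Primitive::Type, with DataType::Size, DataType::SizeShift, DataType::Is64BitType and DataType::IsFloatingPointType standing in for the old Primitive helpers. The new data_type.h itself is not visible in this part of the diff, so the following is only a minimal sketch of the shape those helpers would need to satisfy the uses above; the enumerator names are taken from this diff, while the enum ordering, underlying type and function bodies are assumptions.

// Sketch only, not the contents of the data_type.h added by this change.
#include <cstddef>
#include <cstdint>

class DataType {
 public:
  // Enumerator names as used throughout this diff; ordering is illustrative.
  enum class Type : uint8_t {
    kReference,
    kBool,
    kInt8,
    kUint16,
    kInt16,
    kInt32,
    kInt64,
    kFloat32,
    kFloat64,
    kVoid,
  };

  // Stands in for Primitive::ComponentSizeShift(): log2 of the value size,
  // used above as an LSL scale factor for array accesses.
  static size_t SizeShift(Type type) {
    switch (type) {
      case Type::kBool:
      case Type::kInt8:
        return 0;  // 1-byte values (ldrb/ldrsb/strb).
      case Type::kUint16:
      case Type::kInt16:
        return 1;  // 2-byte values (ldrh/ldrsh/strh).
      case Type::kInt32:
      case Type::kFloat32:
      case Type::kReference:
        return 2;  // 4-byte values; heap references are 32-bit.
      case Type::kInt64:
      case Type::kFloat64:
        return 3;  // 8-byte values.
      case Type::kVoid:
        break;
    }
    return 0;
  }

  // Stands in for Primitive::ComponentSize(), e.g. for the Sbfx bit widths above.
  static size_t Size(Type type) {
    return (type == Type::kVoid) ? 0u : (size_t{1} << SizeShift(type));
  }

  static bool IsFloatingPointType(Type type) {
    return type == Type::kFloat32 || type == Type::kFloat64;
  }

  static bool Is64BitType(Type type) {
    return type == Type::kInt64 || type == Type::kFloat64;
  }
};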
@@ -2135,7 +2139,7 @@ void CodeGeneratorARM64::StoreRelease(HInstruction* instruction, } break; } - case Primitive::kPrimVoid: + case DataType::Type::kVoid: LOG(FATAL) << "Unreachable type " << type; } } @@ -2269,17 +2273,17 @@ enum UnimplementedInstructionBreakCode { void LocationsBuilderARM64::HandleBinaryOp(HBinaryOperation* instr) { DCHECK_EQ(instr->InputCount(), 2U); LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr); - Primitive::Type type = instr->GetResultType(); + DataType::Type type = instr->GetResultType(); switch (type) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt32: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, ARM64EncodableConstantOrRegister(instr->InputAt(1), instr)); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); @@ -2295,7 +2299,7 @@ void LocationsBuilderARM64::HandleFieldGet(HInstruction* instruction, DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet()); bool object_field_get_with_read_barrier = - kEmitCompilerReadBarrier && (instruction->GetType() == Primitive::kPrimNot); + kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference); LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, object_field_get_with_read_barrier ? @@ -2318,7 +2322,7 @@ void LocationsBuilderARM64::HandleFieldGet(HInstruction* instruction, } } locations->SetInAt(0, Location::RequiresRegister()); - if (Primitive::IsFloatingPointType(instruction->GetType())) { + if (DataType::IsFloatingPointType(instruction->GetType())) { locations->SetOut(Location::RequiresFpuRegister()); } else { // The output overlaps for an object field get when read barriers @@ -2337,13 +2341,14 @@ void InstructionCodeGeneratorARM64::HandleFieldGet(HInstruction* instruction, Location base_loc = locations->InAt(0); Location out = locations->Out(); uint32_t offset = field_info.GetFieldOffset().Uint32Value(); - Primitive::Type field_type = field_info.GetFieldType(); + DataType::Type field_type = field_info.GetFieldType(); MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), field_info.GetFieldOffset()); - if (field_type == Primitive::kPrimNot && kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + if (kEmitCompilerReadBarrier && kUseBakerReadBarrier && + field_type == DataType::Type::kReference) { // Object FieldGet with Baker's read barrier case. // /* HeapReference<Object> */ out = *(base + offset) - Register base = RegisterFrom(base_loc, Primitive::kPrimNot); + Register base = RegisterFrom(base_loc, DataType::Type::kReference); Location maybe_temp = (locations->GetTempCount() != 0) ? 
locations->GetTemp(0) : Location::NoLocation(); // Note that potential implicit null checks are handled in this @@ -2370,7 +2375,7 @@ void InstructionCodeGeneratorARM64::HandleFieldGet(HInstruction* instruction, codegen_->Load(field_type, OutputCPURegister(instruction), field); codegen_->MaybeRecordImplicitNullCheck(instruction); } - if (field_type == Primitive::kPrimNot) { + if (field_type == DataType::Type::kReference) { // If read barriers are enabled, emit read barriers other than // Baker's using a slow path (and also unpoison the loaded // reference, if heap poisoning is enabled). @@ -2385,7 +2390,7 @@ void LocationsBuilderARM64::HandleFieldSet(HInstruction* instruction) { locations->SetInAt(0, Location::RequiresRegister()); if (IsConstantZeroBitPattern(instruction->InputAt(1))) { locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant())); - } else if (Primitive::IsFloatingPointType(instruction->InputAt(1)->GetType())) { + } else if (DataType::IsFloatingPointType(instruction->InputAt(1)->GetType())) { locations->SetInAt(1, Location::RequiresFpuRegister()); } else { locations->SetInAt(1, Location::RequiresRegister()); @@ -2401,14 +2406,14 @@ void InstructionCodeGeneratorARM64::HandleFieldSet(HInstruction* instruction, CPURegister value = InputCPURegisterOrZeroRegAt(instruction, 1); CPURegister source = value; Offset offset = field_info.GetFieldOffset(); - Primitive::Type field_type = field_info.GetFieldType(); + DataType::Type field_type = field_info.GetFieldType(); { // We use a block to end the scratch scope before the write barrier, thus // freeing the temporary registers so they can be used in `MarkGCCard`. UseScratchRegisterScope temps(GetVIXLAssembler()); - if (kPoisonHeapReferences && field_type == Primitive::kPrimNot) { + if (kPoisonHeapReferences && field_type == DataType::Type::kReference) { DCHECK(value.IsW()); Register temp = temps.AcquireW(); __ Mov(temp, value.W()); @@ -2433,11 +2438,11 @@ void InstructionCodeGeneratorARM64::HandleFieldSet(HInstruction* instruction, } void InstructionCodeGeneratorARM64::HandleBinaryOp(HBinaryOperation* instr) { - Primitive::Type type = instr->GetType(); + DataType::Type type = instr->GetType(); switch (type) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: { + case DataType::Type::kInt32: + case DataType::Type::kInt64: { Register dst = OutputRegister(instr); Register lhs = InputRegisterAt(instr, 0); Operand rhs = InputOperandAt(instr, 1); @@ -2466,8 +2471,8 @@ void InstructionCodeGeneratorARM64::HandleBinaryOp(HBinaryOperation* instr) { } break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { FPRegister dst = OutputFPRegister(instr); FPRegister lhs = InputFPRegisterAt(instr, 0); FPRegister rhs = InputFPRegisterAt(instr, 1); @@ -2489,10 +2494,10 @@ void LocationsBuilderARM64::HandleShift(HBinaryOperation* instr) { DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr()); LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr); - Primitive::Type type = instr->GetResultType(); + DataType::Type type = instr->GetResultType(); switch (type) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: { + case DataType::Type::kInt32: + case DataType::Type::kInt64: { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1))); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); @@ -2506,16 +2511,16 @@ void 
LocationsBuilderARM64::HandleShift(HBinaryOperation* instr) { void InstructionCodeGeneratorARM64::HandleShift(HBinaryOperation* instr) { DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr()); - Primitive::Type type = instr->GetType(); + DataType::Type type = instr->GetType(); switch (type) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: { + case DataType::Type::kInt32: + case DataType::Type::kInt64: { Register dst = OutputRegister(instr); Register lhs = InputRegisterAt(instr, 0); Operand rhs = InputOperandAt(instr, 1); if (rhs.IsImmediate()) { uint32_t shift_value = rhs.GetImmediate() & - (type == Primitive::kPrimInt ? kMaxIntShiftDistance : kMaxLongShiftDistance); + (type == DataType::Type::kInt32 ? kMaxIntShiftDistance : kMaxLongShiftDistance); if (instr->IsShl()) { __ Lsl(dst, lhs, shift_value); } else if (instr->IsShr()) { @@ -2558,7 +2563,7 @@ void InstructionCodeGeneratorARM64::VisitAnd(HAnd* instruction) { } void LocationsBuilderARM64::VisitBitwiseNegatedRight(HBitwiseNegatedRight* instr) { - DCHECK(Primitive::IsIntegralType(instr->GetType())) << instr->GetType(); + DCHECK(DataType::IsIntegralType(instr->GetType())) << instr->GetType(); LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr); locations->SetInAt(0, Location::RequiresRegister()); // There is no immediate variant of negated bitwise instructions in AArch64. @@ -2588,8 +2593,8 @@ void InstructionCodeGeneratorARM64::VisitBitwiseNegatedRight(HBitwiseNegatedRigh void LocationsBuilderARM64::VisitDataProcWithShifterOp( HDataProcWithShifterOp* instruction) { - DCHECK(instruction->GetType() == Primitive::kPrimInt || - instruction->GetType() == Primitive::kPrimLong); + DCHECK(instruction->GetType() == DataType::Type::kInt32 || + instruction->GetType() == DataType::Type::kInt64); LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); if (instruction->GetInstrKind() == HInstruction::kNeg) { @@ -2603,9 +2608,9 @@ void LocationsBuilderARM64::VisitDataProcWithShifterOp( void InstructionCodeGeneratorARM64::VisitDataProcWithShifterOp( HDataProcWithShifterOp* instruction) { - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); HInstruction::InstructionKind kind = instruction->GetInstrKind(); - DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong); + DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64); Register out = OutputRegister(instruction); Register left; if (kind != HInstruction::kNeg) { @@ -2731,7 +2736,7 @@ void InstructionCodeGeneratorARM64::VisitMultiplyAccumulate(HMultiplyAccumulate* // Avoid emitting code that could trigger Cortex A53's erratum 835769. // This fixup should be carried out for all multiply-accumulate instructions: // madd, msub, smaddl, smsubl, umaddl and umsubl. 
- if (instr->GetType() == Primitive::kPrimLong && + if (instr->GetType() == DataType::Type::kInt64 && codegen_->GetInstructionSetFeatures().NeedFixCortexA53_835769()) { MacroAssembler* masm = down_cast<CodeGeneratorARM64*>(codegen_)->GetVIXLAssembler(); vixl::aarch64::Instruction* prev = @@ -2760,7 +2765,7 @@ void InstructionCodeGeneratorARM64::VisitMultiplyAccumulate(HMultiplyAccumulate* void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) { bool object_array_get_with_read_barrier = - kEmitCompilerReadBarrier && (instruction->GetType() == Primitive::kPrimNot); + kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference); LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, object_array_get_with_read_barrier ? @@ -2778,7 +2783,7 @@ void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) { // constant index loads we need a temporary only if the offset is too big. uint32_t offset = CodeGenerator::GetArrayDataOffset(instruction); uint32_t index = instruction->GetIndex()->AsIntConstant()->GetValue(); - offset += index << Primitive::ComponentSizeShift(Primitive::kPrimNot); + offset += index << DataType::SizeShift(DataType::Type::kReference); if (offset >= kReferenceLoadMinFarOffset) { locations->AddTemp(FixedTempLocation()); } @@ -2788,7 +2793,7 @@ void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) { } locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); - if (Primitive::IsFloatingPointType(instruction->GetType())) { + if (DataType::IsFloatingPointType(instruction->GetType())) { locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); } else { // The output overlaps in the case of an object array get with @@ -2801,7 +2806,7 @@ void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) { } void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) { - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); Register obj = InputRegisterAt(instruction, 0); LocationSummary* locations = instruction->GetLocations(); Location index = locations->InAt(1); @@ -2814,18 +2819,18 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) { // The read barrier instrumentation of object ArrayGet instructions // does not support the HIntermediateAddress instruction. - DCHECK(!((type == Primitive::kPrimNot) && + DCHECK(!((type == DataType::Type::kReference) && instruction->GetArray()->IsIntermediateAddress() && kEmitCompilerReadBarrier)); - if (type == Primitive::kPrimNot && kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + if (type == DataType::Type::kReference && kEmitCompilerReadBarrier && kUseBakerReadBarrier) { // Object ArrayGet with Baker's read barrier case. // Note that a potential implicit null check is handled in the // CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier call. DCHECK(!instruction->CanDoImplicitNullCheckOn(instruction->InputAt(0))); if (index.IsConstant()) { // Array load with a constant index can be treated as a field load. - offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(type); + offset += Int64ConstantFrom(index) << DataType::SizeShift(type); Location maybe_temp = (locations->GetTempCount() != 0) ? 
locations->GetTemp(0) : Location::NoLocation(); codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction, @@ -2877,7 +2882,7 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) { HeapOperand(obj, offset + (Int64ConstantFrom(index) << 1))); __ Bind(&done); } else { - offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(type); + offset += Int64ConstantFrom(index) << DataType::SizeShift(type); source = HeapOperand(obj, offset); } } else { @@ -2907,7 +2912,7 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) { HeapOperand(temp, XRegisterFrom(index), LSL, 1)); __ Bind(&done); } else { - source = HeapOperand(temp, XRegisterFrom(index), LSL, Primitive::ComponentSizeShift(type)); + source = HeapOperand(temp, XRegisterFrom(index), LSL, DataType::SizeShift(type)); } } if (!maybe_compressed_char_at) { @@ -2917,7 +2922,7 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) { codegen_->MaybeRecordImplicitNullCheck(instruction); } - if (type == Primitive::kPrimNot) { + if (type == DataType::Type::kReference) { static_assert( sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t), "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes."); @@ -2953,7 +2958,7 @@ void InstructionCodeGeneratorARM64::VisitArrayLength(HArrayLength* instruction) } void LocationsBuilderARM64::VisitArraySet(HArraySet* instruction) { - Primitive::Type value_type = instruction->GetComponentType(); + DataType::Type value_type = instruction->GetComponentType(); bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck(); LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary( @@ -2965,7 +2970,7 @@ void LocationsBuilderARM64::VisitArraySet(HArraySet* instruction) { locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); if (IsConstantZeroBitPattern(instruction->InputAt(2))) { locations->SetInAt(2, Location::ConstantLocation(instruction->InputAt(2)->AsConstant())); - } else if (Primitive::IsFloatingPointType(value_type)) { + } else if (DataType::IsFloatingPointType(value_type)) { locations->SetInAt(2, Location::RequiresFpuRegister()); } else { locations->SetInAt(2, Location::RequiresRegister()); @@ -2973,7 +2978,7 @@ void LocationsBuilderARM64::VisitArraySet(HArraySet* instruction) { } void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) { - Primitive::Type value_type = instruction->GetComponentType(); + DataType::Type value_type = instruction->GetComponentType(); LocationSummary* locations = instruction->GetLocations(); bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck(); bool needs_write_barrier = @@ -2983,14 +2988,14 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) { CPURegister value = InputCPURegisterOrZeroRegAt(instruction, 2); CPURegister source = value; Location index = locations->InAt(1); - size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(value_type)).Uint32Value(); + size_t offset = mirror::Array::DataOffset(DataType::Size(value_type)).Uint32Value(); MemOperand destination = HeapOperand(array); MacroAssembler* masm = GetVIXLAssembler(); if (!needs_write_barrier) { DCHECK(!may_need_runtime_call_for_type_check); if (index.IsConstant()) { - offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(value_type); + offset += Int64ConstantFrom(index) << DataType::SizeShift(value_type); destination = HeapOperand(array, offset); } else { 
UseScratchRegisterScope temps(masm); @@ -3010,7 +3015,7 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) { destination = HeapOperand(temp, XRegisterFrom(index), LSL, - Primitive::ComponentSizeShift(value_type)); + DataType::SizeShift(value_type)); } { // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted. @@ -3028,13 +3033,13 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) { UseScratchRegisterScope temps(masm); Register temp = temps.AcquireSameSizeAs(array); if (index.IsConstant()) { - offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(value_type); + offset += Int64ConstantFrom(index) << DataType::SizeShift(value_type); destination = HeapOperand(array, offset); } else { destination = HeapOperand(temp, XRegisterFrom(index), LSL, - Primitive::ComponentSizeShift(value_type)); + DataType::SizeShift(value_type)); } uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); @@ -3214,21 +3219,21 @@ void InstructionCodeGeneratorARM64::GenerateFcmp(HInstruction* instruction) { void LocationsBuilderARM64::VisitCompare(HCompare* compare) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall); - Primitive::Type in_type = compare->InputAt(0)->GetType(); + DataType::Type in_type = compare->InputAt(0)->GetType(); switch (in_type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimChar: - case Primitive::kPrimInt: - case Primitive::kPrimLong: { + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kUint16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, ARM64EncodableConstantOrRegister(compare->InputAt(1), compare)); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, IsFloatingPointZeroConstant(compare->InputAt(1)) @@ -3243,18 +3248,18 @@ void LocationsBuilderARM64::VisitCompare(HCompare* compare) { } void InstructionCodeGeneratorARM64::VisitCompare(HCompare* compare) { - Primitive::Type in_type = compare->InputAt(0)->GetType(); + DataType::Type in_type = compare->InputAt(0)->GetType(); // 0 if: left == right // 1 if: left > right // -1 if: left < right switch (in_type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimChar: - case Primitive::kPrimInt: - case Primitive::kPrimLong: { + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kUint16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: { Register result = OutputRegister(compare); Register left = InputRegisterAt(compare, 0); Operand right = InputOperandAt(compare, 1); @@ -3263,8 +3268,8 @@ void InstructionCodeGeneratorARM64::VisitCompare(HCompare* compare) { __ Cneg(result, result, lt); // result == -1 if LT or unchanged otherwise break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { Register result = OutputRegister(compare); GenerateFcmp(compare); __ Cset(result, ne); @@ -3279,7 +3284,7 @@ void 
InstructionCodeGeneratorARM64::VisitCompare(HCompare* compare) { void LocationsBuilderARM64::HandleCondition(HCondition* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); - if (Primitive::IsFloatingPointType(instruction->InputAt(0)->GetType())) { + if (DataType::IsFloatingPointType(instruction->InputAt(0)->GetType())) { locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, IsFloatingPointZeroConstant(instruction->InputAt(1)) @@ -3305,7 +3310,7 @@ void InstructionCodeGeneratorARM64::HandleCondition(HCondition* instruction) { Register res = RegisterFrom(locations->Out(), instruction->GetType()); IfCondition if_cond = instruction->GetCondition(); - if (Primitive::IsFloatingPointType(instruction->InputAt(0)->GetType())) { + if (DataType::IsFloatingPointType(instruction->InputAt(0)->GetType())) { GenerateFcmp(instruction); __ Cset(res, ARM64FPCondition(if_cond, instruction->IsGtBias())); } else { @@ -3384,7 +3389,7 @@ void InstructionCodeGeneratorARM64::DivRemByPowerOfTwo(HBinaryOperation* instruc __ Neg(out, Operand(out, ASR, ctz_imm)); } } else { - int bits = instruction->GetResultType() == Primitive::kPrimInt ? 32 : 64; + int bits = instruction->GetResultType() == DataType::Type::kInt32 ? 32 : 64; __ Asr(temp, dividend, bits - 1); __ Lsr(temp, temp, bits - ctz_imm); __ Add(out, dividend, temp); @@ -3404,19 +3409,20 @@ void InstructionCodeGeneratorARM64::GenerateDivRemWithAnyConstant(HBinaryOperati Register dividend = InputRegisterAt(instruction, 0); int64_t imm = Int64FromConstant(second.GetConstant()); - Primitive::Type type = instruction->GetResultType(); - DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong); + DataType::Type type = instruction->GetResultType(); + DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64); int64_t magic; int shift; - CalculateMagicAndShiftForDivRem(imm, type == Primitive::kPrimLong /* is_long */, &magic, &shift); + CalculateMagicAndShiftForDivRem( + imm, type == DataType::Type::kInt64 /* is_long */, &magic, &shift); UseScratchRegisterScope temps(GetVIXLAssembler()); Register temp = temps.AcquireSameSizeAs(out); // temp = get_high(dividend * magic) __ Mov(temp, magic); - if (type == Primitive::kPrimLong) { + if (type == DataType::Type::kInt64) { __ Smulh(temp, dividend, temp); } else { __ Smull(temp.X(), dividend, temp); @@ -3434,9 +3440,9 @@ void InstructionCodeGeneratorARM64::GenerateDivRemWithAnyConstant(HBinaryOperati } if (instruction->IsDiv()) { - __ Sub(out, temp, Operand(temp, ASR, type == Primitive::kPrimLong ? 63 : 31)); + __ Sub(out, temp, Operand(temp, ASR, type == DataType::Type::kInt64 ? 63 : 31)); } else { - __ Sub(temp, temp, Operand(temp, ASR, type == Primitive::kPrimLong ? 63 : 31)); + __ Sub(temp, temp, Operand(temp, ASR, type == DataType::Type::kInt64 ? 63 : 31)); // TODO: Strength reduction for msub. 
Register temp_imm = temps.AcquireSameSizeAs(out); __ Mov(temp_imm, imm); @@ -3446,8 +3452,8 @@ void InstructionCodeGeneratorARM64::GenerateDivRemWithAnyConstant(HBinaryOperati void InstructionCodeGeneratorARM64::GenerateDivRemIntegral(HBinaryOperation* instruction) { DCHECK(instruction->IsDiv() || instruction->IsRem()); - Primitive::Type type = instruction->GetResultType(); - DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong); + DataType::Type type = instruction->GetResultType(); + DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64); LocationSummary* locations = instruction->GetLocations(); Register out = OutputRegister(instruction); @@ -3484,15 +3490,15 @@ void LocationsBuilderARM64::VisitDiv(HDiv* div) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall); switch (div->GetResultType()) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt32: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1))); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); @@ -3504,15 +3510,15 @@ void LocationsBuilderARM64::VisitDiv(HDiv* div) { } void InstructionCodeGeneratorARM64::VisitDiv(HDiv* div) { - Primitive::Type type = div->GetResultType(); + DataType::Type type = div->GetResultType(); switch (type) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt32: + case DataType::Type::kInt64: GenerateDivRemIntegral(div); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: __ Fdiv(OutputFPRegister(div), InputFPRegisterAt(div, 0), InputFPRegisterAt(div, 1)); break; @@ -3532,9 +3538,9 @@ void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction codegen_->AddSlowPath(slow_path); Location value = instruction->GetLocations()->InAt(0); - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); - if (!Primitive::IsIntegralType(type)) { + if (!DataType::IsIntegralType(type)) { LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck."; return; } @@ -3665,8 +3671,8 @@ void InstructionCodeGeneratorARM64::GenerateTestAndBranch(HInstruction* instruct // the comparison and its condition as the branch condition. 
HCondition* condition = cond->AsCondition(); - Primitive::Type type = condition->InputAt(0)->GetType(); - if (Primitive::IsFloatingPointType(type)) { + DataType::Type type = condition->InputAt(0)->GetType(); + if (DataType::IsFloatingPointType(type)) { GenerateFcmp(condition); if (true_target == nullptr) { IfCondition opposite_condition = condition->GetOppositeCondition(); @@ -3780,7 +3786,7 @@ void InstructionCodeGeneratorARM64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeF static inline bool IsConditionOnFloatingPointValues(HInstruction* condition) { return condition->IsCondition() && - Primitive::IsFloatingPointType(condition->InputAt(0)->GetType()); + DataType::IsFloatingPointType(condition->InputAt(0)->GetType()); } static inline Condition GetConditionForSelect(HCondition* condition) { @@ -3791,7 +3797,7 @@ static inline Condition GetConditionForSelect(HCondition* condition) { void LocationsBuilderARM64::VisitSelect(HSelect* select) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select); - if (Primitive::IsFloatingPointType(select->GetType())) { + if (DataType::IsFloatingPointType(select->GetType())) { locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); @@ -3845,7 +3851,7 @@ void InstructionCodeGeneratorARM64::VisitSelect(HSelect* select) { csel_cond = GetConditionForSelect(cond->AsCondition()); } - if (Primitive::IsFloatingPointType(select->GetType())) { + if (DataType::IsFloatingPointType(select->GetType())) { __ Fcsel(OutputFPRegister(select), InputFPRegisterAt(select, 1), InputFPRegisterAt(select, 0), @@ -4913,8 +4919,8 @@ void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) { InvokeRuntimeCallingConvention calling_convention; caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode())); DCHECK_EQ(calling_convention.GetRegisterAt(0).GetCode(), - RegisterFrom(calling_convention.GetReturnLocation(Primitive::kPrimNot), - Primitive::kPrimNot).GetCode()); + RegisterFrom(calling_convention.GetReturnLocation(DataType::Type::kReference), + DataType::Type::kReference).GetCode()); locations->SetCustomSlowPathCallerSaves(caller_saves); } else { // For non-Baker read barrier we have a temp-clobbering call. @@ -5108,8 +5114,8 @@ void LocationsBuilderARM64::VisitLoadString(HLoadString* load) { InvokeRuntimeCallingConvention calling_convention; caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode())); DCHECK_EQ(calling_convention.GetRegisterAt(0).GetCode(), - RegisterFrom(calling_convention.GetReturnLocation(Primitive::kPrimNot), - Primitive::kPrimNot).GetCode()); + RegisterFrom(calling_convention.GetReturnLocation(DataType::Type::kReference), + DataType::Type::kReference).GetCode()); locations->SetCustomSlowPathCallerSaves(caller_saves); } else { // For non-Baker read barrier we have a temp-clobbering call. 
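Every hunk in this file applies the same mechanical substitution of runtime Primitive constants by compiler DataType constants; note in particular that the unsigned kPrimChar becomes kUint16 while the signed kPrimShort becomes kInt16, matching the ldrh versus ldrsh cases earlier in this file. As a compact reference, a hypothetical helper expressing that substitution could look like the sketch below; the function name and its existence are illustrative only, and it assumes the surrounding ART headers (primitive.h, data_type.h, base/logging.h).

// Illustrative mapping only; not necessarily provided by this patch.
static DataType::Type DataTypeFromPrimitive(Primitive::Type type) {
  switch (type) {
    case Primitive::kPrimNot:     return DataType::Type::kReference;
    case Primitive::kPrimBoolean: return DataType::Type::kBool;
    case Primitive::kPrimByte:    return DataType::Type::kInt8;
    case Primitive::kPrimChar:    return DataType::Type::kUint16;  // Unsigned 16-bit.
    case Primitive::kPrimShort:   return DataType::Type::kInt16;   // Signed 16-bit.
    case Primitive::kPrimInt:     return DataType::Type::kInt32;
    case Primitive::kPrimLong:    return DataType::Type::kInt64;
    case Primitive::kPrimFloat:   return DataType::Type::kFloat32;
    case Primitive::kPrimDouble:  return DataType::Type::kFloat64;
    case Primitive::kPrimVoid:    return DataType::Type::kVoid;
  }
  LOG(FATAL) << "Unreachable type " << type;
  return DataType::Type::kVoid;  // Not reached.
}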
@@ -5241,15 +5247,15 @@ void LocationsBuilderARM64::VisitMul(HMul* mul) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall); switch (mul->GetResultType()) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt32: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); @@ -5262,13 +5268,13 @@ void LocationsBuilderARM64::VisitMul(HMul* mul) { void InstructionCodeGeneratorARM64::VisitMul(HMul* mul) { switch (mul->GetResultType()) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt32: + case DataType::Type::kInt64: __ Mul(OutputRegister(mul), InputRegisterAt(mul, 0), InputRegisterAt(mul, 1)); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: __ Fmul(OutputFPRegister(mul), InputFPRegisterAt(mul, 0), InputFPRegisterAt(mul, 1)); break; @@ -5281,14 +5287,14 @@ void LocationsBuilderARM64::VisitNeg(HNeg* neg) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall); switch (neg->GetResultType()) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt32: + case DataType::Type::kInt64: locations->SetInAt(0, ARM64EncodableConstantOrRegister(neg->InputAt(0), neg)); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); break; @@ -5300,13 +5306,13 @@ void LocationsBuilderARM64::VisitNeg(HNeg* neg) { void InstructionCodeGeneratorARM64::VisitNeg(HNeg* neg) { switch (neg->GetResultType()) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt32: + case DataType::Type::kInt64: __ Neg(OutputRegister(neg), InputOperandAt(neg, 0)); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: __ Fneg(OutputFPRegister(neg), InputFPRegisterAt(neg, 0)); break; @@ -5343,7 +5349,7 @@ void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) { } else { locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); } - locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot)); + locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kReference)); } void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) { @@ -5379,8 +5385,8 @@ void LocationsBuilderARM64::VisitNot(HNot* instruction) { void InstructionCodeGeneratorARM64::VisitNot(HNot* instruction) { switch (instruction->GetResultType()) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt32: + case DataType::Type::kInt64: __ Mvn(OutputRegister(instruction), InputOperandAt(instruction, 0)); break; @@ -5487,22 
+5493,22 @@ void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) } void LocationsBuilderARM64::VisitRem(HRem* rem) { - Primitive::Type type = rem->GetResultType(); + DataType::Type type = rem->GetResultType(); LocationSummary::CallKind call_kind = - Primitive::IsFloatingPointType(type) ? LocationSummary::kCallOnMainOnly + DataType::IsFloatingPointType(type) ? LocationSummary::kCallOnMainOnly : LocationSummary::kNoCall; LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind); switch (type) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt32: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1))); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, LocationFrom(calling_convention.GetFpuRegisterAt(0))); locations->SetInAt(1, LocationFrom(calling_convention.GetFpuRegisterAt(1))); @@ -5517,20 +5523,21 @@ void LocationsBuilderARM64::VisitRem(HRem* rem) { } void InstructionCodeGeneratorARM64::VisitRem(HRem* rem) { - Primitive::Type type = rem->GetResultType(); + DataType::Type type = rem->GetResultType(); switch (type) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: { + case DataType::Type::kInt32: + case DataType::Type::kInt64: { GenerateDivRemIntegral(rem); break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { - QuickEntrypointEnum entrypoint = (type == Primitive::kPrimFloat) ? kQuickFmodf : kQuickFmod; + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { + QuickEntrypointEnum entrypoint = + (type == DataType::Type::kFloat32) ? 
kQuickFmodf : kQuickFmod; codegen_->InvokeRuntime(entrypoint, rem, rem->GetDexPc()); - if (type == Primitive::kPrimFloat) { + if (type == DataType::Type::kFloat32) { CheckEntrypointTypes<kQuickFmodf, float, float, float>(); } else { CheckEntrypointTypes<kQuickFmod, double, double, double>(); @@ -5563,7 +5570,7 @@ void InstructionCodeGeneratorARM64::VisitMemoryBarrier(HMemoryBarrier* memory_ba void LocationsBuilderARM64::VisitReturn(HReturn* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); - Primitive::Type return_type = instruction->InputAt(0)->GetType(); + DataType::Type return_type = instruction->InputAt(0)->GetType(); locations->SetInAt(0, ARM64ReturnLocation(return_type)); } @@ -5735,21 +5742,21 @@ void InstructionCodeGeneratorARM64::VisitThrow(HThrow* instruction) { void LocationsBuilderARM64::VisitTypeConversion(HTypeConversion* conversion) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall); - Primitive::Type input_type = conversion->GetInputType(); - Primitive::Type result_type = conversion->GetResultType(); + DataType::Type input_type = conversion->GetInputType(); + DataType::Type result_type = conversion->GetResultType(); DCHECK_NE(input_type, result_type); - if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) || - (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) { + if ((input_type == DataType::Type::kReference) || (input_type == DataType::Type::kVoid) || + (result_type == DataType::Type::kReference) || (result_type == DataType::Type::kVoid)) { LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type; } - if (Primitive::IsFloatingPointType(input_type)) { + if (DataType::IsFloatingPointType(input_type)) { locations->SetInAt(0, Location::RequiresFpuRegister()); } else { locations->SetInAt(0, Location::RequiresRegister()); } - if (Primitive::IsFloatingPointType(result_type)) { + if (DataType::IsFloatingPointType(result_type)) { locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); } else { locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); @@ -5757,18 +5764,18 @@ void LocationsBuilderARM64::VisitTypeConversion(HTypeConversion* conversion) { } void InstructionCodeGeneratorARM64::VisitTypeConversion(HTypeConversion* conversion) { - Primitive::Type result_type = conversion->GetResultType(); - Primitive::Type input_type = conversion->GetInputType(); + DataType::Type result_type = conversion->GetResultType(); + DataType::Type input_type = conversion->GetInputType(); DCHECK_NE(input_type, result_type); - if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) { - int result_size = Primitive::ComponentSize(result_type); - int input_size = Primitive::ComponentSize(input_type); + if (DataType::IsIntegralType(result_type) && DataType::IsIntegralType(input_type)) { + int result_size = DataType::Size(result_type); + int input_size = DataType::Size(input_type); int min_size = std::min(result_size, input_size); Register output = OutputRegister(conversion); Register source = InputRegisterAt(conversion, 0); - if (result_type == Primitive::kPrimInt && input_type == Primitive::kPrimLong) { + if (result_type == DataType::Type::kInt32 && input_type == DataType::Type::kInt64) { // 'int' values are used directly as W registers, discarding the top // bits, so we don't need to sign-extend and can just perform a move. 
// We do not pass the `kDiscardForSameWReg` argument to force clearing the @@ -5777,21 +5784,21 @@ void InstructionCodeGeneratorARM64::VisitTypeConversion(HTypeConversion* convers // 32bit input value as a 64bit value assuming that the top 32 bits are // zero. __ Mov(output.W(), source.W()); - } else if (result_type == Primitive::kPrimChar || - (input_type == Primitive::kPrimChar && input_size < result_size)) { + } else if (result_type == DataType::Type::kUint16 || + (input_type == DataType::Type::kUint16 && input_size < result_size)) { __ Ubfx(output, output.IsX() ? source.X() : source.W(), - 0, Primitive::ComponentSize(Primitive::kPrimChar) * kBitsPerByte); + 0, DataType::Size(DataType::Type::kUint16) * kBitsPerByte); } else { __ Sbfx(output, output.IsX() ? source.X() : source.W(), 0, min_size * kBitsPerByte); } - } else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) { + } else if (DataType::IsFloatingPointType(result_type) && DataType::IsIntegralType(input_type)) { __ Scvtf(OutputFPRegister(conversion), InputRegisterAt(conversion, 0)); - } else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) { - CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong); + } else if (DataType::IsIntegralType(result_type) && DataType::IsFloatingPointType(input_type)) { + CHECK(result_type == DataType::Type::kInt32 || result_type == DataType::Type::kInt64); __ Fcvtzs(OutputRegister(conversion), InputFPRegisterAt(conversion, 0)); - } else if (Primitive::IsFloatingPointType(result_type) && - Primitive::IsFloatingPointType(input_type)) { + } else if (DataType::IsFloatingPointType(result_type) && + DataType::IsFloatingPointType(input_type)) { __ Fcvt(OutputFPRegister(conversion), InputFPRegisterAt(conversion, 0)); } else { LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type @@ -5918,7 +5925,7 @@ void InstructionCodeGeneratorARM64::GenerateReferenceLoadOneRegister( uint32_t offset, Location maybe_temp, ReadBarrierOption read_barrier_option) { - Primitive::Type type = Primitive::kPrimNot; + DataType::Type type = DataType::Type::kReference; Register out_reg = RegisterFrom(out, type); if (read_barrier_option == kWithReadBarrier) { CHECK(kEmitCompilerReadBarrier); @@ -5958,7 +5965,7 @@ void InstructionCodeGeneratorARM64::GenerateReferenceLoadTwoRegisters( uint32_t offset, Location maybe_temp, ReadBarrierOption read_barrier_option) { - Primitive::Type type = Primitive::kPrimNot; + DataType::Type type = DataType::Type::kReference; Register out_reg = RegisterFrom(out, type); Register obj_reg = RegisterFrom(obj, type); if (read_barrier_option == kWithReadBarrier) { @@ -5995,7 +6002,7 @@ void InstructionCodeGeneratorARM64::GenerateGcRootFieldLoad( vixl::aarch64::Label* fixup_label, ReadBarrierOption read_barrier_option) { DCHECK(fixup_label == nullptr || offset == 0u); - Register root_reg = RegisterFrom(root, Primitive::kPrimNot); + Register root_reg = RegisterFrom(root, DataType::Type::kReference); if (read_barrier_option == kWithReadBarrier) { DCHECK(kEmitCompilerReadBarrier); if (kUseBakerReadBarrier) { @@ -6159,7 +6166,7 @@ void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* ins static_assert(BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET == (kPoisonHeapReferences ? 
-8 : -4), "Field LDR must be 1 instruction (4B) before the return address label; " " 2 instructions (8B) for heap poisoning."); - Register ref_reg = RegisterFrom(ref, Primitive::kPrimNot); + Register ref_reg = RegisterFrom(ref, DataType::Type::kReference); __ ldr(ref_reg, MemOperand(base.X(), offset)); if (needs_null_check) { MaybeRecordImplicitNullCheck(instruction); @@ -6199,7 +6206,7 @@ void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(HInstruction* ins static_assert( sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t), "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes."); - size_t scale_factor = Primitive::ComponentSizeShift(Primitive::kPrimNot); + size_t scale_factor = DataType::SizeShift(DataType::Type::kReference); if (kBakerReadBarrierLinkTimeThunksEnableForArrays && !Runtime::Current()->UseJitCompilation()) { @@ -6224,8 +6231,8 @@ void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(HInstruction* ins // gray_return_address: DCHECK(index.IsValid()); - Register index_reg = RegisterFrom(index, Primitive::kPrimInt); - Register ref_reg = RegisterFrom(ref, Primitive::kPrimNot); + Register index_reg = RegisterFrom(index, DataType::Type::kInt32); + Register ref_reg = RegisterFrom(ref, DataType::Type::kReference); UseScratchRegisterScope temps(GetVIXLAssembler()); DCHECK(temps.IsAvailable(ip0)); @@ -6397,7 +6404,7 @@ void CodeGeneratorARM64::GenerateRawReferenceLoad(HInstruction* instruction, bool needs_null_check, bool use_load_acquire) { DCHECK(obj.IsW()); - Primitive::Type type = Primitive::kPrimNot; + DataType::Type type = DataType::Type::kReference; Register ref_reg = RegisterFrom(ref, type); // If needed, vixl::EmissionCheckScope guards are used to ensure diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h index cebdaa102c..21da9557e5 100644 --- a/compiler/optimizing/code_generator_arm64.h +++ b/compiler/optimizing/code_generator_arm64.h @@ -100,7 +100,7 @@ const vixl::aarch64::CPURegList callee_saved_fp_registers(vixl::aarch64::CPURegi vixl::aarch64::kDRegSize, vixl::aarch64::d8.GetCode(), vixl::aarch64::d15.GetCode()); -Location ARM64ReturnLocation(Primitive::Type return_type); +Location ARM64ReturnLocation(DataType::Type return_type); class SlowPathCodeARM64 : public SlowPathCode { public: @@ -171,7 +171,7 @@ class InvokeRuntimeCallingConvention : public CallingConvention<vixl::aarch64::R kRuntimeParameterFpuRegistersLength, kArm64PointerSize) {} - Location GetReturnLocation(Primitive::Type return_type); + Location GetReturnLocation(DataType::Type return_type); private: DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention); @@ -187,7 +187,7 @@ class InvokeDexCallingConvention : public CallingConvention<vixl::aarch64::Regis kParameterFPRegistersLength, kArm64PointerSize) {} - Location GetReturnLocation(Primitive::Type return_type) const { + Location GetReturnLocation(DataType::Type return_type) const { return ARM64ReturnLocation(return_type); } @@ -201,8 +201,8 @@ class InvokeDexCallingConventionVisitorARM64 : public InvokeDexCallingConvention InvokeDexCallingConventionVisitorARM64() {} virtual ~InvokeDexCallingConventionVisitorARM64() {} - Location GetNextLocation(Primitive::Type type) OVERRIDE; - Location GetReturnLocation(Primitive::Type return_type) const OVERRIDE { + Location GetNextLocation(DataType::Type type) OVERRIDE; + Location GetReturnLocation(DataType::Type return_type) const OVERRIDE { return calling_convention.GetReturnLocation(return_type); } 
Location GetMethodLocation() const OVERRIDE; @@ -223,16 +223,16 @@ class FieldAccessCallingConventionARM64 : public FieldAccessCallingConvention { Location GetFieldIndexLocation() const OVERRIDE { return helpers::LocationFrom(vixl::aarch64::x0); } - Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE { + Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE { return helpers::LocationFrom(vixl::aarch64::x0); } - Location GetSetValueLocation(Primitive::Type type ATTRIBUTE_UNUSED, + Location GetSetValueLocation(DataType::Type type ATTRIBUTE_UNUSED, bool is_instance) const OVERRIDE { return is_instance ? helpers::LocationFrom(vixl::aarch64::x2) : helpers::LocationFrom(vixl::aarch64::x1); } - Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE { + Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE { return helpers::LocationFrom(vixl::aarch64::d0); } @@ -498,13 +498,13 @@ class CodeGeneratorARM64 : public CodeGenerator { // Code generation helpers. void MoveConstant(vixl::aarch64::CPURegister destination, HConstant* constant); void MoveConstant(Location destination, int32_t value) OVERRIDE; - void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE; + void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE; void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE; - void Load(Primitive::Type type, + void Load(DataType::Type type, vixl::aarch64::CPURegister dst, const vixl::aarch64::MemOperand& src); - void Store(Primitive::Type type, + void Store(DataType::Type type, vixl::aarch64::CPURegister src, const vixl::aarch64::MemOperand& dst); void LoadAcquire(HInstruction* instruction, @@ -512,7 +512,7 @@ class CodeGeneratorARM64 : public CodeGenerator { const vixl::aarch64::MemOperand& src, bool needs_null_check); void StoreRelease(HInstruction* instruction, - Primitive::Type type, + DataType::Type type, vixl::aarch64::CPURegister src, const vixl::aarch64::MemOperand& dst, bool needs_null_check); @@ -531,7 +531,7 @@ class CodeGeneratorARM64 : public CodeGenerator { ParallelMoveResolverARM64* GetMoveResolver() OVERRIDE { return &move_resolver_; } - bool NeedsTwoRegisters(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE { + bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE { return false; } @@ -557,7 +557,7 @@ class CodeGeneratorARM64 : public CodeGenerator { HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE; void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED, - Primitive::Type type ATTRIBUTE_UNUSED) OVERRIDE { + DataType::Type type ATTRIBUTE_UNUSED) OVERRIDE { UNIMPLEMENTED(FATAL); } diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc index e1ea08073f..2b9e0febe8 100644 --- a/compiler/optimizing/code_generator_arm_vixl.cc +++ b/compiler/optimizing/code_generator_arm_vixl.cc @@ -450,10 +450,10 @@ class BoundsCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL { codegen->EmitParallelMoves( locations->InAt(0), LocationFrom(calling_convention.GetRegisterAt(0)), - Primitive::kPrimInt, + DataType::Type::kInt32, locations->InAt(1), LocationFrom(calling_convention.GetRegisterAt(1)), - Primitive::kPrimInt); + DataType::Type::kInt32); QuickEntrypointEnum entrypoint = instruction_->AsBoundsCheck()->IsStringCharAt() ? 
kQuickThrowStringBounds : kQuickThrowArrayBounds; @@ -641,10 +641,10 @@ class TypeCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL { codegen->EmitParallelMoves(locations->InAt(0), LocationFrom(calling_convention.GetRegisterAt(0)), - Primitive::kPrimNot, + DataType::Type::kReference, locations->InAt(1), LocationFrom(calling_convention.GetRegisterAt(1)), - Primitive::kPrimNot); + DataType::Type::kReference); if (instruction_->IsInstanceOf()) { arm_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, @@ -715,17 +715,17 @@ class ArraySetSlowPathARMVIXL : public SlowPathCodeARMVIXL { parallel_move.AddMove( locations->InAt(0), LocationFrom(calling_convention.GetRegisterAt(0)), - Primitive::kPrimNot, + DataType::Type::kReference, nullptr); parallel_move.AddMove( locations->InAt(1), LocationFrom(calling_convention.GetRegisterAt(1)), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); parallel_move.AddMove( locations->InAt(2), LocationFrom(calling_convention.GetRegisterAt(2)), - Primitive::kPrimNot, + DataType::Type::kReference, nullptr); codegen->GetMoveResolver()->EmitNativeCode(&parallel_move); @@ -1365,16 +1365,16 @@ class ReadBarrierForHeapReferenceSlowPathARMVIXL : public SlowPathCodeARMVIXL { HParallelMove parallel_move(codegen->GetGraph()->GetArena()); parallel_move.AddMove(ref_, LocationFrom(calling_convention.GetRegisterAt(0)), - Primitive::kPrimNot, + DataType::Type::kReference, nullptr); parallel_move.AddMove(obj_, LocationFrom(calling_convention.GetRegisterAt(1)), - Primitive::kPrimNot, + DataType::Type::kReference, nullptr); if (index.IsValid()) { parallel_move.AddMove(index, LocationFrom(calling_convention.GetRegisterAt(2)), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); codegen->GetMoveResolver()->EmitNativeCode(&parallel_move); } else { @@ -1641,7 +1641,7 @@ static Operand GetShifterOperand(vixl32::Register rm, ShiftType shift, uint32_t static void GenerateLongDataProc(HDataProcWithShifterOp* instruction, CodeGeneratorARMVIXL* codegen) { - DCHECK_EQ(instruction->GetType(), Primitive::kPrimLong); + DCHECK_EQ(instruction->GetType(), DataType::Type::kInt64); DCHECK(HDataProcWithShifterOp::IsShiftOp(instruction->GetOpKind())); const LocationSummary* const locations = instruction->GetLocations(); @@ -1776,12 +1776,12 @@ static void GenerateVcmp(HInstruction* instruction, CodeGeneratorARMVIXL* codege // care here.
DCHECK(rhs_loc.GetConstant()->IsArithmeticZero()); - const Primitive::Type type = instruction->InputAt(0)->GetType(); + const DataType::Type type = instruction->InputAt(0)->GetType(); - if (type == Primitive::kPrimFloat) { + if (type == DataType::Type::kFloat32) { __ Vcmp(F32, InputSRegisterAt(instruction, 0), 0.0); } else { - DCHECK_EQ(type, Primitive::kPrimDouble); + DCHECK_EQ(type, DataType::Type::kFloat64); __ Vcmp(F64, InputDRegisterAt(instruction, 0), 0.0); } } else { @@ -1821,7 +1821,7 @@ static std::pair<vixl32::Condition, vixl32::Condition> GenerateLongTestConstant( HCondition* condition, bool invert, CodeGeneratorARMVIXL* codegen) { - DCHECK_EQ(condition->GetLeft()->GetType(), Primitive::kPrimLong); + DCHECK_EQ(condition->GetLeft()->GetType(), DataType::Type::kInt64); const LocationSummary* const locations = condition->GetLocations(); IfCondition cond = condition->GetCondition(); @@ -1942,7 +1942,7 @@ static std::pair<vixl32::Condition, vixl32::Condition> GenerateLongTest( HCondition* condition, bool invert, CodeGeneratorARMVIXL* codegen) { - DCHECK_EQ(condition->GetLeft()->GetType(), Primitive::kPrimLong); + DCHECK_EQ(condition->GetLeft()->GetType(), DataType::Type::kInt64); const LocationSummary* const locations = condition->GetLocations(); IfCondition cond = condition->GetCondition(); @@ -2012,7 +2012,7 @@ static std::pair<vixl32::Condition, vixl32::Condition> GenerateLongTest( static std::pair<vixl32::Condition, vixl32::Condition> GenerateTest(HCondition* condition, bool invert, CodeGeneratorARMVIXL* codegen) { - const Primitive::Type type = condition->GetLeft()->GetType(); + const DataType::Type type = condition->GetLeft()->GetType(); IfCondition cond = condition->GetCondition(); IfCondition opposite = condition->GetOppositeCondition(); std::pair<vixl32::Condition, vixl32::Condition> ret(eq, ne); @@ -2021,17 +2021,17 @@ static std::pair<vixl32::Condition, vixl32::Condition> GenerateTest(HCondition* std::swap(cond, opposite); } - if (type == Primitive::kPrimLong) { + if (type == DataType::Type::kInt64) { ret = condition->GetLocations()->InAt(1).IsConstant() ? 
GenerateLongTestConstant(condition, invert, codegen) : GenerateLongTest(condition, invert, codegen); - } else if (Primitive::IsFloatingPointType(type)) { + } else if (DataType::IsFloatingPointType(type)) { GenerateVcmp(condition, codegen); __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR); ret = std::make_pair(ARMFPCondition(cond, condition->IsGtBias()), ARMFPCondition(opposite, condition->IsGtBias())); } else { - DCHECK(Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) << type; + DCHECK(DataType::IsIntegralType(type) || type == DataType::Type::kReference) << type; __ Cmp(InputRegisterAt(condition, 0), InputOperandAt(condition, 1)); ret = std::make_pair(ARMCondition(cond), ARMCondition(opposite)); } @@ -2067,7 +2067,7 @@ static void GenerateConditionGeneric(HCondition* cond, CodeGeneratorARMVIXL* cod } static void GenerateEqualLong(HCondition* cond, CodeGeneratorARMVIXL* codegen) { - DCHECK_EQ(cond->GetLeft()->GetType(), Primitive::kPrimLong); + DCHECK_EQ(cond->GetLeft()->GetType(), DataType::Type::kInt64); const LocationSummary* const locations = cond->GetLocations(); IfCondition condition = cond->GetCondition(); @@ -2123,7 +2123,7 @@ static void GenerateEqualLong(HCondition* cond, CodeGeneratorARMVIXL* codegen) { } static void GenerateConditionLong(HCondition* cond, CodeGeneratorARMVIXL* codegen) { - DCHECK_EQ(cond->GetLeft()->GetType(), Primitive::kPrimLong); + DCHECK_EQ(cond->GetLeft()->GetType(), DataType::Type::kInt64); const LocationSummary* const locations = cond->GetLocations(); IfCondition condition = cond->GetCondition(); @@ -2188,11 +2188,11 @@ static void GenerateConditionLong(HCondition* cond, CodeGeneratorARMVIXL* codege static void GenerateConditionIntegralOrNonPrimitive(HCondition* cond, CodeGeneratorARMVIXL* codegen) { - const Primitive::Type type = cond->GetLeft()->GetType(); + const DataType::Type type = cond->GetLeft()->GetType(); - DCHECK(Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) << type; + DCHECK(DataType::IsIntegralType(type) || type == DataType::Type::kReference) << type; - if (type == Primitive::kPrimLong) { + if (type == DataType::Type::kInt64) { GenerateConditionLong(cond, codegen); return; } @@ -2278,12 +2278,12 @@ static void GenerateConditionIntegralOrNonPrimitive(HCondition* cond, } static bool CanEncodeConstantAs8BitImmediate(HConstant* constant) { - const Primitive::Type type = constant->GetType(); + const DataType::Type type = constant->GetType(); bool ret = false; - DCHECK(Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) << type; + DCHECK(DataType::IsIntegralType(type) || type == DataType::Type::kReference) << type; - if (type == Primitive::kPrimLong) { + if (type == DataType::Type::kInt64) { const uint64_t value = Uint64ConstantFrom(constant); ret = IsUint<8>(Low32Bits(value)) && IsUint<8>(High32Bits(value)); @@ -2295,7 +2295,7 @@ static bool CanEncodeConstantAs8BitImmediate(HConstant* constant) { } static Location Arm8BitEncodableConstantOrRegister(HInstruction* constant) { - DCHECK(!Primitive::IsFloatingPointType(constant->GetType())); + DCHECK(!DataType::IsFloatingPointType(constant->GetType())); if (constant->IsConstant() && CanEncodeConstantAs8BitImmediate(constant->AsConstant())) { return Location::ConstantLocation(constant->AsConstant()); @@ -2596,14 +2596,14 @@ void CodeGeneratorARMVIXL::Bind(HBasicBlock* block) { __ Bind(GetLabelOf(block)); } -Location InvokeDexCallingConventionVisitorARMVIXL::GetNextLocation(Primitive::Type type) { +Location 
InvokeDexCallingConventionVisitorARMVIXL::GetNextLocation(DataType::Type type) { switch (type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimNot: { + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kReference: { uint32_t index = gp_index_++; uint32_t stack_index = stack_index_++; if (index < calling_convention.GetNumberOfRegisters()) { @@ -2613,7 +2613,7 @@ Location InvokeDexCallingConventionVisitorARMVIXL::GetNextLocation(Primitive::Ty } } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { uint32_t index = gp_index_; uint32_t stack_index = stack_index_; gp_index_ += 2; @@ -2636,7 +2636,7 @@ Location InvokeDexCallingConventionVisitorARMVIXL::GetNextLocation(Primitive::Ty } } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { uint32_t stack_index = stack_index_++; if (float_index_ % 2 == 0) { float_index_ = std::max(double_index_, float_index_); @@ -2648,7 +2648,7 @@ Location InvokeDexCallingConventionVisitorARMVIXL::GetNextLocation(Primitive::Ty } } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { double_index_ = std::max(double_index_, RoundUp(float_index_, 2)); uint32_t stack_index = stack_index_; stack_index_ += 2; @@ -2665,37 +2665,37 @@ Location InvokeDexCallingConventionVisitorARMVIXL::GetNextLocation(Primitive::Ty } } - case Primitive::kPrimVoid: + case DataType::Type::kVoid: LOG(FATAL) << "Unexpected parameter type " << type; break; } return Location::NoLocation(); } -Location InvokeDexCallingConventionVisitorARMVIXL::GetReturnLocation(Primitive::Type type) const { +Location InvokeDexCallingConventionVisitorARMVIXL::GetReturnLocation(DataType::Type type) const { switch (type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimNot: { + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kReference: { return LocationFrom(r0); } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { return LocationFrom(s0); } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { return LocationFrom(r0, r1); } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { return LocationFrom(s0, s1); } - case Primitive::kPrimVoid: + case DataType::Type::kVoid: return Location::NoLocation(); } @@ -2753,7 +2753,7 @@ void CodeGeneratorARMVIXL::MoveConstant(Location location, int32_t value) { __ Mov(RegisterFrom(location), value); } -void CodeGeneratorARMVIXL::MoveLocation(Location dst, Location src, Primitive::Type dst_type) { +void CodeGeneratorARMVIXL::MoveLocation(Location dst, Location src, DataType::Type dst_type) { // TODO(VIXL): Maybe refactor to have the 'move' implementation here and use it in // `ParallelMoveResolverARMVIXL::EmitMove`, as is done in the `arm64` backend. HParallelMove move(GetGraph()->GetArena()); @@ -2936,8 +2936,8 @@ void InstructionCodeGeneratorARMVIXL::GenerateTestAndBranch(HInstruction* instru // If this is a long or FP comparison that has been folded into // the HCondition, generate the comparison directly. 
- Primitive::Type type = condition->InputAt(0)->GetType(); - if (type == Primitive::kPrimLong || Primitive::IsFloatingPointType(type)) { + DataType::Type type = condition->InputAt(0)->GetType(); + if (type == DataType::Type::kInt64 || DataType::IsFloatingPointType(type)) { GenerateCompareTestAndBranch(condition, true_target, false_target, far_target); return; } @@ -3028,7 +3028,7 @@ void InstructionCodeGeneratorARMVIXL::VisitShouldDeoptimizeFlag(HShouldDeoptimiz void LocationsBuilderARMVIXL::VisitSelect(HSelect* select) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select); - const bool is_floating_point = Primitive::IsFloatingPointType(select->GetType()); + const bool is_floating_point = DataType::IsFloatingPointType(select->GetType()); if (is_floating_point) { locations->SetInAt(0, Location::RequiresFpuRegister()); @@ -3056,7 +3056,7 @@ void LocationsBuilderARMVIXL::VisitSelect(HSelect* select) { void InstructionCodeGeneratorARMVIXL::VisitSelect(HSelect* select) { HInstruction* const condition = select->GetCondition(); const LocationSummary* const locations = select->GetLocations(); - const Primitive::Type type = select->GetType(); + const DataType::Type type = select->GetType(); const Location first = locations->InAt(0); const Location out = locations->Out(); const Location second = locations->InAt(1); @@ -3073,7 +3073,7 @@ void InstructionCodeGeneratorARMVIXL::VisitSelect(HSelect* select) { return; } - if (!Primitive::IsFloatingPointType(type)) { + if (!DataType::IsFloatingPointType(type)) { bool invert = false; if (out.Equals(second)) { @@ -3261,7 +3261,7 @@ void LocationsBuilderARMVIXL::HandleCondition(HCondition* cond) { new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall); // Handle the long/FP comparisons made in instruction simplification. switch (cond->InputAt(0)->GetType()) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1))); if (!cond->IsEmittedAtUseSite()) { @@ -3269,8 +3269,8 @@ void LocationsBuilderARMVIXL::HandleCondition(HCondition* cond) { } break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, ArithmeticZeroOrFpuRegister(cond->InputAt(1))); if (!cond->IsEmittedAtUseSite()) { @@ -3292,22 +3292,22 @@ void InstructionCodeGeneratorARMVIXL::HandleCondition(HCondition* cond) { return; } - const Primitive::Type type = cond->GetLeft()->GetType(); + const DataType::Type type = cond->GetLeft()->GetType(); - if (Primitive::IsFloatingPointType(type)) { + if (DataType::IsFloatingPointType(type)) { GenerateConditionGeneric(cond, codegen_); return; } - DCHECK(Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) << type; + DCHECK(DataType::IsIntegralType(type) || type == DataType::Type::kReference) << type; const IfCondition condition = cond->GetCondition(); // A condition with only one boolean input, or two boolean inputs without being equality or // inequality results from transformations done by the instruction simplifier, and is handled // as a regular condition with integral inputs. 
- if (type == Primitive::kPrimBoolean && - cond->GetRight()->GetType() == Primitive::kPrimBoolean && + if (type == DataType::Type::kBool && + cond->GetRight()->GetType() == DataType::Type::kBool && (condition == kCondEQ || condition == kCondNE)) { vixl32::Register left = InputRegisterAt(cond, 0); const vixl32::Register out = OutputRegister(cond); @@ -3670,19 +3670,19 @@ void LocationsBuilderARMVIXL::VisitNeg(HNeg* neg) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall); switch (neg->GetResultType()) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap); break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); break; @@ -3697,11 +3697,11 @@ void InstructionCodeGeneratorARMVIXL::VisitNeg(HNeg* neg) { Location out = locations->Out(); Location in = locations->InAt(0); switch (neg->GetResultType()) { - case Primitive::kPrimInt: + case DataType::Type::kInt32: __ Rsb(OutputRegister(neg), InputRegisterAt(neg, 0), 0); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: // out.lo = 0 - in.lo (and update the carry/borrow (C) flag) __ Rsbs(LowRegisterFrom(out), LowRegisterFrom(in), 0); // We cannot emit an RSC (Reverse Subtract with Carry) @@ -3715,8 +3715,8 @@ void InstructionCodeGeneratorARMVIXL::VisitNeg(HNeg* neg) { __ Sub(HighRegisterFrom(out), HighRegisterFrom(out), HighRegisterFrom(in)); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: __ Vneg(OutputVRegister(neg), InputVRegister(neg)); break; @@ -3726,16 +3726,16 @@ void InstructionCodeGeneratorARMVIXL::VisitNeg(HNeg* neg) { } void LocationsBuilderARMVIXL::VisitTypeConversion(HTypeConversion* conversion) { - Primitive::Type result_type = conversion->GetResultType(); - Primitive::Type input_type = conversion->GetInputType(); + DataType::Type result_type = conversion->GetResultType(); + DataType::Type input_type = conversion->GetInputType(); DCHECK_NE(result_type, input_type); // The float-to-long, double-to-long and long-to-float type conversions // rely on a call to the runtime. LocationSummary::CallKind call_kind = - (((input_type == Primitive::kPrimFloat || input_type == Primitive::kPrimDouble) - && result_type == Primitive::kPrimLong) - || (input_type == Primitive::kPrimLong && result_type == Primitive::kPrimFloat)) + (((input_type == DataType::Type::kFloat32 || input_type == DataType::Type::kFloat64) + && result_type == DataType::Type::kInt64) + || (input_type == DataType::Type::kInt64 && result_type == DataType::Type::kFloat32)) ? LocationSummary::kCallOnMainOnly : LocationSummary::kNoCall; LocationSummary* locations = @@ -3745,15 +3745,15 @@ void LocationsBuilderARMVIXL::VisitTypeConversion(HTypeConversion* conversion) { // our bit representation makes it safe. 
switch (result_type) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: switch (input_type) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Type conversion from long to byte is a result of code transformations. - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimChar: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kUint16: // Processing a Dex `int-to-byte' instruction. locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); @@ -3765,15 +3765,15 @@ void LocationsBuilderARMVIXL::VisitTypeConversion(HTypeConversion* conversion) { } break; - case Primitive::kPrimShort: + case DataType::Type::kInt16: switch (input_type) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Type conversion from long to short is a result of code transformations. - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimByte: - case Primitive::kPrimInt: - case Primitive::kPrimChar: + case DataType::Type::kInt8: + case DataType::Type::kInt32: + case DataType::Type::kUint16: // Processing a Dex `int-to-short' instruction. locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); @@ -3785,22 +3785,22 @@ void LocationsBuilderARMVIXL::VisitTypeConversion(HTypeConversion* conversion) { } break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: switch (input_type) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Processing a Dex `long-to-int' instruction. locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: // Processing a Dex `float-to-int' instruction. locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister()); locations->AddTemp(Location::RequiresFpuRegister()); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: // Processing a Dex `double-to-int' instruction. locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister()); @@ -3813,20 +3813,20 @@ void LocationsBuilderARMVIXL::VisitTypeConversion(HTypeConversion* conversion) { } break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: switch (input_type) { - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimChar: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kUint16: // Processing a Dex `int-to-long' instruction. locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { // Processing a Dex `float-to-long' instruction. 
InvokeRuntimeCallingConventionARMVIXL calling_convention; locations->SetInAt(0, LocationFrom(calling_convention.GetFpuRegisterAt(0))); @@ -3834,7 +3834,7 @@ void LocationsBuilderARMVIXL::VisitTypeConversion(HTypeConversion* conversion) { break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { // Processing a Dex `double-to-long' instruction. InvokeRuntimeCallingConventionARMVIXL calling_convention; locations->SetInAt(0, LocationFrom(calling_convention.GetFpuRegisterAt(0), @@ -3849,15 +3849,15 @@ void LocationsBuilderARMVIXL::VisitTypeConversion(HTypeConversion* conversion) { } break; - case Primitive::kPrimChar: + case DataType::Type::kUint16: switch (input_type) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Type conversion from long to char is a result of code transformations. - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimInt: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kInt32: // Processing a Dex `int-to-char' instruction. locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); @@ -3869,20 +3869,20 @@ void LocationsBuilderARMVIXL::VisitTypeConversion(HTypeConversion* conversion) { } break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: switch (input_type) { - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimChar: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kUint16: // Processing a Dex `int-to-float' instruction. locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresFpuRegister()); break; - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { // Processing a Dex `long-to-float' instruction. InvokeRuntimeCallingConventionARMVIXL calling_convention; locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0), @@ -3891,7 +3891,7 @@ void LocationsBuilderARMVIXL::VisitTypeConversion(HTypeConversion* conversion) { break; } - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: // Processing a Dex `double-to-float' instruction. locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); @@ -3903,20 +3903,20 @@ void LocationsBuilderARMVIXL::VisitTypeConversion(HTypeConversion* conversion) { }; break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: switch (input_type) { - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimChar: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kUint16: // Processing a Dex `int-to-double' instruction. locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresFpuRegister()); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Processing a Dex `long-to-double' instruction. 
locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresFpuRegister()); @@ -3924,7 +3924,7 @@ void LocationsBuilderARMVIXL::VisitTypeConversion(HTypeConversion* conversion) { locations->AddTemp(Location::RequiresFpuRegister()); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: // Processing a Dex `float-to-double' instruction. locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); @@ -3946,21 +3946,21 @@ void InstructionCodeGeneratorARMVIXL::VisitTypeConversion(HTypeConversion* conve LocationSummary* locations = conversion->GetLocations(); Location out = locations->Out(); Location in = locations->InAt(0); - Primitive::Type result_type = conversion->GetResultType(); - Primitive::Type input_type = conversion->GetInputType(); + DataType::Type result_type = conversion->GetResultType(); + DataType::Type input_type = conversion->GetInputType(); DCHECK_NE(result_type, input_type); switch (result_type) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: switch (input_type) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Type conversion from long to byte is a result of code transformations. __ Sbfx(OutputRegister(conversion), LowRegisterFrom(in), 0, 8); break; - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimChar: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kUint16: // Processing a Dex `int-to-byte' instruction. __ Sbfx(OutputRegister(conversion), InputRegisterAt(conversion, 0), 0, 8); break; @@ -3971,17 +3971,17 @@ void InstructionCodeGeneratorARMVIXL::VisitTypeConversion(HTypeConversion* conve } break; - case Primitive::kPrimShort: + case DataType::Type::kInt16: switch (input_type) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Type conversion from long to short is a result of code transformations. __ Sbfx(OutputRegister(conversion), LowRegisterFrom(in), 0, 16); break; - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimByte: - case Primitive::kPrimInt: - case Primitive::kPrimChar: + case DataType::Type::kInt8: + case DataType::Type::kInt32: + case DataType::Type::kUint16: // Processing a Dex `int-to-short' instruction. __ Sbfx(OutputRegister(conversion), InputRegisterAt(conversion, 0), 0, 16); break; @@ -3992,9 +3992,9 @@ void InstructionCodeGeneratorARMVIXL::VisitTypeConversion(HTypeConversion* conve } break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: switch (input_type) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Processing a Dex `long-to-int' instruction. DCHECK(out.IsRegister()); if (in.IsRegisterPair()) { @@ -4012,7 +4012,7 @@ void InstructionCodeGeneratorARMVIXL::VisitTypeConversion(HTypeConversion* conve } break; - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { // Processing a Dex `float-to-int' instruction. vixl32::SRegister temp = LowSRegisterFrom(locations->GetTemp(0)); __ Vcvt(S32, F32, temp, InputSRegisterAt(conversion, 0)); @@ -4020,7 +4020,7 @@ void InstructionCodeGeneratorARMVIXL::VisitTypeConversion(HTypeConversion* conve break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { // Processing a Dex `double-to-int' instruction. 
vixl32::SRegister temp_s = LowSRegisterFrom(locations->GetTemp(0)); __ Vcvt(S32, F64, temp_s, DRegisterFrom(in)); @@ -4034,14 +4034,14 @@ void InstructionCodeGeneratorARMVIXL::VisitTypeConversion(HTypeConversion* conve } break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: switch (input_type) { - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimChar: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kUint16: // Processing a Dex `int-to-long' instruction. DCHECK(out.IsRegisterPair()); DCHECK(in.IsRegister()); @@ -4050,13 +4050,13 @@ void InstructionCodeGeneratorARMVIXL::VisitTypeConversion(HTypeConversion* conve __ Asr(HighRegisterFrom(out), LowRegisterFrom(out), 31); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: // Processing a Dex `float-to-long' instruction. codegen_->InvokeRuntime(kQuickF2l, conversion, conversion->GetDexPc()); CheckEntrypointTypes<kQuickF2l, int64_t, float>(); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: // Processing a Dex `double-to-long' instruction. codegen_->InvokeRuntime(kQuickD2l, conversion, conversion->GetDexPc()); CheckEntrypointTypes<kQuickD2l, int64_t, double>(); @@ -4068,17 +4068,17 @@ void InstructionCodeGeneratorARMVIXL::VisitTypeConversion(HTypeConversion* conve } break; - case Primitive::kPrimChar: + case DataType::Type::kUint16: switch (input_type) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Type conversion from long to char is a result of code transformations. __ Ubfx(OutputRegister(conversion), LowRegisterFrom(in), 0, 16); break; - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimInt: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kInt32: // Processing a Dex `int-to-char' instruction. __ Ubfx(OutputRegister(conversion), InputRegisterAt(conversion, 0), 0, 16); break; @@ -4089,27 +4089,27 @@ void InstructionCodeGeneratorARMVIXL::VisitTypeConversion(HTypeConversion* conve } break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: switch (input_type) { - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimChar: { + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kUint16: { // Processing a Dex `int-to-float' instruction. __ Vmov(OutputSRegister(conversion), InputRegisterAt(conversion, 0)); __ Vcvt(F32, S32, OutputSRegister(conversion), OutputSRegister(conversion)); break; } - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Processing a Dex `long-to-float' instruction. codegen_->InvokeRuntime(kQuickL2f, conversion, conversion->GetDexPc()); CheckEntrypointTypes<kQuickL2f, float, int64_t>(); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: // Processing a Dex `double-to-float' instruction. 
__ Vcvt(F32, F64, OutputSRegister(conversion), DRegisterFrom(in)); break; @@ -4120,21 +4120,21 @@ void InstructionCodeGeneratorARMVIXL::VisitTypeConversion(HTypeConversion* conve }; break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: switch (input_type) { - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimChar: { + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kUint16: { // Processing a Dex `int-to-double' instruction. __ Vmov(LowSRegisterFrom(out), InputRegisterAt(conversion, 0)); __ Vcvt(F64, S32, DRegisterFrom(out), LowSRegisterFrom(out)); break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { // Processing a Dex `long-to-double' instruction. vixl32::Register low = LowRegisterFrom(in); vixl32::Register high = HighRegisterFrom(in); @@ -4157,7 +4157,7 @@ void InstructionCodeGeneratorARMVIXL::VisitTypeConversion(HTypeConversion* conve break; } - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: // Processing a Dex `float-to-double' instruction. __ Vcvt(F64, F32, DRegisterFrom(out), InputSRegisterAt(conversion, 0)); break; @@ -4178,22 +4178,22 @@ void LocationsBuilderARMVIXL::VisitAdd(HAdd* add) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall); switch (add->GetResultType()) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(add->InputAt(1))); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, ArmEncodableConstantOrRegister(add->InputAt(1), ADD)); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); @@ -4212,12 +4212,12 @@ void InstructionCodeGeneratorARMVIXL::VisitAdd(HAdd* add) { Location second = locations->InAt(1); switch (add->GetResultType()) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { __ Add(OutputRegister(add), InputRegisterAt(add, 0), InputOperandAt(add, 1)); } break; - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { if (second.IsConstant()) { uint64_t value = static_cast<uint64_t>(Int64FromConstant(second.GetConstant())); GenerateAddLongConst(out, first, value); @@ -4229,8 +4229,8 @@ void InstructionCodeGeneratorARMVIXL::VisitAdd(HAdd* add) { break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: __ Vadd(OutputVRegister(add), InputVRegisterAt(add, 0), InputVRegisterAt(add, 1)); break; @@ -4243,21 +4243,21 @@ void LocationsBuilderARMVIXL::VisitSub(HSub* sub) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall); switch (sub->GetResultType()) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { locations->SetInAt(0, 
Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(sub->InputAt(1))); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, ArmEncodableConstantOrRegister(sub->InputAt(1), SUB)); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); @@ -4274,12 +4274,12 @@ void InstructionCodeGeneratorARMVIXL::VisitSub(HSub* sub) { Location first = locations->InAt(0); Location second = locations->InAt(1); switch (sub->GetResultType()) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { __ Sub(OutputRegister(sub), InputRegisterAt(sub, 0), InputOperandAt(sub, 1)); break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { if (second.IsConstant()) { uint64_t value = static_cast<uint64_t>(Int64FromConstant(second.GetConstant())); GenerateAddLongConst(out, first, -value); @@ -4291,8 +4291,8 @@ void InstructionCodeGeneratorARMVIXL::VisitSub(HSub* sub) { break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: __ Vsub(OutputVRegister(sub), InputVRegisterAt(sub, 0), InputVRegisterAt(sub, 1)); break; @@ -4305,16 +4305,16 @@ void LocationsBuilderARMVIXL::VisitMul(HMul* mul) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall); switch (mul->GetResultType()) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: { + case DataType::Type::kInt32: + case DataType::Type::kInt64: { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); @@ -4332,11 +4332,11 @@ void InstructionCodeGeneratorARMVIXL::VisitMul(HMul* mul) { Location first = locations->InAt(0); Location second = locations->InAt(1); switch (mul->GetResultType()) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { __ Mul(OutputRegister(mul), InputRegisterAt(mul, 0), InputRegisterAt(mul, 1)); break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { vixl32::Register out_hi = HighRegisterFrom(out); vixl32::Register out_lo = LowRegisterFrom(out); vixl32::Register in1_hi = HighRegisterFrom(first); @@ -4369,8 +4369,8 @@ void InstructionCodeGeneratorARMVIXL::VisitMul(HMul* mul) { break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: __ Vmul(OutputVRegister(mul), InputVRegisterAt(mul, 0), InputVRegisterAt(mul, 1)); break; @@ -4381,7 +4381,7 @@ void InstructionCodeGeneratorARMVIXL::VisitMul(HMul* mul) { void InstructionCodeGeneratorARMVIXL::DivRemOneOrMinusOne(HBinaryOperation* instruction) { 
DCHECK(instruction->IsDiv() || instruction->IsRem()); - DCHECK(instruction->GetResultType() == Primitive::kPrimInt); + DCHECK(instruction->GetResultType() == DataType::Type::kInt32); Location second = instruction->GetLocations()->InAt(1); DCHECK(second.IsConstant()); @@ -4404,7 +4404,7 @@ void InstructionCodeGeneratorARMVIXL::DivRemOneOrMinusOne(HBinaryOperation* inst void InstructionCodeGeneratorARMVIXL::DivRemByPowerOfTwo(HBinaryOperation* instruction) { DCHECK(instruction->IsDiv() || instruction->IsRem()); - DCHECK(instruction->GetResultType() == Primitive::kPrimInt); + DCHECK(instruction->GetResultType() == DataType::Type::kInt32); LocationSummary* locations = instruction->GetLocations(); Location second = locations->InAt(1); @@ -4438,7 +4438,7 @@ void InstructionCodeGeneratorARMVIXL::DivRemByPowerOfTwo(HBinaryOperation* instr void InstructionCodeGeneratorARMVIXL::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) { DCHECK(instruction->IsDiv() || instruction->IsRem()); - DCHECK(instruction->GetResultType() == Primitive::kPrimInt); + DCHECK(instruction->GetResultType() == DataType::Type::kInt32); LocationSummary* locations = instruction->GetLocations(); Location second = locations->InAt(1); @@ -4481,7 +4481,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateDivRemWithAnyConstant(HBinaryOpera void InstructionCodeGeneratorARMVIXL::GenerateDivRemConstantIntegral( HBinaryOperation* instruction) { DCHECK(instruction->IsDiv() || instruction->IsRem()); - DCHECK(instruction->GetResultType() == Primitive::kPrimInt); + DCHECK(instruction->GetResultType() == DataType::Type::kInt32); Location second = instruction->GetLocations()->InAt(1); DCHECK(second.IsConstant()); @@ -4501,12 +4501,12 @@ void InstructionCodeGeneratorARMVIXL::GenerateDivRemConstantIntegral( void LocationsBuilderARMVIXL::VisitDiv(HDiv* div) { LocationSummary::CallKind call_kind = LocationSummary::kNoCall; - if (div->GetResultType() == Primitive::kPrimLong) { + if (div->GetResultType() == DataType::Type::kInt64) { // pLdiv runtime call. call_kind = LocationSummary::kCallOnMainOnly; - } else if (div->GetResultType() == Primitive::kPrimInt && div->InputAt(1)->IsConstant()) { + } else if (div->GetResultType() == DataType::Type::kInt32 && div->InputAt(1)->IsConstant()) { // sdiv will be replaced by other instruction sequence. - } else if (div->GetResultType() == Primitive::kPrimInt && + } else if (div->GetResultType() == DataType::Type::kInt32 && !codegen_->GetInstructionSetFeatures().HasDivideInstruction()) { // pIdivmod runtime call. 
call_kind = LocationSummary::kCallOnMainOnly; @@ -4515,7 +4515,7 @@ void LocationsBuilderARMVIXL::VisitDiv(HDiv* div) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind); switch (div->GetResultType()) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { if (div->InputAt(1)->IsConstant()) { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::ConstantLocation(div->InputAt(1)->AsConstant())); @@ -4543,7 +4543,7 @@ void LocationsBuilderARMVIXL::VisitDiv(HDiv* div) { } break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { InvokeRuntimeCallingConventionARMVIXL calling_convention; locations->SetInAt(0, LocationFrom( calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1))); @@ -4552,8 +4552,8 @@ void LocationsBuilderARMVIXL::VisitDiv(HDiv* div) { locations->SetOut(LocationFrom(r0, r1)); break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); @@ -4570,7 +4570,7 @@ void InstructionCodeGeneratorARMVIXL::VisitDiv(HDiv* div) { Location rhs = div->GetLocations()->InAt(1); switch (div->GetResultType()) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { if (rhs.IsConstant()) { GenerateDivRemConstantIntegral(div); } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) { @@ -4587,7 +4587,7 @@ void InstructionCodeGeneratorARMVIXL::VisitDiv(HDiv* div) { break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { InvokeRuntimeCallingConventionARMVIXL calling_convention; DCHECK(calling_convention.GetRegisterAt(0).Is(LowRegisterFrom(lhs))); DCHECK(calling_convention.GetRegisterAt(1).Is(HighRegisterFrom(lhs))); @@ -4601,8 +4601,8 @@ void InstructionCodeGeneratorARMVIXL::VisitDiv(HDiv* div) { break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: __ Vdiv(OutputVRegister(div), InputVRegisterAt(div, 0), InputVRegisterAt(div, 1)); break; @@ -4612,14 +4612,14 @@ void InstructionCodeGeneratorARMVIXL::VisitDiv(HDiv* div) { } void LocationsBuilderARMVIXL::VisitRem(HRem* rem) { - Primitive::Type type = rem->GetResultType(); + DataType::Type type = rem->GetResultType(); // Most remainders are implemented in the runtime. LocationSummary::CallKind call_kind = LocationSummary::kCallOnMainOnly; - if (rem->GetResultType() == Primitive::kPrimInt && rem->InputAt(1)->IsConstant()) { + if (rem->GetResultType() == DataType::Type::kInt32 && rem->InputAt(1)->IsConstant()) { // sdiv will be replaced by other instruction sequence. call_kind = LocationSummary::kNoCall; - } else if ((rem->GetResultType() == Primitive::kPrimInt) + } else if ((rem->GetResultType() == DataType::Type::kInt32) && codegen_->GetInstructionSetFeatures().HasDivideInstruction()) { // Have hardware divide instruction for int, do it with three instructions. 
call_kind = LocationSummary::kNoCall; @@ -4628,7 +4628,7 @@ void LocationsBuilderARMVIXL::VisitRem(HRem* rem) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind); switch (type) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { if (rem->InputAt(1)->IsConstant()) { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::ConstantLocation(rem->InputAt(1)->AsConstant())); @@ -4657,7 +4657,7 @@ void LocationsBuilderARMVIXL::VisitRem(HRem* rem) { } break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { InvokeRuntimeCallingConventionARMVIXL calling_convention; locations->SetInAt(0, LocationFrom( calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1))); @@ -4667,7 +4667,7 @@ void LocationsBuilderARMVIXL::VisitRem(HRem* rem) { locations->SetOut(LocationFrom(r2, r3)); break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { InvokeRuntimeCallingConventionARMVIXL calling_convention; locations->SetInAt(0, LocationFrom(calling_convention.GetFpuRegisterAt(0))); locations->SetInAt(1, LocationFrom(calling_convention.GetFpuRegisterAt(1))); @@ -4675,7 +4675,7 @@ void LocationsBuilderARMVIXL::VisitRem(HRem* rem) { break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { InvokeRuntimeCallingConventionARMVIXL calling_convention; locations->SetInAt(0, LocationFrom( calling_convention.GetFpuRegisterAt(0), calling_convention.GetFpuRegisterAt(1))); @@ -4694,9 +4694,9 @@ void InstructionCodeGeneratorARMVIXL::VisitRem(HRem* rem) { LocationSummary* locations = rem->GetLocations(); Location second = locations->InAt(1); - Primitive::Type type = rem->GetResultType(); + DataType::Type type = rem->GetResultType(); switch (type) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { vixl32::Register reg1 = InputRegisterAt(rem, 0); vixl32::Register out_reg = OutputRegister(rem); if (second.IsConstant()) { @@ -4721,19 +4721,19 @@ void InstructionCodeGeneratorARMVIXL::VisitRem(HRem* rem) { break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { codegen_->InvokeRuntime(kQuickLmod, rem, rem->GetDexPc()); CheckEntrypointTypes<kQuickLmod, int64_t, int64_t, int64_t>(); break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { codegen_->InvokeRuntime(kQuickFmodf, rem, rem->GetDexPc()); CheckEntrypointTypes<kQuickFmodf, float, float, float>(); break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { codegen_->InvokeRuntime(kQuickFmod, rem, rem->GetDexPc()); CheckEntrypointTypes<kQuickFmod, double, double, double>(); break; @@ -4759,11 +4759,11 @@ void InstructionCodeGeneratorARMVIXL::VisitDivZeroCheck(HDivZeroCheck* instructi Location value = locations->InAt(0); switch (instruction->GetType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: { + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: { if (value.IsRegister()) { __ CompareAndBranchIfZero(InputRegisterAt(instruction, 0), slow_path->GetEntryLabel()); } else { @@ -4774,7 +4774,7 @@ void InstructionCodeGeneratorARMVIXL::VisitDivZeroCheck(HDivZeroCheck* instructi } break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { if (value.IsRegisterPair()) { UseScratchRegisterScope temps(GetVIXLAssembler()); vixl32::Register temp = temps.Acquire(); @@ 
-4891,13 +4891,13 @@ void LocationsBuilderARMVIXL::VisitRor(HRor* ror) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ror, LocationSummary::kNoCall); switch (ror->GetResultType()) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(ror->InputAt(1))); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { locations->SetInAt(0, Location::RequiresRegister()); if (ror->InputAt(1)->IsConstant()) { locations->SetInAt(1, Location::ConstantLocation(ror->InputAt(1)->AsConstant())); @@ -4915,13 +4915,13 @@ void LocationsBuilderARMVIXL::VisitRor(HRor* ror) { } void InstructionCodeGeneratorARMVIXL::VisitRor(HRor* ror) { - Primitive::Type type = ror->GetResultType(); + DataType::Type type = ror->GetResultType(); switch (type) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { HandleIntegerRotate(ror); break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { HandleLongRotate(ror); break; } @@ -4938,7 +4938,7 @@ void LocationsBuilderARMVIXL::HandleShift(HBinaryOperation* op) { new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall); switch (op->GetResultType()) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { locations->SetInAt(0, Location::RequiresRegister()); if (op->InputAt(1)->IsConstant()) { locations->SetInAt(1, Location::ConstantLocation(op->InputAt(1)->AsConstant())); @@ -4951,7 +4951,7 @@ void LocationsBuilderARMVIXL::HandleShift(HBinaryOperation* op) { } break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { locations->SetInAt(0, Location::RequiresRegister()); if (op->InputAt(1)->IsConstant()) { locations->SetInAt(1, Location::ConstantLocation(op->InputAt(1)->AsConstant())); @@ -4978,9 +4978,9 @@ void InstructionCodeGeneratorARMVIXL::HandleShift(HBinaryOperation* op) { Location first = locations->InAt(0); Location second = locations->InAt(1); - Primitive::Type type = op->GetResultType(); + DataType::Type type = op->GetResultType(); switch (type) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { vixl32::Register out_reg = OutputRegister(op); vixl32::Register first_reg = InputRegisterAt(op, 0); if (second.IsRegister()) { @@ -5009,7 +5009,7 @@ void InstructionCodeGeneratorARMVIXL::HandleShift(HBinaryOperation* op) { } break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { vixl32::Register o_h = HighRegisterFrom(out); vixl32::Register o_l = LowRegisterFrom(out); @@ -5258,11 +5258,11 @@ void InstructionCodeGeneratorARMVIXL::VisitNot(HNot* not_) { Location out = locations->Out(); Location in = locations->InAt(0); switch (not_->GetResultType()) { - case Primitive::kPrimInt: + case DataType::Type::kInt32: __ Mvn(OutputRegister(not_), InputRegisterAt(not_, 0)); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: __ Mvn(LowRegisterFrom(out), LowRegisterFrom(in)); __ Mvn(HighRegisterFrom(out), HighRegisterFrom(in)); break; @@ -5287,20 +5287,20 @@ void LocationsBuilderARMVIXL::VisitCompare(HCompare* compare) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall); switch (compare->InputAt(0)->GetType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimChar: - case Primitive::kPrimInt: - case Primitive::kPrimLong: { + case 
DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kUint16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); // Output overlaps because it is written before doing the low comparison. locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap); break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, ArithmeticZeroOrFpuRegister(compare->InputAt(1))); locations->SetOut(Location::RequiresRegister()); @@ -5319,21 +5319,21 @@ void InstructionCodeGeneratorARMVIXL::VisitCompare(HCompare* compare) { vixl32::Label less, greater, done; vixl32::Label* final_label = codegen_->GetFinalLabel(compare, &done); - Primitive::Type type = compare->InputAt(0)->GetType(); + DataType::Type type = compare->InputAt(0)->GetType(); vixl32::Condition less_cond = vixl32::Condition(kNone); switch (type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimChar: - case Primitive::kPrimInt: { + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kUint16: + case DataType::Type::kInt32: { // Emit move to `out` before the `Cmp`, as `Mov` might affect the status flags. __ Mov(out, 0); __ Cmp(RegisterFrom(left), RegisterFrom(right)); // Signed compare. less_cond = lt; break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { __ Cmp(HighRegisterFrom(left), HighRegisterFrom(right)); // Signed compare. __ B(lt, &less, /* far_target */ false); __ B(gt, &greater, /* far_target */ false); @@ -5343,8 +5343,8 @@ void InstructionCodeGeneratorARMVIXL::VisitCompare(HCompare* compare) { less_cond = lo; break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { __ Mov(out, 0); GenerateVcmp(compare, codegen_); // To branch on the FP compare result we transfer FPSCR to APSR (encoded as PC in VMRS). @@ -5455,14 +5455,14 @@ void LocationsBuilderARMVIXL::HandleFieldSet( new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); - Primitive::Type field_type = field_info.GetFieldType(); - if (Primitive::IsFloatingPointType(field_type)) { + DataType::Type field_type = field_info.GetFieldType(); + if (DataType::IsFloatingPointType(field_type)) { locations->SetInAt(1, Location::RequiresFpuRegister()); } else { locations->SetInAt(1, Location::RequiresRegister()); } - bool is_wide = field_type == Primitive::kPrimLong || field_type == Primitive::kPrimDouble; + bool is_wide = field_type == DataType::Type::kInt64 || field_type == DataType::Type::kFloat64; bool generate_volatile = field_info.IsVolatile() && is_wide && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd(); @@ -5483,7 +5483,7 @@ void LocationsBuilderARMVIXL::HandleFieldSet( locations->AddTemp(Location::RequiresRegister()); locations->AddTemp(Location::RequiresRegister()); - if (field_type == Primitive::kPrimDouble) { + if (field_type == DataType::Type::kFloat64) { // For doubles we need two more registers to copy the value. 
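Aside: the edits in these hunks all follow one renaming scheme from the runtime's Primitive::Type values to the compiler's new DataType::Type (kPrimBoolean -> kBool, kPrimByte -> kInt8, kPrimChar -> kUint16, kPrimShort -> kInt16, kPrimInt -> kInt32, kPrimLong -> kInt64, kPrimFloat -> kFloat32, kPrimDouble -> kFloat64, kPrimNot -> kReference, kPrimVoid -> kVoid). Below is a minimal sketch of that enum and the two predicates the hunks rely on; it is simplified for illustration and is not the actual DataType definition.

// Illustrative sketch only, not the real art::DataType class.
enum class Type {
  kReference, kBool, kInt8, kUint16, kInt16, kInt32, kInt64, kFloat32, kFloat64, kVoid
};

// Mirrors how the hunks use DataType::IsFloatingPointType().
constexpr bool IsFloatingPointType(Type type) {
  return type == Type::kFloat32 || type == Type::kFloat64;
}

// Mirrors how the hunks use DataType::Is64BitType().
constexpr bool Is64BitType(Type type) {
  return type == Type::kInt64 || type == Type::kFloat64;
}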
locations->AddTemp(LocationFrom(r2)); locations->AddTemp(LocationFrom(r3)); @@ -5502,7 +5502,7 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldSet(HInstruction* instruction, bool is_volatile = field_info.IsVolatile(); bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd(); - Primitive::Type field_type = field_info.GetFieldType(); + DataType::Type field_type = field_info.GetFieldType(); uint32_t offset = field_info.GetFieldOffset().Uint32Value(); bool needs_write_barrier = CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1)); @@ -5512,25 +5512,25 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldSet(HInstruction* instruction, } switch (field_type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: { + case DataType::Type::kBool: + case DataType::Type::kInt8: { GetAssembler()->StoreToOffset(kStoreByte, RegisterFrom(value), base, offset); break; } - case Primitive::kPrimShort: - case Primitive::kPrimChar: { + case DataType::Type::kInt16: + case DataType::Type::kUint16: { GetAssembler()->StoreToOffset(kStoreHalfword, RegisterFrom(value), base, offset); break; } - case Primitive::kPrimInt: - case Primitive::kPrimNot: { + case DataType::Type::kInt32: + case DataType::Type::kReference: { if (kPoisonHeapReferences && needs_write_barrier) { // Note that in the case where `value` is a null reference, // we do not enter this block, as a null reference does not // need poisoning. - DCHECK_EQ(field_type, Primitive::kPrimNot); + DCHECK_EQ(field_type, DataType::Type::kReference); vixl32::Register temp = RegisterFrom(locations->GetTemp(0)); __ Mov(temp, RegisterFrom(value)); GetAssembler()->PoisonHeapReference(temp); @@ -5541,7 +5541,7 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldSet(HInstruction* instruction, break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { if (is_volatile && !atomic_ldrd_strd) { GenerateWideAtomicStore(base, offset, @@ -5557,12 +5557,12 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldSet(HInstruction* instruction, break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { GetAssembler()->StoreSToOffset(SRegisterFrom(value), base, offset); break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { vixl32::DRegister value_reg = DRegisterFrom(value); if (is_volatile && !atomic_ldrd_strd) { vixl32::Register value_reg_lo = RegisterFrom(locations->GetTemp(0)); @@ -5584,13 +5584,13 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldSet(HInstruction* instruction, break; } - case Primitive::kPrimVoid: + case DataType::Type::kVoid: LOG(FATAL) << "Unreachable type " << field_type; UNREACHABLE(); } // Longs and doubles are handled in the switch. - if (field_type != Primitive::kPrimLong && field_type != Primitive::kPrimDouble) { + if (field_type != DataType::Type::kInt64 && field_type != DataType::Type::kFloat64) { // TODO(VIXL): Here and for other calls to `MaybeRecordImplicitNullCheck` in this method, we // should use a scope and the assembler to emit the store instruction to guarantee that we // record the pc at the correct position. 
But the `Assembler` does not automatically handle @@ -5615,7 +5615,7 @@ void LocationsBuilderARMVIXL::HandleFieldGet(HInstruction* instruction, DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet()); bool object_field_get_with_read_barrier = - kEmitCompilerReadBarrier && (field_info.GetFieldType() == Primitive::kPrimNot); + kEmitCompilerReadBarrier && (field_info.GetFieldType() == DataType::Type::kReference); LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, object_field_get_with_read_barrier ? @@ -5627,17 +5627,18 @@ void LocationsBuilderARMVIXL::HandleFieldGet(HInstruction* instruction, locations->SetInAt(0, Location::RequiresRegister()); bool volatile_for_double = field_info.IsVolatile() - && (field_info.GetFieldType() == Primitive::kPrimDouble) + && (field_info.GetFieldType() == DataType::Type::kFloat64) && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd(); // The output overlaps in case of volatile long: we don't want the // code generated by GenerateWideAtomicLoad to overwrite the // object's location. Likewise, in the case of an object field get // with read barriers enabled, we do not want the load to overwrite // the object's location, as we need it to emit the read barrier. - bool overlap = (field_info.IsVolatile() && (field_info.GetFieldType() == Primitive::kPrimLong)) || + bool overlap = + (field_info.IsVolatile() && (field_info.GetFieldType() == DataType::Type::kInt64)) || object_field_get_with_read_barrier; - if (Primitive::IsFloatingPointType(instruction->GetType())) { + if (DataType::IsFloatingPointType(instruction->GetType())) { locations->SetOut(Location::RequiresFpuRegister()); } else { locations->SetOut(Location::RequiresRegister(), @@ -5671,7 +5672,7 @@ void LocationsBuilderARMVIXL::HandleFieldGet(HInstruction* instruction, } Location LocationsBuilderARMVIXL::ArithmeticZeroOrFpuRegister(HInstruction* input) { - DCHECK(Primitive::IsFloatingPointType(input->GetType())) << input->GetType(); + DCHECK(DataType::IsFloatingPointType(input->GetType())) << input->GetType(); if ((input->IsFloatConstant() && (input->AsFloatConstant()->IsArithmeticZero())) || (input->IsDoubleConstant() && (input->AsDoubleConstant()->IsArithmeticZero()))) { return Location::ConstantLocation(input->AsConstant()); @@ -5682,7 +5683,7 @@ Location LocationsBuilderARMVIXL::ArithmeticZeroOrFpuRegister(HInstruction* inpu Location LocationsBuilderARMVIXL::ArmEncodableConstantOrRegister(HInstruction* constant, Opcode opcode) { - DCHECK(!Primitive::IsFloatingPointType(constant->GetType())); + DCHECK(!DataType::IsFloatingPointType(constant->GetType())); if (constant->IsConstant() && CanEncodeConstantAsImmediate(constant->AsConstant(), opcode)) { return Location::ConstantLocation(constant->AsConstant()); @@ -5693,7 +5694,7 @@ Location LocationsBuilderARMVIXL::ArmEncodableConstantOrRegister(HInstruction* c bool LocationsBuilderARMVIXL::CanEncodeConstantAsImmediate(HConstant* input_cst, Opcode opcode) { uint64_t value = static_cast<uint64_t>(Int64FromConstant(input_cst)); - if (Primitive::Is64BitType(input_cst->GetType())) { + if (DataType::Is64BitType(input_cst->GetType())) { Opcode high_opcode = opcode; SetCc low_set_cc = kCcDontCare; switch (opcode) { @@ -5758,31 +5759,31 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldGet(HInstruction* instruction, Location out = locations->Out(); bool is_volatile = field_info.IsVolatile(); bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd(); - Primitive::Type 
field_type = field_info.GetFieldType(); + DataType::Type field_type = field_info.GetFieldType(); uint32_t offset = field_info.GetFieldOffset().Uint32Value(); switch (field_type) { - case Primitive::kPrimBoolean: + case DataType::Type::kBool: GetAssembler()->LoadFromOffset(kLoadUnsignedByte, RegisterFrom(out), base, offset); break; - case Primitive::kPrimByte: + case DataType::Type::kInt8: GetAssembler()->LoadFromOffset(kLoadSignedByte, RegisterFrom(out), base, offset); break; - case Primitive::kPrimShort: + case DataType::Type::kInt16: GetAssembler()->LoadFromOffset(kLoadSignedHalfword, RegisterFrom(out), base, offset); break; - case Primitive::kPrimChar: + case DataType::Type::kUint16: GetAssembler()->LoadFromOffset(kLoadUnsignedHalfword, RegisterFrom(out), base, offset); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: GetAssembler()->LoadFromOffset(kLoadWord, RegisterFrom(out), base, offset); break; - case Primitive::kPrimNot: { + case DataType::Type::kReference: { // /* HeapReference<Object> */ out = *(base + offset) if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { Location temp_loc = locations->GetTemp(0); @@ -5807,7 +5808,7 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldGet(HInstruction* instruction, break; } - case Primitive::kPrimLong: + case DataType::Type::kInt64: if (is_volatile && !atomic_ldrd_strd) { GenerateWideAtomicLoad(base, offset, LowRegisterFrom(out), HighRegisterFrom(out)); } else { @@ -5815,11 +5816,11 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldGet(HInstruction* instruction, } break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: GetAssembler()->LoadSFromOffset(SRegisterFrom(out), base, offset); break; - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { vixl32::DRegister out_dreg = DRegisterFrom(out); if (is_volatile && !atomic_ldrd_strd) { vixl32::Register lo = RegisterFrom(locations->GetTemp(0)); @@ -5836,12 +5837,12 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldGet(HInstruction* instruction, break; } - case Primitive::kPrimVoid: + case DataType::Type::kVoid: LOG(FATAL) << "Unreachable type " << field_type; UNREACHABLE(); } - if (field_type == Primitive::kPrimNot || field_type == Primitive::kPrimDouble) { + if (field_type == DataType::Type::kReference || field_type == DataType::Type::kFloat64) { // Potential implicit null checks, in the case of reference or // double fields, are handled in the previous switch statement. } else { @@ -5855,7 +5856,7 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldGet(HInstruction* instruction, } if (is_volatile) { - if (field_type == Primitive::kPrimNot) { + if (field_type == DataType::Type::kReference) { // Memory barriers, in the case of references, are also handled // in the previous switch statement. 
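The volatile handling in the field-access hunks above reduces to one predicate: a volatile 64-bit field (int64 or float64) needs either atomic LDRD/STRD support or the GenerateWideAtomicStore/GenerateWideAtomicLoad helper path, which is what forces the extra temporaries and the output overlap. A compacted sketch of that decision, assuming the same three inputs the code tests:

// Sketch of the "wide volatile" check seen in HandleFieldSet/HandleFieldGet above.
constexpr bool NeedsWideAtomicHelper(bool is_volatile, bool is_64bit, bool has_atomic_ldrd_strd) {
  return is_volatile && is_64bit && !has_atomic_ldrd_strd;
}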
} else { @@ -5994,25 +5995,25 @@ void InstructionCodeGeneratorARMVIXL::VisitNullCheck(HNullCheck* instruction) { codegen_->GenerateNullCheck(instruction); } -static LoadOperandType GetLoadOperandType(Primitive::Type type) { +static LoadOperandType GetLoadOperandType(DataType::Type type) { switch (type) { - case Primitive::kPrimNot: + case DataType::Type::kReference: return kLoadWord; - case Primitive::kPrimBoolean: + case DataType::Type::kBool: return kLoadUnsignedByte; - case Primitive::kPrimByte: + case DataType::Type::kInt8: return kLoadSignedByte; - case Primitive::kPrimChar: + case DataType::Type::kUint16: return kLoadUnsignedHalfword; - case Primitive::kPrimShort: + case DataType::Type::kInt16: return kLoadSignedHalfword; - case Primitive::kPrimInt: + case DataType::Type::kInt32: return kLoadWord; - case Primitive::kPrimLong: + case DataType::Type::kInt64: return kLoadWordPair; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: return kLoadSWord; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: return kLoadDWord; default: LOG(FATAL) << "Unreachable type " << type; @@ -6020,23 +6021,23 @@ static LoadOperandType GetLoadOperandType(Primitive::Type type) { } } -static StoreOperandType GetStoreOperandType(Primitive::Type type) { +static StoreOperandType GetStoreOperandType(DataType::Type type) { switch (type) { - case Primitive::kPrimNot: + case DataType::Type::kReference: return kStoreWord; - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: + case DataType::Type::kBool: + case DataType::Type::kInt8: return kStoreByte; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: return kStoreHalfword; - case Primitive::kPrimInt: + case DataType::Type::kInt32: return kStoreWord; - case Primitive::kPrimLong: + case DataType::Type::kInt64: return kStoreWordPair; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: return kStoreSWord; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: return kStoreDWord; default: LOG(FATAL) << "Unreachable type " << type; @@ -6044,66 +6045,66 @@ static StoreOperandType GetStoreOperandType(Primitive::Type type) { } } -void CodeGeneratorARMVIXL::LoadFromShiftedRegOffset(Primitive::Type type, +void CodeGeneratorARMVIXL::LoadFromShiftedRegOffset(DataType::Type type, Location out_loc, vixl32::Register base, vixl32::Register reg_index, vixl32::Condition cond) { - uint32_t shift_count = Primitive::ComponentSizeShift(type); + uint32_t shift_count = DataType::SizeShift(type); MemOperand mem_address(base, reg_index, vixl32::LSL, shift_count); switch (type) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: __ Ldrsb(cond, RegisterFrom(out_loc), mem_address); break; - case Primitive::kPrimBoolean: + case DataType::Type::kBool: __ Ldrb(cond, RegisterFrom(out_loc), mem_address); break; - case Primitive::kPrimShort: + case DataType::Type::kInt16: __ Ldrsh(cond, RegisterFrom(out_loc), mem_address); break; - case Primitive::kPrimChar: + case DataType::Type::kUint16: __ Ldrh(cond, RegisterFrom(out_loc), mem_address); break; - case Primitive::kPrimNot: - case Primitive::kPrimInt: + case DataType::Type::kReference: + case DataType::Type::kInt32: __ Ldr(cond, RegisterFrom(out_loc), mem_address); break; // T32 doesn't support LoadFromShiftedRegOffset mem address mode for these types. 
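The shift count fed to the LSL memory operands above is just log2 of the element size, and the constant-index paths later in this file compute the same offset directly. A standalone sketch of that arithmetic follows; the element sizes are the usual 1/2/4/8 bytes and the concrete offset in the assert is illustrative, not a claim about ART's real array header layout.

#include <cstdint>

// Sketch of the scale used by Load/StoreToShiftedRegOffset: log2 of the element size.
constexpr uint32_t SizeShiftFromBytes(uint32_t element_size) {
  // 1 -> 0, 2 -> 1, 4 -> 2, 8 -> 3.
  return element_size == 1u ? 0u : element_size == 2u ? 1u : element_size == 4u ? 2u : 3u;
}

// Offset of element `index` in a payload that starts `data_offset` bytes into the object.
constexpr uint32_t ElementOffset(uint32_t data_offset, uint32_t index, uint32_t element_size) {
  return data_offset + (index << SizeShiftFromBytes(element_size));
}

static_assert(ElementOffset(12u, 5u, 4u) == 32u,
              "element 5 of a 4-byte-element payload starting at offset 12");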
- case Primitive::kPrimLong: - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: default: LOG(FATAL) << "Unreachable type " << type; UNREACHABLE(); } } -void CodeGeneratorARMVIXL::StoreToShiftedRegOffset(Primitive::Type type, +void CodeGeneratorARMVIXL::StoreToShiftedRegOffset(DataType::Type type, Location loc, vixl32::Register base, vixl32::Register reg_index, vixl32::Condition cond) { - uint32_t shift_count = Primitive::ComponentSizeShift(type); + uint32_t shift_count = DataType::SizeShift(type); MemOperand mem_address(base, reg_index, vixl32::LSL, shift_count); switch (type) { - case Primitive::kPrimByte: - case Primitive::kPrimBoolean: + case DataType::Type::kInt8: + case DataType::Type::kBool: __ Strb(cond, RegisterFrom(loc), mem_address); break; - case Primitive::kPrimShort: - case Primitive::kPrimChar: + case DataType::Type::kInt16: + case DataType::Type::kUint16: __ Strh(cond, RegisterFrom(loc), mem_address); break; - case Primitive::kPrimNot: - case Primitive::kPrimInt: + case DataType::Type::kReference: + case DataType::Type::kInt32: __ Str(cond, RegisterFrom(loc), mem_address); break; // T32 doesn't support StoreToShiftedRegOffset mem address mode for these types. - case Primitive::kPrimLong: - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: default: LOG(FATAL) << "Unreachable type " << type; UNREACHABLE(); @@ -6112,7 +6113,7 @@ void CodeGeneratorARMVIXL::StoreToShiftedRegOffset(Primitive::Type type, void LocationsBuilderARMVIXL::VisitArrayGet(HArrayGet* instruction) { bool object_array_get_with_read_barrier = - kEmitCompilerReadBarrier && (instruction->GetType() == Primitive::kPrimNot); + kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference); LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, object_array_get_with_read_barrier ? @@ -6123,7 +6124,7 @@ void LocationsBuilderARMVIXL::VisitArrayGet(HArrayGet* instruction) { } locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); - if (Primitive::IsFloatingPointType(instruction->GetType())) { + if (DataType::IsFloatingPointType(instruction->GetType())) { locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); } else { // The output overlaps in the case of an object array get with @@ -6144,7 +6145,7 @@ void LocationsBuilderARMVIXL::VisitArrayGet(HArrayGet* instruction) { // constant index loads we need a temporary only if the offset is too big. 
uint32_t offset = CodeGenerator::GetArrayDataOffset(instruction); uint32_t index = instruction->GetIndex()->AsIntConstant()->GetValue(); - offset += index << Primitive::ComponentSizeShift(Primitive::kPrimNot); + offset += index << DataType::SizeShift(DataType::Type::kReference); if (offset >= kReferenceLoadMinFarOffset) { locations->AddTemp(Location::RequiresRegister()); } @@ -6173,18 +6174,18 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) { Location index = locations->InAt(1); Location out_loc = locations->Out(); uint32_t data_offset = CodeGenerator::GetArrayDataOffset(instruction); - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); const bool maybe_compressed_char_at = mirror::kUseStringCompression && instruction->IsStringCharAt(); HInstruction* array_instr = instruction->GetArray(); bool has_intermediate_address = array_instr->IsIntermediateAddress(); switch (type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimChar: - case Primitive::kPrimInt: { + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kUint16: + case DataType::Type::kInt32: { vixl32::Register length; if (maybe_compressed_char_at) { length = RegisterFrom(locations->GetTemp(0)); @@ -6207,7 +6208,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) { data_offset + const_index); __ B(final_label); __ Bind(&uncompressed_load); - GetAssembler()->LoadFromOffset(GetLoadOperandType(Primitive::kPrimChar), + GetAssembler()->LoadFromOffset(GetLoadOperandType(DataType::Type::kUint16), RegisterFrom(out_loc), obj, data_offset + (const_index << 1)); @@ -6215,7 +6216,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) { __ Bind(&done); } } else { - uint32_t full_offset = data_offset + (const_index << Primitive::ComponentSizeShift(type)); + uint32_t full_offset = data_offset + (const_index << DataType::SizeShift(type)); LoadOperandType load_type = GetLoadOperandType(type); GetAssembler()->LoadFromOffset(load_type, RegisterFrom(out_loc), obj, full_offset); @@ -6257,7 +6258,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimNot: { + case DataType::Type::kReference: { // The read barrier instrumentation of object ArrayGet // instructions does not support the HIntermediateAddress // instruction. @@ -6275,7 +6276,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) { DCHECK(!instruction->CanDoImplicitNullCheckOn(instruction->InputAt(0))); if (index.IsConstant()) { // Array load with a constant index can be treated as a field load. 
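The String.charAt fast path earlier in this hunk loads either a byte or a halfword depending on the compression flag, so for a constant index only the scale of the index changes. A small sketch of the two offsets it emits, assuming the usual 8-bit compressed / 16-bit uncompressed character layout:

#include <cstdint>

// Sketch of the constant-index charAt offsets: compressed strings use byte elements,
// uncompressed strings use halfword elements.
constexpr uint32_t CharOffset(uint32_t data_offset, uint32_t index, bool compressed) {
  return compressed ? data_offset + index          // 8-bit chars
                    : data_offset + (index << 1);  // 16-bit chars
}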
- data_offset += Int32ConstantFrom(index) << Primitive::ComponentSizeShift(type); + data_offset += Int32ConstantFrom(index) << DataType::SizeShift(type); codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction, out_loc, obj, @@ -6334,7 +6335,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { if (index.IsConstant()) { size_t offset = (Int32ConstantFrom(index) << TIMES_8) + data_offset; @@ -6348,7 +6349,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { vixl32::SRegister out = SRegisterFrom(out_loc); if (index.IsConstant()) { size_t offset = (Int32ConstantFrom(index) << TIMES_4) + data_offset; @@ -6362,7 +6363,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { if (index.IsConstant()) { size_t offset = (Int32ConstantFrom(index) << TIMES_8) + data_offset; GetAssembler()->LoadDFromOffset(DRegisterFrom(out_loc), obj, offset); @@ -6375,12 +6376,12 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimVoid: + case DataType::Type::kVoid: LOG(FATAL) << "Unreachable type " << type; UNREACHABLE(); } - if (type == Primitive::kPrimNot) { + if (type == DataType::Type::kReference) { // Potential implicit null checks, in the case of reference // arrays, are handled in the previous switch statement. } else if (!maybe_compressed_char_at) { @@ -6391,7 +6392,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) { } void LocationsBuilderARMVIXL::VisitArraySet(HArraySet* instruction) { - Primitive::Type value_type = instruction->GetComponentType(); + DataType::Type value_type = instruction->GetComponentType(); bool needs_write_barrier = CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue()); @@ -6405,7 +6406,7 @@ void LocationsBuilderARMVIXL::VisitArraySet(HArraySet* instruction) { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); - if (Primitive::IsFloatingPointType(value_type)) { + if (DataType::IsFloatingPointType(value_type)) { locations->SetInAt(2, Location::RequiresFpuRegister()); } else { locations->SetInAt(2, Location::RequiresRegister()); @@ -6421,26 +6422,26 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) { LocationSummary* locations = instruction->GetLocations(); vixl32::Register array = InputRegisterAt(instruction, 0); Location index = locations->InAt(1); - Primitive::Type value_type = instruction->GetComponentType(); + DataType::Type value_type = instruction->GetComponentType(); bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck(); bool needs_write_barrier = CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue()); uint32_t data_offset = - mirror::Array::DataOffset(Primitive::ComponentSize(value_type)).Uint32Value(); + mirror::Array::DataOffset(DataType::Size(value_type)).Uint32Value(); Location value_loc = locations->InAt(2); HInstruction* array_instr = instruction->GetArray(); bool has_intermediate_address = array_instr->IsIntermediateAddress(); switch (value_type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimChar: - case Primitive::kPrimInt: { + case 
DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kUint16: + case DataType::Type::kInt32: { if (index.IsConstant()) { int32_t const_index = Int32ConstantFrom(index); uint32_t full_offset = - data_offset + (const_index << Primitive::ComponentSizeShift(value_type)); + data_offset + (const_index << DataType::SizeShift(value_type)); StoreOperandType store_type = GetStoreOperandType(value_type); GetAssembler()->StoreToOffset(store_type, RegisterFrom(value_loc), array, full_offset); } else { @@ -6464,7 +6465,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimNot: { + case DataType::Type::kReference: { vixl32::Register value = RegisterFrom(value_loc); // TryExtractArrayAccessAddress optimization is never applied for non-primitive ArraySet. // See the comment in instruction_simplifier_shared.cc. @@ -6577,7 +6578,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) { // Note that in the case where `value` is a null reference, // we do not enter this block, as a null reference does not // need poisoning. - DCHECK_EQ(value_type, Primitive::kPrimNot); + DCHECK_EQ(value_type, DataType::Type::kReference); __ Mov(temp1, value); GetAssembler()->PoisonHeapReference(temp1); source = temp1; @@ -6618,7 +6619,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { Location value = locations->InAt(2); if (index.IsConstant()) { size_t offset = @@ -6633,7 +6634,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { Location value = locations->InAt(2); DCHECK(value.IsFpuRegister()); if (index.IsConstant()) { @@ -6648,7 +6649,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { Location value = locations->InAt(2); DCHECK(value.IsFpuRegisterPair()); if (index.IsConstant()) { @@ -6663,13 +6664,13 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimVoid: + case DataType::Type::kVoid: LOG(FATAL) << "Unreachable type " << value_type; UNREACHABLE(); } // Objects are handled in the switch. - if (value_type != Primitive::kPrimNot) { + if (value_type != DataType::Type::kReference) { // TODO(VIXL): Ensure we record the pc position immediately after the preceding store // instruction. codegen_->MaybeRecordImplicitNullCheck(instruction); @@ -7966,7 +7967,7 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) { // Otherwise,the object is indeed an array, jump to label `check_non_primitive_component_type` // to further check that this component type is not a primitive type. 
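The component-type check that follows relies on the runtime encoding a reference component as primitive type zero, so any non-zero primitive-type value sends the cast to the slow path. A sketch of that predicate; the value of kPrimNot here is exactly what the static_assert below guards.

#include <cstdint>

constexpr uint16_t kPrimNot = 0;  // "Not a primitive", i.e. a reference component type.

// Non-zero primitive-type field means the array holds primitives, so the cast must bail out.
constexpr bool IsPrimitiveArrayComponent(uint16_t primitive_type_field) {
  return primitive_type_field != kPrimNot;
}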
GetAssembler()->LoadFromOffset(kLoadUnsignedHalfword, temp, temp, primitive_offset); - static_assert(Primitive::kPrimNot == 0, "Expected 0 for art::Primitive::kPrimNot"); + static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot"); __ CompareAndBranchIfNonZero(temp, type_check_slow_path->GetEntryLabel()); break; } @@ -8061,8 +8062,8 @@ void LocationsBuilderARMVIXL::VisitXor(HXor* instruction) { void LocationsBuilderARMVIXL::HandleBitwiseOperation(HBinaryOperation* instruction, Opcode opcode) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); - DCHECK(instruction->GetResultType() == Primitive::kPrimInt - || instruction->GetResultType() == Primitive::kPrimLong); + DCHECK(instruction->GetResultType() == DataType::Type::kInt32 + || instruction->GetResultType() == DataType::Type::kInt64); // Note: GVN reorders commutative operations to have the constant on the right hand side. locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, ArmEncodableConstantOrRegister(instruction->InputAt(1), opcode)); @@ -8084,8 +8085,8 @@ void InstructionCodeGeneratorARMVIXL::VisitXor(HXor* instruction) { void LocationsBuilderARMVIXL::VisitBitwiseNegatedRight(HBitwiseNegatedRight* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); - DCHECK(instruction->GetResultType() == Primitive::kPrimInt - || instruction->GetResultType() == Primitive::kPrimLong); + DCHECK(instruction->GetResultType() == DataType::Type::kInt32 + || instruction->GetResultType() == DataType::Type::kInt64); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); @@ -8098,7 +8099,7 @@ void InstructionCodeGeneratorARMVIXL::VisitBitwiseNegatedRight(HBitwiseNegatedRi Location second = locations->InAt(1); Location out = locations->Out(); - if (instruction->GetResultType() == Primitive::kPrimInt) { + if (instruction->GetResultType() == DataType::Type::kInt32) { vixl32::Register first_reg = RegisterFrom(first); vixl32::Register second_reg = RegisterFrom(second); vixl32::Register out_reg = RegisterFrom(out); @@ -8119,7 +8120,7 @@ void InstructionCodeGeneratorARMVIXL::VisitBitwiseNegatedRight(HBitwiseNegatedRi return; } else { - DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong); + DCHECK_EQ(instruction->GetResultType(), DataType::Type::kInt64); vixl32::Register first_low = LowRegisterFrom(first); vixl32::Register first_high = HighRegisterFrom(first); vixl32::Register second_low = LowRegisterFrom(second); @@ -8147,11 +8148,11 @@ void InstructionCodeGeneratorARMVIXL::VisitBitwiseNegatedRight(HBitwiseNegatedRi void LocationsBuilderARMVIXL::VisitDataProcWithShifterOp( HDataProcWithShifterOp* instruction) { - DCHECK(instruction->GetType() == Primitive::kPrimInt || - instruction->GetType() == Primitive::kPrimLong); + DCHECK(instruction->GetType() == DataType::Type::kInt32 || + instruction->GetType() == DataType::Type::kInt64); LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); - const bool overlap = instruction->GetType() == Primitive::kPrimLong && + const bool overlap = instruction->GetType() == DataType::Type::kInt64 && HDataProcWithShifterOp::IsExtensionOp(instruction->GetOpKind()); locations->SetInAt(0, Location::RequiresRegister()); @@ -8166,10 +8167,10 @@ void InstructionCodeGeneratorARMVIXL::VisitDataProcWithShifterOp( const HInstruction::InstructionKind kind = 
instruction->GetInstrKind(); const HDataProcWithShifterOp::OpKind op_kind = instruction->GetOpKind(); - if (instruction->GetType() == Primitive::kPrimInt) { + if (instruction->GetType() == DataType::Type::kInt32) { const vixl32::Register first = InputRegisterAt(instruction, 0); const vixl32::Register output = OutputRegister(instruction); - const vixl32::Register second = instruction->InputAt(1)->GetType() == Primitive::kPrimLong + const vixl32::Register second = instruction->InputAt(1)->GetType() == DataType::Type::kInt64 ? LowRegisterFrom(locations->InAt(1)) : InputRegisterAt(instruction, 1); @@ -8203,7 +8204,7 @@ void InstructionCodeGeneratorARMVIXL::VisitDataProcWithShifterOp( codegen_); } } else { - DCHECK_EQ(instruction->GetType(), Primitive::kPrimLong); + DCHECK_EQ(instruction->GetType(), DataType::Type::kInt64); if (HDataProcWithShifterOp::IsExtensionOp(op_kind)) { const vixl32::Register second = InputRegisterAt(instruction, 1); @@ -8319,7 +8320,7 @@ void InstructionCodeGeneratorARMVIXL::HandleBitwiseOperation(HBinaryOperation* i if (second.IsConstant()) { uint64_t value = static_cast<uint64_t>(Int64FromConstant(second.GetConstant())); uint32_t value_low = Low32Bits(value); - if (instruction->GetResultType() == Primitive::kPrimInt) { + if (instruction->GetResultType() == DataType::Type::kInt32) { vixl32::Register first_reg = InputRegisterAt(instruction, 0); vixl32::Register out_reg = OutputRegister(instruction); if (instruction->IsAnd()) { @@ -8331,7 +8332,7 @@ void InstructionCodeGeneratorARMVIXL::HandleBitwiseOperation(HBinaryOperation* i GenerateEorConst(out_reg, first_reg, value_low); } } else { - DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong); + DCHECK_EQ(instruction->GetResultType(), DataType::Type::kInt64); uint32_t value_high = High32Bits(value); vixl32::Register first_low = LowRegisterFrom(first); vixl32::Register first_high = HighRegisterFrom(first); @@ -8352,7 +8353,7 @@ void InstructionCodeGeneratorARMVIXL::HandleBitwiseOperation(HBinaryOperation* i return; } - if (instruction->GetResultType() == Primitive::kPrimInt) { + if (instruction->GetResultType() == DataType::Type::kInt32) { vixl32::Register first_reg = InputRegisterAt(instruction, 0); vixl32::Register second_reg = InputRegisterAt(instruction, 1); vixl32::Register out_reg = OutputRegister(instruction); @@ -8365,7 +8366,7 @@ void InstructionCodeGeneratorARMVIXL::HandleBitwiseOperation(HBinaryOperation* i __ Eor(out_reg, first_reg, second_reg); } } else { - DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong); + DCHECK_EQ(instruction->GetResultType(), DataType::Type::kInt64); vixl32::Register first_low = LowRegisterFrom(first); vixl32::Register first_high = HighRegisterFrom(first); vixl32::Register second_low = LowRegisterFrom(second); @@ -8590,7 +8591,7 @@ void CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier(HInstruction* i // gray_return_address: DCHECK_ALIGNED(offset, sizeof(mirror::HeapReference<mirror::Object>)); - vixl32::Register ref_reg = RegisterFrom(ref, Primitive::kPrimNot); + vixl32::Register ref_reg = RegisterFrom(ref, DataType::Type::kReference); bool narrow = CanEmitNarrowLdr(ref_reg, obj, offset); vixl32::Register base = obj; if (offset >= kReferenceLoadMinFarOffset) { @@ -8686,9 +8687,9 @@ void CodeGeneratorARMVIXL::GenerateArrayLoadWithBakerReadBarrier(HInstruction* i // gray_return_address: DCHECK(index.IsValid()); - vixl32::Register index_reg = RegisterFrom(index, Primitive::kPrimInt); - vixl32::Register ref_reg = RegisterFrom(ref, Primitive::kPrimNot); - 
vixl32::Register data_reg = RegisterFrom(temp, Primitive::kPrimInt); // Raw pointer. + vixl32::Register index_reg = RegisterFrom(index, DataType::Type::kInt32); + vixl32::Register ref_reg = RegisterFrom(ref, DataType::Type::kReference); + vixl32::Register data_reg = RegisterFrom(temp, DataType::Type::kInt32); // Raw pointer. DCHECK(!data_reg.Is(kBakerCcEntrypointRegister)); UseScratchRegisterScope temps(GetVIXLAssembler()); @@ -8836,7 +8837,7 @@ void CodeGeneratorARMVIXL::GenerateRawReferenceLoad(HInstruction* instruction, Location index, ScaleFactor scale_factor, bool needs_null_check) { - Primitive::Type type = Primitive::kPrimNot; + DataType::Type type = DataType::Type::kReference; vixl32::Register ref_reg = RegisterFrom(ref, type); // If needed, vixl::EmissionCheckScope guards are used to ensure @@ -9392,13 +9393,13 @@ void InstructionCodeGeneratorARMVIXL::VisitPackedSwitch(HPackedSwitch* switch_in } // Copy the result of a call into the given target. -void CodeGeneratorARMVIXL::MoveFromReturnRegister(Location trg, Primitive::Type type) { +void CodeGeneratorARMVIXL::MoveFromReturnRegister(Location trg, DataType::Type type) { if (!trg.IsValid()) { - DCHECK_EQ(type, Primitive::kPrimVoid); + DCHECK_EQ(type, DataType::Type::kVoid); return; } - DCHECK_NE(type, Primitive::kPrimVoid); + DCHECK_NE(type, DataType::Type::kVoid); Location return_loc = InvokeDexCallingConventionVisitorARMVIXL().GetReturnLocation(type); if (return_loc.Equals(trg)) { @@ -9407,9 +9408,9 @@ void CodeGeneratorARMVIXL::MoveFromReturnRegister(Location trg, Primitive::Type // TODO: Consider pairs in the parallel move resolver, then this could be nicely merged // with the last branch. - if (type == Primitive::kPrimLong) { + if (type == DataType::Type::kInt64) { TODO_VIXL32(FATAL); - } else if (type == Primitive::kPrimDouble) { + } else if (type == DataType::Type::kFloat64) { TODO_VIXL32(FATAL); } else { // Let the parallel move resolver take care of all of this. diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h index 337ecf1163..58b85259e7 100644 --- a/compiler/optimizing/code_generator_arm_vixl.h +++ b/compiler/optimizing/code_generator_arm_vixl.h @@ -173,8 +173,8 @@ class InvokeDexCallingConventionVisitorARMVIXL : public InvokeDexCallingConventi InvokeDexCallingConventionVisitorARMVIXL() {} virtual ~InvokeDexCallingConventionVisitorARMVIXL() {} - Location GetNextLocation(Primitive::Type type) OVERRIDE; - Location GetReturnLocation(Primitive::Type type) const OVERRIDE; + Location GetNextLocation(DataType::Type type) OVERRIDE; + Location GetReturnLocation(DataType::Type type) const OVERRIDE; Location GetMethodLocation() const OVERRIDE; private: @@ -194,20 +194,20 @@ class FieldAccessCallingConventionARMVIXL : public FieldAccessCallingConvention Location GetFieldIndexLocation() const OVERRIDE { return helpers::LocationFrom(vixl::aarch32::r0); } - Location GetReturnLocation(Primitive::Type type) const OVERRIDE { - return Primitive::Is64BitType(type) + Location GetReturnLocation(DataType::Type type) const OVERRIDE { + return DataType::Is64BitType(type) ? helpers::LocationFrom(vixl::aarch32::r0, vixl::aarch32::r1) : helpers::LocationFrom(vixl::aarch32::r0); } - Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE { - return Primitive::Is64BitType(type) + Location GetSetValueLocation(DataType::Type type, bool is_instance) const OVERRIDE { + return DataType::Is64BitType(type) ? 
helpers::LocationFrom(vixl::aarch32::r2, vixl::aarch32::r3) : (is_instance ? helpers::LocationFrom(vixl::aarch32::r2) : helpers::LocationFrom(vixl::aarch32::r1)); } - Location GetFpuLocation(Primitive::Type type) const OVERRIDE { - return Primitive::Is64BitType(type) + Location GetFpuLocation(DataType::Type type) const OVERRIDE { + return DataType::Is64BitType(type) ? helpers::LocationFrom(vixl::aarch32::s0, vixl::aarch32::s1) : helpers::LocationFrom(vixl::aarch32::s0); } @@ -434,7 +434,7 @@ class CodeGeneratorARMVIXL : public CodeGenerator { void GenerateFrameExit() OVERRIDE; void Bind(HBasicBlock* block) OVERRIDE; void MoveConstant(Location destination, int32_t value) OVERRIDE; - void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE; + void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE; void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE; size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; @@ -475,12 +475,12 @@ class CodeGeneratorARMVIXL : public CodeGenerator { // Helper method to move a 32-bit value between two locations. void Move32(Location destination, Location source); - void LoadFromShiftedRegOffset(Primitive::Type type, + void LoadFromShiftedRegOffset(DataType::Type type, Location out_loc, vixl::aarch32::Register base, vixl::aarch32::Register reg_index, vixl::aarch32::Condition cond = vixl::aarch32::al); - void StoreToShiftedRegOffset(Primitive::Type type, + void StoreToShiftedRegOffset(DataType::Type type, Location out_loc, vixl::aarch32::Register base, vixl::aarch32::Register reg_index, @@ -522,8 +522,8 @@ class CodeGeneratorARMVIXL : public CodeGenerator { const ArmInstructionSetFeatures& GetInstructionSetFeatures() const { return isa_features_; } - bool NeedsTwoRegisters(Primitive::Type type) const OVERRIDE { - return type == Primitive::kPrimDouble || type == Primitive::kPrimLong; + bool NeedsTwoRegisters(DataType::Type type) const OVERRIDE { + return type == DataType::Type::kFloat64 || type == DataType::Type::kInt64; } void ComputeSpillMask() OVERRIDE; @@ -551,7 +551,7 @@ class CodeGeneratorARMVIXL : public CodeGenerator { void GenerateVirtualCall( HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE; - void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE; + void MoveFromReturnRegister(Location trg, DataType::Type type) OVERRIDE; // The PcRelativePatchInfo is used for PC-relative addressing of dex cache arrays // and boot image strings/types. 
The only difference is the interpretation of the diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc index 0e6d210f10..a7c85574ee 100644 --- a/compiler/optimizing/code_generator_mips.cc +++ b/compiler/optimizing/code_generator_mips.cc @@ -49,30 +49,30 @@ constexpr bool kBakerReadBarrierThunksEnableForFields = true; constexpr bool kBakerReadBarrierThunksEnableForArrays = true; constexpr bool kBakerReadBarrierThunksEnableForGcRoots = true; -Location MipsReturnLocation(Primitive::Type return_type) { +Location MipsReturnLocation(DataType::Type return_type) { switch (return_type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimNot: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kReference: return Location::RegisterLocation(V0); - case Primitive::kPrimLong: + case DataType::Type::kInt64: return Location::RegisterPairLocation(V0, V1); - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: return Location::FpuRegisterLocation(F0); - case Primitive::kPrimVoid: + case DataType::Type::kVoid: return Location(); } UNREACHABLE(); } -Location InvokeDexCallingConventionVisitorMIPS::GetReturnLocation(Primitive::Type type) const { +Location InvokeDexCallingConventionVisitorMIPS::GetReturnLocation(DataType::Type type) const { return MipsReturnLocation(type); } @@ -80,16 +80,16 @@ Location InvokeDexCallingConventionVisitorMIPS::GetMethodLocation() const { return Location::RegisterLocation(kMethodRegisterArgument); } -Location InvokeDexCallingConventionVisitorMIPS::GetNextLocation(Primitive::Type type) { +Location InvokeDexCallingConventionVisitorMIPS::GetNextLocation(DataType::Type type) { Location next_location; switch (type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimNot: { + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kReference: { uint32_t gp_index = gp_index_++; if (gp_index < calling_convention.GetNumberOfRegisters()) { next_location = Location::RegisterLocation(calling_convention.GetRegisterAt(gp_index)); @@ -100,7 +100,7 @@ Location InvokeDexCallingConventionVisitorMIPS::GetNextLocation(Primitive::Type break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { uint32_t gp_index = gp_index_; gp_index_ += 2; if (gp_index + 1 < calling_convention.GetNumberOfRegisters()) { @@ -123,32 +123,32 @@ Location InvokeDexCallingConventionVisitorMIPS::GetNextLocation(Primitive::Type // Note: both float and double types are stored in even FPU registers. On 32 bit FPU, double // will take up the even/odd pair, while floats are stored in even regs only. // On 64 bit FPU, both double and float are stored in even registers only. 
- case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { uint32_t float_index = float_index_++; if (float_index < calling_convention.GetNumberOfFpuRegisters()) { next_location = Location::FpuRegisterLocation( calling_convention.GetFpuRegisterAt(float_index)); } else { size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_); - next_location = Primitive::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset) - : Location::StackSlot(stack_offset); + next_location = DataType::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset) + : Location::StackSlot(stack_offset); } break; } - case Primitive::kPrimVoid: + case DataType::Type::kVoid: LOG(FATAL) << "Unexpected parameter type " << type; break; } // Space on the stack is reserved for all arguments. - stack_index_ += Primitive::Is64BitType(type) ? 2 : 1; + stack_index_ += DataType::Is64BitType(type) ? 2 : 1; return next_location; } -Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type type) { +Location InvokeRuntimeCallingConvention::GetReturnLocation(DataType::Type type) { return MipsReturnLocation(type); } @@ -173,10 +173,10 @@ class BoundsCheckSlowPathMIPS : public SlowPathCodeMIPS { InvokeRuntimeCallingConvention calling_convention; codegen->EmitParallelMoves(locations->InAt(0), Location::RegisterLocation(calling_convention.GetRegisterAt(0)), - Primitive::kPrimInt, + DataType::Type::kInt32, locations->InAt(1), Location::RegisterLocation(calling_convention.GetRegisterAt(1)), - Primitive::kPrimInt); + DataType::Type::kInt32); QuickEntrypointEnum entrypoint = instruction_->AsBoundsCheck()->IsStringCharAt() ? kQuickThrowStringBounds : kQuickThrowArrayBounds; @@ -279,7 +279,7 @@ class LoadClassSlowPathMIPS : public SlowPathCodeMIPS { // Move the class to the desired location. 
if (out.IsValid()) { DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg())); - Primitive::Type type = instruction_->GetType(); + DataType::Type type = instruction_->GetType(); mips_codegen->MoveLocation(out, Location::RegisterLocation(calling_convention.GetRegisterAt(0)), type); @@ -372,7 +372,7 @@ class LoadStringSlowPathMIPS : public SlowPathCodeMIPS { &info_low->label); } - Primitive::Type type = instruction_->GetType(); + DataType::Type type = instruction_->GetType(); mips_codegen->MoveLocation(locations->Out(), Location::RegisterLocation(calling_convention.GetRegisterAt(0)), type); @@ -490,14 +490,14 @@ class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS { InvokeRuntimeCallingConvention calling_convention; codegen->EmitParallelMoves(locations->InAt(0), Location::RegisterLocation(calling_convention.GetRegisterAt(0)), - Primitive::kPrimNot, + DataType::Type::kReference, locations->InAt(1), Location::RegisterLocation(calling_convention.GetRegisterAt(1)), - Primitive::kPrimNot); + DataType::Type::kReference); if (instruction_->IsInstanceOf()) { mips_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this); CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>(); - Primitive::Type ret_type = instruction_->GetType(); + DataType::Type ret_type = instruction_->GetType(); Location ret_loc = calling_convention.GetReturnLocation(ret_type); mips_codegen->MoveLocation(locations->Out(), ret_loc, ret_type); } else { @@ -559,17 +559,17 @@ class ArraySetSlowPathMIPS : public SlowPathCodeMIPS { parallel_move.AddMove( locations->InAt(0), Location::RegisterLocation(calling_convention.GetRegisterAt(0)), - Primitive::kPrimNot, + DataType::Type::kReference, nullptr); parallel_move.AddMove( locations->InAt(1), Location::RegisterLocation(calling_convention.GetRegisterAt(1)), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); parallel_move.AddMove( locations->InAt(2), Location::RegisterLocation(calling_convention.GetRegisterAt(2)), - Primitive::kPrimNot, + DataType::Type::kReference, nullptr); codegen->GetMoveResolver()->EmitNativeCode(&parallel_move); @@ -969,16 +969,16 @@ class ReadBarrierForHeapReferenceSlowPathMIPS : public SlowPathCodeMIPS { HParallelMove parallel_move(codegen->GetGraph()->GetArena()); parallel_move.AddMove(ref_, Location::RegisterLocation(calling_convention.GetRegisterAt(0)), - Primitive::kPrimNot, + DataType::Type::kReference, nullptr); parallel_move.AddMove(obj_, Location::RegisterLocation(calling_convention.GetRegisterAt(1)), - Primitive::kPrimNot, + DataType::Type::kReference, nullptr); if (index.IsValid()) { parallel_move.AddMove(index, Location::RegisterLocation(calling_convention.GetRegisterAt(2)), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); codegen->GetMoveResolver()->EmitNativeCode(&parallel_move); } else { @@ -992,8 +992,8 @@ class ReadBarrierForHeapReferenceSlowPathMIPS : public SlowPathCodeMIPS { CheckEntrypointTypes< kQuickReadBarrierSlow, mirror::Object*, mirror::Object*, mirror::Object*, uint32_t>(); mips_codegen->MoveLocation(out_, - calling_convention.GetReturnLocation(Primitive::kPrimNot), - Primitive::kPrimNot); + calling_convention.GetReturnLocation(DataType::Type::kReference), + DataType::Type::kReference); RestoreLiveRegisters(codegen, locations); __ B(GetExitLabel()); @@ -1058,15 +1058,15 @@ class ReadBarrierForRootSlowPathMIPS : public SlowPathCodeMIPS { CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
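These slow paths pass an explicit DataType with every parallel move so the move resolver can tell 32-bit from 64-bit values and core from FPU values, which shows up in the MovS/MovD and register-pair handling that follows. A simplified dispatch sketch, not the actual CodeGeneratorMIPS::MoveLocation logic:

// Sketch: the four move shapes the type information selects between on MIPS32.
enum class MoveKind { kCore32, kCorePair64, kFpu32, kFpu64 };

constexpr MoveKind KindFor(bool is_fp, bool is_64bit) {
  return is_fp ? (is_64bit ? MoveKind::kFpu64 : MoveKind::kFpu32)
               : (is_64bit ? MoveKind::kCorePair64 : MoveKind::kCore32);
}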
mips_codegen->MoveLocation(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), root_, - Primitive::kPrimNot); + DataType::Type::kReference); mips_codegen->InvokeRuntime(kQuickReadBarrierForRootSlow, instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes<kQuickReadBarrierForRootSlow, mirror::Object*, GcRoot<mirror::Object>*>(); mips_codegen->MoveLocation(out_, - calling_convention.GetReturnLocation(Primitive::kPrimNot), - Primitive::kPrimNot); + calling_convention.GetReturnLocation(DataType::Type::kReference), + DataType::Type::kReference); RestoreLiveRegisters(codegen, locations); __ B(GetExitLabel()); @@ -1165,7 +1165,7 @@ void ParallelMoveResolverMIPS::EmitMove(size_t index) { void ParallelMoveResolverMIPS::EmitSwap(size_t index) { DCHECK_LT(index, moves_.size()); MoveOperands* move = moves_[index]; - Primitive::Type type = move->GetType(); + DataType::Type type = move->GetType(); Location loc1 = move->GetDestination(); Location loc2 = move->GetSource(); @@ -1186,12 +1186,12 @@ void ParallelMoveResolverMIPS::EmitSwap(size_t index) { } else if (loc1.IsFpuRegister() && loc2.IsFpuRegister()) { FRegister f1 = loc1.AsFpuRegister<FRegister>(); FRegister f2 = loc2.AsFpuRegister<FRegister>(); - if (type == Primitive::kPrimFloat) { + if (type == DataType::Type::kFloat32) { __ MovS(FTMP, f2); __ MovS(f2, f1); __ MovS(f1, FTMP); } else { - DCHECK_EQ(type, Primitive::kPrimDouble); + DCHECK_EQ(type, DataType::Type::kFloat64); __ MovD(FTMP, f2); __ MovD(f2, f1); __ MovD(f1, FTMP); @@ -1199,7 +1199,7 @@ void ParallelMoveResolverMIPS::EmitSwap(size_t index) { } else if ((loc1.IsRegister() && loc2.IsFpuRegister()) || (loc1.IsFpuRegister() && loc2.IsRegister())) { // Swap FPR and GPR. - DCHECK_EQ(type, Primitive::kPrimFloat); // Can only swap a float. + DCHECK_EQ(type, DataType::Type::kFloat32); // Can only swap a float. FRegister f1 = loc1.IsFpuRegister() ? loc1.AsFpuRegister<FRegister>() : loc2.AsFpuRegister<FRegister>(); Register r2 = loc1.IsRegister() ? loc1.AsRegister<Register>() : loc2.AsRegister<Register>(); @@ -1221,7 +1221,7 @@ void ParallelMoveResolverMIPS::EmitSwap(size_t index) { } else if ((loc1.IsRegisterPair() && loc2.IsFpuRegister()) || (loc1.IsFpuRegister() && loc2.IsRegisterPair())) { // Swap FPR and GPR register pair. - DCHECK_EQ(type, Primitive::kPrimDouble); + DCHECK_EQ(type, DataType::Type::kFloat64); FRegister f1 = loc1.IsFpuRegister() ? loc1.AsFpuRegister<FRegister>() : loc2.AsFpuRegister<FRegister>(); Register r2_l = loc1.IsRegisterPair() ? loc1.AsRegisterPairLow<Register>() @@ -1267,12 +1267,12 @@ void ParallelMoveResolverMIPS::EmitSwap(size_t index) { FRegister reg = loc1.IsFpuRegister() ? loc1.AsFpuRegister<FRegister>() : loc2.AsFpuRegister<FRegister>(); intptr_t offset = loc1.IsFpuRegister() ? 
loc2.GetStackIndex() : loc1.GetStackIndex(); - if (type == Primitive::kPrimFloat) { + if (type == DataType::Type::kFloat32) { __ MovS(FTMP, reg); __ LoadSFromOffset(reg, SP, offset); __ StoreSToOffset(FTMP, SP, offset); } else { - DCHECK_EQ(type, Primitive::kPrimDouble); + DCHECK_EQ(type, DataType::Type::kFloat64); __ MovD(FTMP, reg); __ LoadDFromOffset(reg, SP, offset); __ StoreDToOffset(FTMP, SP, offset); @@ -1462,7 +1462,7 @@ VectorRegister VectorRegisterFrom(Location location) { void CodeGeneratorMIPS::MoveLocation(Location destination, Location source, - Primitive::Type dst_type) { + DataType::Type dst_type) { if (source.Equals(destination)) { return; } @@ -1498,10 +1498,10 @@ void CodeGeneratorMIPS::MoveLocation(Location destination, } } else if (destination.IsFpuRegister()) { if (source.IsRegister()) { - DCHECK(!Primitive::Is64BitType(dst_type)); + DCHECK(!DataType::Is64BitType(dst_type)); __ Mtc1(source.AsRegister<Register>(), destination.AsFpuRegister<FRegister>()); } else if (source.IsRegisterPair()) { - DCHECK(Primitive::Is64BitType(dst_type)); + DCHECK(DataType::Is64BitType(dst_type)); FRegister dst = destination.AsFpuRegister<FRegister>(); Register src_high = source.AsRegisterPairHigh<Register>(); Register src_low = source.AsRegisterPairLow<Register>(); @@ -1512,20 +1512,20 @@ void CodeGeneratorMIPS::MoveLocation(Location destination, __ MoveV(VectorRegisterFrom(destination), VectorRegisterFrom(source)); } else { - if (Primitive::Is64BitType(dst_type)) { + if (DataType::Is64BitType(dst_type)) { __ MovD(destination.AsFpuRegister<FRegister>(), source.AsFpuRegister<FRegister>()); } else { - DCHECK_EQ(dst_type, Primitive::kPrimFloat); + DCHECK_EQ(dst_type, DataType::Type::kFloat32); __ MovS(destination.AsFpuRegister<FRegister>(), source.AsFpuRegister<FRegister>()); } } } else if (source.IsSIMDStackSlot()) { __ LoadQFromOffset(destination.AsFpuRegister<FRegister>(), SP, source.GetStackIndex()); } else if (source.IsDoubleStackSlot()) { - DCHECK(Primitive::Is64BitType(dst_type)); + DCHECK(DataType::Is64BitType(dst_type)); __ LoadDFromOffset(destination.AsFpuRegister<FRegister>(), SP, source.GetStackIndex()); } else { - DCHECK(!Primitive::Is64BitType(dst_type)); + DCHECK(!DataType::Is64BitType(dst_type)); DCHECK(source.IsStackSlot()) << "Cannot move from " << source << " to " << destination; __ LoadSFromOffset(destination.AsFpuRegister<FRegister>(), SP, source.GetStackIndex()); } @@ -2022,9 +2022,9 @@ InstructionCodeGeneratorMIPS::InstructionCodeGeneratorMIPS(HGraph* graph, void LocationsBuilderMIPS::HandleBinaryOp(HBinaryOperation* instruction) { DCHECK_EQ(instruction->InputCount(), 2U); LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); - Primitive::Type type = instruction->GetResultType(); + DataType::Type type = instruction->GetResultType(); switch (type) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { locations->SetInAt(0, Location::RequiresRegister()); HInstruction* right = instruction->InputAt(1); bool can_use_imm = false; @@ -2047,15 +2047,15 @@ void LocationsBuilderMIPS::HandleBinaryOp(HBinaryOperation* instruction) { break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case 
DataType::Type::kFloat64: DCHECK(instruction->IsAdd() || instruction->IsSub()); locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); @@ -2068,11 +2068,11 @@ void LocationsBuilderMIPS::HandleBinaryOp(HBinaryOperation* instruction) { } void InstructionCodeGeneratorMIPS::HandleBinaryOp(HBinaryOperation* instruction) { - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); LocationSummary* locations = instruction->GetLocations(); switch (type) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { Register dst = locations->Out().AsRegister<Register>(); Register lhs = locations->InAt(0).AsRegister<Register>(); Location rhs_location = locations->InAt(1); @@ -2116,7 +2116,7 @@ void InstructionCodeGeneratorMIPS::HandleBinaryOp(HBinaryOperation* instruction) break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { Register dst_high = locations->Out().AsRegisterPairHigh<Register>(); Register dst_low = locations->Out().AsRegisterPairLow<Register>(); Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>(); @@ -2257,20 +2257,20 @@ void InstructionCodeGeneratorMIPS::HandleBinaryOp(HBinaryOperation* instruction) break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { FRegister dst = locations->Out().AsFpuRegister<FRegister>(); FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>(); FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>(); if (instruction->IsAdd()) { - if (type == Primitive::kPrimFloat) { + if (type == DataType::Type::kFloat32) { __ AddS(dst, lhs, rhs); } else { __ AddD(dst, lhs, rhs); } } else { DCHECK(instruction->IsSub()); - if (type == Primitive::kPrimFloat) { + if (type == DataType::Type::kFloat32) { __ SubS(dst, lhs, rhs); } else { __ SubD(dst, lhs, rhs); @@ -2288,14 +2288,14 @@ void LocationsBuilderMIPS::HandleShift(HBinaryOperation* instr) { DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor()); LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr); - Primitive::Type type = instr->GetResultType(); + DataType::Type type = instr->GetResultType(); switch (type) { - case Primitive::kPrimInt: + case DataType::Type::kInt32: locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1))); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1))); locations->SetOut(Location::RequiresRegister()); @@ -2310,20 +2310,20 @@ static constexpr size_t kMipsBitsPerWord = kMipsWordSize * kBitsPerByte; void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) { DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor()); LocationSummary* locations = instr->GetLocations(); - Primitive::Type type = instr->GetType(); + DataType::Type type = instr->GetType(); Location rhs_location = locations->InAt(1); bool use_imm = rhs_location.IsConstant(); Register rhs_reg = use_imm ? ZERO : rhs_location.AsRegister<Register>(); int64_t rhs_imm = use_imm ? CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant()) : 0; const uint32_t shift_mask = - (type == Primitive::kPrimInt) ? 
kMaxIntShiftDistance : kMaxLongShiftDistance; + (type == DataType::Type::kInt32) ? kMaxIntShiftDistance : kMaxLongShiftDistance; const uint32_t shift_value = rhs_imm & shift_mask; // Are the INS (Insert Bit Field) and ROTR instructions supported? bool has_ins_rotr = codegen_->GetInstructionSetFeatures().IsMipsIsaRevGreaterThanEqual2(); switch (type) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { Register dst = locations->Out().AsRegister<Register>(); Register lhs = locations->InAt(0).AsRegister<Register>(); if (use_imm) { @@ -2372,7 +2372,7 @@ void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) { break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { Register dst_high = locations->Out().AsRegisterPairHigh<Register>(); Register dst_low = locations->Out().AsRegisterPairLow<Register>(); Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>(); @@ -2536,9 +2536,9 @@ void InstructionCodeGeneratorMIPS::VisitAnd(HAnd* instruction) { } void LocationsBuilderMIPS::VisitArrayGet(HArrayGet* instruction) { - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); bool object_array_get_with_read_barrier = - kEmitCompilerReadBarrier && (type == Primitive::kPrimNot); + kEmitCompilerReadBarrier && (type == DataType::Type::kReference); LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, object_array_get_with_read_barrier @@ -2549,7 +2549,7 @@ void LocationsBuilderMIPS::VisitArrayGet(HArrayGet* instruction) { } locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); - if (Primitive::IsFloatingPointType(type)) { + if (DataType::IsFloatingPointType(type)) { locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); } else { // The output overlaps in the case of an object array get with @@ -2588,11 +2588,11 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) { uint32_t data_offset = CodeGenerator::GetArrayDataOffset(instruction); auto null_checker = GetImplicitNullChecker(instruction, codegen_); - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); const bool maybe_compressed_char_at = mirror::kUseStringCompression && instruction->IsStringCharAt(); switch (type) { - case Primitive::kPrimBoolean: { + case DataType::Type::kBool: { Register out = out_loc.AsRegister<Register>(); if (index.IsConstant()) { size_t offset = @@ -2605,7 +2605,7 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimByte: { + case DataType::Type::kInt8: { Register out = out_loc.AsRegister<Register>(); if (index.IsConstant()) { size_t offset = @@ -2618,7 +2618,7 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimShort: { + case DataType::Type::kInt16: { Register out = out_loc.AsRegister<Register>(); if (index.IsConstant()) { size_t offset = @@ -2631,7 +2631,7 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimChar: { + case DataType::Type::kUint16: { Register out = out_loc.AsRegister<Register>(); if (maybe_compressed_char_at) { uint32_t count_offset = mirror::String::CountOffset().Uint32Value(); @@ -2683,7 +2683,7 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { 
DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t)); Register out = out_loc.AsRegister<Register>(); if (index.IsConstant()) { @@ -2697,7 +2697,7 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimNot: { + case DataType::Type::kReference: { static_assert( sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t), "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes."); @@ -2757,7 +2757,7 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { Register out = out_loc.AsRegisterPairLow<Register>(); if (index.IsConstant()) { size_t offset = @@ -2770,7 +2770,7 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { FRegister out = out_loc.AsFpuRegister<FRegister>(); if (index.IsConstant()) { size_t offset = @@ -2783,7 +2783,7 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { FRegister out = out_loc.AsFpuRegister<FRegister>(); if (index.IsConstant()) { size_t offset = @@ -2796,7 +2796,7 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimVoid: + case DataType::Type::kVoid: LOG(FATAL) << "Unreachable type " << instruction->GetType(); UNREACHABLE(); } @@ -2841,7 +2841,7 @@ Location LocationsBuilderMIPS::FpuRegisterOrConstantForStore(HInstruction* instr } void LocationsBuilderMIPS::VisitArraySet(HArraySet* instruction) { - Primitive::Type value_type = instruction->GetComponentType(); + DataType::Type value_type = instruction->GetComponentType(); bool needs_write_barrier = CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue()); @@ -2855,7 +2855,7 @@ void LocationsBuilderMIPS::VisitArraySet(HArraySet* instruction) { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); - if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) { + if (DataType::IsFloatingPointType(instruction->InputAt(2)->GetType())) { locations->SetInAt(2, FpuRegisterOrConstantForStore(instruction->InputAt(2))); } else { locations->SetInAt(2, RegisterOrZeroConstant(instruction->InputAt(2))); @@ -2871,7 +2871,7 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) { Register obj = locations->InAt(0).AsRegister<Register>(); Location index = locations->InAt(1); Location value_location = locations->InAt(2); - Primitive::Type value_type = instruction->GetComponentType(); + DataType::Type value_type = instruction->GetComponentType(); bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck(); bool needs_write_barrier = CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue()); @@ -2879,8 +2879,8 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) { Register base_reg = index.IsConstant() ? 
obj : TMP; switch (value_type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: { + case DataType::Type::kBool: + case DataType::Type::kInt8: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value(); if (index.IsConstant()) { data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1; @@ -2897,8 +2897,8 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimShort: - case Primitive::kPrimChar: { + case DataType::Type::kInt16: + case DataType::Type::kUint16: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value(); if (index.IsConstant()) { data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2; @@ -2915,7 +2915,7 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value(); if (index.IsConstant()) { data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4; @@ -2932,7 +2932,7 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimNot: { + case DataType::Type::kReference: { if (value_location.IsConstant()) { // Just setting null. uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value(); @@ -3047,7 +3047,7 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value(); if (index.IsConstant()) { data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8; @@ -3064,7 +3064,7 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value(); if (index.IsConstant()) { data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4; @@ -3081,7 +3081,7 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value(); if (index.IsConstant()) { data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8; @@ -3098,7 +3098,7 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimVoid: + case DataType::Type::kVoid: LOG(FATAL) << "Unreachable type " << instruction->GetType(); UNREACHABLE(); } @@ -3383,31 +3383,31 @@ void InstructionCodeGeneratorMIPS::VisitClinitCheck(HClinitCheck* check) { } void LocationsBuilderMIPS::VisitCompare(HCompare* compare) { - Primitive::Type in_type = compare->InputAt(0)->GetType(); + DataType::Type in_type = compare->InputAt(0)->GetType(); LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall); switch (in_type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimChar: - case Primitive::kPrimInt: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kUint16: + case DataType::Type::kInt32: locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); 
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); // Output overlaps because it is written before doing the low comparison. locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); @@ -3421,18 +3421,18 @@ void LocationsBuilderMIPS::VisitCompare(HCompare* compare) { void InstructionCodeGeneratorMIPS::VisitCompare(HCompare* instruction) { LocationSummary* locations = instruction->GetLocations(); Register res = locations->Out().AsRegister<Register>(); - Primitive::Type in_type = instruction->InputAt(0)->GetType(); + DataType::Type in_type = instruction->InputAt(0)->GetType(); bool isR6 = codegen_->GetInstructionSetFeatures().IsR6(); // 0 if: left == right // 1 if: left > right // -1 if: left < right switch (in_type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimChar: - case Primitive::kPrimInt: { + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kUint16: + case DataType::Type::kInt32: { Register lhs = locations->InAt(0).AsRegister<Register>(); Register rhs = locations->InAt(1).AsRegister<Register>(); __ Slt(TMP, lhs, rhs); @@ -3440,7 +3440,7 @@ void InstructionCodeGeneratorMIPS::VisitCompare(HCompare* instruction) { __ Subu(res, res, TMP); break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { MipsLabel done; Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>(); Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>(); @@ -3458,7 +3458,7 @@ void InstructionCodeGeneratorMIPS::VisitCompare(HCompare* instruction) { break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { bool gt_bias = instruction->IsGtBias(); FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>(); FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>(); @@ -3498,7 +3498,7 @@ void InstructionCodeGeneratorMIPS::VisitCompare(HCompare* instruction) { __ Bind(&done); break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { bool gt_bias = instruction->IsGtBias(); FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>(); FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>(); @@ -3548,13 +3548,13 @@ void LocationsBuilderMIPS::HandleCondition(HCondition* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); switch (instruction->InputAt(0)->GetType()) { default: - case Primitive::kPrimLong: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); break; @@ -3569,7 +3569,7 @@ void InstructionCodeGeneratorMIPS::HandleCondition(HCondition* instruction) { return; } - 
Primitive::Type type = instruction->InputAt(0)->GetType(); + DataType::Type type = instruction->InputAt(0)->GetType(); LocationSummary* locations = instruction->GetLocations(); switch (type) { @@ -3578,12 +3578,12 @@ void InstructionCodeGeneratorMIPS::HandleCondition(HCondition* instruction) { GenerateIntCompare(instruction->GetCondition(), locations); return; - case Primitive::kPrimLong: + case DataType::Type::kInt64: GenerateLongCompare(instruction->GetCondition(), locations); return; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: GenerateFpCompare(instruction->GetCondition(), instruction->IsGtBias(), type, locations); return; } @@ -3591,7 +3591,7 @@ void InstructionCodeGeneratorMIPS::HandleCondition(HCondition* instruction) { void InstructionCodeGeneratorMIPS::DivRemOneOrMinusOne(HBinaryOperation* instruction) { DCHECK(instruction->IsDiv() || instruction->IsRem()); - DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimInt); + DCHECK_EQ(instruction->GetResultType(), DataType::Type::kInt32); LocationSummary* locations = instruction->GetLocations(); Location second = locations->InAt(1); @@ -3615,7 +3615,7 @@ void InstructionCodeGeneratorMIPS::DivRemOneOrMinusOne(HBinaryOperation* instruc void InstructionCodeGeneratorMIPS::DivRemByPowerOfTwo(HBinaryOperation* instruction) { DCHECK(instruction->IsDiv() || instruction->IsRem()); - DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimInt); + DCHECK_EQ(instruction->GetResultType(), DataType::Type::kInt32); LocationSummary* locations = instruction->GetLocations(); Location second = locations->InAt(1); @@ -3664,7 +3664,7 @@ void InstructionCodeGeneratorMIPS::DivRemByPowerOfTwo(HBinaryOperation* instruct void InstructionCodeGeneratorMIPS::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) { DCHECK(instruction->IsDiv() || instruction->IsRem()); - DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimInt); + DCHECK_EQ(instruction->GetResultType(), DataType::Type::kInt32); LocationSummary* locations = instruction->GetLocations(); Location second = locations->InAt(1); @@ -3715,7 +3715,7 @@ void InstructionCodeGeneratorMIPS::GenerateDivRemWithAnyConstant(HBinaryOperatio void InstructionCodeGeneratorMIPS::GenerateDivRemIntegral(HBinaryOperation* instruction) { DCHECK(instruction->IsDiv() || instruction->IsRem()); - DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimInt); + DCHECK_EQ(instruction->GetResultType(), DataType::Type::kInt32); LocationSummary* locations = instruction->GetLocations(); Register out = locations->Out().AsRegister<Register>(); @@ -3754,21 +3754,21 @@ void InstructionCodeGeneratorMIPS::GenerateDivRemIntegral(HBinaryOperation* inst } void LocationsBuilderMIPS::VisitDiv(HDiv* div) { - Primitive::Type type = div->GetResultType(); - LocationSummary::CallKind call_kind = (type == Primitive::kPrimLong) + DataType::Type type = div->GetResultType(); + LocationSummary::CallKind call_kind = (type == DataType::Type::kInt64) ? 
LocationSummary::kCallOnMainOnly : LocationSummary::kNoCall; LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind); switch (type) { - case Primitive::kPrimInt: + case DataType::Type::kInt32: locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1))); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterPairLocation( calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1))); @@ -3778,8 +3778,8 @@ void LocationsBuilderMIPS::VisitDiv(HDiv* div) { break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); @@ -3791,24 +3791,24 @@ void LocationsBuilderMIPS::VisitDiv(HDiv* div) { } void InstructionCodeGeneratorMIPS::VisitDiv(HDiv* instruction) { - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); LocationSummary* locations = instruction->GetLocations(); switch (type) { - case Primitive::kPrimInt: + case DataType::Type::kInt32: GenerateDivRemIntegral(instruction); break; - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { codegen_->InvokeRuntime(kQuickLdiv, instruction, instruction->GetDexPc()); CheckEntrypointTypes<kQuickLdiv, int64_t, int64_t, int64_t>(); break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { FRegister dst = locations->Out().AsFpuRegister<FRegister>(); FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>(); FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>(); - if (type == Primitive::kPrimFloat) { + if (type == DataType::Type::kFloat32) { __ DivS(dst, lhs, rhs); } else { __ DivD(dst, lhs, rhs); @@ -3829,14 +3829,14 @@ void InstructionCodeGeneratorMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathMIPS(instruction); codegen_->AddSlowPath(slow_path); Location value = instruction->GetLocations()->InAt(0); - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); switch (type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: { + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: { if (value.IsConstant()) { if (value.GetConstant()->AsIntConstant()->GetValue() == 0) { __ B(slow_path->GetEntryLabel()); @@ -3850,7 +3850,7 @@ void InstructionCodeGeneratorMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) } break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { if (value.IsConstant()) { if (value.GetConstant()->AsLongConstant()->GetValue() == 0) { __ B(slow_path->GetEntryLabel()); @@ -4786,13 +4786,13 @@ void InstructionCodeGeneratorMIPS::GenerateLongCompareAndBranch(IfCondition cond void InstructionCodeGeneratorMIPS::GenerateFpCompare(IfCondition cond, bool gt_bias, - Primitive::Type type, + DataType::Type type, LocationSummary* 
locations) { Register dst = locations->Out().AsRegister<Register>(); FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>(); FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>(); bool isR6 = codegen_->GetInstructionSetFeatures().IsR6(); - if (type == Primitive::kPrimFloat) { + if (type == DataType::Type::kFloat32) { if (isR6) { switch (cond) { case kCondEQ: @@ -4899,7 +4899,7 @@ void InstructionCodeGeneratorMIPS::GenerateFpCompare(IfCondition cond, } } } else { - DCHECK_EQ(type, Primitive::kPrimDouble); + DCHECK_EQ(type, DataType::Type::kFloat64); if (isR6) { switch (cond) { case kCondEQ: @@ -5010,13 +5010,13 @@ void InstructionCodeGeneratorMIPS::GenerateFpCompare(IfCondition cond, bool InstructionCodeGeneratorMIPS::MaterializeFpCompareR2(IfCondition cond, bool gt_bias, - Primitive::Type type, + DataType::Type type, LocationSummary* input_locations, int cc) { FRegister lhs = input_locations->InAt(0).AsFpuRegister<FRegister>(); FRegister rhs = input_locations->InAt(1).AsFpuRegister<FRegister>(); CHECK(!codegen_->GetInstructionSetFeatures().IsR6()); - if (type == Primitive::kPrimFloat) { + if (type == DataType::Type::kFloat32) { switch (cond) { case kCondEQ: __ CeqS(cc, lhs, rhs); @@ -5057,7 +5057,7 @@ bool InstructionCodeGeneratorMIPS::MaterializeFpCompareR2(IfCondition cond, UNREACHABLE(); } } else { - DCHECK_EQ(type, Primitive::kPrimDouble); + DCHECK_EQ(type, DataType::Type::kFloat64); switch (cond) { case kCondEQ: __ CeqD(cc, lhs, rhs); @@ -5102,13 +5102,13 @@ bool InstructionCodeGeneratorMIPS::MaterializeFpCompareR2(IfCondition cond, bool InstructionCodeGeneratorMIPS::MaterializeFpCompareR6(IfCondition cond, bool gt_bias, - Primitive::Type type, + DataType::Type type, LocationSummary* input_locations, FRegister dst) { FRegister lhs = input_locations->InAt(0).AsFpuRegister<FRegister>(); FRegister rhs = input_locations->InAt(1).AsFpuRegister<FRegister>(); CHECK(codegen_->GetInstructionSetFeatures().IsR6()); - if (type == Primitive::kPrimFloat) { + if (type == DataType::Type::kFloat32) { switch (cond) { case kCondEQ: __ CmpEqS(dst, lhs, rhs); @@ -5149,7 +5149,7 @@ bool InstructionCodeGeneratorMIPS::MaterializeFpCompareR6(IfCondition cond, UNREACHABLE(); } } else { - DCHECK_EQ(type, Primitive::kPrimDouble); + DCHECK_EQ(type, DataType::Type::kFloat64); switch (cond) { case kCondEQ: __ CmpEqD(dst, lhs, rhs); @@ -5194,13 +5194,13 @@ bool InstructionCodeGeneratorMIPS::MaterializeFpCompareR6(IfCondition cond, void InstructionCodeGeneratorMIPS::GenerateFpCompareAndBranch(IfCondition cond, bool gt_bias, - Primitive::Type type, + DataType::Type type, LocationSummary* locations, MipsLabel* label) { FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>(); FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>(); bool isR6 = codegen_->GetInstructionSetFeatures().IsR6(); - if (type == Primitive::kPrimFloat) { + if (type == DataType::Type::kFloat32) { if (isR6) { switch (cond) { case kCondEQ: @@ -5295,7 +5295,7 @@ void InstructionCodeGeneratorMIPS::GenerateFpCompareAndBranch(IfCondition cond, } } } else { - DCHECK_EQ(type, Primitive::kPrimDouble); + DCHECK_EQ(type, DataType::Type::kFloat64); if (isR6) { switch (cond) { case kCondEQ: @@ -5437,7 +5437,7 @@ void InstructionCodeGeneratorMIPS::GenerateTestAndBranch(HInstruction* instructi // The condition instruction has not been materialized, use its inputs as // the comparison and its condition as the branch condition. 
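Note on the hunk that follows: when the condition feeding a branch has not been materialized, the generator dispatches on the DataType of the condition's inputs rather than on the condition instruction itself. A standalone model of that dispatch (the enum and names below are simplified stand-ins for illustration, not the ART classes):

#include <iostream>

namespace sketch {

// Stand-in for DataType::Type; values mirror the cases used in this file.
enum class Type { kBool, kInt8, kUint16, kInt16, kInt32, kInt64, kFloat32, kFloat64, kReference };

// Which MIPS helper a non-materialized condition would be lowered with.
const char* BranchHelperFor(Type input_type) {
  switch (input_type) {
    case Type::kInt64:
      return "GenerateLongCompareAndBranch";   // operates on register pairs on MIPS32
    case Type::kFloat32:
    case Type::kFloat64:
      return "GenerateFpCompareAndBranch";     // needs gt_bias to order NaNs
    default:
      return "GenerateIntCompareAndBranch";    // bool/int8/uint16/int16/int32/reference
  }
}

}  // namespace sketch

int main() {
  std::cout << sketch::BranchHelperFor(sketch::Type::kFloat64) << "\n";  // prints the FP helper
  return 0;
}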
HCondition* condition = cond->AsCondition(); - Primitive::Type type = condition->InputAt(0)->GetType(); + DataType::Type type = condition->InputAt(0)->GetType(); LocationSummary* locations = cond->GetLocations(); IfCondition if_cond = condition->GetCondition(); MipsLabel* branch_target = true_target; @@ -5451,11 +5451,11 @@ void InstructionCodeGeneratorMIPS::GenerateTestAndBranch(HInstruction* instructi default: GenerateIntCompareAndBranch(if_cond, locations, branch_target); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: GenerateLongCompareAndBranch(if_cond, locations, branch_target); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: GenerateFpCompareAndBranch(if_cond, condition->IsGtBias(), type, locations, branch_target); break; } @@ -5520,8 +5520,9 @@ static bool CanMoveConditionally(HSelect* select, bool is_r6, LocationSummary* l HInstruction* cond = select->InputAt(/* condition_input_index */ 2); HCondition* condition = cond->AsCondition(); - Primitive::Type cond_type = materialized ? Primitive::kPrimInt : condition->InputAt(0)->GetType(); - Primitive::Type dst_type = select->GetType(); + DataType::Type cond_type = + materialized ? DataType::Type::kInt32 : condition->InputAt(0)->GetType(); + DataType::Type dst_type = select->GetType(); HConstant* cst_true_value = select->GetTrueValue()->AsConstant(); HConstant* cst_false_value = select->GetFalseValue()->AsConstant(); @@ -5563,7 +5564,7 @@ static bool CanMoveConditionally(HSelect* select, bool is_r6, LocationSummary* l use_const_for_true_in = is_true_value_zero_constant; } break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Moving long on int condition. if (is_r6) { if (is_true_value_zero_constant) { @@ -5586,8 +5587,8 @@ static bool CanMoveConditionally(HSelect* select, bool is_r6, LocationSummary* l use_const_for_true_in = is_true_value_zero_constant; } break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: // Moving float/double on int condition. if (is_r6) { if (materialized) { @@ -5618,12 +5619,12 @@ static bool CanMoveConditionally(HSelect* select, bool is_r6, LocationSummary* l break; } break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: // We don't materialize long comparison now // and use conditional branches instead. break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: switch (dst_type) { default: // Moving int on float/double condition. @@ -5651,7 +5652,7 @@ static bool CanMoveConditionally(HSelect* select, bool is_r6, LocationSummary* l use_const_for_true_in = is_true_value_zero_constant; } break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Moving long on float/double condition. if (is_r6) { if (is_true_value_zero_constant) { @@ -5676,8 +5677,8 @@ static bool CanMoveConditionally(HSelect* select, bool is_r6, LocationSummary* l use_const_for_true_in = is_true_value_zero_constant; } break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: // Moving float/double on float/double condition. 
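The conditional-move logic around here boils down to pairing where the condition lives (a GPR versus an FPU condition code) with the destination's DataType to pick a move family. A rough sketch of that pairing for the R2 path, with the R6 SELEQZ/SELNEZ variants left out for brevity; treat the exact eligibility rules as an assumption of the sketch:

#include <iostream>
#include <string>

namespace sketch {

enum class Type { kInt32, kInt64, kFloat32, kFloat64 };

// Which MIPS32R2 conditional-move family ends up being used: the condition
// lives either in a GPR (MOVZ/MOVN) or in an FPU condition code (MOVF/MOVT),
// and the destination type selects the GPR, register-pair, or FP flavour.
std::string MoveFamilyR2(bool cond_in_fpu_cc, Type dst_type) {
  std::string base = cond_in_fpu_cc ? "movf/movt" : "movz/movn";
  switch (dst_type) {
    case Type::kInt64:   return base + " on a register pair (two moves)";
    case Type::kFloat32: return base + ".s";
    case Type::kFloat64: return base + ".d";
    default:             return base + " on a single GPR";
  }
}

}  // namespace sketch

int main() {
  std::cout << sketch::MoveFamilyR2(/*cond_in_fpu_cc=*/true, sketch::Type::kFloat64) << "\n";
  return 0;
}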
if (is_r6) { can_move_conditionally = true; @@ -5713,7 +5714,7 @@ static bool CanMoveConditionally(HSelect* select, bool is_r6, LocationSummary* l locations_to_set->SetInAt(0, Location::ConstantLocation(cst_false_value)); } else { locations_to_set->SetInAt(0, - Primitive::IsFloatingPointType(dst_type) + DataType::IsFloatingPointType(dst_type) ? Location::RequiresFpuRegister() : Location::RequiresRegister()); } @@ -5721,7 +5722,7 @@ static bool CanMoveConditionally(HSelect* select, bool is_r6, LocationSummary* l locations_to_set->SetInAt(1, Location::ConstantLocation(cst_true_value)); } else { locations_to_set->SetInAt(1, - Primitive::IsFloatingPointType(dst_type) + DataType::IsFloatingPointType(dst_type) ? Location::RequiresFpuRegister() : Location::RequiresRegister()); } @@ -5734,7 +5735,7 @@ static bool CanMoveConditionally(HSelect* select, bool is_r6, LocationSummary* l if (is_out_same_as_first_in) { locations_to_set->SetOut(Location::SameAsFirstInput()); } else { - locations_to_set->SetOut(Primitive::IsFloatingPointType(dst_type) + locations_to_set->SetOut(DataType::IsFloatingPointType(dst_type) ? Location::RequiresFpuRegister() : Location::RequiresRegister()); } @@ -5752,9 +5753,9 @@ void InstructionCodeGeneratorMIPS::GenConditionalMoveR2(HSelect* select) { HInstruction* cond = select->InputAt(/* condition_input_index */ 2); Register cond_reg = TMP; int cond_cc = 0; - Primitive::Type cond_type = Primitive::kPrimInt; + DataType::Type cond_type = DataType::Type::kInt32; bool cond_inverted = false; - Primitive::Type dst_type = select->GetType(); + DataType::Type dst_type = select->GetType(); if (IsBooleanValueOrMaterializedCondition(cond)) { cond_reg = locations->InAt(/* condition_input_index */ 2).AsRegister<Register>(); @@ -5765,11 +5766,11 @@ void InstructionCodeGeneratorMIPS::GenConditionalMoveR2(HSelect* select) { cond_type = condition->InputAt(0)->GetType(); switch (cond_type) { default: - DCHECK_NE(cond_type, Primitive::kPrimLong); + DCHECK_NE(cond_type, DataType::Type::kInt64); cond_inverted = MaterializeIntCompare(if_cond, cond_locations, cond_reg); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: cond_inverted = MaterializeFpCompareR2(if_cond, condition->IsGtBias(), cond_type, @@ -5799,7 +5800,7 @@ void InstructionCodeGeneratorMIPS::GenConditionalMoveR2(HSelect* select) { __ Movn(dst.AsRegister<Register>(), src_reg, cond_reg); } break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: if (cond_inverted) { __ Movz(dst.AsRegisterPairLow<Register>(), src_reg, cond_reg); __ Movz(dst.AsRegisterPairHigh<Register>(), src_reg_high, cond_reg); @@ -5808,14 +5809,14 @@ void InstructionCodeGeneratorMIPS::GenConditionalMoveR2(HSelect* select) { __ Movn(dst.AsRegisterPairHigh<Register>(), src_reg_high, cond_reg); } break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: if (cond_inverted) { __ MovzS(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_reg); } else { __ MovnS(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_reg); } break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: if (cond_inverted) { __ MovzD(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_reg); } else { @@ -5824,11 +5825,11 @@ void InstructionCodeGeneratorMIPS::GenConditionalMoveR2(HSelect* select) { break; } break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: LOG(FATAL) << "Unreachable"; UNREACHABLE(); - case 
Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: switch (dst_type) { default: if (cond_inverted) { @@ -5837,7 +5838,7 @@ void InstructionCodeGeneratorMIPS::GenConditionalMoveR2(HSelect* select) { __ Movt(dst.AsRegister<Register>(), src_reg, cond_cc); } break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: if (cond_inverted) { __ Movf(dst.AsRegisterPairLow<Register>(), src_reg, cond_cc); __ Movf(dst.AsRegisterPairHigh<Register>(), src_reg_high, cond_cc); @@ -5846,14 +5847,14 @@ void InstructionCodeGeneratorMIPS::GenConditionalMoveR2(HSelect* select) { __ Movt(dst.AsRegisterPairHigh<Register>(), src_reg_high, cond_cc); } break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: if (cond_inverted) { __ MovfS(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_cc); } else { __ MovtS(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_cc); } break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: if (cond_inverted) { __ MovfD(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_cc); } else { @@ -5873,9 +5874,9 @@ void InstructionCodeGeneratorMIPS::GenConditionalMoveR6(HSelect* select) { HInstruction* cond = select->InputAt(/* condition_input_index */ 2); Register cond_reg = TMP; FRegister fcond_reg = FTMP; - Primitive::Type cond_type = Primitive::kPrimInt; + DataType::Type cond_type = DataType::Type::kInt32; bool cond_inverted = false; - Primitive::Type dst_type = select->GetType(); + DataType::Type dst_type = select->GetType(); if (IsBooleanValueOrMaterializedCondition(cond)) { cond_reg = locations->InAt(/* condition_input_index */ 2).AsRegister<Register>(); @@ -5886,11 +5887,11 @@ void InstructionCodeGeneratorMIPS::GenConditionalMoveR6(HSelect* select) { cond_type = condition->InputAt(0)->GetType(); switch (cond_type) { default: - DCHECK_NE(cond_type, Primitive::kPrimLong); + DCHECK_NE(cond_type, DataType::Type::kInt64); cond_inverted = MaterializeIntCompare(if_cond, cond_locations, cond_reg); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: cond_inverted = MaterializeFpCompareR6(if_cond, condition->IsGtBias(), cond_type, @@ -5909,7 +5910,7 @@ void InstructionCodeGeneratorMIPS::GenConditionalMoveR6(HSelect* select) { switch (dst_type) { default: - if (Primitive::IsFloatingPointType(cond_type)) { + if (DataType::IsFloatingPointType(cond_type)) { __ Mfc1(cond_reg, fcond_reg); } if (true_src.IsConstant()) { @@ -5936,8 +5937,8 @@ void InstructionCodeGeneratorMIPS::GenConditionalMoveR6(HSelect* select) { __ Or(dst.AsRegister<Register>(), AT, TMP); } break; - case Primitive::kPrimLong: { - if (Primitive::IsFloatingPointType(cond_type)) { + case DataType::Type::kInt64: { + if (DataType::IsFloatingPointType(cond_type)) { __ Mfc1(cond_reg, fcond_reg); } Register dst_lo = dst.AsRegisterPairLow<Register>(); @@ -5966,8 +5967,8 @@ void InstructionCodeGeneratorMIPS::GenConditionalMoveR6(HSelect* select) { } break; } - case Primitive::kPrimFloat: { - if (!Primitive::IsFloatingPointType(cond_type)) { + case DataType::Type::kFloat32: { + if (!DataType::IsFloatingPointType(cond_type)) { // sel*.fmt tests bit 0 of the condition register, account for that. 
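The Sltu/Mtc1 pair that follows exists because, as the comment above notes, sel*.fmt looks only at bit 0 of its selector, so a materialized condition that is merely non-zero has to be collapsed to 0/1 first. A plain-C++ model of why that matters (the real instruction's operand order is immaterial to the point being made):

#include <cstdint>
#include <iostream>

// sltu tmp, zero, cond computes (0 < cond) for an unsigned cond, i.e. cond != 0.
uint32_t CollapseToBit0(uint32_t cond) {
  return (0u < cond) ? 1u : 0u;
}

// sel.fmt-style selection: only bit 0 of the selector is consulted.
float SelectByBit0(uint32_t selector, float if_bit_clear, float if_bit_set) {
  return (selector & 1u) ? if_bit_set : if_bit_clear;
}

int main() {
  uint32_t cond = 0x40;  // logically "true", but bit 0 happens to be clear
  std::cout << SelectByBit0(cond, 1.0f, 2.0f) << "\n";                  // 1: wrong arm chosen
  std::cout << SelectByBit0(CollapseToBit0(cond), 1.0f, 2.0f) << "\n";  // 2: as intended
  return 0;
}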
__ Sltu(TMP, ZERO, cond_reg); __ Mtc1(TMP, fcond_reg); @@ -6001,8 +6002,8 @@ void InstructionCodeGeneratorMIPS::GenConditionalMoveR6(HSelect* select) { } break; } - case Primitive::kPrimDouble: { - if (!Primitive::IsFloatingPointType(cond_type)) { + case DataType::Type::kFloat64: { + if (!DataType::IsFloatingPointType(cond_type)) { // sel*.fmt tests bit 0 of the condition register, account for that. __ Sltu(TMP, ZERO, cond_reg); __ Mtc1(TMP, fcond_reg); @@ -6090,11 +6091,11 @@ void CodeGeneratorMIPS::GenerateNop() { } void LocationsBuilderMIPS::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) { - Primitive::Type field_type = field_info.GetFieldType(); - bool is_wide = (field_type == Primitive::kPrimLong) || (field_type == Primitive::kPrimDouble); + DataType::Type field_type = field_info.GetFieldType(); + bool is_wide = (field_type == DataType::Type::kInt64) || (field_type == DataType::Type::kFloat64); bool generate_volatile = field_info.IsVolatile() && is_wide; bool object_field_get_with_read_barrier = - kEmitCompilerReadBarrier && (field_type == Primitive::kPrimNot); + kEmitCompilerReadBarrier && (field_type == DataType::Type::kReference); LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary( instruction, generate_volatile @@ -6111,18 +6112,18 @@ void LocationsBuilderMIPS::HandleFieldGet(HInstruction* instruction, const Field InvokeRuntimeCallingConvention calling_convention; // need A0 to hold base + offset locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0))); - if (field_type == Primitive::kPrimLong) { - locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimLong)); + if (field_type == DataType::Type::kInt64) { + locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kInt64)); } else { // Use Location::Any() to prevent situations when running out of available fp registers. 
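For the volatile 64-bit field loads handled around here, the value comes back from the kQuickA64Load entrypoint in a core register pair even when the field is a double, so the generated code then has to move the two words into an FPU register bit-for-bit. A standalone equivalent of that reassembly (a sketch; little-endian word order is assumed):

#include <cstdint>
#include <cstring>
#include <iostream>

// Reassemble a double from the (lo, hi) 32-bit words a runtime call such as
// kQuickA64Load hands back in a core register pair; this is the C-level
// counterpart of the core-to-FPU register moves emitted in the hunk above.
double DoubleFromWordPair(uint32_t lo, uint32_t hi) {
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double result;
  static_assert(sizeof(result) == sizeof(bits), "double must be 64-bit");
  std::memcpy(&result, &bits, sizeof(result));
  return result;
}

int main() {
  // 1.0 is 0x3FF0000000000000: hi word 0x3FF00000, lo word 0x00000000.
  std::cout << DoubleFromWordPair(0x00000000u, 0x3FF00000u) << "\n";  // prints 1
  return 0;
}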
locations->SetOut(Location::Any()); // Need some temp core regs since FP results are returned in core registers - Location reg = calling_convention.GetReturnLocation(Primitive::kPrimLong); + Location reg = calling_convention.GetReturnLocation(DataType::Type::kInt64); locations->AddTemp(Location::RegisterLocation(reg.AsRegisterPairLow<Register>())); locations->AddTemp(Location::RegisterLocation(reg.AsRegisterPairHigh<Register>())); } } else { - if (Primitive::IsFloatingPointType(instruction->GetType())) { + if (DataType::IsFloatingPointType(instruction->GetType())) { locations->SetOut(Location::RequiresFpuRegister()); } else { // The output overlaps in the case of an object field get with @@ -6146,7 +6147,7 @@ void LocationsBuilderMIPS::HandleFieldGet(HInstruction* instruction, const Field void InstructionCodeGeneratorMIPS::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info, uint32_t dex_pc) { - Primitive::Type type = field_info.GetFieldType(); + DataType::Type type = field_info.GetFieldType(); LocationSummary* locations = instruction->GetLocations(); Location obj_loc = locations->InAt(0); Register obj = obj_loc.AsRegister<Register>(); @@ -6157,28 +6158,28 @@ void InstructionCodeGeneratorMIPS::HandleFieldGet(HInstruction* instruction, auto null_checker = GetImplicitNullChecker(instruction, codegen_); switch (type) { - case Primitive::kPrimBoolean: + case DataType::Type::kBool: load_type = kLoadUnsignedByte; break; - case Primitive::kPrimByte: + case DataType::Type::kInt8: load_type = kLoadSignedByte; break; - case Primitive::kPrimShort: + case DataType::Type::kInt16: load_type = kLoadSignedHalfword; break; - case Primitive::kPrimChar: + case DataType::Type::kUint16: load_type = kLoadUnsignedHalfword; break; - case Primitive::kPrimInt: - case Primitive::kPrimFloat: - case Primitive::kPrimNot: + case DataType::Type::kInt32: + case DataType::Type::kFloat32: + case DataType::Type::kReference: load_type = kLoadWord; break; - case Primitive::kPrimLong: - case Primitive::kPrimDouble: + case DataType::Type::kInt64: + case DataType::Type::kFloat64: load_type = kLoadDoubleword; break; - case Primitive::kPrimVoid: + case DataType::Type::kVoid: LOG(FATAL) << "Unreachable type " << type; UNREACHABLE(); } @@ -6191,7 +6192,7 @@ void InstructionCodeGeneratorMIPS::HandleFieldGet(HInstruction* instruction, codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); codegen_->InvokeRuntime(kQuickA64Load, instruction, dex_pc); CheckEntrypointTypes<kQuickA64Load, int64_t, volatile const int64_t*>(); - if (type == Primitive::kPrimDouble) { + if (type == DataType::Type::kFloat64) { // FP results are returned in core registers. Need to move them. if (dst_loc.IsFpuRegister()) { __ Mtc1(locations->GetTemp(1).AsRegister<Register>(), dst_loc.AsFpuRegister<FRegister>()); @@ -6210,7 +6211,7 @@ void InstructionCodeGeneratorMIPS::HandleFieldGet(HInstruction* instruction, } } } else { - if (type == Primitive::kPrimNot) { + if (type == DataType::Type::kReference) { // /* HeapReference<Object> */ dst = *(obj + offset) if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { Location temp_loc = @@ -6236,9 +6237,9 @@ void InstructionCodeGeneratorMIPS::HandleFieldGet(HInstruction* instruction, // reference, if heap poisoning is enabled). 
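The load_type switch in this hunk encodes a fixed width and signedness per DataType: kBool and kUint16 zero-extend, kInt8 and kInt16 sign-extend, and the remaining types are word or doubleword loads. The same mapping as a small self-contained table-in-code (illustrative only; the mnemonics in the comments are the usual MIPS ones, not quotes from the generator):

#include <iostream>

namespace sketch {

enum class Type { kBool, kInt8, kInt16, kUint16, kInt32, kInt64, kFloat32, kFloat64, kReference };

struct LoadKind {
  int size_in_bytes;
  bool is_signed;  // meaningful only for sub-word loads
};

LoadKind LoadKindFor(Type type) {
  switch (type) {
    case Type::kBool:      return {1, false};  // lbu
    case Type::kInt8:      return {1, true};   // lb
    case Type::kInt16:     return {2, true};   // lh
    case Type::kUint16:    return {2, false};  // lhu
    case Type::kInt32:
    case Type::kFloat32:
    case Type::kReference: return {4, true};   // lw / lwc1
    case Type::kInt64:
    case Type::kFloat64:   return {8, true};   // register pair / ldc1
  }
  return {0, false};  // unreachable for valid input (kVoid is rejected upstream)
}

}  // namespace sketch

int main() {
  sketch::LoadKind kind = sketch::LoadKindFor(sketch::Type::kUint16);
  std::cout << kind.size_in_bytes << (kind.is_signed ? " signed" : " unsigned") << "\n";  // 2 unsigned
  return 0;
}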
codegen_->MaybeGenerateReadBarrierSlow(instruction, dst_loc, dst_loc, obj_loc, offset); } - } else if (!Primitive::IsFloatingPointType(type)) { + } else if (!DataType::IsFloatingPointType(type)) { Register dst; - if (type == Primitive::kPrimLong) { + if (type == DataType::Type::kInt64) { DCHECK(dst_loc.IsRegisterPair()); dst = dst_loc.AsRegisterPairLow<Register>(); } else { @@ -6249,7 +6250,7 @@ void InstructionCodeGeneratorMIPS::HandleFieldGet(HInstruction* instruction, } else { DCHECK(dst_loc.IsFpuRegister()); FRegister dst = dst_loc.AsFpuRegister<FRegister>(); - if (type == Primitive::kPrimFloat) { + if (type == DataType::Type::kFloat32) { __ LoadSFromOffset(dst, obj, offset, null_checker); } else { __ LoadDFromOffset(dst, obj, offset, null_checker); @@ -6259,14 +6260,14 @@ void InstructionCodeGeneratorMIPS::HandleFieldGet(HInstruction* instruction, // Memory barriers, in the case of references, are handled in the // previous switch statement. - if (is_volatile && (type != Primitive::kPrimNot)) { + if (is_volatile && (type != DataType::Type::kReference)) { GenerateMemoryBarrier(MemBarrierKind::kLoadAny); } } void LocationsBuilderMIPS::HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info) { - Primitive::Type field_type = field_info.GetFieldType(); - bool is_wide = (field_type == Primitive::kPrimLong) || (field_type == Primitive::kPrimDouble); + DataType::Type field_type = field_info.GetFieldType(); + bool is_wide = (field_type == DataType::Type::kInt64) || (field_type == DataType::Type::kFloat64); bool generate_volatile = field_info.IsVolatile() && is_wide; LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary( instruction, generate_volatile ? LocationSummary::kCallOnMainOnly : LocationSummary::kNoCall); @@ -6276,7 +6277,7 @@ void LocationsBuilderMIPS::HandleFieldSet(HInstruction* instruction, const Field InvokeRuntimeCallingConvention calling_convention; // need A0 to hold base + offset locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0))); - if (field_type == Primitive::kPrimLong) { + if (field_type == DataType::Type::kInt64) { locations->SetInAt(1, Location::RegisterPairLocation( calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3))); } else { @@ -6287,7 +6288,7 @@ void LocationsBuilderMIPS::HandleFieldSet(HInstruction* instruction, const Field locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(3))); } } else { - if (Primitive::IsFloatingPointType(field_type)) { + if (DataType::IsFloatingPointType(field_type)) { locations->SetInAt(1, FpuRegisterOrConstantForStore(instruction->InputAt(1))); } else { locations->SetInAt(1, RegisterOrZeroConstant(instruction->InputAt(1))); @@ -6299,7 +6300,7 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info, uint32_t dex_pc, bool value_can_be_null) { - Primitive::Type type = field_info.GetFieldType(); + DataType::Type type = field_info.GetFieldType(); LocationSummary* locations = instruction->GetLocations(); Register obj = locations->InAt(0).AsRegister<Register>(); Location value_location = locations->InAt(1); @@ -6310,24 +6311,24 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction, auto null_checker = GetImplicitNullChecker(instruction, codegen_); switch (type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: + case DataType::Type::kBool: + case DataType::Type::kInt8: store_type = kStoreByte; break; - case Primitive::kPrimShort: - case 
Primitive::kPrimChar: + case DataType::Type::kInt16: + case DataType::Type::kUint16: store_type = kStoreHalfword; break; - case Primitive::kPrimInt: - case Primitive::kPrimFloat: - case Primitive::kPrimNot: + case DataType::Type::kInt32: + case DataType::Type::kFloat32: + case DataType::Type::kReference: store_type = kStoreWord; break; - case Primitive::kPrimLong: - case Primitive::kPrimDouble: + case DataType::Type::kInt64: + case DataType::Type::kFloat64: store_type = kStoreDoubleword; break; - case Primitive::kPrimVoid: + case DataType::Type::kVoid: LOG(FATAL) << "Unreachable type " << type; UNREACHABLE(); } @@ -6342,7 +6343,7 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction, // Do implicit Null check. __ Lw(ZERO, locations->GetTemp(0).AsRegister<Register>(), 0); codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); - if (type == Primitive::kPrimDouble) { + if (type == DataType::Type::kFloat64) { // Pass FP parameters in core registers. if (value_location.IsFpuRegister()) { __ Mfc1(locations->GetTemp(1).AsRegister<Register>(), @@ -6373,9 +6374,9 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction, if (value_location.IsConstant()) { int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant()); __ StoreConstToOffset(store_type, value, obj, offset, TMP, null_checker); - } else if (!Primitive::IsFloatingPointType(type)) { + } else if (!DataType::IsFloatingPointType(type)) { Register src; - if (type == Primitive::kPrimLong) { + if (type == DataType::Type::kInt64) { src = value_location.AsRegisterPairLow<Register>(); } else { src = value_location.AsRegister<Register>(); @@ -6384,7 +6385,7 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction, // Note that in the case where `value` is a null reference, // we do not enter this block, as a null reference does not // need poisoning. 
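The store path below poisons non-null reference values before writing them, while a constant null can be stored as-is, per the comment above. The sketch models poisoning as a self-inverse 32-bit negation; that particular encoding is an assumption of the sketch, chosen because it also explains why null needs no special handling:

#include <cstdint>
#include <iostream>

// Assumed poisoning scheme for the sketch: poisoned = 0 - ref (mod 2^32).
// Negation is its own inverse, and null (0) poisons to itself.
uint32_t PoisonReference(uint32_t ref)   { return 0u - ref; }
uint32_t UnpoisonReference(uint32_t ref) { return 0u - ref; }

int main() {
  uint32_t ref = 0x12345678u;
  uint32_t poisoned = PoisonReference(ref);
  std::cout << std::hex << poisoned << " " << UnpoisonReference(poisoned) << "\n";
  // Round-trips back to the original; a null store needs no poisoning step at all.
  return 0;
}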
- DCHECK_EQ(type, Primitive::kPrimNot); + DCHECK_EQ(type, DataType::Type::kReference); __ PoisonHeapReference(TMP, src); __ StoreToOffset(store_type, TMP, obj, offset, null_checker); } else { @@ -6392,7 +6393,7 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction, } } else { FRegister src = value_location.AsFpuRegister<FRegister>(); - if (type == Primitive::kPrimFloat) { + if (type == DataType::Type::kFloat32) { __ StoreSToOffset(src, obj, offset, null_checker); } else { __ StoreDToOffset(src, obj, offset, null_checker); @@ -8010,15 +8011,15 @@ void LocationsBuilderMIPS::VisitMul(HMul* mul) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall); switch (mul->GetResultType()) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt32: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); @@ -8030,12 +8031,12 @@ void LocationsBuilderMIPS::VisitMul(HMul* mul) { } void InstructionCodeGeneratorMIPS::VisitMul(HMul* instruction) { - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); LocationSummary* locations = instruction->GetLocations(); bool isR6 = codegen_->GetInstructionSetFeatures().IsR6(); switch (type) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { Register dst = locations->Out().AsRegister<Register>(); Register lhs = locations->InAt(0).AsRegister<Register>(); Register rhs = locations->InAt(1).AsRegister<Register>(); @@ -8047,7 +8048,7 @@ void InstructionCodeGeneratorMIPS::VisitMul(HMul* instruction) { } break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { Register dst_high = locations->Out().AsRegisterPairHigh<Register>(); Register dst_low = locations->Out().AsRegisterPairLow<Register>(); Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>(); @@ -8084,12 +8085,12 @@ void InstructionCodeGeneratorMIPS::VisitMul(HMul* instruction) { } break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { FRegister dst = locations->Out().AsFpuRegister<FRegister>(); FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>(); FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>(); - if (type == Primitive::kPrimFloat) { + if (type == DataType::Type::kFloat32) { __ MulS(dst, lhs, rhs); } else { __ MulD(dst, lhs, rhs); @@ -8105,14 +8106,14 @@ void LocationsBuilderMIPS::VisitNeg(HNeg* neg) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall); switch (neg->GetResultType()) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt32: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, 
Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); break; @@ -8123,17 +8124,17 @@ void LocationsBuilderMIPS::VisitNeg(HNeg* neg) { } void InstructionCodeGeneratorMIPS::VisitNeg(HNeg* instruction) { - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); LocationSummary* locations = instruction->GetLocations(); switch (type) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { Register dst = locations->Out().AsRegister<Register>(); Register src = locations->InAt(0).AsRegister<Register>(); __ Subu(dst, ZERO, src); break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { Register dst_high = locations->Out().AsRegisterPairHigh<Register>(); Register dst_low = locations->Out().AsRegisterPairLow<Register>(); Register src_high = locations->InAt(0).AsRegisterPairHigh<Register>(); @@ -8144,11 +8145,11 @@ void InstructionCodeGeneratorMIPS::VisitNeg(HNeg* instruction) { __ Subu(dst_high, dst_high, TMP); break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { FRegister dst = locations->Out().AsFpuRegister<FRegister>(); FRegister src = locations->InAt(0).AsFpuRegister<FRegister>(); - if (type == Primitive::kPrimFloat) { + if (type == DataType::Type::kFloat32) { __ NegS(dst, src); } else { __ NegD(dst, src); @@ -8164,7 +8165,7 @@ void LocationsBuilderMIPS::VisitNewArray(HNewArray* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly); InvokeRuntimeCallingConvention calling_convention; - locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot)); + locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kReference)); locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); } @@ -8188,7 +8189,7 @@ void LocationsBuilderMIPS::VisitNewInstance(HNewInstance* instruction) { } else { locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); } - locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot)); + locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kReference)); } void InstructionCodeGeneratorMIPS::VisitNewInstance(HNewInstance* instruction) { @@ -8216,18 +8217,18 @@ void LocationsBuilderMIPS::VisitNot(HNot* instruction) { } void InstructionCodeGeneratorMIPS::VisitNot(HNot* instruction) { - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); LocationSummary* locations = instruction->GetLocations(); switch (type) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { Register dst = locations->Out().AsRegister<Register>(); Register src = locations->InAt(0).AsRegister<Register>(); __ Nor(dst, src, ZERO); break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { Register dst_high = locations->Out().AsRegisterPairHigh<Register>(); Register dst_low = locations->Out().AsRegisterPairLow<Register>(); Register src_high = locations->InAt(0).AsRegisterPairHigh<Register>(); @@ -8339,19 +8340,20 @@ void InstructionCodeGeneratorMIPS::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) } void LocationsBuilderMIPS::VisitRem(HRem* rem) { - Primitive::Type type = rem->GetResultType(); - LocationSummary::CallKind call_kind = - (type == Primitive::kPrimInt) 
? LocationSummary::kNoCall : LocationSummary::kCallOnMainOnly; + DataType::Type type = rem->GetResultType(); + LocationSummary::CallKind call_kind = (type == DataType::Type::kInt32) + ? LocationSummary::kNoCall + : LocationSummary::kCallOnMainOnly; LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind); switch (type) { - case Primitive::kPrimInt: + case DataType::Type::kInt32: locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1))); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterPairLocation( calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1))); @@ -8361,8 +8363,8 @@ void LocationsBuilderMIPS::VisitRem(HRem* rem) { break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0))); locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1))); @@ -8376,23 +8378,23 @@ void LocationsBuilderMIPS::VisitRem(HRem* rem) { } void InstructionCodeGeneratorMIPS::VisitRem(HRem* instruction) { - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); switch (type) { - case Primitive::kPrimInt: + case DataType::Type::kInt32: GenerateDivRemIntegral(instruction); break; - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { codegen_->InvokeRuntime(kQuickLmod, instruction, instruction->GetDexPc()); CheckEntrypointTypes<kQuickLmod, int64_t, int64_t, int64_t>(); break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { codegen_->InvokeRuntime(kQuickFmodf, instruction, instruction->GetDexPc()); CheckEntrypointTypes<kQuickFmodf, float, float, float>(); break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { codegen_->InvokeRuntime(kQuickFmod, instruction, instruction->GetDexPc()); CheckEntrypointTypes<kQuickFmod, double, double, double>(); break; @@ -8421,7 +8423,7 @@ void InstructionCodeGeneratorMIPS::VisitMemoryBarrier(HMemoryBarrier* memory_bar void LocationsBuilderMIPS::VisitReturn(HReturn* ret) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret); - Primitive::Type return_type = ret->InputAt(0)->GetType(); + DataType::Type return_type = ret->InputAt(0)->GetType(); locations->SetInAt(0, MipsReturnLocation(return_type)); } @@ -8597,33 +8599,33 @@ void InstructionCodeGeneratorMIPS::VisitThrow(HThrow* instruction) { } void LocationsBuilderMIPS::VisitTypeConversion(HTypeConversion* conversion) { - Primitive::Type input_type = conversion->GetInputType(); - Primitive::Type result_type = conversion->GetResultType(); + DataType::Type input_type = conversion->GetInputType(); + DataType::Type result_type = conversion->GetResultType(); DCHECK_NE(input_type, result_type); bool isR6 = codegen_->GetInstructionSetFeatures().IsR6(); - if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) || - (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) { + if ((input_type == DataType::Type::kReference) || (input_type == DataType::Type::kVoid) || + (result_type == DataType::Type::kReference) || (result_type == 
DataType::Type::kVoid)) { LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type; } LocationSummary::CallKind call_kind = LocationSummary::kNoCall; if (!isR6 && - ((Primitive::IsFloatingPointType(result_type) && input_type == Primitive::kPrimLong) || - (result_type == Primitive::kPrimLong && Primitive::IsFloatingPointType(input_type)))) { + ((DataType::IsFloatingPointType(result_type) && input_type == DataType::Type::kInt64) || + (result_type == DataType::Type::kInt64 && DataType::IsFloatingPointType(input_type)))) { call_kind = LocationSummary::kCallOnMainOnly; } LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind); if (call_kind == LocationSummary::kNoCall) { - if (Primitive::IsFloatingPointType(input_type)) { + if (DataType::IsFloatingPointType(input_type)) { locations->SetInAt(0, Location::RequiresFpuRegister()); } else { locations->SetInAt(0, Location::RequiresRegister()); } - if (Primitive::IsFloatingPointType(result_type)) { + if (DataType::IsFloatingPointType(result_type)) { locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); } else { locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); @@ -8631,10 +8633,10 @@ void LocationsBuilderMIPS::VisitTypeConversion(HTypeConversion* conversion) { } else { InvokeRuntimeCallingConvention calling_convention; - if (Primitive::IsFloatingPointType(input_type)) { + if (DataType::IsFloatingPointType(input_type)) { locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0))); } else { - DCHECK_EQ(input_type, Primitive::kPrimLong); + DCHECK_EQ(input_type, DataType::Type::kInt64); locations->SetInAt(0, Location::RegisterPairLocation( calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1))); } @@ -8645,14 +8647,14 @@ void LocationsBuilderMIPS::VisitTypeConversion(HTypeConversion* conversion) { void InstructionCodeGeneratorMIPS::VisitTypeConversion(HTypeConversion* conversion) { LocationSummary* locations = conversion->GetLocations(); - Primitive::Type result_type = conversion->GetResultType(); - Primitive::Type input_type = conversion->GetInputType(); + DataType::Type result_type = conversion->GetResultType(); + DataType::Type input_type = conversion->GetInputType(); bool has_sign_extension = codegen_->GetInstructionSetFeatures().IsMipsIsaRevGreaterThanEqual2(); bool isR6 = codegen_->GetInstructionSetFeatures().IsR6(); DCHECK_NE(input_type, result_type); - if (result_type == Primitive::kPrimLong && Primitive::IsIntegralType(input_type)) { + if (result_type == DataType::Type::kInt64 && DataType::IsIntegralType(input_type)) { Register dst_high = locations->Out().AsRegisterPairHigh<Register>(); Register dst_low = locations->Out().AsRegisterPairLow<Register>(); Register src = locations->InAt(0).AsRegister<Register>(); @@ -8661,17 +8663,17 @@ void InstructionCodeGeneratorMIPS::VisitTypeConversion(HTypeConversion* conversi __ Move(dst_low, src); } __ Sra(dst_high, src, 31); - } else if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) { + } else if (DataType::IsIntegralType(result_type) && DataType::IsIntegralType(input_type)) { Register dst = locations->Out().AsRegister<Register>(); - Register src = (input_type == Primitive::kPrimLong) + Register src = (input_type == DataType::Type::kInt64) ? 
locations->InAt(0).AsRegisterPairLow<Register>() : locations->InAt(0).AsRegister<Register>(); switch (result_type) { - case Primitive::kPrimChar: + case DataType::Type::kUint16: __ Andi(dst, src, 0xFFFF); break; - case Primitive::kPrimByte: + case DataType::Type::kInt8: if (has_sign_extension) { __ Seb(dst, src); } else { @@ -8679,7 +8681,7 @@ void InstructionCodeGeneratorMIPS::VisitTypeConversion(HTypeConversion* conversi __ Sra(dst, dst, 24); } break; - case Primitive::kPrimShort: + case DataType::Type::kInt16: if (has_sign_extension) { __ Seh(dst, src); } else { @@ -8687,7 +8689,7 @@ void InstructionCodeGeneratorMIPS::VisitTypeConversion(HTypeConversion* conversi __ Sra(dst, dst, 16); } break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: if (dst != src) { __ Move(dst, src); } @@ -8697,8 +8699,8 @@ void InstructionCodeGeneratorMIPS::VisitTypeConversion(HTypeConversion* conversi LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type; } - } else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) { - if (input_type == Primitive::kPrimLong) { + } else if (DataType::IsFloatingPointType(result_type) && DataType::IsIntegralType(input_type)) { + if (input_type == DataType::Type::kInt64) { if (isR6) { // cvt.s.l/cvt.d.l requires MIPSR2+ with FR=1. MIPS32R6 is implemented as a secondary // architecture on top of MIPS64R6, which has FR=1, and therefore can use the instruction. @@ -8707,16 +8709,16 @@ void InstructionCodeGeneratorMIPS::VisitTypeConversion(HTypeConversion* conversi FRegister dst = locations->Out().AsFpuRegister<FRegister>(); __ Mtc1(src_low, FTMP); __ Mthc1(src_high, FTMP); - if (result_type == Primitive::kPrimFloat) { + if (result_type == DataType::Type::kFloat32) { __ Cvtsl(dst, FTMP); } else { __ Cvtdl(dst, FTMP); } } else { - QuickEntrypointEnum entrypoint = (result_type == Primitive::kPrimFloat) ? kQuickL2f - : kQuickL2d; + QuickEntrypointEnum entrypoint = + (result_type == DataType::Type::kFloat32) ? kQuickL2f : kQuickL2d; codegen_->InvokeRuntime(entrypoint, conversion, conversion->GetDexPc()); - if (result_type == Primitive::kPrimFloat) { + if (result_type == DataType::Type::kFloat32) { CheckEntrypointTypes<kQuickL2f, float, int64_t>(); } else { CheckEntrypointTypes<kQuickL2d, double, int64_t>(); @@ -8726,14 +8728,14 @@ void InstructionCodeGeneratorMIPS::VisitTypeConversion(HTypeConversion* conversi Register src = locations->InAt(0).AsRegister<Register>(); FRegister dst = locations->Out().AsFpuRegister<FRegister>(); __ Mtc1(src, FTMP); - if (result_type == Primitive::kPrimFloat) { + if (result_type == DataType::Type::kFloat32) { __ Cvtsw(dst, FTMP); } else { __ Cvtdw(dst, FTMP); } } - } else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) { - CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong); + } else if (DataType::IsIntegralType(result_type) && DataType::IsFloatingPointType(input_type)) { + CHECK(result_type == DataType::Type::kInt32 || result_type == DataType::Type::kInt64); // When NAN2008=1 (R6), the truncate instruction caps the output at the minimum/maximum // value of the output type if the input is outside of the range after the truncation or @@ -8751,7 +8753,7 @@ void InstructionCodeGeneratorMIPS::VisitTypeConversion(HTypeConversion* conversi // instruction, which will handle such an input the same way irrespective of NAN2008. 
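// Illustrative aside (not from the patch): the float/double -> int32/int64 conversions
// handled above follow Java narrowing semantics, so NaN converts to 0 and out-of-range
// values saturate at the bounds of the destination type. A portable C++ model of what
// the float -> kInt32 sequence computes (helper name and shape are assumptions):
#include <cmath>    // std::isnan
#include <cstdint>  // int32_t
#include <limits>   // std::numeric_limits
static int32_t JavaFloatToInt32(float value) {
  if (std::isnan(value)) {
    return 0;  // NaN -> 0
  }
  if (value >= static_cast<float>(std::numeric_limits<int32_t>::max())) {
    return std::numeric_limits<int32_t>::max();  // saturate high
  }
  if (value <= static_cast<float>(std::numeric_limits<int32_t>::min())) {
    return std::numeric_limits<int32_t>::min();  // saturate low
  }
  return static_cast<int32_t>(value);  // in range: truncate toward zero
}
// The kInt64 and kFloat64 variants follow the same pattern with int64_t and double.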
// Otherwise the input is compared to itself to determine whether it is a NaN or not // in order to return either zero or the minimum value. - if (result_type == Primitive::kPrimLong) { + if (result_type == DataType::Type::kInt64) { if (isR6) { // trunc.l.s/trunc.l.d requires MIPSR2+ with FR=1. MIPS32R6 is implemented as a secondary // architecture on top of MIPS64R6, which has FR=1, and therefore can use the instruction. @@ -8759,7 +8761,7 @@ void InstructionCodeGeneratorMIPS::VisitTypeConversion(HTypeConversion* conversi Register dst_high = locations->Out().AsRegisterPairHigh<Register>(); Register dst_low = locations->Out().AsRegisterPairLow<Register>(); - if (input_type == Primitive::kPrimFloat) { + if (input_type == DataType::Type::kFloat32) { __ TruncLS(FTMP, src); } else { __ TruncLD(FTMP, src); @@ -8767,10 +8769,10 @@ void InstructionCodeGeneratorMIPS::VisitTypeConversion(HTypeConversion* conversi __ Mfc1(dst_low, FTMP); __ Mfhc1(dst_high, FTMP); } else { - QuickEntrypointEnum entrypoint = (input_type == Primitive::kPrimFloat) ? kQuickF2l - : kQuickD2l; + QuickEntrypointEnum entrypoint = + (input_type == DataType::Type::kFloat32) ? kQuickF2l : kQuickD2l; codegen_->InvokeRuntime(entrypoint, conversion, conversion->GetDexPc()); - if (input_type == Primitive::kPrimFloat) { + if (input_type == DataType::Type::kFloat32) { CheckEntrypointTypes<kQuickF2l, int64_t, float>(); } else { CheckEntrypointTypes<kQuickD2l, int64_t, double>(); @@ -8783,7 +8785,7 @@ void InstructionCodeGeneratorMIPS::VisitTypeConversion(HTypeConversion* conversi MipsLabel done; if (!isR6) { - if (input_type == Primitive::kPrimFloat) { + if (input_type == DataType::Type::kFloat32) { uint32_t min_val = bit_cast<uint32_t, float>(std::numeric_limits<int32_t>::min()); __ LoadConst32(TMP, min_val); __ Mtc1(TMP, FTMP); @@ -8794,14 +8796,14 @@ void InstructionCodeGeneratorMIPS::VisitTypeConversion(HTypeConversion* conversi __ MoveToFpuHigh(TMP, FTMP); } - if (input_type == Primitive::kPrimFloat) { + if (input_type == DataType::Type::kFloat32) { __ ColeS(0, FTMP, src); } else { __ ColeD(0, FTMP, src); } __ Bc1t(0, &truncate); - if (input_type == Primitive::kPrimFloat) { + if (input_type == DataType::Type::kFloat32) { __ CeqS(0, src, src); } else { __ CeqD(0, src, src); @@ -8814,7 +8816,7 @@ void InstructionCodeGeneratorMIPS::VisitTypeConversion(HTypeConversion* conversi __ Bind(&truncate); } - if (input_type == Primitive::kPrimFloat) { + if (input_type == DataType::Type::kFloat32) { __ TruncWS(FTMP, src); } else { __ TruncWD(FTMP, src); @@ -8825,11 +8827,11 @@ void InstructionCodeGeneratorMIPS::VisitTypeConversion(HTypeConversion* conversi __ Bind(&done); } } - } else if (Primitive::IsFloatingPointType(result_type) && - Primitive::IsFloatingPointType(input_type)) { + } else if (DataType::IsFloatingPointType(result_type) && + DataType::IsFloatingPointType(input_type)) { FRegister dst = locations->Out().AsFpuRegister<FRegister>(); FRegister src = locations->InAt(0).AsFpuRegister<FRegister>(); - if (result_type == Primitive::kPrimFloat) { + if (result_type == DataType::Type::kFloat32) { __ Cvtsd(dst, src); } else { __ Cvtds(dst, src); diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h index 2b1075d12b..5f2f90004d 100644 --- a/compiler/optimizing/code_generator_mips.h +++ b/compiler/optimizing/code_generator_mips.h @@ -81,8 +81,8 @@ class InvokeDexCallingConventionVisitorMIPS : public InvokeDexCallingConventionV InvokeDexCallingConventionVisitorMIPS() {} virtual 
~InvokeDexCallingConventionVisitorMIPS() {} - Location GetNextLocation(Primitive::Type type) OVERRIDE; - Location GetReturnLocation(Primitive::Type type) const OVERRIDE; + Location GetNextLocation(DataType::Type type) OVERRIDE; + Location GetReturnLocation(DataType::Type type) const OVERRIDE; Location GetMethodLocation() const OVERRIDE; private: @@ -100,7 +100,7 @@ class InvokeRuntimeCallingConvention : public CallingConvention<Register, FRegis kRuntimeParameterFpuRegistersLength, kMipsPointerSize) {} - Location GetReturnLocation(Primitive::Type return_type); + Location GetReturnLocation(DataType::Type return_type); private: DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention); @@ -116,17 +116,17 @@ class FieldAccessCallingConventionMIPS : public FieldAccessCallingConvention { Location GetFieldIndexLocation() const OVERRIDE { return Location::RegisterLocation(A0); } - Location GetReturnLocation(Primitive::Type type) const OVERRIDE { - return Primitive::Is64BitType(type) + Location GetReturnLocation(DataType::Type type) const OVERRIDE { + return DataType::Is64BitType(type) ? Location::RegisterPairLocation(V0, V1) : Location::RegisterLocation(V0); } - Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE { - return Primitive::Is64BitType(type) + Location GetSetValueLocation(DataType::Type type, bool is_instance) const OVERRIDE { + return DataType::Is64BitType(type) ? Location::RegisterPairLocation(A2, A3) : (is_instance ? Location::RegisterLocation(A2) : Location::RegisterLocation(A1)); } - Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE { + Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE { return Location::FpuRegisterLocation(F0); } @@ -304,14 +304,14 @@ class InstructionCodeGeneratorMIPS : public InstructionCodeGenerator { MipsLabel* label); void GenerateFpCompare(IfCondition cond, bool gt_bias, - Primitive::Type type, + DataType::Type type, LocationSummary* locations); // When the function returns `false` it means that the condition holds if the condition // code flag `cc` is non-zero and doesn't hold if `cc` is zero. If it returns `true`, // the roles of zero and non-zero values of the `cc` flag are exchanged. bool MaterializeFpCompareR2(IfCondition cond, bool gt_bias, - Primitive::Type type, + DataType::Type type, LocationSummary* input_locations, int cc); // When the function returns `false` it means that the condition holds if `dst` is non-zero @@ -319,12 +319,12 @@ class InstructionCodeGeneratorMIPS : public InstructionCodeGenerator { // `dst` are exchanged. bool MaterializeFpCompareR6(IfCondition cond, bool gt_bias, - Primitive::Type type, + DataType::Type type, LocationSummary* input_locations, FRegister dst); void GenerateFpCompareAndBranch(IfCondition cond, bool gt_bias, - Primitive::Type type, + DataType::Type type, LocationSummary* locations, MipsLabel* label); void GenerateTestAndBranch(HInstruction* instruction, @@ -518,7 +518,7 @@ class CodeGeneratorMIPS : public CodeGenerator { // Code generation helpers. 
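// Illustrative sketch of the DataType helper these declarations now use. Only the
// members referenced in these files are shown; the enumerator order and the exact
// predicate definitions are assumptions, and the real class is defined elsewhere in
// the optimizing compiler.
class DataTypeSketch {
 public:
  enum class Type {
    kReference, kBool, kInt8, kUint16, kInt16, kInt32, kInt64, kFloat32, kFloat64, kVoid,
  };
  static bool IsFloatingPointType(Type type) {
    return type == Type::kFloat32 || type == Type::kFloat64;
  }
  static bool Is64BitType(Type type) {
    return type == Type::kInt64 || type == Type::kFloat64;
  }
  static bool IsIntegralType(Type type) {
    // The set assumed here covers the cases the conversion and compare code relies on.
    return type == Type::kInt8 || type == Type::kUint16 || type == Type::kInt16 ||
           type == Type::kInt32 || type == Type::kInt64;
  }
};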
- void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE; + void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE; void MoveConstant(Location destination, int32_t value) OVERRIDE; @@ -541,8 +541,8 @@ class CodeGeneratorMIPS : public CodeGenerator { ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; } - bool NeedsTwoRegisters(Primitive::Type type) const OVERRIDE { - return type == Primitive::kPrimLong; + bool NeedsTwoRegisters(DataType::Type type) const OVERRIDE { + return type == DataType::Type::kInt64; } // Check if the desired_string_load_kind is supported. If it is, return it, @@ -567,7 +567,7 @@ class CodeGeneratorMIPS : public CodeGenerator { HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE; void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED, - Primitive::Type type ATTRIBUTE_UNUSED) OVERRIDE { + DataType::Type type ATTRIBUTE_UNUSED) OVERRIDE { UNIMPLEMENTED(FATAL) << "Not implemented on MIPS"; } diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc index 119e0f6b76..7051ccefdc 100644 --- a/compiler/optimizing/code_generator_mips64.cc +++ b/compiler/optimizing/code_generator_mips64.cc @@ -47,28 +47,28 @@ constexpr bool kBakerReadBarrierThunksEnableForFields = true; constexpr bool kBakerReadBarrierThunksEnableForArrays = true; constexpr bool kBakerReadBarrierThunksEnableForGcRoots = true; -Location Mips64ReturnLocation(Primitive::Type return_type) { +Location Mips64ReturnLocation(DataType::Type return_type) { switch (return_type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimNot: - case Primitive::kPrimLong: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kReference: + case DataType::Type::kInt64: return Location::RegisterLocation(V0); - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: return Location::FpuRegisterLocation(F0); - case Primitive::kPrimVoid: + case DataType::Type::kVoid: return Location(); } UNREACHABLE(); } -Location InvokeDexCallingConventionVisitorMIPS64::GetReturnLocation(Primitive::Type type) const { +Location InvokeDexCallingConventionVisitorMIPS64::GetReturnLocation(DataType::Type type) const { return Mips64ReturnLocation(type); } @@ -76,34 +76,34 @@ Location InvokeDexCallingConventionVisitorMIPS64::GetMethodLocation() const { return Location::RegisterLocation(kMethodRegisterArgument); } -Location InvokeDexCallingConventionVisitorMIPS64::GetNextLocation(Primitive::Type type) { +Location InvokeDexCallingConventionVisitorMIPS64::GetNextLocation(DataType::Type type) { Location next_location; - if (type == Primitive::kPrimVoid) { + if (type == DataType::Type::kVoid) { LOG(FATAL) << "Unexpected parameter type " << type; } - if (Primitive::IsFloatingPointType(type) && + if (DataType::IsFloatingPointType(type) && (float_index_ < calling_convention.GetNumberOfFpuRegisters())) { next_location = Location::FpuRegisterLocation( calling_convention.GetFpuRegisterAt(float_index_++)); gp_index_++; - } else if (!Primitive::IsFloatingPointType(type) && + } else if (!DataType::IsFloatingPointType(type) && (gp_index_ < calling_convention.GetNumberOfRegisters())) { next_location = 
Location::RegisterLocation(calling_convention.GetRegisterAt(gp_index_++)); float_index_++; } else { size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_); - next_location = Primitive::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset) - : Location::StackSlot(stack_offset); + next_location = DataType::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset) + : Location::StackSlot(stack_offset); } // Space on the stack is reserved for all arguments. - stack_index_ += Primitive::Is64BitType(type) ? 2 : 1; + stack_index_ += DataType::Is64BitType(type) ? 2 : 1; return next_location; } -Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type type) { +Location InvokeRuntimeCallingConvention::GetReturnLocation(DataType::Type type) { return Mips64ReturnLocation(type); } @@ -128,10 +128,10 @@ class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 { InvokeRuntimeCallingConvention calling_convention; codegen->EmitParallelMoves(locations->InAt(0), Location::RegisterLocation(calling_convention.GetRegisterAt(0)), - Primitive::kPrimInt, + DataType::Type::kInt32, locations->InAt(1), Location::RegisterLocation(calling_convention.GetRegisterAt(1)), - Primitive::kPrimInt); + DataType::Type::kInt32); QuickEntrypointEnum entrypoint = instruction_->AsBoundsCheck()->IsStringCharAt() ? kQuickThrowStringBounds : kQuickThrowArrayBounds; @@ -236,7 +236,7 @@ class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 { // Move the class to the desired location. if (out.IsValid()) { DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg())); - Primitive::Type type = instruction_->GetType(); + DataType::Type type = instruction_->GetType(); mips64_codegen->MoveLocation(out, Location::RegisterLocation(calling_convention.GetRegisterAt(0)), type); @@ -331,7 +331,7 @@ class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 { /* placeholder */ 0x5678); } - Primitive::Type type = instruction_->GetType(); + DataType::Type type = instruction_->GetType(); mips64_codegen->MoveLocation(locations->Out(), Location::RegisterLocation(calling_convention.GetRegisterAt(0)), type); @@ -446,14 +446,14 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 { InvokeRuntimeCallingConvention calling_convention; codegen->EmitParallelMoves(locations->InAt(0), Location::RegisterLocation(calling_convention.GetRegisterAt(0)), - Primitive::kPrimNot, + DataType::Type::kReference, locations->InAt(1), Location::RegisterLocation(calling_convention.GetRegisterAt(1)), - Primitive::kPrimNot); + DataType::Type::kReference); if (instruction_->IsInstanceOf()) { mips64_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this); CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>(); - Primitive::Type ret_type = instruction_->GetType(); + DataType::Type ret_type = instruction_->GetType(); Location ret_loc = calling_convention.GetReturnLocation(ret_type); mips64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type); } else { @@ -515,17 +515,17 @@ class ArraySetSlowPathMIPS64 : public SlowPathCodeMIPS64 { parallel_move.AddMove( locations->InAt(0), Location::RegisterLocation(calling_convention.GetRegisterAt(0)), - Primitive::kPrimNot, + DataType::Type::kReference, nullptr); parallel_move.AddMove( locations->InAt(1), Location::RegisterLocation(calling_convention.GetRegisterAt(1)), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); parallel_move.AddMove( locations->InAt(2), 
Location::RegisterLocation(calling_convention.GetRegisterAt(2)), - Primitive::kPrimNot, + DataType::Type::kReference, nullptr); codegen->GetMoveResolver()->EmitNativeCode(&parallel_move); @@ -823,7 +823,7 @@ class ReadBarrierForHeapReferenceSlowPathMIPS64 : public SlowPathCodeMIPS64 { void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen); LocationSummary* locations = instruction_->GetLocations(); - Primitive::Type type = Primitive::kPrimNot; + DataType::Type type = DataType::Type::kReference; GpuRegister reg_out = out_.AsRegister<GpuRegister>(); DCHECK(locations->CanCall()); DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out)); @@ -912,16 +912,16 @@ class ReadBarrierForHeapReferenceSlowPathMIPS64 : public SlowPathCodeMIPS64 { HParallelMove parallel_move(codegen->GetGraph()->GetArena()); parallel_move.AddMove(ref_, Location::RegisterLocation(calling_convention.GetRegisterAt(0)), - Primitive::kPrimNot, + DataType::Type::kReference, nullptr); parallel_move.AddMove(obj_, Location::RegisterLocation(calling_convention.GetRegisterAt(1)), - Primitive::kPrimNot, + DataType::Type::kReference, nullptr); if (index.IsValid()) { parallel_move.AddMove(index, Location::RegisterLocation(calling_convention.GetRegisterAt(2)), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); codegen->GetMoveResolver()->EmitNativeCode(&parallel_move); } else { @@ -987,7 +987,7 @@ class ReadBarrierForRootSlowPathMIPS64 : public SlowPathCodeMIPS64 { void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { LocationSummary* locations = instruction_->GetLocations(); - Primitive::Type type = Primitive::kPrimNot; + DataType::Type type = DataType::Type::kReference; GpuRegister reg_out = out_.AsRegister<GpuRegister>(); DCHECK(locations->CanCall()); DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out)); @@ -1002,7 +1002,7 @@ class ReadBarrierForRootSlowPathMIPS64 : public SlowPathCodeMIPS64 { CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen); mips64_codegen->MoveLocation(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), root_, - Primitive::kPrimNot); + DataType::Type::kReference); mips64_codegen->InvokeRuntime(kQuickReadBarrierForRootSlow, instruction_, instruction_->GetDexPc(), @@ -1254,7 +1254,7 @@ void CodeGeneratorMIPS64::Bind(HBasicBlock* block) { void CodeGeneratorMIPS64::MoveLocation(Location destination, Location source, - Primitive::Type dst_type) { + DataType::Type dst_type) { if (source.Equals(destination)) { return; } @@ -1262,7 +1262,7 @@ void CodeGeneratorMIPS64::MoveLocation(Location destination, // A valid move can always be inferred from the destination and source // locations. When moving from and to a register, the argument type can be // used to generate 32bit instead of 64bit moves. - bool unspecified_type = (dst_type == Primitive::kPrimVoid); + bool unspecified_type = (dst_type == DataType::Type::kVoid); DCHECK_EQ(unspecified_type, false); if (destination.IsRegister() || destination.IsFpuRegister()) { @@ -1273,27 +1273,27 @@ void CodeGeneratorMIPS64::MoveLocation(Location destination, || src_cst->IsFloatConstant() || src_cst->IsNullConstant()))) { // For stack slots and 32bit constants, a 64bit type is appropriate. - dst_type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat; + dst_type = destination.IsRegister() ? 
DataType::Type::kInt32 : DataType::Type::kFloat32; } else { // If the source is a double stack slot or a 64bit constant, a 64bit // type is appropriate. Else the source is a register, and since the // type has not been specified, we chose a 64bit type to force a 64bit // move. - dst_type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble; + dst_type = destination.IsRegister() ? DataType::Type::kInt64 : DataType::Type::kFloat64; } } - DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(dst_type)) || - (destination.IsRegister() && !Primitive::IsFloatingPointType(dst_type))); + DCHECK((destination.IsFpuRegister() && DataType::IsFloatingPointType(dst_type)) || + (destination.IsRegister() && !DataType::IsFloatingPointType(dst_type))); if (source.IsStackSlot() || source.IsDoubleStackSlot()) { // Move to GPR/FPR from stack LoadOperandType load_type = source.IsStackSlot() ? kLoadWord : kLoadDoubleword; - if (Primitive::IsFloatingPointType(dst_type)) { + if (DataType::IsFloatingPointType(dst_type)) { __ LoadFpuFromOffset(load_type, destination.AsFpuRegister<FpuRegister>(), SP, source.GetStackIndex()); } else { - // TODO: use load_type = kLoadUnsignedWord when type == Primitive::kPrimNot. + // TODO: use load_type = kLoadUnsignedWord when type == DataType::Type::kReference. __ LoadFromOffset(load_type, destination.AsRegister<GpuRegister>(), SP, @@ -1307,27 +1307,27 @@ void CodeGeneratorMIPS64::MoveLocation(Location destination, } else if (source.IsConstant()) { // Move to GPR/FPR from constant GpuRegister gpr = AT; - if (!Primitive::IsFloatingPointType(dst_type)) { + if (!DataType::IsFloatingPointType(dst_type)) { gpr = destination.AsRegister<GpuRegister>(); } - if (dst_type == Primitive::kPrimInt || dst_type == Primitive::kPrimFloat) { + if (dst_type == DataType::Type::kInt32 || dst_type == DataType::Type::kFloat32) { int32_t value = GetInt32ValueOf(source.GetConstant()->AsConstant()); - if (Primitive::IsFloatingPointType(dst_type) && value == 0) { + if (DataType::IsFloatingPointType(dst_type) && value == 0) { gpr = ZERO; } else { __ LoadConst32(gpr, value); } } else { int64_t value = GetInt64ValueOf(source.GetConstant()->AsConstant()); - if (Primitive::IsFloatingPointType(dst_type) && value == 0) { + if (DataType::IsFloatingPointType(dst_type) && value == 0) { gpr = ZERO; } else { __ LoadConst64(gpr, value); } } - if (dst_type == Primitive::kPrimFloat) { + if (dst_type == DataType::Type::kFloat32) { __ Mtc1(gpr, destination.AsFpuRegister<FpuRegister>()); - } else if (dst_type == Primitive::kPrimDouble) { + } else if (dst_type == DataType::Type::kFloat64) { __ Dmtc1(gpr, destination.AsFpuRegister<FpuRegister>()); } } else if (source.IsRegister()) { @@ -1336,7 +1336,7 @@ void CodeGeneratorMIPS64::MoveLocation(Location destination, __ Move(destination.AsRegister<GpuRegister>(), source.AsRegister<GpuRegister>()); } else { DCHECK(destination.IsFpuRegister()); - if (Primitive::Is64BitType(dst_type)) { + if (DataType::Is64BitType(dst_type)) { __ Dmtc1(source.AsRegister<GpuRegister>(), destination.AsFpuRegister<FpuRegister>()); } else { __ Mtc1(source.AsRegister<GpuRegister>(), destination.AsFpuRegister<FpuRegister>()); @@ -1349,16 +1349,16 @@ void CodeGeneratorMIPS64::MoveLocation(Location destination, VectorRegisterFrom(source)); } else { // Move to FPR from FPR - if (dst_type == Primitive::kPrimFloat) { + if (dst_type == DataType::Type::kFloat32) { __ MovS(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>()); } else { - 
DCHECK_EQ(dst_type, Primitive::kPrimDouble); + DCHECK_EQ(dst_type, DataType::Type::kFloat64); __ MovD(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>()); } } } else { DCHECK(destination.IsRegister()); - if (Primitive::Is64BitType(dst_type)) { + if (DataType::Is64BitType(dst_type)) { __ Dmfc1(destination.AsRegister<GpuRegister>(), source.AsFpuRegister<FpuRegister>()); } else { __ Mfc1(destination.AsRegister<GpuRegister>(), source.AsFpuRegister<FpuRegister>()); @@ -1387,13 +1387,14 @@ void CodeGeneratorMIPS64::MoveLocation(Location destination, if (source.IsRegister() || source.IsFpuRegister()) { if (unspecified_type) { if (source.IsRegister()) { - dst_type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong; + dst_type = destination.IsStackSlot() ? DataType::Type::kInt32 : DataType::Type::kInt64; } else { - dst_type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble; + dst_type = + destination.IsStackSlot() ? DataType::Type::kFloat32 : DataType::Type::kFloat64; } } - DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(dst_type)) && - (source.IsFpuRegister() == Primitive::IsFloatingPointType(dst_type))); + DCHECK((destination.IsDoubleStackSlot() == DataType::Is64BitType(dst_type)) && + (source.IsFpuRegister() == DataType::IsFloatingPointType(dst_type))); // Move to stack from GPR/FPR StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword; if (source.IsRegister()) { @@ -1442,7 +1443,7 @@ void CodeGeneratorMIPS64::MoveLocation(Location destination, } } -void CodeGeneratorMIPS64::SwapLocations(Location loc1, Location loc2, Primitive::Type type) { +void CodeGeneratorMIPS64::SwapLocations(Location loc1, Location loc2, DataType::Type type) { DCHECK(!loc1.IsConstant()); DCHECK(!loc2.IsConstant()); @@ -1466,12 +1467,12 @@ void CodeGeneratorMIPS64::SwapLocations(Location loc1, Location loc2, Primitive: // Swap 2 FPRs FpuRegister r1 = loc1.AsFpuRegister<FpuRegister>(); FpuRegister r2 = loc2.AsFpuRegister<FpuRegister>(); - if (type == Primitive::kPrimFloat) { + if (type == DataType::Type::kFloat32) { __ MovS(FTMP, r1); __ MovS(r1, r2); __ MovS(r2, FTMP); } else { - DCHECK_EQ(type, Primitive::kPrimDouble); + DCHECK_EQ(type, DataType::Type::kFloat64); __ MovD(FTMP, r1); __ MovD(r1, r2); __ MovD(r2, FTMP); @@ -1482,7 +1483,7 @@ void CodeGeneratorMIPS64::SwapLocations(Location loc1, Location loc2, Primitive: Location mem_loc = is_slot1 ? loc1 : loc2; LoadOperandType load_type = mem_loc.IsStackSlot() ? kLoadWord : kLoadDoubleword; StoreOperandType store_type = mem_loc.IsStackSlot() ? kStoreWord : kStoreDoubleword; - // TODO: use load_type = kLoadUnsignedWord when type == Primitive::kPrimNot. + // TODO: use load_type = kLoadUnsignedWord when type == DataType::Type::kReference. 
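// Illustrative note on the TODO above: heap references stay 32-bit even on MIPS64
// (see the static_assert on mirror::HeapReference<> in VisitArrayGet below), so
// loading one into a 64-bit GPR has to zero-extend, which is what kLoadUnsignedWord
// (lwu) provides and kLoadWord (lw) does not. Roughly:
#include <cstdint>
static uint64_t WidenHeapReference(uint32_t heap_reference_bits) {
  return static_cast<uint64_t>(heap_reference_bits);  // zero-extend, never sign-extend
}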
__ LoadFromOffset(load_type, TMP, SP, mem_loc.GetStackIndex()); if (reg_loc.IsFpuRegister()) { __ StoreFpuToOffset(store_type, @@ -1859,10 +1860,10 @@ InstructionCodeGeneratorMIPS64::InstructionCodeGeneratorMIPS64(HGraph* graph, void LocationsBuilderMIPS64::HandleBinaryOp(HBinaryOperation* instruction) { DCHECK_EQ(instruction->InputCount(), 2U); LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); - Primitive::Type type = instruction->GetResultType(); + DataType::Type type = instruction->GetResultType(); switch (type) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: { + case DataType::Type::kInt32: + case DataType::Type::kInt64: { locations->SetInAt(0, Location::RequiresRegister()); HInstruction* right = instruction->InputAt(1); bool can_use_imm = false; @@ -1885,8 +1886,8 @@ void LocationsBuilderMIPS64::HandleBinaryOp(HBinaryOperation* instruction) { } break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); @@ -1898,12 +1899,12 @@ void LocationsBuilderMIPS64::HandleBinaryOp(HBinaryOperation* instruction) { } void InstructionCodeGeneratorMIPS64::HandleBinaryOp(HBinaryOperation* instruction) { - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); LocationSummary* locations = instruction->GetLocations(); switch (type) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: { + case DataType::Type::kInt32: + case DataType::Type::kInt64: { GpuRegister dst = locations->Out().AsRegister<GpuRegister>(); GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>(); Location rhs_location = locations->InAt(1); @@ -1933,7 +1934,7 @@ void InstructionCodeGeneratorMIPS64::HandleBinaryOp(HBinaryOperation* instructio else __ Xor(dst, lhs, rhs_reg); } else if (instruction->IsAdd()) { - if (type == Primitive::kPrimInt) { + if (type == DataType::Type::kInt32) { if (use_imm) __ Addiu(dst, lhs, rhs_imm); else @@ -1946,7 +1947,7 @@ void InstructionCodeGeneratorMIPS64::HandleBinaryOp(HBinaryOperation* instructio } } else { DCHECK(instruction->IsSub()); - if (type == Primitive::kPrimInt) { + if (type == DataType::Type::kInt32) { if (use_imm) __ Addiu(dst, lhs, -rhs_imm); else @@ -1960,18 +1961,18 @@ void InstructionCodeGeneratorMIPS64::HandleBinaryOp(HBinaryOperation* instructio } break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>(); FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>(); FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>(); if (instruction->IsAdd()) { - if (type == Primitive::kPrimFloat) + if (type == DataType::Type::kFloat32) __ AddS(dst, lhs, rhs); else __ AddD(dst, lhs, rhs); } else if (instruction->IsSub()) { - if (type == Primitive::kPrimFloat) + if (type == DataType::Type::kFloat32) __ SubS(dst, lhs, rhs); else __ SubD(dst, lhs, rhs); @@ -1989,10 +1990,10 @@ void LocationsBuilderMIPS64::HandleShift(HBinaryOperation* instr) { DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor()); LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr); - Primitive::Type type = instr->GetResultType(); + DataType::Type type = 
instr->GetResultType(); switch (type) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: { + case DataType::Type::kInt32: + case DataType::Type::kInt64: { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1))); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); @@ -2006,11 +2007,11 @@ void LocationsBuilderMIPS64::HandleShift(HBinaryOperation* instr) { void InstructionCodeGeneratorMIPS64::HandleShift(HBinaryOperation* instr) { DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor()); LocationSummary* locations = instr->GetLocations(); - Primitive::Type type = instr->GetType(); + DataType::Type type = instr->GetType(); switch (type) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: { + case DataType::Type::kInt32: + case DataType::Type::kInt64: { GpuRegister dst = locations->Out().AsRegister<GpuRegister>(); GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>(); Location rhs_location = locations->InAt(1); @@ -2026,13 +2027,13 @@ void InstructionCodeGeneratorMIPS64::HandleShift(HBinaryOperation* instr) { if (use_imm) { uint32_t shift_value = rhs_imm & - (type == Primitive::kPrimInt ? kMaxIntShiftDistance : kMaxLongShiftDistance); + (type == DataType::Type::kInt32 ? kMaxIntShiftDistance : kMaxLongShiftDistance); if (shift_value == 0) { if (dst != lhs) { __ Move(dst, lhs); } - } else if (type == Primitive::kPrimInt) { + } else if (type == DataType::Type::kInt32) { if (instr->IsShl()) { __ Sll(dst, lhs, shift_value); } else if (instr->IsShr()) { @@ -2067,7 +2068,7 @@ void InstructionCodeGeneratorMIPS64::HandleShift(HBinaryOperation* instr) { } } } else { - if (type == Primitive::kPrimInt) { + if (type == DataType::Type::kInt32) { if (instr->IsShl()) { __ Sllv(dst, lhs, rhs_reg); } else if (instr->IsShr()) { @@ -2113,9 +2114,9 @@ void InstructionCodeGeneratorMIPS64::VisitAnd(HAnd* instruction) { } void LocationsBuilderMIPS64::VisitArrayGet(HArrayGet* instruction) { - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); bool object_array_get_with_read_barrier = - kEmitCompilerReadBarrier && (type == Primitive::kPrimNot); + kEmitCompilerReadBarrier && (type == DataType::Type::kReference); LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, object_array_get_with_read_barrier @@ -2126,7 +2127,7 @@ void LocationsBuilderMIPS64::VisitArrayGet(HArrayGet* instruction) { } locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); - if (Primitive::IsFloatingPointType(type)) { + if (DataType::IsFloatingPointType(type)) { locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); } else { // The output overlaps in the case of an object array get with @@ -2165,11 +2166,11 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) { uint32_t data_offset = CodeGenerator::GetArrayDataOffset(instruction); auto null_checker = GetImplicitNullChecker(instruction, codegen_); - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); const bool maybe_compressed_char_at = mirror::kUseStringCompression && instruction->IsStringCharAt(); switch (type) { - case Primitive::kPrimBoolean: { + case DataType::Type::kBool: { GpuRegister out = out_loc.AsRegister<GpuRegister>(); if (index.IsConstant()) { size_t offset = @@ -2182,7 +2183,7 @@ void 
InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimByte: { + case DataType::Type::kInt8: { GpuRegister out = out_loc.AsRegister<GpuRegister>(); if (index.IsConstant()) { size_t offset = @@ -2195,7 +2196,7 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimShort: { + case DataType::Type::kInt16: { GpuRegister out = out_loc.AsRegister<GpuRegister>(); if (index.IsConstant()) { size_t offset = @@ -2208,7 +2209,7 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimChar: { + case DataType::Type::kUint16: { GpuRegister out = out_loc.AsRegister<GpuRegister>(); if (maybe_compressed_char_at) { uint32_t count_offset = mirror::String::CountOffset().Uint32Value(); @@ -2260,10 +2261,11 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t)); GpuRegister out = out_loc.AsRegister<GpuRegister>(); - LoadOperandType load_type = (type == Primitive::kPrimNot) ? kLoadUnsignedWord : kLoadWord; + LoadOperandType load_type = + (type == DataType::Type::kReference) ? kLoadUnsignedWord : kLoadWord; if (index.IsConstant()) { size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset; @@ -2275,7 +2277,7 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimNot: { + case DataType::Type::kReference: { static_assert( sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t), "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes."); @@ -2335,7 +2337,7 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { GpuRegister out = out_loc.AsRegister<GpuRegister>(); if (index.IsConstant()) { size_t offset = @@ -2348,7 +2350,7 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { FpuRegister out = out_loc.AsFpuRegister<FpuRegister>(); if (index.IsConstant()) { size_t offset = @@ -2361,7 +2363,7 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { FpuRegister out = out_loc.AsFpuRegister<FpuRegister>(); if (index.IsConstant()) { size_t offset = @@ -2374,7 +2376,7 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimVoid: + case DataType::Type::kVoid: LOG(FATAL) << "Unreachable type " << instruction->GetType(); UNREACHABLE(); } @@ -2419,7 +2421,7 @@ Location LocationsBuilderMIPS64::FpuRegisterOrConstantForStore(HInstruction* ins } void LocationsBuilderMIPS64::VisitArraySet(HArraySet* instruction) { - Primitive::Type value_type = instruction->GetComponentType(); + DataType::Type value_type = instruction->GetComponentType(); bool needs_write_barrier = CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue()); @@ -2433,7 +2435,7 @@ void LocationsBuilderMIPS64::VisitArraySet(HArraySet* instruction) { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); - if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) { + 
if (DataType::IsFloatingPointType(instruction->InputAt(2)->GetType())) { locations->SetInAt(2, FpuRegisterOrConstantForStore(instruction->InputAt(2))); } else { locations->SetInAt(2, RegisterOrZeroConstant(instruction->InputAt(2))); @@ -2449,7 +2451,7 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) { GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>(); Location index = locations->InAt(1); Location value_location = locations->InAt(2); - Primitive::Type value_type = instruction->GetComponentType(); + DataType::Type value_type = instruction->GetComponentType(); bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck(); bool needs_write_barrier = CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue()); @@ -2457,8 +2459,8 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) { GpuRegister base_reg = index.IsConstant() ? obj : TMP; switch (value_type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: { + case DataType::Type::kBool: + case DataType::Type::kInt8: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value(); if (index.IsConstant()) { data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1; @@ -2475,8 +2477,8 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimShort: - case Primitive::kPrimChar: { + case DataType::Type::kInt16: + case DataType::Type::kUint16: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value(); if (index.IsConstant()) { data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2; @@ -2493,7 +2495,7 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value(); if (index.IsConstant()) { data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4; @@ -2510,7 +2512,7 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimNot: { + case DataType::Type::kReference: { if (value_location.IsConstant()) { // Just setting null. 
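// Illustrative sketch of the element addressing used by the array cases here: each
// element lives at data_offset + index * sizeof(component), where data_offset comes
// from mirror::Array::DataOffset(sizeof(component)) and the "<< TIMES_4" above is a
// shift by 2 (i.e. index * sizeof(int32_t)). Helper name and shape are assumptions:
#include <cstddef>
#include <cstdint>
static uintptr_t Int32ElementAddress(uintptr_t array, size_t index, uint32_t data_offset) {
  return array + data_offset + (index << 2);  // constant indices are folded the same way
}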
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value(); @@ -2625,7 +2627,7 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value(); if (index.IsConstant()) { data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8; @@ -2642,7 +2644,7 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value(); if (index.IsConstant()) { data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4; @@ -2659,7 +2661,7 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value(); if (index.IsConstant()) { data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8; @@ -2676,7 +2678,7 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimVoid: + case DataType::Type::kVoid: LOG(FATAL) << "Unreachable type " << instruction->GetType(); UNREACHABLE(); } @@ -2961,24 +2963,24 @@ void InstructionCodeGeneratorMIPS64::VisitClinitCheck(HClinitCheck* check) { } void LocationsBuilderMIPS64::VisitCompare(HCompare* compare) { - Primitive::Type in_type = compare->InputAt(0)->GetType(); + DataType::Type in_type = compare->InputAt(0)->GetType(); LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(compare); switch (in_type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimChar: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kUint16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(compare->InputAt(1))); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); @@ -2992,24 +2994,24 @@ void LocationsBuilderMIPS64::VisitCompare(HCompare* compare) { void InstructionCodeGeneratorMIPS64::VisitCompare(HCompare* instruction) { LocationSummary* locations = instruction->GetLocations(); GpuRegister res = locations->Out().AsRegister<GpuRegister>(); - Primitive::Type in_type = instruction->InputAt(0)->GetType(); + DataType::Type in_type = instruction->InputAt(0)->GetType(); // 0 if: left == right // 1 if: left > right // -1 if: left < right switch (in_type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimChar: - case Primitive::kPrimInt: - case Primitive::kPrimLong: { + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kUint16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: { GpuRegister lhs = 
locations->InAt(0).AsRegister<GpuRegister>(); Location rhs_location = locations->InAt(1); bool use_imm = rhs_location.IsConstant(); GpuRegister rhs = ZERO; if (use_imm) { - if (in_type == Primitive::kPrimLong) { + if (in_type == DataType::Type::kInt64) { int64_t value = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant()->AsConstant()); if (value != 0) { rhs = AT; @@ -3031,7 +3033,7 @@ void InstructionCodeGeneratorMIPS64::VisitCompare(HCompare* instruction) { break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>(); FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>(); Mips64Label done; @@ -3053,7 +3055,7 @@ void InstructionCodeGeneratorMIPS64::VisitCompare(HCompare* instruction) { break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>(); FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>(); Mips64Label done; @@ -3084,13 +3086,13 @@ void LocationsBuilderMIPS64::HandleCondition(HCondition* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); switch (instruction->InputAt(0)->GetType()) { default: - case Primitive::kPrimLong: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); break; @@ -3105,18 +3107,18 @@ void InstructionCodeGeneratorMIPS64::HandleCondition(HCondition* instruction) { return; } - Primitive::Type type = instruction->InputAt(0)->GetType(); + DataType::Type type = instruction->InputAt(0)->GetType(); LocationSummary* locations = instruction->GetLocations(); switch (type) { default: // Integer case. 
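// Illustrative model of the float/double HCompare lowering above: the result is
// 0, 1 or -1, and the instruction's gt-bias decides which way a NaN operand goes,
// mirroring the Dex cmpg/cmpl distinction. A portable equivalent (assumed helper):
static int32_t CompareFp(float lhs, float rhs, bool gt_bias) {
  if (lhs == rhs) return 0;
  if (lhs < rhs) return -1;
  if (lhs > rhs) return 1;
  return gt_bias ? 1 : -1;  // unordered: at least one operand is NaN
}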
GenerateIntLongCompare(instruction->GetCondition(), /* is64bit */ false, locations); return; - case Primitive::kPrimLong: + case DataType::Type::kInt64: GenerateIntLongCompare(instruction->GetCondition(), /* is64bit */ true, locations); return; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: GenerateFpCompare(instruction->GetCondition(), instruction->IsGtBias(), type, locations); return; } @@ -3124,7 +3126,7 @@ void InstructionCodeGeneratorMIPS64::HandleCondition(HCondition* instruction) { void InstructionCodeGeneratorMIPS64::DivRemOneOrMinusOne(HBinaryOperation* instruction) { DCHECK(instruction->IsDiv() || instruction->IsRem()); - Primitive::Type type = instruction->GetResultType(); + DataType::Type type = instruction->GetResultType(); LocationSummary* locations = instruction->GetLocations(); Location second = locations->InAt(1); @@ -3139,10 +3141,10 @@ void InstructionCodeGeneratorMIPS64::DivRemOneOrMinusOne(HBinaryOperation* instr __ Move(out, ZERO); } else { if (imm == -1) { - if (type == Primitive::kPrimInt) { + if (type == DataType::Type::kInt32) { __ Subu(out, ZERO, dividend); } else { - DCHECK_EQ(type, Primitive::kPrimLong); + DCHECK_EQ(type, DataType::Type::kInt64); __ Dsubu(out, ZERO, dividend); } } else if (out != dividend) { @@ -3153,7 +3155,7 @@ void InstructionCodeGeneratorMIPS64::DivRemOneOrMinusOne(HBinaryOperation* instr void InstructionCodeGeneratorMIPS64::DivRemByPowerOfTwo(HBinaryOperation* instruction) { DCHECK(instruction->IsDiv() || instruction->IsRem()); - Primitive::Type type = instruction->GetResultType(); + DataType::Type type = instruction->GetResultType(); LocationSummary* locations = instruction->GetLocations(); Location second = locations->InAt(1); @@ -3166,7 +3168,7 @@ void InstructionCodeGeneratorMIPS64::DivRemByPowerOfTwo(HBinaryOperation* instru int ctz_imm = CTZ(abs_imm); if (instruction->IsDiv()) { - if (type == Primitive::kPrimInt) { + if (type == DataType::Type::kInt32) { if (ctz_imm == 1) { // Fast path for division by +/-2, which is very common. __ Srl(TMP, dividend, 31); @@ -3180,7 +3182,7 @@ void InstructionCodeGeneratorMIPS64::DivRemByPowerOfTwo(HBinaryOperation* instru __ Subu(out, ZERO, out); } } else { - DCHECK_EQ(type, Primitive::kPrimLong); + DCHECK_EQ(type, DataType::Type::kInt64); if (ctz_imm == 1) { // Fast path for division by +/-2, which is very common. __ Dsrl32(TMP, dividend, 31); @@ -3203,7 +3205,7 @@ void InstructionCodeGeneratorMIPS64::DivRemByPowerOfTwo(HBinaryOperation* instru } } } else { - if (type == Primitive::kPrimInt) { + if (type == DataType::Type::kInt32) { if (ctz_imm == 1) { // Fast path for modulo +/-2, which is very common. __ Sra(TMP, dividend, 31); @@ -3223,7 +3225,7 @@ void InstructionCodeGeneratorMIPS64::DivRemByPowerOfTwo(HBinaryOperation* instru __ Subu(out, out, TMP); } } else { - DCHECK_EQ(type, Primitive::kPrimLong); + DCHECK_EQ(type, DataType::Type::kInt64); if (ctz_imm == 1) { // Fast path for modulo +/-2, which is very common. 
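// Illustrative aside on the two constant-divisor strategies used here. For divisors
// that are powers of two, DivRemByPowerOfTwo biases negative dividends so that the
// arithmetic shift rounds toward zero (the Srl/Dsrl32 of the sign bit above); for
// other constants, GenerateDivRemWithAnyConstant below multiplies by a precomputed
// "magic" value and keeps the high half, using the pair produced by
// CalculateMagicAndShiftForDivRem. Portable sketches with assumed helper names
// (0x92492493 / shift 2 are the well-known values for divisor 7):
#include <cstdint>
static int32_t DivideBy2(int32_t dividend) {
  uint32_t bias = static_cast<uint32_t>(dividend) >> 31;  // 1 iff dividend < 0
  return (dividend + static_cast<int32_t>(bias)) >> 1;    // arithmetic shift, rounds toward zero
}
static int32_t DivideBy7(int32_t dividend) {
  const int64_t magic = -1840700269;  // 0x92492493 read as a signed 32-bit value
  int32_t q = static_cast<int32_t>((dividend * magic) >> 32);  // multiply-high (MuhR6 below)
  q += dividend;                               // needed because the magic constant is negative
  q >>= 2;                                     // shift for divisor 7
  q += static_cast<uint32_t>(dividend) >> 31;  // +1 when the dividend is negative
  return q;                                    // == dividend / 7 with truncation semantics
}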
__ Dsra32(TMP, dividend, 31); @@ -3266,17 +3268,17 @@ void InstructionCodeGeneratorMIPS64::GenerateDivRemWithAnyConstant(HBinaryOperat GpuRegister dividend = locations->InAt(0).AsRegister<GpuRegister>(); int64_t imm = Int64FromConstant(second.GetConstant()); - Primitive::Type type = instruction->GetResultType(); - DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong) << type; + DataType::Type type = instruction->GetResultType(); + DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64) << type; int64_t magic; int shift; CalculateMagicAndShiftForDivRem(imm, - (type == Primitive::kPrimLong), + (type == DataType::Type::kInt64), &magic, &shift); - if (type == Primitive::kPrimInt) { + if (type == DataType::Type::kInt32) { __ LoadConst32(TMP, magic); __ MuhR6(TMP, dividend, TMP); @@ -3331,8 +3333,8 @@ void InstructionCodeGeneratorMIPS64::GenerateDivRemWithAnyConstant(HBinaryOperat void InstructionCodeGeneratorMIPS64::GenerateDivRemIntegral(HBinaryOperation* instruction) { DCHECK(instruction->IsDiv() || instruction->IsRem()); - Primitive::Type type = instruction->GetResultType(); - DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong) << type; + DataType::Type type = instruction->GetResultType(); + DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64) << type; LocationSummary* locations = instruction->GetLocations(); GpuRegister out = locations->Out().AsRegister<GpuRegister>(); @@ -3354,12 +3356,12 @@ void InstructionCodeGeneratorMIPS64::GenerateDivRemIntegral(HBinaryOperation* in GpuRegister dividend = locations->InAt(0).AsRegister<GpuRegister>(); GpuRegister divisor = second.AsRegister<GpuRegister>(); if (instruction->IsDiv()) { - if (type == Primitive::kPrimInt) + if (type == DataType::Type::kInt32) __ DivR6(out, dividend, divisor); else __ Ddiv(out, dividend, divisor); } else { - if (type == Primitive::kPrimInt) + if (type == DataType::Type::kInt32) __ ModR6(out, dividend, divisor); else __ Dmod(out, dividend, divisor); @@ -3371,15 +3373,15 @@ void LocationsBuilderMIPS64::VisitDiv(HDiv* div) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall); switch (div->GetResultType()) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt32: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1))); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); @@ -3391,20 +3393,20 @@ void LocationsBuilderMIPS64::VisitDiv(HDiv* div) { } void InstructionCodeGeneratorMIPS64::VisitDiv(HDiv* instruction) { - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); LocationSummary* locations = instruction->GetLocations(); switch (type) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt32: + case DataType::Type::kInt64: GenerateDivRemIntegral(instruction); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>(); 
FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>(); FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>(); - if (type == Primitive::kPrimFloat) + if (type == DataType::Type::kFloat32) __ DivS(dst, lhs, rhs); else __ DivD(dst, lhs, rhs); @@ -3426,9 +3428,9 @@ void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instructio codegen_->AddSlowPath(slow_path); Location value = instruction->GetLocations()->InAt(0); - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); - if (!Primitive::IsIntegralType(type)) { + if (!DataType::IsIntegralType(type)) { LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck."; return; } @@ -3864,12 +3866,12 @@ void InstructionCodeGeneratorMIPS64::GenerateIntLongCompareAndBranch(IfCondition void InstructionCodeGeneratorMIPS64::GenerateFpCompare(IfCondition cond, bool gt_bias, - Primitive::Type type, + DataType::Type type, LocationSummary* locations) { GpuRegister dst = locations->Out().AsRegister<GpuRegister>(); FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>(); FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>(); - if (type == Primitive::kPrimFloat) { + if (type == DataType::Type::kFloat32) { switch (cond) { case kCondEQ: __ CmpEqS(FTMP, lhs, rhs); @@ -3922,7 +3924,7 @@ void InstructionCodeGeneratorMIPS64::GenerateFpCompare(IfCondition cond, UNREACHABLE(); } } else { - DCHECK_EQ(type, Primitive::kPrimDouble); + DCHECK_EQ(type, DataType::Type::kFloat64); switch (cond) { case kCondEQ: __ CmpEqD(FTMP, lhs, rhs); @@ -3979,12 +3981,12 @@ void InstructionCodeGeneratorMIPS64::GenerateFpCompare(IfCondition cond, bool InstructionCodeGeneratorMIPS64::MaterializeFpCompare(IfCondition cond, bool gt_bias, - Primitive::Type type, + DataType::Type type, LocationSummary* input_locations, FpuRegister dst) { FpuRegister lhs = input_locations->InAt(0).AsFpuRegister<FpuRegister>(); FpuRegister rhs = input_locations->InAt(1).AsFpuRegister<FpuRegister>(); - if (type == Primitive::kPrimFloat) { + if (type == DataType::Type::kFloat32) { switch (cond) { case kCondEQ: __ CmpEqS(dst, lhs, rhs); @@ -4025,7 +4027,7 @@ bool InstructionCodeGeneratorMIPS64::MaterializeFpCompare(IfCondition cond, UNREACHABLE(); } } else { - DCHECK_EQ(type, Primitive::kPrimDouble); + DCHECK_EQ(type, DataType::Type::kFloat64); switch (cond) { case kCondEQ: __ CmpEqD(dst, lhs, rhs); @@ -4070,12 +4072,12 @@ bool InstructionCodeGeneratorMIPS64::MaterializeFpCompare(IfCondition cond, void InstructionCodeGeneratorMIPS64::GenerateFpCompareAndBranch(IfCondition cond, bool gt_bias, - Primitive::Type type, + DataType::Type type, LocationSummary* locations, Mips64Label* label) { FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>(); FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>(); - if (type == Primitive::kPrimFloat) { + if (type == DataType::Type::kFloat32) { switch (cond) { case kCondEQ: __ CmpEqS(FTMP, lhs, rhs); @@ -4122,7 +4124,7 @@ void InstructionCodeGeneratorMIPS64::GenerateFpCompareAndBranch(IfCondition cond UNREACHABLE(); } } else { - DCHECK_EQ(type, Primitive::kPrimDouble); + DCHECK_EQ(type, DataType::Type::kFloat64); switch (cond) { case kCondEQ: __ CmpEqD(FTMP, lhs, rhs); @@ -4216,7 +4218,7 @@ void InstructionCodeGeneratorMIPS64::GenerateTestAndBranch(HInstruction* instruc // The condition instruction has not been materialized, use its inputs as // the comparison and its condition as the branch condition. 
HCondition* condition = cond->AsCondition(); - Primitive::Type type = condition->InputAt(0)->GetType(); + DataType::Type type = condition->InputAt(0)->GetType(); LocationSummary* locations = cond->GetLocations(); IfCondition if_cond = condition->GetCondition(); Mips64Label* branch_target = true_target; @@ -4230,11 +4232,11 @@ void InstructionCodeGeneratorMIPS64::GenerateTestAndBranch(HInstruction* instruc default: GenerateIntLongCompareAndBranch(if_cond, /* is64bit */ false, locations, branch_target); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: GenerateIntLongCompareAndBranch(if_cond, /* is64bit */ true, locations, branch_target); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: GenerateFpCompareAndBranch(if_cond, condition->IsGtBias(), type, locations, branch_target); break; } @@ -4299,8 +4301,9 @@ static bool CanMoveConditionally(HSelect* select, LocationSummary* locations_to_ HInstruction* cond = select->InputAt(/* condition_input_index */ 2); HCondition* condition = cond->AsCondition(); - Primitive::Type cond_type = materialized ? Primitive::kPrimInt : condition->InputAt(0)->GetType(); - Primitive::Type dst_type = select->GetType(); + DataType::Type cond_type = + materialized ? DataType::Type::kInt32 : condition->InputAt(0)->GetType(); + DataType::Type dst_type = select->GetType(); HConstant* cst_true_value = select->GetTrueValue()->AsConstant(); HConstant* cst_false_value = select->GetFalseValue()->AsConstant(); @@ -4314,8 +4317,8 @@ static bool CanMoveConditionally(HSelect* select, LocationSummary* locations_to_ bool use_const_for_true_in = false; if (!cond->IsConstant()) { - if (!Primitive::IsFloatingPointType(cond_type)) { - if (!Primitive::IsFloatingPointType(dst_type)) { + if (!DataType::IsFloatingPointType(cond_type)) { + if (!DataType::IsFloatingPointType(dst_type)) { // Moving int/long on int/long condition. if (is_true_value_zero_constant) { // seleqz out_reg, false_reg, cond_reg @@ -4358,7 +4361,7 @@ static bool CanMoveConditionally(HSelect* select, LocationSummary* locations_to_ } } } else { - if (!Primitive::IsFloatingPointType(dst_type)) { + if (!DataType::IsFloatingPointType(dst_type)) { // Moving int/long on float/double condition. can_move_conditionally = true; if (is_true_value_zero_constant) { @@ -4404,7 +4407,7 @@ static bool CanMoveConditionally(HSelect* select, LocationSummary* locations_to_ locations_to_set->SetInAt(0, Location::ConstantLocation(cst_false_value)); } else { locations_to_set->SetInAt(0, - Primitive::IsFloatingPointType(dst_type) + DataType::IsFloatingPointType(dst_type) ? Location::RequiresFpuRegister() : Location::RequiresRegister()); } @@ -4412,7 +4415,7 @@ static bool CanMoveConditionally(HSelect* select, LocationSummary* locations_to_ locations_to_set->SetInAt(1, Location::ConstantLocation(cst_true_value)); } else { locations_to_set->SetInAt(1, - Primitive::IsFloatingPointType(dst_type) + DataType::IsFloatingPointType(dst_type) ? Location::RequiresFpuRegister() : Location::RequiresRegister()); } @@ -4421,7 +4424,7 @@ static bool CanMoveConditionally(HSelect* select, LocationSummary* locations_to_ } if (can_move_conditionally) { - locations_to_set->SetOut(Primitive::IsFloatingPointType(dst_type) + locations_to_set->SetOut(DataType::IsFloatingPointType(dst_type) ? 
Location::RequiresFpuRegister() : Location::RequiresRegister()); } else { @@ -4441,9 +4444,9 @@ void InstructionCodeGeneratorMIPS64::GenConditionalMove(HSelect* select) { HInstruction* cond = select->InputAt(/* condition_input_index */ 2); GpuRegister cond_reg = TMP; FpuRegister fcond_reg = FTMP; - Primitive::Type cond_type = Primitive::kPrimInt; + DataType::Type cond_type = DataType::Type::kInt32; bool cond_inverted = false; - Primitive::Type dst_type = select->GetType(); + DataType::Type dst_type = select->GetType(); if (IsBooleanValueOrMaterializedCondition(cond)) { cond_reg = locations->InAt(/* condition_input_index */ 2).AsRegister<GpuRegister>(); @@ -4459,14 +4462,14 @@ void InstructionCodeGeneratorMIPS64::GenConditionalMove(HSelect* select) { cond_locations, cond_reg); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: cond_inverted = MaterializeIntLongCompare(if_cond, /* is64bit */ true, cond_locations, cond_reg); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: cond_inverted = MaterializeFpCompare(if_cond, condition->IsGtBias(), cond_type, @@ -4485,7 +4488,7 @@ void InstructionCodeGeneratorMIPS64::GenConditionalMove(HSelect* select) { switch (dst_type) { default: - if (Primitive::IsFloatingPointType(cond_type)) { + if (DataType::IsFloatingPointType(cond_type)) { __ Mfc1(cond_reg, fcond_reg); } if (true_src.IsConstant()) { @@ -4512,8 +4515,8 @@ void InstructionCodeGeneratorMIPS64::GenConditionalMove(HSelect* select) { __ Or(dst.AsRegister<GpuRegister>(), AT, TMP); } break; - case Primitive::kPrimFloat: { - if (!Primitive::IsFloatingPointType(cond_type)) { + case DataType::Type::kFloat32: { + if (!DataType::IsFloatingPointType(cond_type)) { // sel*.fmt tests bit 0 of the condition register, account for that. __ Sltu(TMP, ZERO, cond_reg); __ Mtc1(TMP, fcond_reg); @@ -4547,8 +4550,8 @@ void InstructionCodeGeneratorMIPS64::GenConditionalMove(HSelect* select) { } break; } - case Primitive::kPrimDouble: { - if (!Primitive::IsFloatingPointType(cond_type)) { + case DataType::Type::kFloat64: { + if (!DataType::IsFloatingPointType(cond_type)) { // sel*.fmt tests bit 0 of the condition register, account for that. __ Sltu(TMP, ZERO, cond_reg); __ Mtc1(TMP, fcond_reg); @@ -4632,9 +4635,9 @@ void CodeGeneratorMIPS64::GenerateNop() { void LocationsBuilderMIPS64::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) { - Primitive::Type field_type = field_info.GetFieldType(); + DataType::Type field_type = field_info.GetFieldType(); bool object_field_get_with_read_barrier = - kEmitCompilerReadBarrier && (field_type == Primitive::kPrimNot); + kEmitCompilerReadBarrier && (field_type == DataType::Type::kReference); LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary( instruction, object_field_get_with_read_barrier @@ -4644,7 +4647,7 @@ void LocationsBuilderMIPS64::HandleFieldGet(HInstruction* instruction, locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. 
} locations->SetInAt(0, Location::RequiresRegister()); - if (Primitive::IsFloatingPointType(instruction->GetType())) { + if (DataType::IsFloatingPointType(instruction->GetType())) { locations->SetOut(Location::RequiresFpuRegister()); } else { // The output overlaps in the case of an object field get with @@ -4666,7 +4669,7 @@ void LocationsBuilderMIPS64::HandleFieldGet(HInstruction* instruction, void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) { - Primitive::Type type = field_info.GetFieldType(); + DataType::Type type = field_info.GetFieldType(); LocationSummary* locations = instruction->GetLocations(); Location obj_loc = locations->InAt(0); GpuRegister obj = obj_loc.AsRegister<GpuRegister>(); @@ -4677,37 +4680,37 @@ void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction, auto null_checker = GetImplicitNullChecker(instruction, codegen_); switch (type) { - case Primitive::kPrimBoolean: + case DataType::Type::kBool: load_type = kLoadUnsignedByte; break; - case Primitive::kPrimByte: + case DataType::Type::kInt8: load_type = kLoadSignedByte; break; - case Primitive::kPrimShort: + case DataType::Type::kInt16: load_type = kLoadSignedHalfword; break; - case Primitive::kPrimChar: + case DataType::Type::kUint16: load_type = kLoadUnsignedHalfword; break; - case Primitive::kPrimInt: - case Primitive::kPrimFloat: + case DataType::Type::kInt32: + case DataType::Type::kFloat32: load_type = kLoadWord; break; - case Primitive::kPrimLong: - case Primitive::kPrimDouble: + case DataType::Type::kInt64: + case DataType::Type::kFloat64: load_type = kLoadDoubleword; break; - case Primitive::kPrimNot: + case DataType::Type::kReference: load_type = kLoadUnsignedWord; break; - case Primitive::kPrimVoid: + case DataType::Type::kVoid: LOG(FATAL) << "Unreachable type " << type; UNREACHABLE(); } - if (!Primitive::IsFloatingPointType(type)) { + if (!DataType::IsFloatingPointType(type)) { DCHECK(dst_loc.IsRegister()); GpuRegister dst = dst_loc.AsRegister<GpuRegister>(); - if (type == Primitive::kPrimNot) { + if (type == DataType::Type::kReference) { // /* HeapReference<Object> */ dst = *(obj + offset) if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { Location temp_loc = @@ -4744,7 +4747,7 @@ void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction, // Memory barriers, in the case of references, are handled in the // previous switch statement. 
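The load/store switches in these hunks spell out the full correspondence between the old Primitive names and the new DataType names. As a reference for reading the rest of the patch, a minimal self-contained sketch of that one-to-one renaming (illustrative only; the Renamed() helper below is hypothetical and not part of the patch):

enum class OldPrimitive { kPrimBoolean, kPrimByte, kPrimChar, kPrimShort, kPrimInt,
                          kPrimLong, kPrimFloat, kPrimDouble, kPrimNot, kPrimVoid };
enum class NewType { kBool, kInt8, kUint16, kInt16, kInt32, kInt64,
                     kFloat32, kFloat64, kReference, kVoid };

// Hypothetical helper showing the mapping each '-'/'+' pair in this patch follows.
constexpr NewType Renamed(OldPrimitive p) {
  switch (p) {
    case OldPrimitive::kPrimBoolean: return NewType::kBool;
    case OldPrimitive::kPrimByte:    return NewType::kInt8;
    case OldPrimitive::kPrimChar:    return NewType::kUint16;  // Java char is an unsigned 16-bit value.
    case OldPrimitive::kPrimShort:   return NewType::kInt16;
    case OldPrimitive::kPrimInt:     return NewType::kInt32;
    case OldPrimitive::kPrimLong:    return NewType::kInt64;
    case OldPrimitive::kPrimFloat:   return NewType::kFloat32;
    case OldPrimitive::kPrimDouble:  return NewType::kFloat64;
    case OldPrimitive::kPrimNot:     return NewType::kReference;
    case OldPrimitive::kPrimVoid:    return NewType::kVoid;
  }
  return NewType::kVoid;  // Unreachable for valid enum values.
}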
- if (is_volatile && (type != Primitive::kPrimNot)) { + if (is_volatile && (type != DataType::Type::kReference)) { GenerateMemoryBarrier(MemBarrierKind::kLoadAny); } } @@ -4754,7 +4757,7 @@ void LocationsBuilderMIPS64::HandleFieldSet(HInstruction* instruction, LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); - if (Primitive::IsFloatingPointType(instruction->InputAt(1)->GetType())) { + if (DataType::IsFloatingPointType(instruction->InputAt(1)->GetType())) { locations->SetInAt(1, FpuRegisterOrConstantForStore(instruction->InputAt(1))); } else { locations->SetInAt(1, RegisterOrZeroConstant(instruction->InputAt(1))); @@ -4764,7 +4767,7 @@ void LocationsBuilderMIPS64::HandleFieldSet(HInstruction* instruction, void InstructionCodeGeneratorMIPS64::HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info, bool value_can_be_null) { - Primitive::Type type = field_info.GetFieldType(); + DataType::Type type = field_info.GetFieldType(); LocationSummary* locations = instruction->GetLocations(); GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>(); Location value_location = locations->InAt(1); @@ -4775,24 +4778,24 @@ void InstructionCodeGeneratorMIPS64::HandleFieldSet(HInstruction* instruction, auto null_checker = GetImplicitNullChecker(instruction, codegen_); switch (type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: + case DataType::Type::kBool: + case DataType::Type::kInt8: store_type = kStoreByte; break; - case Primitive::kPrimShort: - case Primitive::kPrimChar: + case DataType::Type::kInt16: + case DataType::Type::kUint16: store_type = kStoreHalfword; break; - case Primitive::kPrimInt: - case Primitive::kPrimFloat: - case Primitive::kPrimNot: + case DataType::Type::kInt32: + case DataType::Type::kFloat32: + case DataType::Type::kReference: store_type = kStoreWord; break; - case Primitive::kPrimLong: - case Primitive::kPrimDouble: + case DataType::Type::kInt64: + case DataType::Type::kFloat64: store_type = kStoreDoubleword; break; - case Primitive::kPrimVoid: + case DataType::Type::kVoid: LOG(FATAL) << "Unreachable type " << type; UNREACHABLE(); } @@ -4805,14 +4808,14 @@ void InstructionCodeGeneratorMIPS64::HandleFieldSet(HInstruction* instruction, int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant()); __ StoreConstToOffset(store_type, value, obj, offset, TMP, null_checker); } else { - if (!Primitive::IsFloatingPointType(type)) { + if (!DataType::IsFloatingPointType(type)) { DCHECK(value_location.IsRegister()); GpuRegister src = value_location.AsRegister<GpuRegister>(); if (kPoisonHeapReferences && needs_write_barrier) { // Note that in the case where `value` is a null reference, // we do not enter this block, as a null reference does not // need poisoning. 
- DCHECK_EQ(type, Primitive::kPrimNot); + DCHECK_EQ(type, DataType::Type::kReference); __ PoisonHeapReference(TMP, src); __ StoreToOffset(store_type, TMP, obj, offset, null_checker); } else { @@ -6247,15 +6250,15 @@ void LocationsBuilderMIPS64::VisitMul(HMul* mul) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall); switch (mul->GetResultType()) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt32: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); @@ -6267,27 +6270,27 @@ void LocationsBuilderMIPS64::VisitMul(HMul* mul) { } void InstructionCodeGeneratorMIPS64::VisitMul(HMul* instruction) { - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); LocationSummary* locations = instruction->GetLocations(); switch (type) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: { + case DataType::Type::kInt32: + case DataType::Type::kInt64: { GpuRegister dst = locations->Out().AsRegister<GpuRegister>(); GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>(); GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>(); - if (type == Primitive::kPrimInt) + if (type == DataType::Type::kInt32) __ MulR6(dst, lhs, rhs); else __ Dmul(dst, lhs, rhs); break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>(); FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>(); FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>(); - if (type == Primitive::kPrimFloat) + if (type == DataType::Type::kFloat32) __ MulS(dst, lhs, rhs); else __ MulD(dst, lhs, rhs); @@ -6302,14 +6305,14 @@ void LocationsBuilderMIPS64::VisitNeg(HNeg* neg) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall); switch (neg->GetResultType()) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt32: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); break; @@ -6320,25 +6323,25 @@ void LocationsBuilderMIPS64::VisitNeg(HNeg* neg) { } void InstructionCodeGeneratorMIPS64::VisitNeg(HNeg* instruction) { - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); LocationSummary* locations = instruction->GetLocations(); switch (type) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: { + case DataType::Type::kInt32: + case DataType::Type::kInt64: { GpuRegister dst = locations->Out().AsRegister<GpuRegister>(); GpuRegister src = 
locations->InAt(0).AsRegister<GpuRegister>(); - if (type == Primitive::kPrimInt) + if (type == DataType::Type::kInt32) __ Subu(dst, ZERO, src); else __ Dsubu(dst, ZERO, src); break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>(); FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>(); - if (type == Primitive::kPrimFloat) + if (type == DataType::Type::kFloat32) __ NegS(dst, src); else __ NegD(dst, src); @@ -6353,7 +6356,7 @@ void LocationsBuilderMIPS64::VisitNewArray(HNewArray* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly); InvokeRuntimeCallingConvention calling_convention; - locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot)); + locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kReference)); locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); } @@ -6377,7 +6380,7 @@ void LocationsBuilderMIPS64::VisitNewInstance(HNewInstance* instruction) { } else { locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); } - locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot)); + locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kReference)); } void InstructionCodeGeneratorMIPS64::VisitNewInstance(HNewInstance* instruction) { @@ -6406,12 +6409,12 @@ void LocationsBuilderMIPS64::VisitNot(HNot* instruction) { } void InstructionCodeGeneratorMIPS64::VisitNot(HNot* instruction) { - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); LocationSummary* locations = instruction->GetLocations(); switch (type) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: { + case DataType::Type::kInt32: + case DataType::Type::kInt64: { GpuRegister dst = locations->Out().AsRegister<GpuRegister>(); GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>(); __ Nor(dst, src, ZERO); @@ -6520,22 +6523,22 @@ void InstructionCodeGeneratorMIPS64::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED } void LocationsBuilderMIPS64::VisitRem(HRem* rem) { - Primitive::Type type = rem->GetResultType(); + DataType::Type type = rem->GetResultType(); LocationSummary::CallKind call_kind = - Primitive::IsFloatingPointType(type) ? LocationSummary::kCallOnMainOnly - : LocationSummary::kNoCall; + DataType::IsFloatingPointType(type) ? 
LocationSummary::kCallOnMainOnly + : LocationSummary::kNoCall; LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind); switch (type) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt32: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1))); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0))); locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1))); @@ -6549,19 +6552,20 @@ void LocationsBuilderMIPS64::VisitRem(HRem* rem) { } void InstructionCodeGeneratorMIPS64::VisitRem(HRem* instruction) { - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); switch (type) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt32: + case DataType::Type::kInt64: GenerateDivRemIntegral(instruction); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { - QuickEntrypointEnum entrypoint = (type == Primitive::kPrimFloat) ? kQuickFmodf : kQuickFmod; + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { + QuickEntrypointEnum entrypoint = + (type == DataType::Type::kFloat32) ? kQuickFmodf : kQuickFmod; codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc()); - if (type == Primitive::kPrimFloat) { + if (type == DataType::Type::kFloat32) { CheckEntrypointTypes<kQuickFmodf, float, float, float>(); } else { CheckEntrypointTypes<kQuickFmod, double, double, double>(); @@ -6592,7 +6596,7 @@ void InstructionCodeGeneratorMIPS64::VisitMemoryBarrier(HMemoryBarrier* memory_b void LocationsBuilderMIPS64::VisitReturn(HReturn* ret) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret); - Primitive::Type return_type = ret->InputAt(0)->GetType(); + DataType::Type return_type = ret->InputAt(0)->GetType(); locations->SetInAt(0, Mips64ReturnLocation(return_type)); } @@ -6761,24 +6765,24 @@ void InstructionCodeGeneratorMIPS64::VisitThrow(HThrow* instruction) { } void LocationsBuilderMIPS64::VisitTypeConversion(HTypeConversion* conversion) { - Primitive::Type input_type = conversion->GetInputType(); - Primitive::Type result_type = conversion->GetResultType(); + DataType::Type input_type = conversion->GetInputType(); + DataType::Type result_type = conversion->GetResultType(); DCHECK_NE(input_type, result_type); - if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) || - (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) { + if ((input_type == DataType::Type::kReference) || (input_type == DataType::Type::kVoid) || + (result_type == DataType::Type::kReference) || (result_type == DataType::Type::kVoid)) { LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type; } LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion); - if (Primitive::IsFloatingPointType(input_type)) { + if (DataType::IsFloatingPointType(input_type)) { locations->SetInAt(0, Location::RequiresFpuRegister()); } else { locations->SetInAt(0, Location::RequiresRegister()); } - if 
(Primitive::IsFloatingPointType(result_type)) { + if (DataType::IsFloatingPointType(result_type)) { locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); } else { locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); @@ -6787,21 +6791,21 @@ void LocationsBuilderMIPS64::VisitTypeConversion(HTypeConversion* conversion) { void InstructionCodeGeneratorMIPS64::VisitTypeConversion(HTypeConversion* conversion) { LocationSummary* locations = conversion->GetLocations(); - Primitive::Type result_type = conversion->GetResultType(); - Primitive::Type input_type = conversion->GetInputType(); + DataType::Type result_type = conversion->GetResultType(); + DataType::Type input_type = conversion->GetInputType(); DCHECK_NE(input_type, result_type); - if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) { + if (DataType::IsIntegralType(result_type) && DataType::IsIntegralType(input_type)) { GpuRegister dst = locations->Out().AsRegister<GpuRegister>(); GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>(); switch (result_type) { - case Primitive::kPrimChar: + case DataType::Type::kUint16: __ Andi(dst, src, 0xFFFF); break; - case Primitive::kPrimByte: - if (input_type == Primitive::kPrimLong) { + case DataType::Type::kInt8: + if (input_type == DataType::Type::kInt64) { // Type conversion from long to types narrower than int is a result of code // transformations. To avoid unpredictable results for SEB and SEH, we first // need to sign-extend the low 32-bit value into bits 32 through 63. @@ -6811,8 +6815,8 @@ void InstructionCodeGeneratorMIPS64::VisitTypeConversion(HTypeConversion* conver __ Seb(dst, src); } break; - case Primitive::kPrimShort: - if (input_type == Primitive::kPrimLong) { + case DataType::Type::kInt16: + if (input_type == DataType::Type::kInt64) { // Type conversion from long to types narrower than int is a result of code // transformations. To avoid unpredictable results for SEB and SEH, we first // need to sign-extend the low 32-bit value into bits 32 through 63. @@ -6822,12 +6826,12 @@ void InstructionCodeGeneratorMIPS64::VisitTypeConversion(HTypeConversion* conver __ Seh(dst, src); } break; - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt32: + case DataType::Type::kInt64: // Sign-extend 32-bit int into bits 32 through 63 for int-to-long and long-to-int // conversions, except when the input and output registers are the same and we are not // converting longs to shorter types. In these cases, do nothing. 
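The surrounding VisitTypeConversion, VisitRem, and VisitDivZeroCheck hunks dispatch on DataType's category helpers instead of Primitive's. A minimal sketch of the assumed shape of those predicates, reusing the NewType enum from the sketch above (the authoritative definitions live in optimizing/data_type.h, which this patch adds):

// Assumed shape only; see DataType::IsFloatingPointType/IsIntegralType for the real code.
static bool IsFloatingPointType(NewType type) {
  return type == NewType::kFloat32 || type == NewType::kFloat64;
}

static bool IsIntegralType(NewType type) {
  // Booleans are assumed to be grouped with the integral types here,
  // mirroring the old Primitive::IsIntegralType behavior.
  switch (type) {
    case NewType::kBool:
    case NewType::kInt8:
    case NewType::kUint16:
    case NewType::kInt16:
    case NewType::kInt32:
    case NewType::kInt64:
      return true;
    default:
      return false;
  }
}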
- if ((input_type == Primitive::kPrimLong) || (dst != src)) { + if ((input_type == DataType::Type::kInt64) || (dst != src)) { __ Sll(dst, src, 0); } break; @@ -6836,49 +6840,49 @@ void InstructionCodeGeneratorMIPS64::VisitTypeConversion(HTypeConversion* conver LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type; } - } else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) { + } else if (DataType::IsFloatingPointType(result_type) && DataType::IsIntegralType(input_type)) { FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>(); GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>(); - if (input_type == Primitive::kPrimLong) { + if (input_type == DataType::Type::kInt64) { __ Dmtc1(src, FTMP); - if (result_type == Primitive::kPrimFloat) { + if (result_type == DataType::Type::kFloat32) { __ Cvtsl(dst, FTMP); } else { __ Cvtdl(dst, FTMP); } } else { __ Mtc1(src, FTMP); - if (result_type == Primitive::kPrimFloat) { + if (result_type == DataType::Type::kFloat32) { __ Cvtsw(dst, FTMP); } else { __ Cvtdw(dst, FTMP); } } - } else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) { - CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong); + } else if (DataType::IsIntegralType(result_type) && DataType::IsFloatingPointType(input_type)) { + CHECK(result_type == DataType::Type::kInt32 || result_type == DataType::Type::kInt64); GpuRegister dst = locations->Out().AsRegister<GpuRegister>(); FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>(); - if (result_type == Primitive::kPrimLong) { - if (input_type == Primitive::kPrimFloat) { + if (result_type == DataType::Type::kInt64) { + if (input_type == DataType::Type::kFloat32) { __ TruncLS(FTMP, src); } else { __ TruncLD(FTMP, src); } __ Dmfc1(dst, FTMP); } else { - if (input_type == Primitive::kPrimFloat) { + if (input_type == DataType::Type::kFloat32) { __ TruncWS(FTMP, src); } else { __ TruncWD(FTMP, src); } __ Mfc1(dst, FTMP); } - } else if (Primitive::IsFloatingPointType(result_type) && - Primitive::IsFloatingPointType(input_type)) { + } else if (DataType::IsFloatingPointType(result_type) && + DataType::IsFloatingPointType(input_type)) { FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>(); FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>(); - if (result_type == Primitive::kPrimFloat) { + if (result_type == DataType::Type::kFloat32) { __ Cvtsd(dst, src); } else { __ Cvtds(dst, src); diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h index 9fe47ee297..2a95b3775d 100644 --- a/compiler/optimizing/code_generator_mips64.h +++ b/compiler/optimizing/code_generator_mips64.h @@ -79,8 +79,8 @@ class InvokeDexCallingConventionVisitorMIPS64 : public InvokeDexCallingConventio InvokeDexCallingConventionVisitorMIPS64() {} virtual ~InvokeDexCallingConventionVisitorMIPS64() {} - Location GetNextLocation(Primitive::Type type) OVERRIDE; - Location GetReturnLocation(Primitive::Type type) const OVERRIDE; + Location GetNextLocation(DataType::Type type) OVERRIDE; + Location GetReturnLocation(DataType::Type type) const OVERRIDE; Location GetMethodLocation() const OVERRIDE; private: @@ -98,7 +98,7 @@ class InvokeRuntimeCallingConvention : public CallingConvention<GpuRegister, Fpu kRuntimeParameterFpuRegistersLength, kMips64PointerSize) {} - Location GetReturnLocation(Primitive::Type return_type); + Location 
GetReturnLocation(DataType::Type return_type); private: DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention); @@ -114,16 +114,16 @@ class FieldAccessCallingConventionMIPS64 : public FieldAccessCallingConvention { Location GetFieldIndexLocation() const OVERRIDE { return Location::RegisterLocation(A0); } - Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE { + Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE { return Location::RegisterLocation(V0); } - Location GetSetValueLocation(Primitive::Type type ATTRIBUTE_UNUSED, + Location GetSetValueLocation(DataType::Type type ATTRIBUTE_UNUSED, bool is_instance) const OVERRIDE { return is_instance ? Location::RegisterLocation(A2) : Location::RegisterLocation(A1); } - Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE { + Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE { return Location::FpuRegisterLocation(F0); } @@ -306,19 +306,19 @@ class InstructionCodeGeneratorMIPS64 : public InstructionCodeGenerator { Mips64Label* label); void GenerateFpCompare(IfCondition cond, bool gt_bias, - Primitive::Type type, + DataType::Type type, LocationSummary* locations); // When the function returns `false` it means that the condition holds if `dst` is non-zero // and doesn't hold if `dst` is zero. If it returns `true`, the roles of zero and non-zero // `dst` are exchanged. bool MaterializeFpCompare(IfCondition cond, bool gt_bias, - Primitive::Type type, + DataType::Type type, LocationSummary* input_locations, FpuRegister dst); void GenerateFpCompareAndBranch(IfCondition cond, bool gt_bias, - Primitive::Type type, + DataType::Type type, LocationSummary* locations, Mips64Label* label); void HandleGoto(HInstruction* got, HBasicBlock* successor); @@ -497,14 +497,14 @@ class CodeGeneratorMIPS64 : public CodeGenerator { void Finalize(CodeAllocator* allocator) OVERRIDE; // Code generation helpers. - void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE; + void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE; void MoveConstant(Location destination, int32_t value) OVERRIDE; void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE; - void SwapLocations(Location loc1, Location loc2, Primitive::Type type); + void SwapLocations(Location loc1, Location loc2, DataType::Type type); // Generate code to invoke a runtime entry point. void InvokeRuntime(QuickEntrypointEnum entrypoint, @@ -522,7 +522,7 @@ class CodeGeneratorMIPS64 : public CodeGenerator { ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; } - bool NeedsTwoRegisters(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE { return false; } + bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE { return false; } // Check if the desired_string_load_kind is supported. If it is, return it, // otherwise return a fall-back kind that should be used instead. 
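In the code_generator_mips64.h hunks above, every virtual that took a Primitive::Type now takes a DataType::Type, so the arch-independent interfaces they override must change in lockstep. A hypothetical interface mirroring the calling-convention overrides shown above (names and exact declarations assumed; the real ones live in the shared code generator headers, which are not part of this excerpt):

// Shape sketch only; Location and DataType::Type stand in for the ART declarations.
class Location;
class DataType { public: enum class Type; };

class CallingConventionVisitorSketch {
 public:
  virtual ~CallingConventionVisitorSketch() {}
  virtual Location GetNextLocation(DataType::Type type) = 0;
  virtual Location GetReturnLocation(DataType::Type type) const = 0;
  virtual Location GetMethodLocation() const = 0;
};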
@@ -546,7 +546,7 @@ class CodeGeneratorMIPS64 : public CodeGenerator { HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE; void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED, - Primitive::Type type ATTRIBUTE_UNUSED) OVERRIDE { + DataType::Type type ATTRIBUTE_UNUSED) OVERRIDE { UNIMPLEMENTED(FATAL) << "Not implemented on MIPS64"; } diff --git a/compiler/optimizing/code_generator_vector_arm64.cc b/compiler/optimizing/code_generator_vector_arm64.cc index 3f576c82b3..5d5623bbe7 100644 --- a/compiler/optimizing/code_generator_vector_arm64.cc +++ b/compiler/optimizing/code_generator_vector_arm64.cc @@ -41,17 +41,17 @@ void LocationsBuilderARM64::VisitVecReplicateScalar(HVecReplicateScalar* instruc LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); HInstruction* input = instruction->InputAt(0); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: locations->SetInAt(0, ARM64EncodableConstantOrRegister(input, instruction)); locations->SetOut(Location::RequiresFpuRegister()); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: if (input->IsConstant() && Arm64CanEncodeConstantAsImmediate(input->AsConstant(), instruction)) { locations->SetInAt(0, Location::ConstantLocation(input->AsConstant())); @@ -72,8 +72,8 @@ void InstructionCodeGeneratorARM64::VisitVecReplicateScalar(HVecReplicateScalar* Location src_loc = locations->InAt(0); VRegister dst = VRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: + case DataType::Type::kBool: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); if (src_loc.IsConstant()) { __ Movi(dst.V16B(), Int64ConstantFrom(src_loc)); @@ -81,8 +81,8 @@ void InstructionCodeGeneratorARM64::VisitVecReplicateScalar(HVecReplicateScalar* __ Dup(dst.V16B(), InputRegisterAt(instruction, 0)); } break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); if (src_loc.IsConstant()) { __ Movi(dst.V8H(), Int64ConstantFrom(src_loc)); @@ -90,7 +90,7 @@ void InstructionCodeGeneratorARM64::VisitVecReplicateScalar(HVecReplicateScalar* __ Dup(dst.V8H(), InputRegisterAt(instruction, 0)); } break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); if (src_loc.IsConstant()) { __ Movi(dst.V4S(), Int64ConstantFrom(src_loc)); @@ -98,7 +98,7 @@ void InstructionCodeGeneratorARM64::VisitVecReplicateScalar(HVecReplicateScalar* __ Dup(dst.V4S(), InputRegisterAt(instruction, 0)); } break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); if (src_loc.IsConstant()) { __ Movi(dst.V2D(), Int64ConstantFrom(src_loc)); @@ -106,7 +106,7 @@ void InstructionCodeGeneratorARM64::VisitVecReplicateScalar(HVecReplicateScalar* __ Dup(dst.V2D(), XRegisterFrom(src_loc)); } break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); if 
(src_loc.IsConstant()) { __ Fmov(dst.V4S(), src_loc.GetConstant()->AsFloatConstant()->GetValue()); @@ -114,7 +114,7 @@ void InstructionCodeGeneratorARM64::VisitVecReplicateScalar(HVecReplicateScalar* __ Dup(dst.V4S(), VRegisterFrom(src_loc).V4S(), 0); } break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); if (src_loc.IsConstant()) { __ Fmov(dst.V2D(), src_loc.GetConstant()->AsDoubleConstant()->GetValue()); @@ -131,17 +131,17 @@ void InstructionCodeGeneratorARM64::VisitVecReplicateScalar(HVecReplicateScalar* void LocationsBuilderARM64::VisitVecExtractScalar(HVecExtractScalar* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister()); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::SameAsFirstInput()); break; @@ -155,16 +155,16 @@ void InstructionCodeGeneratorARM64::VisitVecExtractScalar(HVecExtractScalar* ins LocationSummary* locations = instruction->GetLocations(); VRegister src = VRegisterFrom(locations->InAt(0)); switch (instruction->GetPackedType()) { - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ Umov(OutputRegister(instruction), src.V4S(), 0); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ Umov(OutputRegister(instruction), src.V2D(), 0); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: DCHECK_LE(2u, instruction->GetVectorLength()); DCHECK_LE(instruction->GetVectorLength(), 4u); DCHECK(locations->InAt(0).Equals(locations->Out())); // no code required @@ -179,19 +179,19 @@ void InstructionCodeGeneratorARM64::VisitVecExtractScalar(HVecExtractScalar* ins static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) { LocationSummary* locations = new (arena) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: + case DataType::Type::kBool: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), instruction->IsVecNot() ? 
Location::kOutputOverlap : Location::kNoOutputOverlap); break; - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); break; @@ -210,7 +210,7 @@ void InstructionCodeGeneratorARM64::VisitVecReduce(HVecReduce* instruction) { VRegister src = VRegisterFrom(locations->InAt(0)); VRegister dst = DRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); switch (instruction->GetKind()) { case HVecReduce::kSum: @@ -224,7 +224,7 @@ void InstructionCodeGeneratorARM64::VisitVecReduce(HVecReduce* instruction) { break; } break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); switch (instruction->GetKind()) { case HVecReduce::kSum: @@ -249,9 +249,9 @@ void InstructionCodeGeneratorARM64::VisitVecCnv(HVecCnv* instruction) { LocationSummary* locations = instruction->GetLocations(); VRegister src = VRegisterFrom(locations->InAt(0)); VRegister dst = VRegisterFrom(locations->Out()); - Primitive::Type from = instruction->GetInputType(); - Primitive::Type to = instruction->GetResultType(); - if (from == Primitive::kPrimInt && to == Primitive::kPrimFloat) { + DataType::Type from = instruction->GetInputType(); + DataType::Type to = instruction->GetResultType(); + if (from == DataType::Type::kInt32 && to == DataType::Type::kFloat32) { DCHECK_EQ(4u, instruction->GetVectorLength()); __ Scvtf(dst.V4S(), src.V4S()); } else { @@ -268,28 +268,28 @@ void InstructionCodeGeneratorARM64::VisitVecNeg(HVecNeg* instruction) { VRegister src = VRegisterFrom(locations->InAt(0)); VRegister dst = VRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ Neg(dst.V16B(), src.V16B()); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ Neg(dst.V8H(), src.V8H()); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ Neg(dst.V4S(), src.V4S()); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ Neg(dst.V2D(), src.V2D()); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ Fneg(dst.V4S(), src.V4S()); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ Fneg(dst.V2D(), src.V2D()); break; @@ -308,28 +308,28 @@ void InstructionCodeGeneratorARM64::VisitVecAbs(HVecAbs* instruction) { VRegister src = VRegisterFrom(locations->InAt(0)); VRegister dst = VRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ Abs(dst.V16B(), src.V16B()); break; 
- case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ Abs(dst.V8H(), src.V8H()); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ Abs(dst.V4S(), src.V4S()); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ Abs(dst.V2D(), src.V2D()); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ Fabs(dst.V4S(), src.V4S()); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ Fabs(dst.V2D(), src.V2D()); break; @@ -348,16 +348,16 @@ void InstructionCodeGeneratorARM64::VisitVecNot(HVecNot* instruction) { VRegister src = VRegisterFrom(locations->InAt(0)); VRegister dst = VRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: // special case boolean-not + case DataType::Type::kBool: // special case boolean-not DCHECK_EQ(16u, instruction->GetVectorLength()); __ Movi(dst.V16B(), 1); __ Eor(dst.V16B(), dst.V16B(), src.V16B()); break; - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: __ Not(dst.V16B(), src.V16B()); // lanes do not matter break; default: @@ -370,14 +370,14 @@ void InstructionCodeGeneratorARM64::VisitVecNot(HVecNot* instruction) { static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) { LocationSummary* locations = new (arena) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); @@ -398,28 +398,28 @@ void InstructionCodeGeneratorARM64::VisitVecAdd(HVecAdd* instruction) { VRegister rhs = VRegisterFrom(locations->InAt(1)); VRegister dst = VRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ Add(dst.V16B(), lhs.V16B(), rhs.V16B()); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ Add(dst.V8H(), lhs.V8H(), rhs.V8H()); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ Add(dst.V4S(), lhs.V4S(), rhs.V4S()); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ Add(dst.V2D(), lhs.V2D(), rhs.V2D()); break; - case 
Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ Fadd(dst.V4S(), lhs.V4S(), rhs.V4S()); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ Fadd(dst.V2D(), lhs.V2D(), rhs.V2D()); break; @@ -439,7 +439,7 @@ void InstructionCodeGeneratorARM64::VisitVecHalvingAdd(HVecHalvingAdd* instructi VRegister rhs = VRegisterFrom(locations->InAt(1)); VRegister dst = VRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { instruction->IsRounded() @@ -451,8 +451,8 @@ void InstructionCodeGeneratorARM64::VisitVecHalvingAdd(HVecHalvingAdd* instructi : __ Shadd(dst.V16B(), lhs.V16B(), rhs.V16B()); } break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { instruction->IsRounded() @@ -480,28 +480,28 @@ void InstructionCodeGeneratorARM64::VisitVecSub(HVecSub* instruction) { VRegister rhs = VRegisterFrom(locations->InAt(1)); VRegister dst = VRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ Sub(dst.V16B(), lhs.V16B(), rhs.V16B()); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ Sub(dst.V8H(), lhs.V8H(), rhs.V8H()); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ Sub(dst.V4S(), lhs.V4S(), rhs.V4S()); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ Sub(dst.V2D(), lhs.V2D(), rhs.V2D()); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ Fsub(dst.V4S(), lhs.V4S(), rhs.V4S()); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ Fsub(dst.V2D(), lhs.V2D(), rhs.V2D()); break; @@ -521,24 +521,24 @@ void InstructionCodeGeneratorARM64::VisitVecMul(HVecMul* instruction) { VRegister rhs = VRegisterFrom(locations->InAt(1)); VRegister dst = VRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ Mul(dst.V16B(), lhs.V16B(), rhs.V16B()); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ Mul(dst.V8H(), lhs.V8H(), rhs.V8H()); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ Mul(dst.V4S(), lhs.V4S(), rhs.V4S()); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ Fmul(dst.V4S(), lhs.V4S(), rhs.V4S()); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ Fmul(dst.V2D(), lhs.V2D(), rhs.V2D()); break; @@ -558,11 +558,11 @@ void InstructionCodeGeneratorARM64::VisitVecDiv(HVecDiv* instruction) { VRegister rhs = 
VRegisterFrom(locations->InAt(1)); VRegister dst = VRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ Fdiv(dst.V4S(), lhs.V4S(), rhs.V4S()); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ Fdiv(dst.V2D(), lhs.V2D(), rhs.V2D()); break; @@ -582,7 +582,7 @@ void InstructionCodeGeneratorARM64::VisitVecMin(HVecMin* instruction) { VRegister rhs = VRegisterFrom(locations->InAt(1)); VRegister dst = VRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ Umin(dst.V16B(), lhs.V16B(), rhs.V16B()); @@ -590,8 +590,8 @@ void InstructionCodeGeneratorARM64::VisitVecMin(HVecMin* instruction) { __ Smin(dst.V16B(), lhs.V16B(), rhs.V16B()); } break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ Umin(dst.V8H(), lhs.V8H(), rhs.V8H()); @@ -599,7 +599,7 @@ void InstructionCodeGeneratorARM64::VisitVecMin(HVecMin* instruction) { __ Smin(dst.V8H(), lhs.V8H(), rhs.V8H()); } break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ Umin(dst.V4S(), lhs.V4S(), rhs.V4S()); @@ -607,12 +607,12 @@ void InstructionCodeGeneratorARM64::VisitVecMin(HVecMin* instruction) { __ Smin(dst.V4S(), lhs.V4S(), rhs.V4S()); } break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); DCHECK(!instruction->IsUnsigned()); __ Fmin(dst.V4S(), lhs.V4S(), rhs.V4S()); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); DCHECK(!instruction->IsUnsigned()); __ Fmin(dst.V2D(), lhs.V2D(), rhs.V2D()); @@ -633,7 +633,7 @@ void InstructionCodeGeneratorARM64::VisitVecMax(HVecMax* instruction) { VRegister rhs = VRegisterFrom(locations->InAt(1)); VRegister dst = VRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ Umax(dst.V16B(), lhs.V16B(), rhs.V16B()); @@ -641,8 +641,8 @@ void InstructionCodeGeneratorARM64::VisitVecMax(HVecMax* instruction) { __ Smax(dst.V16B(), lhs.V16B(), rhs.V16B()); } break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ Umax(dst.V8H(), lhs.V8H(), rhs.V8H()); @@ -650,7 +650,7 @@ void InstructionCodeGeneratorARM64::VisitVecMax(HVecMax* instruction) { __ Smax(dst.V8H(), lhs.V8H(), rhs.V8H()); } break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ Umax(dst.V4S(), lhs.V4S(), rhs.V4S()); @@ -658,12 +658,12 @@ void InstructionCodeGeneratorARM64::VisitVecMax(HVecMax* instruction) { __ Smax(dst.V4S(), lhs.V4S(), rhs.V4S()); } break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); DCHECK(!instruction->IsUnsigned()); __ Fmax(dst.V4S(), lhs.V4S(), 
rhs.V4S()); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); DCHECK(!instruction->IsUnsigned()); __ Fmax(dst.V2D(), lhs.V2D(), rhs.V2D()); @@ -684,14 +684,14 @@ void InstructionCodeGeneratorARM64::VisitVecAnd(HVecAnd* instruction) { VRegister rhs = VRegisterFrom(locations->InAt(1)); VRegister dst = VRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: __ And(dst.V16B(), lhs.V16B(), rhs.V16B()); // lanes do not matter break; default: @@ -718,14 +718,14 @@ void InstructionCodeGeneratorARM64::VisitVecOr(HVecOr* instruction) { VRegister rhs = VRegisterFrom(locations->InAt(1)); VRegister dst = VRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: __ Orr(dst.V16B(), lhs.V16B(), rhs.V16B()); // lanes do not matter break; default: @@ -744,14 +744,14 @@ void InstructionCodeGeneratorARM64::VisitVecXor(HVecXor* instruction) { VRegister rhs = VRegisterFrom(locations->InAt(1)); VRegister dst = VRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: __ Eor(dst.V16B(), lhs.V16B(), rhs.V16B()); // lanes do not matter break; default: @@ -764,11 +764,11 @@ void InstructionCodeGeneratorARM64::VisitVecXor(HVecXor* instruction) { static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) { LocationSummary* locations = new (arena) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant())); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); @@ -789,20 +789,20 @@ void InstructionCodeGeneratorARM64::VisitVecShl(HVecShl* instruction) { VRegister 
dst = VRegisterFrom(locations->Out()); int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ Shl(dst.V16B(), lhs.V16B(), value); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ Shl(dst.V8H(), lhs.V8H(), value); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ Shl(dst.V4S(), lhs.V4S(), value); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ Shl(dst.V2D(), lhs.V2D(), value); break; @@ -822,20 +822,20 @@ void InstructionCodeGeneratorARM64::VisitVecShr(HVecShr* instruction) { VRegister dst = VRegisterFrom(locations->Out()); int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ Sshr(dst.V16B(), lhs.V16B(), value); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ Sshr(dst.V8H(), lhs.V8H(), value); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ Sshr(dst.V4S(), lhs.V4S(), value); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ Sshr(dst.V2D(), lhs.V2D(), value); break; @@ -855,20 +855,20 @@ void InstructionCodeGeneratorARM64::VisitVecUShr(HVecUShr* instruction) { VRegister dst = VRegisterFrom(locations->Out()); int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ Ushr(dst.V16B(), lhs.V16B(), value); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ Ushr(dst.V8H(), lhs.V8H(), value); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ Ushr(dst.V4S(), lhs.V4S(), value); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ Ushr(dst.V2D(), lhs.V2D(), value); break; @@ -887,18 +887,18 @@ void LocationsBuilderARM64::VisitVecSetScalars(HVecSetScalars* instruction) { bool is_zero = IsZeroBitPattern(input); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: locations->SetInAt(0, is_zero ? 
Location::ConstantLocation(input->AsConstant()) : Location::RequiresRegister()); locations->SetOut(Location::RequiresFpuRegister()); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, is_zero ? Location::ConstantLocation(input->AsConstant()) : Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister()); @@ -925,21 +925,21 @@ void InstructionCodeGeneratorARM64::VisitVecSetScalars(HVecSetScalars* instructi // Set required elements. switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: + case DataType::Type::kBool: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ Mov(dst.V16B(), 0, InputRegisterAt(instruction, 0)); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ Mov(dst.V8H(), 0, InputRegisterAt(instruction, 0)); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ Mov(dst.V4S(), 0, InputRegisterAt(instruction, 0)); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ Mov(dst.V2D(), 0, InputRegisterAt(instruction, 0)); break; @@ -953,11 +953,11 @@ void InstructionCodeGeneratorARM64::VisitVecSetScalars(HVecSetScalars* instructi static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instruction) { LocationSummary* locations = new (arena) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); locations->SetInAt(2, Location::RequiresFpuRegister()); @@ -985,7 +985,7 @@ void InstructionCodeGeneratorARM64::VisitVecMultiplyAccumulate(HVecMultiplyAccum DCHECK(locations->InAt(0).Equals(locations->Out())); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); if (instruction->GetOpKind() == HInstruction::kAdd) { __ Mla(acc.V16B(), left.V16B(), right.V16B()); @@ -993,8 +993,8 @@ void InstructionCodeGeneratorARM64::VisitVecMultiplyAccumulate(HVecMultiplyAccum __ Mls(acc.V16B(), left.V16B(), right.V16B()); } break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); if (instruction->GetOpKind() == HInstruction::kAdd) { __ Mla(acc.V8H(), left.V8H(), right.V8H()); @@ -1002,7 +1002,7 @@ void InstructionCodeGeneratorARM64::VisitVecMultiplyAccumulate(HVecMultiplyAccum __ Mls(acc.V8H(), left.V8H(), right.V8H()); } break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); if (instruction->GetOpKind() == HInstruction::kAdd) { __ Mla(acc.V4S(), left.V4S(), right.V4S()); @@ -1024,13 +1024,13 @@ void LocationsBuilderARM64::VisitVecSADAccumulate(HVecSADAccumulate* instruction HVecOperation* b = instruction->InputAt(2)->AsVecOperation(); 
DCHECK_EQ(a->GetPackedType(), b->GetPackedType()); switch (a->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: switch (instruction->GetPackedType()) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: locations->AddTemp(Location::RequiresFpuRegister()); locations->AddTemp(Location::RequiresFpuRegister()); FALLTHROUGH_INTENDED; - case Primitive::kPrimInt: + case DataType::Type::kInt32: locations->AddTemp(Location::RequiresFpuRegister()); locations->AddTemp(Location::RequiresFpuRegister()); break; @@ -1038,15 +1038,15 @@ void LocationsBuilderARM64::VisitVecSADAccumulate(HVecSADAccumulate* instruction break; } break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: - if (instruction->GetPackedType() == Primitive::kPrimLong) { + case DataType::Type::kUint16: + case DataType::Type::kInt16: + if (instruction->GetPackedType() == DataType::Type::kInt64) { locations->AddTemp(Location::RequiresFpuRegister()); locations->AddTemp(Location::RequiresFpuRegister()); } break; - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt32: + case DataType::Type::kInt64: if (instruction->GetPackedType() == a->GetPackedType()) { locations->AddTemp(Location::RequiresFpuRegister()); } @@ -1069,16 +1069,16 @@ void InstructionCodeGeneratorARM64::VisitVecSADAccumulate(HVecSADAccumulate* ins HVecOperation* b = instruction->InputAt(2)->AsVecOperation(); DCHECK_EQ(a->GetPackedType(), b->GetPackedType()); switch (a->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, a->GetVectorLength()); switch (instruction->GetPackedType()) { - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ Sabal(acc.V8H(), left.V8B(), right.V8B()); __ Sabal2(acc.V8H(), left.V16B(), right.V16B()); break; - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { DCHECK_EQ(4u, instruction->GetVectorLength()); VRegister tmp1 = VRegisterFrom(locations->GetTemp(0)); VRegister tmp2 = VRegisterFrom(locations->GetTemp(1)); @@ -1092,7 +1092,7 @@ void InstructionCodeGeneratorARM64::VisitVecSADAccumulate(HVecSADAccumulate* ins __ Sabal2(acc.V4S(), tmp1.V8H(), tmp2.V8H()); break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { DCHECK_EQ(2u, instruction->GetVectorLength()); VRegister tmp1 = VRegisterFrom(locations->GetTemp(0)); VRegister tmp2 = VRegisterFrom(locations->GetTemp(1)); @@ -1125,16 +1125,16 @@ void InstructionCodeGeneratorARM64::VisitVecSADAccumulate(HVecSADAccumulate* ins UNREACHABLE(); } break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, a->GetVectorLength()); switch (instruction->GetPackedType()) { - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ Sabal(acc.V4S(), left.V4H(), right.V4H()); __ Sabal2(acc.V4S(), left.V8H(), right.V8H()); break; - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { DCHECK_EQ(2u, instruction->GetVectorLength()); VRegister tmp1 = VRegisterFrom(locations->GetTemp(0)); VRegister tmp2 = VRegisterFrom(locations->GetTemp(1)); @@ -1153,10 +1153,10 @@ void InstructionCodeGeneratorARM64::VisitVecSADAccumulate(HVecSADAccumulate* ins UNREACHABLE(); } break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, a->GetVectorLength()); switch (instruction->GetPackedType()) { - case 
Primitive::kPrimInt: { + case DataType::Type::kInt32: { DCHECK_EQ(4u, instruction->GetVectorLength()); VRegister tmp = VRegisterFrom(locations->GetTemp(0)); __ Sub(tmp.V4S(), left.V4S(), right.V4S()); @@ -1164,7 +1164,7 @@ void InstructionCodeGeneratorARM64::VisitVecSADAccumulate(HVecSADAccumulate* ins __ Add(acc.V4S(), acc.V4S(), tmp.V4S()); break; } - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ Sabal(acc.V2D(), left.V2S(), right.V2S()); __ Sabal2(acc.V2D(), left.V4S(), right.V4S()); @@ -1174,10 +1174,10 @@ void InstructionCodeGeneratorARM64::VisitVecSADAccumulate(HVecSADAccumulate* ins UNREACHABLE(); } break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, a->GetVectorLength()); switch (instruction->GetPackedType()) { - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { DCHECK_EQ(2u, instruction->GetVectorLength()); VRegister tmp = VRegisterFrom(locations->GetTemp(0)); __ Sub(tmp.V2D(), left.V2D(), right.V2D()); @@ -1201,14 +1201,14 @@ static void CreateVecMemLocations(ArenaAllocator* arena, bool is_load) { LocationSummary* locations = new (arena) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); if (is_load) { @@ -1265,13 +1265,13 @@ void LocationsBuilderARM64::VisitVecLoad(HVecLoad* instruction) { void InstructionCodeGeneratorARM64::VisitVecLoad(HVecLoad* instruction) { LocationSummary* locations = instruction->GetLocations(); - size_t size = Primitive::ComponentSize(instruction->GetPackedType()); + size_t size = DataType::Size(instruction->GetPackedType()); VRegister reg = VRegisterFrom(locations->Out()); UseScratchRegisterScope temps(GetVIXLAssembler()); Register scratch; switch (instruction->GetPackedType()) { - case Primitive::kPrimChar: + case DataType::Type::kUint16: DCHECK_EQ(8u, instruction->GetVectorLength()); // Special handling of compressed/uncompressed string load. 
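VisitVecLoad above now derives the element size from DataType::Size(instruction->GetPackedType()) instead of Primitive::ComponentSize(). The mapping below is a guess at what that call yields for the packed types seen in these code generators, written out purely for orientation; the helper name is hypothetical and the real DataType implementation is not shown in this diff.

// Assumed element sizes (in bytes) behind the DataType::Size() calls in the
// vector load/store paths; a sketch, not the actual implementation.
static size_t PackedElementSize(DataType::Type type) {  // hypothetical name
  switch (type) {
    case DataType::Type::kBool:
    case DataType::Type::kInt8:
      return 1u;
    case DataType::Type::kUint16:
    case DataType::Type::kInt16:
      return 2u;
    case DataType::Type::kInt32:
    case DataType::Type::kFloat32:
      return 4u;
    case DataType::Type::kInt64:
    case DataType::Type::kFloat64:
      return 8u;
    default:
      LOG(FATAL) << "Unexpected packed type";
      UNREACHABLE();
  }
}

That size then feeds the VecAddress helpers, which scale the index by it and add the array data offset.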
if (mirror::kUseStringCompression && instruction->IsStringCharAt()) { @@ -1299,13 +1299,13 @@ void InstructionCodeGeneratorARM64::VisitVecLoad(HVecLoad* instruction) { return; } FALLTHROUGH_INTENDED; - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimFloat: - case Primitive::kPrimLong: - case Primitive::kPrimDouble: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kFloat32: + case DataType::Type::kInt64: + case DataType::Type::kFloat64: DCHECK_LE(2u, instruction->GetVectorLength()); DCHECK_LE(instruction->GetVectorLength(), 16u); __ Ldr(reg, VecAddress(instruction, &temps, size, instruction->IsStringCharAt(), &scratch)); @@ -1322,20 +1322,20 @@ void LocationsBuilderARM64::VisitVecStore(HVecStore* instruction) { void InstructionCodeGeneratorARM64::VisitVecStore(HVecStore* instruction) { LocationSummary* locations = instruction->GetLocations(); - size_t size = Primitive::ComponentSize(instruction->GetPackedType()); + size_t size = DataType::Size(instruction->GetPackedType()); VRegister reg = VRegisterFrom(locations->InAt(2)); UseScratchRegisterScope temps(GetVIXLAssembler()); Register scratch; switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimFloat: - case Primitive::kPrimLong: - case Primitive::kPrimDouble: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kFloat32: + case DataType::Type::kInt64: + case DataType::Type::kFloat64: DCHECK_LE(2u, instruction->GetVectorLength()); DCHECK_LE(instruction->GetVectorLength(), 16u); __ Str(reg, VecAddress(instruction, &temps, size, /*is_string_char_at*/ false, &scratch)); diff --git a/compiler/optimizing/code_generator_vector_arm_vixl.cc b/compiler/optimizing/code_generator_vector_arm_vixl.cc index 069054c2f5..333d108f2c 100644 --- a/compiler/optimizing/code_generator_vector_arm_vixl.cc +++ b/compiler/optimizing/code_generator_vector_arm_vixl.cc @@ -35,11 +35,11 @@ using helpers::RegisterFrom; void LocationsBuilderARMVIXL::VisitVecReplicateScalar(HVecReplicateScalar* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresFpuRegister()); break; @@ -53,17 +53,17 @@ void InstructionCodeGeneratorARMVIXL::VisitVecReplicateScalar(HVecReplicateScala LocationSummary* locations = instruction->GetLocations(); vixl32::DRegister dst = DRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: + case DataType::Type::kBool: + case DataType::Type::kInt8: DCHECK_EQ(8u, instruction->GetVectorLength()); __ Vdup(Untyped8, dst, InputRegisterAt(instruction, 0)); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + 
case DataType::Type::kInt16: DCHECK_EQ(4u, instruction->GetVectorLength()); __ Vdup(Untyped16, dst, InputRegisterAt(instruction, 0)); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(2u, instruction->GetVectorLength()); __ Vdup(Untyped32, dst, InputRegisterAt(instruction, 0)); break; @@ -85,16 +85,16 @@ void InstructionCodeGeneratorARMVIXL::VisitVecExtractScalar(HVecExtractScalar* i static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) { LocationSummary* locations = new (arena) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: + case DataType::Type::kBool: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), instruction->IsVecNot() ? Location::kOutputOverlap : Location::kNoOutputOverlap); break; - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); break; @@ -129,16 +129,16 @@ void InstructionCodeGeneratorARMVIXL::VisitVecNeg(HVecNeg* instruction) { vixl32::DRegister src = DRegisterFrom(locations->InAt(0)); vixl32::DRegister dst = DRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(8u, instruction->GetVectorLength()); __ Vneg(DataTypeValue::S8, dst, src); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(4u, instruction->GetVectorLength()); __ Vneg(DataTypeValue::S16, dst, src); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(2u, instruction->GetVectorLength()); __ Vneg(DataTypeValue::S32, dst, src); break; @@ -157,16 +157,16 @@ void InstructionCodeGeneratorARMVIXL::VisitVecAbs(HVecAbs* instruction) { vixl32::DRegister src = DRegisterFrom(locations->InAt(0)); vixl32::DRegister dst = DRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(8u, instruction->GetVectorLength()); __ Vabs(DataTypeValue::S8, dst, src); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(4u, instruction->GetVectorLength()); __ Vabs(DataTypeValue::S16, dst, src); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(2u, instruction->GetVectorLength()); __ Vabs(DataTypeValue::S32, dst, src); break; @@ -185,15 +185,15 @@ void InstructionCodeGeneratorARMVIXL::VisitVecNot(HVecNot* instruction) { vixl32::DRegister src = DRegisterFrom(locations->InAt(0)); vixl32::DRegister dst = DRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: // special case boolean-not + case DataType::Type::kBool: // special case boolean-not DCHECK_EQ(8u, instruction->GetVectorLength()); __ Vmov(I8, dst, 1); __ Veor(dst, dst, src); break; - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: __ Vmvn(I8, dst, src); // lanes do not matter 
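Two reader's notes on the ARM VIXL cases above, not anything added by the patch: the DCHECKed lengths are half of the ARM64 ones (8, 4, 2) because these paths operate on 64-bit NEON D registers, and the boolean-not special case works by XOR-ing each lane with 1, which is only valid if packed booleans are stored as 0 or 1. A scalar sketch of that last idiom, with a hypothetical helper name:

#include <cstdint>

// Per-lane equivalent of the boolean-not sequence above
// (Vmov(I8, dst, 1) followed by Veor(dst, dst, src)), assuming lanes hold 0 or 1.
static uint8_t BoolNotLane(uint8_t b) {
  return b ^ 1u;  // 0 -> 1, 1 -> 0
}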
break; default: @@ -206,11 +206,11 @@ void InstructionCodeGeneratorARMVIXL::VisitVecNot(HVecNot* instruction) { static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) { LocationSummary* locations = new (arena) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); @@ -231,16 +231,16 @@ void InstructionCodeGeneratorARMVIXL::VisitVecAdd(HVecAdd* instruction) { vixl32::DRegister rhs = DRegisterFrom(locations->InAt(1)); vixl32::DRegister dst = DRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(8u, instruction->GetVectorLength()); __ Vadd(I8, dst, lhs, rhs); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(4u, instruction->GetVectorLength()); __ Vadd(I16, dst, lhs, rhs); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(2u, instruction->GetVectorLength()); __ Vadd(I32, dst, lhs, rhs); break; @@ -260,7 +260,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecHalvingAdd(HVecHalvingAdd* instruc vixl32::DRegister rhs = DRegisterFrom(locations->InAt(1)); vixl32::DRegister dst = DRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(8u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { instruction->IsRounded() @@ -272,8 +272,8 @@ void InstructionCodeGeneratorARMVIXL::VisitVecHalvingAdd(HVecHalvingAdd* instruc : __ Vhadd(DataTypeValue::S8, dst, lhs, rhs); } break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(4u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { instruction->IsRounded() @@ -301,16 +301,16 @@ void InstructionCodeGeneratorARMVIXL::VisitVecSub(HVecSub* instruction) { vixl32::DRegister rhs = DRegisterFrom(locations->InAt(1)); vixl32::DRegister dst = DRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(8u, instruction->GetVectorLength()); __ Vsub(I8, dst, lhs, rhs); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(4u, instruction->GetVectorLength()); __ Vsub(I16, dst, lhs, rhs); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(2u, instruction->GetVectorLength()); __ Vsub(I32, dst, lhs, rhs); break; @@ -330,16 +330,16 @@ void InstructionCodeGeneratorARMVIXL::VisitVecMul(HVecMul* instruction) { vixl32::DRegister rhs = DRegisterFrom(locations->InAt(1)); vixl32::DRegister dst = DRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(8u, instruction->GetVectorLength()); __ Vmul(I8, dst, lhs, rhs); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case 
DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(4u, instruction->GetVectorLength()); __ Vmul(I16, dst, lhs, rhs); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(2u, instruction->GetVectorLength()); __ Vmul(I32, dst, lhs, rhs); break; @@ -367,7 +367,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecMin(HVecMin* instruction) { vixl32::DRegister rhs = DRegisterFrom(locations->InAt(1)); vixl32::DRegister dst = DRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(8u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ Vmin(DataTypeValue::U8, dst, lhs, rhs); @@ -375,8 +375,8 @@ void InstructionCodeGeneratorARMVIXL::VisitVecMin(HVecMin* instruction) { __ Vmin(DataTypeValue::S8, dst, lhs, rhs); } break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(4u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ Vmin(DataTypeValue::U16, dst, lhs, rhs); @@ -384,7 +384,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecMin(HVecMin* instruction) { __ Vmin(DataTypeValue::S16, dst, lhs, rhs); } break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(2u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ Vmin(DataTypeValue::U32, dst, lhs, rhs); @@ -408,7 +408,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecMax(HVecMax* instruction) { vixl32::DRegister rhs = DRegisterFrom(locations->InAt(1)); vixl32::DRegister dst = DRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(8u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ Vmax(DataTypeValue::U8, dst, lhs, rhs); @@ -416,8 +416,8 @@ void InstructionCodeGeneratorARMVIXL::VisitVecMax(HVecMax* instruction) { __ Vmax(DataTypeValue::S8, dst, lhs, rhs); } break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(4u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ Vmax(DataTypeValue::U16, dst, lhs, rhs); @@ -425,7 +425,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecMax(HVecMax* instruction) { __ Vmax(DataTypeValue::S16, dst, lhs, rhs); } break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(2u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ Vmax(DataTypeValue::U32, dst, lhs, rhs); @@ -449,11 +449,11 @@ void InstructionCodeGeneratorARMVIXL::VisitVecAnd(HVecAnd* instruction) { vixl32::DRegister rhs = DRegisterFrom(locations->InAt(1)); vixl32::DRegister dst = DRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: __ Vand(I8, dst, lhs, rhs); break; default: @@ -480,11 +480,11 @@ void InstructionCodeGeneratorARMVIXL::VisitVecOr(HVecOr* instruction) { vixl32::DRegister rhs = DRegisterFrom(locations->InAt(1)); vixl32::DRegister dst = DRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case 
Primitive::kPrimShort: - case Primitive::kPrimInt: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: __ Vorr(I8, dst, lhs, rhs); break; default: @@ -503,11 +503,11 @@ void InstructionCodeGeneratorARMVIXL::VisitVecXor(HVecXor* instruction) { vixl32::DRegister rhs = DRegisterFrom(locations->InAt(1)); vixl32::DRegister dst = DRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: __ Veor(I8, dst, lhs, rhs); break; default: @@ -520,10 +520,10 @@ void InstructionCodeGeneratorARMVIXL::VisitVecXor(HVecXor* instruction) { static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) { LocationSummary* locations = new (arena) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant())); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); @@ -544,16 +544,16 @@ void InstructionCodeGeneratorARMVIXL::VisitVecShl(HVecShl* instruction) { vixl32::DRegister dst = DRegisterFrom(locations->Out()); int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(8u, instruction->GetVectorLength()); __ Vshl(I8, dst, lhs, value); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(4u, instruction->GetVectorLength()); __ Vshl(I16, dst, lhs, value); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(2u, instruction->GetVectorLength()); __ Vshl(I32, dst, lhs, value); break; @@ -573,16 +573,16 @@ void InstructionCodeGeneratorARMVIXL::VisitVecShr(HVecShr* instruction) { vixl32::DRegister dst = DRegisterFrom(locations->Out()); int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(8u, instruction->GetVectorLength()); __ Vshr(DataTypeValue::S8, dst, lhs, value); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(4u, instruction->GetVectorLength()); __ Vshr(DataTypeValue::S16, dst, lhs, value); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(2u, instruction->GetVectorLength()); __ Vshr(DataTypeValue::S32, dst, lhs, value); break; @@ -602,16 +602,16 @@ void InstructionCodeGeneratorARMVIXL::VisitVecUShr(HVecUShr* instruction) { vixl32::DRegister dst = DRegisterFrom(locations->Out()); int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(8u, 
instruction->GetVectorLength()); __ Vshr(DataTypeValue::U8, dst, lhs, value); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(4u, instruction->GetVectorLength()); __ Vshr(DataTypeValue::U16, dst, lhs, value); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(2u, instruction->GetVectorLength()); __ Vshr(DataTypeValue::U32, dst, lhs, value); break; @@ -633,11 +633,11 @@ void InstructionCodeGeneratorARMVIXL::VisitVecSetScalars(HVecSetScalars* instruc static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instruction) { LocationSummary* locations = new (arena) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); locations->SetInAt(2, Location::RequiresFpuRegister()); @@ -677,11 +677,11 @@ static void CreateVecMemLocations(ArenaAllocator* arena, bool is_load) { LocationSummary* locations = new (arena) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); if (is_load) { @@ -707,7 +707,7 @@ MemOperand InstructionCodeGeneratorARMVIXL::VecAddress( vixl32::Register base = InputRegisterAt(instruction, 0); Location index = locations->InAt(1); - size_t size = Primitive::ComponentSize(instruction->GetPackedType()); + size_t size = DataType::Size(instruction->GetPackedType()); uint32_t offset = mirror::Array::DataOffset(size).Uint32Value(); size_t shift = ComponentSizeShiftWidth(size); @@ -733,7 +733,7 @@ AlignedMemOperand InstructionCodeGeneratorARMVIXL::VecAddressUnaligned( vixl32::Register base = InputRegisterAt(instruction, 0); Location index = locations->InAt(1); - size_t size = Primitive::ComponentSize(instruction->GetPackedType()); + size_t size = DataType::Size(instruction->GetPackedType()); uint32_t offset = mirror::Array::DataOffset(size).Uint32Value(); size_t shift = ComponentSizeShiftWidth(size); @@ -760,11 +760,11 @@ void InstructionCodeGeneratorARMVIXL::VisitVecLoad(HVecLoad* instruction) { UseScratchRegisterScope temps(GetVIXLAssembler()); vixl32::Register scratch; - DCHECK(instruction->GetPackedType() != Primitive::kPrimChar || !instruction->IsStringCharAt()); + DCHECK(instruction->GetPackedType() != DataType::Type::kUint16 || !instruction->IsStringCharAt()); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: + case DataType::Type::kBool: + case DataType::Type::kInt8: DCHECK_EQ(8u, instruction->GetVectorLength()); if (IsWordAligned(instruction)) { __ Vldr(reg, VecAddress(instruction, &temps, &scratch)); @@ -774,8 +774,8 @@ void InstructionCodeGeneratorARMVIXL::VisitVecLoad(HVecLoad* instruction) { VecAddressUnaligned(instruction, &temps, 
&scratch)); } break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(4u, instruction->GetVectorLength()); if (IsWordAligned(instruction)) { __ Vldr(reg, VecAddress(instruction, &temps, &scratch)); @@ -785,7 +785,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecLoad(HVecLoad* instruction) { VecAddressUnaligned(instruction, &temps, &scratch)); } break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(2u, instruction->GetVectorLength()); if (IsWordAligned(instruction)) { __ Vldr(reg, VecAddress(instruction, &temps, &scratch)); @@ -810,8 +810,8 @@ void InstructionCodeGeneratorARMVIXL::VisitVecStore(HVecStore* instruction) { UseScratchRegisterScope temps(GetVIXLAssembler()); vixl32::Register scratch; switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: + case DataType::Type::kBool: + case DataType::Type::kInt8: DCHECK_EQ(8u, instruction->GetVectorLength()); if (IsWordAligned(instruction)) { __ Vstr(reg, VecAddress(instruction, &temps, &scratch)); @@ -821,8 +821,8 @@ void InstructionCodeGeneratorARMVIXL::VisitVecStore(HVecStore* instruction) { VecAddressUnaligned(instruction, &temps, &scratch)); } break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(4u, instruction->GetVectorLength()); if (IsWordAligned(instruction)) { __ Vstr(reg, VecAddress(instruction, &temps, &scratch)); @@ -832,7 +832,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecStore(HVecStore* instruction) { VecAddressUnaligned(instruction, &temps, &scratch)); } break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(2u, instruction->GetVectorLength()); if (IsWordAligned(instruction)) { __ Vstr(reg, VecAddress(instruction, &temps, &scratch)); diff --git a/compiler/optimizing/code_generator_vector_mips.cc b/compiler/optimizing/code_generator_vector_mips.cc index 0bedafcc81..c25f5acb7e 100644 --- a/compiler/optimizing/code_generator_vector_mips.cc +++ b/compiler/optimizing/code_generator_vector_mips.cc @@ -26,17 +26,17 @@ namespace mips { void LocationsBuilderMIPS::VisitVecReplicateScalar(HVecReplicateScalar* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresFpuRegister()); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); break; @@ -50,33 +50,33 @@ void InstructionCodeGeneratorMIPS::VisitVecReplicateScalar(HVecReplicateScalar* LocationSummary* locations = instruction->GetLocations(); VectorRegister dst = VectorRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: + case DataType::Type::kBool: + case DataType::Type::kInt8: DCHECK_EQ(16u, 
instruction->GetVectorLength()); __ FillB(dst, locations->InAt(0).AsRegister<Register>()); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ FillH(dst, locations->InAt(0).AsRegister<Register>()); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ FillW(dst, locations->InAt(0).AsRegister<Register>()); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ Mtc1(locations->InAt(0).AsRegisterPairLow<Register>(), FTMP); __ MoveToFpuHigh(locations->InAt(0).AsRegisterPairHigh<Register>(), FTMP); __ ReplicateFPToVectorRegister(dst, FTMP, /* is_double */ true); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ ReplicateFPToVectorRegister(dst, locations->InAt(0).AsFpuRegister<FRegister>(), /* is_double */ false); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ ReplicateFPToVectorRegister(dst, locations->InAt(0).AsFpuRegister<FRegister>(), @@ -100,19 +100,19 @@ void InstructionCodeGeneratorMIPS::VisitVecExtractScalar(HVecExtractScalar* inst static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) { LocationSummary* locations = new (arena) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: + case DataType::Type::kBool: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), instruction->IsVecNot() ? Location::kOutputOverlap : Location::kNoOutputOverlap); break; - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), (instruction->IsVecNeg() || instruction->IsVecAbs()) @@ -141,9 +141,9 @@ void InstructionCodeGeneratorMIPS::VisitVecCnv(HVecCnv* instruction) { LocationSummary* locations = instruction->GetLocations(); VectorRegister src = VectorRegisterFrom(locations->InAt(0)); VectorRegister dst = VectorRegisterFrom(locations->Out()); - Primitive::Type from = instruction->GetInputType(); - Primitive::Type to = instruction->GetResultType(); - if (from == Primitive::kPrimInt && to == Primitive::kPrimFloat) { + DataType::Type from = instruction->GetInputType(); + DataType::Type to = instruction->GetResultType(); + if (from == DataType::Type::kInt32 && to == DataType::Type::kFloat32) { DCHECK_EQ(4u, instruction->GetVectorLength()); __ Ffint_sW(dst, src); } else { @@ -160,33 +160,33 @@ void InstructionCodeGeneratorMIPS::VisitVecNeg(HVecNeg* instruction) { VectorRegister src = VectorRegisterFrom(locations->InAt(0)); VectorRegister dst = VectorRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ FillB(dst, ZERO); __ SubvB(dst, dst, src); break; - case Primitive::kPrimChar: - case 
Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ FillH(dst, ZERO); __ SubvH(dst, dst, src); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ FillW(dst, ZERO); __ SubvW(dst, dst, src); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ FillW(dst, ZERO); __ SubvD(dst, dst, src); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ FillW(dst, ZERO); __ FsubW(dst, dst, src); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ FillW(dst, ZERO); __ FsubD(dst, dst, src); @@ -206,34 +206,34 @@ void InstructionCodeGeneratorMIPS::VisitVecAbs(HVecAbs* instruction) { VectorRegister src = VectorRegisterFrom(locations->InAt(0)); VectorRegister dst = VectorRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ FillB(dst, ZERO); // all zeroes __ Add_aB(dst, dst, src); // dst = abs(0) + abs(src) break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ FillH(dst, ZERO); // all zeroes __ Add_aH(dst, dst, src); // dst = abs(0) + abs(src) break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ FillW(dst, ZERO); // all zeroes __ Add_aW(dst, dst, src); // dst = abs(0) + abs(src) break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ FillW(dst, ZERO); // all zeroes __ Add_aD(dst, dst, src); // dst = abs(0) + abs(src) break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ LdiW(dst, -1); // all ones __ SrliW(dst, dst, 1); __ AndV(dst, dst, src); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ LdiD(dst, -1); // all ones __ SrliD(dst, dst, 1); @@ -254,18 +254,18 @@ void InstructionCodeGeneratorMIPS::VisitVecNot(HVecNot* instruction) { VectorRegister src = VectorRegisterFrom(locations->InAt(0)); VectorRegister dst = VectorRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: // special case boolean-not + case DataType::Type::kBool: // special case boolean-not DCHECK_EQ(16u, instruction->GetVectorLength()); __ LdiB(dst, 1); __ XorV(dst, dst, src); break; - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: DCHECK_LE(2u, instruction->GetVectorLength()); DCHECK_LE(instruction->GetVectorLength(), 16u); __ NorV(dst, src, src); // lanes do not matter @@ -280,14 +280,14 @@ void InstructionCodeGeneratorMIPS::VisitVecNot(HVecNot* instruction) { static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) { LocationSummary* 
locations = new (arena) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); @@ -308,28 +308,28 @@ void InstructionCodeGeneratorMIPS::VisitVecAdd(HVecAdd* instruction) { VectorRegister rhs = VectorRegisterFrom(locations->InAt(1)); VectorRegister dst = VectorRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ AddvB(dst, lhs, rhs); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ AddvH(dst, lhs, rhs); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ AddvW(dst, lhs, rhs); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ AddvD(dst, lhs, rhs); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ FaddW(dst, lhs, rhs); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ FaddD(dst, lhs, rhs); break; @@ -349,7 +349,7 @@ void InstructionCodeGeneratorMIPS::VisitVecHalvingAdd(HVecHalvingAdd* instructio VectorRegister rhs = VectorRegisterFrom(locations->InAt(1)); VectorRegister dst = VectorRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { instruction->IsRounded() @@ -361,8 +361,8 @@ void InstructionCodeGeneratorMIPS::VisitVecHalvingAdd(HVecHalvingAdd* instructio : __ Ave_sB(dst, lhs, rhs); } break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { instruction->IsRounded() @@ -390,28 +390,28 @@ void InstructionCodeGeneratorMIPS::VisitVecSub(HVecSub* instruction) { VectorRegister rhs = VectorRegisterFrom(locations->InAt(1)); VectorRegister dst = VectorRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ SubvB(dst, lhs, rhs); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ SubvH(dst, lhs, rhs); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ SubvW(dst, lhs, rhs); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, 
instruction->GetVectorLength()); __ SubvD(dst, lhs, rhs); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ FsubW(dst, lhs, rhs); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ FsubD(dst, lhs, rhs); break; @@ -431,28 +431,28 @@ void InstructionCodeGeneratorMIPS::VisitVecMul(HVecMul* instruction) { VectorRegister rhs = VectorRegisterFrom(locations->InAt(1)); VectorRegister dst = VectorRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ MulvB(dst, lhs, rhs); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ MulvH(dst, lhs, rhs); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ MulvW(dst, lhs, rhs); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ MulvD(dst, lhs, rhs); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ FmulW(dst, lhs, rhs); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ FmulD(dst, lhs, rhs); break; @@ -472,11 +472,11 @@ void InstructionCodeGeneratorMIPS::VisitVecDiv(HVecDiv* instruction) { VectorRegister rhs = VectorRegisterFrom(locations->InAt(1)); VectorRegister dst = VectorRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ FdivW(dst, lhs, rhs); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ FdivD(dst, lhs, rhs); break; @@ -496,7 +496,7 @@ void InstructionCodeGeneratorMIPS::VisitVecMin(HVecMin* instruction) { VectorRegister rhs = VectorRegisterFrom(locations->InAt(1)); VectorRegister dst = VectorRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ Min_uB(dst, lhs, rhs); @@ -504,8 +504,8 @@ void InstructionCodeGeneratorMIPS::VisitVecMin(HVecMin* instruction) { __ Min_sB(dst, lhs, rhs); } break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ Min_uH(dst, lhs, rhs); @@ -513,7 +513,7 @@ void InstructionCodeGeneratorMIPS::VisitVecMin(HVecMin* instruction) { __ Min_sH(dst, lhs, rhs); } break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ Min_uW(dst, lhs, rhs); @@ -521,7 +521,7 @@ void InstructionCodeGeneratorMIPS::VisitVecMin(HVecMin* instruction) { __ Min_sW(dst, lhs, rhs); } break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ Min_uD(dst, lhs, rhs); @@ -531,12 +531,12 @@ void InstructionCodeGeneratorMIPS::VisitVecMin(HVecMin* instruction) { break; // When one of arguments is NaN, fmin.df returns other 
argument, but Java expects a NaN value. // TODO: Fix min(x, NaN) cases for float and double. - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); DCHECK(!instruction->IsUnsigned()); __ FminW(dst, lhs, rhs); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); DCHECK(!instruction->IsUnsigned()); __ FminD(dst, lhs, rhs); @@ -557,7 +557,7 @@ void InstructionCodeGeneratorMIPS::VisitVecMax(HVecMax* instruction) { VectorRegister rhs = VectorRegisterFrom(locations->InAt(1)); VectorRegister dst = VectorRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ Max_uB(dst, lhs, rhs); @@ -565,8 +565,8 @@ void InstructionCodeGeneratorMIPS::VisitVecMax(HVecMax* instruction) { __ Max_sB(dst, lhs, rhs); } break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ Max_uH(dst, lhs, rhs); @@ -574,7 +574,7 @@ void InstructionCodeGeneratorMIPS::VisitVecMax(HVecMax* instruction) { __ Max_sH(dst, lhs, rhs); } break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ Max_uW(dst, lhs, rhs); @@ -582,7 +582,7 @@ void InstructionCodeGeneratorMIPS::VisitVecMax(HVecMax* instruction) { __ Max_sW(dst, lhs, rhs); } break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ Max_uD(dst, lhs, rhs); @@ -592,12 +592,12 @@ void InstructionCodeGeneratorMIPS::VisitVecMax(HVecMax* instruction) { break; // When one of arguments is NaN, fmax.df returns other argument, but Java expects a NaN value. // TODO: Fix max(x, NaN) cases for float and double. 
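The NaN comments above flag the same issue for both min and max: Java's Math.min/Math.max must return NaN whenever either operand is NaN, and must treat -0.0 as smaller than +0.0, while the MSA fmin.df/fmax.df instructions return the other operand when exactly one input is NaN. A scalar sketch of the semantics the TODO is aiming for; the helper is hypothetical and only illustrates the expected result.

#include <cmath>

// Hypothetical scalar reference for the vector max TODO above: NaN wins,
// and +0.0f is considered greater than -0.0f.
static float JavaStyleMaxFloat(float a, float b) {
  if (std::isnan(a) || std::isnan(b)) {
    return std::nanf("");
  }
  if (a == 0.0f && b == 0.0f) {
    return std::signbit(a) ? b : a;
  }
  return (a > b) ? a : b;
}

The min case is the mirror image, returning -0.0f for the mixed-zero pair.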
- case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); DCHECK(!instruction->IsUnsigned()); __ FmaxW(dst, lhs, rhs); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); DCHECK(!instruction->IsUnsigned()); __ FmaxD(dst, lhs, rhs); @@ -618,14 +618,14 @@ void InstructionCodeGeneratorMIPS::VisitVecAnd(HVecAnd* instruction) { VectorRegister rhs = VectorRegisterFrom(locations->InAt(1)); VectorRegister dst = VectorRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: DCHECK_LE(2u, instruction->GetVectorLength()); DCHECK_LE(instruction->GetVectorLength(), 16u); __ AndV(dst, lhs, rhs); // lanes do not matter @@ -654,14 +654,14 @@ void InstructionCodeGeneratorMIPS::VisitVecOr(HVecOr* instruction) { VectorRegister rhs = VectorRegisterFrom(locations->InAt(1)); VectorRegister dst = VectorRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: DCHECK_LE(2u, instruction->GetVectorLength()); DCHECK_LE(instruction->GetVectorLength(), 16u); __ OrV(dst, lhs, rhs); // lanes do not matter @@ -682,14 +682,14 @@ void InstructionCodeGeneratorMIPS::VisitVecXor(HVecXor* instruction) { VectorRegister rhs = VectorRegisterFrom(locations->InAt(1)); VectorRegister dst = VectorRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: DCHECK_LE(2u, instruction->GetVectorLength()); DCHECK_LE(instruction->GetVectorLength(), 16u); __ XorV(dst, lhs, rhs); // lanes do not matter @@ -704,11 +704,11 @@ void InstructionCodeGeneratorMIPS::VisitVecXor(HVecXor* instruction) { static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) { LocationSummary* locations = new (arena) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case 
DataType::Type::kInt32: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant())); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); @@ -729,20 +729,20 @@ void InstructionCodeGeneratorMIPS::VisitVecShl(HVecShl* instruction) { VectorRegister dst = VectorRegisterFrom(locations->Out()); int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ SlliB(dst, lhs, value); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ SlliH(dst, lhs, value); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ SlliW(dst, lhs, value); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ SlliD(dst, lhs, value); break; @@ -762,20 +762,20 @@ void InstructionCodeGeneratorMIPS::VisitVecShr(HVecShr* instruction) { VectorRegister dst = VectorRegisterFrom(locations->Out()); int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ SraiB(dst, lhs, value); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ SraiH(dst, lhs, value); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ SraiW(dst, lhs, value); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ SraiD(dst, lhs, value); break; @@ -795,20 +795,20 @@ void InstructionCodeGeneratorMIPS::VisitVecUShr(HVecUShr* instruction) { VectorRegister dst = VectorRegisterFrom(locations->Out()); int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ SrliB(dst, lhs, value); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ SrliH(dst, lhs, value); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ SrliW(dst, lhs, value); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ SrliD(dst, lhs, value); break; @@ -830,11 +830,11 @@ void InstructionCodeGeneratorMIPS::VisitVecSetScalars(HVecSetScalars* instructio static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instruction) { LocationSummary* locations = new (arena) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case 
DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); locations->SetInAt(2, Location::RequiresFpuRegister()); @@ -856,7 +856,7 @@ void InstructionCodeGeneratorMIPS::VisitVecMultiplyAccumulate(HVecMultiplyAccumu VectorRegister left = VectorRegisterFrom(locations->InAt(1)); VectorRegister right = VectorRegisterFrom(locations->InAt(2)); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); if (instruction->GetOpKind() == HInstruction::kAdd) { __ MaddvB(acc, left, right); @@ -864,8 +864,8 @@ void InstructionCodeGeneratorMIPS::VisitVecMultiplyAccumulate(HVecMultiplyAccumu __ MsubvB(acc, left, right); } break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); if (instruction->GetOpKind() == HInstruction::kAdd) { __ MaddvH(acc, left, right); @@ -873,7 +873,7 @@ void InstructionCodeGeneratorMIPS::VisitVecMultiplyAccumulate(HVecMultiplyAccumu __ MsubvH(acc, left, right); } break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); if (instruction->GetOpKind() == HInstruction::kAdd) { __ MaddvW(acc, left, right); @@ -881,7 +881,7 @@ void InstructionCodeGeneratorMIPS::VisitVecMultiplyAccumulate(HVecMultiplyAccumu __ MsubvW(acc, left, right); } break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); if (instruction->GetOpKind() == HInstruction::kAdd) { __ MaddvD(acc, left, right); @@ -910,14 +910,14 @@ static void CreateVecMemLocations(ArenaAllocator* arena, bool is_load) { LocationSummary* locations = new (arena) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); if (is_load) { @@ -970,18 +970,18 @@ void LocationsBuilderMIPS::VisitVecLoad(HVecLoad* instruction) { void InstructionCodeGeneratorMIPS::VisitVecLoad(HVecLoad* instruction) { LocationSummary* locations = instruction->GetLocations(); - size_t size = Primitive::ComponentSize(instruction->GetPackedType()); + size_t size = DataType::Size(instruction->GetPackedType()); VectorRegister reg = VectorRegisterFrom(locations->Out()); Register base; int32_t offset = VecAddress(locations, size, &base); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: + case DataType::Type::kBool: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ LdB(reg, base, offset); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: // Loading 8-bytes (needed if dealing with compressed strings in StringCharAt) from unaligned // memory address may cause a trap to the kernel if the CPU doesn't directly 
support unaligned // loads and stores. @@ -990,13 +990,13 @@ void InstructionCodeGeneratorMIPS::VisitVecLoad(HVecLoad* instruction) { DCHECK_EQ(8u, instruction->GetVectorLength()); __ LdH(reg, base, offset); break; - case Primitive::kPrimInt: - case Primitive::kPrimFloat: + case DataType::Type::kInt32: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ LdW(reg, base, offset); break; - case Primitive::kPrimLong: - case Primitive::kPrimDouble: + case DataType::Type::kInt64: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ LdD(reg, base, offset); break; @@ -1012,28 +1012,28 @@ void LocationsBuilderMIPS::VisitVecStore(HVecStore* instruction) { void InstructionCodeGeneratorMIPS::VisitVecStore(HVecStore* instruction) { LocationSummary* locations = instruction->GetLocations(); - size_t size = Primitive::ComponentSize(instruction->GetPackedType()); + size_t size = DataType::Size(instruction->GetPackedType()); VectorRegister reg = VectorRegisterFrom(locations->InAt(2)); Register base; int32_t offset = VecAddress(locations, size, &base); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: + case DataType::Type::kBool: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ StB(reg, base, offset); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ StH(reg, base, offset); break; - case Primitive::kPrimInt: - case Primitive::kPrimFloat: + case DataType::Type::kInt32: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ StW(reg, base, offset); break; - case Primitive::kPrimLong: - case Primitive::kPrimDouble: + case DataType::Type::kInt64: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ StD(reg, base, offset); break; diff --git a/compiler/optimizing/code_generator_vector_mips64.cc b/compiler/optimizing/code_generator_vector_mips64.cc index db31bdcc92..f60f708976 100644 --- a/compiler/optimizing/code_generator_vector_mips64.cc +++ b/compiler/optimizing/code_generator_vector_mips64.cc @@ -31,17 +31,17 @@ VectorRegister VectorRegisterFrom(Location location) { void LocationsBuilderMIPS64::VisitVecReplicateScalar(HVecReplicateScalar* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresFpuRegister()); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); break; @@ -55,31 +55,31 @@ void InstructionCodeGeneratorMIPS64::VisitVecReplicateScalar(HVecReplicateScalar LocationSummary* locations = instruction->GetLocations(); VectorRegister dst = VectorRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case 
Primitive::kPrimBoolean: - case Primitive::kPrimByte: + case DataType::Type::kBool: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ FillB(dst, locations->InAt(0).AsRegister<GpuRegister>()); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ FillH(dst, locations->InAt(0).AsRegister<GpuRegister>()); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ FillW(dst, locations->InAt(0).AsRegister<GpuRegister>()); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ FillD(dst, locations->InAt(0).AsRegister<GpuRegister>()); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ ReplicateFPToVectorRegister(dst, locations->InAt(0).AsFpuRegister<FpuRegister>(), /* is_double */ false); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ ReplicateFPToVectorRegister(dst, locations->InAt(0).AsFpuRegister<FpuRegister>(), @@ -103,19 +103,19 @@ void InstructionCodeGeneratorMIPS64::VisitVecExtractScalar(HVecExtractScalar* in static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) { LocationSummary* locations = new (arena) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: + case DataType::Type::kBool: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), instruction->IsVecNot() ? Location::kOutputOverlap : Location::kNoOutputOverlap); break; - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), (instruction->IsVecNeg() || instruction->IsVecAbs()) @@ -144,9 +144,9 @@ void InstructionCodeGeneratorMIPS64::VisitVecCnv(HVecCnv* instruction) { LocationSummary* locations = instruction->GetLocations(); VectorRegister src = VectorRegisterFrom(locations->InAt(0)); VectorRegister dst = VectorRegisterFrom(locations->Out()); - Primitive::Type from = instruction->GetInputType(); - Primitive::Type to = instruction->GetResultType(); - if (from == Primitive::kPrimInt && to == Primitive::kPrimFloat) { + DataType::Type from = instruction->GetInputType(); + DataType::Type to = instruction->GetResultType(); + if (from == DataType::Type::kInt32 && to == DataType::Type::kFloat32) { DCHECK_EQ(4u, instruction->GetVectorLength()); __ Ffint_sW(dst, src); } else { @@ -164,33 +164,33 @@ void InstructionCodeGeneratorMIPS64::VisitVecNeg(HVecNeg* instruction) { VectorRegister src = VectorRegisterFrom(locations->InAt(0)); VectorRegister dst = VectorRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ FillB(dst, ZERO); __ SubvB(dst, dst, src); break; - case Primitive::kPrimChar: - case 
Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ FillH(dst, ZERO); __ SubvH(dst, dst, src); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ FillW(dst, ZERO); __ SubvW(dst, dst, src); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ FillD(dst, ZERO); __ SubvD(dst, dst, src); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ FillW(dst, ZERO); __ FsubW(dst, dst, src); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ FillD(dst, ZERO); __ FsubD(dst, dst, src); @@ -210,34 +210,34 @@ void InstructionCodeGeneratorMIPS64::VisitVecAbs(HVecAbs* instruction) { VectorRegister src = VectorRegisterFrom(locations->InAt(0)); VectorRegister dst = VectorRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ FillB(dst, ZERO); // all zeroes __ Add_aB(dst, dst, src); // dst = abs(0) + abs(src) break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ FillH(dst, ZERO); // all zeroes __ Add_aH(dst, dst, src); // dst = abs(0) + abs(src) break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ FillW(dst, ZERO); // all zeroes __ Add_aW(dst, dst, src); // dst = abs(0) + abs(src) break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ FillD(dst, ZERO); // all zeroes __ Add_aD(dst, dst, src); // dst = abs(0) + abs(src) break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ LdiW(dst, -1); // all ones __ SrliW(dst, dst, 1); __ AndV(dst, dst, src); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ LdiD(dst, -1); // all ones __ SrliD(dst, dst, 1); @@ -258,18 +258,18 @@ void InstructionCodeGeneratorMIPS64::VisitVecNot(HVecNot* instruction) { VectorRegister src = VectorRegisterFrom(locations->InAt(0)); VectorRegister dst = VectorRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: // special case boolean-not + case DataType::Type::kBool: // special case boolean-not DCHECK_EQ(16u, instruction->GetVectorLength()); __ LdiB(dst, 1); __ XorV(dst, dst, src); break; - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: DCHECK_LE(2u, instruction->GetVectorLength()); DCHECK_LE(instruction->GetVectorLength(), 16u); __ NorV(dst, src, src); // lanes do not matter @@ -284,14 +284,14 @@ void InstructionCodeGeneratorMIPS64::VisitVecNot(HVecNot* instruction) { static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) { LocationSummary* 
locations = new (arena) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); @@ -312,28 +312,28 @@ void InstructionCodeGeneratorMIPS64::VisitVecAdd(HVecAdd* instruction) { VectorRegister rhs = VectorRegisterFrom(locations->InAt(1)); VectorRegister dst = VectorRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ AddvB(dst, lhs, rhs); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ AddvH(dst, lhs, rhs); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ AddvW(dst, lhs, rhs); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ AddvD(dst, lhs, rhs); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ FaddW(dst, lhs, rhs); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ FaddD(dst, lhs, rhs); break; @@ -353,7 +353,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecHalvingAdd(HVecHalvingAdd* instruct VectorRegister rhs = VectorRegisterFrom(locations->InAt(1)); VectorRegister dst = VectorRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { instruction->IsRounded() @@ -365,8 +365,8 @@ void InstructionCodeGeneratorMIPS64::VisitVecHalvingAdd(HVecHalvingAdd* instruct : __ Ave_sB(dst, lhs, rhs); } break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { instruction->IsRounded() @@ -394,28 +394,28 @@ void InstructionCodeGeneratorMIPS64::VisitVecSub(HVecSub* instruction) { VectorRegister rhs = VectorRegisterFrom(locations->InAt(1)); VectorRegister dst = VectorRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ SubvB(dst, lhs, rhs); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ SubvH(dst, lhs, rhs); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ SubvW(dst, lhs, rhs); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, 
instruction->GetVectorLength()); __ SubvD(dst, lhs, rhs); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ FsubW(dst, lhs, rhs); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ FsubD(dst, lhs, rhs); break; @@ -435,28 +435,28 @@ void InstructionCodeGeneratorMIPS64::VisitVecMul(HVecMul* instruction) { VectorRegister rhs = VectorRegisterFrom(locations->InAt(1)); VectorRegister dst = VectorRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ MulvB(dst, lhs, rhs); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ MulvH(dst, lhs, rhs); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ MulvW(dst, lhs, rhs); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ MulvD(dst, lhs, rhs); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ FmulW(dst, lhs, rhs); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ FmulD(dst, lhs, rhs); break; @@ -476,11 +476,11 @@ void InstructionCodeGeneratorMIPS64::VisitVecDiv(HVecDiv* instruction) { VectorRegister rhs = VectorRegisterFrom(locations->InAt(1)); VectorRegister dst = VectorRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ FdivW(dst, lhs, rhs); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ FdivD(dst, lhs, rhs); break; @@ -500,7 +500,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecMin(HVecMin* instruction) { VectorRegister rhs = VectorRegisterFrom(locations->InAt(1)); VectorRegister dst = VectorRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ Min_uB(dst, lhs, rhs); @@ -508,8 +508,8 @@ void InstructionCodeGeneratorMIPS64::VisitVecMin(HVecMin* instruction) { __ Min_sB(dst, lhs, rhs); } break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ Min_uH(dst, lhs, rhs); @@ -517,7 +517,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecMin(HVecMin* instruction) { __ Min_sH(dst, lhs, rhs); } break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ Min_uW(dst, lhs, rhs); @@ -525,7 +525,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecMin(HVecMin* instruction) { __ Min_sW(dst, lhs, rhs); } break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ Min_uD(dst, lhs, rhs); @@ -535,12 +535,12 @@ void InstructionCodeGeneratorMIPS64::VisitVecMin(HVecMin* instruction) { break; // When one of arguments is NaN, fmin.df 
returns other argument, but Java expects a NaN value. // TODO: Fix min(x, NaN) cases for float and double. - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); DCHECK(!instruction->IsUnsigned()); __ FminW(dst, lhs, rhs); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); DCHECK(!instruction->IsUnsigned()); __ FminD(dst, lhs, rhs); @@ -561,7 +561,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecMax(HVecMax* instruction) { VectorRegister rhs = VectorRegisterFrom(locations->InAt(1)); VectorRegister dst = VectorRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ Max_uB(dst, lhs, rhs); @@ -569,8 +569,8 @@ void InstructionCodeGeneratorMIPS64::VisitVecMax(HVecMax* instruction) { __ Max_sB(dst, lhs, rhs); } break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ Max_uH(dst, lhs, rhs); @@ -578,7 +578,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecMax(HVecMax* instruction) { __ Max_sH(dst, lhs, rhs); } break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ Max_uW(dst, lhs, rhs); @@ -586,7 +586,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecMax(HVecMax* instruction) { __ Max_sW(dst, lhs, rhs); } break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ Max_uD(dst, lhs, rhs); @@ -596,12 +596,12 @@ void InstructionCodeGeneratorMIPS64::VisitVecMax(HVecMax* instruction) { break; // When one of arguments is NaN, fmax.df returns other argument, but Java expects a NaN value. // TODO: Fix max(x, NaN) cases for float and double. 
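Note on the fmin/fmax TODOs above: MSA fmin.df/fmax.df hand back the non-NaN operand when exactly one input is NaN, whereas Java's Math.min/Math.max must return NaN. A minimal one-lane sketch of the Java rule (JavaMinFloat is an illustrative name, not code from this patch; signed zeros are ignored here for brevity):

#include <cmath>
#include <limits>

// Java semantics for a single lane: NaN on either input is contagious,
// which the MSA instruction alone does not guarantee.
static float JavaMinFloat(float a, float b) {
  if (std::isnan(a) || std::isnan(b)) {
    return std::numeric_limits<float>::quiet_NaN();
  }
  return (a < b) ? a : b;
}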
- case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); DCHECK(!instruction->IsUnsigned()); __ FmaxW(dst, lhs, rhs); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); DCHECK(!instruction->IsUnsigned()); __ FmaxD(dst, lhs, rhs); @@ -622,14 +622,14 @@ void InstructionCodeGeneratorMIPS64::VisitVecAnd(HVecAnd* instruction) { VectorRegister rhs = VectorRegisterFrom(locations->InAt(1)); VectorRegister dst = VectorRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: DCHECK_LE(2u, instruction->GetVectorLength()); DCHECK_LE(instruction->GetVectorLength(), 16u); __ AndV(dst, lhs, rhs); // lanes do not matter @@ -658,14 +658,14 @@ void InstructionCodeGeneratorMIPS64::VisitVecOr(HVecOr* instruction) { VectorRegister rhs = VectorRegisterFrom(locations->InAt(1)); VectorRegister dst = VectorRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: DCHECK_LE(2u, instruction->GetVectorLength()); DCHECK_LE(instruction->GetVectorLength(), 16u); __ OrV(dst, lhs, rhs); // lanes do not matter @@ -686,14 +686,14 @@ void InstructionCodeGeneratorMIPS64::VisitVecXor(HVecXor* instruction) { VectorRegister rhs = VectorRegisterFrom(locations->InAt(1)); VectorRegister dst = VectorRegisterFrom(locations->Out()); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: DCHECK_LE(2u, instruction->GetVectorLength()); DCHECK_LE(instruction->GetVectorLength(), 16u); __ XorV(dst, lhs, rhs); // lanes do not matter @@ -708,11 +708,11 @@ void InstructionCodeGeneratorMIPS64::VisitVecXor(HVecXor* instruction) { static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) { LocationSummary* locations = new (arena) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case 
DataType::Type::kInt32: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant())); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); @@ -733,20 +733,20 @@ void InstructionCodeGeneratorMIPS64::VisitVecShl(HVecShl* instruction) { VectorRegister dst = VectorRegisterFrom(locations->Out()); int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ SlliB(dst, lhs, value); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ SlliH(dst, lhs, value); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ SlliW(dst, lhs, value); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ SlliD(dst, lhs, value); break; @@ -766,20 +766,20 @@ void InstructionCodeGeneratorMIPS64::VisitVecShr(HVecShr* instruction) { VectorRegister dst = VectorRegisterFrom(locations->Out()); int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ SraiB(dst, lhs, value); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ SraiH(dst, lhs, value); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ SraiW(dst, lhs, value); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ SraiD(dst, lhs, value); break; @@ -799,20 +799,20 @@ void InstructionCodeGeneratorMIPS64::VisitVecUShr(HVecUShr* instruction) { VectorRegister dst = VectorRegisterFrom(locations->Out()); int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ SrliB(dst, lhs, value); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ SrliH(dst, lhs, value); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ SrliW(dst, lhs, value); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ SrliD(dst, lhs, value); break; @@ -834,11 +834,11 @@ void InstructionCodeGeneratorMIPS64::VisitVecSetScalars(HVecSetScalars* instruct static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instruction) { LocationSummary* locations = new (arena) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + 
case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); locations->SetInAt(2, Location::RequiresFpuRegister()); @@ -860,7 +860,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecMultiplyAccumulate(HVecMultiplyAccu VectorRegister left = VectorRegisterFrom(locations->InAt(1)); VectorRegister right = VectorRegisterFrom(locations->InAt(2)); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); if (instruction->GetOpKind() == HInstruction::kAdd) { __ MaddvB(acc, left, right); @@ -868,8 +868,8 @@ void InstructionCodeGeneratorMIPS64::VisitVecMultiplyAccumulate(HVecMultiplyAccu __ MsubvB(acc, left, right); } break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); if (instruction->GetOpKind() == HInstruction::kAdd) { __ MaddvH(acc, left, right); @@ -877,7 +877,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecMultiplyAccumulate(HVecMultiplyAccu __ MsubvH(acc, left, right); } break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); if (instruction->GetOpKind() == HInstruction::kAdd) { __ MaddvW(acc, left, right); @@ -885,7 +885,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecMultiplyAccumulate(HVecMultiplyAccu __ MsubvW(acc, left, right); } break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); if (instruction->GetOpKind() == HInstruction::kAdd) { __ MaddvD(acc, left, right); @@ -914,14 +914,14 @@ static void CreateVecMemLocations(ArenaAllocator* arena, bool is_load) { LocationSummary* locations = new (arena) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); if (is_load) { @@ -974,18 +974,18 @@ void LocationsBuilderMIPS64::VisitVecLoad(HVecLoad* instruction) { void InstructionCodeGeneratorMIPS64::VisitVecLoad(HVecLoad* instruction) { LocationSummary* locations = instruction->GetLocations(); - size_t size = Primitive::ComponentSize(instruction->GetPackedType()); + size_t size = DataType::Size(instruction->GetPackedType()); VectorRegister reg = VectorRegisterFrom(locations->Out()); GpuRegister base; int32_t offset = VecAddress(locations, size, &base); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: + case DataType::Type::kBool: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ LdB(reg, base, offset); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: // Loading 8-bytes (needed if dealing with compressed strings in StringCharAt) from unaligned // memory address may cause a trap to the kernel if the CPU doesn't 
directly support unaligned // loads and stores. @@ -994,13 +994,13 @@ void InstructionCodeGeneratorMIPS64::VisitVecLoad(HVecLoad* instruction) { DCHECK_EQ(8u, instruction->GetVectorLength()); __ LdH(reg, base, offset); break; - case Primitive::kPrimInt: - case Primitive::kPrimFloat: + case DataType::Type::kInt32: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ LdW(reg, base, offset); break; - case Primitive::kPrimLong: - case Primitive::kPrimDouble: + case DataType::Type::kInt64: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ LdD(reg, base, offset); break; @@ -1016,28 +1016,28 @@ void LocationsBuilderMIPS64::VisitVecStore(HVecStore* instruction) { void InstructionCodeGeneratorMIPS64::VisitVecStore(HVecStore* instruction) { LocationSummary* locations = instruction->GetLocations(); - size_t size = Primitive::ComponentSize(instruction->GetPackedType()); + size_t size = DataType::Size(instruction->GetPackedType()); VectorRegister reg = VectorRegisterFrom(locations->InAt(2)); GpuRegister base; int32_t offset = VecAddress(locations, size, &base); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: + case DataType::Type::kBool: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ StB(reg, base, offset); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ StH(reg, base, offset); break; - case Primitive::kPrimInt: - case Primitive::kPrimFloat: + case DataType::Type::kInt32: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ StW(reg, base, offset); break; - case Primitive::kPrimLong: - case Primitive::kPrimDouble: + case DataType::Type::kInt64: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ StD(reg, base, offset); break; diff --git a/compiler/optimizing/code_generator_vector_x86.cc b/compiler/optimizing/code_generator_vector_x86.cc index 5a012e7298..6515dbe7b7 100644 --- a/compiler/optimizing/code_generator_vector_x86.cc +++ b/compiler/optimizing/code_generator_vector_x86.cc @@ -30,23 +30,23 @@ void LocationsBuilderX86::VisitVecReplicateScalar(HVecReplicateScalar* instructi HInstruction* input = instruction->InputAt(0); bool is_zero = IsZeroBitPattern(input); switch (instruction->GetPackedType()) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Long needs extra temporary to load from the register pair. if (!is_zero) { locations->AddTemp(Location::RequiresFpuRegister()); } FALLTHROUGH_INTENDED; - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: locations->SetInAt(0, is_zero ? Location::ConstantLocation(input->AsConstant()) : Location::RequiresRegister()); locations->SetOut(Location::RequiresFpuRegister()); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, is_zero ? Location::ConstantLocation(input->AsConstant()) : Location::RequiresFpuRegister()); locations->SetOut(is_zero ? 
Location::RequiresFpuRegister() @@ -69,27 +69,27 @@ void InstructionCodeGeneratorX86::VisitVecReplicateScalar(HVecReplicateScalar* i } switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: + case DataType::Type::kBool: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ movd(dst, locations->InAt(0).AsRegister<Register>()); __ punpcklbw(dst, dst); __ punpcklwd(dst, dst); __ pshufd(dst, dst, Immediate(0)); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ movd(dst, locations->InAt(0).AsRegister<Register>()); __ punpcklwd(dst, dst); __ pshufd(dst, dst, Immediate(0)); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ movd(dst, locations->InAt(0).AsRegister<Register>()); __ pshufd(dst, dst, Immediate(0)); break; - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>(); DCHECK_EQ(2u, instruction->GetVectorLength()); __ movd(dst, locations->InAt(0).AsRegisterPairLow<Register>()); @@ -98,12 +98,12 @@ void InstructionCodeGeneratorX86::VisitVecReplicateScalar(HVecReplicateScalar* i __ punpcklqdq(dst, dst); break; } - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK(locations->InAt(0).Equals(locations->Out())); DCHECK_EQ(4u, instruction->GetVectorLength()); __ shufps(dst, dst, Immediate(0)); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK(locations->InAt(0).Equals(locations->Out())); DCHECK_EQ(2u, instruction->GetVectorLength()); __ shufpd(dst, dst, Immediate(0)); @@ -117,20 +117,20 @@ void InstructionCodeGeneratorX86::VisitVecReplicateScalar(HVecReplicateScalar* i void LocationsBuilderX86::VisitVecExtractScalar(HVecExtractScalar* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Long needs extra temporary to store into the register pair. locations->AddTemp(Location::RequiresFpuRegister()); FALLTHROUGH_INTENDED; - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister()); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::SameAsFirstInput()); break; @@ -144,18 +144,18 @@ void InstructionCodeGeneratorX86::VisitVecExtractScalar(HVecExtractScalar* instr LocationSummary* locations = instruction->GetLocations(); XmmRegister src = locations->InAt(0).AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: // TODO: up to here, and? + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: // TODO: up to here, and? 
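Note: every hunk in these vector code generators applies the same one-for-one rename of the case labels, e.g. the kBool/kInt8/kUint16/kInt16 group just above. Collected as a sketch, the mapping used throughout is the following; the helper name DataTypeFromPrimitive is hypothetical (the real conversion lives in the new optimizing/data_type.* files, not shown in these hunks), and the default branch reuses the logging idiom of the surrounding code:

static DataType::Type DataTypeFromPrimitive(Primitive::Type type) {
  switch (type) {
    case Primitive::kPrimBoolean: return DataType::Type::kBool;
    case Primitive::kPrimByte:    return DataType::Type::kInt8;
    case Primitive::kPrimChar:    return DataType::Type::kUint16;  // chars become unsigned 16-bit
    case Primitive::kPrimShort:   return DataType::Type::kInt16;
    case Primitive::kPrimInt:     return DataType::Type::kInt32;
    case Primitive::kPrimLong:    return DataType::Type::kInt64;
    case Primitive::kPrimFloat:   return DataType::Type::kFloat32;
    case Primitive::kPrimDouble:  return DataType::Type::kFloat64;
    default:
      LOG(FATAL) << "Unexpected primitive type";
      UNREACHABLE();
  }
}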
LOG(FATAL) << "Unsupported SIMD type"; UNREACHABLE(); - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_LE(4u, instruction->GetVectorLength()); DCHECK_LE(instruction->GetVectorLength(), 16u); __ movd(locations->Out().AsRegister<Register>(), src); break; - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>(); DCHECK_EQ(2u, instruction->GetVectorLength()); __ movd(locations->Out().AsRegisterPairLow<Register>(), src); @@ -163,8 +163,8 @@ void InstructionCodeGeneratorX86::VisitVecExtractScalar(HVecExtractScalar* instr __ movd(locations->Out().AsRegisterPairHigh<Register>(), tmp); break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: DCHECK_LE(2u, instruction->GetVectorLength()); DCHECK_LE(instruction->GetVectorLength(), 4u); DCHECK(locations->InAt(0).Equals(locations->Out())); // no code required @@ -179,14 +179,14 @@ void InstructionCodeGeneratorX86::VisitVecExtractScalar(HVecExtractScalar* instr static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) { LocationSummary* locations = new (arena) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister()); break; @@ -199,7 +199,7 @@ static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* in void LocationsBuilderX86::VisitVecReduce(HVecReduce* instruction) { CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); // Long reduction or min/max require a temporary. 
- if (instruction->GetPackedType() == Primitive::kPrimLong || + if (instruction->GetPackedType() == DataType::Type::kInt64 || instruction->GetKind() == HVecReduce::kMin || instruction->GetKind() == HVecReduce::kMax) { instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister()); @@ -211,7 +211,7 @@ void InstructionCodeGeneratorX86::VisitVecReduce(HVecReduce* instruction) { XmmRegister src = locations->InAt(0).AsFpuRegister<XmmRegister>(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); switch (instruction->GetKind()) { case HVecReduce::kSum: @@ -241,7 +241,7 @@ void InstructionCodeGeneratorX86::VisitVecReduce(HVecReduce* instruction) { } } break; - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { DCHECK_EQ(2u, instruction->GetVectorLength()); XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>(); switch (instruction->GetKind()) { @@ -271,9 +271,9 @@ void InstructionCodeGeneratorX86::VisitVecCnv(HVecCnv* instruction) { LocationSummary* locations = instruction->GetLocations(); XmmRegister src = locations->InAt(0).AsFpuRegister<XmmRegister>(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); - Primitive::Type from = instruction->GetInputType(); - Primitive::Type to = instruction->GetResultType(); - if (from == Primitive::kPrimInt && to == Primitive::kPrimFloat) { + DataType::Type from = instruction->GetInputType(); + DataType::Type to = instruction->GetResultType(); + if (from == DataType::Type::kInt32 && to == DataType::Type::kFloat32) { DCHECK_EQ(4u, instruction->GetVectorLength()); __ cvtdq2ps(dst, src); } else { @@ -290,33 +290,33 @@ void InstructionCodeGeneratorX86::VisitVecNeg(HVecNeg* instruction) { XmmRegister src = locations->InAt(0).AsFpuRegister<XmmRegister>(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ pxor(dst, dst); __ psubb(dst, src); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ pxor(dst, dst); __ psubw(dst, src); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ pxor(dst, dst); __ psubd(dst, src); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ pxor(dst, dst); __ psubq(dst, src); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ xorps(dst, dst); __ subps(dst, src); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ xorpd(dst, dst); __ subpd(dst, src); @@ -330,7 +330,7 @@ void InstructionCodeGeneratorX86::VisitVecNeg(HVecNeg* instruction) { void LocationsBuilderX86::VisitVecAbs(HVecAbs* instruction) { CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); // Integral-abs requires a temporary for the comparison. 
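Note on the comment above: the "comparison" temporary exists because packed integer abs is synthesized from a compare-against-zero mask that is xor-ed into the value and then subtracted back out. The exact instruction sequence is elided from this hunk; a one-lane sketch of the usual sign-mask idiom (AbsViaSignMask is an illustrative name, and unsigned arithmetic is used so the INT32_MIN lane wraps the way packed integer subtraction does):

#include <cstdint>

// abs(x) via a sign mask: mask is all-ones for negative lanes, all-zeroes
// otherwise, so (x ^ mask) - mask flips the sign only where needed.
static int32_t AbsViaSignMask(int32_t x) {
  uint32_t mask = (x < 0) ? ~0u : 0u;                          // per-lane compare result
  uint32_t result = (static_cast<uint32_t>(x) ^ mask) - mask;  // wraps for INT32_MIN
  return static_cast<int32_t>(result);
}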
- if (instruction->GetPackedType() == Primitive::kPrimInt) { + if (instruction->GetPackedType() == DataType::Type::kInt32) { instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister()); } } @@ -340,7 +340,7 @@ void InstructionCodeGeneratorX86::VisitVecAbs(HVecAbs* instruction) { XmmRegister src = locations->InAt(0).AsFpuRegister<XmmRegister>(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { DCHECK_EQ(4u, instruction->GetVectorLength()); XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>(); __ movaps(dst, src); @@ -350,13 +350,13 @@ void InstructionCodeGeneratorX86::VisitVecAbs(HVecAbs* instruction) { __ psubd(dst, tmp); break; } - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ pcmpeqb(dst, dst); // all ones __ psrld(dst, Immediate(1)); __ andps(dst, src); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ pcmpeqb(dst, dst); // all ones __ psrlq(dst, Immediate(1)); @@ -371,7 +371,7 @@ void InstructionCodeGeneratorX86::VisitVecAbs(HVecAbs* instruction) { void LocationsBuilderX86::VisitVecNot(HVecNot* instruction) { CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); // Boolean-not requires a temporary to construct the 16 x one. - if (instruction->GetPackedType() == Primitive::kPrimBoolean) { + if (instruction->GetPackedType() == DataType::Type::kBool) { instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister()); } } @@ -381,7 +381,7 @@ void InstructionCodeGeneratorX86::VisitVecNot(HVecNot* instruction) { XmmRegister src = locations->InAt(0).AsFpuRegister<XmmRegister>(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: { // special case boolean-not + case DataType::Type::kBool: { // special case boolean-not DCHECK_EQ(16u, instruction->GetVectorLength()); XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>(); __ pxor(dst, dst); @@ -390,22 +390,22 @@ void InstructionCodeGeneratorX86::VisitVecNot(HVecNot* instruction) { __ pxor(dst, src); break; } - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: DCHECK_LE(2u, instruction->GetVectorLength()); DCHECK_LE(instruction->GetVectorLength(), 16u); __ pcmpeqb(dst, dst); // all ones __ pxor(dst, src); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ pcmpeqb(dst, dst); // all ones __ xorps(dst, src); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ pcmpeqb(dst, dst); // all ones __ xorpd(dst, src); @@ -420,14 +420,14 @@ void InstructionCodeGeneratorX86::VisitVecNot(HVecNot* instruction) { static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) { LocationSummary* locations = new (arena) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: - case 
Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); locations->SetOut(Location::SameAsFirstInput()); @@ -448,28 +448,28 @@ void InstructionCodeGeneratorX86::VisitVecAdd(HVecAdd* instruction) { XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ paddb(dst, src); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ paddw(dst, src); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ paddd(dst, src); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ paddq(dst, src); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ addps(dst, src); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ addpd(dst, src); break; @@ -493,12 +493,12 @@ void InstructionCodeGeneratorX86::VisitVecHalvingAdd(HVecHalvingAdd* instruction DCHECK(instruction->IsUnsigned()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ pavgb(dst, src); return; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ pavgw(dst, src); return; @@ -518,28 +518,28 @@ void InstructionCodeGeneratorX86::VisitVecSub(HVecSub* instruction) { XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ psubb(dst, src); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ psubw(dst, src); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ psubd(dst, src); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ psubq(dst, src); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ subps(dst, src); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ subpd(dst, src); break; @@ -559,20 +559,20 @@ void InstructionCodeGeneratorX86::VisitVecMul(HVecMul* instruction) { XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimChar: - case 
Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ pmullw(dst, src); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ pmulld(dst, src); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ mulps(dst, src); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ mulpd(dst, src); break; @@ -592,11 +592,11 @@ void InstructionCodeGeneratorX86::VisitVecDiv(HVecDiv* instruction) { XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ divps(dst, src); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ divpd(dst, src); break; @@ -616,7 +616,7 @@ void InstructionCodeGeneratorX86::VisitVecMin(HVecMin* instruction) { XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ pminub(dst, src); @@ -624,8 +624,8 @@ void InstructionCodeGeneratorX86::VisitVecMin(HVecMin* instruction) { __ pminsb(dst, src); } break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ pminuw(dst, src); @@ -633,7 +633,7 @@ void InstructionCodeGeneratorX86::VisitVecMin(HVecMin* instruction) { __ pminsw(dst, src); } break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ pminud(dst, src); @@ -642,12 +642,12 @@ void InstructionCodeGeneratorX86::VisitVecMin(HVecMin* instruction) { } break; // Next cases are sloppy wrt 0.0 vs -0.0. 
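Note on the "sloppy wrt 0.0 vs -0.0" comment above: per lane, the packed min keeps the first operand only when it compares strictly less than the second, so a tie between +0.0 and -0.0 is resolved by operand order rather than by sign, unlike Java's Math.min. A one-lane sketch of that behaviour (PackedMinLane is an illustrative name for what the instruction effectively computes, under the assumption that the second operand wins on ties):

// Per-lane behaviour of the packed min: the second operand is kept whenever
// 'a < b' is false, which includes the +0.0f / -0.0f tie.
static float PackedMinLane(float a, float b) {
  return (a < b) ? a : b;
}
// PackedMinLane(-0.0f, +0.0f) yields +0.0f, whereas Java's Math.min(-0.0f, +0.0f)
// must yield -0.0f; hence the comment (the same caveat applies to max).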
- case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); DCHECK(!instruction->IsUnsigned()); __ minps(dst, src); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); DCHECK(!instruction->IsUnsigned()); __ minpd(dst, src); @@ -668,7 +668,7 @@ void InstructionCodeGeneratorX86::VisitVecMax(HVecMax* instruction) { XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ pmaxub(dst, src); @@ -676,8 +676,8 @@ void InstructionCodeGeneratorX86::VisitVecMax(HVecMax* instruction) { __ pmaxsb(dst, src); } break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ pmaxuw(dst, src); @@ -685,7 +685,7 @@ void InstructionCodeGeneratorX86::VisitVecMax(HVecMax* instruction) { __ pmaxsw(dst, src); } break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ pmaxud(dst, src); @@ -694,12 +694,12 @@ void InstructionCodeGeneratorX86::VisitVecMax(HVecMax* instruction) { } break; // Next cases are sloppy wrt 0.0 vs -0.0. - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); DCHECK(!instruction->IsUnsigned()); __ maxps(dst, src); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); DCHECK(!instruction->IsUnsigned()); __ maxpd(dst, src); @@ -720,21 +720,21 @@ void InstructionCodeGeneratorX86::VisitVecAnd(HVecAnd* instruction) { XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: DCHECK_LE(2u, instruction->GetVectorLength()); DCHECK_LE(instruction->GetVectorLength(), 16u); __ pand(dst, src); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ andps(dst, src); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ andpd(dst, src); break; @@ -754,21 +754,21 @@ void InstructionCodeGeneratorX86::VisitVecAndNot(HVecAndNot* instruction) { XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: DCHECK_LE(2u, 
instruction->GetVectorLength()); DCHECK_LE(instruction->GetVectorLength(), 16u); __ pandn(dst, src); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ andnps(dst, src); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ andnpd(dst, src); break; @@ -788,21 +788,21 @@ void InstructionCodeGeneratorX86::VisitVecOr(HVecOr* instruction) { XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: DCHECK_LE(2u, instruction->GetVectorLength()); DCHECK_LE(instruction->GetVectorLength(), 16u); __ por(dst, src); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ orps(dst, src); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ orpd(dst, src); break; @@ -822,21 +822,21 @@ void InstructionCodeGeneratorX86::VisitVecXor(HVecXor* instruction) { XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: DCHECK_LE(2u, instruction->GetVectorLength()); DCHECK_LE(instruction->GetVectorLength(), 16u); __ pxor(dst, src); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ xorps(dst, src); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ xorpd(dst, src); break; @@ -850,10 +850,10 @@ void InstructionCodeGeneratorX86::VisitVecXor(HVecXor* instruction) { static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) { LocationSummary* locations = new (arena) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant())); locations->SetOut(Location::SameAsFirstInput()); @@ -874,16 +874,16 @@ void InstructionCodeGeneratorX86::VisitVecShl(HVecShl* instruction) { int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case 
DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ psllw(dst, Immediate(static_cast<uint8_t>(value))); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ pslld(dst, Immediate(static_cast<uint8_t>(value))); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ psllq(dst, Immediate(static_cast<uint8_t>(value))); break; @@ -903,12 +903,12 @@ void InstructionCodeGeneratorX86::VisitVecShr(HVecShr* instruction) { int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ psraw(dst, Immediate(static_cast<uint8_t>(value))); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ psrad(dst, Immediate(static_cast<uint8_t>(value))); break; @@ -928,16 +928,16 @@ void InstructionCodeGeneratorX86::VisitVecUShr(HVecUShr* instruction) { int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ psrlw(dst, Immediate(static_cast<uint8_t>(value))); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ psrld(dst, Immediate(static_cast<uint8_t>(value))); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ psrlq(dst, Immediate(static_cast<uint8_t>(value))); break; @@ -956,23 +956,23 @@ void LocationsBuilderX86::VisitVecSetScalars(HVecSetScalars* instruction) { bool is_zero = IsZeroBitPattern(input); switch (instruction->GetPackedType()) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Long needs extra temporary to load from register pairs. if (!is_zero) { locations->AddTemp(Location::RequiresFpuRegister()); } FALLTHROUGH_INTENDED; - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: locations->SetInAt(0, is_zero ? Location::ConstantLocation(input->AsConstant()) : Location::RequiresRegister()); locations->SetOut(Location::RequiresFpuRegister()); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, is_zero ? Location::ConstantLocation(input->AsConstant()) : Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister()); @@ -999,17 +999,17 @@ void InstructionCodeGeneratorX86::VisitVecSetScalars(HVecSetScalars* instruction // Set required elements. switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: // TODO: up to here, and? 
+ case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: // TODO: up to here, and? LOG(FATAL) << "Unsupported SIMD type"; UNREACHABLE(); - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ movd(dst, locations->InAt(0).AsRegister<Register>()); break; - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>(); DCHECK_EQ(2u, instruction->GetVectorLength()); __ xorps(tmp, tmp); @@ -1018,11 +1018,11 @@ void InstructionCodeGeneratorX86::VisitVecSetScalars(HVecSetScalars* instruction __ punpckldq(dst, tmp); break; } - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ movss(dst, locations->InAt(1).AsFpuRegister<XmmRegister>()); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ movsd(dst, locations->InAt(1).AsFpuRegister<XmmRegister>()); break; @@ -1036,11 +1036,11 @@ void InstructionCodeGeneratorX86::VisitVecSetScalars(HVecSetScalars* instruction static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instruction) { LocationSummary* locations = new (arena) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); locations->SetInAt(2, Location::RequiresFpuRegister()); @@ -1076,14 +1076,14 @@ static void CreateVecMemLocations(ArenaAllocator* arena, bool is_load) { LocationSummary* locations = new (arena) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); if (is_load) { @@ -1126,12 +1126,12 @@ void LocationsBuilderX86::VisitVecLoad(HVecLoad* instruction) { void InstructionCodeGeneratorX86::VisitVecLoad(HVecLoad* instruction) { LocationSummary* locations = instruction->GetLocations(); - size_t size = Primitive::ComponentSize(instruction->GetPackedType()); + size_t size = DataType::Size(instruction->GetPackedType()); Address address = VecAddress(locations, size, instruction->IsStringCharAt()); XmmRegister reg = locations->Out().AsFpuRegister<XmmRegister>(); bool is_aligned16 = instruction->GetAlignment().IsAlignedAt(16); switch (instruction->GetPackedType()) { - case Primitive::kPrimChar: + case DataType::Type::kUint16: DCHECK_EQ(8u, instruction->GetVectorLength()); // Special handling of compressed/uncompressed string load. 
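The kUint16 case of VisitVecLoad here keeps special handling for compressed strings, where the characters may be stored as single bytes and have to be widened to 16-bit lanes before the rest of the vector code can treat them as ordinary packed chars. As an illustration of that widening idiom, a minimal SSE2 intrinsics sketch rather than the ART assembler sequence; the function and parameter names are invented for the example:

#include <immintrin.h>
#include <cstdint>

// Zero-extend 8 packed uint8 values to 8 uint16 lanes, the effect that an
// unpack-low-bytes against a zeroed register achieves.
static inline __m128i WidenCompressedChars(const uint8_t* compressed) {
  __m128i bytes = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(compressed));  // load 8 bytes
  return _mm_unpacklo_epi8(bytes, _mm_setzero_si128());                           // 8 x uint16
}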
if (mirror::kUseStringCompression && instruction->IsStringCharAt()) { @@ -1155,20 +1155,20 @@ void InstructionCodeGeneratorX86::VisitVecLoad(HVecLoad* instruction) { return; } FALLTHROUGH_INTENDED; - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: DCHECK_LE(2u, instruction->GetVectorLength()); DCHECK_LE(instruction->GetVectorLength(), 16u); is_aligned16 ? __ movdqa(reg, address) : __ movdqu(reg, address); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); is_aligned16 ? __ movaps(reg, address) : __ movups(reg, address); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); is_aligned16 ? __ movapd(reg, address) : __ movupd(reg, address); break; @@ -1184,26 +1184,26 @@ void LocationsBuilderX86::VisitVecStore(HVecStore* instruction) { void InstructionCodeGeneratorX86::VisitVecStore(HVecStore* instruction) { LocationSummary* locations = instruction->GetLocations(); - size_t size = Primitive::ComponentSize(instruction->GetPackedType()); + size_t size = DataType::Size(instruction->GetPackedType()); Address address = VecAddress(locations, size, /*is_string_char_at*/ false); XmmRegister reg = locations->InAt(2).AsFpuRegister<XmmRegister>(); bool is_aligned16 = instruction->GetAlignment().IsAlignedAt(16); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: DCHECK_LE(2u, instruction->GetVectorLength()); DCHECK_LE(instruction->GetVectorLength(), 16u); is_aligned16 ? __ movdqa(address, reg) : __ movdqu(address, reg); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); is_aligned16 ? __ movaps(address, reg) : __ movups(address, reg); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); is_aligned16 ? __ movapd(address, reg) : __ movupd(address, reg); break; diff --git a/compiler/optimizing/code_generator_vector_x86_64.cc b/compiler/optimizing/code_generator_vector_x86_64.cc index 3698b7fb85..4241042574 100644 --- a/compiler/optimizing/code_generator_vector_x86_64.cc +++ b/compiler/optimizing/code_generator_vector_x86_64.cc @@ -30,18 +30,18 @@ void LocationsBuilderX86_64::VisitVecReplicateScalar(HVecReplicateScalar* instru HInstruction* input = instruction->InputAt(0); bool is_zero = IsZeroBitPattern(input); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: locations->SetInAt(0, is_zero ? 
Location::ConstantLocation(input->AsConstant()) : Location::RequiresRegister()); locations->SetOut(Location::RequiresFpuRegister()); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, is_zero ? Location::ConstantLocation(input->AsConstant()) : Location::RequiresFpuRegister()); locations->SetOut(is_zero ? Location::RequiresFpuRegister() @@ -64,37 +64,37 @@ void InstructionCodeGeneratorX86_64::VisitVecReplicateScalar(HVecReplicateScalar } switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: + case DataType::Type::kBool: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ movd(dst, locations->InAt(0).AsRegister<CpuRegister>(), /*64-bit*/ false); __ punpcklbw(dst, dst); __ punpcklwd(dst, dst); __ pshufd(dst, dst, Immediate(0)); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ movd(dst, locations->InAt(0).AsRegister<CpuRegister>(), /*64-bit*/ false); __ punpcklwd(dst, dst); __ pshufd(dst, dst, Immediate(0)); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ movd(dst, locations->InAt(0).AsRegister<CpuRegister>(), /*64-bit*/ false); __ pshufd(dst, dst, Immediate(0)); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ movd(dst, locations->InAt(0).AsRegister<CpuRegister>(), /*64-bit*/ true); __ punpcklqdq(dst, dst); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); DCHECK(locations->InAt(0).Equals(locations->Out())); __ shufps(dst, dst, Immediate(0)); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); DCHECK(locations->InAt(0).Equals(locations->Out())); __ shufpd(dst, dst, Immediate(0)); @@ -108,17 +108,17 @@ void InstructionCodeGeneratorX86_64::VisitVecReplicateScalar(HVecReplicateScalar void LocationsBuilderX86_64::VisitVecExtractScalar(HVecExtractScalar* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister()); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::SameAsFirstInput()); break; @@ -132,22 +132,22 @@ void InstructionCodeGeneratorX86_64::VisitVecExtractScalar(HVecExtractScalar* in LocationSummary* locations = instruction->GetLocations(); XmmRegister src = locations->InAt(0).AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: // TODO: up to here, and? 
+ case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: // TODO: up to here, and? LOG(FATAL) << "Unsupported SIMD type"; UNREACHABLE(); - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ movd(locations->Out().AsRegister<CpuRegister>(), src, /*64-bit*/ false); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ movd(locations->Out().AsRegister<CpuRegister>(), src, /*64-bit*/ true); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: DCHECK_LE(2u, instruction->GetVectorLength()); DCHECK_LE(instruction->GetVectorLength(), 4u); DCHECK(locations->InAt(0).Equals(locations->Out())); // no code required @@ -162,14 +162,14 @@ void InstructionCodeGeneratorX86_64::VisitVecExtractScalar(HVecExtractScalar* in static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) { LocationSummary* locations = new (arena) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister()); break; @@ -182,7 +182,7 @@ static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* in void LocationsBuilderX86_64::VisitVecReduce(HVecReduce* instruction) { CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); // Long reduction or min/max require a temporary. 
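The temporary requested here is what makes the horizontal step of a long reduction possible: with two int64 lanes, the high lane has to be shuffled down next to the low lane before one packed add (or min/max comparison) can combine them. A rough intrinsics sketch of the sum case, assuming a 2 x int64 vector; this is an illustration, not the exact instruction sequence the generator emits:

#include <immintrin.h>
#include <cstdint>

static inline int64_t ReduceSumInt64x2(__m128i v) {
  __m128i hi = _mm_unpackhi_epi64(v, v);   // move the high lane into lane 0 (the "temporary")
  __m128i sum = _mm_add_epi64(v, hi);      // lane 0 now holds lane0 + lane1
  return _mm_cvtsi128_si64(sum);           // extract lane 0
}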
- if (instruction->GetPackedType() == Primitive::kPrimLong || + if (instruction->GetPackedType() == DataType::Type::kInt64 || instruction->GetKind() == HVecReduce::kMin || instruction->GetKind() == HVecReduce::kMax) { instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister()); @@ -194,7 +194,7 @@ void InstructionCodeGeneratorX86_64::VisitVecReduce(HVecReduce* instruction) { XmmRegister src = locations->InAt(0).AsFpuRegister<XmmRegister>(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); switch (instruction->GetKind()) { case HVecReduce::kSum: @@ -224,7 +224,7 @@ void InstructionCodeGeneratorX86_64::VisitVecReduce(HVecReduce* instruction) { } } break; - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { DCHECK_EQ(2u, instruction->GetVectorLength()); XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>(); switch (instruction->GetKind()) { @@ -254,9 +254,9 @@ void InstructionCodeGeneratorX86_64::VisitVecCnv(HVecCnv* instruction) { LocationSummary* locations = instruction->GetLocations(); XmmRegister src = locations->InAt(0).AsFpuRegister<XmmRegister>(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); - Primitive::Type from = instruction->GetInputType(); - Primitive::Type to = instruction->GetResultType(); - if (from == Primitive::kPrimInt && to == Primitive::kPrimFloat) { + DataType::Type from = instruction->GetInputType(); + DataType::Type to = instruction->GetResultType(); + if (from == DataType::Type::kInt32 && to == DataType::Type::kFloat32) { DCHECK_EQ(4u, instruction->GetVectorLength()); __ cvtdq2ps(dst, src); } else { @@ -273,33 +273,33 @@ void InstructionCodeGeneratorX86_64::VisitVecNeg(HVecNeg* instruction) { XmmRegister src = locations->InAt(0).AsFpuRegister<XmmRegister>(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ pxor(dst, dst); __ psubb(dst, src); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ pxor(dst, dst); __ psubw(dst, src); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ pxor(dst, dst); __ psubd(dst, src); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ pxor(dst, dst); __ psubq(dst, src); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ xorps(dst, dst); __ subps(dst, src); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ xorpd(dst, dst); __ subpd(dst, src); @@ -313,7 +313,7 @@ void InstructionCodeGeneratorX86_64::VisitVecNeg(HVecNeg* instruction) { void LocationsBuilderX86_64::VisitVecAbs(HVecAbs* instruction) { CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); // Integral-abs requires a temporary for the comparison. 
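The temporary mentioned in this comment feeds the classic SSE2 absolute-value idiom: without SSSE3's pabsd, |x| is computed from a sign mask m (all ones for negative lanes, all zeros otherwise) as (x ^ m) - m. A compact intrinsics rendering of that idiom; it shows the technique, not necessarily the exact mask construction used in the hunks below:

#include <immintrin.h>

static inline __m128i AbsInt32x4(__m128i x) {
  __m128i m = _mm_srai_epi32(x, 31);             // m = 0 for non-negative lanes, -1 for negative ones
  return _mm_sub_epi32(_mm_xor_si128(x, m), m);  // (x ^ m) - m == |x|, with INT_MIN mapping to itself
}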
- if (instruction->GetPackedType() == Primitive::kPrimInt) { + if (instruction->GetPackedType() == DataType::Type::kInt32) { instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister()); } } @@ -323,7 +323,7 @@ void InstructionCodeGeneratorX86_64::VisitVecAbs(HVecAbs* instruction) { XmmRegister src = locations->InAt(0).AsFpuRegister<XmmRegister>(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { DCHECK_EQ(4u, instruction->GetVectorLength()); XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>(); __ movaps(dst, src); @@ -333,13 +333,13 @@ void InstructionCodeGeneratorX86_64::VisitVecAbs(HVecAbs* instruction) { __ psubd(dst, tmp); break; } - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ pcmpeqb(dst, dst); // all ones __ psrld(dst, Immediate(1)); __ andps(dst, src); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ pcmpeqb(dst, dst); // all ones __ psrlq(dst, Immediate(1)); @@ -354,7 +354,7 @@ void InstructionCodeGeneratorX86_64::VisitVecAbs(HVecAbs* instruction) { void LocationsBuilderX86_64::VisitVecNot(HVecNot* instruction) { CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); // Boolean-not requires a temporary to construct the 16 x one. - if (instruction->GetPackedType() == Primitive::kPrimBoolean) { + if (instruction->GetPackedType() == DataType::Type::kBool) { instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister()); } } @@ -364,7 +364,7 @@ void InstructionCodeGeneratorX86_64::VisitVecNot(HVecNot* instruction) { XmmRegister src = locations->InAt(0).AsFpuRegister<XmmRegister>(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: { // special case boolean-not + case DataType::Type::kBool: { // special case boolean-not DCHECK_EQ(16u, instruction->GetVectorLength()); XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>(); __ pxor(dst, dst); @@ -373,22 +373,22 @@ void InstructionCodeGeneratorX86_64::VisitVecNot(HVecNot* instruction) { __ pxor(dst, src); break; } - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: DCHECK_LE(2u, instruction->GetVectorLength()); DCHECK_LE(instruction->GetVectorLength(), 16u); __ pcmpeqb(dst, dst); // all ones __ pxor(dst, src); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ pcmpeqb(dst, dst); // all ones __ xorps(dst, src); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ pcmpeqb(dst, dst); // all ones __ xorpd(dst, src); @@ -403,14 +403,14 @@ void InstructionCodeGeneratorX86_64::VisitVecNot(HVecNot* instruction) { static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) { LocationSummary* locations = new (arena) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case 
Primitive::kPrimLong: - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); locations->SetOut(Location::SameAsFirstInput()); @@ -431,28 +431,28 @@ void InstructionCodeGeneratorX86_64::VisitVecAdd(HVecAdd* instruction) { XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ paddb(dst, src); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ paddw(dst, src); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ paddd(dst, src); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ paddq(dst, src); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ addps(dst, src); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ addpd(dst, src); break; @@ -476,12 +476,12 @@ void InstructionCodeGeneratorX86_64::VisitVecHalvingAdd(HVecHalvingAdd* instruct DCHECK(instruction->IsUnsigned()); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ pavgb(dst, src); return; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ pavgw(dst, src); return; @@ -501,28 +501,28 @@ void InstructionCodeGeneratorX86_64::VisitVecSub(HVecSub* instruction) { XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); __ psubb(dst, src); break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ psubw(dst, src); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ psubd(dst, src); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ psubq(dst, src); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ subps(dst, src); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ subpd(dst, src); break; @@ -542,20 +542,20 @@ void InstructionCodeGeneratorX86_64::VisitVecMul(HVecMul* instruction) { XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - 
case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ pmullw(dst, src); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ pmulld(dst, src); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ mulps(dst, src); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ mulpd(dst, src); break; @@ -575,11 +575,11 @@ void InstructionCodeGeneratorX86_64::VisitVecDiv(HVecDiv* instruction) { XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ divps(dst, src); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ divpd(dst, src); break; @@ -599,7 +599,7 @@ void InstructionCodeGeneratorX86_64::VisitVecMin(HVecMin* instruction) { XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ pminub(dst, src); @@ -607,8 +607,8 @@ void InstructionCodeGeneratorX86_64::VisitVecMin(HVecMin* instruction) { __ pminsb(dst, src); } break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ pminuw(dst, src); @@ -616,7 +616,7 @@ void InstructionCodeGeneratorX86_64::VisitVecMin(HVecMin* instruction) { __ pminsw(dst, src); } break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ pminud(dst, src); @@ -625,12 +625,12 @@ void InstructionCodeGeneratorX86_64::VisitVecMin(HVecMin* instruction) { } break; // Next cases are sloppy wrt 0.0 vs -0.0. 
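The "sloppy" remark refers to the operand asymmetry of minps/maxps: when the two inputs compare equal (so for -0.0 versus +0.0) or when a NaN is involved, the instruction simply forwards its second source operand, so the packed result can disagree with Java's Math.min/Math.max. For comparison, a scalar reference of the exact Java float-min semantics (purely illustrative, not code from this change):

#include <cmath>

static inline float JavaMinFloat(float a, float b) {
  if (std::isnan(a) || std::isnan(b)) return NAN;  // NaN is absorbing in Java
  if (a == 0.0f && b == 0.0f) {
    return std::signbit(a) ? a : b;                // -0.0f is considered smaller than +0.0f
  }
  return a < b ? a : b;
}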
- case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); DCHECK(!instruction->IsUnsigned()); __ minps(dst, src); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); DCHECK(!instruction->IsUnsigned()); __ minpd(dst, src); @@ -651,7 +651,7 @@ void InstructionCodeGeneratorX86_64::VisitVecMax(HVecMax* instruction) { XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: DCHECK_EQ(16u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ pmaxub(dst, src); @@ -659,8 +659,8 @@ void InstructionCodeGeneratorX86_64::VisitVecMax(HVecMax* instruction) { __ pmaxsb(dst, src); } break; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ pmaxuw(dst, src); @@ -668,7 +668,7 @@ void InstructionCodeGeneratorX86_64::VisitVecMax(HVecMax* instruction) { __ pmaxsw(dst, src); } break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); if (instruction->IsUnsigned()) { __ pmaxud(dst, src); @@ -677,12 +677,12 @@ void InstructionCodeGeneratorX86_64::VisitVecMax(HVecMax* instruction) { } break; // Next cases are sloppy wrt 0.0 vs -0.0. - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); DCHECK(!instruction->IsUnsigned()); __ maxps(dst, src); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); DCHECK(!instruction->IsUnsigned()); __ maxpd(dst, src); @@ -703,21 +703,21 @@ void InstructionCodeGeneratorX86_64::VisitVecAnd(HVecAnd* instruction) { XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: DCHECK_LE(2u, instruction->GetVectorLength()); DCHECK_LE(instruction->GetVectorLength(), 16u); __ pand(dst, src); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ andps(dst, src); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ andpd(dst, src); break; @@ -737,21 +737,21 @@ void InstructionCodeGeneratorX86_64::VisitVecAndNot(HVecAndNot* instruction) { XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: DCHECK_LE(2u, 
instruction->GetVectorLength()); DCHECK_LE(instruction->GetVectorLength(), 16u); __ pandn(dst, src); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ andnps(dst, src); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ andnpd(dst, src); break; @@ -771,21 +771,21 @@ void InstructionCodeGeneratorX86_64::VisitVecOr(HVecOr* instruction) { XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: DCHECK_LE(2u, instruction->GetVectorLength()); DCHECK_LE(instruction->GetVectorLength(), 16u); __ por(dst, src); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ orps(dst, src); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ orpd(dst, src); break; @@ -805,21 +805,21 @@ void InstructionCodeGeneratorX86_64::VisitVecXor(HVecXor* instruction) { XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: DCHECK_LE(2u, instruction->GetVectorLength()); DCHECK_LE(instruction->GetVectorLength(), 16u); __ pxor(dst, src); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ xorps(dst, src); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ xorpd(dst, src); break; @@ -833,10 +833,10 @@ void InstructionCodeGeneratorX86_64::VisitVecXor(HVecXor* instruction) { static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) { LocationSummary* locations = new (arena) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant())); locations->SetOut(Location::SameAsFirstInput()); @@ -857,16 +857,16 @@ void InstructionCodeGeneratorX86_64::VisitVecShl(HVecShl* instruction) { int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case 
DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ psllw(dst, Immediate(static_cast<int8_t>(value))); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ pslld(dst, Immediate(static_cast<int8_t>(value))); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ psllq(dst, Immediate(static_cast<int8_t>(value))); break; @@ -886,12 +886,12 @@ void InstructionCodeGeneratorX86_64::VisitVecShr(HVecShr* instruction) { int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ psraw(dst, Immediate(static_cast<int8_t>(value))); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ psrad(dst, Immediate(static_cast<int8_t>(value))); break; @@ -911,16 +911,16 @@ void InstructionCodeGeneratorX86_64::VisitVecUShr(HVecUShr* instruction) { int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); switch (instruction->GetPackedType()) { - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: DCHECK_EQ(8u, instruction->GetVectorLength()); __ psrlw(dst, Immediate(static_cast<int8_t>(value))); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ psrld(dst, Immediate(static_cast<int8_t>(value))); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ psrlq(dst, Immediate(static_cast<int8_t>(value))); break; @@ -939,18 +939,18 @@ void LocationsBuilderX86_64::VisitVecSetScalars(HVecSetScalars* instruction) { bool is_zero = IsZeroBitPattern(input); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: locations->SetInAt(0, is_zero ? Location::ConstantLocation(input->AsConstant()) : Location::RequiresRegister()); locations->SetOut(Location::RequiresFpuRegister()); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, is_zero ? Location::ConstantLocation(input->AsConstant()) : Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister()); @@ -977,25 +977,25 @@ void InstructionCodeGeneratorX86_64::VisitVecSetScalars(HVecSetScalars* instruct // Set required elements. switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: // TODO: up to here, and? + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: // TODO: up to here, and? 
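An aside on the VecShl/VecShr/VecUShr hunks just above: the packed shift instructions (psllw/pslld/psllq and their right-shift counterparts) encode the distance as an 8-bit immediate, which is why the int32 constant is narrowed before being wrapped in Immediate(); every useful shift distance fits in 8 bits in any case. The same operation in intrinsics form, with a fixed distance of 3 chosen only for the example:

#include <immintrin.h>

static inline __m128i ShlInt32x4By3(__m128i v) {
  return _mm_slli_epi32(v, 3);  // one pslld with an 8-bit immediate
}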
LOG(FATAL) << "Unsupported SIMD type"; UNREACHABLE(); - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ movd(dst, locations->InAt(0).AsRegister<CpuRegister>()); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ movd(dst, locations->InAt(0).AsRegister<CpuRegister>()); // is 64-bit break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ movss(dst, locations->InAt(0).AsFpuRegister<XmmRegister>()); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ movsd(dst, locations->InAt(0).AsFpuRegister<XmmRegister>()); break; @@ -1009,11 +1009,11 @@ void InstructionCodeGeneratorX86_64::VisitVecSetScalars(HVecSetScalars* instruct static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instruction) { LocationSummary* locations = new (arena) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); locations->SetInAt(2, Location::RequiresFpuRegister()); @@ -1049,14 +1049,14 @@ static void CreateVecMemLocations(ArenaAllocator* arena, bool is_load) { LocationSummary* locations = new (arena) LocationSummary(instruction); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); if (is_load) { @@ -1099,12 +1099,12 @@ void LocationsBuilderX86_64::VisitVecLoad(HVecLoad* instruction) { void InstructionCodeGeneratorX86_64::VisitVecLoad(HVecLoad* instruction) { LocationSummary* locations = instruction->GetLocations(); - size_t size = Primitive::ComponentSize(instruction->GetPackedType()); + size_t size = DataType::Size(instruction->GetPackedType()); Address address = VecAddress(locations, size, instruction->IsStringCharAt()); XmmRegister reg = locations->Out().AsFpuRegister<XmmRegister>(); bool is_aligned16 = instruction->GetAlignment().IsAlignedAt(16); switch (instruction->GetPackedType()) { - case Primitive::kPrimChar: + case DataType::Type::kUint16: DCHECK_EQ(8u, instruction->GetVectorLength()); // Special handling of compressed/uncompressed string load. 
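Both this VecLoad and the x86 version earlier choose between the aligned and unaligned forms of the packed moves based on GetAlignment().IsAlignedAt(16): movdqa/movaps/movapd fault on a misaligned address, while movdqu/movups/movupd accept any address. The same decision in a small intrinsics sketch (function and parameter names invented for the example):

#include <immintrin.h>

static inline __m128i LoadPacked16(const void* address, bool is_aligned16) {
  const __m128i* p = static_cast<const __m128i*>(address);
  return is_aligned16 ? _mm_load_si128(p)    // movdqa: requires 16-byte alignment
                      : _mm_loadu_si128(p);  // movdqu: no alignment requirement
}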
if (mirror::kUseStringCompression && instruction->IsStringCharAt()) { @@ -1128,20 +1128,20 @@ void InstructionCodeGeneratorX86_64::VisitVecLoad(HVecLoad* instruction) { return; } FALLTHROUGH_INTENDED; - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: DCHECK_LE(2u, instruction->GetVectorLength()); DCHECK_LE(instruction->GetVectorLength(), 16u); is_aligned16 ? __ movdqa(reg, address) : __ movdqu(reg, address); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); is_aligned16 ? __ movaps(reg, address) : __ movups(reg, address); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); is_aligned16 ? __ movapd(reg, address) : __ movupd(reg, address); break; @@ -1157,26 +1157,26 @@ void LocationsBuilderX86_64::VisitVecStore(HVecStore* instruction) { void InstructionCodeGeneratorX86_64::VisitVecStore(HVecStore* instruction) { LocationSummary* locations = instruction->GetLocations(); - size_t size = Primitive::ComponentSize(instruction->GetPackedType()); + size_t size = DataType::Size(instruction->GetPackedType()); Address address = VecAddress(locations, size, /*is_string_char_at*/ false); XmmRegister reg = locations->InAt(2).AsFpuRegister<XmmRegister>(); bool is_aligned16 = instruction->GetAlignment().IsAlignedAt(16); switch (instruction->GetPackedType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: DCHECK_LE(2u, instruction->GetVectorLength()); DCHECK_LE(instruction->GetVectorLength(), 16u); is_aligned16 ? __ movdqa(address, reg) : __ movdqu(address, reg); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); is_aligned16 ? __ movaps(address, reg) : __ movups(address, reg); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); is_aligned16 ? __ movapd(address, reg) : __ movupd(address, reg); break; diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc index 99581ee9b8..70e270e74d 100644 --- a/compiler/optimizing/code_generator_x86.cc +++ b/compiler/optimizing/code_generator_x86.cc @@ -161,10 +161,10 @@ class BoundsCheckSlowPathX86 : public SlowPathCode { x86_codegen->EmitParallelMoves( locations->InAt(0), Location::RegisterLocation(calling_convention.GetRegisterAt(0)), - Primitive::kPrimInt, + DataType::Type::kInt32, length_loc, Location::RegisterLocation(calling_convention.GetRegisterAt(1)), - Primitive::kPrimInt); + DataType::Type::kInt32); QuickEntrypointEnum entrypoint = instruction_->AsBoundsCheck()->IsStringCharAt() ? 
kQuickThrowStringBounds : kQuickThrowArrayBounds; @@ -342,10 +342,10 @@ class TypeCheckSlowPathX86 : public SlowPathCode { InvokeRuntimeCallingConvention calling_convention; x86_codegen->EmitParallelMoves(locations->InAt(0), Location::RegisterLocation(calling_convention.GetRegisterAt(0)), - Primitive::kPrimNot, + DataType::Type::kReference, locations->InAt(1), Location::RegisterLocation(calling_convention.GetRegisterAt(1)), - Primitive::kPrimNot, + DataType::Type::kReference); if (instruction_->IsInstanceOf()) { x86_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, @@ -418,17 +418,17 @@ class ArraySetSlowPathX86 : public SlowPathCode { parallel_move.AddMove( locations->InAt(0), Location::RegisterLocation(calling_convention.GetRegisterAt(0)), - Primitive::kPrimNot, + DataType::Type::kReference, nullptr); parallel_move.AddMove( locations->InAt(1), Location::RegisterLocation(calling_convention.GetRegisterAt(1)), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); parallel_move.AddMove( locations->InAt(2), Location::RegisterLocation(calling_convention.GetRegisterAt(2)), - Primitive::kPrimNot, + DataType::Type::kReference, nullptr); codegen->GetMoveResolver()->EmitNativeCode(&parallel_move); @@ -814,16 +814,16 @@ class ReadBarrierForHeapReferenceSlowPathX86 : public SlowPathCode { HParallelMove parallel_move(codegen->GetGraph()->GetArena()); parallel_move.AddMove(ref_, Location::RegisterLocation(calling_convention.GetRegisterAt(0)), - Primitive::kPrimNot, + DataType::Type::kReference, nullptr); parallel_move.AddMove(obj_, Location::RegisterLocation(calling_convention.GetRegisterAt(1)), - Primitive::kPrimNot, + DataType::Type::kReference, nullptr); if (index.IsValid()) { parallel_move.AddMove(index, Location::RegisterLocation(calling_convention.GetRegisterAt(2)), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); codegen->GetMoveResolver()->EmitNativeCode(&parallel_move); } else { @@ -1129,24 +1129,24 @@ void CodeGeneratorX86::Bind(HBasicBlock* block) { __ Bind(GetLabelOf(block)); } -Location InvokeDexCallingConventionVisitorX86::GetReturnLocation(Primitive::Type type) const { +Location InvokeDexCallingConventionVisitorX86::GetReturnLocation(DataType::Type type) const { switch (type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimNot: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kReference: return Location::RegisterLocation(EAX); - case Primitive::kPrimLong: + case DataType::Type::kInt64: return Location::RegisterPairLocation(EAX, EDX); - case Primitive::kPrimVoid: + case DataType::Type::kVoid: return Location::NoLocation(); - case Primitive::kPrimDouble: - case Primitive::kPrimFloat: + case DataType::Type::kFloat64: + case DataType::Type::kFloat32: return Location::FpuRegisterLocation(XMM0); } @@ -1157,14 +1157,14 @@ Location InvokeDexCallingConventionVisitorX86::GetMethodLocation() const { return Location::RegisterLocation(kMethodRegisterArgument); } -Location InvokeDexCallingConventionVisitorX86::GetNextLocation(Primitive::Type type) { +Location InvokeDexCallingConventionVisitorX86::GetNextLocation(DataType::Type type) { switch (type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimNot: { + 
case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kReference: { uint32_t index = gp_index_++; stack_index_++; if (index < calling_convention.GetNumberOfRegisters()) { @@ -1174,7 +1174,7 @@ Location InvokeDexCallingConventionVisitorX86::GetNextLocation(Primitive::Type t } } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { uint32_t index = gp_index_; gp_index_ += 2; stack_index_ += 2; @@ -1187,7 +1187,7 @@ Location InvokeDexCallingConventionVisitorX86::GetNextLocation(Primitive::Type t } } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { uint32_t index = float_index_++; stack_index_++; if (index < calling_convention.GetNumberOfFpuRegisters()) { @@ -1197,7 +1197,7 @@ Location InvokeDexCallingConventionVisitorX86::GetNextLocation(Primitive::Type t } } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { uint32_t index = float_index_++; stack_index_ += 2; if (index < calling_convention.GetNumberOfFpuRegisters()) { @@ -1207,7 +1207,7 @@ Location InvokeDexCallingConventionVisitorX86::GetNextLocation(Primitive::Type t } } - case Primitive::kPrimVoid: + case DataType::Type::kVoid: LOG(FATAL) << "Unexpected parameter type " << type; break; } @@ -1263,10 +1263,10 @@ void CodeGeneratorX86::Move64(Location destination, Location source) { EmitParallelMoves( Location::RegisterLocation(source.AsRegisterPairHigh<Register>()), Location::RegisterLocation(destination.AsRegisterPairHigh<Register>()), - Primitive::kPrimInt, + DataType::Type::kInt32, Location::RegisterLocation(source.AsRegisterPairLow<Register>()), Location::RegisterLocation(destination.AsRegisterPairLow<Register>()), - Primitive::kPrimInt); + DataType::Type::kInt32); } else if (source.IsFpuRegister()) { XmmRegister src_reg = source.AsFpuRegister<XmmRegister>(); __ movd(destination.AsRegisterPairLow<Register>(), src_reg); @@ -1285,7 +1285,7 @@ void CodeGeneratorX86::Move64(Location destination, Location source) { } else if (source.IsDoubleStackSlot()) { __ movsd(destination.AsFpuRegister<XmmRegister>(), Address(ESP, source.GetStackIndex())); } else if (source.IsRegisterPair()) { - size_t elem_size = Primitive::ComponentSize(Primitive::kPrimInt); + size_t elem_size = DataType::Size(DataType::Type::kInt32); // Create stack space for 2 elements. 
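The elem_size computed just above comes from the new DataType::Size helper that this change substitutes for Primitive::ComponentSize throughout these files. The helper itself is not visible in this part of the diff; presumably it boils down to a per-type size table along the following lines. This is a sketch under that assumption, with a stand-in enum and the reference size shown as a 32-bit heap reference purely for illustration:

#include <cstddef>

// Stand-in for the enum introduced by this change; layout assumed for the sketch.
enum class Type { kVoid, kBool, kInt8, kUint16, kInt16, kInt32, kInt64, kReference, kFloat32, kFloat64 };

static constexpr size_t SizeOf(Type type) {
  switch (type) {
    case Type::kVoid:      return 0;
    case Type::kBool:
    case Type::kInt8:      return 1;
    case Type::kUint16:
    case Type::kInt16:     return 2;
    case Type::kInt32:
    case Type::kFloat32:
    case Type::kReference: return 4;  // illustration only: 32-bit heap reference
    case Type::kInt64:
    case Type::kFloat64:   return 8;
  }
  return 0;  // unreachable for valid inputs
}

static_assert(SizeOf(Type::kInt32) == 4, "Move64 above reserves 2 * 4 bytes for a register pair");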
__ subl(ESP, Immediate(2 * elem_size)); __ movl(Address(ESP, 0), source.AsRegisterPairLow<Register>()); @@ -1317,10 +1317,10 @@ void CodeGeneratorX86::Move64(Location destination, Location source) { EmitParallelMoves( Location::StackSlot(source.GetStackIndex()), Location::StackSlot(destination.GetStackIndex()), - Primitive::kPrimInt, + DataType::Type::kInt32, Location::StackSlot(source.GetHighStackIndex(kX86WordSize)), Location::StackSlot(destination.GetHighStackIndex(kX86WordSize)), - Primitive::kPrimInt); + DataType::Type::kInt32); } } } @@ -1330,11 +1330,11 @@ void CodeGeneratorX86::MoveConstant(Location location, int32_t value) { __ movl(location.AsRegister<Register>(), Immediate(value)); } -void CodeGeneratorX86::MoveLocation(Location dst, Location src, Primitive::Type dst_type) { +void CodeGeneratorX86::MoveLocation(Location dst, Location src, DataType::Type dst_type) { HParallelMove move(GetGraph()->GetArena()); - if (dst_type == Primitive::kPrimLong && !src.IsConstant() && !src.IsFpuRegister()) { - move.AddMove(src.ToLow(), dst.ToLow(), Primitive::kPrimInt, nullptr); - move.AddMove(src.ToHigh(), dst.ToHigh(), Primitive::kPrimInt, nullptr); + if (dst_type == DataType::Type::kInt64 && !src.IsConstant() && !src.IsFpuRegister()) { + move.AddMove(src.ToLow(), dst.ToLow(), DataType::Type::kInt32, nullptr); + move.AddMove(src.ToHigh(), dst.ToHigh(), DataType::Type::kInt32, nullptr); } else { move.AddMove(src, dst, dst_type, nullptr); } @@ -1557,16 +1557,16 @@ void InstructionCodeGeneratorX86::GenerateCompareTestAndBranch(HCondition* condi Location left = locations->InAt(0); Location right = locations->InAt(1); - Primitive::Type type = condition->InputAt(0)->GetType(); + DataType::Type type = condition->InputAt(0)->GetType(); switch (type) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: GenerateLongComparesAndJumps(condition, true_target, false_target); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: GenerateFPCompare(left, right, condition, false); GenerateFPJumps(condition, true_target, false_target); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: GenerateFPCompare(left, right, condition, true); GenerateFPJumps(condition, true_target, false_target); break; @@ -1589,8 +1589,8 @@ static bool AreEflagsSetFrom(HInstruction* cond, HInstruction* branch) { // conditions if they are materialized due to the complex branching. return cond->IsCondition() && cond->GetNext() == branch && - cond->InputAt(0)->GetType() != Primitive::kPrimLong && - !Primitive::IsFloatingPointType(cond->InputAt(0)->GetType()); + cond->InputAt(0)->GetType() != DataType::Type::kInt64 && + !DataType::IsFloatingPointType(cond->InputAt(0)->GetType()); } template<class LabelType> @@ -1654,8 +1654,8 @@ void InstructionCodeGeneratorX86::GenerateTestAndBranch(HInstruction* instructio // If this is a long or FP comparison that has been folded into // the HCondition, generate the comparison directly. - Primitive::Type type = condition->InputAt(0)->GetType(); - if (type == Primitive::kPrimLong || Primitive::IsFloatingPointType(type)) { + DataType::Type type = condition->InputAt(0)->GetType(); + if (type == DataType::Type::kInt64 || DataType::IsFloatingPointType(type)) { GenerateCompareTestAndBranch(condition, true_target, false_target); return; } @@ -1728,7 +1728,7 @@ void InstructionCodeGeneratorX86::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFla static bool SelectCanUseCMOV(HSelect* select) { // There are no conditional move instructions for XMMs. 
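As the SelectCanUseCMOV and VisitSelect hunks that follow show, the CMOV fast path is reserved for integer-typed selects whose condition also avoids long and floating-point compares; when the selected value is a 64-bit register pair on 32-bit x86, the move is performed half by half. In scalar form, that pair of conditional moves computes something like the following (illustrative only; the name Select64 is invented):

#include <cstdint>

static inline uint64_t Select64(bool condition, uint64_t if_true, uint64_t if_false) {
  uint32_t lo = condition ? static_cast<uint32_t>(if_true) : static_cast<uint32_t>(if_false);
  uint32_t hi = condition ? static_cast<uint32_t>(if_true >> 32)
                          : static_cast<uint32_t>(if_false >> 32);
  return (static_cast<uint64_t>(hi) << 32) | lo;
}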
- if (Primitive::IsFloatingPointType(select->GetType())) { + if (DataType::IsFloatingPointType(select->GetType())) { return false; } @@ -1736,9 +1736,9 @@ static bool SelectCanUseCMOV(HSelect* select) { // In 32 bit mode, a long condition doesn't generate a single CC either. HInstruction* condition = select->GetCondition(); if (condition->IsCondition()) { - Primitive::Type compare_type = condition->InputAt(0)->GetType(); - if (compare_type == Primitive::kPrimLong || - Primitive::IsFloatingPointType(compare_type)) { + DataType::Type compare_type = condition->InputAt(0)->GetType(); + if (compare_type == DataType::Type::kInt64 || + DataType::IsFloatingPointType(compare_type)) { return false; } } @@ -1749,7 +1749,7 @@ static bool SelectCanUseCMOV(HSelect* select) { void LocationsBuilderX86::VisitSelect(HSelect* select) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select); - if (Primitive::IsFloatingPointType(select->GetType())) { + if (DataType::IsFloatingPointType(select->GetType())) { locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::Any()); } else { @@ -1797,8 +1797,8 @@ void InstructionCodeGeneratorX86::VisitSelect(HSelect* select) { } } else { // We can't handle FP or long here. - DCHECK_NE(condition->InputAt(0)->GetType(), Primitive::kPrimLong); - DCHECK(!Primitive::IsFloatingPointType(condition->InputAt(0)->GetType())); + DCHECK_NE(condition->InputAt(0)->GetType(), DataType::Type::kInt64); + DCHECK(!DataType::IsFloatingPointType(condition->InputAt(0)->GetType())); LocationSummary* cond_locations = condition->GetLocations(); codegen_->GenerateIntCompare(cond_locations->InAt(0), cond_locations->InAt(1)); cond = X86Condition(condition->GetCondition()); @@ -1812,7 +1812,7 @@ void InstructionCodeGeneratorX86::VisitSelect(HSelect* select) { // If the condition is true, overwrite the output, which already contains false. Location false_loc = locations->InAt(0); Location true_loc = locations->InAt(1); - if (select->GetType() == Primitive::kPrimLong) { + if (select->GetType() == DataType::Type::kInt64) { // 64 bit conditional move. Register false_high = false_loc.AsRegisterPairHigh<Register>(); Register false_low = false_loc.AsRegisterPairLow<Register>(); @@ -1858,7 +1858,7 @@ void LocationsBuilderX86::HandleCondition(HCondition* cond) { new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall); // Handle the long/FP comparisons made in instruction simplification. 
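GenerateLongComparesAndJumps, which the condition and branch code around here keeps dispatching to for kInt64 inputs, exists because a 32-bit core cannot compare a long in one step: the high words decide the result unless they are equal, in which case the low words are compared unsigned. A scalar sketch of that decomposition for signed less-than (illustrative; the generated code branches to targets instead of returning a bool):

#include <cstdint>

static inline bool LessThan64(int64_t a, int64_t b) {
  int32_t a_hi = static_cast<int32_t>(a >> 32);
  int32_t b_hi = static_cast<int32_t>(b >> 32);
  uint32_t a_lo = static_cast<uint32_t>(a);
  uint32_t b_lo = static_cast<uint32_t>(b);
  if (a_hi != b_hi) return a_hi < b_hi;  // signed compare on the high words
  return a_lo < b_lo;                    // unsigned compare on the low words
}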
switch (cond->InputAt(0)->GetType()) { - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::Any()); if (!cond->IsEmittedAtUseSite()) { @@ -1866,8 +1866,8 @@ void LocationsBuilderX86::HandleCondition(HCondition* cond) { } break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { locations->SetInAt(0, Location::RequiresFpuRegister()); if (cond->InputAt(1)->IsX86LoadFromConstantTable()) { DCHECK(cond->InputAt(1)->IsEmittedAtUseSite()); @@ -1913,14 +1913,14 @@ void InstructionCodeGeneratorX86::HandleCondition(HCondition* cond) { __ setb(X86Condition(cond->GetCondition()), reg); return; } - case Primitive::kPrimLong: + case DataType::Type::kInt64: GenerateLongComparesAndJumps(cond, &true_label, &false_label); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: GenerateFPCompare(lhs, rhs, cond, false); GenerateFPJumps(cond, &true_label, &false_label); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: GenerateFPCompare(lhs, rhs, cond, true); GenerateFPJumps(cond, &true_label, &false_label); break; @@ -2099,22 +2099,22 @@ void LocationsBuilderX86::VisitReturn(HReturn* ret) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall); switch (ret->InputAt(0)->GetType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimNot: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kReference: locations->SetInAt(0, Location::RegisterLocation(EAX)); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: locations->SetInAt( 0, Location::RegisterPairLocation(EAX, EDX)); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt( 0, Location::FpuRegisterLocation(XMM0)); break; @@ -2127,22 +2127,22 @@ void LocationsBuilderX86::VisitReturn(HReturn* ret) { void InstructionCodeGeneratorX86::VisitReturn(HReturn* ret) { if (kIsDebugBuild) { switch (ret->InputAt(0)->GetType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimNot: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kReference: DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegister<Register>(), EAX); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegisterPairLow<Register>(), EAX); DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegisterPairHigh<Register>(), EDX); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: DCHECK_EQ(ret->GetLocations()->InAt(0).AsFpuRegister<XmmRegister>(), XMM0); break; @@ -2298,20 +2298,20 @@ void LocationsBuilderX86::VisitNeg(HNeg* neg) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall); switch (neg->GetResultType()) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case 
DataType::Type::kInt32: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::SameAsFirstInput()); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::SameAsFirstInput()); locations->AddTemp(Location::RequiresRegister()); locations->AddTemp(Location::RequiresFpuRegister()); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::SameAsFirstInput()); locations->AddTemp(Location::RequiresFpuRegister()); @@ -2327,13 +2327,13 @@ void InstructionCodeGeneratorX86::VisitNeg(HNeg* neg) { Location out = locations->Out(); Location in = locations->InAt(0); switch (neg->GetResultType()) { - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK(in.IsRegister()); DCHECK(in.Equals(out)); __ negl(out.AsRegister<Register>()); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK(in.IsRegisterPair()); DCHECK(in.Equals(out)); __ negl(out.AsRegisterPairLow<Register>()); @@ -2346,7 +2346,7 @@ void InstructionCodeGeneratorX86::VisitNeg(HNeg* neg) { __ negl(out.AsRegisterPairHigh<Register>()); break; - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { DCHECK(in.Equals(out)); Register constant = locations->GetTemp(0).AsRegister<Register>(); XmmRegister mask = locations->GetTemp(1).AsFpuRegister<XmmRegister>(); @@ -2359,7 +2359,7 @@ void InstructionCodeGeneratorX86::VisitNeg(HNeg* neg) { break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { DCHECK(in.Equals(out)); XmmRegister mask = locations->GetTemp(0).AsFpuRegister<XmmRegister>(); // Implement double negation with an exclusive or with value @@ -2378,7 +2378,7 @@ void InstructionCodeGeneratorX86::VisitNeg(HNeg* neg) { void LocationsBuilderX86::VisitX86FPNeg(HX86FPNeg* neg) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall); - DCHECK(Primitive::IsFloatingPointType(neg->GetType())); + DCHECK(DataType::IsFloatingPointType(neg->GetType())); locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresRegister()); locations->SetOut(Location::SameAsFirstInput()); @@ -2392,7 +2392,7 @@ void InstructionCodeGeneratorX86::VisitX86FPNeg(HX86FPNeg* neg) { Register constant_area = locations->InAt(1).AsRegister<Register>(); XmmRegister mask = locations->GetTemp(0).AsFpuRegister<XmmRegister>(); - if (neg->GetType() == Primitive::kPrimFloat) { + if (neg->GetType() == DataType::Type::kFloat32) { __ movss(mask, codegen_->LiteralInt32Address(INT32_C(0x80000000), neg->GetBaseMethodAddress(), constant_area)); @@ -2406,15 +2406,15 @@ void InstructionCodeGeneratorX86::VisitX86FPNeg(HX86FPNeg* neg) { } void LocationsBuilderX86::VisitTypeConversion(HTypeConversion* conversion) { - Primitive::Type result_type = conversion->GetResultType(); - Primitive::Type input_type = conversion->GetInputType(); + DataType::Type result_type = conversion->GetResultType(); + DataType::Type input_type = conversion->GetInputType(); DCHECK_NE(result_type, input_type); // The float-to-long and double-to-long type conversions rely on a // call to the runtime. 
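A minimal standalone sketch (not ART code) of the bit trick behind the kFloat32/kFloat64 negation above: negating an IEEE-754 value flips only its sign bit, which is what the generated exclusive-or against the 0x80000000 mask (or the corresponding 64-bit mask) performs.

#include <cstdint>
#include <cstring>

// Negate a float by flipping its sign bit, mirroring the xor-with-mask
// sequence used for DataType::Type::kFloat32 above.
float NegateFloatViaSignBit(float value) {
  uint32_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  bits ^= UINT32_C(0x80000000);  // flip the sign bit only
  std::memcpy(&value, &bits, sizeof(value));
  return value;
}

// Same idea for double, using the 64-bit sign-bit mask.
double NegateDoubleViaSignBit(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  bits ^= UINT64_C(0x8000000000000000);
  std::memcpy(&value, &bits, sizeof(value));
  return value;
}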
LocationSummary::CallKind call_kind = - ((input_type == Primitive::kPrimFloat || input_type == Primitive::kPrimDouble) - && result_type == Primitive::kPrimLong) + ((input_type == DataType::Type::kFloat32 || input_type == DataType::Type::kFloat64) + && result_type == DataType::Type::kInt64) ? LocationSummary::kCallOnMainOnly : LocationSummary::kNoCall; LocationSummary* locations = @@ -2424,9 +2424,9 @@ void LocationsBuilderX86::VisitTypeConversion(HTypeConversion* conversion) { // our bit representation makes it safe. switch (result_type) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: switch (input_type) { - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { // Type conversion from long to byte is a result of code transformations. HInstruction* input = conversion->InputAt(0); Location input_location = input->IsConstant() @@ -2438,11 +2438,11 @@ void LocationsBuilderX86::VisitTypeConversion(HTypeConversion* conversion) { locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap); break; } - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimChar: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kUint16: // Processing a Dex `int-to-byte' instruction. locations->SetInAt(0, Location::ByteRegisterOrConstant(ECX, conversion->InputAt(0))); // Make the output overlap to please the register allocator. This greatly simplifies @@ -2456,15 +2456,15 @@ void LocationsBuilderX86::VisitTypeConversion(HTypeConversion* conversion) { } break; - case Primitive::kPrimShort: + case DataType::Type::kInt16: switch (input_type) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Type conversion from long to short is a result of code transformations. - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimByte: - case Primitive::kPrimInt: - case Primitive::kPrimChar: + case DataType::Type::kInt8: + case DataType::Type::kInt32: + case DataType::Type::kUint16: // Processing a Dex `int-to-short' instruction. locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); @@ -2476,22 +2476,22 @@ void LocationsBuilderX86::VisitTypeConversion(HTypeConversion* conversion) { } break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: switch (input_type) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Processing a Dex `long-to-int' instruction. locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: // Processing a Dex `float-to-int' instruction. locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister()); locations->AddTemp(Location::RequiresFpuRegister()); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: // Processing a Dex `double-to-int' instruction. 
locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister()); @@ -2504,21 +2504,21 @@ void LocationsBuilderX86::VisitTypeConversion(HTypeConversion* conversion) { } break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: switch (input_type) { - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimChar: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kUint16: // Processing a Dex `int-to-long' instruction. locations->SetInAt(0, Location::RegisterLocation(EAX)); locations->SetOut(Location::RegisterPairLocation(EAX, EDX)); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { // Processing a Dex `float-to-long' or 'double-to-long' instruction. InvokeRuntimeCallingConvention calling_convention; XmmRegister parameter = calling_convention.GetFpuRegisterAt(0); @@ -2535,15 +2535,15 @@ void LocationsBuilderX86::VisitTypeConversion(HTypeConversion* conversion) { } break; - case Primitive::kPrimChar: + case DataType::Type::kUint16: switch (input_type) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Type conversion from long to char is a result of code transformations. - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimInt: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kInt32: // Processing a Dex `int-to-char' instruction. locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); @@ -2555,26 +2555,26 @@ void LocationsBuilderX86::VisitTypeConversion(HTypeConversion* conversion) { } break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: switch (input_type) { - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimChar: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kUint16: // Processing a Dex `int-to-float' instruction. locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresFpuRegister()); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Processing a Dex `long-to-float' instruction. locations->SetInAt(0, Location::Any()); locations->SetOut(Location::Any()); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: // Processing a Dex `double-to-float' instruction. locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); @@ -2586,26 +2586,26 @@ void LocationsBuilderX86::VisitTypeConversion(HTypeConversion* conversion) { }; break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: switch (input_type) { - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. 
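For context, a standalone sketch (illustrative only) of why the int-to-long case above pins its input to EAX and its output to the EAX/EDX pair: the widening is a sign extension, and the cdq emitted further below produces the high word in EDX as the sign of the value in EAX.

#include <cstdint>

// int-to-long widening on 32-bit halves: the low half is the input and
// the high half is its sign, which is exactly what cdq computes.
void IntToLong(int32_t value, uint32_t* out_lo, uint32_t* out_hi) {
  *out_lo = static_cast<uint32_t>(value);
  *out_hi = (value < 0) ? 0xFFFFFFFFu : 0u;  // sign extension
}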
- case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimChar: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kUint16: // Processing a Dex `int-to-double' instruction. locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresFpuRegister()); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Processing a Dex `long-to-double' instruction. locations->SetInAt(0, Location::Any()); locations->SetOut(Location::Any()); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: // Processing a Dex `float-to-double' instruction. locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); @@ -2627,13 +2627,13 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio LocationSummary* locations = conversion->GetLocations(); Location out = locations->Out(); Location in = locations->InAt(0); - Primitive::Type result_type = conversion->GetResultType(); - Primitive::Type input_type = conversion->GetInputType(); + DataType::Type result_type = conversion->GetResultType(); + DataType::Type input_type = conversion->GetInputType(); DCHECK_NE(result_type, input_type); switch (result_type) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: switch (input_type) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Type conversion from long to byte is a result of code transformations. if (in.IsRegisterPair()) { __ movsxb(out.AsRegister<Register>(), in.AsRegisterPairLow<ByteRegister>()); @@ -2643,11 +2643,11 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio __ movl(out.AsRegister<Register>(), Immediate(static_cast<int8_t>(value))); } break; - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimChar: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kUint16: // Processing a Dex `int-to-byte' instruction. if (in.IsRegister()) { __ movsxb(out.AsRegister<Register>(), in.AsRegister<ByteRegister>()); @@ -2664,9 +2664,9 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio } break; - case Primitive::kPrimShort: + case DataType::Type::kInt16: switch (input_type) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Type conversion from long to short is a result of code transformations. if (in.IsRegisterPair()) { __ movsxw(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>()); @@ -2678,11 +2678,11 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio __ movl(out.AsRegister<Register>(), Immediate(static_cast<int16_t>(value))); } break; - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimByte: - case Primitive::kPrimInt: - case Primitive::kPrimChar: + case DataType::Type::kInt8: + case DataType::Type::kInt32: + case DataType::Type::kUint16: // Processing a Dex `int-to-short' instruction. 
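A standalone sketch (illustrative only) of the narrowing conversions handled in this visitor: byte and short results are sign extensions of the low bits (movsxb/movsxw), while the char result further below is a zero extension (movzxw).

#include <cstdint>

// Dex narrowing conversions expressed on plain integers; the x86 code
// above performs the same operations directly on registers.
int32_t IntToByte(int32_t v)  { return static_cast<int8_t>(v);   }  // keep low 8 bits, sign-extend
int32_t IntToShort(int32_t v) { return static_cast<int16_t>(v);  }  // keep low 16 bits, sign-extend
int32_t IntToChar(int32_t v)  { return static_cast<uint16_t>(v); }  // keep low 16 bits, zero-extend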
if (in.IsRegister()) { __ movsxw(out.AsRegister<Register>(), in.AsRegister<Register>()); @@ -2701,9 +2701,9 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversio } break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: switch (input_type) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Processing a Dex `long-to-int' instruction. if (in.IsRegisterPair()) { __ movl(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>()); @@ -2717,7 +2717,7 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversio } break; - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { // Processing a Dex `float-to-int' instruction. XmmRegister input = in.AsFpuRegister<XmmRegister>(); Register output = out.AsRegister<Register>(); @@ -2742,7 +2742,7 @@ void InstructionCodeGeneratorX86::VisitTypeConversio break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { // Processing a Dex `double-to-int' instruction. XmmRegister input = in.AsFpuRegister<XmmRegister>(); Register output = out.AsRegister<Register>(); @@ -2773,14 +2773,14 @@ void InstructionCodeGeneratorX86::VisitTypeConversio } break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: switch (input_type) { - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimChar: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kUint16: // Processing a Dex `int-to-long' instruction. DCHECK_EQ(out.AsRegisterPairLow<Register>(), EAX); DCHECK_EQ(out.AsRegisterPairHigh<Register>(), EDX); @@ -2788,13 +2788,13 @@ void InstructionCodeGeneratorX86::VisitTypeConversio __ cdq(); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: // Processing a Dex `float-to-long' instruction. codegen_->InvokeRuntime(kQuickF2l, conversion, conversion->GetDexPc()); CheckEntrypointTypes<kQuickF2l, int64_t, float>(); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: // Processing a Dex `double-to-long' instruction. codegen_->InvokeRuntime(kQuickD2l, conversion, conversion->GetDexPc()); CheckEntrypointTypes<kQuickD2l, int64_t, double>(); @@ -2806,9 +2806,9 @@ void InstructionCodeGeneratorX86::VisitTypeConversio } break; - case Primitive::kPrimChar: + case DataType::Type::kUint16: switch (input_type) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Type conversion from long to short is a result of code transformations. if (in.IsRegisterPair()) { __ movzxw(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>()); @@ -2820,11 +2820,11 @@ void InstructionCodeGeneratorX86::VisitTypeConversio __ movl(out.AsRegister<Register>(), Immediate(static_cast<uint16_t>(value))); } break; - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimInt: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kInt32: // Processing a Dex `int-to-char' instruction.
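For context, a standalone sketch (not the generated sequence itself) of the result the float-to-int path above must produce under Dex/Java semantics: NaN becomes 0, out-of-range values saturate to the int bounds, and everything else truncates toward zero; the emitted conversion plus fix-up code reaches the same results.

#include <cmath>
#include <cstdint>
#include <limits>

// Reference semantics for the kFloat32 -> kInt32 conversion.
int32_t FloatToIntSemantics(float value) {
  if (std::isnan(value)) {
    return 0;  // NaN converts to zero
  }
  if (value >= static_cast<float>(std::numeric_limits<int32_t>::max())) {
    return std::numeric_limits<int32_t>::max();  // saturate high
  }
  if (value <= static_cast<float>(std::numeric_limits<int32_t>::min())) {
    return std::numeric_limits<int32_t>::min();  // saturate low
  }
  return static_cast<int32_t>(value);  // truncate toward zero
}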
if (in.IsRegister()) { __ movzxw(out.AsRegister<Register>(), in.AsRegister<Register>()); @@ -2843,19 +2843,19 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio } break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: switch (input_type) { - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimChar: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kUint16: // Processing a Dex `int-to-float' instruction. __ cvtsi2ss(out.AsFpuRegister<XmmRegister>(), in.AsRegister<Register>()); break; - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { // Processing a Dex `long-to-float' instruction. size_t adjustment = 0; @@ -2863,7 +2863,7 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio // InstructionCodeGeneratorX86::PushOntoFPStack and/or X86Assembler::fstps below. // TODO: enhance register allocator to ask for stack temporaries. if (!in.IsDoubleStackSlot() || !out.IsStackSlot()) { - adjustment = Primitive::ComponentSize(Primitive::kPrimLong); + adjustment = DataType::Size(DataType::Type::kInt64); __ subl(ESP, Immediate(adjustment)); } @@ -2885,7 +2885,7 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio break; } - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: // Processing a Dex `double-to-float' instruction. __ cvtsd2ss(out.AsFpuRegister<XmmRegister>(), in.AsFpuRegister<XmmRegister>()); break; @@ -2896,19 +2896,19 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio }; break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: switch (input_type) { - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimChar: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kUint16: // Processing a Dex `int-to-double' instruction. __ cvtsi2sd(out.AsFpuRegister<XmmRegister>(), in.AsRegister<Register>()); break; - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { // Processing a Dex `long-to-double' instruction. size_t adjustment = 0; @@ -2916,7 +2916,7 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio // InstructionCodeGeneratorX86::PushOntoFPStack and/or X86Assembler::fstpl below. // TODO: enhance register allocator to ask for stack temporaries. if (!in.IsDoubleStackSlot() || !out.IsDoubleStackSlot()) { - adjustment = Primitive::ComponentSize(Primitive::kPrimLong); + adjustment = DataType::Size(DataType::Type::kInt64); __ subl(ESP, Immediate(adjustment)); } @@ -2938,7 +2938,7 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio break; } - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: // Processing a Dex `float-to-double' instruction. 
__ cvtss2sd(out.AsFpuRegister<XmmRegister>(), in.AsFpuRegister<XmmRegister>()); break; @@ -2959,22 +2959,22 @@ void LocationsBuilderX86::VisitAdd(HAdd* add) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall); switch (add->GetResultType()) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(add->InputAt(1))); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::Any()); locations->SetOut(Location::SameAsFirstInput()); break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { locations->SetInAt(0, Location::RequiresFpuRegister()); if (add->InputAt(1)->IsX86LoadFromConstantTable()) { DCHECK(add->InputAt(1)->IsEmittedAtUseSite()); @@ -3000,7 +3000,7 @@ void InstructionCodeGeneratorX86::VisitAdd(HAdd* add) { Location out = locations->Out(); switch (add->GetResultType()) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { if (second.IsRegister()) { if (out.AsRegister<Register>() == first.AsRegister<Register>()) { __ addl(out.AsRegister<Register>(), second.AsRegister<Register>()); @@ -3024,7 +3024,7 @@ void InstructionCodeGeneratorX86::VisitAdd(HAdd* add) { break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { if (second.IsRegisterPair()) { __ addl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>()); __ adcl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>()); @@ -3041,7 +3041,7 @@ void InstructionCodeGeneratorX86::VisitAdd(HAdd* add) { break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { if (second.IsFpuRegister()) { __ addss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>()); } else if (add->InputAt(1)->IsX86LoadFromConstantTable()) { @@ -3059,7 +3059,7 @@ void InstructionCodeGeneratorX86::VisitAdd(HAdd* add) { break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { if (second.IsFpuRegister()) { __ addsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>()); } else if (add->InputAt(1)->IsX86LoadFromConstantTable()) { @@ -3086,15 +3086,15 @@ void LocationsBuilderX86::VisitSub(HSub* sub) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall); switch (sub->GetResultType()) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: { + case DataType::Type::kInt32: + case DataType::Type::kInt64: { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::Any()); locations->SetOut(Location::SameAsFirstInput()); break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { locations->SetInAt(0, Location::RequiresFpuRegister()); if (sub->InputAt(1)->IsX86LoadFromConstantTable()) { DCHECK(sub->InputAt(1)->IsEmittedAtUseSite()); @@ -3118,7 +3118,7 @@ void InstructionCodeGeneratorX86::VisitSub(HSub* sub) { Location second = locations->InAt(1); DCHECK(first.Equals(locations->Out())); switch (sub->GetResultType()) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { if (second.IsRegister()) { __ subl(first.AsRegister<Register>(), 
second.AsRegister<Register>()); } else if (second.IsConstant()) { @@ -3130,7 +3130,7 @@ void InstructionCodeGeneratorX86::VisitSub(HSub* sub) { break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { if (second.IsRegisterPair()) { __ subl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>()); __ sbbl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>()); @@ -3147,7 +3147,7 @@ void InstructionCodeGeneratorX86::VisitSub(HSub* sub) { break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { if (second.IsFpuRegister()) { __ subss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>()); } else if (sub->InputAt(1)->IsX86LoadFromConstantTable()) { @@ -3165,7 +3165,7 @@ void InstructionCodeGeneratorX86::VisitSub(HSub* sub) { break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { if (second.IsFpuRegister()) { __ subsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>()); } else if (sub->InputAt(1)->IsX86LoadFromConstantTable()) { @@ -3192,7 +3192,7 @@ void LocationsBuilderX86::VisitMul(HMul* mul) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall); switch (mul->GetResultType()) { - case Primitive::kPrimInt: + case DataType::Type::kInt32: locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::Any()); if (mul->InputAt(1)->IsIntConstant()) { @@ -3202,7 +3202,7 @@ void LocationsBuilderX86::VisitMul(HMul* mul) { locations->SetOut(Location::SameAsFirstInput()); } break; - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::Any()); locations->SetOut(Location::SameAsFirstInput()); @@ -3211,8 +3211,8 @@ void LocationsBuilderX86::VisitMul(HMul* mul) { locations->AddTemp(Location::RegisterLocation(EDX)); break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { locations->SetInAt(0, Location::RequiresFpuRegister()); if (mul->InputAt(1)->IsX86LoadFromConstantTable()) { DCHECK(mul->InputAt(1)->IsEmittedAtUseSite()); @@ -3237,7 +3237,7 @@ void InstructionCodeGeneratorX86::VisitMul(HMul* mul) { Location out = locations->Out(); switch (mul->GetResultType()) { - case Primitive::kPrimInt: + case DataType::Type::kInt32: // The constant may have ended up in a register, so test explicitly to avoid // problems where the output may not be the same as the first operand. 
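A standalone sketch (illustrative only) of what the subl/sbbl pair in the kInt64 case of VisitSub computes, and likewise what addl/adcl does for the long HAdd earlier: the low halves are combined first and the resulting carry or borrow is folded into the high halves.

#include <cstdint>

// 64-bit subtraction on 32-bit halves: subl produces the low result and
// the borrow (carry flag), and sbbl consumes that borrow in the high half.
void Sub64(uint32_t a_lo, uint32_t a_hi,
           uint32_t b_lo, uint32_t b_hi,
           uint32_t* out_lo, uint32_t* out_hi) {
  uint32_t borrow = (a_lo < b_lo) ? 1u : 0u;
  *out_lo = a_lo - b_lo;
  *out_hi = a_hi - b_hi - borrow;
}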
if (mul->InputAt(1)->IsIntConstant()) { @@ -3253,7 +3253,7 @@ void InstructionCodeGeneratorX86::VisitMul(HMul* mul) { } break; - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { Register in1_hi = first.AsRegisterPairHigh<Register>(); Register in1_lo = first.AsRegisterPairLow<Register>(); Register eax = locations->GetTemp(0).AsRegister<Register>(); @@ -3335,7 +3335,7 @@ void InstructionCodeGeneratorX86::VisitMul(HMul* mul) { break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { DCHECK(first.Equals(locations->Out())); if (second.IsFpuRegister()) { __ mulss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>()); @@ -3354,7 +3354,7 @@ void InstructionCodeGeneratorX86::VisitMul(HMul* mul) { break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { DCHECK(first.Equals(locations->Out())); if (second.IsFpuRegister()) { __ mulsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>()); @@ -3420,9 +3420,9 @@ void InstructionCodeGeneratorX86::PushOntoFPStack(Location source, } void InstructionCodeGeneratorX86::GenerateRemFP(HRem *rem) { - Primitive::Type type = rem->GetResultType(); - bool is_float = type == Primitive::kPrimFloat; - size_t elem_size = Primitive::ComponentSize(type); + DataType::Type type = rem->GetResultType(); + bool is_float = type == DataType::Type::kFloat32; + size_t elem_size = DataType::Size(type); LocationSummary* locations = rem->GetLocations(); Location first = locations->InAt(0); Location second = locations->InAt(1); @@ -3599,7 +3599,7 @@ void InstructionCodeGeneratorX86::GenerateDivRemIntegral(HBinaryOperation* instr bool is_div = instruction->IsDiv(); switch (instruction->GetResultType()) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { DCHECK_EQ(EAX, first.AsRegister<Register>()); DCHECK_EQ(is_div ? EAX : EDX, out.AsRegister<Register>()); @@ -3638,7 +3638,7 @@ void InstructionCodeGeneratorX86::GenerateDivRemIntegral(HBinaryOperation* instr break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { InvokeRuntimeCallingConvention calling_convention; DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegisterPairLow<Register>()); DCHECK_EQ(calling_convention.GetRegisterAt(1), first.AsRegisterPairHigh<Register>()); @@ -3663,13 +3663,13 @@ void InstructionCodeGeneratorX86::GenerateDivRemIntegral(HBinaryOperation* instr } void LocationsBuilderX86::VisitDiv(HDiv* div) { - LocationSummary::CallKind call_kind = (div->GetResultType() == Primitive::kPrimLong) + LocationSummary::CallKind call_kind = (div->GetResultType() == DataType::Type::kInt64) ? 
LocationSummary::kCallOnMainOnly : LocationSummary::kNoCall; LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind); switch (div->GetResultType()) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { locations->SetInAt(0, Location::RegisterLocation(EAX)); locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1))); locations->SetOut(Location::SameAsFirstInput()); @@ -3683,7 +3683,7 @@ void LocationsBuilderX86::VisitDiv(HDiv* div) { } break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterPairLocation( calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1))); @@ -3693,8 +3693,8 @@ void LocationsBuilderX86::VisitDiv(HDiv* div) { locations->SetOut(Location::RegisterPairLocation(EAX, EDX)); break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { locations->SetInAt(0, Location::RequiresFpuRegister()); if (div->InputAt(1)->IsX86LoadFromConstantTable()) { DCHECK(div->InputAt(1)->IsEmittedAtUseSite()); @@ -3718,13 +3718,13 @@ void InstructionCodeGeneratorX86::VisitDiv(HDiv* div) { Location second = locations->InAt(1); switch (div->GetResultType()) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: { + case DataType::Type::kInt32: + case DataType::Type::kInt64: { GenerateDivRemIntegral(div); break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { if (second.IsFpuRegister()) { __ divss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>()); } else if (div->InputAt(1)->IsX86LoadFromConstantTable()) { @@ -3742,7 +3742,7 @@ void InstructionCodeGeneratorX86::VisitDiv(HDiv* div) { break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { if (second.IsFpuRegister()) { __ divsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>()); } else if (div->InputAt(1)->IsX86LoadFromConstantTable()) { @@ -3766,15 +3766,15 @@ void InstructionCodeGeneratorX86::VisitDiv(HDiv* div) { } void LocationsBuilderX86::VisitRem(HRem* rem) { - Primitive::Type type = rem->GetResultType(); + DataType::Type type = rem->GetResultType(); - LocationSummary::CallKind call_kind = (rem->GetResultType() == Primitive::kPrimLong) + LocationSummary::CallKind call_kind = (rem->GetResultType() == DataType::Type::kInt64) ? 
LocationSummary::kCallOnMainOnly : LocationSummary::kNoCall; LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind); switch (type) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { locations->SetInAt(0, Location::RegisterLocation(EAX)); locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1))); locations->SetOut(Location::RegisterLocation(EDX)); @@ -3786,7 +3786,7 @@ void LocationsBuilderX86::VisitRem(HRem* rem) { } break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterPairLocation( calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1))); @@ -3796,8 +3796,8 @@ void LocationsBuilderX86::VisitRem(HRem* rem) { locations->SetOut(Location::RegisterPairLocation(EAX, EDX)); break; } - case Primitive::kPrimDouble: - case Primitive::kPrimFloat: { + case DataType::Type::kFloat64: + case DataType::Type::kFloat32: { locations->SetInAt(0, Location::Any()); locations->SetInAt(1, Location::Any()); locations->SetOut(Location::RequiresFpuRegister()); @@ -3811,15 +3811,15 @@ void LocationsBuilderX86::VisitRem(HRem* rem) { } void InstructionCodeGeneratorX86::VisitRem(HRem* rem) { - Primitive::Type type = rem->GetResultType(); + DataType::Type type = rem->GetResultType(); switch (type) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: { + case DataType::Type::kInt32: + case DataType::Type::kInt64: { GenerateDivRemIntegral(rem); break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { GenerateRemFP(rem); break; } @@ -3831,15 +3831,15 @@ void InstructionCodeGeneratorX86::VisitRem(HRem* rem) { void LocationsBuilderX86::VisitDivZeroCheck(HDivZeroCheck* instruction) { LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction); switch (instruction->GetType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: { + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: { locations->SetInAt(0, Location::Any()); break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0))); if (!instruction->IsConstant()) { locations->AddTemp(Location::RequiresRegister()); @@ -3859,11 +3859,11 @@ void InstructionCodeGeneratorX86::VisitDivZeroCheck(HDivZeroCheck* instruction) Location value = locations->InAt(0); switch (instruction->GetType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: { + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: { if (value.IsRegister()) { __ testl(value.AsRegister<Register>(), value.AsRegister<Register>()); __ j(kEqual, slow_path->GetEntryLabel()); @@ -3878,7 +3878,7 @@ void InstructionCodeGeneratorX86::VisitDivZeroCheck(HDivZeroCheck* instruction) } break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { if (value.IsRegisterPair()) { Register temp = locations->GetTemp(0).AsRegister<Register>(); __ movl(temp, value.AsRegisterPairLow<Register>()); @@ -3904,8 +3904,8 @@ void 
LocationsBuilderX86::HandleShift(HBinaryOperation* op) { new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall); switch (op->GetResultType()) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: { + case DataType::Type::kInt32: + case DataType::Type::kInt64: { // Can't have Location::Any() and output SameAsFirstInput() locations->SetInAt(0, Location::RequiresRegister()); // The shift count needs to be in CL or a constant. @@ -3927,7 +3927,7 @@ void InstructionCodeGeneratorX86::HandleShift(HBinaryOperation* op) { DCHECK(first.Equals(locations->Out())); switch (op->GetResultType()) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { DCHECK(first.IsRegister()); Register first_reg = first.AsRegister<Register>(); if (second.IsRegister()) { @@ -3956,7 +3956,7 @@ void InstructionCodeGeneratorX86::HandleShift(HBinaryOperation* op) { } break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { if (second.IsRegister()) { Register second_reg = second.AsRegister<Register>(); DCHECK_EQ(ECX, second_reg); @@ -4000,10 +4000,10 @@ void InstructionCodeGeneratorX86::GenerateShlLong(const Location& loc, int shift codegen_->EmitParallelMoves( loc.ToLow(), loc.ToHigh(), - Primitive::kPrimInt, + DataType::Type::kInt32, Location::ConstantLocation(GetGraph()->GetIntConstant(0)), loc.ToLow(), - Primitive::kPrimInt); + DataType::Type::kInt32); } else if (shift > 32) { // Low part becomes 0. High part is low part << (shift-32). __ movl(high, low); @@ -4067,10 +4067,10 @@ void InstructionCodeGeneratorX86::GenerateUShrLong(const Location& loc, int shif codegen_->EmitParallelMoves( loc.ToHigh(), loc.ToLow(), - Primitive::kPrimInt, + DataType::Type::kInt32, Location::ConstantLocation(GetGraph()->GetIntConstant(0)), loc.ToHigh(), - Primitive::kPrimInt); + DataType::Type::kInt32); } else if (shift > 32) { // Low part is high >> (shift - 32). High part becomes 0. __ movl(low, high); @@ -4099,11 +4099,11 @@ void LocationsBuilderX86::VisitRor(HRor* ror) { new (GetGraph()->GetArena()) LocationSummary(ror, LocationSummary::kNoCall); switch (ror->GetResultType()) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Add the temporary needed. locations->AddTemp(Location::RequiresRegister()); FALLTHROUGH_INTENDED; - case Primitive::kPrimInt: + case DataType::Type::kInt32: locations->SetInAt(0, Location::RequiresRegister()); // The shift count needs to be in CL (unless it is a constant). 
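A standalone sketch (illustrative only) of the case analysis used by GenerateShlLong above for a constant shift count: shifting a register pair left by exactly 32 just moves the low word into the high word, larger counts also shift that moved word, and smaller counts must carry the bits that cross the 32-bit boundary.

#include <cstdint>

// Constant 64-bit left shift decomposed into 32-bit operations, as in
// GenerateShlLong. The count is assumed to be in [0, 63].
void Shl64(uint32_t* lo, uint32_t* hi, int shift) {
  if (shift == 0) {
    return;
  } else if (shift == 32) {
    *hi = *lo;                   // high = low
    *lo = 0;                     // low becomes 0
  } else if (shift > 32) {
    *hi = *lo << (shift - 32);   // high = low << (shift - 32)
    *lo = 0;
  } else {
    *hi = (*hi << shift) | (*lo >> (32 - shift));  // bits crossing into high
    *lo = *lo << shift;
  }
}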
locations->SetInAt(1, Location::ByteRegisterOrConstant(ECX, ror->InputAt(1))); @@ -4120,7 +4120,7 @@ void InstructionCodeGeneratorX86::VisitRor(HRor* ror) { Location first = locations->InAt(0); Location second = locations->InAt(1); - if (ror->GetResultType() == Primitive::kPrimInt) { + if (ror->GetResultType() == DataType::Type::kInt32) { Register first_reg = first.AsRegister<Register>(); if (second.IsRegister()) { Register second_reg = second.AsRegister<Register>(); @@ -4132,7 +4132,7 @@ void InstructionCodeGeneratorX86::VisitRor(HRor* ror) { return; } - DCHECK_EQ(ror->GetResultType(), Primitive::kPrimLong); + DCHECK_EQ(ror->GetResultType(), DataType::Type::kInt64); Register first_reg_lo = first.AsRegisterPairLow<Register>(); Register first_reg_hi = first.AsRegisterPairHigh<Register>(); Register temp_reg = locations->GetTemp(0).AsRegister<Register>(); @@ -4315,11 +4315,11 @@ void InstructionCodeGeneratorX86::VisitNot(HNot* not_) { Location out = locations->Out(); DCHECK(in.Equals(out)); switch (not_->GetResultType()) { - case Primitive::kPrimInt: + case DataType::Type::kInt32: __ notl(out.AsRegister<Register>()); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: __ notl(out.AsRegisterPairLow<Register>()); __ notl(out.AsRegisterPairHigh<Register>()); break; @@ -4348,19 +4348,19 @@ void LocationsBuilderX86::VisitCompare(HCompare* compare) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall); switch (compare->InputAt(0)->GetType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimChar: - case Primitive::kPrimInt: - case Primitive::kPrimLong: { + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kUint16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::Any()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { locations->SetInAt(0, Location::RequiresFpuRegister()); if (compare->InputAt(1)->IsX86LoadFromConstantTable()) { DCHECK(compare->InputAt(1)->IsEmittedAtUseSite()); @@ -4387,15 +4387,15 @@ void InstructionCodeGeneratorX86::VisitCompare(HCompare* compare) { Condition less_cond = kLess; switch (compare->InputAt(0)->GetType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimChar: - case Primitive::kPrimInt: { + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kUint16: + case DataType::Type::kInt32: { codegen_->GenerateIntCompare(left, right); break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { Register left_low = left.AsRegisterPairLow<Register>(); Register left_high = left.AsRegisterPairHigh<Register>(); int32_t val_low = 0; @@ -4431,13 +4431,13 @@ void InstructionCodeGeneratorX86::VisitCompare(HCompare* compare) { less_cond = kBelow; // for CF (unsigned). break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { GenerateFPCompare(left, right, compare, false); __ j(kUnordered, compare->IsGtBias() ? &greater : &less); less_cond = kBelow; // for CF (floats). 
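A standalone sketch (illustrative only) of the long path of VisitCompare above: the high words are compared as signed values, and only when they are equal are the low words compared as unsigned values, which is why the low-word comparison uses kBelow.

#include <cstdint>

// Three-way compare of two longs held as 32-bit halves, mirroring the
// signed-high / unsigned-low comparison emitted above.
int CompareInt64(int32_t a_hi, uint32_t a_lo, int32_t b_hi, uint32_t b_lo) {
  if (a_hi != b_hi) {
    return (a_hi < b_hi) ? -1 : 1;  // signed compare of the high words
  }
  if (a_lo != b_lo) {
    return (a_lo < b_lo) ? -1 : 1;  // unsigned compare of the low words
  }
  return 0;
}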
break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { GenerateFPCompare(left, right, compare, true); __ j(kUnordered, compare->IsGtBias() ? &greater : &less); less_cond = kBelow; // for CF (floats). @@ -4744,7 +4744,7 @@ void LocationsBuilderX86::HandleFieldGet(HInstruction* instruction, const FieldI DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet()); bool object_field_get_with_read_barrier = - kEmitCompilerReadBarrier && (instruction->GetType() == Primitive::kPrimNot); + kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference); LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, kEmitCompilerReadBarrier ? @@ -4755,7 +4755,7 @@ void LocationsBuilderX86::HandleFieldGet(HInstruction* instruction, const FieldI } locations->SetInAt(0, Location::RequiresRegister()); - if (Primitive::IsFloatingPointType(instruction->GetType())) { + if (DataType::IsFloatingPointType(instruction->GetType())) { locations->SetOut(Location::RequiresFpuRegister()); } else { // The output overlaps in case of long: we don't want the low move @@ -4765,12 +4765,12 @@ void LocationsBuilderX86::HandleFieldGet(HInstruction* instruction, const FieldI // the read barrier. locations->SetOut( Location::RequiresRegister(), - (object_field_get_with_read_barrier || instruction->GetType() == Primitive::kPrimLong) ? + (object_field_get_with_read_barrier || instruction->GetType() == DataType::Type::kInt64) ? Location::kOutputOverlap : Location::kNoOutputOverlap); } - if (field_info.IsVolatile() && (field_info.GetFieldType() == Primitive::kPrimLong)) { + if (field_info.IsVolatile() && (field_info.GetFieldType() == DataType::Type::kInt64)) { // Long values can be loaded atomically into an XMM using movsd. 
// So we use an XMM register as a temp to achieve atomicity (first // load the temp into the XMM and then copy the XMM into the @@ -4788,35 +4788,35 @@ void InstructionCodeGeneratorX86::HandleFieldGet(HInstruction* instruction, Register base = base_loc.AsRegister<Register>(); Location out = locations->Out(); bool is_volatile = field_info.IsVolatile(); - Primitive::Type field_type = field_info.GetFieldType(); + DataType::Type field_type = field_info.GetFieldType(); uint32_t offset = field_info.GetFieldOffset().Uint32Value(); switch (field_type) { - case Primitive::kPrimBoolean: { + case DataType::Type::kBool: { __ movzxb(out.AsRegister<Register>(), Address(base, offset)); break; } - case Primitive::kPrimByte: { + case DataType::Type::kInt8: { __ movsxb(out.AsRegister<Register>(), Address(base, offset)); break; } - case Primitive::kPrimShort: { + case DataType::Type::kInt16: { __ movsxw(out.AsRegister<Register>(), Address(base, offset)); break; } - case Primitive::kPrimChar: { + case DataType::Type::kUint16: { __ movzxw(out.AsRegister<Register>(), Address(base, offset)); break; } - case Primitive::kPrimInt: + case DataType::Type::kInt32: __ movl(out.AsRegister<Register>(), Address(base, offset)); break; - case Primitive::kPrimNot: { + case DataType::Type::kReference: { // /* HeapReference<Object> */ out = *(base + offset) if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { // Note that a potential implicit null check is handled in this @@ -4840,7 +4840,7 @@ void InstructionCodeGeneratorX86::HandleFieldGet(HInstruction* instruction, break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { if (is_volatile) { XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>(); __ movsd(temp, Address(base, offset)); @@ -4857,22 +4857,22 @@ void InstructionCodeGeneratorX86::HandleFieldGet(HInstruction* instruction, break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { __ movss(out.AsFpuRegister<XmmRegister>(), Address(base, offset)); break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { __ movsd(out.AsFpuRegister<XmmRegister>(), Address(base, offset)); break; } - case Primitive::kPrimVoid: + case DataType::Type::kVoid: LOG(FATAL) << "Unreachable type " << field_type; UNREACHABLE(); } - if (field_type == Primitive::kPrimNot || field_type == Primitive::kPrimLong) { + if (field_type == DataType::Type::kReference || field_type == DataType::Type::kInt64) { // Potential implicit null checks, in the case of reference or // long fields, are handled in the previous switch statement. } else { @@ -4880,7 +4880,7 @@ void InstructionCodeGeneratorX86::HandleFieldGet(HInstruction* instruction, } if (is_volatile) { - if (field_type == Primitive::kPrimNot) { + if (field_type == DataType::Type::kReference) { // Memory barriers, in the case of references, are also handled // in the previous switch statement. 
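For context, a standalone sketch (not ART code) of the requirement the volatile kInt64 load above satisfies: a volatile long must be read with one indivisible 64-bit access, which a plain register-pair load cannot provide on x86-32, so the generated code gets the single access from a movsd into an XMM temp and only then splits the value. The std::atomic stand-in below merely states the same requirement.

#include <atomic>
#include <cstdint>

// Load a volatile 64-bit field in one atomic access, then split it into
// 32-bit halves for further use in a register pair.
void LoadVolatileLong(const std::atomic<int64_t>& field,
                      uint32_t* out_lo, uint32_t* out_hi) {
  int64_t value = field.load(std::memory_order_seq_cst);  // single 64-bit load
  *out_lo = static_cast<uint32_t>(value);
  *out_hi = static_cast<uint32_t>(static_cast<uint64_t>(value) >> 32);
}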
} else { @@ -4896,23 +4896,23 @@ void LocationsBuilderX86::HandleFieldSet(HInstruction* instruction, const FieldI new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); bool is_volatile = field_info.IsVolatile(); - Primitive::Type field_type = field_info.GetFieldType(); - bool is_byte_type = (field_type == Primitive::kPrimBoolean) - || (field_type == Primitive::kPrimByte); + DataType::Type field_type = field_info.GetFieldType(); + bool is_byte_type = (field_type == DataType::Type::kBool) + || (field_type == DataType::Type::kInt8); // The register allocator does not support multiple // inputs that die at entry with one in a specific register. if (is_byte_type) { // Ensure the value is in a byte register. locations->SetInAt(1, Location::RegisterLocation(EAX)); - } else if (Primitive::IsFloatingPointType(field_type)) { - if (is_volatile && field_type == Primitive::kPrimDouble) { + } else if (DataType::IsFloatingPointType(field_type)) { + if (is_volatile && field_type == DataType::Type::kFloat64) { // In order to satisfy the semantics of volatile, this must be a single instruction store. locations->SetInAt(1, Location::RequiresFpuRegister()); } else { locations->SetInAt(1, Location::FpuRegisterOrConstant(instruction->InputAt(1))); } - } else if (is_volatile && field_type == Primitive::kPrimLong) { + } else if (is_volatile && field_type == DataType::Type::kInt64) { // In order to satisfy the semantics of volatile, this must be a single instruction store. locations->SetInAt(1, Location::RequiresRegister()); @@ -4944,7 +4944,7 @@ void InstructionCodeGeneratorX86::HandleFieldSet(HInstruction* instruction, Register base = locations->InAt(0).AsRegister<Register>(); Location value = locations->InAt(1); bool is_volatile = field_info.IsVolatile(); - Primitive::Type field_type = field_info.GetFieldType(); + DataType::Type field_type = field_info.GetFieldType(); uint32_t offset = field_info.GetFieldOffset().Uint32Value(); bool needs_write_barrier = CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1)); @@ -4956,14 +4956,14 @@ void InstructionCodeGeneratorX86::HandleFieldSet(HInstruction* instruction, bool maybe_record_implicit_null_check_done = false; switch (field_type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: { + case DataType::Type::kBool: + case DataType::Type::kInt8: { __ movb(Address(base, offset), value.AsRegister<ByteRegister>()); break; } - case Primitive::kPrimShort: - case Primitive::kPrimChar: { + case DataType::Type::kInt16: + case DataType::Type::kUint16: { if (value.IsConstant()) { __ movw(Address(base, offset), Immediate(CodeGenerator::GetInt16ValueOf(value.GetConstant()))); @@ -4973,13 +4973,13 @@ void InstructionCodeGeneratorX86::HandleFieldSet(HInstruction* instruction, break; } - case Primitive::kPrimInt: - case Primitive::kPrimNot: { + case DataType::Type::kInt32: + case DataType::Type::kReference: { if (kPoisonHeapReferences && needs_write_barrier) { // Note that in the case where `value` is a null reference, // we do not enter this block, as the reference does not // need poisoning. 
- DCHECK_EQ(field_type, Primitive::kPrimNot); + DCHECK_EQ(field_type, DataType::Type::kReference); Register temp = locations->GetTemp(0).AsRegister<Register>(); __ movl(temp, value.AsRegister<Register>()); __ PoisonHeapReference(temp); @@ -4994,7 +4994,7 @@ void InstructionCodeGeneratorX86::HandleFieldSet(HInstruction* instruction, break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { if (is_volatile) { XmmRegister temp1 = locations->GetTemp(0).AsFpuRegister<XmmRegister>(); XmmRegister temp2 = locations->GetTemp(1).AsFpuRegister<XmmRegister>(); @@ -5017,7 +5017,7 @@ void InstructionCodeGeneratorX86::HandleFieldSet(HInstruction* instruction, break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { if (value.IsConstant()) { int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant()); __ movl(Address(base, offset), Immediate(v)); @@ -5027,7 +5027,7 @@ void InstructionCodeGeneratorX86::HandleFieldSet(HInstruction* instruction, break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { if (value.IsConstant()) { int64_t v = CodeGenerator::GetInt64ValueOf(value.GetConstant()); __ movl(Address(base, offset), Immediate(Low32Bits(v))); @@ -5040,7 +5040,7 @@ void InstructionCodeGeneratorX86::HandleFieldSet(HInstruction* instruction, break; } - case Primitive::kPrimVoid: + case DataType::Type::kVoid: LOG(FATAL) << "Unreachable type " << field_type; UNREACHABLE(); } @@ -5205,7 +5205,7 @@ void InstructionCodeGeneratorX86::VisitNullCheck(HNullCheck* instruction) { void LocationsBuilderX86::VisitArrayGet(HArrayGet* instruction) { bool object_array_get_with_read_barrier = - kEmitCompilerReadBarrier && (instruction->GetType() == Primitive::kPrimNot); + kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference); LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, object_array_get_with_read_barrier ? @@ -5216,7 +5216,7 @@ void LocationsBuilderX86::VisitArrayGet(HArrayGet* instruction) { } locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); - if (Primitive::IsFloatingPointType(instruction->GetType())) { + if (DataType::IsFloatingPointType(instruction->GetType())) { locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); } else { // The output overlaps in case of long: we don't want the low move @@ -5226,9 +5226,9 @@ void LocationsBuilderX86::VisitArrayGet(HArrayGet* instruction) { // the read barrier. locations->SetOut( Location::RequiresRegister(), - (instruction->GetType() == Primitive::kPrimLong || object_array_get_with_read_barrier) ? - Location::kOutputOverlap : - Location::kNoOutputOverlap); + (instruction->GetType() == DataType::Type::kInt64 || object_array_get_with_read_barrier) + ? 
Location::kOutputOverlap + : Location::kNoOutputOverlap); } } @@ -5240,27 +5240,27 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) { Location out_loc = locations->Out(); uint32_t data_offset = CodeGenerator::GetArrayDataOffset(instruction); - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); switch (type) { - case Primitive::kPrimBoolean: { + case DataType::Type::kBool: { Register out = out_loc.AsRegister<Register>(); __ movzxb(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_1, data_offset)); break; } - case Primitive::kPrimByte: { + case DataType::Type::kInt8: { Register out = out_loc.AsRegister<Register>(); __ movsxb(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_1, data_offset)); break; } - case Primitive::kPrimShort: { + case DataType::Type::kInt16: { Register out = out_loc.AsRegister<Register>(); __ movsxw(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_2, data_offset)); break; } - case Primitive::kPrimChar: { + case DataType::Type::kUint16: { Register out = out_loc.AsRegister<Register>(); if (mirror::kUseStringCompression && instruction->IsStringCharAt()) { // Branch cases into compressed and uncompressed for each index's type. @@ -5284,13 +5284,13 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { Register out = out_loc.AsRegister<Register>(); __ movl(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_4, data_offset)); break; } - case Primitive::kPrimNot: { + case DataType::Type::kReference: { static_assert( sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t), "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes."); @@ -5320,7 +5320,7 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { DCHECK_NE(obj, out_loc.AsRegisterPairLow<Register>()); __ movl(out_loc.AsRegisterPairLow<Register>(), CodeGeneratorX86::ArrayAddress(obj, index, TIMES_8, data_offset)); @@ -5330,24 +5330,24 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { XmmRegister out = out_loc.AsFpuRegister<XmmRegister>(); __ movss(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_4, data_offset)); break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { XmmRegister out = out_loc.AsFpuRegister<XmmRegister>(); __ movsd(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_8, data_offset)); break; } - case Primitive::kPrimVoid: + case DataType::Type::kVoid: LOG(FATAL) << "Unreachable type " << type; UNREACHABLE(); } - if (type == Primitive::kPrimNot || type == Primitive::kPrimLong) { + if (type == DataType::Type::kReference || type == DataType::Type::kInt64) { // Potential implicit null checks, in the case of reference or // long arrays, are handled in the previous switch statement. 
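A standalone sketch (illustrative only; the real layout lives in the runtime's String class) of the two branches taken by the String.charAt() case in the kUint16 array load above: a compressed string stores its characters in 8-bit slots, so the load is a byte load that zero-extends, while an uncompressed string loads a full 16-bit code unit.

#include <cstddef>
#include <cstdint>
#include <cstring>

// Hypothetical character storage, for illustration only.
uint16_t StringCharAt(const uint8_t* data, size_t index, bool compressed) {
  if (compressed) {
    return data[index];  // byte load, zero-extended (movzxb path)
  }
  uint16_t code_unit;
  std::memcpy(&code_unit, data + 2 * index, sizeof(code_unit));  // 16-bit load (movzxw path)
  return code_unit;
}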
} else { @@ -5356,7 +5356,7 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) { } void LocationsBuilderX86::VisitArraySet(HArraySet* instruction) { - Primitive::Type value_type = instruction->GetComponentType(); + DataType::Type value_type = instruction->GetComponentType(); bool needs_write_barrier = CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue()); @@ -5368,8 +5368,8 @@ void LocationsBuilderX86::VisitArraySet(HArraySet* instruction) { LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall); - bool is_byte_type = (value_type == Primitive::kPrimBoolean) - || (value_type == Primitive::kPrimByte); + bool is_byte_type = (value_type == DataType::Type::kBool) + || (value_type == DataType::Type::kInt8); // We need the inputs to be different than the output in case of long operation. // In case of a byte operation, the register allocator does not support multiple // inputs that die at entry with one in a specific register. @@ -5378,7 +5378,7 @@ void LocationsBuilderX86::VisitArraySet(HArraySet* instruction) { if (is_byte_type) { // Ensure the value is in a byte register. locations->SetInAt(2, Location::ByteRegisterOrConstant(EAX, instruction->InputAt(2))); - } else if (Primitive::IsFloatingPointType(value_type)) { + } else if (DataType::IsFloatingPointType(value_type)) { locations->SetInAt(2, Location::FpuRegisterOrConstant(instruction->InputAt(2))); } else { locations->SetInAt(2, Location::RegisterOrConstant(instruction->InputAt(2))); @@ -5397,7 +5397,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) { Register array = array_loc.AsRegister<Register>(); Location index = locations->InAt(1); Location value = locations->InAt(2); - Primitive::Type value_type = instruction->GetComponentType(); + DataType::Type value_type = instruction->GetComponentType(); uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value(); uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value(); @@ -5406,8 +5406,8 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) { CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue()); switch (value_type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: { + case DataType::Type::kBool: + case DataType::Type::kInt8: { uint32_t offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value(); Address address = CodeGeneratorX86::ArrayAddress(array, index, TIMES_1, offset); if (value.IsRegister()) { @@ -5419,8 +5419,8 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimShort: - case Primitive::kPrimChar: { + case DataType::Type::kInt16: + case DataType::Type::kUint16: { uint32_t offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value(); Address address = CodeGeneratorX86::ArrayAddress(array, index, TIMES_2, offset); if (value.IsRegister()) { @@ -5432,7 +5432,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimNot: { + case DataType::Type::kReference: { uint32_t offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value(); Address address = CodeGeneratorX86::ArrayAddress(array, index, TIMES_4, offset); @@ -5528,7 +5528,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { uint32_t offset = 
mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value(); Address address = CodeGeneratorX86::ArrayAddress(array, index, TIMES_4, offset); if (value.IsRegister()) { @@ -5542,7 +5542,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value(); if (value.IsRegisterPair()) { __ movl(CodeGeneratorX86::ArrayAddress(array, index, TIMES_8, data_offset), @@ -5562,7 +5562,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { uint32_t offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value(); Address address = CodeGeneratorX86::ArrayAddress(array, index, TIMES_4, offset); if (value.IsFpuRegister()) { @@ -5576,7 +5576,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { uint32_t offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value(); Address address = CodeGeneratorX86::ArrayAddress(array, index, TIMES_8, offset); if (value.IsFpuRegister()) { @@ -5593,7 +5593,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimVoid: + case DataType::Type::kVoid: LOG(FATAL) << "Unreachable type " << instruction->GetType(); UNREACHABLE(); } @@ -5803,7 +5803,7 @@ void ParallelMoveResolverX86::EmitMove(size_t index) { __ movl(Address(ESP, destination.GetStackIndex()), source.AsRegister<Register>()); } } else if (source.IsRegisterPair()) { - size_t elem_size = Primitive::ComponentSize(Primitive::kPrimInt); + size_t elem_size = DataType::Size(DataType::Type::kInt32); // Create stack space for 2 elements. 
__ subl(ESP, Immediate(2 * elem_size)); __ movl(Address(ESP, 0), source.AsRegisterPairLow<Register>()); @@ -6957,8 +6957,8 @@ void LocationsBuilderX86::VisitXor(HXor* instruction) { HandleBitwiseOperation(i void LocationsBuilderX86::HandleBitwiseOperation(HBinaryOperation* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); - DCHECK(instruction->GetResultType() == Primitive::kPrimInt - || instruction->GetResultType() == Primitive::kPrimLong); + DCHECK(instruction->GetResultType() == DataType::Type::kInt32 + || instruction->GetResultType() == DataType::Type::kInt64); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::Any()); locations->SetOut(Location::SameAsFirstInput()); @@ -6982,7 +6982,7 @@ void InstructionCodeGeneratorX86::HandleBitwiseOperation(HBinaryOperation* instr Location second = locations->InAt(1); DCHECK(first.Equals(locations->Out())); - if (instruction->GetResultType() == Primitive::kPrimInt) { + if (instruction->GetResultType() == DataType::Type::kInt32) { if (second.IsRegister()) { if (instruction->IsAnd()) { __ andl(first.AsRegister<Register>(), second.AsRegister<Register>()); @@ -7015,7 +7015,7 @@ void InstructionCodeGeneratorX86::HandleBitwiseOperation(HBinaryOperation* instr } } } else { - DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong); + DCHECK_EQ(instruction->GetResultType(), DataType::Type::kInt64); if (second.IsRegisterPair()) { if (instruction->IsAnd()) { __ andl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>()); @@ -7557,12 +7557,12 @@ void LocationsBuilderX86::VisitX86LoadFromConstantTable( } switch (insn->GetType()) { - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetOut(Location::RequiresFpuRegister()); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: locations->SetOut(Location::RequiresRegister()); break; @@ -7582,19 +7582,19 @@ void InstructionCodeGeneratorX86::VisitX86LoadFromConstantTable(HX86LoadFromCons HConstant *value = insn->GetConstant(); switch (insn->GetType()) { - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: __ movss(out.AsFpuRegister<XmmRegister>(), codegen_->LiteralFloatAddress( value->AsFloatConstant()->GetValue(), insn->GetBaseMethodAddress(), const_area)); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: __ movsd(out.AsFpuRegister<XmmRegister>(), codegen_->LiteralDoubleAddress( value->AsDoubleConstant()->GetValue(), insn->GetBaseMethodAddress(), const_area)); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: __ movl(out.AsRegister<Register>(), codegen_->LiteralInt32Address( value->AsIntConstant()->GetValue(), insn->GetBaseMethodAddress(), const_area)); @@ -7787,13 +7787,13 @@ Address CodeGeneratorX86::LiteralCaseTable(HX86PackedSwitch* switch_instr, } // TODO: target as memory. 
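The x86 hunks above swap Primitive::ComponentSize and Primitive::IsFloatingPointType for DataType::Size and DataType::IsFloatingPointType. As a rough orientation, here is a minimal sketch of what such helpers could look like, assuming only the enumerators that appear in this patch; the actual optimizing/data_type.h introduced by this change may differ:

// Sketch only -- not the ART implementation. Enumerator names are the ones
// used throughout this diff; everything else is illustrative.
#include <cstddef>

enum class Type { kReference, kBool, kInt8, kUint16, kInt16,
                  kInt32, kInt64, kFloat32, kFloat64, kVoid };

// Used above to pick FPU vs. core register locations.
constexpr bool IsFloatingPointType(Type type) {
  return type == Type::kFloat32 || type == Type::kFloat64;
}

// Used above in place of Primitive::ComponentSize, e.g. for the two-slot
// stack adjustment in ParallelMoveResolverX86::EmitMove.
inline size_t Size(Type type) {
  switch (type) {
    case Type::kVoid:       return 0;
    case Type::kBool:
    case Type::kInt8:       return 1;
    case Type::kUint16:
    case Type::kInt16:      return 2;
    case Type::kInt32:
    case Type::kFloat32:
    case Type::kReference:  return 4;  // 32-bit heap references, per the array hunks
    case Type::kInt64:
    case Type::kFloat64:    return 8;
  }
  return 0;  // unreachable for the enumerators above
}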
-void CodeGeneratorX86::MoveFromReturnRegister(Location target, Primitive::Type type) { +void CodeGeneratorX86::MoveFromReturnRegister(Location target, DataType::Type type) { if (!target.IsValid()) { - DCHECK_EQ(type, Primitive::kPrimVoid); + DCHECK_EQ(type, DataType::Type::kVoid); return; } - DCHECK_NE(type, Primitive::kPrimVoid); + DCHECK_NE(type, DataType::Type::kVoid); Location return_loc = InvokeDexCallingConventionVisitorX86().GetReturnLocation(type); if (target.Equals(return_loc)) { @@ -7802,10 +7802,10 @@ void CodeGeneratorX86::MoveFromReturnRegister(Location target, Primitive::Type t // TODO: Consider pairs in the parallel move resolver, then this could be nicely merged // with the else branch. - if (type == Primitive::kPrimLong) { + if (type == DataType::Type::kInt64) { HParallelMove parallel_move(GetGraph()->GetArena()); - parallel_move.AddMove(return_loc.ToLow(), target.ToLow(), Primitive::kPrimInt, nullptr); - parallel_move.AddMove(return_loc.ToHigh(), target.ToHigh(), Primitive::kPrimInt, nullptr); + parallel_move.AddMove(return_loc.ToLow(), target.ToLow(), DataType::Type::kInt32, nullptr); + parallel_move.AddMove(return_loc.ToHigh(), target.ToHigh(), DataType::Type::kInt32, nullptr); GetMoveResolver()->EmitNativeCode(¶llel_move); } else { // Let the parallel move resolver take care of all of this. diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h index e8f919d122..fb61e75d73 100644 --- a/compiler/optimizing/code_generator_x86.h +++ b/compiler/optimizing/code_generator_x86.h @@ -83,8 +83,8 @@ class InvokeDexCallingConventionVisitorX86 : public InvokeDexCallingConventionVi InvokeDexCallingConventionVisitorX86() {} virtual ~InvokeDexCallingConventionVisitorX86() {} - Location GetNextLocation(Primitive::Type type) OVERRIDE; - Location GetReturnLocation(Primitive::Type type) const OVERRIDE; + Location GetNextLocation(DataType::Type type) OVERRIDE; + Location GetReturnLocation(DataType::Type type) const OVERRIDE; Location GetMethodLocation() const OVERRIDE; private: @@ -103,13 +103,13 @@ class FieldAccessCallingConventionX86 : public FieldAccessCallingConvention { Location GetFieldIndexLocation() const OVERRIDE { return Location::RegisterLocation(EAX); } - Location GetReturnLocation(Primitive::Type type) const OVERRIDE { - return Primitive::Is64BitType(type) + Location GetReturnLocation(DataType::Type type) const OVERRIDE { + return DataType::Is64BitType(type) ? Location::RegisterPairLocation(EAX, EDX) : Location::RegisterLocation(EAX); } - Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE { - return Primitive::Is64BitType(type) + Location GetSetValueLocation(DataType::Type type, bool is_instance) const OVERRIDE { + return DataType::Is64BitType(type) ? (is_instance ? Location::RegisterPairLocation(EDX, EBX) : Location::RegisterPairLocation(ECX, EDX)) @@ -117,7 +117,7 @@ class FieldAccessCallingConventionX86 : public FieldAccessCallingConvention { ? 
Location::RegisterLocation(EDX) : Location::RegisterLocation(ECX)); } - Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE { + Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE { return Location::FpuRegisterLocation(XMM0); } @@ -321,7 +321,7 @@ class CodeGeneratorX86 : public CodeGenerator { void GenerateFrameExit() OVERRIDE; void Bind(HBasicBlock* block) OVERRIDE; void MoveConstant(Location destination, int32_t value) OVERRIDE; - void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE; + void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE; void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE; size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; @@ -428,7 +428,7 @@ class CodeGeneratorX86 : public CodeGenerator { dex::TypeIndex dex_index, Handle<mirror::Class> handle); - void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE; + void MoveFromReturnRegister(Location trg, DataType::Type type) OVERRIDE; // Emit linker patches. void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE; @@ -456,8 +456,8 @@ class CodeGeneratorX86 : public CodeGenerator { block_labels_ = CommonInitializeLabels<Label>(); } - bool NeedsTwoRegisters(Primitive::Type type) const OVERRIDE { - return type == Primitive::kPrimLong; + bool NeedsTwoRegisters(DataType::Type type) const OVERRIDE { + return type == DataType::Type::kInt64; } bool ShouldSplitLongMoves() const OVERRIDE { return true; } diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc index 65b3f62104..42704e9fe1 100644 --- a/compiler/optimizing/code_generator_x86_64.cc +++ b/compiler/optimizing/code_generator_x86_64.cc @@ -106,12 +106,12 @@ class DivZeroCheckSlowPathX86_64 : public SlowPathCode { class DivRemMinusOneSlowPathX86_64 : public SlowPathCode { public: - DivRemMinusOneSlowPathX86_64(HInstruction* at, Register reg, Primitive::Type type, bool is_div) + DivRemMinusOneSlowPathX86_64(HInstruction* at, Register reg, DataType::Type type, bool is_div) : SlowPathCode(at), cpu_reg_(CpuRegister(reg)), type_(type), is_div_(is_div) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { __ Bind(GetEntryLabel()); - if (type_ == Primitive::kPrimInt) { + if (type_ == DataType::Type::kInt32) { if (is_div_) { __ negl(cpu_reg_); } else { @@ -119,7 +119,7 @@ class DivRemMinusOneSlowPathX86_64 : public SlowPathCode { } } else { - DCHECK_EQ(Primitive::kPrimLong, type_); + DCHECK_EQ(DataType::Type::kInt64, type_); if (is_div_) { __ negq(cpu_reg_); } else { @@ -133,7 +133,7 @@ class DivRemMinusOneSlowPathX86_64 : public SlowPathCode { private: const CpuRegister cpu_reg_; - const Primitive::Type type_; + const DataType::Type type_; const bool is_div_; DISALLOW_COPY_AND_ASSIGN(DivRemMinusOneSlowPathX86_64); }; @@ -215,10 +215,10 @@ class BoundsCheckSlowPathX86_64 : public SlowPathCode { codegen->EmitParallelMoves( locations->InAt(0), Location::RegisterLocation(calling_convention.GetRegisterAt(0)), - Primitive::kPrimInt, + DataType::Type::kInt32, length_loc, Location::RegisterLocation(calling_convention.GetRegisterAt(1)), - Primitive::kPrimInt); + DataType::Type::kInt32); QuickEntrypointEnum entrypoint = instruction_->AsBoundsCheck()->IsStringCharAt() ? 
kQuickThrowStringBounds : kQuickThrowArrayBounds; @@ -360,10 +360,10 @@ class TypeCheckSlowPathX86_64 : public SlowPathCode { InvokeRuntimeCallingConvention calling_convention; codegen->EmitParallelMoves(locations->InAt(0), Location::RegisterLocation(calling_convention.GetRegisterAt(0)), - Primitive::kPrimNot, + DataType::Type::kReference, locations->InAt(1), Location::RegisterLocation(calling_convention.GetRegisterAt(1)), - Primitive::kPrimNot); + DataType::Type::kReference); if (instruction_->IsInstanceOf()) { x86_64_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this); CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>(); @@ -431,17 +431,17 @@ class ArraySetSlowPathX86_64 : public SlowPathCode { parallel_move.AddMove( locations->InAt(0), Location::RegisterLocation(calling_convention.GetRegisterAt(0)), - Primitive::kPrimNot, + DataType::Type::kReference, nullptr); parallel_move.AddMove( locations->InAt(1), Location::RegisterLocation(calling_convention.GetRegisterAt(1)), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); parallel_move.AddMove( locations->InAt(2), Location::RegisterLocation(calling_convention.GetRegisterAt(2)), - Primitive::kPrimNot, + DataType::Type::kReference, nullptr); codegen->GetMoveResolver()->EmitNativeCode(¶llel_move); @@ -834,16 +834,16 @@ class ReadBarrierForHeapReferenceSlowPathX86_64 : public SlowPathCode { HParallelMove parallel_move(codegen->GetGraph()->GetArena()); parallel_move.AddMove(ref_, Location::RegisterLocation(calling_convention.GetRegisterAt(0)), - Primitive::kPrimNot, + DataType::Type::kReference, nullptr); parallel_move.AddMove(obj_, Location::RegisterLocation(calling_convention.GetRegisterAt(1)), - Primitive::kPrimNot, + DataType::Type::kReference, nullptr); if (index.IsValid()) { parallel_move.AddMove(index, Location::RegisterLocation(calling_convention.GetRegisterAt(2)), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); codegen->GetMoveResolver()->EmitNativeCode(¶llel_move); } else { @@ -1443,7 +1443,7 @@ void CodeGeneratorX86_64::MoveConstant(Location location, int32_t value) { } void CodeGeneratorX86_64::MoveLocation( - Location dst, Location src, Primitive::Type dst_type ATTRIBUTE_UNUSED) { + Location dst, Location src, DataType::Type dst_type ATTRIBUTE_UNUSED) { Move(dst, src); } @@ -1518,22 +1518,22 @@ void InstructionCodeGeneratorX86_64::GenerateCompareTest(HCondition* condition) Location left = locations->InAt(0); Location right = locations->InAt(1); - Primitive::Type type = condition->InputAt(0)->GetType(); + DataType::Type type = condition->InputAt(0)->GetType(); switch (type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimNot: { + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kReference: { codegen_->GenerateIntCompare(left, right); break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { codegen_->GenerateLongCompare(left, right); break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { if (right.IsFpuRegister()) { __ ucomiss(left.AsFpuRegister<XmmRegister>(), right.AsFpuRegister<XmmRegister>()); } else if (right.IsConstant()) { @@ -1547,7 +1547,7 @@ void InstructionCodeGeneratorX86_64::GenerateCompareTest(HCondition* condition) } break; } - case 
Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { if (right.IsFpuRegister()) { __ ucomisd(left.AsFpuRegister<XmmRegister>(), right.AsFpuRegister<XmmRegister>()); } else if (right.IsConstant()) { @@ -1580,17 +1580,17 @@ void InstructionCodeGeneratorX86_64::GenerateCompareTestAndBranch(HCondition* co GenerateCompareTest(condition); // Now generate the correct jump(s). - Primitive::Type type = condition->InputAt(0)->GetType(); + DataType::Type type = condition->InputAt(0)->GetType(); switch (type) { - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { __ j(X86_64IntegerCondition(condition->GetCondition()), true_target); break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { GenerateFPJumps(condition, true_target, false_target); break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { GenerateFPJumps(condition, true_target, false_target); break; } @@ -1613,7 +1613,7 @@ static bool AreEflagsSetFrom(HInstruction* cond, HInstruction* branch) { // conditions if they are materialized due to the complex branching. return cond->IsCondition() && cond->GetNext() == branch && - !Primitive::IsFloatingPointType(cond->InputAt(0)->GetType()); + !DataType::IsFloatingPointType(cond->InputAt(0)->GetType()); } template<class LabelType> @@ -1677,8 +1677,8 @@ void InstructionCodeGeneratorX86_64::GenerateTestAndBranch(HInstruction* instruc // If this is a long or FP comparison that has been folded into // the HCondition, generate the comparison directly. - Primitive::Type type = condition->InputAt(0)->GetType(); - if (type == Primitive::kPrimLong || Primitive::IsFloatingPointType(type)) { + DataType::Type type = condition->InputAt(0)->GetType(); + if (type == DataType::Type::kInt64 || DataType::IsFloatingPointType(type)) { GenerateCompareTestAndBranch(condition, true_target, false_target); return; } @@ -1750,14 +1750,14 @@ void InstructionCodeGeneratorX86_64::VisitShouldDeoptimizeFlag(HShouldDeoptimize static bool SelectCanUseCMOV(HSelect* select) { // There are no conditional move instructions for XMMs. - if (Primitive::IsFloatingPointType(select->GetType())) { + if (DataType::IsFloatingPointType(select->GetType())) { return false; } // A FP condition doesn't generate the single CC that we need. HInstruction* condition = select->GetCondition(); if (condition->IsCondition() && - Primitive::IsFloatingPointType(condition->InputAt(0)->GetType())) { + DataType::IsFloatingPointType(condition->InputAt(0)->GetType())) { return false; } @@ -1767,7 +1767,7 @@ static bool SelectCanUseCMOV(HSelect* select) { void LocationsBuilderX86_64::VisitSelect(HSelect* select) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select); - if (Primitive::IsFloatingPointType(select->GetType())) { + if (DataType::IsFloatingPointType(select->GetType())) { locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::Any()); } else { @@ -1826,7 +1826,7 @@ void InstructionCodeGeneratorX86_64::VisitSelect(HSelect* select) { // If the condition is true, overwrite the output, which already contains false. // Generate the correct sized CMOV. 
- bool is_64_bit = Primitive::Is64BitType(select->GetType()); + bool is_64_bit = DataType::Is64BitType(select->GetType()); if (value_true_loc.IsRegister()) { __ cmov(cond, value_false, value_true_loc.AsRegister<CpuRegister>(), is_64_bit); } else { @@ -1862,12 +1862,12 @@ void LocationsBuilderX86_64::HandleCondition(HCondition* cond) { new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall); // Handle the long/FP comparisons made in instruction simplification. switch (cond->InputAt(0)->GetType()) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::Any()); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::Any()); break; @@ -1902,14 +1902,14 @@ void InstructionCodeGeneratorX86_64::HandleCondition(HCondition* cond) { codegen_->GenerateIntCompare(lhs, rhs); __ setcc(X86_64IntegerCondition(cond->GetCondition()), reg); return; - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Clear output register: setcc only sets the low byte. __ xorl(reg, reg); codegen_->GenerateLongCompare(lhs, rhs); __ setcc(X86_64IntegerCondition(cond->GetCondition()), reg); return; - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { XmmRegister lhs_reg = lhs.AsFpuRegister<XmmRegister>(); if (rhs.IsConstant()) { float value = rhs.GetConstant()->AsFloatConstant()->GetValue(); @@ -1922,7 +1922,7 @@ void InstructionCodeGeneratorX86_64::HandleCondition(HCondition* cond) { GenerateFPJumps(cond, &true_label, &false_label); break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { XmmRegister lhs_reg = lhs.AsFpuRegister<XmmRegister>(); if (rhs.IsConstant()) { double value = rhs.GetConstant()->AsDoubleConstant()->GetValue(); @@ -2035,19 +2035,19 @@ void LocationsBuilderX86_64::VisitCompare(HCompare* compare) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall); switch (compare->InputAt(0)->GetType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimChar: - case Primitive::kPrimInt: - case Primitive::kPrimLong: { + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kUint16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::Any()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::Any()); locations->SetOut(Location::RequiresRegister()); @@ -2065,23 +2065,23 @@ void InstructionCodeGeneratorX86_64::VisitCompare(HCompare* compare) { Location right = locations->InAt(1); NearLabel less, greater, done; - Primitive::Type type = compare->InputAt(0)->GetType(); + DataType::Type type = compare->InputAt(0)->GetType(); Condition less_cond = kLess; switch (type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimChar: - case Primitive::kPrimInt: { + case DataType::Type::kBool: + case DataType::Type::kInt8: + case 
DataType::Type::kInt16: + case DataType::Type::kUint16: + case DataType::Type::kInt32: { codegen_->GenerateIntCompare(left, right); break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { codegen_->GenerateLongCompare(left, right); break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { XmmRegister left_reg = left.AsFpuRegister<XmmRegister>(); if (right.IsConstant()) { float value = right.GetConstant()->AsFloatConstant()->GetValue(); @@ -2095,7 +2095,7 @@ void InstructionCodeGeneratorX86_64::VisitCompare(HCompare* compare) { less_cond = kBelow; // ucomis{s,d} sets CF break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { XmmRegister left_reg = left.AsFpuRegister<XmmRegister>(); if (right.IsConstant()) { double value = right.GetConstant()->AsDoubleConstant()->GetValue(); @@ -2207,18 +2207,18 @@ void LocationsBuilderX86_64::VisitReturn(HReturn* ret) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall); switch (ret->InputAt(0)->GetType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimNot: - case Primitive::kPrimLong: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kReference: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RegisterLocation(RAX)); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::FpuRegisterLocation(XMM0)); break; @@ -2230,18 +2230,18 @@ void LocationsBuilderX86_64::VisitReturn(HReturn* ret) { void InstructionCodeGeneratorX86_64::VisitReturn(HReturn* ret) { if (kIsDebugBuild) { switch (ret->InputAt(0)->GetType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimNot: - case Primitive::kPrimLong: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kReference: + case DataType::Type::kInt64: DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegister<CpuRegister>().AsRegister(), RAX); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: DCHECK_EQ(ret->GetLocations()->InAt(0).AsFpuRegister<XmmRegister>().AsFloatRegister(), XMM0); break; @@ -2253,22 +2253,22 @@ void InstructionCodeGeneratorX86_64::VisitReturn(HReturn* ret) { codegen_->GenerateFrameExit(); } -Location InvokeDexCallingConventionVisitorX86_64::GetReturnLocation(Primitive::Type type) const { +Location InvokeDexCallingConventionVisitorX86_64::GetReturnLocation(DataType::Type type) const { switch (type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimNot: - case Primitive::kPrimLong: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kReference: + case DataType::Type::kInt64: return Location::RegisterLocation(RAX); - case Primitive::kPrimVoid: + case 
DataType::Type::kVoid: return Location::NoLocation(); - case Primitive::kPrimDouble: - case Primitive::kPrimFloat: + case DataType::Type::kFloat64: + case DataType::Type::kFloat32: return Location::FpuRegisterLocation(XMM0); } @@ -2279,14 +2279,14 @@ Location InvokeDexCallingConventionVisitorX86_64::GetMethodLocation() const { return Location::RegisterLocation(kMethodRegisterArgument); } -Location InvokeDexCallingConventionVisitorX86_64::GetNextLocation(Primitive::Type type) { +Location InvokeDexCallingConventionVisitorX86_64::GetNextLocation(DataType::Type type) { switch (type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimNot: { + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kReference: { uint32_t index = gp_index_++; stack_index_++; if (index < calling_convention.GetNumberOfRegisters()) { @@ -2296,7 +2296,7 @@ Location InvokeDexCallingConventionVisitorX86_64::GetNextLocation(Primitive::Typ } } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { uint32_t index = gp_index_; stack_index_ += 2; if (index < calling_convention.GetNumberOfRegisters()) { @@ -2308,7 +2308,7 @@ Location InvokeDexCallingConventionVisitorX86_64::GetNextLocation(Primitive::Typ } } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { uint32_t index = float_index_++; stack_index_++; if (index < calling_convention.GetNumberOfFpuRegisters()) { @@ -2318,7 +2318,7 @@ Location InvokeDexCallingConventionVisitorX86_64::GetNextLocation(Primitive::Typ } } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { uint32_t index = float_index_++; stack_index_ += 2; if (index < calling_convention.GetNumberOfFpuRegisters()) { @@ -2328,7 +2328,7 @@ Location InvokeDexCallingConventionVisitorX86_64::GetNextLocation(Primitive::Typ } } - case Primitive::kPrimVoid: + case DataType::Type::kVoid: LOG(FATAL) << "Unexpected parameter type " << type; break; } @@ -2469,14 +2469,14 @@ void LocationsBuilderX86_64::VisitNeg(HNeg* neg) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall); switch (neg->GetResultType()) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt32: + case DataType::Type::kInt64: locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::SameAsFirstInput()); break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::SameAsFirstInput()); locations->AddTemp(Location::RequiresFpuRegister()); @@ -2492,19 +2492,19 @@ void InstructionCodeGeneratorX86_64::VisitNeg(HNeg* neg) { Location out = locations->Out(); Location in = locations->InAt(0); switch (neg->GetResultType()) { - case Primitive::kPrimInt: + case DataType::Type::kInt32: DCHECK(in.IsRegister()); DCHECK(in.Equals(out)); __ negl(out.AsRegister<CpuRegister>()); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: DCHECK(in.IsRegister()); DCHECK(in.Equals(out)); __ negq(out.AsRegister<CpuRegister>()); break; - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { DCHECK(in.Equals(out)); XmmRegister mask = locations->GetTemp(0).AsFpuRegister<XmmRegister>(); // Implement float negation with an 
exclusive or with value @@ -2515,7 +2515,7 @@ void InstructionCodeGeneratorX86_64::VisitNeg(HNeg* neg) { break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { DCHECK(in.Equals(out)); XmmRegister mask = locations->GetTemp(0).AsFpuRegister<XmmRegister>(); // Implement double negation with an exclusive or with value @@ -2534,23 +2534,23 @@ void InstructionCodeGeneratorX86_64::VisitNeg(HNeg* neg) { void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall); - Primitive::Type result_type = conversion->GetResultType(); - Primitive::Type input_type = conversion->GetInputType(); + DataType::Type result_type = conversion->GetResultType(); + DataType::Type input_type = conversion->GetInputType(); DCHECK_NE(result_type, input_type); // The Java language does not allow treating boolean as an integral type but // our bit representation makes it safe. switch (result_type) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: switch (input_type) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Type conversion from long to byte is a result of code transformations. - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimChar: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kUint16: // Processing a Dex `int-to-byte' instruction. locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); @@ -2562,15 +2562,15 @@ void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) { } break; - case Primitive::kPrimShort: + case DataType::Type::kInt16: switch (input_type) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Type conversion from long to short is a result of code transformations. - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimByte: - case Primitive::kPrimInt: - case Primitive::kPrimChar: + case DataType::Type::kInt8: + case DataType::Type::kInt32: + case DataType::Type::kUint16: // Processing a Dex `int-to-short' instruction. locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); @@ -2582,21 +2582,21 @@ void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) { } break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: switch (input_type) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Processing a Dex `long-to-int' instruction. locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: // Processing a Dex `float-to-int' instruction. locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister()); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: // Processing a Dex `double-to-int' instruction. 
locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister()); @@ -2608,14 +2608,14 @@ void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) { } break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: switch (input_type) { - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimChar: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kUint16: // Processing a Dex `int-to-long' instruction. // TODO: We would benefit from a (to-be-implemented) // Location::RegisterOrStackSlot requirement for this input. @@ -2623,13 +2623,13 @@ void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) { locations->SetOut(Location::RequiresRegister()); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: // Processing a Dex `float-to-long' instruction. locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister()); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: // Processing a Dex `double-to-long' instruction. locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister()); @@ -2641,15 +2641,15 @@ void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) { } break; - case Primitive::kPrimChar: + case DataType::Type::kUint16: switch (input_type) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Type conversion from long to char is a result of code transformations. - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimInt: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kInt32: // Processing a Dex `int-to-char' instruction. locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); @@ -2661,26 +2661,26 @@ void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) { } break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: switch (input_type) { - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimChar: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kUint16: // Processing a Dex `int-to-float' instruction. locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresFpuRegister()); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Processing a Dex `long-to-float' instruction. locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresFpuRegister()); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: // Processing a Dex `double-to-float' instruction. 
locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); @@ -2692,26 +2692,26 @@ void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) { }; break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: switch (input_type) { - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimChar: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kUint16: // Processing a Dex `int-to-double' instruction. locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresFpuRegister()); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Processing a Dex `long-to-double' instruction. locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresFpuRegister()); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: // Processing a Dex `float-to-double' instruction. locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); @@ -2733,19 +2733,19 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver LocationSummary* locations = conversion->GetLocations(); Location out = locations->Out(); Location in = locations->InAt(0); - Primitive::Type result_type = conversion->GetResultType(); - Primitive::Type input_type = conversion->GetInputType(); + DataType::Type result_type = conversion->GetResultType(); + DataType::Type input_type = conversion->GetInputType(); DCHECK_NE(result_type, input_type); switch (result_type) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: switch (input_type) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Type conversion from long to byte is a result of code transformations. - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimChar: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kUint16: // Processing a Dex `int-to-byte' instruction. if (in.IsRegister()) { __ movsxb(out.AsRegister<CpuRegister>(), in.AsRegister<CpuRegister>()); @@ -2764,15 +2764,15 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver } break; - case Primitive::kPrimShort: + case DataType::Type::kInt16: switch (input_type) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Type conversion from long to short is a result of code transformations. - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimByte: - case Primitive::kPrimInt: - case Primitive::kPrimChar: + case DataType::Type::kInt8: + case DataType::Type::kInt32: + case DataType::Type::kUint16: // Processing a Dex `int-to-short' instruction. if (in.IsRegister()) { __ movsxw(out.AsRegister<CpuRegister>(), in.AsRegister<CpuRegister>()); @@ -2791,9 +2791,9 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver } break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: switch (input_type) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Processing a Dex `long-to-int' instruction. 
if (in.IsRegister()) { __ movl(out.AsRegister<CpuRegister>(), in.AsRegister<CpuRegister>()); @@ -2808,7 +2808,7 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver } break; - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { // Processing a Dex `float-to-int' instruction. XmmRegister input = in.AsFpuRegister<XmmRegister>(); CpuRegister output = out.AsRegister<CpuRegister>(); @@ -2830,7 +2830,7 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { // Processing a Dex `double-to-int' instruction. XmmRegister input = in.AsFpuRegister<XmmRegister>(); CpuRegister output = out.AsRegister<CpuRegister>(); @@ -2858,21 +2858,21 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver } break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: switch (input_type) { DCHECK(out.IsRegister()); - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimChar: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kUint16: // Processing a Dex `int-to-long' instruction. DCHECK(in.IsRegister()); __ movsxd(out.AsRegister<CpuRegister>(), in.AsRegister<CpuRegister>()); break; - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { // Processing a Dex `float-to-long' instruction. XmmRegister input = in.AsFpuRegister<XmmRegister>(); CpuRegister output = out.AsRegister<CpuRegister>(); @@ -2894,7 +2894,7 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { // Processing a Dex `double-to-long' instruction. XmmRegister input = in.AsFpuRegister<XmmRegister>(); CpuRegister output = out.AsRegister<CpuRegister>(); @@ -2922,15 +2922,15 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver } break; - case Primitive::kPrimChar: + case DataType::Type::kUint16: switch (input_type) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Type conversion from long to char is a result of code transformations. - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimInt: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kInt32: // Processing a Dex `int-to-char' instruction. if (in.IsRegister()) { __ movzxw(out.AsRegister<CpuRegister>(), in.AsRegister<CpuRegister>()); @@ -2949,14 +2949,14 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver } break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: switch (input_type) { - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimChar: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kUint16: // Processing a Dex `int-to-float' instruction. 
if (in.IsRegister()) { __ cvtsi2ss(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), false); @@ -2970,7 +2970,7 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver } break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Processing a Dex `long-to-float' instruction. if (in.IsRegister()) { __ cvtsi2ss(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), true); @@ -2984,7 +2984,7 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver } break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: // Processing a Dex `double-to-float' instruction. if (in.IsFpuRegister()) { __ cvtsd2ss(out.AsFpuRegister<XmmRegister>(), in.AsFpuRegister<XmmRegister>()); @@ -3004,14 +3004,14 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver }; break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: switch (input_type) { - case Primitive::kPrimBoolean: + case DataType::Type::kBool: // Boolean input is a result of code transformations. - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimInt: - case Primitive::kPrimChar: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kUint16: // Processing a Dex `int-to-double' instruction. if (in.IsRegister()) { __ cvtsi2sd(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), false); @@ -3025,7 +3025,7 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver } break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: // Processing a Dex `long-to-double' instruction. if (in.IsRegister()) { __ cvtsi2sd(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), true); @@ -3039,7 +3039,7 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver } break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: // Processing a Dex `float-to-double' instruction. if (in.IsFpuRegister()) { __ cvtss2sd(out.AsFpuRegister<XmmRegister>(), in.AsFpuRegister<XmmRegister>()); @@ -3069,14 +3069,14 @@ void LocationsBuilderX86_64::VisitAdd(HAdd* add) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall); switch (add->GetResultType()) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(add->InputAt(1))); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { locations->SetInAt(0, Location::RequiresRegister()); // We can use a leaq or addq if the constant can fit in an immediate. 
locations->SetInAt(1, Location::RegisterOrInt32Constant(add->InputAt(1))); @@ -3084,8 +3084,8 @@ void LocationsBuilderX86_64::VisitAdd(HAdd* add) { break; } - case Primitive::kPrimDouble: - case Primitive::kPrimFloat: { + case DataType::Type::kFloat64: + case DataType::Type::kFloat32: { locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::Any()); locations->SetOut(Location::SameAsFirstInput()); @@ -3104,7 +3104,7 @@ void InstructionCodeGeneratorX86_64::VisitAdd(HAdd* add) { Location out = locations->Out(); switch (add->GetResultType()) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { if (second.IsRegister()) { if (out.AsRegister<Register>() == first.AsRegister<Register>()) { __ addl(out.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>()); @@ -3129,7 +3129,7 @@ void InstructionCodeGeneratorX86_64::VisitAdd(HAdd* add) { break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { if (second.IsRegister()) { if (out.AsRegister<Register>() == first.AsRegister<Register>()) { __ addq(out.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>()); @@ -3154,7 +3154,7 @@ void InstructionCodeGeneratorX86_64::VisitAdd(HAdd* add) { break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { if (second.IsFpuRegister()) { __ addss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>()); } else if (second.IsConstant()) { @@ -3169,7 +3169,7 @@ void InstructionCodeGeneratorX86_64::VisitAdd(HAdd* add) { break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { if (second.IsFpuRegister()) { __ addsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>()); } else if (second.IsConstant()) { @@ -3193,20 +3193,20 @@ void LocationsBuilderX86_64::VisitSub(HSub* sub) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall); switch (sub->GetResultType()) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::Any()); locations->SetOut(Location::SameAsFirstInput()); break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrInt32Constant(sub->InputAt(1))); locations->SetOut(Location::SameAsFirstInput()); break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::Any()); locations->SetOut(Location::SameAsFirstInput()); @@ -3223,7 +3223,7 @@ void InstructionCodeGeneratorX86_64::VisitSub(HSub* sub) { Location second = locations->InAt(1); DCHECK(first.Equals(locations->Out())); switch (sub->GetResultType()) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { if (second.IsRegister()) { __ subl(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>()); } else if (second.IsConstant()) { @@ -3234,7 +3234,7 @@ void InstructionCodeGeneratorX86_64::VisitSub(HSub* sub) { } break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { if (second.IsConstant()) { int64_t value = second.GetConstant()->AsLongConstant()->GetValue(); DCHECK(IsInt<32>(value)); @@ -3245,7 +3245,7 @@ void InstructionCodeGeneratorX86_64::VisitSub(HSub* sub) { break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { if (second.IsFpuRegister()) { __ 
subss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>()); } else if (second.IsConstant()) { @@ -3260,7 +3260,7 @@ void InstructionCodeGeneratorX86_64::VisitSub(HSub* sub) { break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { if (second.IsFpuRegister()) { __ subsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>()); } else if (second.IsConstant()) { @@ -3284,7 +3284,7 @@ void LocationsBuilderX86_64::VisitMul(HMul* mul) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall); switch (mul->GetResultType()) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::Any()); if (mul->InputAt(1)->IsIntConstant()) { @@ -3295,7 +3295,7 @@ void LocationsBuilderX86_64::VisitMul(HMul* mul) { } break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::Any()); if (mul->InputAt(1)->IsLongConstant() && @@ -3307,8 +3307,8 @@ void LocationsBuilderX86_64::VisitMul(HMul* mul) { } break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::Any()); locations->SetOut(Location::SameAsFirstInput()); @@ -3326,7 +3326,7 @@ void InstructionCodeGeneratorX86_64::VisitMul(HMul* mul) { Location second = locations->InAt(1); Location out = locations->Out(); switch (mul->GetResultType()) { - case Primitive::kPrimInt: + case DataType::Type::kInt32: // The constant may have ended up in a register, so test explicitly to avoid // problems where the output may not be the same as the first operand. if (mul->InputAt(1)->IsIntConstant()) { @@ -3342,7 +3342,7 @@ void InstructionCodeGeneratorX86_64::VisitMul(HMul* mul) { Address(CpuRegister(RSP), second.GetStackIndex())); } break; - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { // The constant may have ended up in a register, so test explicitly to avoid // problems where the output may not be the same as the first operand. 
if (mul->InputAt(1)->IsLongConstant()) { @@ -3367,7 +3367,7 @@ void InstructionCodeGeneratorX86_64::VisitMul(HMul* mul) { break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { DCHECK(first.Equals(out)); if (second.IsFpuRegister()) { __ mulss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>()); @@ -3383,7 +3383,7 @@ void InstructionCodeGeneratorX86_64::VisitMul(HMul* mul) { break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { DCHECK(first.Equals(out)); if (second.IsFpuRegister()) { __ mulsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>()); @@ -3427,9 +3427,9 @@ void InstructionCodeGeneratorX86_64::PushOntoFPStack(Location source, uint32_t t } void InstructionCodeGeneratorX86_64::GenerateRemFP(HRem *rem) { - Primitive::Type type = rem->GetResultType(); - bool is_float = type == Primitive::kPrimFloat; - size_t elem_size = Primitive::ComponentSize(type); + DataType::Type type = rem->GetResultType(); + bool is_float = type == DataType::Type::kFloat32; + size_t elem_size = DataType::Size(type); LocationSummary* locations = rem->GetLocations(); Location first = locations->InAt(0); Location second = locations->InAt(1); @@ -3493,7 +3493,7 @@ void InstructionCodeGeneratorX86_64::DivRemOneOrMinusOne(HBinaryOperation* instr DCHECK(imm == 1 || imm == -1); switch (instruction->GetResultType()) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { if (instruction->IsRem()) { __ xorl(output_register, output_register); } else { @@ -3505,7 +3505,7 @@ void InstructionCodeGeneratorX86_64::DivRemOneOrMinusOne(HBinaryOperation* instr break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { if (instruction->IsRem()) { __ xorl(output_register, output_register); } else { @@ -3535,7 +3535,7 @@ void InstructionCodeGeneratorX86_64::DivByPowerOfTwo(HDiv* instruction) { CpuRegister tmp = locations->GetTemp(0).AsRegister<CpuRegister>(); - if (instruction->GetResultType() == Primitive::kPrimInt) { + if (instruction->GetResultType() == DataType::Type::kInt32) { __ leal(tmp, Address(numerator, abs_imm - 1)); __ testl(numerator, numerator); __ cmov(kGreaterEqual, tmp, numerator); @@ -3548,7 +3548,7 @@ void InstructionCodeGeneratorX86_64::DivByPowerOfTwo(HDiv* instruction) { __ movl(output_register, tmp); } else { - DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong); + DCHECK_EQ(instruction->GetResultType(), DataType::Type::kInt64); CpuRegister rdx = locations->GetTemp(0).AsRegister<CpuRegister>(); codegen_->Load64BitValue(rdx, abs_imm - 1); @@ -3591,7 +3591,7 @@ void InstructionCodeGeneratorX86_64::GenerateDivRemWithAnyConstant(HBinaryOperat int shift; // TODO: can these branches be written as one? 
- if (instruction->GetResultType() == Primitive::kPrimInt) { + if (instruction->GetResultType() == DataType::Type::kInt32) { int imm = second.GetConstant()->AsIntConstant()->GetValue(); CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift); @@ -3626,7 +3626,7 @@ void InstructionCodeGeneratorX86_64::GenerateDivRemWithAnyConstant(HBinaryOperat } else { int64_t imm = second.GetConstant()->AsLongConstant()->GetValue(); - DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong); + DCHECK_EQ(instruction->GetResultType(), DataType::Type::kInt64); CpuRegister rax = eax; CpuRegister rdx = edx; @@ -3679,8 +3679,8 @@ void InstructionCodeGeneratorX86_64::GenerateDivRemWithAnyConstant(HBinaryOperat void InstructionCodeGeneratorX86_64::GenerateDivRemIntegral(HBinaryOperation* instruction) { DCHECK(instruction->IsDiv() || instruction->IsRem()); - Primitive::Type type = instruction->GetResultType(); - DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong); + DataType::Type type = instruction->GetResultType(); + DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64); bool is_div = instruction->IsDiv(); LocationSummary* locations = instruction->GetLocations(); @@ -3714,7 +3714,7 @@ void InstructionCodeGeneratorX86_64::GenerateDivRemIntegral(HBinaryOperation* in // 0x80000000(00000000)/-1 triggers an arithmetic exception! // Dividing by -1 is actually negation and -0x800000000(00000000) = 0x80000000(00000000) // so it's safe to just use negl instead of more complex comparisons. - if (type == Primitive::kPrimInt) { + if (type == DataType::Type::kInt32) { __ cmpl(second_reg, Immediate(-1)); __ j(kEqual, slow_path->GetEntryLabel()); // edx:eax <- sign-extended of eax @@ -3737,8 +3737,8 @@ void LocationsBuilderX86_64::VisitDiv(HDiv* div) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall); switch (div->GetResultType()) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: { + case DataType::Type::kInt32: + case DataType::Type::kInt64: { locations->SetInAt(0, Location::RegisterLocation(RAX)); locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1))); locations->SetOut(Location::SameAsFirstInput()); @@ -3753,8 +3753,8 @@ void LocationsBuilderX86_64::VisitDiv(HDiv* div) { break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::Any()); locations->SetOut(Location::SameAsFirstInput()); @@ -3772,15 +3772,15 @@ void InstructionCodeGeneratorX86_64::VisitDiv(HDiv* div) { Location second = locations->InAt(1); DCHECK(first.Equals(locations->Out())); - Primitive::Type type = div->GetResultType(); + DataType::Type type = div->GetResultType(); switch (type) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: { + case DataType::Type::kInt32: + case DataType::Type::kInt64: { GenerateDivRemIntegral(div); break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { if (second.IsFpuRegister()) { __ divss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>()); } else if (second.IsConstant()) { @@ -3795,7 +3795,7 @@ void InstructionCodeGeneratorX86_64::VisitDiv(HDiv* div) { break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { if (second.IsFpuRegister()) { __ divsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>()); } else if (second.IsConstant()) { @@ 
-3816,13 +3816,13 @@ void InstructionCodeGeneratorX86_64::VisitDiv(HDiv* div) { } void LocationsBuilderX86_64::VisitRem(HRem* rem) { - Primitive::Type type = rem->GetResultType(); + DataType::Type type = rem->GetResultType(); LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, LocationSummary::kNoCall); switch (type) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: { + case DataType::Type::kInt32: + case DataType::Type::kInt64: { locations->SetInAt(0, Location::RegisterLocation(RAX)); locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1))); // Intel uses rdx:rax as the dividend and puts the remainder in rdx @@ -3836,8 +3836,8 @@ void LocationsBuilderX86_64::VisitRem(HRem* rem) { break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { locations->SetInAt(0, Location::Any()); locations->SetInAt(1, Location::Any()); locations->SetOut(Location::RequiresFpuRegister()); @@ -3851,15 +3851,15 @@ void LocationsBuilderX86_64::VisitRem(HRem* rem) { } void InstructionCodeGeneratorX86_64::VisitRem(HRem* rem) { - Primitive::Type type = rem->GetResultType(); + DataType::Type type = rem->GetResultType(); switch (type) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: { + case DataType::Type::kInt32: + case DataType::Type::kInt64: { GenerateDivRemIntegral(rem); break; } - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: { + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { GenerateRemFP(rem); break; } @@ -3882,11 +3882,11 @@ void InstructionCodeGeneratorX86_64::VisitDivZeroCheck(HDivZeroCheck* instructio Location value = locations->InAt(0); switch (instruction->GetType()) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: { + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: { if (value.IsRegister()) { __ testl(value.AsRegister<CpuRegister>(), value.AsRegister<CpuRegister>()); __ j(kEqual, slow_path->GetEntryLabel()); @@ -3901,7 +3901,7 @@ void InstructionCodeGeneratorX86_64::VisitDivZeroCheck(HDivZeroCheck* instructio } break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { if (value.IsRegister()) { __ testq(value.AsRegister<CpuRegister>(), value.AsRegister<CpuRegister>()); __ j(kEqual, slow_path->GetEntryLabel()); @@ -3928,8 +3928,8 @@ void LocationsBuilderX86_64::HandleShift(HBinaryOperation* op) { new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall); switch (op->GetResultType()) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: { + case DataType::Type::kInt32: + case DataType::Type::kInt64: { locations->SetInAt(0, Location::RequiresRegister()); // The shift count needs to be in CL. 
locations->SetInAt(1, Location::ByteRegisterOrConstant(RCX, op->InputAt(1))); @@ -3949,7 +3949,7 @@ void InstructionCodeGeneratorX86_64::HandleShift(HBinaryOperation* op) { Location second = locations->InAt(1); switch (op->GetResultType()) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { if (second.IsRegister()) { CpuRegister second_reg = second.AsRegister<CpuRegister>(); if (op->IsShl()) { @@ -3971,7 +3971,7 @@ void InstructionCodeGeneratorX86_64::HandleShift(HBinaryOperation* op) { } break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { if (second.IsRegister()) { CpuRegister second_reg = second.AsRegister<CpuRegister>(); if (op->IsShl()) { @@ -4004,8 +4004,8 @@ void LocationsBuilderX86_64::VisitRor(HRor* ror) { new (GetGraph()->GetArena()) LocationSummary(ror, LocationSummary::kNoCall); switch (ror->GetResultType()) { - case Primitive::kPrimInt: - case Primitive::kPrimLong: { + case DataType::Type::kInt32: + case DataType::Type::kInt64: { locations->SetInAt(0, Location::RequiresRegister()); // The shift count needs to be in CL (unless it is a constant). locations->SetInAt(1, Location::ByteRegisterOrConstant(RCX, ror->InputAt(1))); @@ -4024,7 +4024,7 @@ void InstructionCodeGeneratorX86_64::VisitRor(HRor* ror) { Location second = locations->InAt(1); switch (ror->GetResultType()) { - case Primitive::kPrimInt: + case DataType::Type::kInt32: if (second.IsRegister()) { CpuRegister second_reg = second.AsRegister<CpuRegister>(); __ rorl(first_reg, second_reg); @@ -4033,7 +4033,7 @@ void InstructionCodeGeneratorX86_64::VisitRor(HRor* ror) { __ rorl(first_reg, imm); } break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: if (second.IsRegister()) { CpuRegister second_reg = second.AsRegister<CpuRegister>(); __ rorq(first_reg, second_reg); @@ -4186,11 +4186,11 @@ void InstructionCodeGeneratorX86_64::VisitNot(HNot* not_) { locations->Out().AsRegister<CpuRegister>().AsRegister()); Location out = locations->Out(); switch (not_->GetResultType()) { - case Primitive::kPrimInt: + case DataType::Type::kInt32: __ notl(out.AsRegister<CpuRegister>()); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: __ notq(out.AsRegister<CpuRegister>()); break; @@ -4255,7 +4255,7 @@ void LocationsBuilderX86_64::HandleFieldGet(HInstruction* instruction) { DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet()); bool object_field_get_with_read_barrier = - kEmitCompilerReadBarrier && (instruction->GetType() == Primitive::kPrimNot); + kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference); LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, object_field_get_with_read_barrier ? @@ -4265,7 +4265,7 @@ void LocationsBuilderX86_64::HandleFieldGet(HInstruction* instruction) { locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. 
} locations->SetInAt(0, Location::RequiresRegister()); - if (Primitive::IsFloatingPointType(instruction->GetType())) { + if (DataType::IsFloatingPointType(instruction->GetType())) { locations->SetOut(Location::RequiresFpuRegister()); } else { // The output overlaps for an object field get when read barriers @@ -4286,36 +4286,36 @@ void InstructionCodeGeneratorX86_64::HandleFieldGet(HInstruction* instruction, CpuRegister base = base_loc.AsRegister<CpuRegister>(); Location out = locations->Out(); bool is_volatile = field_info.IsVolatile(); - Primitive::Type field_type = field_info.GetFieldType(); + DataType::Type field_type = field_info.GetFieldType(); uint32_t offset = field_info.GetFieldOffset().Uint32Value(); switch (field_type) { - case Primitive::kPrimBoolean: { + case DataType::Type::kBool: { __ movzxb(out.AsRegister<CpuRegister>(), Address(base, offset)); break; } - case Primitive::kPrimByte: { + case DataType::Type::kInt8: { __ movsxb(out.AsRegister<CpuRegister>(), Address(base, offset)); break; } - case Primitive::kPrimShort: { + case DataType::Type::kInt16: { __ movsxw(out.AsRegister<CpuRegister>(), Address(base, offset)); break; } - case Primitive::kPrimChar: { + case DataType::Type::kUint16: { __ movzxw(out.AsRegister<CpuRegister>(), Address(base, offset)); break; } - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { __ movl(out.AsRegister<CpuRegister>(), Address(base, offset)); break; } - case Primitive::kPrimNot: { + case DataType::Type::kReference: { // /* HeapReference<Object> */ out = *(base + offset) if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { // Note that a potential implicit null check is handled in this @@ -4339,27 +4339,27 @@ void InstructionCodeGeneratorX86_64::HandleFieldGet(HInstruction* instruction, break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { __ movq(out.AsRegister<CpuRegister>(), Address(base, offset)); break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { __ movss(out.AsFpuRegister<XmmRegister>(), Address(base, offset)); break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { __ movsd(out.AsFpuRegister<XmmRegister>(), Address(base, offset)); break; } - case Primitive::kPrimVoid: + case DataType::Type::kVoid: LOG(FATAL) << "Unreachable type " << field_type; UNREACHABLE(); } - if (field_type == Primitive::kPrimNot) { + if (field_type == DataType::Type::kReference) { // Potential implicit null checks, in the case of reference // fields, are handled in the previous switch statement. } else { @@ -4367,7 +4367,7 @@ void InstructionCodeGeneratorX86_64::HandleFieldGet(HInstruction* instruction, } if (is_volatile) { - if (field_type == Primitive::kPrimNot) { + if (field_type == DataType::Type::kReference) { // Memory barriers, in the case of references, are also handled // in the previous switch statement. 
} else { @@ -4382,13 +4382,13 @@ void LocationsBuilderX86_64::HandleFieldSet(HInstruction* instruction, LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); - Primitive::Type field_type = field_info.GetFieldType(); + DataType::Type field_type = field_info.GetFieldType(); bool is_volatile = field_info.IsVolatile(); bool needs_write_barrier = CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1)); locations->SetInAt(0, Location::RequiresRegister()); - if (Primitive::IsFloatingPointType(instruction->InputAt(1)->GetType())) { + if (DataType::IsFloatingPointType(instruction->InputAt(1)->GetType())) { if (is_volatile) { // In order to satisfy the semantics of volatile, this must be a single instruction store. locations->SetInAt(1, Location::FpuRegisterOrInt32Constant(instruction->InputAt(1))); @@ -4407,7 +4407,7 @@ void LocationsBuilderX86_64::HandleFieldSet(HInstruction* instruction, // Temporary registers for the write barrier. locations->AddTemp(Location::RequiresRegister()); // Possibly used for reference poisoning too. locations->AddTemp(Location::RequiresRegister()); - } else if (kPoisonHeapReferences && field_type == Primitive::kPrimNot) { + } else if (kPoisonHeapReferences && field_type == DataType::Type::kReference) { // Temporary register for the reference poisoning. locations->AddTemp(Location::RequiresRegister()); } @@ -4422,7 +4422,7 @@ void InstructionCodeGeneratorX86_64::HandleFieldSet(HInstruction* instruction, CpuRegister base = locations->InAt(0).AsRegister<CpuRegister>(); Location value = locations->InAt(1); bool is_volatile = field_info.IsVolatile(); - Primitive::Type field_type = field_info.GetFieldType(); + DataType::Type field_type = field_info.GetFieldType(); uint32_t offset = field_info.GetFieldOffset().Uint32Value(); if (is_volatile) { @@ -4432,8 +4432,8 @@ void InstructionCodeGeneratorX86_64::HandleFieldSet(HInstruction* instruction, bool maybe_record_implicit_null_check_done = false; switch (field_type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: { + case DataType::Type::kBool: + case DataType::Type::kInt8: { if (value.IsConstant()) { __ movb(Address(base, offset), Immediate(CodeGenerator::GetInt8ValueOf(value.GetConstant()))); @@ -4443,8 +4443,8 @@ void InstructionCodeGeneratorX86_64::HandleFieldSet(HInstruction* instruction, break; } - case Primitive::kPrimShort: - case Primitive::kPrimChar: { + case DataType::Type::kInt16: + case DataType::Type::kUint16: { if (value.IsConstant()) { __ movw(Address(base, offset), Immediate(CodeGenerator::GetInt16ValueOf(value.GetConstant()))); @@ -4454,17 +4454,17 @@ void InstructionCodeGeneratorX86_64::HandleFieldSet(HInstruction* instruction, break; } - case Primitive::kPrimInt: - case Primitive::kPrimNot: { + case DataType::Type::kInt32: + case DataType::Type::kReference: { if (value.IsConstant()) { int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant()); - // `field_type == Primitive::kPrimNot` implies `v == 0`. - DCHECK((field_type != Primitive::kPrimNot) || (v == 0)); + // `field_type == DataType::Type::kReference` implies `v == 0`. + DCHECK((field_type != DataType::Type::kReference) || (v == 0)); // Note: if heap poisoning is enabled, no need to poison // (negate) `v` if it is a reference, as it would be null. 
__ movl(Address(base, offset), Immediate(v)); } else { - if (kPoisonHeapReferences && field_type == Primitive::kPrimNot) { + if (kPoisonHeapReferences && field_type == DataType::Type::kReference) { CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>(); __ movl(temp, value.AsRegister<CpuRegister>()); __ PoisonHeapReference(temp); @@ -4476,7 +4476,7 @@ void InstructionCodeGeneratorX86_64::HandleFieldSet(HInstruction* instruction, break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { if (value.IsConstant()) { int64_t v = value.GetConstant()->AsLongConstant()->GetValue(); codegen_->MoveInt64ToAddress(Address(base, offset), @@ -4490,7 +4490,7 @@ void InstructionCodeGeneratorX86_64::HandleFieldSet(HInstruction* instruction, break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { if (value.IsConstant()) { int32_t v = bit_cast<int32_t, float>(value.GetConstant()->AsFloatConstant()->GetValue()); @@ -4501,7 +4501,7 @@ void InstructionCodeGeneratorX86_64::HandleFieldSet(HInstruction* instruction, break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { if (value.IsConstant()) { int64_t v = bit_cast<int64_t, double>(value.GetConstant()->AsDoubleConstant()->GetValue()); @@ -4516,7 +4516,7 @@ void InstructionCodeGeneratorX86_64::HandleFieldSet(HInstruction* instruction, break; } - case Primitive::kPrimVoid: + case DataType::Type::kVoid: LOG(FATAL) << "Unreachable type " << field_type; UNREACHABLE(); } @@ -4681,7 +4681,7 @@ void InstructionCodeGeneratorX86_64::VisitNullCheck(HNullCheck* instruction) { void LocationsBuilderX86_64::VisitArrayGet(HArrayGet* instruction) { bool object_array_get_with_read_barrier = - kEmitCompilerReadBarrier && (instruction->GetType() == Primitive::kPrimNot); + kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference); LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, object_array_get_with_read_barrier ? 
@@ -4692,7 +4692,7 @@ void LocationsBuilderX86_64::VisitArrayGet(HArrayGet* instruction) { } locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); - if (Primitive::IsFloatingPointType(instruction->GetType())) { + if (DataType::IsFloatingPointType(instruction->GetType())) { locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); } else { // The output overlaps for an object array get when read barriers @@ -4712,27 +4712,27 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) { Location out_loc = locations->Out(); uint32_t data_offset = CodeGenerator::GetArrayDataOffset(instruction); - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); switch (type) { - case Primitive::kPrimBoolean: { + case DataType::Type::kBool: { CpuRegister out = out_loc.AsRegister<CpuRegister>(); __ movzxb(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_1, data_offset)); break; } - case Primitive::kPrimByte: { + case DataType::Type::kInt8: { CpuRegister out = out_loc.AsRegister<CpuRegister>(); __ movsxb(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_1, data_offset)); break; } - case Primitive::kPrimShort: { + case DataType::Type::kInt16: { CpuRegister out = out_loc.AsRegister<CpuRegister>(); __ movsxw(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_2, data_offset)); break; } - case Primitive::kPrimChar: { + case DataType::Type::kUint16: { CpuRegister out = out_loc.AsRegister<CpuRegister>(); if (mirror::kUseStringCompression && instruction->IsStringCharAt()) { // Branch cases into compressed and uncompressed for each index's type. @@ -4754,13 +4754,13 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { CpuRegister out = out_loc.AsRegister<CpuRegister>(); __ movl(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_4, data_offset)); break; } - case Primitive::kPrimNot: { + case DataType::Type::kReference: { static_assert( sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t), "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes."); @@ -4790,30 +4790,30 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { CpuRegister out = out_loc.AsRegister<CpuRegister>(); __ movq(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_8, data_offset)); break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { XmmRegister out = out_loc.AsFpuRegister<XmmRegister>(); __ movss(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_4, data_offset)); break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { XmmRegister out = out_loc.AsFpuRegister<XmmRegister>(); __ movsd(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_8, data_offset)); break; } - case Primitive::kPrimVoid: + case DataType::Type::kVoid: LOG(FATAL) << "Unreachable type " << type; UNREACHABLE(); } - if (type == Primitive::kPrimNot) { + if (type == DataType::Type::kReference) { // Potential implicit null checks, in the case of reference // arrays, are handled in the previous switch statement. 
} else { @@ -4822,7 +4822,7 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) { } void LocationsBuilderX86_64::VisitArraySet(HArraySet* instruction) { - Primitive::Type value_type = instruction->GetComponentType(); + DataType::Type value_type = instruction->GetComponentType(); bool needs_write_barrier = CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue()); @@ -4836,7 +4836,7 @@ void LocationsBuilderX86_64::VisitArraySet(HArraySet* instruction) { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); - if (Primitive::IsFloatingPointType(value_type)) { + if (DataType::IsFloatingPointType(value_type)) { locations->SetInAt(2, Location::FpuRegisterOrConstant(instruction->InputAt(2))); } else { locations->SetInAt(2, Location::RegisterOrConstant(instruction->InputAt(2))); @@ -4855,7 +4855,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) { CpuRegister array = array_loc.AsRegister<CpuRegister>(); Location index = locations->InAt(1); Location value = locations->InAt(2); - Primitive::Type value_type = instruction->GetComponentType(); + DataType::Type value_type = instruction->GetComponentType(); bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck(); bool needs_write_barrier = CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue()); @@ -4864,8 +4864,8 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) { uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value(); switch (value_type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: { + case DataType::Type::kBool: + case DataType::Type::kInt8: { uint32_t offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value(); Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_1, offset); if (value.IsRegister()) { @@ -4877,8 +4877,8 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimShort: - case Primitive::kPrimChar: { + case DataType::Type::kInt16: + case DataType::Type::kUint16: { uint32_t offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value(); Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_2, offset); if (value.IsRegister()) { @@ -4891,7 +4891,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimNot: { + case DataType::Type::kReference: { uint32_t offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value(); Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_4, offset); @@ -4987,7 +4987,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { uint32_t offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value(); Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_4, offset); if (value.IsRegister()) { @@ -5001,7 +5001,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { uint32_t offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value(); Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_8, offset); if (value.IsRegister()) { @@ -5016,7 +5016,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) { break; } - case 
Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { uint32_t offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value(); Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_4, offset); if (value.IsFpuRegister()) { @@ -5030,7 +5030,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { uint32_t offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value(); Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_8, offset); if (value.IsFpuRegister()) { @@ -5046,7 +5046,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimVoid: + case DataType::Type::kVoid: LOG(FATAL) << "Unreachable type " << instruction->GetType(); UNREACHABLE(); } @@ -6361,8 +6361,8 @@ void LocationsBuilderX86_64::VisitXor(HXor* instruction) { HandleBitwiseOperatio void LocationsBuilderX86_64::HandleBitwiseOperation(HBinaryOperation* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); - DCHECK(instruction->GetResultType() == Primitive::kPrimInt - || instruction->GetResultType() == Primitive::kPrimLong); + DCHECK(instruction->GetResultType() == DataType::Type::kInt32 + || instruction->GetResultType() == DataType::Type::kInt64); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::Any()); locations->SetOut(Location::SameAsFirstInput()); @@ -6386,7 +6386,7 @@ void InstructionCodeGeneratorX86_64::HandleBitwiseOperation(HBinaryOperation* in Location second = locations->InAt(1); DCHECK(first.Equals(locations->Out())); - if (instruction->GetResultType() == Primitive::kPrimInt) { + if (instruction->GetResultType() == DataType::Type::kInt32) { if (second.IsRegister()) { if (instruction->IsAnd()) { __ andl(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>()); @@ -6418,7 +6418,7 @@ void InstructionCodeGeneratorX86_64::HandleBitwiseOperation(HBinaryOperation* in } } } else { - DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong); + DCHECK_EQ(instruction->GetResultType(), DataType::Type::kInt64); CpuRegister first_reg = first.AsRegister<CpuRegister>(); bool second_is_constant = false; int64_t value = 0; @@ -7096,13 +7096,13 @@ Address CodeGeneratorX86_64::LiteralInt64Address(int64_t v) { } // TODO: trg as memory. 
-void CodeGeneratorX86_64::MoveFromReturnRegister(Location trg, Primitive::Type type) { +void CodeGeneratorX86_64::MoveFromReturnRegister(Location trg, DataType::Type type) { if (!trg.IsValid()) { - DCHECK_EQ(type, Primitive::kPrimVoid); + DCHECK_EQ(type, DataType::Type::kVoid); return; } - DCHECK_NE(type, Primitive::kPrimVoid); + DCHECK_NE(type, DataType::Type::kVoid); Location return_loc = InvokeDexCallingConventionVisitorX86_64().GetReturnLocation(type); if (trg.Equals(return_loc)) { diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h index 8e8e695a64..6f67a45f25 100644 --- a/compiler/optimizing/code_generator_x86_64.h +++ b/compiler/optimizing/code_generator_x86_64.h @@ -89,16 +89,16 @@ class FieldAccessCallingConventionX86_64 : public FieldAccessCallingConvention { Location GetFieldIndexLocation() const OVERRIDE { return Location::RegisterLocation(RDI); } - Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE { + Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE { return Location::RegisterLocation(RAX); } - Location GetSetValueLocation(Primitive::Type type ATTRIBUTE_UNUSED, bool is_instance) + Location GetSetValueLocation(DataType::Type type ATTRIBUTE_UNUSED, bool is_instance) const OVERRIDE { return is_instance ? Location::RegisterLocation(RDX) : Location::RegisterLocation(RSI); } - Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE { + Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE { return Location::FpuRegisterLocation(XMM0); } @@ -112,8 +112,8 @@ class InvokeDexCallingConventionVisitorX86_64 : public InvokeDexCallingConventio InvokeDexCallingConventionVisitorX86_64() {} virtual ~InvokeDexCallingConventionVisitorX86_64() {} - Location GetNextLocation(Primitive::Type type) OVERRIDE; - Location GetReturnLocation(Primitive::Type type) const OVERRIDE; + Location GetNextLocation(DataType::Type type) OVERRIDE; + Location GetReturnLocation(DataType::Type type) const OVERRIDE; Location GetMethodLocation() const OVERRIDE; private: @@ -299,7 +299,7 @@ class CodeGeneratorX86_64 : public CodeGenerator { void GenerateFrameExit() OVERRIDE; void Bind(HBasicBlock* block) OVERRIDE; void MoveConstant(Location destination, int32_t value) OVERRIDE; - void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE; + void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE; void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE; size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; @@ -384,7 +384,7 @@ class CodeGeneratorX86_64 : public CodeGenerator { block_labels_ = CommonInitializeLabels<Label>(); } - bool NeedsTwoRegisters(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE { + bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE { return false; } @@ -422,7 +422,7 @@ class CodeGeneratorX86_64 : public CodeGenerator { dex::TypeIndex dex_index, Handle<mirror::Class> handle); - void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE; + void MoveFromReturnRegister(Location trg, DataType::Type type) OVERRIDE; void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE; diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc index 0a8e97cf0d..896fcfa20d 100644 --- a/compiler/optimizing/codegen_test.cc +++ b/compiler/optimizing/codegen_test.cc @@ -91,7 +91,7 @@ static 
void TestCodeLong(const uint16_t* data, for (const CodegenTargetConfig& target_config : GetTargetConfigs()) { ArenaPool pool; ArenaAllocator arena(&pool); - HGraph* graph = CreateCFG(&arena, data, Primitive::kPrimLong); + HGraph* graph = CreateCFG(&arena, data, DataType::Type::kInt64); // Remove suspend checks, they cannot be executed in this context. RemoveSuspendChecks(graph); RunCode(target_config, graph, [](HGraph*) {}, has_result, expected); @@ -602,7 +602,7 @@ TEST_F(CodegenTest, ReturnDivInt2Addr) { static void TestComparison(IfCondition condition, int64_t i, int64_t j, - Primitive::Type type, + DataType::Type type, const CodegenTargetConfig target_config) { ArenaPool pool; ArenaAllocator allocator(&pool); @@ -626,11 +626,11 @@ static void TestComparison(IfCondition condition, HInstruction* op1; HInstruction* op2; - if (type == Primitive::kPrimInt) { + if (type == DataType::Type::kInt32) { op1 = graph->GetIntConstant(i); op2 = graph->GetIntConstant(j); } else { - DCHECK_EQ(type, Primitive::kPrimLong); + DCHECK_EQ(type, DataType::Type::kInt64); op1 = graph->GetLongConstant(i); op2 = graph->GetLongConstant(j); } @@ -693,7 +693,8 @@ TEST_F(CodegenTest, ComparisonsInt) { for (int64_t i = -1; i <= 1; i++) { for (int64_t j = -1; j <= 1; j++) { for (int cond = kCondFirst; cond <= kCondLast; cond++) { - TestComparison(static_cast<IfCondition>(cond), i, j, Primitive::kPrimInt, target_config); + TestComparison( + static_cast<IfCondition>(cond), i, j, DataType::Type::kInt32, target_config); } } } @@ -705,7 +706,8 @@ TEST_F(CodegenTest, ComparisonsLong) { for (int64_t i = -1; i <= 1; i++) { for (int64_t j = -1; j <= 1; j++) { for (int cond = kCondFirst; cond <= kCondLast; cond++) { - TestComparison(static_cast<IfCondition>(cond), i, j, Primitive::kPrimLong, target_config); + TestComparison( + static_cast<IfCondition>(cond), i, j, DataType::Type::kInt64, target_config); } } } @@ -728,8 +730,8 @@ TEST_F(CodegenTest, ARMVIXLParallelMoveResolver) { // used as temps; however GPR scratch register is required for big stack offsets which don't fit // LDR encoding. So the following code is a regression test for that situation. 
HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena()); - move->AddMove(Location::StackSlot(0), Location::StackSlot(8192), Primitive::kPrimInt, nullptr); - move->AddMove(Location::StackSlot(8192), Location::StackSlot(0), Primitive::kPrimInt, nullptr); + move->AddMove(Location::StackSlot(0), Location::StackSlot(8192), DataType::Type::kInt32, nullptr); + move->AddMove(Location::StackSlot(8192), Location::StackSlot(0), DataType::Type::kInt32, nullptr); codegen.GetMoveResolver()->EmitNativeCode(move); InternalCodeAllocator code_allocator; @@ -778,11 +780,11 @@ TEST_F(CodegenTest, ARM64ParallelMoveResolverB34760542) { HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena()); move->AddMove(Location::DoubleStackSlot(0), Location::DoubleStackSlot(257), - Primitive::kPrimDouble, + DataType::Type::kFloat64, nullptr); move->AddMove(Location::DoubleStackSlot(257), Location::DoubleStackSlot(0), - Primitive::kPrimDouble, + DataType::Type::kFloat64, nullptr); codegen.GetMoveResolver()->EmitNativeCode(move); @@ -806,19 +808,19 @@ TEST_F(CodegenTest, ARM64ParallelMoveResolverSIMD) { HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena()); move->AddMove(Location::SIMDStackSlot(0), Location::SIMDStackSlot(257), - Primitive::kPrimDouble, + DataType::Type::kFloat64, nullptr); move->AddMove(Location::SIMDStackSlot(257), Location::SIMDStackSlot(0), - Primitive::kPrimDouble, + DataType::Type::kFloat64, nullptr); move->AddMove(Location::FpuRegisterLocation(0), Location::FpuRegisterLocation(1), - Primitive::kPrimDouble, + DataType::Type::kFloat64, nullptr); move->AddMove(Location::FpuRegisterLocation(1), Location::FpuRegisterLocation(0), - Primitive::kPrimDouble, + DataType::Type::kFloat64, nullptr); codegen.GetMoveResolver()->EmitNativeCode(move); graph->SetHasSIMD(false); diff --git a/compiler/optimizing/common_arm.h b/compiler/optimizing/common_arm.h index e354654ee8..356ff9f41f 100644 --- a/compiler/optimizing/common_arm.h +++ b/compiler/optimizing/common_arm.h @@ -76,8 +76,8 @@ inline vixl::aarch32::Register RegisterFrom(Location location) { return vixl::aarch32::Register(location.reg()); } -inline vixl::aarch32::Register RegisterFrom(Location location, Primitive::Type type) { - DCHECK(type != Primitive::kPrimVoid && !Primitive::IsFloatingPointType(type)) << type; +inline vixl::aarch32::Register RegisterFrom(Location location, DataType::Type type) { + DCHECK(type != DataType::Type::kVoid && !DataType::IsFloatingPointType(type)) << type; return RegisterFrom(location); } @@ -94,20 +94,20 @@ inline vixl::aarch32::SRegister SRegisterFrom(Location location) { } inline vixl::aarch32::SRegister OutputSRegister(HInstruction* instr) { - Primitive::Type type = instr->GetType(); - DCHECK_EQ(type, Primitive::kPrimFloat) << type; + DataType::Type type = instr->GetType(); + DCHECK_EQ(type, DataType::Type::kFloat32) << type; return SRegisterFrom(instr->GetLocations()->Out()); } inline vixl::aarch32::DRegister OutputDRegister(HInstruction* instr) { - Primitive::Type type = instr->GetType(); - DCHECK_EQ(type, Primitive::kPrimDouble) << type; + DataType::Type type = instr->GetType(); + DCHECK_EQ(type, DataType::Type::kFloat64) << type; return DRegisterFrom(instr->GetLocations()->Out()); } inline vixl::aarch32::VRegister OutputVRegister(HInstruction* instr) { - Primitive::Type type = instr->GetType(); - if (type == Primitive::kPrimFloat) { + DataType::Type type = instr->GetType(); + if (type == DataType::Type::kFloat32) { return OutputSRegister(instr); } else { 
return OutputDRegister(instr); @@ -115,23 +115,23 @@ inline vixl::aarch32::VRegister OutputVRegister(HInstruction* instr) { } inline vixl::aarch32::SRegister InputSRegisterAt(HInstruction* instr, int input_index) { - Primitive::Type type = instr->InputAt(input_index)->GetType(); - DCHECK_EQ(type, Primitive::kPrimFloat) << type; + DataType::Type type = instr->InputAt(input_index)->GetType(); + DCHECK_EQ(type, DataType::Type::kFloat32) << type; return SRegisterFrom(instr->GetLocations()->InAt(input_index)); } inline vixl::aarch32::DRegister InputDRegisterAt(HInstruction* instr, int input_index) { - Primitive::Type type = instr->InputAt(input_index)->GetType(); - DCHECK_EQ(type, Primitive::kPrimDouble) << type; + DataType::Type type = instr->InputAt(input_index)->GetType(); + DCHECK_EQ(type, DataType::Type::kFloat64) << type; return DRegisterFrom(instr->GetLocations()->InAt(input_index)); } inline vixl::aarch32::VRegister InputVRegisterAt(HInstruction* instr, int input_index) { - Primitive::Type type = instr->InputAt(input_index)->GetType(); - if (type == Primitive::kPrimFloat) { + DataType::Type type = instr->InputAt(input_index)->GetType(); + if (type == DataType::Type::kFloat32) { return InputSRegisterAt(instr, input_index); } else { - DCHECK_EQ(type, Primitive::kPrimDouble); + DCHECK_EQ(type, DataType::Type::kFloat64); return InputDRegisterAt(instr, input_index); } } @@ -196,7 +196,7 @@ inline uint64_t Uint64ConstantFrom(HInstruction* instr) { return instr->AsConstant()->GetValueAsUint64(); } -inline vixl::aarch32::Operand OperandFrom(Location location, Primitive::Type type) { +inline vixl::aarch32::Operand OperandFrom(Location location, DataType::Type type) { if (location.IsRegister()) { return vixl::aarch32::Operand(RegisterFrom(location, type)); } else { diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h index e73fd7ddc8..102acb3423 100644 --- a/compiler/optimizing/common_arm64.h +++ b/compiler/optimizing/common_arm64.h @@ -73,9 +73,9 @@ inline vixl::aarch64::Register WRegisterFrom(Location location) { return vixl::aarch64::Register::GetWRegFromCode(VIXLRegCodeFromART(location.reg())); } -inline vixl::aarch64::Register RegisterFrom(Location location, Primitive::Type type) { - DCHECK(type != Primitive::kPrimVoid && !Primitive::IsFloatingPointType(type)) << type; - return type == Primitive::kPrimLong ? XRegisterFrom(location) : WRegisterFrom(location); +inline vixl::aarch64::Register RegisterFrom(Location location, DataType::Type type) { + DCHECK(type != DataType::Type::kVoid && !DataType::IsFloatingPointType(type)) << type; + return type == DataType::Type::kInt64 ? XRegisterFrom(location) : WRegisterFrom(location); } inline vixl::aarch64::Register OutputRegister(HInstruction* instr) { @@ -107,9 +107,9 @@ inline vixl::aarch64::FPRegister SRegisterFrom(Location location) { return vixl::aarch64::FPRegister::GetSRegFromCode(location.reg()); } -inline vixl::aarch64::FPRegister FPRegisterFrom(Location location, Primitive::Type type) { - DCHECK(Primitive::IsFloatingPointType(type)) << type; - return type == Primitive::kPrimDouble ? DRegisterFrom(location) : SRegisterFrom(location); +inline vixl::aarch64::FPRegister FPRegisterFrom(Location location, DataType::Type type) { + DCHECK(DataType::IsFloatingPointType(type)) << type; + return type == DataType::Type::kFloat64 ? 
DRegisterFrom(location) : SRegisterFrom(location); } inline vixl::aarch64::FPRegister OutputFPRegister(HInstruction* instr) { @@ -121,20 +121,20 @@ inline vixl::aarch64::FPRegister InputFPRegisterAt(HInstruction* instr, int inpu instr->InputAt(input_index)->GetType()); } -inline vixl::aarch64::CPURegister CPURegisterFrom(Location location, Primitive::Type type) { - return Primitive::IsFloatingPointType(type) +inline vixl::aarch64::CPURegister CPURegisterFrom(Location location, DataType::Type type) { + return DataType::IsFloatingPointType(type) ? vixl::aarch64::CPURegister(FPRegisterFrom(location, type)) : vixl::aarch64::CPURegister(RegisterFrom(location, type)); } inline vixl::aarch64::CPURegister OutputCPURegister(HInstruction* instr) { - return Primitive::IsFloatingPointType(instr->GetType()) + return DataType::IsFloatingPointType(instr->GetType()) ? static_cast<vixl::aarch64::CPURegister>(OutputFPRegister(instr)) : static_cast<vixl::aarch64::CPURegister>(OutputRegister(instr)); } inline vixl::aarch64::CPURegister InputCPURegisterAt(HInstruction* instr, int index) { - return Primitive::IsFloatingPointType(instr->InputAt(index)->GetType()) + return DataType::IsFloatingPointType(instr->InputAt(index)->GetType()) ? static_cast<vixl::aarch64::CPURegister>(InputFPRegisterAt(instr, index)) : static_cast<vixl::aarch64::CPURegister>(InputRegisterAt(instr, index)); } @@ -142,9 +142,9 @@ inline vixl::aarch64::CPURegister InputCPURegisterAt(HInstruction* instr, int in inline vixl::aarch64::CPURegister InputCPURegisterOrZeroRegAt(HInstruction* instr, int index) { HInstruction* input = instr->InputAt(index); - Primitive::Type input_type = input->GetType(); + DataType::Type input_type = input->GetType(); if (input->IsConstant() && input->AsConstant()->IsZeroBitPattern()) { - return (Primitive::ComponentSize(input_type) >= vixl::aarch64::kXRegSizeInBytes) + return (DataType::Size(input_type) >= vixl::aarch64::kXRegSizeInBytes) ? 
vixl::aarch64::Register(vixl::aarch64::xzr) : vixl::aarch64::Register(vixl::aarch64::wzr); } @@ -163,7 +163,7 @@ inline int64_t Int64ConstantFrom(Location location) { } } -inline vixl::aarch64::Operand OperandFrom(Location location, Primitive::Type type) { +inline vixl::aarch64::Operand OperandFrom(Location location, DataType::Type type) { if (location.IsRegister()) { return vixl::aarch64::Operand(RegisterFrom(location, type)); } else { @@ -202,7 +202,7 @@ inline vixl::aarch64::MemOperand HeapOperand(const vixl::aarch64::Register& base } inline vixl::aarch64::MemOperand HeapOperandFrom(Location location, Offset offset) { - return HeapOperand(RegisterFrom(location, Primitive::kPrimNot), offset); + return HeapOperand(RegisterFrom(location, DataType::Type::kReference), offset); } inline Location LocationFrom(const vixl::aarch64::Register& reg) { diff --git a/compiler/optimizing/constant_folding.cc b/compiler/optimizing/constant_folding.cc index 5f39a49d68..bb586bf096 100644 --- a/compiler/optimizing/constant_folding.cc +++ b/compiler/optimizing/constant_folding.cc @@ -150,7 +150,7 @@ void InstructionWithAbsorbingInputSimplifier::VisitEqual(HEqual* instruction) { // EQUAL lhs, null // where lhs cannot be null with // CONSTANT false - instruction->ReplaceWith(GetGraph()->GetConstant(Primitive::kPrimBoolean, 0)); + instruction->ReplaceWith(GetGraph()->GetConstant(DataType::Type::kBool, 0)); instruction->GetBlock()->RemoveInstruction(instruction); } } @@ -162,7 +162,7 @@ void InstructionWithAbsorbingInputSimplifier::VisitNotEqual(HNotEqual* instructi // NOT_EQUAL lhs, null // where lhs cannot be null with // CONSTANT true - instruction->ReplaceWith(GetGraph()->GetConstant(Primitive::kPrimBoolean, 1)); + instruction->ReplaceWith(GetGraph()->GetConstant(DataType::Type::kBool, 1)); instruction->GetBlock()->RemoveInstruction(instruction); } } @@ -174,7 +174,7 @@ void InstructionWithAbsorbingInputSimplifier::VisitAbove(HAbove* instruction) { // ABOVE dst, 0, src // unsigned 0 > src is always false // with // CONSTANT false - instruction->ReplaceWith(GetGraph()->GetConstant(Primitive::kPrimBoolean, 0)); + instruction->ReplaceWith(GetGraph()->GetConstant(DataType::Type::kBool, 0)); instruction->GetBlock()->RemoveInstruction(instruction); } } @@ -186,7 +186,7 @@ void InstructionWithAbsorbingInputSimplifier::VisitAboveOrEqual(HAboveOrEqual* i // ABOVE_OR_EQUAL dst, src, 0 // unsigned src >= 0 is always true // with // CONSTANT true - instruction->ReplaceWith(GetGraph()->GetConstant(Primitive::kPrimBoolean, 1)); + instruction->ReplaceWith(GetGraph()->GetConstant(DataType::Type::kBool, 1)); instruction->GetBlock()->RemoveInstruction(instruction); } } @@ -198,7 +198,7 @@ void InstructionWithAbsorbingInputSimplifier::VisitBelow(HBelow* instruction) { // BELOW dst, src, 0 // unsigned src < 0 is always false // with // CONSTANT false - instruction->ReplaceWith(GetGraph()->GetConstant(Primitive::kPrimBoolean, 0)); + instruction->ReplaceWith(GetGraph()->GetConstant(DataType::Type::kBool, 0)); instruction->GetBlock()->RemoveInstruction(instruction); } } @@ -210,7 +210,7 @@ void InstructionWithAbsorbingInputSimplifier::VisitBelowOrEqual(HBelowOrEqual* i // BELOW_OR_EQUAL dst, 0, src // unsigned 0 <= src is always true // with // CONSTANT true - instruction->ReplaceWith(GetGraph()->GetConstant(Primitive::kPrimBoolean, 1)); + instruction->ReplaceWith(GetGraph()->GetConstant(DataType::Type::kBool, 1)); instruction->GetBlock()->RemoveInstruction(instruction); } } @@ -231,7 +231,7 @@ void 
InstructionWithAbsorbingInputSimplifier::VisitCompare(HCompare* instruction HConstant* input_cst = instruction->GetConstantRight(); if (input_cst != nullptr) { HInstruction* input_value = instruction->GetLeastConstantLeft(); - if (Primitive::IsFloatingPointType(input_value->GetType()) && + if (DataType::IsFloatingPointType(input_value->GetType()) && ((input_cst->IsFloatConstant() && input_cst->AsFloatConstant()->IsNaN()) || (input_cst->IsDoubleConstant() && input_cst->AsDoubleConstant()->IsNaN()))) { // Replace code looking like @@ -240,7 +240,7 @@ void InstructionWithAbsorbingInputSimplifier::VisitCompare(HCompare* instruction // CONSTANT +1 (gt bias) // or // CONSTANT -1 (lt bias) - instruction->ReplaceWith(GetGraph()->GetConstant(Primitive::kPrimInt, + instruction->ReplaceWith(GetGraph()->GetConstant(DataType::Type::kInt32, (instruction->IsGtBias() ? 1 : -1))); instruction->GetBlock()->RemoveInstruction(instruction); } @@ -249,8 +249,8 @@ void InstructionWithAbsorbingInputSimplifier::VisitCompare(HCompare* instruction void InstructionWithAbsorbingInputSimplifier::VisitMul(HMul* instruction) { HConstant* input_cst = instruction->GetConstantRight(); - Primitive::Type type = instruction->GetType(); - if (Primitive::IsIntOrLongType(type) && + DataType::Type type = instruction->GetType(); + if (DataType::IsIntOrLongType(type) && (input_cst != nullptr) && input_cst->IsArithmeticZero()) { // Replace code looking like // MUL dst, src, 0 @@ -282,9 +282,9 @@ void InstructionWithAbsorbingInputSimplifier::VisitOr(HOr* instruction) { } void InstructionWithAbsorbingInputSimplifier::VisitRem(HRem* instruction) { - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); - if (!Primitive::IsIntegralType(type)) { + if (!DataType::IsIntegralType(type)) { return; } @@ -326,9 +326,9 @@ void InstructionWithAbsorbingInputSimplifier::VisitShr(HShr* instruction) { } void InstructionWithAbsorbingInputSimplifier::VisitSub(HSub* instruction) { - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); - if (!Primitive::IsIntegralType(type)) { + if (!DataType::IsIntegralType(type)) { return; } @@ -360,7 +360,7 @@ void InstructionWithAbsorbingInputSimplifier::VisitXor(HXor* instruction) { // XOR dst, src, src // with // CONSTANT 0 - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); HBasicBlock* block = instruction->GetBlock(); instruction->ReplaceWith(GetGraph()->GetConstant(type, 0)); block->RemoveInstruction(instruction); diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc index 7ef28ed910..c85a2e3e70 100644 --- a/compiler/optimizing/constant_folding_test.cc +++ b/compiler/optimizing/constant_folding_test.cc @@ -43,7 +43,7 @@ class ConstantFoldingTest : public CommonCompilerTest { const std::string& expected_after_cf, const std::string& expected_after_dce, const std::function<void(HGraph*)>& check_after_cf, - Primitive::Type return_type = Primitive::kPrimInt) { + DataType::Type return_type = DataType::Type::kInt32) { graph_ = CreateCFG(&allocator_, data, return_type); TestCodeOnReadyGraph(expected_before, expected_after_cf, @@ -208,7 +208,7 @@ TEST_F(ConstantFoldingTest, LongConstantFoldingNegation) { expected_after_cf, expected_after_dce, check_after_cf, - Primitive::kPrimLong); + DataType::Type::kInt64); } /** @@ -483,7 +483,7 @@ TEST_F(ConstantFoldingTest, LongConstantFoldingOnAddition) { expected_after_cf, expected_after_dce, 
check_after_cf, - Primitive::kPrimLong); + DataType::Type::kInt64); } /** @@ -547,7 +547,7 @@ TEST_F(ConstantFoldingTest, LongConstantFoldingOnSubtraction) { expected_after_cf, expected_after_dce, check_after_cf, - Primitive::kPrimLong); + DataType::Type::kInt64); } /** @@ -756,7 +756,7 @@ TEST_F(ConstantFoldingTest, UnsignedComparisonsWithZero) { // Make various unsigned comparisons with zero against a parameter. HInstruction* parameter = new (&allocator_) HParameterValue( - graph_->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimInt, true); + graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32, true); entry_block->AddInstruction(parameter); entry_block->AddInstruction(new (&allocator_) HGoto()); diff --git a/compiler/optimizing/data_type-inl.h b/compiler/optimizing/data_type-inl.h new file mode 100644 index 0000000000..fbc0c1215d --- /dev/null +++ b/compiler/optimizing/data_type-inl.h @@ -0,0 +1,67 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_OPTIMIZING_DATA_TYPE_INL_H_ +#define ART_COMPILER_OPTIMIZING_DATA_TYPE_INL_H_ + +#include "data_type.h" +#include "primitive.h" + +namespace art { + +// Note: Not declared in data_type.h to avoid pulling in "primitive.h". +constexpr DataType::Type DataTypeFromPrimitive(Primitive::Type type) { + switch (type) { + case Primitive::kPrimNot: return DataType::Type::kReference; + case Primitive::kPrimBoolean: return DataType::Type::kBool; + case Primitive::kPrimByte: return DataType::Type::kInt8; + case Primitive::kPrimChar: return DataType::Type::kUint16; + case Primitive::kPrimShort: return DataType::Type::kInt16; + case Primitive::kPrimInt: return DataType::Type::kInt32; + case Primitive::kPrimLong: return DataType::Type::kInt64; + case Primitive::kPrimFloat: return DataType::Type::kFloat32; + case Primitive::kPrimDouble: return DataType::Type::kFloat64; + case Primitive::kPrimVoid: return DataType::Type::kVoid; + } + LOG(FATAL) << "Unreachable"; + UNREACHABLE(); +} + +constexpr DataType::Type DataType::FromShorty(char type) { + return DataTypeFromPrimitive(Primitive::GetType(type)); +} + +constexpr char DataType::TypeId(DataType::Type type) { + // Type id for visualizer. 
+ switch (type) { + case DataType::Type::kBool: return 'z'; + case DataType::Type::kInt8: return 'b'; + case DataType::Type::kUint16: return 'c'; + case DataType::Type::kInt16: return 's'; + case DataType::Type::kInt32: return 'i'; + case DataType::Type::kInt64: return 'j'; + case DataType::Type::kFloat32: return 'f'; + case DataType::Type::kFloat64: return 'd'; + case DataType::Type::kReference: return 'l'; + case DataType::Type::kVoid: return 'v'; + } + LOG(FATAL) << "Unreachable"; + UNREACHABLE(); +} + +} // namespace art + +#endif // ART_COMPILER_OPTIMIZING_DATA_TYPE_INL_H_ diff --git a/compiler/optimizing/data_type.cc b/compiler/optimizing/data_type.cc new file mode 100644 index 0000000000..689061722e --- /dev/null +++ b/compiler/optimizing/data_type.cc @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "data_type.h" + +namespace art { + +static const char* kTypeNames[] = { + "Reference", + "Bool", + "Int8", + "Uint16", + "Int16", + "Int32", + "Int64", + "Float32", + "Float64", + "Void", +}; + +const char* DataType::PrettyDescriptor(Type type) { + static_assert(arraysize(kTypeNames) == static_cast<size_t>(Type::kLast) + 1, + "Missing element"); + uint32_t uint_type = static_cast<uint32_t>(type); + CHECK_LE(uint_type, static_cast<uint32_t>(Type::kLast)); + return kTypeNames[uint_type]; +} + +std::ostream& operator<<(std::ostream& os, DataType::Type type) { + uint32_t uint_type = static_cast<uint32_t>(type); + if (uint_type <= static_cast<uint32_t>(DataType::Type::kLast)) { + os << kTypeNames[uint_type]; + } else { + os << "Type[" << uint_type << "]"; + } + return os; +} + +} // namespace art diff --git a/compiler/optimizing/data_type.h b/compiler/optimizing/data_type.h new file mode 100644 index 0000000000..08f9263127 --- /dev/null +++ b/compiler/optimizing/data_type.h @@ -0,0 +1,184 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_OPTIMIZING_DATA_TYPE_H_ +#define ART_COMPILER_OPTIMIZING_DATA_TYPE_H_ + +#include <iosfwd> + +#include "base/logging.h" +#include "base/bit_utils.h" + +namespace art { + +class DataType { + public: + enum class Type : uint8_t { + kReference = 0, + kBool, + kInt8, + kUint16, + kInt16, + kInt32, + kInt64, + kFloat32, + kFloat64, + kVoid, + kLast = kVoid + }; + + static constexpr Type FromShorty(char type); + static constexpr char TypeId(DataType::Type type); + + static constexpr size_t SizeShift(Type type) { + switch (type) { + case Type::kVoid: + case Type::kBool: + case Type::kInt8: + return 0; + case Type::kUint16: + case Type::kInt16: + return 1; + case Type::kInt32: + case Type::kFloat32: + return 2; + case Type::kInt64: + case Type::kFloat64: + return 3; + case Type::kReference: + return WhichPowerOf2(kObjectReferenceSize); + default: + LOG(FATAL) << "Invalid type " << static_cast<int>(type); + return 0; + } + } + + static constexpr size_t Size(Type type) { + switch (type) { + case Type::kVoid: + return 0; + case Type::kBool: + case Type::kInt8: + return 1; + case Type::kUint16: + case Type::kInt16: + return 2; + case Type::kInt32: + case Type::kFloat32: + return 4; + case Type::kInt64: + case Type::kFloat64: + return 8; + case Type::kReference: + return kObjectReferenceSize; + default: + LOG(FATAL) << "Invalid type " << static_cast<int>(type); + return 0; + } + } + + static bool IsFloatingPointType(Type type) { + return type == Type::kFloat32 || type == Type::kFloat64; + } + + static bool IsIntegralType(Type type) { + // The Java language does not allow treating boolean as an integral type but + // our bit representation makes it safe. + switch (type) { + case Type::kBool: + case Type::kInt8: + case Type::kUint16: + case Type::kInt16: + case Type::kInt32: + case Type::kInt64: + return true; + default: + return false; + } + } + + static bool IsIntOrLongType(Type type) { + return type == Type::kInt32 || type == Type::kInt64; + } + + static bool Is64BitType(Type type) { + return type == Type::kInt64 || type == Type::kFloat64; + } + + // Return the general kind of `type`, fusing integer-like types as Type::kInt. 
+ static Type Kind(Type type) { + switch (type) { + case Type::kBool: + case Type::kInt8: + case Type::kInt16: + case Type::kUint16: + case Type::kInt32: + return Type::kInt32; + default: + return type; + } + } + + static int64_t MinValueOfIntegralType(Type type) { + switch (type) { + case Type::kBool: + return std::numeric_limits<bool>::min(); + case Type::kInt8: + return std::numeric_limits<int8_t>::min(); + case Type::kUint16: + return std::numeric_limits<uint16_t>::min(); + case Type::kInt16: + return std::numeric_limits<int16_t>::min(); + case Type::kInt32: + return std::numeric_limits<int32_t>::min(); + case Type::kInt64: + return std::numeric_limits<int64_t>::min(); + default: + LOG(FATAL) << "non integral type"; + } + return 0; + } + + static int64_t MaxValueOfIntegralType(Type type) { + switch (type) { + case Type::kBool: + return std::numeric_limits<bool>::max(); + case Type::kInt8: + return std::numeric_limits<int8_t>::max(); + case Type::kUint16: + return std::numeric_limits<uint16_t>::max(); + case Type::kInt16: + return std::numeric_limits<int16_t>::max(); + case Type::kInt32: + return std::numeric_limits<int32_t>::max(); + case Type::kInt64: + return std::numeric_limits<int64_t>::max(); + default: + LOG(FATAL) << "non integral type"; + } + return 0; + } + + static const char* PrettyDescriptor(Type type); + + private: + static constexpr size_t kObjectReferenceSize = 4u; +}; +std::ostream& operator<<(std::ostream& os, DataType::Type data_type); + +} // namespace art + +#endif // ART_COMPILER_OPTIMIZING_DATA_TYPE_H_ diff --git a/compiler/optimizing/data_type_test.cc b/compiler/optimizing/data_type_test.cc new file mode 100644 index 0000000000..927291a54a --- /dev/null +++ b/compiler/optimizing/data_type_test.cc @@ -0,0 +1,60 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <gtest/gtest.h> + +#include "data_type-inl.h" + +#include "primitive.h" + +namespace art { + +template <DataType::Type data_type, Primitive::Type primitive_type> +static void CheckConversion() { + static_assert(data_type == DataTypeFromPrimitive(primitive_type), "Conversion check."); + static_assert(DataType::Size(data_type) == Primitive::ComponentSize(primitive_type), + "Size check."); +} + +TEST(DataType, SizeAgainstPrimitive) { + CheckConversion<DataType::Type::kVoid, Primitive::kPrimVoid>(); + CheckConversion<DataType::Type::kBool, Primitive::kPrimBoolean>(); + CheckConversion<DataType::Type::kInt8, Primitive::kPrimByte>(); + CheckConversion<DataType::Type::kUint16, Primitive::kPrimChar>(); + CheckConversion<DataType::Type::kInt16, Primitive::kPrimShort>(); + CheckConversion<DataType::Type::kInt32, Primitive::kPrimInt>(); + CheckConversion<DataType::Type::kInt64, Primitive::kPrimLong>(); + CheckConversion<DataType::Type::kFloat32, Primitive::kPrimFloat>(); + CheckConversion<DataType::Type::kFloat64, Primitive::kPrimDouble>(); + CheckConversion<DataType::Type::kReference, Primitive::kPrimNot>(); +} + +TEST(DataType, Names) { +#define CHECK_NAME(type) EXPECT_STREQ(#type, DataType::PrettyDescriptor(DataType::Type::k##type)) + CHECK_NAME(Void); + CHECK_NAME(Bool); + CHECK_NAME(Int8); + CHECK_NAME(Uint16); + CHECK_NAME(Int16); + CHECK_NAME(Int32); + CHECK_NAME(Int64); + CHECK_NAME(Float32); + CHECK_NAME(Float64); + CHECK_NAME(Reference); +#undef CHECK_NAME +} + +} // namespace art diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc index 787296dc9d..9b094e989e 100644 --- a/compiler/optimizing/dead_code_elimination.cc +++ b/compiler/optimizing/dead_code_elimination.cc @@ -118,7 +118,7 @@ static bool HasEquality(IfCondition condition) { } static HConstant* Evaluate(HCondition* condition, HInstruction* left, HInstruction* right) { - if (left == right && !Primitive::IsFloatingPointType(left->GetType())) { + if (left == right && !DataType::IsFloatingPointType(left->GetType())) { return condition->GetBlock()->GetGraph()->GetIntConstant( HasEquality(condition->GetCondition()) ? 
1 : 0); } diff --git a/compiler/optimizing/emit_swap_mips_test.cc b/compiler/optimizing/emit_swap_mips_test.cc index fa3c4dfba8..0e9c81dae3 100644 --- a/compiler/optimizing/emit_swap_mips_test.cc +++ b/compiler/optimizing/emit_swap_mips_test.cc @@ -118,12 +118,12 @@ TEST_F(EmitSwapMipsTest, TwoRegisters) { moves_->AddMove( Location::RegisterLocation(4), Location::RegisterLocation(5), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); moves_->AddMove( Location::RegisterLocation(5), Location::RegisterLocation(4), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); const char* expected = "or $t8, $a1, $zero\n" @@ -136,12 +136,12 @@ TEST_F(EmitSwapMipsTest, TwoRegisterPairs) { moves_->AddMove( Location::RegisterPairLocation(4, 5), Location::RegisterPairLocation(6, 7), - Primitive::kPrimLong, + DataType::Type::kInt64, nullptr); moves_->AddMove( Location::RegisterPairLocation(6, 7), Location::RegisterPairLocation(4, 5), - Primitive::kPrimLong, + DataType::Type::kInt64, nullptr); const char* expected = "or $t8, $a2, $zero\n" @@ -157,12 +157,12 @@ TEST_F(EmitSwapMipsTest, TwoFpuRegistersFloat) { moves_->AddMove( Location::FpuRegisterLocation(4), Location::FpuRegisterLocation(2), - Primitive::kPrimFloat, + DataType::Type::kFloat32, nullptr); moves_->AddMove( Location::FpuRegisterLocation(2), Location::FpuRegisterLocation(4), - Primitive::kPrimFloat, + DataType::Type::kFloat32, nullptr); const char* expected = "mov.s $f6, $f2\n" @@ -175,12 +175,12 @@ TEST_F(EmitSwapMipsTest, TwoFpuRegistersDouble) { moves_->AddMove( Location::FpuRegisterLocation(4), Location::FpuRegisterLocation(2), - Primitive::kPrimDouble, + DataType::Type::kFloat64, nullptr); moves_->AddMove( Location::FpuRegisterLocation(2), Location::FpuRegisterLocation(4), - Primitive::kPrimDouble, + DataType::Type::kFloat64, nullptr); const char* expected = "mov.d $f6, $f2\n" @@ -193,12 +193,12 @@ TEST_F(EmitSwapMipsTest, RegisterAndFpuRegister) { moves_->AddMove( Location::RegisterLocation(4), Location::FpuRegisterLocation(2), - Primitive::kPrimFloat, + DataType::Type::kFloat32, nullptr); moves_->AddMove( Location::FpuRegisterLocation(2), Location::RegisterLocation(4), - Primitive::kPrimFloat, + DataType::Type::kFloat32, nullptr); const char* expected = "or $t8, $a0, $zero\n" @@ -211,12 +211,12 @@ TEST_F(EmitSwapMipsTest, RegisterPairAndFpuRegister) { moves_->AddMove( Location::RegisterPairLocation(4, 5), Location::FpuRegisterLocation(4), - Primitive::kPrimDouble, + DataType::Type::kFloat64, nullptr); moves_->AddMove( Location::FpuRegisterLocation(4), Location::RegisterPairLocation(4, 5), - Primitive::kPrimDouble, + DataType::Type::kFloat64, nullptr); const char* expected = "mfc1 $t8, $f4\n" @@ -232,12 +232,12 @@ TEST_F(EmitSwapMipsTest, TwoStackSlots) { moves_->AddMove( Location::StackSlot(52), Location::StackSlot(48), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); moves_->AddMove( Location::StackSlot(48), Location::StackSlot(52), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); const char* expected = "addiu $sp, $sp, -4\n" @@ -255,12 +255,12 @@ TEST_F(EmitSwapMipsTest, TwoDoubleStackSlots) { moves_->AddMove( Location::DoubleStackSlot(56), Location::DoubleStackSlot(48), - Primitive::kPrimLong, + DataType::Type::kInt64, nullptr); moves_->AddMove( Location::DoubleStackSlot(48), Location::DoubleStackSlot(56), - Primitive::kPrimLong, + DataType::Type::kInt64, nullptr); const char* expected = "addiu $sp, $sp, -4\n" @@ -282,12 +282,12 @@ TEST_F(EmitSwapMipsTest, RegisterAndStackSlot) { moves_->AddMove( 
Location::RegisterLocation(4), Location::StackSlot(48), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); moves_->AddMove( Location::StackSlot(48), Location::RegisterLocation(4), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); const char* expected = "or $t8, $a0, $zero\n" @@ -300,12 +300,12 @@ TEST_F(EmitSwapMipsTest, RegisterPairAndDoubleStackSlot) { moves_->AddMove( Location::RegisterPairLocation(4, 5), Location::DoubleStackSlot(32), - Primitive::kPrimLong, + DataType::Type::kInt64, nullptr); moves_->AddMove( Location::DoubleStackSlot(32), Location::RegisterPairLocation(4, 5), - Primitive::kPrimLong, + DataType::Type::kInt64, nullptr); const char* expected = "or $t8, $a0, $zero\n" @@ -321,12 +321,12 @@ TEST_F(EmitSwapMipsTest, FpuRegisterAndStackSlot) { moves_->AddMove( Location::FpuRegisterLocation(4), Location::StackSlot(48), - Primitive::kPrimFloat, + DataType::Type::kFloat32, nullptr); moves_->AddMove( Location::StackSlot(48), Location::FpuRegisterLocation(4), - Primitive::kPrimFloat, + DataType::Type::kFloat32, nullptr); const char* expected = "mov.s $f6, $f4\n" @@ -339,12 +339,12 @@ TEST_F(EmitSwapMipsTest, FpuRegisterAndDoubleStackSlot) { moves_->AddMove( Location::FpuRegisterLocation(4), Location::DoubleStackSlot(48), - Primitive::kPrimDouble, + DataType::Type::kFloat64, nullptr); moves_->AddMove( Location::DoubleStackSlot(48), Location::FpuRegisterLocation(4), - Primitive::kPrimDouble, + DataType::Type::kFloat64, nullptr); const char* expected = "mov.d $f6, $f4\n" diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc index 327e11f7e7..1c7d1a0b69 100644 --- a/compiler/optimizing/graph_checker.cc +++ b/compiler/optimizing/graph_checker.cc @@ -456,7 +456,7 @@ void GraphChecker::VisitInstruction(HInstruction* instruction) { } // Ensure that reference type instructions have reference type info. - if (instruction->GetType() == Primitive::kPrimNot) { + if (instruction->GetType() == DataType::Type::kReference) { if (!instruction->GetReferenceTypeInfo().IsValid()) { AddError(StringPrintf("Reference type instruction %s:%d does not have " "valid reference type information.", @@ -674,7 +674,7 @@ void GraphChecker::HandleLoop(HBasicBlock* loop_header) { static bool IsSameSizeConstant(const HInstruction* insn1, const HInstruction* insn2) { return insn1->IsConstant() && insn2->IsConstant() - && Primitive::Is64BitType(insn1->GetType()) == Primitive::Is64BitType(insn2->GetType()); + && DataType::Is64BitType(insn1->GetType()) == DataType::Is64BitType(insn2->GetType()); } static bool IsConstantEquivalent(const HInstruction* insn1, @@ -721,20 +721,20 @@ void GraphChecker::VisitPhi(HPhi* phi) { // Ensure that the inputs have the same primitive kind as the phi. 
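DataType::Kind(), shown above in data_type.h, collapses the sub-word integral types (kBool, kInt8, kInt16, kUint16) down to kInt32, so the checker compares register kinds rather than exact types. A minimal sketch of the comparison the loop that follows performs for every phi input; SameRegisterKind is a hypothetical helper used here only for illustration:

  // Illustrative only: two values are compatible phi inputs if they occupy the
  // same kind of register; all narrow integral types map to kInt32.
  static bool SameRegisterKind(DataType::Type a, DataType::Type b) {
    return DataType::Kind(a) == DataType::Kind(b);
  }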
for (size_t i = 0; i < input_records.size(); ++i) { HInstruction* input = input_records[i].GetInstruction(); - if (Primitive::PrimitiveKind(input->GetType()) != Primitive::PrimitiveKind(phi->GetType())) { + if (DataType::Kind(input->GetType()) != DataType::Kind(phi->GetType())) { AddError(StringPrintf( "Input %d at index %zu of phi %d from block %d does not have the " "same kind as the phi: %s versus %s", input->GetId(), i, phi->GetId(), phi->GetBlock()->GetBlockId(), - Primitive::PrettyDescriptor(input->GetType()), - Primitive::PrettyDescriptor(phi->GetType()))); + DataType::PrettyDescriptor(input->GetType()), + DataType::PrettyDescriptor(phi->GetType()))); } } if (phi->GetType() != HPhi::ToPhiType(phi->GetType())) { AddError(StringPrintf("Phi %d in block %d does not have an expected phi type: %s", phi->GetId(), phi->GetBlock()->GetBlockId(), - Primitive::PrettyDescriptor(phi->GetType()))); + DataType::PrettyDescriptor(phi->GetType()))); } if (phi->IsCatchPhi()) { @@ -820,7 +820,7 @@ void GraphChecker::VisitPhi(HPhi* phi) { phi->GetId(), phi->GetRegNumber(), type_str.str().c_str())); - } else if (phi->GetType() == Primitive::kPrimNot) { + } else if (phi->GetType() == DataType::Type::kReference) { std::stringstream type_str; type_str << other_phi->GetType(); AddError(StringPrintf( @@ -859,7 +859,7 @@ void GraphChecker::HandleBooleanInput(HInstruction* instruction, size_t input_in static_cast<int>(input_index), value)); } - } else if (Primitive::PrimitiveKind(input->GetType()) != Primitive::kPrimInt) { + } else if (DataType::Kind(input->GetType()) != DataType::Type::kInt32) { // TODO: We need a data-flow analysis to determine if an input like Phi, // Select or a binary operation is actually Boolean. Allow for now. AddError(StringPrintf( @@ -867,7 +867,7 @@ void GraphChecker::HandleBooleanInput(HInstruction* instruction, size_t input_in instruction->DebugName(), instruction->GetId(), static_cast<int>(input_index), - Primitive::PrettyDescriptor(input->GetType()))); + DataType::PrettyDescriptor(input->GetType()))); } } @@ -904,27 +904,27 @@ void GraphChecker::VisitBooleanNot(HBooleanNot* instruction) { void GraphChecker::VisitCondition(HCondition* op) { VisitInstruction(op); - if (op->GetType() != Primitive::kPrimBoolean) { + if (op->GetType() != DataType::Type::kBool) { AddError(StringPrintf( "Condition %s %d has a non-Boolean result type: %s.", op->DebugName(), op->GetId(), - Primitive::PrettyDescriptor(op->GetType()))); + DataType::PrettyDescriptor(op->GetType()))); } HInstruction* lhs = op->InputAt(0); HInstruction* rhs = op->InputAt(1); - if (Primitive::PrimitiveKind(lhs->GetType()) != Primitive::PrimitiveKind(rhs->GetType())) { + if (DataType::Kind(lhs->GetType()) != DataType::Kind(rhs->GetType())) { AddError(StringPrintf( "Condition %s %d has inputs of different kinds: %s, and %s.", op->DebugName(), op->GetId(), - Primitive::PrettyDescriptor(lhs->GetType()), - Primitive::PrettyDescriptor(rhs->GetType()))); + DataType::PrettyDescriptor(lhs->GetType()), + DataType::PrettyDescriptor(rhs->GetType()))); } if (!op->IsEqual() && !op->IsNotEqual()) { - if ((lhs->GetType() == Primitive::kPrimNot)) { + if ((lhs->GetType() == DataType::Type::kReference)) { AddError(StringPrintf( "Condition %s %d uses an object as left-hand side input.", op->DebugName(), op->GetId())); - } else if (rhs->GetType() == Primitive::kPrimNot) { + } else if (rhs->GetType() == DataType::Type::kReference) { AddError(StringPrintf( "Condition %s %d uses an object as right-hand side input.", op->DebugName(), op->GetId())); @@ 
-934,72 +934,72 @@ void GraphChecker::VisitCondition(HCondition* op) { void GraphChecker::VisitNeg(HNeg* instruction) { VisitInstruction(instruction); - Primitive::Type input_type = instruction->InputAt(0)->GetType(); - Primitive::Type result_type = instruction->GetType(); - if (result_type != Primitive::PrimitiveKind(input_type)) { + DataType::Type input_type = instruction->InputAt(0)->GetType(); + DataType::Type result_type = instruction->GetType(); + if (result_type != DataType::Kind(input_type)) { AddError(StringPrintf("Binary operation %s %d has a result type different " "from its input kind: %s vs %s.", instruction->DebugName(), instruction->GetId(), - Primitive::PrettyDescriptor(result_type), - Primitive::PrettyDescriptor(input_type))); + DataType::PrettyDescriptor(result_type), + DataType::PrettyDescriptor(input_type))); } } void GraphChecker::VisitBinaryOperation(HBinaryOperation* op) { VisitInstruction(op); - Primitive::Type lhs_type = op->InputAt(0)->GetType(); - Primitive::Type rhs_type = op->InputAt(1)->GetType(); - Primitive::Type result_type = op->GetType(); + DataType::Type lhs_type = op->InputAt(0)->GetType(); + DataType::Type rhs_type = op->InputAt(1)->GetType(); + DataType::Type result_type = op->GetType(); // Type consistency between inputs. if (op->IsUShr() || op->IsShr() || op->IsShl() || op->IsRor()) { - if (Primitive::PrimitiveKind(rhs_type) != Primitive::kPrimInt) { + if (DataType::Kind(rhs_type) != DataType::Type::kInt32) { AddError(StringPrintf("Shift/rotate operation %s %d has a non-int kind second input: " "%s of type %s.", op->DebugName(), op->GetId(), op->InputAt(1)->DebugName(), - Primitive::PrettyDescriptor(rhs_type))); + DataType::PrettyDescriptor(rhs_type))); } } else { - if (Primitive::PrimitiveKind(lhs_type) != Primitive::PrimitiveKind(rhs_type)) { + if (DataType::Kind(lhs_type) != DataType::Kind(rhs_type)) { AddError(StringPrintf("Binary operation %s %d has inputs of different kinds: %s, and %s.", op->DebugName(), op->GetId(), - Primitive::PrettyDescriptor(lhs_type), - Primitive::PrettyDescriptor(rhs_type))); + DataType::PrettyDescriptor(lhs_type), + DataType::PrettyDescriptor(rhs_type))); } } // Type consistency between result and input(s). if (op->IsCompare()) { - if (result_type != Primitive::kPrimInt) { + if (result_type != DataType::Type::kInt32) { AddError(StringPrintf("Compare operation %d has a non-int result type: %s.", op->GetId(), - Primitive::PrettyDescriptor(result_type))); + DataType::PrettyDescriptor(result_type))); } } else if (op->IsUShr() || op->IsShr() || op->IsShl() || op->IsRor()) { // Only check the first input (value), as the second one (distance) // must invariably be of kind `int`. 
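For shifts and rotates the two inputs deliberately differ in kind: a 64-bit shift still takes a 32-bit distance, so only the value operand is checked against the result type below. A small illustration of an HIR shape these checks accept, in the arena-allocation style of the tests above (allocator, val64 and dist32 are hypothetical: an ArenaAllocator* and instructions of type kInt64 and kInt32):

  // Accepted: Kind(distance) == kInt32, and the result type (kInt64) matches Kind(value).
  HShl* shl = new (allocator) HShl(DataType::Type::kInt64, val64, dist32);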
- if (result_type != Primitive::PrimitiveKind(lhs_type)) { + if (result_type != DataType::Kind(lhs_type)) { AddError(StringPrintf("Shift/rotate operation %s %d has a result type different " "from its left-hand side (value) input kind: %s vs %s.", op->DebugName(), op->GetId(), - Primitive::PrettyDescriptor(result_type), - Primitive::PrettyDescriptor(lhs_type))); + DataType::PrettyDescriptor(result_type), + DataType::PrettyDescriptor(lhs_type))); } } else { - if (Primitive::PrimitiveKind(result_type) != Primitive::PrimitiveKind(lhs_type)) { + if (DataType::Kind(result_type) != DataType::Kind(lhs_type)) { AddError(StringPrintf("Binary operation %s %d has a result kind different " "from its left-hand side input kind: %s vs %s.", op->DebugName(), op->GetId(), - Primitive::PrettyDescriptor(result_type), - Primitive::PrettyDescriptor(lhs_type))); + DataType::PrettyDescriptor(result_type), + DataType::PrettyDescriptor(lhs_type))); } - if (Primitive::PrimitiveKind(result_type) != Primitive::PrimitiveKind(rhs_type)) { + if (DataType::Kind(result_type) != DataType::Kind(rhs_type)) { AddError(StringPrintf("Binary operation %s %d has a result kind different " "from its right-hand side input kind: %s vs %s.", op->DebugName(), op->GetId(), - Primitive::PrettyDescriptor(result_type), - Primitive::PrettyDescriptor(rhs_type))); + DataType::PrettyDescriptor(result_type), + DataType::PrettyDescriptor(rhs_type))); } } } @@ -1028,16 +1028,16 @@ void GraphChecker::VisitBoundType(HBoundType* instruction) { void GraphChecker::VisitTypeConversion(HTypeConversion* instruction) { VisitInstruction(instruction); - Primitive::Type result_type = instruction->GetResultType(); - Primitive::Type input_type = instruction->GetInputType(); + DataType::Type result_type = instruction->GetResultType(); + DataType::Type input_type = instruction->GetInputType(); // Invariant: We should never generate a conversion to a Boolean value. - if (result_type == Primitive::kPrimBoolean) { + if (result_type == DataType::Type::kBool) { AddError(StringPrintf( "%s %d converts to a %s (from a %s).", instruction->DebugName(), instruction->GetId(), - Primitive::PrettyDescriptor(result_type), - Primitive::PrettyDescriptor(input_type))); + DataType::PrettyDescriptor(result_type), + DataType::PrettyDescriptor(input_type))); } } diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc index 3035e4657d..194f063d48 100644 --- a/compiler/optimizing/graph_visualizer.cc +++ b/compiler/optimizing/graph_visualizer.cc @@ -24,6 +24,7 @@ #include "bounds_check_elimination.h" #include "builder.h" #include "code_generator.h" +#include "data_type-inl.h" #include "dead_code_elimination.h" #include "disassembler.h" #include "inliner.h" @@ -243,25 +244,6 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor { } } - char GetTypeId(Primitive::Type type) { - // Note that Primitive::Descriptor would not work for us - // because it does not handle reference types (that is kPrimNot). 
- switch (type) { - case Primitive::kPrimBoolean: return 'z'; - case Primitive::kPrimByte: return 'b'; - case Primitive::kPrimChar: return 'c'; - case Primitive::kPrimShort: return 's'; - case Primitive::kPrimInt: return 'i'; - case Primitive::kPrimLong: return 'j'; - case Primitive::kPrimFloat: return 'f'; - case Primitive::kPrimDouble: return 'd'; - case Primitive::kPrimNot: return 'l'; - case Primitive::kPrimVoid: return 'v'; - } - LOG(FATAL) << "Unreachable"; - return 'v'; - } - void PrintPredecessors(HBasicBlock* block) { AddIndent(); output_ << "predecessors"; @@ -583,7 +565,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor { if (!inputs.empty()) { StringList input_list; for (const HInstruction* input : inputs) { - input_list.NewEntryStream() << GetTypeId(input->GetType()) << input->GetId(); + input_list.NewEntryStream() << DataType::TypeId(input->GetType()) << input->GetId(); } StartAttributeStream() << input_list; } @@ -597,7 +579,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor { for (size_t i = 0, e = environment->Size(); i < e; ++i) { HInstruction* insn = environment->GetInstructionAt(i); if (insn != nullptr) { - vregs.NewEntryStream() << GetTypeId(insn->GetType()) << insn->GetId(); + vregs.NewEntryStream() << DataType::TypeId(insn->GetType()) << insn->GetId(); } else { vregs.NewEntryStream() << "_"; } @@ -654,7 +636,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor { if ((IsPass(HGraphBuilder::kBuilderPassName) || IsPass(HInliner::kInlinerPassName)) - && (instruction->GetType() == Primitive::kPrimNot)) { + && (instruction->GetType() == DataType::Type::kReference)) { ReferenceTypeInfo info = instruction->IsLoadClass() ? instruction->AsLoadClass()->GetLoadedClassRTI() : instruction->GetReferenceTypeInfo(); @@ -698,7 +680,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor { size_t num_uses = instruction->GetUses().SizeSlow(); AddIndent(); output_ << bci << " " << num_uses << " " - << GetTypeId(instruction->GetType()) << instruction->GetId() << " "; + << DataType::TypeId(instruction->GetType()) << instruction->GetId() << " "; PrintInstruction(instruction); output_ << " " << kEndInstructionMarker << "\n"; } @@ -821,7 +803,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor { for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { AddIndent(); HInstruction* instruction = it.Current(); - output_ << instruction->GetId() << " " << GetTypeId(instruction->GetType()) + output_ << instruction->GetId() << " " << DataType::TypeId(instruction->GetType()) << instruction->GetId() << "[ "; for (const HInstruction* input : instruction->GetInputs()) { output_ << input->GetId() << " "; diff --git a/compiler/optimizing/gvn_test.cc b/compiler/optimizing/gvn_test.cc index e1ed7f656e..ac0dbee2c5 100644 --- a/compiler/optimizing/gvn_test.cc +++ b/compiler/optimizing/gvn_test.cc @@ -37,7 +37,7 @@ TEST_F(GVNTest, LocalFieldElimination) { HInstruction* parameter = new (&allocator) HParameterValue(graph->GetDexFile(), dex::TypeIndex(0), 0, - Primitive::kPrimNot); + DataType::Type::kReference); entry->AddInstruction(parameter); HBasicBlock* block = new (&allocator) HBasicBlock(graph); @@ -46,7 +46,7 @@ TEST_F(GVNTest, LocalFieldElimination) { block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, nullptr, - Primitive::kPrimNot, + DataType::Type::kReference, MemberOffset(42), false, kUnknownFieldIndex, @@ -55,7 +55,7 @@ TEST_F(GVNTest, LocalFieldElimination) { 0)); block->AddInstruction(new 
(&allocator) HInstanceFieldGet(parameter, nullptr, - Primitive::kPrimNot, + DataType::Type::kReference, MemberOffset(42), false, kUnknownFieldIndex, @@ -65,7 +65,7 @@ TEST_F(GVNTest, LocalFieldElimination) { HInstruction* to_remove = block->GetLastInstruction(); block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, nullptr, - Primitive::kPrimNot, + DataType::Type::kReference, MemberOffset(43), false, kUnknownFieldIndex, @@ -77,7 +77,7 @@ TEST_F(GVNTest, LocalFieldElimination) { block->AddInstruction(new (&allocator) HInstanceFieldSet(parameter, parameter, nullptr, - Primitive::kPrimNot, + DataType::Type::kReference, MemberOffset(42), false, kUnknownFieldIndex, @@ -86,7 +86,7 @@ TEST_F(GVNTest, LocalFieldElimination) { 0)); block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, nullptr, - Primitive::kPrimNot, + DataType::Type::kReference, MemberOffset(42), false, kUnknownFieldIndex, @@ -121,7 +121,7 @@ TEST_F(GVNTest, GlobalFieldElimination) { HInstruction* parameter = new (&allocator) HParameterValue(graph->GetDexFile(), dex::TypeIndex(0), 0, - Primitive::kPrimNot); + DataType::Type::kReference); entry->AddInstruction(parameter); HBasicBlock* block = new (&allocator) HBasicBlock(graph); @@ -129,7 +129,7 @@ TEST_F(GVNTest, GlobalFieldElimination) { entry->AddSuccessor(block); block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, nullptr, - Primitive::kPrimBoolean, + DataType::Type::kBool, MemberOffset(42), false, kUnknownFieldIndex, @@ -152,7 +152,7 @@ TEST_F(GVNTest, GlobalFieldElimination) { then->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, nullptr, - Primitive::kPrimBoolean, + DataType::Type::kBool, MemberOffset(42), false, kUnknownFieldIndex, @@ -162,7 +162,7 @@ TEST_F(GVNTest, GlobalFieldElimination) { then->AddInstruction(new (&allocator) HGoto()); else_->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, nullptr, - Primitive::kPrimBoolean, + DataType::Type::kBool, MemberOffset(42), false, kUnknownFieldIndex, @@ -172,7 +172,7 @@ TEST_F(GVNTest, GlobalFieldElimination) { else_->AddInstruction(new (&allocator) HGoto()); join->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, nullptr, - Primitive::kPrimBoolean, + DataType::Type::kBool, MemberOffset(42), false, kUnknownFieldIndex, @@ -204,7 +204,7 @@ TEST_F(GVNTest, LoopFieldElimination) { HInstruction* parameter = new (&allocator) HParameterValue(graph->GetDexFile(), dex::TypeIndex(0), 0, - Primitive::kPrimNot); + DataType::Type::kReference); entry->AddInstruction(parameter); HBasicBlock* block = new (&allocator) HBasicBlock(graph); @@ -212,7 +212,7 @@ TEST_F(GVNTest, LoopFieldElimination) { entry->AddSuccessor(block); block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, nullptr, - Primitive::kPrimBoolean, + DataType::Type::kBool, MemberOffset(42), false, kUnknownFieldIndex, @@ -235,7 +235,7 @@ TEST_F(GVNTest, LoopFieldElimination) { loop_header->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, nullptr, - Primitive::kPrimBoolean, + DataType::Type::kBool, MemberOffset(42), false, kUnknownFieldIndex, @@ -250,7 +250,7 @@ TEST_F(GVNTest, LoopFieldElimination) { loop_body->AddInstruction(new (&allocator) HInstanceFieldSet(parameter, parameter, nullptr, - Primitive::kPrimBoolean, + DataType::Type::kBool, MemberOffset(42), false, kUnknownFieldIndex, @@ -260,7 +260,7 @@ TEST_F(GVNTest, LoopFieldElimination) { HInstruction* field_set = loop_body->GetLastInstruction(); loop_body->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, 
nullptr, - Primitive::kPrimBoolean, + DataType::Type::kBool, MemberOffset(42), false, kUnknownFieldIndex, @@ -272,7 +272,7 @@ TEST_F(GVNTest, LoopFieldElimination) { exit->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, nullptr, - Primitive::kPrimBoolean, + DataType::Type::kBool, MemberOffset(42), false, kUnknownFieldIndex, @@ -351,7 +351,7 @@ TEST_F(GVNTest, LoopSideEffects) { HInstruction* parameter = new (&allocator) HParameterValue(graph->GetDexFile(), dex::TypeIndex(0), 0, - Primitive::kPrimBoolean); + DataType::Type::kBool); entry->AddInstruction(parameter); entry->AddInstruction(new (&allocator) HGoto()); outer_loop_header->AddInstruction(new (&allocator) HSuspendCheck()); @@ -374,7 +374,7 @@ TEST_F(GVNTest, LoopSideEffects) { entry->AddInstruction(new (&allocator) HInstanceFieldSet(parameter, parameter, nullptr, - Primitive::kPrimNot, + DataType::Type::kReference, MemberOffset(42), false, kUnknownFieldIndex, @@ -399,7 +399,7 @@ TEST_F(GVNTest, LoopSideEffects) { new (&allocator) HInstanceFieldSet(parameter, parameter, nullptr, - Primitive::kPrimNot, + DataType::Type::kReference, MemberOffset(42), false, kUnknownFieldIndex, @@ -425,7 +425,7 @@ TEST_F(GVNTest, LoopSideEffects) { new (&allocator) HInstanceFieldSet(parameter, parameter, nullptr, - Primitive::kPrimNot, + DataType::Type::kReference, MemberOffset(42), false, kUnknownFieldIndex, diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc index 84b20f65e3..fe286ab88a 100644 --- a/compiler/optimizing/induction_var_analysis.cc +++ b/compiler/optimizing/induction_var_analysis.cc @@ -56,17 +56,17 @@ static void RotateEntryPhiFirst(HLoopInformation* loop, /** * Returns true if the from/to types denote a narrowing, integral conversion (precision loss). */ -static bool IsNarrowingIntegralConversion(Primitive::Type from, Primitive::Type to) { +static bool IsNarrowingIntegralConversion(DataType::Type from, DataType::Type to) { switch (from) { - case Primitive::kPrimLong: - return to == Primitive::kPrimByte || to == Primitive::kPrimShort - || to == Primitive::kPrimChar || to == Primitive::kPrimInt; - case Primitive::kPrimInt: - return to == Primitive::kPrimByte || to == Primitive::kPrimShort - || to == Primitive::kPrimChar; - case Primitive::kPrimChar: - case Primitive::kPrimShort: - return to == Primitive::kPrimByte; + case DataType::Type::kInt64: + return to == DataType::Type::kInt8 || to == DataType::Type::kInt16 + || to == DataType::Type::kUint16 || to == DataType::Type::kInt32; + case DataType::Type::kInt32: + return to == DataType::Type::kInt8 || to == DataType::Type::kInt16 + || to == DataType::Type::kUint16; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + return to == DataType::Type::kInt8; default: return false; } @@ -75,13 +75,13 @@ static bool IsNarrowingIntegralConversion(Primitive::Type from, Primitive::Type /** * Returns result of implicit widening type conversion done in HIR. 
*/ -static Primitive::Type ImplicitConversion(Primitive::Type type) { +static DataType::Type ImplicitConversion(DataType::Type type) { switch (type) { - case Primitive::kPrimShort: - case Primitive::kPrimChar: - case Primitive::kPrimByte: - case Primitive::kPrimBoolean: - return Primitive::kPrimInt; + case DataType::Type::kInt16: + case DataType::Type::kUint16: + case DataType::Type::kInt8: + case DataType::Type::kBool: + return DataType::Type::kInt32; default: return type; } @@ -100,7 +100,7 @@ HInductionVarAnalysis::HInductionVarAnalysis(HGraph* graph) scc_(graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)), cycle_(std::less<HInstruction*>(), graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)), - type_(Primitive::kPrimVoid), + type_(DataType::Type::kVoid), induction_(std::less<HLoopInformation*>(), graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)), cycles_(std::less<HPhi*>(), @@ -520,8 +520,8 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::TransferMul(Inducti HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::TransferConversion( InductionInfo* a, - Primitive::Type from, - Primitive::Type to) { + DataType::Type from, + DataType::Type to) { if (a != nullptr) { // Allow narrowing conversion on linear induction in certain cases: // induction is already at narrow type, or can be made narrower. @@ -723,15 +723,15 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::SolveConversion( HLoopInformation* loop, HInstruction* entry_phi, HTypeConversion* conversion) { - Primitive::Type from = conversion->GetInputType(); - Primitive::Type to = conversion->GetResultType(); + DataType::Type from = conversion->GetInputType(); + DataType::Type to = conversion->GetResultType(); // A narrowing conversion is allowed as *last* operation of the cycle of a linear induction // with an initial value that fits the type, provided that the narrowest encountered type is // recorded with the induction to account for the precision loss. The narrower induction does // *not* transfer to any wider operations, however, since these may yield out-of-type values if (entry_phi->InputCount() == 2 && conversion == entry_phi->InputAt(1)) { - int64_t min = Primitive::MinValueOfIntegralType(to); - int64_t max = Primitive::MaxValueOfIntegralType(to); + int64_t min = DataType::MinValueOfIntegralType(to); + int64_t max = DataType::MaxValueOfIntegralType(to); int64_t value = 0; InductionInfo* initial = LookupInfo(loop, entry_phi->InputAt(0)); if (IsNarrowingIntegralConversion(from, to) && @@ -761,7 +761,7 @@ void HInductionVarAnalysis::VisitControl(HLoopInformation* loop) { HCondition* condition = if_expr->AsCondition(); InductionInfo* a = LookupInfo(loop, condition->InputAt(0)); InductionInfo* b = LookupInfo(loop, condition->InputAt(1)); - Primitive::Type type = ImplicitConversion(condition->InputAt(0)->GetType()); + DataType::Type type = ImplicitConversion(condition->InputAt(0)->GetType()); // Determine if the loop control uses a known sequence on an if-exit (X outside) or on // an if-iterate (X inside), expressed as if-iterate when passed into VisitCondition(). 
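To make the narrowing rule in SolveConversion above concrete: for a cycle of the shape s = (short) (s + 1) with s starting at 0, the conversion has from == kInt32 and to == kInt16, so min and max become -32768 and 32767; the initial value 0 fits that range, so the linear induction is accepted but recorded at the narrower Int16 type. A sketch of the source-level loop this corresponds to (illustrative only):

  // short s = 0;
  // for (int i = 0; i < n; i++) {
  //   a[s] = 0;
  //   s = (short) (s + 1);  // narrowing conversion is the last operation of the cycle
  // }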
if (a == nullptr || b == nullptr) { @@ -778,7 +778,7 @@ void HInductionVarAnalysis::VisitControl(HLoopInformation* loop) { void HInductionVarAnalysis::VisitCondition(HLoopInformation* loop, InductionInfo* a, InductionInfo* b, - Primitive::Type type, + DataType::Type type, IfCondition cmp) { if (a->induction_class == kInvariant && b->induction_class == kLinear) { // Swap condition if induction is at right-hand-side (e.g. U > i is same as i < U). @@ -809,7 +809,7 @@ void HInductionVarAnalysis::VisitCondition(HLoopInformation* loop, } // Only accept integral condition. A mismatch between the type of condition and the induction // is only allowed if the, necessarily narrower, induction range fits the narrower control. - if (type != Primitive::kPrimInt && type != Primitive::kPrimLong) { + if (type != DataType::Type::kInt32 && type != DataType::Type::kInt64) { return; // not integral } else if (type != a->type && !FitsNarrowerControl(lower_expr, upper_expr, stride_value, a->type, cmp)) { @@ -830,7 +830,7 @@ void HInductionVarAnalysis::VisitTripCount(HLoopInformation* loop, InductionInfo* upper_expr, InductionInfo* stride_expr, int64_t stride_value, - Primitive::Type type, + DataType::Type type, IfCondition cmp) { // Any loop of the general form: // @@ -931,10 +931,10 @@ bool HInductionVarAnalysis::IsTaken(InductionInfo* lower_expr, bool HInductionVarAnalysis::IsFinite(InductionInfo* upper_expr, int64_t stride_value, - Primitive::Type type, + DataType::Type type, IfCondition cmp) { - int64_t min = Primitive::MinValueOfIntegralType(type); - int64_t max = Primitive::MaxValueOfIntegralType(type); + int64_t min = DataType::MinValueOfIntegralType(type); + int64_t max = DataType::MaxValueOfIntegralType(type); // Some rules under which it is certain at compile-time that the loop is finite. int64_t value; switch (cmp) { @@ -957,10 +957,10 @@ bool HInductionVarAnalysis::IsFinite(InductionInfo* upper_expr, bool HInductionVarAnalysis::FitsNarrowerControl(InductionInfo* lower_expr, InductionInfo* upper_expr, int64_t stride_value, - Primitive::Type type, + DataType::Type type, IfCondition cmp) { - int64_t min = Primitive::MinValueOfIntegralType(type); - int64_t max = Primitive::MaxValueOfIntegralType(type); + int64_t min = DataType::MinValueOfIntegralType(type); + int64_t max = DataType::MaxValueOfIntegralType(type); // Inclusive test need one extra. 
if (stride_value != 1 && stride_value != -1) { return false; // non-unit stride @@ -1008,13 +1008,13 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::LookupInfo(HLoopInf } HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::CreateConstant(int64_t value, - Primitive::Type type) { + DataType::Type type) { HInstruction* constant; switch (type) { - case Primitive::kPrimDouble: constant = graph_->GetDoubleConstant(value); break; - case Primitive::kPrimFloat: constant = graph_->GetFloatConstant(value); break; - case Primitive::kPrimLong: constant = graph_->GetLongConstant(value); break; - default: constant = graph_->GetIntConstant(value); break; + case DataType::Type::kFloat64: constant = graph_->GetDoubleConstant(value); break; + case DataType::Type::kFloat32: constant = graph_->GetFloatConstant(value); break; + case DataType::Type::kInt64: constant = graph_->GetLongConstant(value); break; + default: constant = graph_->GetIntConstant(value); break; } return CreateInvariantFetch(constant); } @@ -1100,11 +1100,11 @@ HInstruction* HInductionVarAnalysis::GetShiftConstant(HLoopInformation* loop, InductionInfo* b = LookupInfo(loop, instruction->InputAt(1)); int64_t value = -1; if (IsExact(b, &value)) { - Primitive::Type type = instruction->InputAt(0)->GetType(); - if (type == Primitive::kPrimInt && 0 <= value && value < 31) { + DataType::Type type = instruction->InputAt(0)->GetType(); + if (type == DataType::Type::kInt32 && 0 <= value && value < 31) { return graph_->GetIntConstant(1 << value); } - if (type == Primitive::kPrimLong && 0 <= value && value < 63) { + if (type == DataType::Type::kInt64 && 0 <= value && value < 63) { return graph_->GetLongConstant(1L << value); } } @@ -1142,11 +1142,11 @@ bool HInductionVarAnalysis::IsAtLeast(InductionInfo* info, int64_t* value) { bool HInductionVarAnalysis::IsNarrowingLinear(InductionInfo* info) { return info != nullptr && info->induction_class == kLinear && - (info->type == Primitive::kPrimByte || - info->type == Primitive::kPrimShort || - info->type == Primitive::kPrimChar || - (info->type == Primitive::kPrimInt && (info->op_a->type == Primitive::kPrimLong || - info->op_b->type == Primitive::kPrimLong))); + (info->type == DataType::Type::kInt8 || + info->type == DataType::Type::kInt16 || + info->type == DataType::Type::kUint16 || + (info->type == DataType::Type::kInt32 && (info->op_a->type == DataType::Type::kInt64 || + info->op_b->type == DataType::Type::kInt64))); } bool HInductionVarAnalysis::InductionEqual(InductionInfo* info1, @@ -1207,12 +1207,12 @@ std::string HInductionVarAnalysis::InductionToString(InductionInfo* info) { DCHECK(info->operation == kNop); return "(" + InductionToString(info->op_a) + " * i + " + InductionToString(info->op_b) + "):" + - Primitive::PrettyDescriptor(info->type); + DataType::PrettyDescriptor(info->type); } else if (info->induction_class == kPolynomial) { DCHECK(info->operation == kNop); return "poly(sum_lt(" + InductionToString(info->op_a) + ") + " + InductionToString(info->op_b) + "):" + - Primitive::PrettyDescriptor(info->type); + DataType::PrettyDescriptor(info->type); } else if (info->induction_class == kGeometric) { DCHECK(info->operation == kMul || info->operation == kDiv); DCHECK(info->fetch != nullptr); @@ -1220,17 +1220,17 @@ std::string HInductionVarAnalysis::InductionToString(InductionInfo* info) { FetchToString(info->fetch) + (info->operation == kMul ? 
" ^ i + " : " ^ -i + ") + InductionToString(info->op_b) + "):" + - Primitive::PrettyDescriptor(info->type); + DataType::PrettyDescriptor(info->type); } else if (info->induction_class == kWrapAround) { DCHECK(info->operation == kNop); return "wrap(" + InductionToString(info->op_a) + ", " + InductionToString(info->op_b) + "):" + - Primitive::PrettyDescriptor(info->type); + DataType::PrettyDescriptor(info->type); } else if (info->induction_class == kPeriodic) { DCHECK(info->operation == kNop); return "periodic(" + InductionToString(info->op_a) + ", " + InductionToString(info->op_b) + "):" + - Primitive::PrettyDescriptor(info->type); + DataType::PrettyDescriptor(info->type); } } } diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h index 39b39cdf55..421b3ab9d0 100644 --- a/compiler/optimizing/induction_var_analysis.h +++ b/compiler/optimizing/induction_var_analysis.h @@ -103,7 +103,7 @@ class HInductionVarAnalysis : public HOptimization { InductionInfo* a, InductionInfo* b, HInstruction* f, - Primitive::Type t) + DataType::Type t) : induction_class(ic), operation(op), op_a(a), @@ -115,7 +115,7 @@ class HInductionVarAnalysis : public HOptimization { InductionInfo* op_a; InductionInfo* op_b; HInstruction* fetch; - Primitive::Type type; // precision of operation + DataType::Type type; // precision of operation }; bool IsVisitedNode(HInstruction* instruction) const { @@ -136,7 +136,7 @@ class HInductionVarAnalysis : public HOptimization { InductionInfo* CreateTripCount(InductionOp op, InductionInfo* a, InductionInfo* b, - Primitive::Type type) { + DataType::Type type) { DCHECK(a != nullptr && b != nullptr); return new (graph_->GetArena()) InductionInfo(kInvariant, op, a, b, nullptr, type); } @@ -146,7 +146,7 @@ class HInductionVarAnalysis : public HOptimization { InductionInfo* a, InductionInfo* b, HInstruction* f, - Primitive::Type type) { + DataType::Type type) { DCHECK(a != nullptr && b != nullptr); return new (graph_->GetArena()) InductionInfo(ic, op, a, b, f, type); } @@ -167,7 +167,7 @@ class HInductionVarAnalysis : public HOptimization { InductionInfo* TransferAddSub(InductionInfo* a, InductionInfo* b, InductionOp op); InductionInfo* TransferNeg(InductionInfo* a); InductionInfo* TransferMul(InductionInfo* a, InductionInfo* b); - InductionInfo* TransferConversion(InductionInfo* a, Primitive::Type from, Primitive::Type to); + InductionInfo* TransferConversion(InductionInfo* a, DataType::Type from, DataType::Type to); // Solvers. InductionInfo* SolvePhi(HInstruction* phi, size_t input_index, size_t adjust_input_size); @@ -200,30 +200,30 @@ class HInductionVarAnalysis : public HOptimization { void VisitCondition(HLoopInformation* loop, InductionInfo* a, InductionInfo* b, - Primitive::Type type, + DataType::Type type, IfCondition cmp); void VisitTripCount(HLoopInformation* loop, InductionInfo* lower_expr, InductionInfo* upper_expr, InductionInfo* stride, int64_t stride_value, - Primitive::Type type, + DataType::Type type, IfCondition cmp); bool IsTaken(InductionInfo* lower_expr, InductionInfo* upper_expr, IfCondition cmp); bool IsFinite(InductionInfo* upper_expr, int64_t stride_value, - Primitive::Type type, + DataType::Type type, IfCondition cmp); bool FitsNarrowerControl(InductionInfo* lower_expr, InductionInfo* upper_expr, int64_t stride_value, - Primitive::Type type, + DataType::Type type, IfCondition cmp); // Assign and lookup. 
void AssignInfo(HLoopInformation* loop, HInstruction* instruction, InductionInfo* info); InductionInfo* LookupInfo(HLoopInformation* loop, HInstruction* instruction); - InductionInfo* CreateConstant(int64_t value, Primitive::Type type); + InductionInfo* CreateConstant(int64_t value, DataType::Type type); InductionInfo* CreateSimplifiedInvariant(InductionOp op, InductionInfo* a, InductionInfo* b); HInstruction* GetShiftConstant(HLoopInformation* loop, HInstruction* instruction, @@ -250,7 +250,7 @@ class HInductionVarAnalysis : public HOptimization { ArenaSafeMap<HInstruction*, NodeInfo> map_; ArenaVector<HInstruction*> scc_; ArenaSafeMap<HInstruction*, InductionInfo*> cycle_; - Primitive::Type type_; + DataType::Type type_; /** * Maintains the results of the analysis as a mapping from loops to a mapping from instructions diff --git a/compiler/optimizing/induction_var_analysis_test.cc b/compiler/optimizing/induction_var_analysis_test.cc index 9516ccb385..53c8044a0b 100644 --- a/compiler/optimizing/induction_var_analysis_test.cc +++ b/compiler/optimizing/induction_var_analysis_test.cc @@ -94,7 +94,7 @@ class InductionVarAnalysisTest : public CommonCompilerTest { // Provide entry and exit instructions. parameter_ = new (&allocator_) HParameterValue( - graph_->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimNot, true); + graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference, true); entry_->AddInstruction(parameter_); constant0_ = graph_->GetIntConstant(0); constant1_ = graph_->GetIntConstant(1); @@ -108,13 +108,13 @@ class InductionVarAnalysisTest : public CommonCompilerTest { // Provide loop instructions. for (int d = 0; d < n; d++) { - basic_[d] = new (&allocator_) HPhi(&allocator_, d, 0, Primitive::kPrimInt); + basic_[d] = new (&allocator_) HPhi(&allocator_, d, 0, DataType::Type::kInt32); loop_preheader_[d]->AddInstruction(new (&allocator_) HGoto()); loop_header_[d]->AddPhi(basic_[d]); HInstruction* compare = new (&allocator_) HLessThan(basic_[d], constant100_); loop_header_[d]->AddInstruction(compare); loop_header_[d]->AddInstruction(new (&allocator_) HIf(compare)); - increment_[d] = new (&allocator_) HAdd(Primitive::kPrimInt, basic_[d], constant1_); + increment_[d] = new (&allocator_) HAdd(DataType::Type::kInt32, basic_[d], constant1_); loop_body_[d]->AddInstruction(increment_[d]); loop_body_[d]->AddInstruction(new (&allocator_) HGoto()); @@ -141,7 +141,7 @@ class InductionVarAnalysisTest : public CommonCompilerTest { *ifT = ifTrue; *ifF = ifFalse; - HPhi* select_phi = new (&allocator_) HPhi(&allocator_, -1, 0, Primitive::kPrimInt); + HPhi* select_phi = new (&allocator_) HPhi(&allocator_, -1, 0, DataType::Type::kInt32); loop_body_[d]->AddPhi(select_phi); return select_phi; } @@ -154,7 +154,7 @@ class InductionVarAnalysisTest : public CommonCompilerTest { // Inserts a phi to loop header at depth d and returns it. HPhi* InsertLoopPhi(int vreg, int d) { - HPhi* phi = new (&allocator_) HPhi(&allocator_, vreg, 0, Primitive::kPrimInt); + HPhi* phi = new (&allocator_) HPhi(&allocator_, vreg, 0, DataType::Type::kInt32); loop_header_[d]->AddPhi(phi); return phi; } @@ -165,7 +165,7 @@ class InductionVarAnalysisTest : public CommonCompilerTest { // ArraySet is given a float value in order to avoid SsaBuilder typing // it from the array's non-existent reference type info. 
return InsertInstruction(new (&allocator_) HArraySet( - parameter_, subscript, float_constant0_, Primitive::kPrimFloat, 0), d); + parameter_, subscript, float_constant0_, DataType::Type::kFloat32, 0), d); } // Returns induction information of instruction in loop at depth d. @@ -265,8 +265,8 @@ TEST_F(InductionVarAnalysisTest, FindBasicInduction) { HInstruction* store = InsertArrayStore(basic_[0], 0); PerformInductionVarAnalysis(); - EXPECT_STREQ("((1) * i + (0)):PrimInt", GetInductionInfo(store->InputAt(1), 0).c_str()); - EXPECT_STREQ("((1) * i + (1)):PrimInt", GetInductionInfo(increment_[0], 0).c_str()); + EXPECT_STREQ("((1) * i + (0)):Int32", GetInductionInfo(store->InputAt(1), 0).c_str()); + EXPECT_STREQ("((1) * i + (1)):Int32", GetInductionInfo(increment_[0], 0).c_str()); // Offset matters! EXPECT_FALSE(HaveSameInduction(store->InputAt(1), increment_[0])); @@ -286,22 +286,22 @@ TEST_F(InductionVarAnalysisTest, FindDerivedInduction) { // } BuildLoopNest(1); HInstruction* add = InsertInstruction( - new (&allocator_) HAdd(Primitive::kPrimInt, constant100_, basic_[0]), 0); + new (&allocator_) HAdd(DataType::Type::kInt32, constant100_, basic_[0]), 0); HInstruction* sub = InsertInstruction( - new (&allocator_) HSub(Primitive::kPrimInt, constant100_, basic_[0]), 0); + new (&allocator_) HSub(DataType::Type::kInt32, constant100_, basic_[0]), 0); HInstruction* mul = InsertInstruction( - new (&allocator_) HMul(Primitive::kPrimInt, constant100_, basic_[0]), 0); + new (&allocator_) HMul(DataType::Type::kInt32, constant100_, basic_[0]), 0); HInstruction* shl = InsertInstruction( - new (&allocator_) HShl(Primitive::kPrimInt, basic_[0], constant1_), 0); + new (&allocator_) HShl(DataType::Type::kInt32, basic_[0], constant1_), 0); HInstruction* neg = InsertInstruction( - new (&allocator_) HNeg(Primitive::kPrimInt, basic_[0]), 0); + new (&allocator_) HNeg(DataType::Type::kInt32, basic_[0]), 0); PerformInductionVarAnalysis(); - EXPECT_STREQ("((1) * i + (100)):PrimInt", GetInductionInfo(add, 0).c_str()); - EXPECT_STREQ("(( - (1)) * i + (100)):PrimInt", GetInductionInfo(sub, 0).c_str()); - EXPECT_STREQ("((100) * i + (0)):PrimInt", GetInductionInfo(mul, 0).c_str()); - EXPECT_STREQ("((2) * i + (0)):PrimInt", GetInductionInfo(shl, 0).c_str()); - EXPECT_STREQ("(( - (1)) * i + (0)):PrimInt", GetInductionInfo(neg, 0).c_str()); + EXPECT_STREQ("((1) * i + (100)):Int32", GetInductionInfo(add, 0).c_str()); + EXPECT_STREQ("(( - (1)) * i + (100)):Int32", GetInductionInfo(sub, 0).c_str()); + EXPECT_STREQ("((100) * i + (0)):Int32", GetInductionInfo(mul, 0).c_str()); + EXPECT_STREQ("((2) * i + (0)):Int32", GetInductionInfo(shl, 0).c_str()); + EXPECT_STREQ("(( - (1)) * i + (0)):Int32", GetInductionInfo(neg, 0).c_str()); } TEST_F(InductionVarAnalysisTest, FindChainInduction) { @@ -318,19 +318,19 @@ TEST_F(InductionVarAnalysisTest, FindChainInduction) { k_header->AddInput(constant0_); HInstruction* add = InsertInstruction( - new (&allocator_) HAdd(Primitive::kPrimInt, k_header, constant100_), 0); + new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant100_), 0); HInstruction* store1 = InsertArrayStore(add, 0); HInstruction* sub = InsertInstruction( - new (&allocator_) HSub(Primitive::kPrimInt, add, constant1_), 0); + new (&allocator_) HSub(DataType::Type::kInt32, add, constant1_), 0); HInstruction* store2 = InsertArrayStore(sub, 0); k_header->AddInput(sub); PerformInductionVarAnalysis(); - EXPECT_STREQ("(((100) - (1)) * i + (0)):PrimInt", + EXPECT_STREQ("(((100) - (1)) * i + (0)):Int32", GetInductionInfo(k_header, 
0).c_str()); - EXPECT_STREQ("(((100) - (1)) * i + (100)):PrimInt", + EXPECT_STREQ("(((100) - (1)) * i + (100)):Int32", GetInductionInfo(store1->InputAt(1), 0).c_str()); - EXPECT_STREQ("(((100) - (1)) * i + ((100) - (1))):PrimInt", + EXPECT_STREQ("(((100) - (1)) * i + ((100) - (1))):Int32", GetInductionInfo(store2->InputAt(1), 0).c_str()); } @@ -351,11 +351,11 @@ TEST_F(InductionVarAnalysisTest, FindTwoWayBasicInduction) { HPhi* k_body = BuildIf(0, &ifTrue, &ifFalse); // True-branch. - HInstruction* inc1 = new (&allocator_) HAdd(Primitive::kPrimInt, k_header, constant1_); + HInstruction* inc1 = new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant1_); ifTrue->AddInstruction(inc1); k_body->AddInput(inc1); // False-branch. - HInstruction* inc2 = new (&allocator_) HAdd(Primitive::kPrimInt, k_header, constant1_); + HInstruction* inc2 = new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant1_); ifFalse->AddInstruction(inc2); k_body->AddInput(inc2); // Merge over a phi. @@ -363,8 +363,8 @@ TEST_F(InductionVarAnalysisTest, FindTwoWayBasicInduction) { k_header->AddInput(k_body); PerformInductionVarAnalysis(); - EXPECT_STREQ("((1) * i + (0)):PrimInt", GetInductionInfo(k_header, 0).c_str()); - EXPECT_STREQ("((1) * i + (1)):PrimInt", GetInductionInfo(store->InputAt(1), 0).c_str()); + EXPECT_STREQ("((1) * i + (0)):Int32", GetInductionInfo(k_header, 0).c_str()); + EXPECT_STREQ("((1) * i + (1)):Int32", GetInductionInfo(store->InputAt(1), 0).c_str()); // Both increments get same induction. EXPECT_TRUE(HaveSameInduction(store->InputAt(1), inc1)); @@ -384,18 +384,18 @@ TEST_F(InductionVarAnalysisTest, FindTwoWayDerivedInduction) { HPhi* k = BuildIf(0, &ifTrue, &ifFalse); // True-branch. - HInstruction* inc1 = new (&allocator_) HAdd(Primitive::kPrimInt, basic_[0], constant1_); + HInstruction* inc1 = new (&allocator_) HAdd(DataType::Type::kInt32, basic_[0], constant1_); ifTrue->AddInstruction(inc1); k->AddInput(inc1); // False-branch. - HInstruction* inc2 = new (&allocator_) HAdd(Primitive::kPrimInt, basic_[0], constant1_); + HInstruction* inc2 = new (&allocator_) HAdd(DataType::Type::kInt32, basic_[0], constant1_); ifFalse->AddInstruction(inc2); k->AddInput(inc2); // Merge over a phi. HInstruction* store = InsertArrayStore(k, 0); PerformInductionVarAnalysis(); - EXPECT_STREQ("((1) * i + (1)):PrimInt", GetInductionInfo(store->InputAt(1), 0).c_str()); + EXPECT_STREQ("((1) * i + (1)):Int32", GetInductionInfo(store->InputAt(1), 0).c_str()); // Both increments get same induction. 
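Both arms of the diamond compute the same i + 1, which is why the merge phi (and hence the store index) still classifies as the linear ((1) * i + (1)) and why HaveSameInduction holds for either increment, as the next assertion checks. Roughly the source-level shape the two-way tests model (illustrative only):

  // for (int i = 0; i < 100; i++) {
  //   int k = c ? i + 1 : i + 1;  // same value on both branches
  //   a[k] = 0.0f;
  // }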
EXPECT_TRUE(HaveSameInduction(store->InputAt(1), inc1)); @@ -412,17 +412,17 @@ TEST_F(InductionVarAnalysisTest, AddLinear) { BuildLoopNest(1); HInstruction* add1 = InsertInstruction( - new (&allocator_) HAdd(Primitive::kPrimInt, basic_[0], basic_[0]), 0); + new (&allocator_) HAdd(DataType::Type::kInt32, basic_[0], basic_[0]), 0); HInstruction* add2 = InsertInstruction( - new (&allocator_) HAdd(Primitive::kPrimInt, constant7_, basic_[0]), 0); + new (&allocator_) HAdd(DataType::Type::kInt32, constant7_, basic_[0]), 0); HInstruction* add3 = InsertInstruction( - new (&allocator_) HAdd(Primitive::kPrimInt, add1, add2), 0); + new (&allocator_) HAdd(DataType::Type::kInt32, add1, add2), 0); PerformInductionVarAnalysis(); - EXPECT_STREQ("((1) * i + (0)):PrimInt", GetInductionInfo(basic_[0], 0).c_str()); - EXPECT_STREQ("(((1) + (1)) * i + (0)):PrimInt", GetInductionInfo(add1, 0).c_str()); - EXPECT_STREQ("((1) * i + (7)):PrimInt", GetInductionInfo(add2, 0).c_str()); - EXPECT_STREQ("((((1) + (1)) + (1)) * i + (7)):PrimInt", GetInductionInfo(add3, 0).c_str()); + EXPECT_STREQ("((1) * i + (0)):Int32", GetInductionInfo(basic_[0], 0).c_str()); + EXPECT_STREQ("(((1) + (1)) * i + (0)):Int32", GetInductionInfo(add1, 0).c_str()); + EXPECT_STREQ("((1) * i + (7)):Int32", GetInductionInfo(add2, 0).c_str()); + EXPECT_STREQ("((((1) + (1)) + (1)) * i + (7)):Int32", GetInductionInfo(add3, 0).c_str()); } TEST_F(InductionVarAnalysisTest, FindPolynomialInduction) { @@ -438,18 +438,18 @@ TEST_F(InductionVarAnalysisTest, FindPolynomialInduction) { k_header->AddInput(constant1_); HInstruction* mul = InsertInstruction( - new (&allocator_) HMul(Primitive::kPrimInt, basic_[0], constant2_), 0); + new (&allocator_) HMul(DataType::Type::kInt32, basic_[0], constant2_), 0); HInstruction* add = InsertInstruction( - new (&allocator_) HAdd(Primitive::kPrimInt, constant100_, mul), 0); + new (&allocator_) HAdd(DataType::Type::kInt32, constant100_, mul), 0); HInstruction* pol = InsertInstruction( - new (&allocator_) HAdd(Primitive::kPrimInt, add, k_header), 0); + new (&allocator_) HAdd(DataType::Type::kInt32, add, k_header), 0); k_header->AddInput(pol); PerformInductionVarAnalysis(); // Note, only the phi in the cycle and the base linear induction are classified. 
- EXPECT_STREQ("poly(sum_lt(((2) * i + (100)):PrimInt) + (1)):PrimInt", + EXPECT_STREQ("poly(sum_lt(((2) * i + (100)):Int32) + (1)):Int32", GetInductionInfo(k_header, 0).c_str()); - EXPECT_STREQ("((2) * i + (100)):PrimInt", GetInductionInfo(add, 0).c_str()); + EXPECT_STREQ("((2) * i + (100)):Int32", GetInductionInfo(add, 0).c_str()); EXPECT_STREQ("", GetInductionInfo(pol, 0).c_str()); } @@ -469,32 +469,32 @@ TEST_F(InductionVarAnalysisTest, FindPolynomialInductionAndDerived) { k_header->AddInput(constant1_); HInstruction* add = InsertInstruction( - new (&allocator_) HAdd(Primitive::kPrimInt, k_header, constant100_), 0); + new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant100_), 0); HInstruction* sub = InsertInstruction( - new (&allocator_) HSub(Primitive::kPrimInt, k_header, constant1_), 0); + new (&allocator_) HSub(DataType::Type::kInt32, k_header, constant1_), 0); HInstruction* neg = InsertInstruction( - new (&allocator_) HNeg(Primitive::kPrimInt, sub), 0); + new (&allocator_) HNeg(DataType::Type::kInt32, sub), 0); HInstruction* mul = InsertInstruction( - new (&allocator_) HMul(Primitive::kPrimInt, k_header, constant2_), 0); + new (&allocator_) HMul(DataType::Type::kInt32, k_header, constant2_), 0); HInstruction* shl = InsertInstruction( - new (&allocator_) HShl(Primitive::kPrimInt, k_header, constant2_), 0); + new (&allocator_) HShl(DataType::Type::kInt32, k_header, constant2_), 0); HInstruction* pol = InsertInstruction( - new (&allocator_) HAdd(Primitive::kPrimInt, k_header, basic_[0]), 0); + new (&allocator_) HAdd(DataType::Type::kInt32, k_header, basic_[0]), 0); k_header->AddInput(pol); PerformInductionVarAnalysis(); // Note, only the phi in the cycle and derived are classified. - EXPECT_STREQ("poly(sum_lt(((1) * i + (0)):PrimInt) + (1)):PrimInt", + EXPECT_STREQ("poly(sum_lt(((1) * i + (0)):Int32) + (1)):Int32", GetInductionInfo(k_header, 0).c_str()); - EXPECT_STREQ("poly(sum_lt(((1) * i + (0)):PrimInt) + ((1) + (100))):PrimInt", + EXPECT_STREQ("poly(sum_lt(((1) * i + (0)):Int32) + ((1) + (100))):Int32", GetInductionInfo(add, 0).c_str()); - EXPECT_STREQ("poly(sum_lt(((1) * i + (0)):PrimInt) + ((1) - (1))):PrimInt", + EXPECT_STREQ("poly(sum_lt(((1) * i + (0)):Int32) + ((1) - (1))):Int32", GetInductionInfo(sub, 0).c_str()); - EXPECT_STREQ("poly(sum_lt((( - (1)) * i + (0)):PrimInt) + ((1) - (1))):PrimInt", + EXPECT_STREQ("poly(sum_lt((( - (1)) * i + (0)):Int32) + ((1) - (1))):Int32", GetInductionInfo(neg, 0).c_str()); - EXPECT_STREQ("poly(sum_lt(((2) * i + (0)):PrimInt) + (2)):PrimInt", + EXPECT_STREQ("poly(sum_lt(((2) * i + (0)):Int32) + (2)):Int32", GetInductionInfo(mul, 0).c_str()); - EXPECT_STREQ("poly(sum_lt(((4) * i + (0)):PrimInt) + (4)):PrimInt", + EXPECT_STREQ("poly(sum_lt(((4) * i + (0)):Int32) + (4)):Int32", GetInductionInfo(shl, 0).c_str()); EXPECT_STREQ("", GetInductionInfo(pol, 0).c_str()); } @@ -512,21 +512,21 @@ TEST_F(InductionVarAnalysisTest, AddPolynomial) { k_header->AddInput(constant7_); HInstruction* add1 = InsertInstruction( - new (&allocator_) HAdd(Primitive::kPrimInt, k_header, k_header), 0); + new (&allocator_) HAdd(DataType::Type::kInt32, k_header, k_header), 0); HInstruction* add2 = InsertInstruction( - new (&allocator_) HAdd(Primitive::kPrimInt, add1, k_header), 0); + new (&allocator_) HAdd(DataType::Type::kInt32, add1, k_header), 0); HInstruction* add3 = InsertInstruction( - new (&allocator_) HAdd(Primitive::kPrimInt, k_header, basic_[0]), 0); + new (&allocator_) HAdd(DataType::Type::kInt32, k_header, basic_[0]), 0); k_header->AddInput(add3); 
PerformInductionVarAnalysis(); // Note, only the phi in the cycle and added-derived are classified. - EXPECT_STREQ("poly(sum_lt(((1) * i + (0)):PrimInt) + (7)):PrimInt", + EXPECT_STREQ("poly(sum_lt(((1) * i + (0)):Int32) + (7)):Int32", GetInductionInfo(k_header, 0).c_str()); - EXPECT_STREQ("poly(sum_lt((((1) + (1)) * i + (0)):PrimInt) + ((7) + (7))):PrimInt", + EXPECT_STREQ("poly(sum_lt((((1) + (1)) * i + (0)):Int32) + ((7) + (7))):Int32", GetInductionInfo(add1, 0).c_str()); EXPECT_STREQ( - "poly(sum_lt(((((1) + (1)) + (1)) * i + (0)):PrimInt) + (((7) + (7)) + (7))):PrimInt", + "poly(sum_lt(((((1) + (1)) + (1)) * i + (0)):Int32) + (((7) + (7)) + (7))):Int32", GetInductionInfo(add2, 0).c_str()); EXPECT_STREQ("", GetInductionInfo(add3, 0).c_str()); } @@ -542,12 +542,12 @@ TEST_F(InductionVarAnalysisTest, FindGeometricMulInduction) { k_header->AddInput(constant1_); HInstruction* mul = InsertInstruction( - new (&allocator_) HMul(Primitive::kPrimInt, k_header, constant100_), 0); + new (&allocator_) HMul(DataType::Type::kInt32, k_header, constant100_), 0); k_header->AddInput(mul); PerformInductionVarAnalysis(); - EXPECT_STREQ("geo((1) * 100 ^ i + (0)):PrimInt", GetInductionInfo(k_header, 0).c_str()); - EXPECT_STREQ("geo((100) * 100 ^ i + (0)):PrimInt", GetInductionInfo(mul, 0).c_str()); + EXPECT_STREQ("geo((1) * 100 ^ i + (0)):Int32", GetInductionInfo(k_header, 0).c_str()); + EXPECT_STREQ("geo((100) * 100 ^ i + (0)):Int32", GetInductionInfo(mul, 0).c_str()); } TEST_F(InductionVarAnalysisTest, FindGeometricShlInductionAndDerived) { @@ -567,31 +567,31 @@ TEST_F(InductionVarAnalysisTest, FindGeometricShlInductionAndDerived) { k_header->AddInput(constant1_); HInstruction* add1 = InsertInstruction( - new (&allocator_) HAdd(Primitive::kPrimInt, k_header, constant1_), 0); + new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant1_), 0); HInstruction* shl1 = InsertInstruction( - new (&allocator_) HShl(Primitive::kPrimInt, k_header, constant1_), 0); + new (&allocator_) HShl(DataType::Type::kInt32, k_header, constant1_), 0); HInstruction* add2 = InsertInstruction( - new (&allocator_) HAdd(Primitive::kPrimInt, shl1, constant100_), 0); + new (&allocator_) HAdd(DataType::Type::kInt32, shl1, constant100_), 0); HInstruction* sub = InsertInstruction( - new (&allocator_) HSub(Primitive::kPrimInt, shl1, constant1_), 0); + new (&allocator_) HSub(DataType::Type::kInt32, shl1, constant1_), 0); HInstruction* neg = InsertInstruction( - new (&allocator_) HNeg(Primitive::kPrimInt, sub), 0); + new (&allocator_) HNeg(DataType::Type::kInt32, sub), 0); HInstruction* mul = InsertInstruction( - new (&allocator_) HMul(Primitive::kPrimInt, shl1, constant2_), 0); + new (&allocator_) HMul(DataType::Type::kInt32, shl1, constant2_), 0); HInstruction* shl2 = InsertInstruction( - new (&allocator_) HShl(Primitive::kPrimInt, shl1, constant2_), 0); + new (&allocator_) HShl(DataType::Type::kInt32, shl1, constant2_), 0); k_header->AddInput(shl1); PerformInductionVarAnalysis(); - EXPECT_STREQ("geo((1) * 2 ^ i + (0)):PrimInt", GetInductionInfo(k_header, 0).c_str()); - EXPECT_STREQ("geo((1) * 2 ^ i + (1)):PrimInt", GetInductionInfo(add1, 0).c_str()); - EXPECT_STREQ("geo((2) * 2 ^ i + (0)):PrimInt", GetInductionInfo(shl1, 0).c_str()); - EXPECT_STREQ("geo((2) * 2 ^ i + (100)):PrimInt", GetInductionInfo(add2, 0).c_str()); - EXPECT_STREQ("geo((2) * 2 ^ i + ((0) - (1))):PrimInt", GetInductionInfo(sub, 0).c_str()); - EXPECT_STREQ("geo(( - (2)) * 2 ^ i + ( - ((0) - (1)))):PrimInt", + EXPECT_STREQ("geo((1) * 2 ^ i + (0)):Int32", 
GetInductionInfo(k_header, 0).c_str()); + EXPECT_STREQ("geo((1) * 2 ^ i + (1)):Int32", GetInductionInfo(add1, 0).c_str()); + EXPECT_STREQ("geo((2) * 2 ^ i + (0)):Int32", GetInductionInfo(shl1, 0).c_str()); + EXPECT_STREQ("geo((2) * 2 ^ i + (100)):Int32", GetInductionInfo(add2, 0).c_str()); + EXPECT_STREQ("geo((2) * 2 ^ i + ((0) - (1))):Int32", GetInductionInfo(sub, 0).c_str()); + EXPECT_STREQ("geo(( - (2)) * 2 ^ i + ( - ((0) - (1)))):Int32", GetInductionInfo(neg, 0).c_str()); - EXPECT_STREQ("geo(((2) * (2)) * 2 ^ i + (0)):PrimInt", GetInductionInfo(mul, 0).c_str()); - EXPECT_STREQ("geo(((2) * (4)) * 2 ^ i + (0)):PrimInt", GetInductionInfo(shl2, 0).c_str()); + EXPECT_STREQ("geo(((2) * (2)) * 2 ^ i + (0)):Int32", GetInductionInfo(mul, 0).c_str()); + EXPECT_STREQ("geo(((2) * (4)) * 2 ^ i + (0)):Int32", GetInductionInfo(shl2, 0).c_str()); } TEST_F(InductionVarAnalysisTest, FindGeometricDivInductionAndDerived) { @@ -610,24 +610,24 @@ TEST_F(InductionVarAnalysisTest, FindGeometricDivInductionAndDerived) { k_header->AddInput(constant1_); HInstruction* add = InsertInstruction( - new (&allocator_) HAdd(Primitive::kPrimInt, k_header, constant100_), 0); + new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant100_), 0); HInstruction* sub = InsertInstruction( - new (&allocator_) HSub(Primitive::kPrimInt, k_header, constant1_), 0); + new (&allocator_) HSub(DataType::Type::kInt32, k_header, constant1_), 0); HInstruction* neg = InsertInstruction( - new (&allocator_) HNeg(Primitive::kPrimInt, sub), 0); + new (&allocator_) HNeg(DataType::Type::kInt32, sub), 0); HInstruction* mul = InsertInstruction( - new (&allocator_) HMul(Primitive::kPrimInt, k_header, constant2_), 0); + new (&allocator_) HMul(DataType::Type::kInt32, k_header, constant2_), 0); HInstruction* shl = InsertInstruction( - new (&allocator_) HShl(Primitive::kPrimInt, k_header, constant2_), 0); + new (&allocator_) HShl(DataType::Type::kInt32, k_header, constant2_), 0); HInstruction* div = InsertInstruction( - new (&allocator_) HDiv(Primitive::kPrimInt, k_header, constant100_, kNoDexPc), 0); + new (&allocator_) HDiv(DataType::Type::kInt32, k_header, constant100_, kNoDexPc), 0); k_header->AddInput(div); PerformInductionVarAnalysis(); // Note, only the phi in the cycle and direct additive derived are classified. - EXPECT_STREQ("geo((1) * 100 ^ -i + (0)):PrimInt", GetInductionInfo(k_header, 0).c_str()); - EXPECT_STREQ("geo((1) * 100 ^ -i + (100)):PrimInt", GetInductionInfo(add, 0).c_str()); - EXPECT_STREQ("geo((1) * 100 ^ -i + ((0) - (1))):PrimInt", GetInductionInfo(sub, 0).c_str()); + EXPECT_STREQ("geo((1) * 100 ^ -i + (0)):Int32", GetInductionInfo(k_header, 0).c_str()); + EXPECT_STREQ("geo((1) * 100 ^ -i + (100)):Int32", GetInductionInfo(add, 0).c_str()); + EXPECT_STREQ("geo((1) * 100 ^ -i + ((0) - (1))):Int32", GetInductionInfo(sub, 0).c_str()); EXPECT_STREQ("", GetInductionInfo(neg, 0).c_str()); EXPECT_STREQ("", GetInductionInfo(mul, 0).c_str()); EXPECT_STREQ("", GetInductionInfo(shl, 0).c_str()); @@ -645,12 +645,12 @@ TEST_F(InductionVarAnalysisTest, FindGeometricShrInduction) { k_header->AddInput(constant100_); HInstruction* shr = InsertInstruction( - new (&allocator_) HShr(Primitive::kPrimInt, k_header, constant1_), 0); + new (&allocator_) HShr(DataType::Type::kInt32, k_header, constant1_), 0); k_header->AddInput(shr); PerformInductionVarAnalysis(); // Note, only the phi in the cycle is classified. 
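The geo(... ^ -i ...) form expected in the next assertion is just repeated halving: starting from 100, k = k >> 1 yields

  k(0) = 100, k(1) = 50, k(2) = 25, k(3) = 12, ...

which for these non-negative values equals 100 / 2^i under integer division, recorded as geo((100) * 2 ^ -i + (0)).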
- EXPECT_STREQ("geo((100) * 2 ^ -i + (0)):PrimInt", GetInductionInfo(k_header, 0).c_str()); + EXPECT_STREQ("geo((100) * 2 ^ -i + (0)):Int32", GetInductionInfo(k_header, 0).c_str()); EXPECT_STREQ("", GetInductionInfo(shr, 0).c_str()); } @@ -665,7 +665,7 @@ TEST_F(InductionVarAnalysisTest, FindNotGeometricShrInduction) { k_header->AddInput(constantm1_); HInstruction* shr = InsertInstruction( - new (&allocator_) HShr(Primitive::kPrimInt, k_header, constant1_), 0); + new (&allocator_) HShr(DataType::Type::kInt32, k_header, constant1_), 0); k_header->AddInput(shr); PerformInductionVarAnalysis(); @@ -689,27 +689,32 @@ TEST_F(InductionVarAnalysisTest, FindRemWrapAroundInductionAndDerived) { k_header->AddInput(constant100_); HInstruction* add = InsertInstruction( - new (&allocator_) HAdd(Primitive::kPrimInt, k_header, constant100_), 0); + new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant100_), 0); HInstruction* sub = InsertInstruction( - new (&allocator_) HSub(Primitive::kPrimInt, k_header, constant1_), 0); + new (&allocator_) HSub(DataType::Type::kInt32, k_header, constant1_), 0); HInstruction* neg = InsertInstruction( - new (&allocator_) HNeg(Primitive::kPrimInt, sub), 0); + new (&allocator_) HNeg(DataType::Type::kInt32, sub), 0); HInstruction* mul = InsertInstruction( - new (&allocator_) HMul(Primitive::kPrimInt, k_header, constant2_), 0); + new (&allocator_) HMul(DataType::Type::kInt32, k_header, constant2_), 0); HInstruction* shl = InsertInstruction( - new (&allocator_) HShl(Primitive::kPrimInt, k_header, constant2_), 0); + new (&allocator_) HShl(DataType::Type::kInt32, k_header, constant2_), 0); HInstruction* rem = InsertInstruction( - new (&allocator_) HRem(Primitive::kPrimInt, k_header, constant7_, kNoDexPc), 0); + new (&allocator_) HRem(DataType::Type::kInt32, k_header, constant7_, kNoDexPc), 0); k_header->AddInput(rem); PerformInductionVarAnalysis(); // Note, only the phi in the cycle and derived are classified. 
- EXPECT_STREQ("wrap((100), ((100) % (7))):PrimInt", GetInductionInfo(k_header, 0).c_str()); - EXPECT_STREQ("wrap(((100) + (100)), (((100) % (7)) + (100))):PrimInt", GetInductionInfo(add, 0).c_str()); - EXPECT_STREQ("wrap(((100) - (1)), (((100) % (7)) - (1))):PrimInt", GetInductionInfo(sub, 0).c_str()); - EXPECT_STREQ("wrap(( - ((100) - (1))), ( - (((100) % (7)) - (1)))):PrimInt", GetInductionInfo(neg, 0).c_str()); - EXPECT_STREQ("wrap(((100) * (2)), (((100) % (7)) * (2))):PrimInt", GetInductionInfo(mul, 0).c_str()); - EXPECT_STREQ("wrap(((100) * (4)), (((100) % (7)) * (4))):PrimInt", GetInductionInfo(shl, 0).c_str()); + EXPECT_STREQ("wrap((100), ((100) % (7))):Int32", GetInductionInfo(k_header, 0).c_str()); + EXPECT_STREQ("wrap(((100) + (100)), (((100) % (7)) + (100))):Int32", + GetInductionInfo(add, 0).c_str()); + EXPECT_STREQ("wrap(((100) - (1)), (((100) % (7)) - (1))):Int32", + GetInductionInfo(sub, 0).c_str()); + EXPECT_STREQ("wrap(( - ((100) - (1))), ( - (((100) % (7)) - (1)))):Int32", + GetInductionInfo(neg, 0).c_str()); + EXPECT_STREQ("wrap(((100) * (2)), (((100) % (7)) * (2))):Int32", + GetInductionInfo(mul, 0).c_str()); + EXPECT_STREQ("wrap(((100) * (4)), (((100) % (7)) * (4))):Int32", + GetInductionInfo(shl, 0).c_str()); EXPECT_STREQ("", GetInductionInfo(rem, 0).c_str()); } @@ -726,15 +731,15 @@ TEST_F(InductionVarAnalysisTest, FindFirstOrderWrapAroundInduction) { HInstruction* store = InsertArrayStore(k_header, 0); HInstruction* sub = InsertInstruction( - new (&allocator_) HSub(Primitive::kPrimInt, constant100_, basic_[0]), 0); + new (&allocator_) HSub(DataType::Type::kInt32, constant100_, basic_[0]), 0); k_header->AddInput(sub); PerformInductionVarAnalysis(); - EXPECT_STREQ("wrap((0), (( - (1)) * i + (100)):PrimInt):PrimInt", + EXPECT_STREQ("wrap((0), (( - (1)) * i + (100)):Int32):Int32", GetInductionInfo(k_header, 0).c_str()); - EXPECT_STREQ("wrap((0), (( - (1)) * i + (100)):PrimInt):PrimInt", + EXPECT_STREQ("wrap((0), (( - (1)) * i + (100)):Int32):Int32", GetInductionInfo(store->InputAt(1), 0).c_str()); - EXPECT_STREQ("(( - (1)) * i + (100)):PrimInt", GetInductionInfo(sub, 0).c_str()); + EXPECT_STREQ("(( - (1)) * i + (100)):Int32", GetInductionInfo(sub, 0).c_str()); } TEST_F(InductionVarAnalysisTest, FindSecondOrderWrapAroundInduction) { @@ -755,11 +760,11 @@ TEST_F(InductionVarAnalysisTest, FindSecondOrderWrapAroundInduction) { HInstruction* store = InsertArrayStore(k_header, 0); k_header->AddInput(t); HInstruction* sub = InsertInstruction( - new (&allocator_) HSub(Primitive::kPrimInt, constant100_, basic_[0], 0), 0); + new (&allocator_) HSub(DataType::Type::kInt32, constant100_, basic_[0], 0), 0); t->AddInput(sub); PerformInductionVarAnalysis(); - EXPECT_STREQ("wrap((0), wrap((100), (( - (1)) * i + (100)):PrimInt):PrimInt):PrimInt", + EXPECT_STREQ("wrap((0), wrap((100), (( - (1)) * i + (100)):Int32):Int32):Int32", GetInductionInfo(store->InputAt(1), 0).c_str()); } @@ -780,34 +785,34 @@ TEST_F(InductionVarAnalysisTest, FindWrapAroundDerivedInduction) { k_header->AddInput(constant0_); HInstruction* add = InsertInstruction( - new (&allocator_) HAdd(Primitive::kPrimInt, k_header, constant100_), 0); + new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant100_), 0); HInstruction* sub = InsertInstruction( - new (&allocator_) HSub(Primitive::kPrimInt, k_header, constant100_), 0); + new (&allocator_) HSub(DataType::Type::kInt32, k_header, constant100_), 0); HInstruction* mul = InsertInstruction( - new (&allocator_) HMul(Primitive::kPrimInt, k_header, constant100_), 0); + 
new (&allocator_) HMul(DataType::Type::kInt32, k_header, constant100_), 0); HInstruction* shl1 = InsertInstruction( - new (&allocator_) HShl(Primitive::kPrimInt, k_header, constant1_), 0); + new (&allocator_) HShl(DataType::Type::kInt32, k_header, constant1_), 0); HInstruction* neg1 = InsertInstruction( - new (&allocator_) HNeg(Primitive::kPrimInt, k_header), 0); + new (&allocator_) HNeg(DataType::Type::kInt32, k_header), 0); HInstruction* shl2 = InsertInstruction( - new (&allocator_) HShl(Primitive::kPrimInt, basic_[0], constant1_), 0); + new (&allocator_) HShl(DataType::Type::kInt32, basic_[0], constant1_), 0); HInstruction* neg2 = InsertInstruction( - new (&allocator_) HNeg(Primitive::kPrimInt, shl2), 0); + new (&allocator_) HNeg(DataType::Type::kInt32, shl2), 0); k_header->AddInput(shl2); PerformInductionVarAnalysis(); - EXPECT_STREQ("wrap((100), ((2) * i + (100)):PrimInt):PrimInt", + EXPECT_STREQ("wrap((100), ((2) * i + (100)):Int32):Int32", GetInductionInfo(add, 0).c_str()); - EXPECT_STREQ("wrap(((0) - (100)), ((2) * i + ((0) - (100))):PrimInt):PrimInt", + EXPECT_STREQ("wrap(((0) - (100)), ((2) * i + ((0) - (100))):Int32):Int32", GetInductionInfo(sub, 0).c_str()); - EXPECT_STREQ("wrap((0), (((2) * (100)) * i + (0)):PrimInt):PrimInt", + EXPECT_STREQ("wrap((0), (((2) * (100)) * i + (0)):Int32):Int32", GetInductionInfo(mul, 0).c_str()); - EXPECT_STREQ("wrap((0), (((2) * (2)) * i + (0)):PrimInt):PrimInt", + EXPECT_STREQ("wrap((0), (((2) * (2)) * i + (0)):Int32):Int32", GetInductionInfo(shl1, 0).c_str()); - EXPECT_STREQ("wrap((0), (( - (2)) * i + (0)):PrimInt):PrimInt", + EXPECT_STREQ("wrap((0), (( - (2)) * i + (0)):Int32):Int32", GetInductionInfo(neg1, 0).c_str()); - EXPECT_STREQ("((2) * i + (0)):PrimInt", GetInductionInfo(shl2, 0).c_str()); - EXPECT_STREQ("(( - (2)) * i + (0)):PrimInt", GetInductionInfo(neg2, 0).c_str()); + EXPECT_STREQ("((2) * i + (0)):Int32", GetInductionInfo(shl2, 0).c_str()); + EXPECT_STREQ("(( - (2)) * i + (0)):Int32", GetInductionInfo(neg2, 0).c_str()); } TEST_F(InductionVarAnalysisTest, FindPeriodicInduction) { @@ -834,8 +839,8 @@ TEST_F(InductionVarAnalysisTest, FindPeriodicInduction) { t->AddInput(k_header); PerformInductionVarAnalysis(); - EXPECT_STREQ("periodic((0), (100)):PrimInt", GetInductionInfo(store1->InputAt(1), 0).c_str()); - EXPECT_STREQ("periodic((100), (0)):PrimInt", GetInductionInfo(store2->InputAt(1), 0).c_str()); + EXPECT_STREQ("periodic((0), (100)):Int32", GetInductionInfo(store1->InputAt(1), 0).c_str()); + EXPECT_STREQ("periodic((100), (0)):Int32", GetInductionInfo(store2->InputAt(1), 0).c_str()); } TEST_F(InductionVarAnalysisTest, FindIdiomaticPeriodicInduction) { @@ -851,12 +856,12 @@ TEST_F(InductionVarAnalysisTest, FindIdiomaticPeriodicInduction) { HInstruction* store = InsertArrayStore(k_header, 0); HInstruction* sub = InsertInstruction( - new (&allocator_) HSub(Primitive::kPrimInt, constant1_, k_header), 0); + new (&allocator_) HSub(DataType::Type::kInt32, constant1_, k_header), 0); k_header->AddInput(sub); PerformInductionVarAnalysis(); - EXPECT_STREQ("periodic((0), (1)):PrimInt", GetInductionInfo(store->InputAt(1), 0).c_str()); - EXPECT_STREQ("periodic((1), (0)):PrimInt", GetInductionInfo(sub, 0).c_str()); + EXPECT_STREQ("periodic((0), (1)):Int32", GetInductionInfo(store->InputAt(1), 0).c_str()); + EXPECT_STREQ("periodic((1), (0)):Int32", GetInductionInfo(sub, 0).c_str()); } TEST_F(InductionVarAnalysisTest, FindXorPeriodicInduction) { @@ -872,12 +877,12 @@ TEST_F(InductionVarAnalysisTest, FindXorPeriodicInduction) { HInstruction* store 
= InsertArrayStore(k_header, 0); HInstruction* x = InsertInstruction( - new (&allocator_) HXor(Primitive::kPrimInt, k_header, constant1_), 0); + new (&allocator_) HXor(DataType::Type::kInt32, k_header, constant1_), 0); k_header->AddInput(x); PerformInductionVarAnalysis(); - EXPECT_STREQ("periodic((0), (1)):PrimInt", GetInductionInfo(store->InputAt(1), 0).c_str()); - EXPECT_STREQ("periodic((1), (0)):PrimInt", GetInductionInfo(x, 0).c_str()); + EXPECT_STREQ("periodic((0), (1)):Int32", GetInductionInfo(store->InputAt(1), 0).c_str()); + EXPECT_STREQ("periodic((1), (0)):Int32", GetInductionInfo(x, 0).c_str()); } TEST_F(InductionVarAnalysisTest, FindXorConstantLeftPeriodicInduction) { @@ -891,12 +896,12 @@ TEST_F(InductionVarAnalysisTest, FindXorConstantLeftPeriodicInduction) { k_header->AddInput(constant1_); HInstruction* x = InsertInstruction( - new (&allocator_) HXor(Primitive::kPrimInt, constant1_, k_header), 0); + new (&allocator_) HXor(DataType::Type::kInt32, constant1_, k_header), 0); k_header->AddInput(x); PerformInductionVarAnalysis(); - EXPECT_STREQ("periodic((1), ((1) ^ (1))):PrimInt", GetInductionInfo(k_header, 0).c_str()); - EXPECT_STREQ("periodic(((1) ^ (1)), (1)):PrimInt", GetInductionInfo(x, 0).c_str()); + EXPECT_STREQ("periodic((1), ((1) ^ (1))):Int32", GetInductionInfo(k_header, 0).c_str()); + EXPECT_STREQ("periodic(((1) ^ (1)), (1)):Int32", GetInductionInfo(x, 0).c_str()); } TEST_F(InductionVarAnalysisTest, FindXor100PeriodicInduction) { @@ -910,12 +915,12 @@ TEST_F(InductionVarAnalysisTest, FindXor100PeriodicInduction) { k_header->AddInput(constant1_); HInstruction* x = InsertInstruction( - new (&allocator_) HXor(Primitive::kPrimInt, k_header, constant100_), 0); + new (&allocator_) HXor(DataType::Type::kInt32, k_header, constant100_), 0); k_header->AddInput(x); PerformInductionVarAnalysis(); - EXPECT_STREQ("periodic((1), ((1) ^ (100))):PrimInt", GetInductionInfo(k_header, 0).c_str()); - EXPECT_STREQ("periodic(((1) ^ (100)), (1)):PrimInt", GetInductionInfo(x, 0).c_str()); + EXPECT_STREQ("periodic((1), ((1) ^ (100))):Int32", GetInductionInfo(k_header, 0).c_str()); + EXPECT_STREQ("periodic(((1) ^ (100)), (1)):Int32", GetInductionInfo(x, 0).c_str()); } TEST_F(InductionVarAnalysisTest, FindBooleanEqPeriodicInduction) { @@ -932,8 +937,8 @@ TEST_F(InductionVarAnalysisTest, FindBooleanEqPeriodicInduction) { k_header->AddInput(x); PerformInductionVarAnalysis(); - EXPECT_STREQ("periodic((0), (1)):PrimBoolean", GetInductionInfo(k_header, 0).c_str()); - EXPECT_STREQ("periodic((1), (0)):PrimBoolean", GetInductionInfo(x, 0).c_str()); + EXPECT_STREQ("periodic((0), (1)):Bool", GetInductionInfo(k_header, 0).c_str()); + EXPECT_STREQ("periodic((1), (0)):Bool", GetInductionInfo(x, 0).c_str()); } TEST_F(InductionVarAnalysisTest, FindBooleanEqConstantLeftPeriodicInduction) { @@ -950,8 +955,8 @@ TEST_F(InductionVarAnalysisTest, FindBooleanEqConstantLeftPeriodicInduction) { k_header->AddInput(x); PerformInductionVarAnalysis(); - EXPECT_STREQ("periodic((0), (1)):PrimBoolean", GetInductionInfo(k_header, 0).c_str()); - EXPECT_STREQ("periodic((1), (0)):PrimBoolean", GetInductionInfo(x, 0).c_str()); + EXPECT_STREQ("periodic((0), (1)):Bool", GetInductionInfo(k_header, 0).c_str()); + EXPECT_STREQ("periodic((1), (0)):Bool", GetInductionInfo(x, 0).c_str()); } TEST_F(InductionVarAnalysisTest, FindBooleanNePeriodicInduction) { @@ -968,8 +973,8 @@ TEST_F(InductionVarAnalysisTest, FindBooleanNePeriodicInduction) { k_header->AddInput(x); PerformInductionVarAnalysis(); - EXPECT_STREQ("periodic((0), 
(1)):PrimBoolean", GetInductionInfo(k_header, 0).c_str()); - EXPECT_STREQ("periodic((1), (0)):PrimBoolean", GetInductionInfo(x, 0).c_str()); + EXPECT_STREQ("periodic((0), (1)):Bool", GetInductionInfo(k_header, 0).c_str()); + EXPECT_STREQ("periodic((1), (0)):Bool", GetInductionInfo(x, 0).c_str()); } TEST_F(InductionVarAnalysisTest, FindBooleanNeConstantLeftPeriodicInduction) { @@ -986,8 +991,8 @@ TEST_F(InductionVarAnalysisTest, FindBooleanNeConstantLeftPeriodicInduction) { k_header->AddInput(x); PerformInductionVarAnalysis(); - EXPECT_STREQ("periodic((0), (1)):PrimBoolean", GetInductionInfo(k_header, 0).c_str()); - EXPECT_STREQ("periodic((1), (0)):PrimBoolean", GetInductionInfo(x, 0).c_str()); + EXPECT_STREQ("periodic((0), (1)):Bool", GetInductionInfo(k_header, 0).c_str()); + EXPECT_STREQ("periodic((1), (0)):Bool", GetInductionInfo(x, 0).c_str()); } TEST_F(InductionVarAnalysisTest, FindDerivedPeriodicInduction) { @@ -1007,30 +1012,30 @@ TEST_F(InductionVarAnalysisTest, FindDerivedPeriodicInduction) { k_header->AddInput(constant0_); HInstruction* neg1 = InsertInstruction( - new (&allocator_) HNeg(Primitive::kPrimInt, k_header), 0); + new (&allocator_) HNeg(DataType::Type::kInt32, k_header), 0); HInstruction* idiom = InsertInstruction( - new (&allocator_) HSub(Primitive::kPrimInt, constant1_, k_header), 0); + new (&allocator_) HSub(DataType::Type::kInt32, constant1_, k_header), 0); HInstruction* add = InsertInstruction( - new (&allocator_) HAdd(Primitive::kPrimInt, idiom, constant100_), 0); + new (&allocator_) HAdd(DataType::Type::kInt32, idiom, constant100_), 0); HInstruction* sub = InsertInstruction( - new (&allocator_) HSub(Primitive::kPrimInt, idiom, constant100_), 0); + new (&allocator_) HSub(DataType::Type::kInt32, idiom, constant100_), 0); HInstruction* mul = InsertInstruction( - new (&allocator_) HMul(Primitive::kPrimInt, idiom, constant100_), 0); + new (&allocator_) HMul(DataType::Type::kInt32, idiom, constant100_), 0); HInstruction* shl = InsertInstruction( - new (&allocator_) HShl(Primitive::kPrimInt, idiom, constant1_), 0); + new (&allocator_) HShl(DataType::Type::kInt32, idiom, constant1_), 0); HInstruction* neg2 = InsertInstruction( - new (&allocator_) HNeg(Primitive::kPrimInt, idiom), 0); + new (&allocator_) HNeg(DataType::Type::kInt32, idiom), 0); k_header->AddInput(idiom); PerformInductionVarAnalysis(); - EXPECT_STREQ("periodic((0), (1)):PrimInt", GetInductionInfo(k_header, 0).c_str()); - EXPECT_STREQ("periodic((0), ( - (1))):PrimInt", GetInductionInfo(neg1, 0).c_str()); - EXPECT_STREQ("periodic((1), (0)):PrimInt", GetInductionInfo(idiom, 0).c_str()); - EXPECT_STREQ("periodic(((1) + (100)), (100)):PrimInt", GetInductionInfo(add, 0).c_str()); - EXPECT_STREQ("periodic(((1) - (100)), ((0) - (100))):PrimInt", GetInductionInfo(sub, 0).c_str()); - EXPECT_STREQ("periodic((100), (0)):PrimInt", GetInductionInfo(mul, 0).c_str()); - EXPECT_STREQ("periodic((2), (0)):PrimInt", GetInductionInfo(shl, 0).c_str()); - EXPECT_STREQ("periodic(( - (1)), (0)):PrimInt", GetInductionInfo(neg2, 0).c_str()); + EXPECT_STREQ("periodic((0), (1)):Int32", GetInductionInfo(k_header, 0).c_str()); + EXPECT_STREQ("periodic((0), ( - (1))):Int32", GetInductionInfo(neg1, 0).c_str()); + EXPECT_STREQ("periodic((1), (0)):Int32", GetInductionInfo(idiom, 0).c_str()); + EXPECT_STREQ("periodic(((1) + (100)), (100)):Int32", GetInductionInfo(add, 0).c_str()); + EXPECT_STREQ("periodic(((1) - (100)), ((0) - (100))):Int32", GetInductionInfo(sub, 0).c_str()); + EXPECT_STREQ("periodic((100), (0)):Int32", GetInductionInfo(mul, 
0).c_str()); + EXPECT_STREQ("periodic((2), (0)):Int32", GetInductionInfo(shl, 0).c_str()); + EXPECT_STREQ("periodic(( - (1)), (0)):Int32", GetInductionInfo(neg2, 0).c_str()); } TEST_F(InductionVarAnalysisTest, FindDeepLoopInduction) { @@ -1052,7 +1057,7 @@ TEST_F(InductionVarAnalysisTest, FindDeepLoopInduction) { } HInstruction* inc = InsertInstruction( - new (&allocator_) HAdd(Primitive::kPrimInt, constant1_, k_header[9]), 9); + new (&allocator_) HAdd(DataType::Type::kInt32, constant1_, k_header[9]), 9); HInstruction* store = InsertArrayStore(inc, 9); for (int d = 0; d < 10; d++) { @@ -1063,7 +1068,7 @@ TEST_F(InductionVarAnalysisTest, FindDeepLoopInduction) { // Avoid exact phi number, since that depends on the SSA building phase. std::regex r("\\(\\(1\\) \\* i \\+ " - "\\(\\(1\\) \\+ \\(\\d+:Phi\\)\\)\\):PrimInt"); + "\\(\\(1\\) \\+ \\(\\d+:Phi\\)\\)\\):Int32"); for (int d = 0; d < 10; d++) { if (d == 9) { @@ -1071,7 +1076,7 @@ TEST_F(InductionVarAnalysisTest, FindDeepLoopInduction) { } else { EXPECT_STREQ("", GetInductionInfo(store->InputAt(1), d).c_str()); } - EXPECT_STREQ("((1) * i + (1)):PrimInt", GetInductionInfo(increment_[d], d).c_str()); + EXPECT_STREQ("((1) * i + (1)):Int32", GetInductionInfo(increment_[d], d).c_str()); // Trip-count. EXPECT_STREQ("((100) (TC-loop) ((0) < (100)))", GetTripCount(d).c_str()); } @@ -1086,15 +1091,15 @@ TEST_F(InductionVarAnalysisTest, ByteInductionIntLoopControl) { // } BuildLoopNest(1); HInstruction* conv = InsertInstruction( - new (&allocator_) HTypeConversion(Primitive::kPrimByte, basic_[0], kNoDexPc), 0); + new (&allocator_) HTypeConversion(DataType::Type::kInt8, basic_[0], kNoDexPc), 0); HInstruction* store1 = InsertArrayStore(conv, 0); HInstruction* store2 = InsertArrayStore(basic_[0], 0); PerformInductionVarAnalysis(); // Regular int induction (i) is transferred over conversion into byte induction (k). - EXPECT_STREQ("((1) * i + (0)):PrimByte", GetInductionInfo(store1->InputAt(1), 0).c_str()); - EXPECT_STREQ("((1) * i + (0)):PrimInt", GetInductionInfo(store2->InputAt(1), 0).c_str()); - EXPECT_STREQ("((1) * i + (1)):PrimInt", GetInductionInfo(increment_[0], 0).c_str()); + EXPECT_STREQ("((1) * i + (0)):Int8", GetInductionInfo(store1->InputAt(1), 0).c_str()); + EXPECT_STREQ("((1) * i + (0)):Int32", GetInductionInfo(store2->InputAt(1), 0).c_str()); + EXPECT_STREQ("((1) * i + (1)):Int32", GetInductionInfo(increment_[0], 0).c_str()); // Narrowing detected. EXPECT_TRUE(IsNarrowingLinear(store1->InputAt(1))); @@ -1117,17 +1122,17 @@ TEST_F(InductionVarAnalysisTest, ByteInductionDerivedIntLoopControl) { // } BuildLoopNest(1); HInstruction* conv = InsertInstruction( - new (&allocator_) HTypeConversion(Primitive::kPrimByte, basic_[0], kNoDexPc), 0); + new (&allocator_) HTypeConversion(DataType::Type::kInt8, basic_[0], kNoDexPc), 0); HInstruction* store1 = InsertArrayStore(conv, 0); HInstruction* add = InsertInstruction( - new (&allocator_) HAdd(Primitive::kPrimInt, conv, constant1_), 0); + new (&allocator_) HAdd(DataType::Type::kInt32, conv, constant1_), 0); HInstruction* store2 = InsertArrayStore(add, 0); PerformInductionVarAnalysis(); // Byte induction (k) is detected, but it does not transfer over the addition, // since this may yield out-of-type values. - EXPECT_STREQ("((1) * i + (0)):PrimByte", GetInductionInfo(store1->InputAt(1), 0).c_str()); + EXPECT_STREQ("((1) * i + (0)):Int8", GetInductionInfo(store1->InputAt(1), 0).c_str()); EXPECT_STREQ("", GetInductionInfo(store2->InputAt(1), 0).c_str()); // Narrowing detected. 
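The expectation strings in the hunks above change only in their type suffix (PrimInt becomes Int32, PrimByte becomes Int8, PrimChar becomes Uint16, and so on), which summarizes the enumerator renaming this patch applies throughout the compiler: the new names state bit width and signedness instead of the Java source type. A minimal sketch of that correspondence is shown below as a standalone helper rather than the real art::DataType class; the kBool spelling is an assumption, since only its printed form "Bool" appears in these tests.

// Hypothetical helper: maps the old Primitive enumerators to the new
// DataType::Type spellings, as implied by the replacements in this patch.
#include <cstdio>

enum class OldPrimitive { kPrimBoolean, kPrimByte, kPrimChar, kPrimShort,
                          kPrimInt, kPrimLong, kPrimFloat, kPrimDouble,
                          kPrimNot, kPrimVoid };

const char* NewDataTypeName(OldPrimitive p) {
  switch (p) {
    case OldPrimitive::kPrimBoolean: return "DataType::Type::kBool";      // assumed spelling
    case OldPrimitive::kPrimByte:    return "DataType::Type::kInt8";
    case OldPrimitive::kPrimChar:    return "DataType::Type::kUint16";    // char is unsigned 16-bit
    case OldPrimitive::kPrimShort:   return "DataType::Type::kInt16";
    case OldPrimitive::kPrimInt:     return "DataType::Type::kInt32";
    case OldPrimitive::kPrimLong:    return "DataType::Type::kInt64";
    case OldPrimitive::kPrimFloat:   return "DataType::Type::kFloat32";
    case OldPrimitive::kPrimDouble:  return "DataType::Type::kFloat64";
    case OldPrimitive::kPrimNot:     return "DataType::Type::kReference"; // "Not" meant "reference"
    case OldPrimitive::kPrimVoid:    return "DataType::Type::kVoid";
  }
  return "?";
}

int main() {
  // Mirrors the test expectation change from "...):PrimInt" to "...):Int32".
  std::printf("kPrimInt -> %s\n", NewDataTypeName(OldPrimitive::kPrimInt));
  return 0;
}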
@@ -1147,15 +1152,15 @@ TEST_F(InductionVarAnalysisTest, ByteInduction) { k_header->AddInput(graph_->GetIntConstant(-128)); HInstruction* add = InsertInstruction( - new (&allocator_) HAdd(Primitive::kPrimInt, k_header, constant1_), 0); + new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant1_), 0); HInstruction* conv = InsertInstruction( - new (&allocator_) HTypeConversion(Primitive::kPrimByte, add, kNoDexPc), 0); + new (&allocator_) HTypeConversion(DataType::Type::kInt8, add, kNoDexPc), 0); k_header->AddInput(conv); PerformInductionVarAnalysis(); // Byte induction (k) is detected, but it does not transfer over the addition, // since this may yield out-of-type values. - EXPECT_STREQ("((1) * i + (-128)):PrimByte", GetInductionInfo(k_header, 0).c_str()); + EXPECT_STREQ("((1) * i + (-128)):Int8", GetInductionInfo(k_header, 0).c_str()); EXPECT_STREQ("", GetInductionInfo(add, 0).c_str()); // Narrowing detected. @@ -1175,9 +1180,9 @@ TEST_F(InductionVarAnalysisTest, NoByteInduction1) { k_header->AddInput(graph_->GetIntConstant(-129)); HInstruction* add = InsertInstruction( - new (&allocator_) HAdd(Primitive::kPrimInt, k_header, constant1_), 0); + new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant1_), 0); HInstruction* conv = InsertInstruction( - new (&allocator_) HTypeConversion(Primitive::kPrimByte, add, kNoDexPc), 0); + new (&allocator_) HTypeConversion(DataType::Type::kInt8, add, kNoDexPc), 0); k_header->AddInput(conv); PerformInductionVarAnalysis(); @@ -1197,9 +1202,9 @@ TEST_F(InductionVarAnalysisTest, NoByteInduction2) { k_header->AddInput(constant0_); HInstruction* conv = InsertInstruction( - new (&allocator_) HTypeConversion(Primitive::kPrimByte, k_header, kNoDexPc), 0); + new (&allocator_) HTypeConversion(DataType::Type::kInt8, k_header, kNoDexPc), 0); HInstruction* add = InsertInstruction( - new (&allocator_) HAdd(Primitive::kPrimInt, conv, constant1_), 0); + new (&allocator_) HAdd(DataType::Type::kInt32, conv, constant1_), 0); k_header->AddInput(add); PerformInductionVarAnalysis(); @@ -1216,13 +1221,13 @@ TEST_F(InductionVarAnalysisTest, ByteLoopControl1) { HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious(); ifs->ReplaceInput(graph_->GetIntConstant(127), 1); HInstruction* conv = - new (&allocator_) HTypeConversion(Primitive::kPrimByte, increment_[0], kNoDexPc); + new (&allocator_) HTypeConversion(DataType::Type::kInt8, increment_[0], kNoDexPc); loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext()); basic_[0]->ReplaceInput(conv, 1); PerformInductionVarAnalysis(); // Recorded at the phi, but not transferred to increment. - EXPECT_STREQ("((1) * i + (-128)):PrimByte", GetInductionInfo(basic_[0], 0).c_str()); + EXPECT_STREQ("((1) * i + (-128)):Int8", GetInductionInfo(basic_[0], 0).c_str()); EXPECT_STREQ("", GetInductionInfo(increment_[0], 0).c_str()); // Narrowing detected. @@ -1242,13 +1247,13 @@ TEST_F(InductionVarAnalysisTest, ByteLoopControl2) { HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious(); ifs->ReplaceInput(graph_->GetIntConstant(128), 1); HInstruction* conv = - new (&allocator_) HTypeConversion(Primitive::kPrimByte, increment_[0], kNoDexPc); + new (&allocator_) HTypeConversion(DataType::Type::kInt8, increment_[0], kNoDexPc); loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext()); basic_[0]->ReplaceInput(conv, 1); PerformInductionVarAnalysis(); // Recorded at the phi, but not transferred to increment. 
- EXPECT_STREQ("((1) * i + (-128)):PrimByte", GetInductionInfo(basic_[0], 0).c_str()); + EXPECT_STREQ("((1) * i + (-128)):Int8", GetInductionInfo(basic_[0], 0).c_str()); EXPECT_STREQ("", GetInductionInfo(increment_[0], 0).c_str()); // Narrowing detected. @@ -1268,13 +1273,13 @@ TEST_F(InductionVarAnalysisTest, ShortLoopControl1) { HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious(); ifs->ReplaceInput(graph_->GetIntConstant(32767), 1); HInstruction* conv = - new (&allocator_) HTypeConversion(Primitive::kPrimShort, increment_[0], kNoDexPc); + new (&allocator_) HTypeConversion(DataType::Type::kInt16, increment_[0], kNoDexPc); loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext()); basic_[0]->ReplaceInput(conv, 1); PerformInductionVarAnalysis(); // Recorded at the phi, but not transferred to increment. - EXPECT_STREQ("((1) * i + (-32768)):PrimShort", GetInductionInfo(basic_[0], 0).c_str()); + EXPECT_STREQ("((1) * i + (-32768)):Int16", GetInductionInfo(basic_[0], 0).c_str()); EXPECT_STREQ("", GetInductionInfo(increment_[0], 0).c_str()); // Narrowing detected. @@ -1294,13 +1299,13 @@ TEST_F(InductionVarAnalysisTest, ShortLoopControl2) { HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious(); ifs->ReplaceInput(graph_->GetIntConstant(32768), 1); HInstruction* conv = - new (&allocator_) HTypeConversion(Primitive::kPrimShort, increment_[0], kNoDexPc); + new (&allocator_) HTypeConversion(DataType::Type::kInt16, increment_[0], kNoDexPc); loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext()); basic_[0]->ReplaceInput(conv, 1); PerformInductionVarAnalysis(); // Recorded at the phi, but not transferred to increment. - EXPECT_STREQ("((1) * i + (-32768)):PrimShort", GetInductionInfo(basic_[0], 0).c_str()); + EXPECT_STREQ("((1) * i + (-32768)):Int16", GetInductionInfo(basic_[0], 0).c_str()); EXPECT_STREQ("", GetInductionInfo(increment_[0], 0).c_str()); // Narrowing detected. @@ -1319,13 +1324,13 @@ TEST_F(InductionVarAnalysisTest, CharLoopControl1) { HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious(); ifs->ReplaceInput(graph_->GetIntConstant(65535), 1); HInstruction* conv = - new (&allocator_) HTypeConversion(Primitive::kPrimChar, increment_[0], kNoDexPc); + new (&allocator_) HTypeConversion(DataType::Type::kUint16, increment_[0], kNoDexPc); loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext()); basic_[0]->ReplaceInput(conv, 1); PerformInductionVarAnalysis(); // Recorded at the phi, but not transferred to increment. - EXPECT_STREQ("((1) * i + (0)):PrimChar", GetInductionInfo(basic_[0], 0).c_str()); + EXPECT_STREQ("((1) * i + (0)):Uint16", GetInductionInfo(basic_[0], 0).c_str()); EXPECT_STREQ("", GetInductionInfo(increment_[0], 0).c_str()); // Narrowing detected. @@ -1344,13 +1349,13 @@ TEST_F(InductionVarAnalysisTest, CharLoopControl2) { HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious(); ifs->ReplaceInput(graph_->GetIntConstant(65536), 1); HInstruction* conv = - new (&allocator_) HTypeConversion(Primitive::kPrimChar, increment_[0], kNoDexPc); + new (&allocator_) HTypeConversion(DataType::Type::kUint16, increment_[0], kNoDexPc); loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext()); basic_[0]->ReplaceInput(conv, 1); PerformInductionVarAnalysis(); // Recorded at the phi, but not transferred to increment. 
- EXPECT_STREQ("((1) * i + (0)):PrimChar", GetInductionInfo(basic_[0], 0).c_str()); + EXPECT_STREQ("((1) * i + (0)):Uint16", GetInductionInfo(basic_[0], 0).c_str()); EXPECT_STREQ("", GetInductionInfo(increment_[0], 0).c_str()); // Narrowing detected. diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc index 191d3d128c..92b584cc3b 100644 --- a/compiler/optimizing/induction_var_range.cc +++ b/compiler/optimizing/induction_var_range.cc @@ -157,15 +157,15 @@ static bool IsConstantValue(InductionVarRange::Value v) { } /** Corrects a value for type to account for arithmetic wrap-around in lower precision. */ -static InductionVarRange::Value CorrectForType(InductionVarRange::Value v, Primitive::Type type) { +static InductionVarRange::Value CorrectForType(InductionVarRange::Value v, DataType::Type type) { switch (type) { - case Primitive::kPrimShort: - case Primitive::kPrimChar: - case Primitive::kPrimByte: { + case DataType::Type::kInt16: + case DataType::Type::kUint16: + case DataType::Type::kInt8: { // Constants within range only. // TODO: maybe some room for improvement, like allowing widening conversions - int32_t min = Primitive::MinValueOfIntegralType(type); - int32_t max = Primitive::MaxValueOfIntegralType(type); + int32_t min = DataType::MinValueOfIntegralType(type); + int32_t max = DataType::MaxValueOfIntegralType(type); return (IsConstantValue(v) && min <= v.b_constant && v.b_constant <= max) ? v : InductionVarRange::Value(); @@ -216,10 +216,10 @@ bool InductionVarRange::GetInductionRange(HInstruction* context, // bounds check elimination, will have truncated higher precision induction // at their use point already). switch (info->type) { - case Primitive::kPrimInt: - case Primitive::kPrimShort: - case Primitive::kPrimChar: - case Primitive::kPrimByte: + case DataType::Type::kInt32: + case DataType::Type::kInt16: + case DataType::Type::kUint16: + case DataType::Type::kInt8: break; default: return false; @@ -689,8 +689,8 @@ InductionVarRange::Value InductionVarRange::GetFetch(HInstruction* instruction, } else if (instruction->IsTypeConversion()) { // Since analysis is 32-bit (or narrower), chase beyond widening along the path. // For example, this discovers the length in: for (long i = 0; i < a.length; i++); - if (instruction->AsTypeConversion()->GetInputType() == Primitive::kPrimInt && - instruction->AsTypeConversion()->GetResultType() == Primitive::kPrimLong) { + if (instruction->AsTypeConversion()->GetInputType() == DataType::Type::kInt32 && + instruction->AsTypeConversion()->GetResultType() == DataType::Type::kInt64) { return GetFetch(instruction->InputAt(0), trip, in_body, is_min); } } @@ -1051,9 +1051,9 @@ bool InductionVarRange::GenerateLastValuePolynomial(HInductionVarAnalysis::Induc HInstruction* c = nullptr; if (GenerateCode(info->op_b, nullptr, graph, block, graph ? 
&c : nullptr, false, false)) { if (graph != nullptr) { - Primitive::Type type = info->type; + DataType::Type type = info->type; int64_t sum = a * ((m * (m - 1)) / 2) + b * m; - if (type != Primitive::kPrimLong) { + if (type != DataType::Type::kInt64) { sum = static_cast<int32_t>(sum); // okay to truncate } *result = @@ -1081,16 +1081,16 @@ bool InductionVarRange::GenerateLastValueGeometric(HInductionVarAnalysis::Induct if (GenerateCode(info->op_a, nullptr, graph, block, &opa, false, false) && GenerateCode(info->op_b, nullptr, graph, block, &opb, false, false)) { if (graph != nullptr) { - Primitive::Type type = info->type; + DataType::Type type = info->type; // Compute f ^ m for known maximum index value m. bool overflow = false; int64_t fpow = IntPow(f, m, &overflow); if (info->operation == HInductionVarAnalysis::kDiv) { // For division, any overflow truncates to zero. - if (overflow || (type != Primitive::kPrimLong && !CanLongValueFitIntoInt(fpow))) { + if (overflow || (type != DataType::Type::kInt64 && !CanLongValueFitIntoInt(fpow))) { fpow = 0; } - } else if (type != Primitive::kPrimLong) { + } else if (type != DataType::Type::kInt64) { // For multiplication, okay to truncate to required precision. DCHECK(info->operation == HInductionVarAnalysis::kMul); fpow = static_cast<int32_t>(fpow); @@ -1161,7 +1161,7 @@ bool InductionVarRange::GenerateLastValuePeriodic(HInductionVarAnalysis::Inducti } // Don't rely on FP arithmetic to be precise, unless the full period // consist of pre-computed expressions only. - if (info->type == Primitive::kPrimFloat || info->type == Primitive::kPrimDouble) { + if (info->type == DataType::Type::kFloat32 || info->type == DataType::Type::kFloat64) { if (!all_invariants) { return false; } @@ -1187,7 +1187,7 @@ bool InductionVarRange::GenerateLastValuePeriodic(HInductionVarAnalysis::Inducti GenerateCode(trip->op_a, nullptr, graph, block, graph ? &t : nullptr, false, false)) { // During actual code generation (graph != nullptr), generate is_even ? x : y. if (graph != nullptr) { - Primitive::Type type = trip->type; + DataType::Type type = trip->type; HInstruction* msk = Insert(block, new (graph->GetArena()) HAnd(type, t, graph->GetConstant(type, 1))); HInstruction* is_even = @@ -1224,7 +1224,7 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info, return true; } // Handle current operation. - Primitive::Type type = info->type; + DataType::Type type = info->type; HInstruction* opa = nullptr; HInstruction* opb = nullptr; switch (info->induction_class) { diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc index 9437014407..1c8426954b 100644 --- a/compiler/optimizing/induction_var_range_test.cc +++ b/compiler/optimizing/induction_var_range_test.cc @@ -71,12 +71,12 @@ class InductionVarRangeTest : public CommonCompilerTest { x_ = new (&allocator_) HParameterValue(graph_->GetDexFile(), dex::TypeIndex(0), 0, - Primitive::kPrimInt); + DataType::Type::kInt32); entry_block_->AddInstruction(x_); y_ = new (&allocator_) HParameterValue(graph_->GetDexFile(), dex::TypeIndex(0), 0, - Primitive::kPrimInt); + DataType::Type::kInt32); entry_block_->AddInstruction(y_); // Set arbitrary range analysis hint while testing private methods. SetHint(x_); @@ -101,7 +101,7 @@ class InductionVarRangeTest : public CommonCompilerTest { return_block->AddSuccessor(exit_block_); // Instructions. 
loop_preheader_->AddInstruction(new (&allocator_) HGoto()); - HPhi* phi = new (&allocator_) HPhi(&allocator_, 0, 0, Primitive::kPrimInt); + HPhi* phi = new (&allocator_) HPhi(&allocator_, 0, 0, DataType::Type::kInt32); loop_header_->AddPhi(phi); phi->AddInput(graph_->GetIntConstant(lower)); // i = l if (stride > 0) { @@ -111,7 +111,8 @@ class InductionVarRangeTest : public CommonCompilerTest { } loop_header_->AddInstruction(condition_); loop_header_->AddInstruction(new (&allocator_) HIf(condition_)); - increment_ = new (&allocator_) HAdd(Primitive::kPrimInt, phi, graph_->GetIntConstant(stride)); + increment_ = + new (&allocator_) HAdd(DataType::Type::kInt32, phi, graph_->GetIntConstant(stride)); loop_body_->AddInstruction(increment_); // i += s phi->AddInput(increment_); loop_body_->AddInstruction(new (&allocator_) HGoto()); @@ -173,7 +174,7 @@ class InductionVarRangeTest : public CommonCompilerTest { return iva_->CreateTripCount(op, CreateConst(tc), CreateInvariant('<', CreateConst(0), CreateConst(tc)), - Primitive::kPrimInt); + DataType::Type::kInt32); } /** Constructs a linear a * i + b induction. */ @@ -183,7 +184,7 @@ class InductionVarRangeTest : public CommonCompilerTest { CreateConst(a), CreateConst(b), nullptr, - Primitive::kPrimInt); + DataType::Type::kInt32); } /** Constructs a polynomial sum(a * i + b) + c induction. */ @@ -193,7 +194,7 @@ class InductionVarRangeTest : public CommonCompilerTest { CreateLinear(a, b), CreateConst(c), nullptr, - Primitive::kPrimInt); + DataType::Type::kInt32); } /** Constructs a geometric a * f^i + b induction. */ @@ -204,7 +205,7 @@ class InductionVarRangeTest : public CommonCompilerTest { CreateConst(a), CreateConst(b), graph_->GetIntConstant(f), - Primitive::kPrimInt); + DataType::Type::kInt32); } /** Constructs a range [lo, hi] using a periodic induction. */ @@ -214,7 +215,7 @@ class InductionVarRangeTest : public CommonCompilerTest { CreateConst(lo), CreateConst(hi), nullptr, - Primitive::kPrimInt); + DataType::Type::kInt32); } /** Constructs a wrap-around induction consisting of a constant, followed by info. */ @@ -226,7 +227,7 @@ class InductionVarRangeTest : public CommonCompilerTest { CreateConst(initial), info, nullptr, - Primitive::kPrimInt); + DataType::Type::kInt32); } /** Constructs a wrap-around induction consisting of a constant, followed by a range. 
*/ @@ -725,13 +726,13 @@ TEST_F(InductionVarRangeTest, ArrayLengthAndHints) { TEST_F(InductionVarRangeTest, AddOrSubAndConstant) { HInstruction* add = new (&allocator_) - HAdd(Primitive::kPrimInt, x_, graph_->GetIntConstant(-1)); + HAdd(DataType::Type::kInt32, x_, graph_->GetIntConstant(-1)); HInstruction* alt = new (&allocator_) - HAdd(Primitive::kPrimInt, graph_->GetIntConstant(-1), x_); + HAdd(DataType::Type::kInt32, graph_->GetIntConstant(-1), x_); HInstruction* sub = new (&allocator_) - HSub(Primitive::kPrimInt, x_, graph_->GetIntConstant(1)); + HSub(DataType::Type::kInt32, x_, graph_->GetIntConstant(1)); HInstruction* rev = new (&allocator_) - HSub(Primitive::kPrimInt, graph_->GetIntConstant(1), x_); + HSub(DataType::Type::kInt32, graph_->GetIntConstant(1), x_); entry_block_->AddInstruction(add); entry_block_->AddInstruction(alt); entry_block_->AddInstruction(sub); diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc index 793e781bae..90e3d2ade7 100644 --- a/compiler/optimizing/inliner.cc +++ b/compiler/optimizing/inliner.cc @@ -21,6 +21,7 @@ #include "builder.h" #include "class_linker.h" #include "constant_folding.h" +#include "data_type-inl.h" #include "dead_code_elimination.h" #include "dex/inline_method_analyser.h" #include "dex/verification_results.h" @@ -707,7 +708,7 @@ HInstanceFieldGet* HInliner::BuildGetReceiverClass(ClassLinker* class_linker, HInstanceFieldGet* result = new (graph_->GetArena()) HInstanceFieldGet( receiver, field, - Primitive::kPrimNot, + DataType::Type::kReference, field->GetOffset(), field->IsVolatile(), field->GetDexFieldIndex(), @@ -1143,9 +1144,9 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget( HInstanceFieldGet* receiver_class = BuildGetReceiverClass( class_linker, receiver, invoke_instruction->GetDexPc()); - Primitive::Type type = Is64BitInstructionSet(graph_->GetInstructionSet()) - ? Primitive::kPrimLong - : Primitive::kPrimInt; + DataType::Type type = Is64BitInstructionSet(graph_->GetInstructionSet()) + ? 
DataType::Type::kInt64 + : DataType::Type::kInt32; HClassTableGet* class_table_get = new (graph_->GetArena()) HClassTableGet( receiver_class, type, @@ -1155,7 +1156,7 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget( invoke_instruction->GetDexPc()); HConstant* constant; - if (type == Primitive::kPrimLong) { + if (type == DataType::Type::kInt64) { constant = graph_->GetLongConstant( reinterpret_cast<intptr_t>(actual_method), invoke_instruction->GetDexPc()); } else { @@ -1253,7 +1254,7 @@ bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction, } invoke_instruction->GetBlock()->InsertInstructionBefore(new_invoke, invoke_instruction); new_invoke->CopyEnvironmentFrom(invoke_instruction->GetEnvironment()); - if (invoke_instruction->GetType() == Primitive::kPrimNot) { + if (invoke_instruction->GetType() == DataType::Type::kReference) { new_invoke->SetReferenceTypeInfo(invoke_instruction->GetReferenceTypeInfo()); } return_replacement = new_invoke; @@ -1403,7 +1404,7 @@ static HInstruction* GetInvokeInputForArgVRegIndex(HInvoke* invoke_instruction, size_t input_index = 0; for (size_t i = 0; i < arg_vreg_index; ++i, ++input_index) { DCHECK_LT(input_index, invoke_instruction->GetNumberOfArguments()); - if (Primitive::Is64BitType(invoke_instruction->InputAt(input_index)->GetType())) { + if (DataType::Is64BitType(invoke_instruction->InputAt(input_index)->GetType())) { ++i; DCHECK_NE(i, arg_vreg_index); } @@ -1423,7 +1424,7 @@ bool HInliner::TryPatternSubstitution(HInvoke* invoke_instruction, switch (inline_method.opcode) { case kInlineOpNop: - DCHECK_EQ(invoke_instruction->GetType(), Primitive::kPrimVoid); + DCHECK_EQ(invoke_instruction->GetType(), DataType::Type::kVoid); *return_replacement = nullptr; break; case kInlineOpReturnArg: @@ -1541,7 +1542,7 @@ HInstanceFieldGet* HInliner::CreateInstanceFieldGet(uint32_t field_index, HInstanceFieldGet* iget = new (graph_->GetArena()) HInstanceFieldGet( obj, resolved_field, - resolved_field->GetTypeAsPrimitiveType(), + DataType::FromShorty(resolved_field->GetTypeDescriptor()[0]), resolved_field->GetOffset(), resolved_field->IsVolatile(), field_index, @@ -1550,7 +1551,7 @@ HInstanceFieldGet* HInliner::CreateInstanceFieldGet(uint32_t field_index, // Read barrier generates a runtime call in slow path and we need a valid // dex pc for the associated stack map. 0 is bogus but valid. Bug: 26854537. /* dex_pc */ 0); - if (iget->GetType() == Primitive::kPrimNot) { + if (iget->GetType() == DataType::Type::kReference) { // Use the same dex_cache that we used for field lookup as the hint_dex_cache. 
Handle<mirror::DexCache> dex_cache = handles_->NewHandle(referrer->GetDexCache()); ReferenceTypePropagation rtp(graph_, @@ -1582,7 +1583,7 @@ HInstanceFieldSet* HInliner::CreateInstanceFieldSet(uint32_t field_index, obj, value, resolved_field, - resolved_field->GetTypeAsPrimitiveType(), + DataType::FromShorty(resolved_field->GetTypeDescriptor()[0]), resolved_field->GetOffset(), resolved_field->IsVolatile(), field_index, @@ -1667,8 +1668,6 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction, HGraphBuilder builder(callee_graph, &dex_compilation_unit, &outer_compilation_unit_, - resolved_method->GetDexFile(), - *code_item, compiler_driver_, codegen_, inline_stats_, @@ -1711,7 +1710,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction, } else if (argument->IsDoubleConstant()) { current->ReplaceWith( callee_graph->GetDoubleConstant(argument->AsDoubleConstant()->GetValue())); - } else if (argument->GetType() == Primitive::kPrimNot) { + } else if (argument->GetType() == DataType::Type::kReference) { if (!resolved_method->IsStatic() && parameter_index == 0 && receiver_type.IsValid()) { run_rtp = true; current->SetReferenceTypeInfo(receiver_type); @@ -1975,7 +1974,7 @@ bool HInliner::ArgumentTypesMoreSpecific(HInvoke* invoke_instruction, ArtMethod* param_idx < e; ++param_idx, ++input_idx) { HInstruction* input = invoke_instruction->InputAt(input_idx); - if (input->GetType() == Primitive::kPrimNot) { + if (input->GetType() == DataType::Type::kReference) { ObjPtr<mirror::Class> param_cls = resolved_method->LookupResolvedClassFromTypeIndex( param_list->GetTypeItem(param_idx).type_idx_); if (IsReferenceTypeRefinement(GetClassRTI(param_cls), @@ -1993,7 +1992,7 @@ bool HInliner::ReturnTypeMoreSpecific(HInvoke* invoke_instruction, HInstruction* return_replacement) { // Check the integrity of reference types and run another type propagation if needed. if (return_replacement != nullptr) { - if (return_replacement->GetType() == Primitive::kPrimNot) { + if (return_replacement->GetType() == DataType::Type::kReference) { // Test if the return type is a refinement of the declared return type. if (IsReferenceTypeRefinement(invoke_instruction->GetReferenceTypeInfo(), /* declared_can_be_null */ true, @@ -2019,7 +2018,7 @@ bool HInliner::ReturnTypeMoreSpecific(HInvoke* invoke_instruction, void HInliner::FixUpReturnReferenceType(ArtMethod* resolved_method, HInstruction* return_replacement) { if (return_replacement != nullptr) { - if (return_replacement->GetType() == Primitive::kPrimNot) { + if (return_replacement->GetType() == DataType::Type::kReference) { if (!return_replacement->GetReferenceTypeInfo().IsValid()) { // Make sure that we have a valid type for the return. We may get an invalid one when // we inline invokes with multiple branches and create a Phi for the result. 
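In the inliner changes above, resolved_field->GetTypeAsPrimitiveType() is replaced by DataType::FromShorty(resolved_field->GetTypeDescriptor()[0]), so the field type is now derived from the first character of its descriptor. A rough stand-in for that mapping is sketched below; it assumes the standard dex shorty/descriptor characters and is not the real implementation from data_type-inl.h.

// Illustrative sketch of a shorty-character-to-type mapping; the enumerator
// set follows the new DataType::Type names used throughout this patch.
#include <cstdlib>

enum class Type { kReference, kBool, kInt8, kUint16, kInt16,
                  kInt32, kInt64, kFloat32, kFloat64, kVoid };

Type FromShortyChar(char c) {
  switch (c) {
    case 'L': return Type::kReference;  // object reference
    case '[': return Type::kReference;  // array field descriptors start with '['
    case 'Z': return Type::kBool;
    case 'B': return Type::kInt8;       // byte
    case 'C': return Type::kUint16;     // char
    case 'S': return Type::kInt16;      // short
    case 'I': return Type::kInt32;
    case 'J': return Type::kInt64;      // long
    case 'F': return Type::kFloat32;
    case 'D': return Type::kFloat64;
    case 'V': return Type::kVoid;       // only valid as a return type
    default:  std::abort();             // unexpected character in this sketch
  }
}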
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc index 6532ec123d..e832b10b79 100644 --- a/compiler/optimizing/instruction_builder.cc +++ b/compiler/optimizing/instruction_builder.cc @@ -19,6 +19,7 @@ #include "art_method-inl.h" #include "bytecode_utils.h" #include "class_linker.h" +#include "data_type-inl.h" #include "dex_instruction-inl.h" #include "driver/compiler_options.h" #include "imtable-inl.h" @@ -221,7 +222,7 @@ void HInstructionBuilder::InitializeInstruction(HInstruction* instruction) { } HInstruction* HInstructionBuilder::LoadNullCheckedLocal(uint32_t register_index, uint32_t dex_pc) { - HInstruction* ref = LoadLocal(register_index, Primitive::kPrimNot); + HInstruction* ref = LoadLocal(register_index, DataType::Type::kReference); if (!ref->CanBeNull()) { return ref; } @@ -388,15 +389,15 @@ void HInstructionBuilder::FindNativeDebugInfoLocations(ArenaBitVector* locations } } -HInstruction* HInstructionBuilder::LoadLocal(uint32_t reg_number, Primitive::Type type) const { +HInstruction* HInstructionBuilder::LoadLocal(uint32_t reg_number, DataType::Type type) const { HInstruction* value = (*current_locals_)[reg_number]; DCHECK(value != nullptr); // If the operation requests a specific type, we make sure its input is of that type. if (type != value->GetType()) { - if (Primitive::IsFloatingPointType(type)) { + if (DataType::IsFloatingPointType(type)) { value = ssa_builder_->GetFloatOrDoubleEquivalent(value, type); - } else if (type == Primitive::kPrimNot) { + } else if (type == DataType::Type::kReference) { value = ssa_builder_->GetReferenceTypeEquivalent(value); } DCHECK(value != nullptr); @@ -406,8 +407,8 @@ HInstruction* HInstructionBuilder::LoadLocal(uint32_t reg_number, Primitive::Typ } void HInstructionBuilder::UpdateLocal(uint32_t reg_number, HInstruction* stored_value) { - Primitive::Type stored_type = stored_value->GetType(); - DCHECK_NE(stored_type, Primitive::kPrimVoid); + DataType::Type stored_type = stored_value->GetType(); + DCHECK_NE(stored_type, DataType::Type::kVoid); // Storing into vreg `reg_number` may implicitly invalidate the surrounding // registers. Consider the following cases: @@ -420,7 +421,7 @@ void HInstructionBuilder::UpdateLocal(uint32_t reg_number, HInstruction* stored_ if (reg_number != 0) { HInstruction* local_low = (*current_locals_)[reg_number - 1]; - if (local_low != nullptr && Primitive::Is64BitType(local_low->GetType())) { + if (local_low != nullptr && DataType::Is64BitType(local_low->GetType())) { // The vreg we are storing into was previously the high vreg of a pair. // We need to invalidate its low vreg. DCHECK((*current_locals_)[reg_number] == nullptr); @@ -429,7 +430,7 @@ void HInstructionBuilder::UpdateLocal(uint32_t reg_number, HInstruction* stored_ } (*current_locals_)[reg_number] = stored_value; - if (Primitive::Is64BitType(stored_type)) { + if (DataType::Is64BitType(stored_type)) { // We are storing a pair. Invalidate the instruction in the high vreg. 
(*current_locals_)[reg_number + 1] = nullptr; } @@ -455,7 +456,7 @@ void HInstructionBuilder::InitializeParameters() { HParameterValue* parameter = new (arena_) HParameterValue(*dex_file_, referrer_method_id.class_idx_, parameter_index++, - Primitive::kPrimNot, + DataType::Type::kReference, /* is_this */ true); AppendInstruction(parameter); UpdateLocal(locals_index++, parameter); @@ -472,14 +473,14 @@ void HInstructionBuilder::InitializeParameters() { *dex_file_, arg_types->GetTypeItem(shorty_pos - 1).type_idx_, parameter_index++, - Primitive::GetType(shorty[shorty_pos]), + DataType::FromShorty(shorty[shorty_pos]), /* is_this */ false); ++shorty_pos; AppendInstruction(parameter); // Store the parameter value in the local that the dex code will use // to reference that parameter. UpdateLocal(locals_index++, parameter); - if (Primitive::Is64BitType(parameter->GetType())) { + if (DataType::Is64BitType(parameter->GetType())) { i++; locals_index++; parameter_index++; @@ -489,8 +490,8 @@ void HInstructionBuilder::InitializeParameters() { template<typename T> void HInstructionBuilder::If_22t(const Instruction& instruction, uint32_t dex_pc) { - HInstruction* first = LoadLocal(instruction.VRegA(), Primitive::kPrimInt); - HInstruction* second = LoadLocal(instruction.VRegB(), Primitive::kPrimInt); + HInstruction* first = LoadLocal(instruction.VRegA(), DataType::Type::kInt32); + HInstruction* second = LoadLocal(instruction.VRegB(), DataType::Type::kInt32); T* comparison = new (arena_) T(first, second, dex_pc); AppendInstruction(comparison); AppendInstruction(new (arena_) HIf(comparison, dex_pc)); @@ -499,7 +500,7 @@ void HInstructionBuilder::If_22t(const Instruction& instruction, uint32_t dex_pc template<typename T> void HInstructionBuilder::If_21t(const Instruction& instruction, uint32_t dex_pc) { - HInstruction* value = LoadLocal(instruction.VRegA(), Primitive::kPrimInt); + HInstruction* value = LoadLocal(instruction.VRegA(), DataType::Type::kInt32); T* comparison = new (arena_) T(value, graph_->GetIntConstant(0, dex_pc), dex_pc); AppendInstruction(comparison); AppendInstruction(new (arena_) HIf(comparison, dex_pc)); @@ -508,7 +509,7 @@ void HInstructionBuilder::If_21t(const Instruction& instruction, uint32_t dex_pc template<typename T> void HInstructionBuilder::Unop_12x(const Instruction& instruction, - Primitive::Type type, + DataType::Type type, uint32_t dex_pc) { HInstruction* first = LoadLocal(instruction.VRegB(), type); AppendInstruction(new (arena_) T(type, first, dex_pc)); @@ -516,8 +517,8 @@ void HInstructionBuilder::Unop_12x(const Instruction& instruction, } void HInstructionBuilder::Conversion_12x(const Instruction& instruction, - Primitive::Type input_type, - Primitive::Type result_type, + DataType::Type input_type, + DataType::Type result_type, uint32_t dex_pc) { HInstruction* first = LoadLocal(instruction.VRegB(), input_type); AppendInstruction(new (arena_) HTypeConversion(result_type, first, dex_pc)); @@ -526,7 +527,7 @@ void HInstructionBuilder::Conversion_12x(const Instruction& instruction, template<typename T> void HInstructionBuilder::Binop_23x(const Instruction& instruction, - Primitive::Type type, + DataType::Type type, uint32_t dex_pc) { HInstruction* first = LoadLocal(instruction.VRegB(), type); HInstruction* second = LoadLocal(instruction.VRegC(), type); @@ -536,16 +537,16 @@ void HInstructionBuilder::Binop_23x(const Instruction& instruction, template<typename T> void HInstructionBuilder::Binop_23x_shift(const Instruction& instruction, - Primitive::Type type, + DataType::Type 
type, uint32_t dex_pc) { HInstruction* first = LoadLocal(instruction.VRegB(), type); - HInstruction* second = LoadLocal(instruction.VRegC(), Primitive::kPrimInt); + HInstruction* second = LoadLocal(instruction.VRegC(), DataType::Type::kInt32); AppendInstruction(new (arena_) T(type, first, second, dex_pc)); UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction()); } void HInstructionBuilder::Binop_23x_cmp(const Instruction& instruction, - Primitive::Type type, + DataType::Type type, ComparisonBias bias, uint32_t dex_pc) { HInstruction* first = LoadLocal(instruction.VRegB(), type); @@ -556,17 +557,17 @@ void HInstructionBuilder::Binop_23x_cmp(const Instruction& instruction, template<typename T> void HInstructionBuilder::Binop_12x_shift(const Instruction& instruction, - Primitive::Type type, + DataType::Type type, uint32_t dex_pc) { HInstruction* first = LoadLocal(instruction.VRegA(), type); - HInstruction* second = LoadLocal(instruction.VRegB(), Primitive::kPrimInt); + HInstruction* second = LoadLocal(instruction.VRegB(), DataType::Type::kInt32); AppendInstruction(new (arena_) T(type, first, second, dex_pc)); UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction()); } template<typename T> void HInstructionBuilder::Binop_12x(const Instruction& instruction, - Primitive::Type type, + DataType::Type type, uint32_t dex_pc) { HInstruction* first = LoadLocal(instruction.VRegA(), type); HInstruction* second = LoadLocal(instruction.VRegB(), type); @@ -576,23 +577,23 @@ void HInstructionBuilder::Binop_12x(const Instruction& instruction, template<typename T> void HInstructionBuilder::Binop_22s(const Instruction& instruction, bool reverse, uint32_t dex_pc) { - HInstruction* first = LoadLocal(instruction.VRegB(), Primitive::kPrimInt); + HInstruction* first = LoadLocal(instruction.VRegB(), DataType::Type::kInt32); HInstruction* second = graph_->GetIntConstant(instruction.VRegC_22s(), dex_pc); if (reverse) { std::swap(first, second); } - AppendInstruction(new (arena_) T(Primitive::kPrimInt, first, second, dex_pc)); + AppendInstruction(new (arena_) T(DataType::Type::kInt32, first, second, dex_pc)); UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction()); } template<typename T> void HInstructionBuilder::Binop_22b(const Instruction& instruction, bool reverse, uint32_t dex_pc) { - HInstruction* first = LoadLocal(instruction.VRegB(), Primitive::kPrimInt); + HInstruction* first = LoadLocal(instruction.VRegB(), DataType::Type::kInt32); HInstruction* second = graph_->GetIntConstant(instruction.VRegC_22b(), dex_pc); if (reverse) { std::swap(first, second); } - AppendInstruction(new (arena_) T(Primitive::kPrimInt, first, second, dex_pc)); + AppendInstruction(new (arena_) T(DataType::Type::kInt32, first, second, dex_pc)); UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction()); } @@ -624,7 +625,7 @@ static bool IsFallthroughInstruction(const Instruction& instruction, } void HInstructionBuilder::BuildSwitch(const Instruction& instruction, uint32_t dex_pc) { - HInstruction* value = LoadLocal(instruction.VRegA(), Primitive::kPrimInt); + HInstruction* value = LoadLocal(instruction.VRegA(), DataType::Type::kInt32); DexSwitchTable table(instruction, dex_pc); if (table.GetNumEntries() == 0) { @@ -651,9 +652,9 @@ void HInstructionBuilder::BuildSwitch(const Instruction& instruction, uint32_t d } void HInstructionBuilder::BuildReturn(const Instruction& instruction, - Primitive::Type type, + DataType::Type type, uint32_t dex_pc) { - if (type == Primitive::kPrimVoid) { + 
if (type == DataType::Type::kVoid) { // Only <init> (which is a return-void) could possibly have a constructor fence. // This may insert additional redundant constructor fences from the super constructors. // TODO: remove redundant constructor fences (b/36656456). @@ -802,7 +803,7 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction, uint32_t register_index) { InvokeType invoke_type = GetInvokeTypeFromOpCode(instruction.Opcode()); const char* descriptor = dex_file_->GetMethodShorty(method_idx); - Primitive::Type return_type = Primitive::GetType(descriptor[0]); + DataType::Type return_type = DataType::FromShorty(descriptor[0]); // Remove the return type from the 'proto'. size_t number_of_arguments = strlen(descriptor) - 1; @@ -844,7 +845,7 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction, HInvoke* invoke = new (arena_) HInvokeStaticOrDirect( arena_, number_of_arguments - 1, - Primitive::kPrimNot /*return_type */, + DataType::Type::kReference /*return_type */, dex_pc, method_idx, nullptr, @@ -938,7 +939,7 @@ bool HInstructionBuilder::BuildInvokePolymorphic(const Instruction& instruction uint32_t register_index) { const char* descriptor = dex_file_->GetShorty(proto_idx); DCHECK_EQ(1 + ArtMethod::NumArgRegisters(descriptor), number_of_vreg_arguments); - Primitive::Type return_type = Primitive::GetType(descriptor[0]); + DataType::Type return_type = DataType::FromShorty(descriptor[0]); size_t number_of_arguments = strlen(descriptor); HInvoke* invoke = new (arena_) HInvokePolymorphic(arena_, number_of_arguments, @@ -1113,8 +1114,8 @@ bool HInstructionBuilder::SetupInvokeArguments(HInvoke* invoke, // it hasn't been properly checked. (i < number_of_vreg_arguments) && (*argument_index < invoke->GetNumberOfArguments()); i++, (*argument_index)++) { - Primitive::Type type = Primitive::GetType(descriptor[descriptor_index++]); - bool is_wide = (type == Primitive::kPrimLong) || (type == Primitive::kPrimDouble); + DataType::Type type = DataType::FromShorty(descriptor[descriptor_index++]); + bool is_wide = (type == DataType::Type::kInt64) || (type == DataType::Type::kFloat64); if (!is_range && is_wide && ((i + 1 == number_of_vreg_arguments) || (args[i] + 1 != args[i + 1]))) { @@ -1169,7 +1170,7 @@ bool HInstructionBuilder::HandleInvoke(HInvoke* invoke, if (invoke->GetInvokeType() != InvokeType::kStatic) { // Instance call. uint32_t obj_reg = is_range ? register_index : args[0]; HInstruction* arg = is_unresolved - ? LoadLocal(obj_reg, Primitive::kPrimNot) + ? LoadLocal(obj_reg, DataType::Type::kReference) : LoadNullCheckedLocal(obj_reg, invoke->GetDexPc()); invoke->SetArgumentAt(0, arg); start_index = 1; @@ -1229,7 +1230,7 @@ bool HInstructionBuilder::HandleStringInit(HInvoke* invoke, // This is a StringFactory call, not an actual String constructor. Its result // replaces the empty String pre-allocated by NewInstance. uint32_t orig_this_reg = is_range ? register_index : args[0]; - HInstruction* arg_this = LoadLocal(orig_this_reg, Primitive::kPrimNot); + HInstruction* arg_this = LoadLocal(orig_this_reg, DataType::Type::kReference); // Replacing the NewInstance might render it redundant. Keep a list of these // to be visited once it is clear whether it is has remaining uses. 
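Several of the instruction-builder hunks above swap Primitive::Is64BitType for DataType::Is64BitType when deciding whether a value occupies a pair of dex registers (see UpdateLocal and the is_wide computation in SetupInvokeArguments). The check itself is just a width test; a self-contained sketch follows, with the enumerator subset assumed from this patch.

// Sketch: "wide" means the value needs two adjacent vregs, which is exactly
// the 64-bit case (long or double), matching the is_wide computation above.
enum class Type { kInt32, kInt64, kFloat32, kFloat64, kReference };

constexpr bool Is64BitType(Type type) {
  return type == Type::kInt64 || type == Type::kFloat64;
}

static_assert(Is64BitType(Type::kInt64), "long is wide");
static_assert(Is64BitType(Type::kFloat64), "double is wide");
static_assert(!Is64BitType(Type::kInt32) && !Is64BitType(Type::kReference),
              "32-bit values and references occupy a single vreg");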
@@ -1251,10 +1252,10 @@ bool HInstructionBuilder::HandleStringInit(HInvoke* invoke, return true; } -static Primitive::Type GetFieldAccessType(const DexFile& dex_file, uint16_t field_index) { +static DataType::Type GetFieldAccessType(const DexFile& dex_file, uint16_t field_index) { const DexFile::FieldId& field_id = dex_file.GetFieldId(field_index); const char* type = dex_file.GetFieldTypeDescriptor(field_id); - return Primitive::GetType(type[0]); + return DataType::FromShorty(type[0]); } bool HInstructionBuilder::BuildInstanceFieldAccess(const Instruction& instruction, @@ -1280,12 +1281,10 @@ bool HInstructionBuilder::BuildInstanceFieldAccess(const Instruction& instructio // is unresolved. In that case, we rely on the runtime to perform various // checks first, followed by a null check. HInstruction* object = (resolved_field == nullptr) - ? LoadLocal(obj_reg, Primitive::kPrimNot) + ? LoadLocal(obj_reg, DataType::Type::kReference) : LoadNullCheckedLocal(obj_reg, dex_pc); - Primitive::Type field_type = (resolved_field == nullptr) - ? GetFieldAccessType(*dex_file_, field_index) - : resolved_field->GetTypeAsPrimitiveType(); + DataType::Type field_type = GetFieldAccessType(*dex_file_, field_index); if (is_put) { HInstruction* value = LoadLocal(source_or_dest_reg, field_type); HInstruction* field_set = nullptr; @@ -1377,7 +1376,7 @@ bool HInstructionBuilder::IsOutermostCompilingClass(dex::TypeIndex type_index) c void HInstructionBuilder::BuildUnresolvedStaticFieldAccess(const Instruction& instruction, uint32_t dex_pc, bool is_put, - Primitive::Type field_type) { + DataType::Type field_type) { uint32_t source_or_dest_reg = instruction.VRegA_21c(); uint16_t field_index = instruction.VRegB_21c(); @@ -1452,12 +1451,12 @@ bool HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction, if (resolved_field == nullptr) { MaybeRecordStat(compilation_stats_, MethodCompilationStat::kUnresolvedField); - Primitive::Type field_type = GetFieldAccessType(*dex_file_, field_index); + DataType::Type field_type = GetFieldAccessType(*dex_file_, field_index); BuildUnresolvedStaticFieldAccess(instruction, dex_pc, is_put, field_type); return true; } - Primitive::Type field_type = resolved_field->GetTypeAsPrimitiveType(); + DataType::Type field_type = GetFieldAccessType(*dex_file_, field_index); Handle<mirror::Class> klass = handles_->NewHandle(resolved_field->GetDeclaringClass()); HLoadClass* constant = BuildLoadClass(klass->GetDexTypeIndex(), @@ -1515,15 +1514,15 @@ void HInstructionBuilder::BuildCheckedDivRem(uint16_t out_vreg, uint16_t first_vreg, int64_t second_vreg_or_constant, uint32_t dex_pc, - Primitive::Type type, + DataType::Type type, bool second_is_constant, bool isDiv) { - DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong); + DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64); HInstruction* first = LoadLocal(first_vreg, type); HInstruction* second = nullptr; if (second_is_constant) { - if (type == Primitive::kPrimInt) { + if (type == DataType::Type::kInt32) { second = graph_->GetIntConstant(second_vreg_or_constant, dex_pc); } else { second = graph_->GetLongConstant(second_vreg_or_constant, dex_pc); @@ -1533,8 +1532,8 @@ void HInstructionBuilder::BuildCheckedDivRem(uint16_t out_vreg, } if (!second_is_constant - || (type == Primitive::kPrimInt && second->AsIntConstant()->GetValue() == 0) - || (type == Primitive::kPrimLong && second->AsLongConstant()->GetValue() == 0)) { + || (type == DataType::Type::kInt32 && second->AsIntConstant()->GetValue() == 0) + 
|| (type == DataType::Type::kInt64 && second->AsLongConstant()->GetValue() == 0)) { second = new (arena_) HDivZeroCheck(second, dex_pc); AppendInstruction(second); } @@ -1550,7 +1549,7 @@ void HInstructionBuilder::BuildCheckedDivRem(uint16_t out_vreg, void HInstructionBuilder::BuildArrayAccess(const Instruction& instruction, uint32_t dex_pc, bool is_put, - Primitive::Type anticipated_type) { + DataType::Type anticipated_type) { uint8_t source_or_dest_reg = instruction.VRegA_23x(); uint8_t array_reg = instruction.VRegB_23x(); uint8_t index_reg = instruction.VRegC_23x(); @@ -1558,7 +1557,7 @@ void HInstructionBuilder::BuildArrayAccess(const Instruction& instruction, HInstruction* object = LoadNullCheckedLocal(array_reg, dex_pc); HInstruction* length = new (arena_) HArrayLength(object, dex_pc); AppendInstruction(length); - HInstruction* index = LoadLocal(index_reg, Primitive::kPrimInt); + HInstruction* index = LoadLocal(index_reg, DataType::Type::kInt32); index = new (arena_) HBoundsCheck(index, length, dex_pc); AppendInstruction(index); if (is_put) { @@ -1594,7 +1593,7 @@ HNewArray* HInstructionBuilder::BuildFilledNewArray(uint32_t dex_pc, || primitive == 'L' || primitive == '[') << descriptor; bool is_reference_array = (primitive == 'L') || (primitive == '['); - Primitive::Type type = is_reference_array ? Primitive::kPrimNot : Primitive::kPrimInt; + DataType::Type type = is_reference_array ? DataType::Type::kReference : DataType::Type::kInt32; for (size_t i = 0; i < number_of_vreg_arguments; ++i) { HInstruction* value = LoadLocal(is_range ? register_index + i : args[i], type); @@ -1612,7 +1611,7 @@ template <typename T> void HInstructionBuilder::BuildFillArrayData(HInstruction* object, const T* data, uint32_t element_count, - Primitive::Type anticipated_type, + DataType::Type anticipated_type, uint32_t dex_pc) { for (uint32_t i = 0; i < element_count; ++i) { HInstruction* index = graph_->GetIntConstant(i, dex_pc); @@ -1650,21 +1649,21 @@ void HInstructionBuilder::BuildFillArrayData(const Instruction& instruction, uin BuildFillArrayData(array, reinterpret_cast<const int8_t*>(data), element_count, - Primitive::kPrimByte, + DataType::Type::kInt8, dex_pc); break; case 2: BuildFillArrayData(array, reinterpret_cast<const int16_t*>(data), element_count, - Primitive::kPrimShort, + DataType::Type::kInt16, dex_pc); break; case 4: BuildFillArrayData(array, reinterpret_cast<const int32_t*>(data), element_count, - Primitive::kPrimInt, + DataType::Type::kInt32, dex_pc); break; case 8: @@ -1686,7 +1685,7 @@ void HInstructionBuilder::BuildFillWideArrayData(HInstruction* object, for (uint32_t i = 0; i < element_count; ++i) { HInstruction* index = graph_->GetIntConstant(i, dex_pc); HInstruction* value = graph_->GetLongConstant(data[i], dex_pc); - HArraySet* aset = new (arena_) HArraySet(object, index, value, Primitive::kPrimLong, dex_pc); + HArraySet* aset = new (arena_) HArraySet(object, index, value, DataType::Type::kInt64, dex_pc); ssa_builder_->MaybeAddAmbiguousArraySet(aset); AppendInstruction(aset); } @@ -1783,7 +1782,7 @@ void HInstructionBuilder::BuildTypeCheck(const Instruction& instruction, uint8_t reference, dex::TypeIndex type_index, uint32_t dex_pc) { - HInstruction* object = LoadLocal(reference, Primitive::kPrimNot); + HInstruction* object = LoadLocal(reference, DataType::Type::kReference); HLoadClass* cls = BuildLoadClass(type_index, dex_pc); ScopedObjectAccess soa(Thread::Current()); @@ -1889,7 +1888,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction, case 
Instruction::MOVE: case Instruction::MOVE_FROM16: case Instruction::MOVE_16: { - HInstruction* value = LoadLocal(instruction.VRegB(), Primitive::kPrimInt); + HInstruction* value = LoadLocal(instruction.VRegB(), DataType::Type::kInt32); UpdateLocal(instruction.VRegA(), value); break; } @@ -1898,7 +1897,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction, case Instruction::MOVE_WIDE: case Instruction::MOVE_WIDE_FROM16: case Instruction::MOVE_WIDE_16: { - HInstruction* value = LoadLocal(instruction.VRegB(), Primitive::kPrimLong); + HInstruction* value = LoadLocal(instruction.VRegB(), DataType::Type::kInt64); UpdateLocal(instruction.VRegA(), value); break; } @@ -1916,9 +1915,10 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction, if (value->IsIntConstant()) { DCHECK_EQ(value->AsIntConstant()->GetValue(), 0); } else if (value->IsPhi()) { - DCHECK(value->GetType() == Primitive::kPrimInt || value->GetType() == Primitive::kPrimNot); + DCHECK(value->GetType() == DataType::Type::kInt32 || + value->GetType() == DataType::Type::kReference); } else { - value = LoadLocal(reg_number, Primitive::kPrimNot); + value = LoadLocal(reg_number, DataType::Type::kReference); } UpdateLocal(instruction.VRegA(), value); break; @@ -1926,7 +1926,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction, case Instruction::RETURN_VOID_NO_BARRIER: case Instruction::RETURN_VOID: { - BuildReturn(instruction, Primitive::kPrimVoid, dex_pc); + BuildReturn(instruction, DataType::Type::kVoid, dex_pc); break; } @@ -2045,435 +2045,435 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction, } case Instruction::NEG_INT: { - Unop_12x<HNeg>(instruction, Primitive::kPrimInt, dex_pc); + Unop_12x<HNeg>(instruction, DataType::Type::kInt32, dex_pc); break; } case Instruction::NEG_LONG: { - Unop_12x<HNeg>(instruction, Primitive::kPrimLong, dex_pc); + Unop_12x<HNeg>(instruction, DataType::Type::kInt64, dex_pc); break; } case Instruction::NEG_FLOAT: { - Unop_12x<HNeg>(instruction, Primitive::kPrimFloat, dex_pc); + Unop_12x<HNeg>(instruction, DataType::Type::kFloat32, dex_pc); break; } case Instruction::NEG_DOUBLE: { - Unop_12x<HNeg>(instruction, Primitive::kPrimDouble, dex_pc); + Unop_12x<HNeg>(instruction, DataType::Type::kFloat64, dex_pc); break; } case Instruction::NOT_INT: { - Unop_12x<HNot>(instruction, Primitive::kPrimInt, dex_pc); + Unop_12x<HNot>(instruction, DataType::Type::kInt32, dex_pc); break; } case Instruction::NOT_LONG: { - Unop_12x<HNot>(instruction, Primitive::kPrimLong, dex_pc); + Unop_12x<HNot>(instruction, DataType::Type::kInt64, dex_pc); break; } case Instruction::INT_TO_LONG: { - Conversion_12x(instruction, Primitive::kPrimInt, Primitive::kPrimLong, dex_pc); + Conversion_12x(instruction, DataType::Type::kInt32, DataType::Type::kInt64, dex_pc); break; } case Instruction::INT_TO_FLOAT: { - Conversion_12x(instruction, Primitive::kPrimInt, Primitive::kPrimFloat, dex_pc); + Conversion_12x(instruction, DataType::Type::kInt32, DataType::Type::kFloat32, dex_pc); break; } case Instruction::INT_TO_DOUBLE: { - Conversion_12x(instruction, Primitive::kPrimInt, Primitive::kPrimDouble, dex_pc); + Conversion_12x(instruction, DataType::Type::kInt32, DataType::Type::kFloat64, dex_pc); break; } case Instruction::LONG_TO_INT: { - Conversion_12x(instruction, Primitive::kPrimLong, Primitive::kPrimInt, dex_pc); + Conversion_12x(instruction, DataType::Type::kInt64, DataType::Type::kInt32, dex_pc); break; } case 
Instruction::LONG_TO_FLOAT: { - Conversion_12x(instruction, Primitive::kPrimLong, Primitive::kPrimFloat, dex_pc); + Conversion_12x(instruction, DataType::Type::kInt64, DataType::Type::kFloat32, dex_pc); break; } case Instruction::LONG_TO_DOUBLE: { - Conversion_12x(instruction, Primitive::kPrimLong, Primitive::kPrimDouble, dex_pc); + Conversion_12x(instruction, DataType::Type::kInt64, DataType::Type::kFloat64, dex_pc); break; } case Instruction::FLOAT_TO_INT: { - Conversion_12x(instruction, Primitive::kPrimFloat, Primitive::kPrimInt, dex_pc); + Conversion_12x(instruction, DataType::Type::kFloat32, DataType::Type::kInt32, dex_pc); break; } case Instruction::FLOAT_TO_LONG: { - Conversion_12x(instruction, Primitive::kPrimFloat, Primitive::kPrimLong, dex_pc); + Conversion_12x(instruction, DataType::Type::kFloat32, DataType::Type::kInt64, dex_pc); break; } case Instruction::FLOAT_TO_DOUBLE: { - Conversion_12x(instruction, Primitive::kPrimFloat, Primitive::kPrimDouble, dex_pc); + Conversion_12x(instruction, DataType::Type::kFloat32, DataType::Type::kFloat64, dex_pc); break; } case Instruction::DOUBLE_TO_INT: { - Conversion_12x(instruction, Primitive::kPrimDouble, Primitive::kPrimInt, dex_pc); + Conversion_12x(instruction, DataType::Type::kFloat64, DataType::Type::kInt32, dex_pc); break; } case Instruction::DOUBLE_TO_LONG: { - Conversion_12x(instruction, Primitive::kPrimDouble, Primitive::kPrimLong, dex_pc); + Conversion_12x(instruction, DataType::Type::kFloat64, DataType::Type::kInt64, dex_pc); break; } case Instruction::DOUBLE_TO_FLOAT: { - Conversion_12x(instruction, Primitive::kPrimDouble, Primitive::kPrimFloat, dex_pc); + Conversion_12x(instruction, DataType::Type::kFloat64, DataType::Type::kFloat32, dex_pc); break; } case Instruction::INT_TO_BYTE: { - Conversion_12x(instruction, Primitive::kPrimInt, Primitive::kPrimByte, dex_pc); + Conversion_12x(instruction, DataType::Type::kInt32, DataType::Type::kInt8, dex_pc); break; } case Instruction::INT_TO_SHORT: { - Conversion_12x(instruction, Primitive::kPrimInt, Primitive::kPrimShort, dex_pc); + Conversion_12x(instruction, DataType::Type::kInt32, DataType::Type::kInt16, dex_pc); break; } case Instruction::INT_TO_CHAR: { - Conversion_12x(instruction, Primitive::kPrimInt, Primitive::kPrimChar, dex_pc); + Conversion_12x(instruction, DataType::Type::kInt32, DataType::Type::kUint16, dex_pc); break; } case Instruction::ADD_INT: { - Binop_23x<HAdd>(instruction, Primitive::kPrimInt, dex_pc); + Binop_23x<HAdd>(instruction, DataType::Type::kInt32, dex_pc); break; } case Instruction::ADD_LONG: { - Binop_23x<HAdd>(instruction, Primitive::kPrimLong, dex_pc); + Binop_23x<HAdd>(instruction, DataType::Type::kInt64, dex_pc); break; } case Instruction::ADD_DOUBLE: { - Binop_23x<HAdd>(instruction, Primitive::kPrimDouble, dex_pc); + Binop_23x<HAdd>(instruction, DataType::Type::kFloat64, dex_pc); break; } case Instruction::ADD_FLOAT: { - Binop_23x<HAdd>(instruction, Primitive::kPrimFloat, dex_pc); + Binop_23x<HAdd>(instruction, DataType::Type::kFloat32, dex_pc); break; } case Instruction::SUB_INT: { - Binop_23x<HSub>(instruction, Primitive::kPrimInt, dex_pc); + Binop_23x<HSub>(instruction, DataType::Type::kInt32, dex_pc); break; } case Instruction::SUB_LONG: { - Binop_23x<HSub>(instruction, Primitive::kPrimLong, dex_pc); + Binop_23x<HSub>(instruction, DataType::Type::kInt64, dex_pc); break; } case Instruction::SUB_FLOAT: { - Binop_23x<HSub>(instruction, Primitive::kPrimFloat, dex_pc); + Binop_23x<HSub>(instruction, DataType::Type::kFloat32, dex_pc); break; } case 
Instruction::SUB_DOUBLE: { - Binop_23x<HSub>(instruction, Primitive::kPrimDouble, dex_pc); + Binop_23x<HSub>(instruction, DataType::Type::kFloat64, dex_pc); break; } case Instruction::ADD_INT_2ADDR: { - Binop_12x<HAdd>(instruction, Primitive::kPrimInt, dex_pc); + Binop_12x<HAdd>(instruction, DataType::Type::kInt32, dex_pc); break; } case Instruction::MUL_INT: { - Binop_23x<HMul>(instruction, Primitive::kPrimInt, dex_pc); + Binop_23x<HMul>(instruction, DataType::Type::kInt32, dex_pc); break; } case Instruction::MUL_LONG: { - Binop_23x<HMul>(instruction, Primitive::kPrimLong, dex_pc); + Binop_23x<HMul>(instruction, DataType::Type::kInt64, dex_pc); break; } case Instruction::MUL_FLOAT: { - Binop_23x<HMul>(instruction, Primitive::kPrimFloat, dex_pc); + Binop_23x<HMul>(instruction, DataType::Type::kFloat32, dex_pc); break; } case Instruction::MUL_DOUBLE: { - Binop_23x<HMul>(instruction, Primitive::kPrimDouble, dex_pc); + Binop_23x<HMul>(instruction, DataType::Type::kFloat64, dex_pc); break; } case Instruction::DIV_INT: { BuildCheckedDivRem(instruction.VRegA(), instruction.VRegB(), instruction.VRegC(), - dex_pc, Primitive::kPrimInt, false, true); + dex_pc, DataType::Type::kInt32, false, true); break; } case Instruction::DIV_LONG: { BuildCheckedDivRem(instruction.VRegA(), instruction.VRegB(), instruction.VRegC(), - dex_pc, Primitive::kPrimLong, false, true); + dex_pc, DataType::Type::kInt64, false, true); break; } case Instruction::DIV_FLOAT: { - Binop_23x<HDiv>(instruction, Primitive::kPrimFloat, dex_pc); + Binop_23x<HDiv>(instruction, DataType::Type::kFloat32, dex_pc); break; } case Instruction::DIV_DOUBLE: { - Binop_23x<HDiv>(instruction, Primitive::kPrimDouble, dex_pc); + Binop_23x<HDiv>(instruction, DataType::Type::kFloat64, dex_pc); break; } case Instruction::REM_INT: { BuildCheckedDivRem(instruction.VRegA(), instruction.VRegB(), instruction.VRegC(), - dex_pc, Primitive::kPrimInt, false, false); + dex_pc, DataType::Type::kInt32, false, false); break; } case Instruction::REM_LONG: { BuildCheckedDivRem(instruction.VRegA(), instruction.VRegB(), instruction.VRegC(), - dex_pc, Primitive::kPrimLong, false, false); + dex_pc, DataType::Type::kInt64, false, false); break; } case Instruction::REM_FLOAT: { - Binop_23x<HRem>(instruction, Primitive::kPrimFloat, dex_pc); + Binop_23x<HRem>(instruction, DataType::Type::kFloat32, dex_pc); break; } case Instruction::REM_DOUBLE: { - Binop_23x<HRem>(instruction, Primitive::kPrimDouble, dex_pc); + Binop_23x<HRem>(instruction, DataType::Type::kFloat64, dex_pc); break; } case Instruction::AND_INT: { - Binop_23x<HAnd>(instruction, Primitive::kPrimInt, dex_pc); + Binop_23x<HAnd>(instruction, DataType::Type::kInt32, dex_pc); break; } case Instruction::AND_LONG: { - Binop_23x<HAnd>(instruction, Primitive::kPrimLong, dex_pc); + Binop_23x<HAnd>(instruction, DataType::Type::kInt64, dex_pc); break; } case Instruction::SHL_INT: { - Binop_23x_shift<HShl>(instruction, Primitive::kPrimInt, dex_pc); + Binop_23x_shift<HShl>(instruction, DataType::Type::kInt32, dex_pc); break; } case Instruction::SHL_LONG: { - Binop_23x_shift<HShl>(instruction, Primitive::kPrimLong, dex_pc); + Binop_23x_shift<HShl>(instruction, DataType::Type::kInt64, dex_pc); break; } case Instruction::SHR_INT: { - Binop_23x_shift<HShr>(instruction, Primitive::kPrimInt, dex_pc); + Binop_23x_shift<HShr>(instruction, DataType::Type::kInt32, dex_pc); break; } case Instruction::SHR_LONG: { - Binop_23x_shift<HShr>(instruction, Primitive::kPrimLong, dex_pc); + Binop_23x_shift<HShr>(instruction, 
DataType::Type::kInt64, dex_pc); break; } case Instruction::USHR_INT: { - Binop_23x_shift<HUShr>(instruction, Primitive::kPrimInt, dex_pc); + Binop_23x_shift<HUShr>(instruction, DataType::Type::kInt32, dex_pc); break; } case Instruction::USHR_LONG: { - Binop_23x_shift<HUShr>(instruction, Primitive::kPrimLong, dex_pc); + Binop_23x_shift<HUShr>(instruction, DataType::Type::kInt64, dex_pc); break; } case Instruction::OR_INT: { - Binop_23x<HOr>(instruction, Primitive::kPrimInt, dex_pc); + Binop_23x<HOr>(instruction, DataType::Type::kInt32, dex_pc); break; } case Instruction::OR_LONG: { - Binop_23x<HOr>(instruction, Primitive::kPrimLong, dex_pc); + Binop_23x<HOr>(instruction, DataType::Type::kInt64, dex_pc); break; } case Instruction::XOR_INT: { - Binop_23x<HXor>(instruction, Primitive::kPrimInt, dex_pc); + Binop_23x<HXor>(instruction, DataType::Type::kInt32, dex_pc); break; } case Instruction::XOR_LONG: { - Binop_23x<HXor>(instruction, Primitive::kPrimLong, dex_pc); + Binop_23x<HXor>(instruction, DataType::Type::kInt64, dex_pc); break; } case Instruction::ADD_LONG_2ADDR: { - Binop_12x<HAdd>(instruction, Primitive::kPrimLong, dex_pc); + Binop_12x<HAdd>(instruction, DataType::Type::kInt64, dex_pc); break; } case Instruction::ADD_DOUBLE_2ADDR: { - Binop_12x<HAdd>(instruction, Primitive::kPrimDouble, dex_pc); + Binop_12x<HAdd>(instruction, DataType::Type::kFloat64, dex_pc); break; } case Instruction::ADD_FLOAT_2ADDR: { - Binop_12x<HAdd>(instruction, Primitive::kPrimFloat, dex_pc); + Binop_12x<HAdd>(instruction, DataType::Type::kFloat32, dex_pc); break; } case Instruction::SUB_INT_2ADDR: { - Binop_12x<HSub>(instruction, Primitive::kPrimInt, dex_pc); + Binop_12x<HSub>(instruction, DataType::Type::kInt32, dex_pc); break; } case Instruction::SUB_LONG_2ADDR: { - Binop_12x<HSub>(instruction, Primitive::kPrimLong, dex_pc); + Binop_12x<HSub>(instruction, DataType::Type::kInt64, dex_pc); break; } case Instruction::SUB_FLOAT_2ADDR: { - Binop_12x<HSub>(instruction, Primitive::kPrimFloat, dex_pc); + Binop_12x<HSub>(instruction, DataType::Type::kFloat32, dex_pc); break; } case Instruction::SUB_DOUBLE_2ADDR: { - Binop_12x<HSub>(instruction, Primitive::kPrimDouble, dex_pc); + Binop_12x<HSub>(instruction, DataType::Type::kFloat64, dex_pc); break; } case Instruction::MUL_INT_2ADDR: { - Binop_12x<HMul>(instruction, Primitive::kPrimInt, dex_pc); + Binop_12x<HMul>(instruction, DataType::Type::kInt32, dex_pc); break; } case Instruction::MUL_LONG_2ADDR: { - Binop_12x<HMul>(instruction, Primitive::kPrimLong, dex_pc); + Binop_12x<HMul>(instruction, DataType::Type::kInt64, dex_pc); break; } case Instruction::MUL_FLOAT_2ADDR: { - Binop_12x<HMul>(instruction, Primitive::kPrimFloat, dex_pc); + Binop_12x<HMul>(instruction, DataType::Type::kFloat32, dex_pc); break; } case Instruction::MUL_DOUBLE_2ADDR: { - Binop_12x<HMul>(instruction, Primitive::kPrimDouble, dex_pc); + Binop_12x<HMul>(instruction, DataType::Type::kFloat64, dex_pc); break; } case Instruction::DIV_INT_2ADDR: { BuildCheckedDivRem(instruction.VRegA(), instruction.VRegA(), instruction.VRegB(), - dex_pc, Primitive::kPrimInt, false, true); + dex_pc, DataType::Type::kInt32, false, true); break; } case Instruction::DIV_LONG_2ADDR: { BuildCheckedDivRem(instruction.VRegA(), instruction.VRegA(), instruction.VRegB(), - dex_pc, Primitive::kPrimLong, false, true); + dex_pc, DataType::Type::kInt64, false, true); break; } case Instruction::REM_INT_2ADDR: { BuildCheckedDivRem(instruction.VRegA(), instruction.VRegA(), instruction.VRegB(), - dex_pc, Primitive::kPrimInt, false, 
false); + dex_pc, DataType::Type::kInt32, false, false); break; } case Instruction::REM_LONG_2ADDR: { BuildCheckedDivRem(instruction.VRegA(), instruction.VRegA(), instruction.VRegB(), - dex_pc, Primitive::kPrimLong, false, false); + dex_pc, DataType::Type::kInt64, false, false); break; } case Instruction::REM_FLOAT_2ADDR: { - Binop_12x<HRem>(instruction, Primitive::kPrimFloat, dex_pc); + Binop_12x<HRem>(instruction, DataType::Type::kFloat32, dex_pc); break; } case Instruction::REM_DOUBLE_2ADDR: { - Binop_12x<HRem>(instruction, Primitive::kPrimDouble, dex_pc); + Binop_12x<HRem>(instruction, DataType::Type::kFloat64, dex_pc); break; } case Instruction::SHL_INT_2ADDR: { - Binop_12x_shift<HShl>(instruction, Primitive::kPrimInt, dex_pc); + Binop_12x_shift<HShl>(instruction, DataType::Type::kInt32, dex_pc); break; } case Instruction::SHL_LONG_2ADDR: { - Binop_12x_shift<HShl>(instruction, Primitive::kPrimLong, dex_pc); + Binop_12x_shift<HShl>(instruction, DataType::Type::kInt64, dex_pc); break; } case Instruction::SHR_INT_2ADDR: { - Binop_12x_shift<HShr>(instruction, Primitive::kPrimInt, dex_pc); + Binop_12x_shift<HShr>(instruction, DataType::Type::kInt32, dex_pc); break; } case Instruction::SHR_LONG_2ADDR: { - Binop_12x_shift<HShr>(instruction, Primitive::kPrimLong, dex_pc); + Binop_12x_shift<HShr>(instruction, DataType::Type::kInt64, dex_pc); break; } case Instruction::USHR_INT_2ADDR: { - Binop_12x_shift<HUShr>(instruction, Primitive::kPrimInt, dex_pc); + Binop_12x_shift<HUShr>(instruction, DataType::Type::kInt32, dex_pc); break; } case Instruction::USHR_LONG_2ADDR: { - Binop_12x_shift<HUShr>(instruction, Primitive::kPrimLong, dex_pc); + Binop_12x_shift<HUShr>(instruction, DataType::Type::kInt64, dex_pc); break; } case Instruction::DIV_FLOAT_2ADDR: { - Binop_12x<HDiv>(instruction, Primitive::kPrimFloat, dex_pc); + Binop_12x<HDiv>(instruction, DataType::Type::kFloat32, dex_pc); break; } case Instruction::DIV_DOUBLE_2ADDR: { - Binop_12x<HDiv>(instruction, Primitive::kPrimDouble, dex_pc); + Binop_12x<HDiv>(instruction, DataType::Type::kFloat64, dex_pc); break; } case Instruction::AND_INT_2ADDR: { - Binop_12x<HAnd>(instruction, Primitive::kPrimInt, dex_pc); + Binop_12x<HAnd>(instruction, DataType::Type::kInt32, dex_pc); break; } case Instruction::AND_LONG_2ADDR: { - Binop_12x<HAnd>(instruction, Primitive::kPrimLong, dex_pc); + Binop_12x<HAnd>(instruction, DataType::Type::kInt64, dex_pc); break; } case Instruction::OR_INT_2ADDR: { - Binop_12x<HOr>(instruction, Primitive::kPrimInt, dex_pc); + Binop_12x<HOr>(instruction, DataType::Type::kInt32, dex_pc); break; } case Instruction::OR_LONG_2ADDR: { - Binop_12x<HOr>(instruction, Primitive::kPrimLong, dex_pc); + Binop_12x<HOr>(instruction, DataType::Type::kInt64, dex_pc); break; } case Instruction::XOR_INT_2ADDR: { - Binop_12x<HXor>(instruction, Primitive::kPrimInt, dex_pc); + Binop_12x<HXor>(instruction, DataType::Type::kInt32, dex_pc); break; } case Instruction::XOR_LONG_2ADDR: { - Binop_12x<HXor>(instruction, Primitive::kPrimLong, dex_pc); + Binop_12x<HXor>(instruction, DataType::Type::kInt64, dex_pc); break; } @@ -2540,14 +2540,14 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction, case Instruction::DIV_INT_LIT16: case Instruction::DIV_INT_LIT8: { BuildCheckedDivRem(instruction.VRegA(), instruction.VRegB(), instruction.VRegC(), - dex_pc, Primitive::kPrimInt, true, true); + dex_pc, DataType::Type::kInt32, true, true); break; } case Instruction::REM_INT_LIT16: case Instruction::REM_INT_LIT8: { 
BuildCheckedDivRem(instruction.VRegA(), instruction.VRegB(), instruction.VRegC(), - dex_pc, Primitive::kPrimInt, true, false); + dex_pc, DataType::Type::kInt32, true, false); break; } @@ -2578,7 +2578,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction, case Instruction::NEW_ARRAY: { dex::TypeIndex type_index(instruction.VRegC_22c()); - HInstruction* length = LoadLocal(instruction.VRegB_22c(), Primitive::kPrimInt); + HInstruction* length = LoadLocal(instruction.VRegB_22c(), DataType::Type::kInt32); HLoadClass* cls = BuildLoadClass(type_index, dex_pc); HNewArray* new_array = new (arena_) HNewArray(cls, length, dex_pc); @@ -2632,27 +2632,27 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction, } case Instruction::CMP_LONG: { - Binop_23x_cmp(instruction, Primitive::kPrimLong, ComparisonBias::kNoBias, dex_pc); + Binop_23x_cmp(instruction, DataType::Type::kInt64, ComparisonBias::kNoBias, dex_pc); break; } case Instruction::CMPG_FLOAT: { - Binop_23x_cmp(instruction, Primitive::kPrimFloat, ComparisonBias::kGtBias, dex_pc); + Binop_23x_cmp(instruction, DataType::Type::kFloat32, ComparisonBias::kGtBias, dex_pc); break; } case Instruction::CMPG_DOUBLE: { - Binop_23x_cmp(instruction, Primitive::kPrimDouble, ComparisonBias::kGtBias, dex_pc); + Binop_23x_cmp(instruction, DataType::Type::kFloat64, ComparisonBias::kGtBias, dex_pc); break; } case Instruction::CMPL_FLOAT: { - Binop_23x_cmp(instruction, Primitive::kPrimFloat, ComparisonBias::kLtBias, dex_pc); + Binop_23x_cmp(instruction, DataType::Type::kFloat32, ComparisonBias::kLtBias, dex_pc); break; } case Instruction::CMPL_DOUBLE: { - Binop_23x_cmp(instruction, Primitive::kPrimDouble, ComparisonBias::kLtBias, dex_pc); + Binop_23x_cmp(instruction, DataType::Type::kFloat64, ComparisonBias::kLtBias, dex_pc); break; } @@ -2735,13 +2735,13 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction, break; \ } - ARRAY_XX(, Primitive::kPrimInt); - ARRAY_XX(_WIDE, Primitive::kPrimLong); - ARRAY_XX(_OBJECT, Primitive::kPrimNot); - ARRAY_XX(_BOOLEAN, Primitive::kPrimBoolean); - ARRAY_XX(_BYTE, Primitive::kPrimByte); - ARRAY_XX(_CHAR, Primitive::kPrimChar); - ARRAY_XX(_SHORT, Primitive::kPrimShort); + ARRAY_XX(, DataType::Type::kInt32); + ARRAY_XX(_WIDE, DataType::Type::kInt64); + ARRAY_XX(_OBJECT, DataType::Type::kReference); + ARRAY_XX(_BOOLEAN, DataType::Type::kBool); + ARRAY_XX(_BYTE, DataType::Type::kInt8); + ARRAY_XX(_CHAR, DataType::Type::kUint16); + ARRAY_XX(_SHORT, DataType::Type::kInt16); case Instruction::ARRAY_LENGTH: { HInstruction* object = LoadNullCheckedLocal(instruction.VRegB_12x(), dex_pc); @@ -2781,7 +2781,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction, } case Instruction::THROW: { - HInstruction* exception = LoadLocal(instruction.VRegA_11x(), Primitive::kPrimNot); + HInstruction* exception = LoadLocal(instruction.VRegA_11x(), DataType::Type::kReference); AppendInstruction(new (arena_) HThrow(exception, dex_pc)); // We finished building this block. Set the current block to null to avoid // adding dead instructions to it. 
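The gt/lt bias attached to the cmpg/cmpl opcodes above only matters when a float or double operand is NaN; for ordinary values both opcodes behave like a plain three-way compare. A small standalone sketch of the intended semantics follows; the function and enum names are illustrative, not ART code (the compiler's enum is ComparisonBias).

// cmpg (kGtBias) yields +1 when either operand is NaN, cmpl (kLtBias) yields -1.
enum class Bias { kGtBias, kLtBias };
static int CompareFloatSketch(float a, float b, Bias bias) {
  if (a > b) return 1;
  if (a < b) return -1;
  if (a == b) return 0;
  // At least one operand is NaN: the bias decides the result.
  return (bias == Bias::kGtBias) ? 1 : -1;
}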
@@ -2806,7 +2806,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction, case Instruction::MONITOR_ENTER: { AppendInstruction(new (arena_) HMonitorOperation( - LoadLocal(instruction.VRegA_11x(), Primitive::kPrimNot), + LoadLocal(instruction.VRegA_11x(), DataType::Type::kReference), HMonitorOperation::OperationKind::kEnter, dex_pc)); break; @@ -2814,7 +2814,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction, case Instruction::MONITOR_EXIT: { AppendInstruction(new (arena_) HMonitorOperation( - LoadLocal(instruction.VRegA_11x(), Primitive::kPrimNot), + LoadLocal(instruction.VRegA_11x(), DataType::Type::kReference), HMonitorOperation::OperationKind::kExit, dex_pc)); break; diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h index b7fa39404b..a684bf40e6 100644 --- a/compiler/optimizing/instruction_builder.h +++ b/compiler/optimizing/instruction_builder.h @@ -42,7 +42,7 @@ class HInstructionBuilder : public ValueObject { SsaBuilder* ssa_builder, const DexFile* dex_file, const DexFile::CodeItem& code_item, - Primitive::Type return_type, + DataType::Type return_type, DexCompilationUnit* dex_compilation_unit, const DexCompilationUnit* const outer_compilation_unit, CompilerDriver* driver, @@ -96,7 +96,7 @@ class HInstructionBuilder : public ValueObject { ArenaVector<HInstruction*>* GetLocalsForWithAllocation( HBasicBlock* block, ArenaVector<HInstruction*>* locals, const size_t vregs); HInstruction* ValueOfLocalAt(HBasicBlock* block, size_t local); - HInstruction* LoadLocal(uint32_t register_index, Primitive::Type type) const; + HInstruction* LoadLocal(uint32_t register_index, DataType::Type type) const; HInstruction* LoadNullCheckedLocal(uint32_t register_index, uint32_t dex_pc); void UpdateLocal(uint32_t register_index, HInstruction* instruction); @@ -112,24 +112,24 @@ class HInstructionBuilder : public ValueObject { REQUIRES_SHARED(Locks::mutator_lock_); template<typename T> - void Unop_12x(const Instruction& instruction, Primitive::Type type, uint32_t dex_pc); + void Unop_12x(const Instruction& instruction, DataType::Type type, uint32_t dex_pc); template<typename T> - void Binop_23x(const Instruction& instruction, Primitive::Type type, uint32_t dex_pc); + void Binop_23x(const Instruction& instruction, DataType::Type type, uint32_t dex_pc); template<typename T> - void Binop_23x_shift(const Instruction& instruction, Primitive::Type type, uint32_t dex_pc); + void Binop_23x_shift(const Instruction& instruction, DataType::Type type, uint32_t dex_pc); void Binop_23x_cmp(const Instruction& instruction, - Primitive::Type type, + DataType::Type type, ComparisonBias bias, uint32_t dex_pc); template<typename T> - void Binop_12x(const Instruction& instruction, Primitive::Type type, uint32_t dex_pc); + void Binop_12x(const Instruction& instruction, DataType::Type type, uint32_t dex_pc); template<typename T> - void Binop_12x_shift(const Instruction& instruction, Primitive::Type type, uint32_t dex_pc); + void Binop_12x_shift(const Instruction& instruction, DataType::Type type, uint32_t dex_pc); template<typename T> void Binop_22b(const Instruction& instruction, bool reverse, uint32_t dex_pc); @@ -141,19 +141,19 @@ class HInstructionBuilder : public ValueObject { template<typename T> void If_22t(const Instruction& instruction, uint32_t dex_pc); void Conversion_12x(const Instruction& instruction, - Primitive::Type input_type, - Primitive::Type result_type, + DataType::Type input_type, + DataType::Type 
result_type, uint32_t dex_pc); void BuildCheckedDivRem(uint16_t out_reg, uint16_t first_reg, int64_t second_reg_or_constant, uint32_t dex_pc, - Primitive::Type type, + DataType::Type type, bool second_is_lit, bool is_div); - void BuildReturn(const Instruction& instruction, Primitive::Type type, uint32_t dex_pc); + void BuildReturn(const Instruction& instruction, DataType::Type type, uint32_t dex_pc); // Builds an instance field access node and returns whether the instruction is supported. bool BuildInstanceFieldAccess(const Instruction& instruction, @@ -164,14 +164,14 @@ class HInstructionBuilder : public ValueObject { void BuildUnresolvedStaticFieldAccess(const Instruction& instruction, uint32_t dex_pc, bool is_put, - Primitive::Type field_type); + DataType::Type field_type); // Builds a static field access node and returns whether the instruction is supported. bool BuildStaticFieldAccess(const Instruction& instruction, uint32_t dex_pc, bool is_put); void BuildArrayAccess(const Instruction& instruction, uint32_t dex_pc, bool is_get, - Primitive::Type anticipated_type); + DataType::Type anticipated_type); // Builds an invocation node and returns whether the instruction is supported. bool BuildInvoke(const Instruction& instruction, @@ -210,7 +210,7 @@ class HInstructionBuilder : public ValueObject { void BuildFillArrayData(HInstruction* object, const T* data, uint32_t element_count, - Primitive::Type anticipated_type, + DataType::Type anticipated_type, uint32_t dex_pc); // Fills the given object with data as specified in the fill-array-data @@ -321,7 +321,7 @@ class HInstructionBuilder : public ValueObject { const DexFile::CodeItem& code_item_; // The return type of the method being compiled. - const Primitive::Type return_type_; + const DataType::Type return_type_; HBasicBlockBuilder* block_builder_; SsaBuilder* ssa_builder_; diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc index 337177fa80..1a2494a992 100644 --- a/compiler/optimizing/instruction_simplifier.cc +++ b/compiler/optimizing/instruction_simplifier.cc @@ -18,6 +18,7 @@ #include "art_method-inl.h" #include "class_linker-inl.h" +#include "data_type-inl.h" #include "escape.h" #include "intrinsics.h" #include "mirror/class-inl.h" @@ -103,10 +104,10 @@ class InstructionSimplifierVisitor : public HGraphDelegateVisitor { bool CanEnsureNotNullAt(HInstruction* instr, HInstruction* at) const; - void SimplifyRotate(HInvoke* invoke, bool is_left, Primitive::Type type); + void SimplifyRotate(HInvoke* invoke, bool is_left, DataType::Type type); void SimplifySystemArrayCopy(HInvoke* invoke); void SimplifyStringEquals(HInvoke* invoke); - void SimplifyCompare(HInvoke* invoke, bool is_signum, Primitive::Type type); + void SimplifyCompare(HInvoke* invoke, bool is_signum, DataType::Type type); void SimplifyIsNaN(HInvoke* invoke); void SimplifyFP2Int(HInvoke* invoke); void SimplifyStringCharAt(HInvoke* invoke); @@ -178,7 +179,7 @@ bool InstructionSimplifierVisitor::TryMoveNegOnInputsAfterBinop(HBinaryOperation // Note that we cannot optimize `(-a) + (-b)` to `-(a + b)` for floating-point. // When `a` is `-0.0` and `b` is `0.0`, the former expression yields `0.0`, // while the later yields `-0.0`. 
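The signed-zero corner case called out in the comment above can be checked in isolation. The snippet below is a tiny standalone illustration, not ART code.

#include <cassert>
#include <cmath>

int main() {
  float a = -0.0f;
  float b = 0.0f;
  // (-a) + (-b) adds zeros of opposite sign, which IEEE 754 defines as +0.0 ...
  assert(!std::signbit((-a) + (-b)));  // former expression: +0.0
  // ... while -(a + b) negates +0.0 and therefore yields -0.0.
  assert(std::signbit(-(a + b)));      // latter expression: -0.0
  return 0;
}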
- if (!Primitive::IsIntegralType(binop->GetType())) { + if (!DataType::IsIntegralType(binop->GetType())) { return false; } binop->ReplaceInput(left_neg->GetInput(), 0); @@ -194,7 +195,7 @@ bool InstructionSimplifierVisitor::TryMoveNegOnInputsAfterBinop(HBinaryOperation bool InstructionSimplifierVisitor::TryDeMorganNegationFactoring(HBinaryOperation* op) { DCHECK(op->IsAnd() || op->IsOr()) << op->DebugName(); - Primitive::Type type = op->GetType(); + DataType::Type type = op->GetType(); HInstruction* left = op->GetLeft(); HInstruction* right = op->GetRight(); @@ -246,24 +247,24 @@ bool InstructionSimplifierVisitor::TryDeMorganNegationFactoring(HBinaryOperation } bool InstructionSimplifierVisitor::TryCombineVecMultiplyAccumulate(HVecMul* mul) { - Primitive::Type type = mul->GetPackedType(); + DataType::Type type = mul->GetPackedType(); InstructionSet isa = codegen_->GetInstructionSet(); switch (isa) { case kArm64: - if (!(type == Primitive::kPrimByte || - type == Primitive::kPrimChar || - type == Primitive::kPrimShort || - type == Primitive::kPrimInt)) { + if (!(type == DataType::Type::kInt8 || + type == DataType::Type::kUint16 || + type == DataType::Type::kInt16 || + type == DataType::Type::kInt32)) { return false; } break; case kMips: case kMips64: - if (!(type == Primitive::kPrimByte || - type == Primitive::kPrimChar || - type == Primitive::kPrimShort || - type == Primitive::kPrimInt || - type == Primitive::kPrimLong)) { + if (!(type == DataType::Type::kInt8 || + type == DataType::Type::kUint16 || + type == DataType::Type::kInt16 || + type == DataType::Type::kInt32 || + type == DataType::Type::kInt64)) { return false; } break; @@ -328,7 +329,7 @@ void InstructionSimplifierVisitor::VisitShift(HBinaryOperation* instruction) { HInstruction* shift_amount = instruction->GetRight(); HInstruction* value = instruction->GetLeft(); - int64_t implicit_mask = (value->GetType() == Primitive::kPrimLong) + int64_t implicit_mask = (value->GetType() == DataType::Type::kInt64) ? kMaxLongShiftDistance : kMaxIntShiftDistance; @@ -351,7 +352,7 @@ void InstructionSimplifierVisitor::VisitShift(HBinaryOperation* instruction) { // SHL dst, value, cst & implicit_mask // (as defined by shift semantics). This ensures other // optimizations do not need to special case for such situations. - DCHECK_EQ(shift_amount->GetType(), Primitive::kPrimInt); + DCHECK_EQ(shift_amount->GetType(), DataType::Type::kInt32); instruction->ReplaceInput(GetGraph()->GetIntConstant(masked_cst), /* index */ 1); RecordSimplification(); return; @@ -412,7 +413,7 @@ bool InstructionSimplifierVisitor::TryReplaceWithRotate(HBinaryOperation* op) { if ((left->IsUShr() && right->IsShl()) || (left->IsShl() && right->IsUShr())) { HUShr* ushr = left->IsUShr() ? left->AsUShr() : right->AsUShr(); HShl* shl = left->IsShl() ? 
left->AsShl() : right->AsShl(); - DCHECK(Primitive::IsIntOrLongType(ushr->GetType())); + DCHECK(DataType::IsIntOrLongType(ushr->GetType())); if (ushr->GetType() == shl->GetType() && ushr->GetLeft() == shl->GetLeft()) { if (ushr->GetRight()->IsConstant() && shl->GetRight()->IsConstant()) { @@ -445,7 +446,7 @@ bool InstructionSimplifierVisitor::TryReplaceWithRotateConstantPattern(HBinaryOp HUShr* ushr, HShl* shl) { DCHECK(op->IsAdd() || op->IsXor() || op->IsOr()); - size_t reg_bits = Primitive::ComponentSize(ushr->GetType()) * kBitsPerByte; + size_t reg_bits = DataType::Size(ushr->GetType()) * kBitsPerByte; size_t rdist = Int64FromConstant(ushr->GetRight()->AsConstant()); size_t ldist = Int64FromConstant(shl->GetRight()->AsConstant()); if (((ldist + rdist) & (reg_bits - 1)) == 0) { @@ -506,7 +507,7 @@ bool InstructionSimplifierVisitor::TryReplaceWithRotateRegisterSubPattern(HBinar HShl* shl) { DCHECK(op->IsAdd() || op->IsXor() || op->IsOr()); DCHECK(ushr->GetRight()->IsSub() || shl->GetRight()->IsSub()); - size_t reg_bits = Primitive::ComponentSize(ushr->GetType()) * kBitsPerByte; + size_t reg_bits = DataType::Size(ushr->GetType()) * kBitsPerByte; HInstruction* shl_shift = shl->GetRight(); HInstruction* ushr_shift = ushr->GetRight(); if ((shl_shift->IsSub() && IsSubRegBitsMinusOther(shl_shift->AsSub(), reg_bits, ushr_shift)) || @@ -664,14 +665,14 @@ void InstructionSimplifierVisitor::VisitInstanceOf(HInstanceOf* instruction) { } void InstructionSimplifierVisitor::VisitInstanceFieldSet(HInstanceFieldSet* instruction) { - if ((instruction->GetValue()->GetType() == Primitive::kPrimNot) + if ((instruction->GetValue()->GetType() == DataType::Type::kReference) && CanEnsureNotNullAt(instruction->GetValue(), instruction)) { instruction->ClearValueCanBeNull(); } } void InstructionSimplifierVisitor::VisitStaticFieldSet(HStaticFieldSet* instruction) { - if ((instruction->GetValue()->GetType() == Primitive::kPrimNot) + if ((instruction->GetValue()->GetType() == DataType::Type::kReference) && CanEnsureNotNullAt(instruction->GetValue(), instruction)) { instruction->ClearValueCanBeNull(); } @@ -708,7 +709,7 @@ static HCondition* GetOppositeConditionSwapOps(ArenaAllocator* arena, HInstructi } static bool CmpHasBoolType(HInstruction* input, HInstruction* cmp) { - if (input->GetType() == Primitive::kPrimBoolean) { + if (input->GetType() == DataType::Type::kBool) { return true; // input has direct boolean type } else if (cmp->GetUses().HasExactlyOneElement()) { // Comparison also has boolean type if both its input and the instruction @@ -801,7 +802,7 @@ void InstructionSimplifierVisitor::VisitBooleanNot(HBooleanNot* bool_not) { } else if (input->IsCondition() && // Don't change FP compares. The definition of compares involving // NaNs forces the compares to be done as written by the user. - !Primitive::IsFloatingPointType(input->InputAt(0)->GetType())) { + !DataType::IsFloatingPointType(input->InputAt(0)->GetType())) { // Replace condition with its opposite. replace_with = GetGraph()->InsertOppositeCondition(input->AsCondition(), bool_not); } @@ -815,8 +816,8 @@ void InstructionSimplifierVisitor::VisitBooleanNot(HBooleanNot* bool_not) { // Constructs a new ABS(x) node in the HIR. 
static HInstruction* NewIntegralAbs(ArenaAllocator* arena, HInstruction* x, HInstruction* cursor) { - Primitive::Type type = x->GetType(); - DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong); + DataType::Type type = x->GetType(); + DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64); // Construct a fake intrinsic with as much context as is needed to allocate one. // The intrinsic will always be lowered into code later anyway. // TODO: b/65164101 : moving towards a real HAbs node makes more sense. @@ -837,8 +838,8 @@ static HInstruction* NewIntegralAbs(ArenaAllocator* arena, HInstruction* x, HIns MethodReference(nullptr, dex::kDexNoIndex), HInvokeStaticOrDirect::ClinitCheckRequirement::kNone); invoke->SetArgumentAt(0, x); - invoke->SetIntrinsic(type == Primitive::kPrimInt ? Intrinsics::kMathAbsInt - : Intrinsics::kMathAbsLong, + invoke->SetIntrinsic(type == DataType::Type::kInt32 ? Intrinsics::kMathAbsInt + : Intrinsics::kMathAbsLong, kNoEnvironmentOrCache, kNoSideEffects, kNoThrow); @@ -848,20 +849,20 @@ static HInstruction* NewIntegralAbs(ArenaAllocator* arena, HInstruction* x, HIns // Returns true if operands a and b consists of widening type conversions // (either explicit or implicit) to the given to_type. -static bool AreLowerPrecisionArgs(Primitive::Type to_type, HInstruction* a, HInstruction* b) { +static bool AreLowerPrecisionArgs(DataType::Type to_type, HInstruction* a, HInstruction* b) { if (a->IsTypeConversion() && a->GetType() == to_type) { a = a->InputAt(0); } if (b->IsTypeConversion() && b->GetType() == to_type) { b = b->InputAt(0); } - Primitive::Type type1 = a->GetType(); - Primitive::Type type2 = b->GetType(); - return (type1 == Primitive::kPrimByte && type2 == Primitive::kPrimByte) || - (type1 == Primitive::kPrimShort && type2 == Primitive::kPrimShort) || - (type1 == Primitive::kPrimChar && type2 == Primitive::kPrimChar) || - (type1 == Primitive::kPrimInt && type2 == Primitive::kPrimInt && - to_type == Primitive::kPrimLong); + DataType::Type type1 = a->GetType(); + DataType::Type type2 = b->GetType(); + return (type1 == DataType::Type::kInt8 && type2 == DataType::Type::kInt8) || + (type1 == DataType::Type::kInt16 && type2 == DataType::Type::kInt16) || + (type1 == DataType::Type::kUint16 && type2 == DataType::Type::kUint16) || + (type1 == DataType::Type::kInt32 && type2 == DataType::Type::kInt32 && + to_type == DataType::Type::kInt64); } void InstructionSimplifierVisitor::VisitSelect(HSelect* select) { @@ -904,11 +905,12 @@ void InstructionSimplifierVisitor::VisitSelect(HSelect* select) { IfCondition cmp = condition->AsCondition()->GetCondition(); HInstruction* a = condition->InputAt(0); HInstruction* b = condition->InputAt(1); - Primitive::Type t_type = true_value->GetType(); - Primitive::Type f_type = false_value->GetType(); + DataType::Type t_type = true_value->GetType(); + DataType::Type f_type = false_value->GetType(); // Here we have a <cmp> b ? true_value : false_value. // Test if both values are same-typed int or long. - if (t_type == f_type && (t_type == Primitive::kPrimInt || t_type == Primitive::kPrimLong)) { + if (t_type == f_type && + (t_type == DataType::Type::kInt32 || t_type == DataType::Type::kInt64)) { // Try to replace typical integral ABS constructs. 
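For orientation, the "typical integral ABS construct" targeted here corresponds to source of roughly the following shape; this is illustrative only, since the actual matching is done on the HSelect/HNeg/HCondition nodes built from the dex code.

// Illustrative only: the source-level pattern behind HSelect(cond, HNeg(x), x).
static int32_t AbsPattern(int32_t x) {
  // Shows up in HIR as a select whose true value is HNeg(x); the simplifier
  // rewrites it into the Math.abs intrinsic produced by NewIntegralAbs().
  // (Java's Math.abs leaves INT32_MIN unchanged; in C++ that case would overflow.)
  return (x < 0) ? -x : x;
}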
if (true_value->IsNeg()) { HInstruction* negated = true_value->InputAt(0); @@ -974,7 +976,9 @@ void InstructionSimplifierVisitor::VisitArrayLength(HArrayLength* instruction) { void InstructionSimplifierVisitor::VisitArraySet(HArraySet* instruction) { HInstruction* value = instruction->GetValue(); - if (value->GetType() != Primitive::kPrimNot) return; + if (value->GetType() != DataType::Type::kReference) { + return; + } if (CanEnsureNotNullAt(value, instruction)) { instruction->ClearValueCanBeNull(); @@ -1014,39 +1018,39 @@ void InstructionSimplifierVisitor::VisitArraySet(HArraySet* instruction) { } } -static bool IsTypeConversionImplicit(Primitive::Type input_type, Primitive::Type result_type) { +static bool IsTypeConversionImplicit(DataType::Type input_type, DataType::Type result_type) { // Invariant: We should never generate a conversion to a Boolean value. - DCHECK_NE(Primitive::kPrimBoolean, result_type); + DCHECK_NE(DataType::Type::kBool, result_type); // Besides conversion to the same type, widening integral conversions are implicit, // excluding conversions to long and the byte->char conversion where we need to // clear the high 16 bits of the 32-bit sign-extended representation of byte. return result_type == input_type || - (result_type == Primitive::kPrimInt && (input_type == Primitive::kPrimBoolean || - input_type == Primitive::kPrimByte || - input_type == Primitive::kPrimShort || - input_type == Primitive::kPrimChar)) || - (result_type == Primitive::kPrimChar && input_type == Primitive::kPrimBoolean) || - (result_type == Primitive::kPrimShort && (input_type == Primitive::kPrimBoolean || - input_type == Primitive::kPrimByte)) || - (result_type == Primitive::kPrimByte && input_type == Primitive::kPrimBoolean); + (result_type == DataType::Type::kInt32 && (input_type == DataType::Type::kBool || + input_type == DataType::Type::kInt8 || + input_type == DataType::Type::kInt16 || + input_type == DataType::Type::kUint16)) || + (result_type == DataType::Type::kUint16 && input_type == DataType::Type::kBool) || + (result_type == DataType::Type::kInt16 && (input_type == DataType::Type::kBool || + input_type == DataType::Type::kInt8)) || + (result_type == DataType::Type::kInt8 && input_type == DataType::Type::kBool); } -static bool IsTypeConversionLossless(Primitive::Type input_type, Primitive::Type result_type) { +static bool IsTypeConversionLossless(DataType::Type input_type, DataType::Type result_type) { // The conversion to a larger type is loss-less with the exception of two cases, - // - conversion to char, the only unsigned type, where we may lose some bits, and + // - conversion to Uint16, the only unsigned type, where we may lose some bits, and // - conversion from float to long, the only FP to integral conversion with smaller FP type. // For integral to FP conversions this holds because the FP mantissa is large enough. 
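Two of the cases spelled out in the lossless-conversion comment above can be demonstrated directly. The snippet below is a standalone illustration, not ART code.

#include <cassert>
#include <cstdint>

int main() {
  // kFloat32 -> kInt64 is wider by size but still treated as lossy: the
  // conversion truncates the fractional part (and collapses NaN/infinity).
  assert(static_cast<int64_t>(1.5f) == 1);
  // Integral -> FP widenings are exact because the FP mantissa is large enough:
  // every int32 value is exactly representable as a double (52-bit mantissa).
  int32_t w = INT32_MAX;
  assert(static_cast<int32_t>(static_cast<double>(w)) == w);
  return 0;
}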
DCHECK_NE(input_type, result_type); - return Primitive::ComponentSize(result_type) > Primitive::ComponentSize(input_type) && - result_type != Primitive::kPrimChar && - !(result_type == Primitive::kPrimLong && input_type == Primitive::kPrimFloat); + return DataType::Size(result_type) > DataType::Size(input_type) && + result_type != DataType::Type::kUint16 && + !(result_type == DataType::Type::kInt64 && input_type == DataType::Type::kFloat32); } void InstructionSimplifierVisitor::VisitTypeConversion(HTypeConversion* instruction) { HInstruction* input = instruction->GetInput(); - Primitive::Type input_type = input->GetType(); - Primitive::Type result_type = instruction->GetResultType(); + DataType::Type input_type = input->GetType(); + DataType::Type result_type = instruction->GetResultType(); if (IsTypeConversionImplicit(input_type, result_type)) { // Remove the implicit conversion; this includes conversion to the same type. instruction->ReplaceWith(input); @@ -1058,7 +1062,7 @@ void InstructionSimplifierVisitor::VisitTypeConversion(HTypeConversion* instruct if (input->IsTypeConversion()) { HTypeConversion* input_conversion = input->AsTypeConversion(); HInstruction* original_input = input_conversion->GetInput(); - Primitive::Type original_type = original_input->GetType(); + DataType::Type original_type = original_input->GetType(); // When the first conversion is lossless, a direct conversion from the original type // to the final type yields the same result, even for a lossy second conversion, for @@ -1069,10 +1073,10 @@ void InstructionSimplifierVisitor::VisitTypeConversion(HTypeConversion* instruct // doesn't need, i.e. the final type is no wider than the intermediate. If so, direct // conversion yields the same result, for example long->int->short or int->char->short. bool integral_conversions_with_non_widening_second = - Primitive::IsIntegralType(input_type) && - Primitive::IsIntegralType(original_type) && - Primitive::IsIntegralType(result_type) && - Primitive::ComponentSize(result_type) <= Primitive::ComponentSize(input_type); + DataType::IsIntegralType(input_type) && + DataType::IsIntegralType(original_type) && + DataType::IsIntegralType(result_type) && + DataType::Size(result_type) <= DataType::Size(input_type); if (is_first_conversion_lossless || integral_conversions_with_non_widening_second) { // If the merged conversion is implicit, do the simplification unconditionally. @@ -1094,15 +1098,15 @@ void InstructionSimplifierVisitor::VisitTypeConversion(HTypeConversion* instruct return; } } - } else if (input->IsAnd() && Primitive::IsIntegralType(result_type)) { - DCHECK(Primitive::IsIntegralType(input_type)); + } else if (input->IsAnd() && DataType::IsIntegralType(result_type)) { + DCHECK(DataType::IsIntegralType(input_type)); HAnd* input_and = input->AsAnd(); HConstant* constant = input_and->GetConstantRight(); if (constant != nullptr) { int64_t value = Int64FromConstant(constant); DCHECK_NE(value, -1); // "& -1" would have been optimized away in VisitAnd(). size_t trailing_ones = CTZ(~static_cast<uint64_t>(value)); - if (trailing_ones >= kBitsPerByte * Primitive::ComponentSize(result_type)) { + if (trailing_ones >= kBitsPerByte * DataType::Size(result_type)) { // The `HAnd` is useless, for example in `(byte) (x & 0xff)`, get rid of it. 
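The `(byte) (x & 0xff)` case mentioned above follows from counting the trailing one bits of the mask, exactly as the CTZ(~value) test does. A standalone version of that test is sketched below; the names are illustrative, and __builtin_ctzll stands in for ART's CTZ helper.

#include <cassert>
#include <cstdint>

// The AND is redundant if the mask keeps at least as many low bits as the
// narrowing conversion keeps. (mask must not be all ones; the real code
// DCHECKs value != -1 before taking this path.)
static bool AndRedundantForNarrowing(int result_bits, uint64_t mask) {
  int trailing_ones = __builtin_ctzll(~mask);
  return trailing_ones >= result_bits;
}

int main() {
  assert(AndRedundantForNarrowing(8, 0xff));      // (byte) (x & 0xff): drop the AND
  assert(!AndRedundantForNarrowing(8, 0x7f));     // (byte) (x & 0x7f): keep it
  assert(AndRedundantForNarrowing(16, 0xffff));   // (short) (x & 0xffff): drop it
  return 0;
}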
HInstruction* original_input = input_and->GetLeastConstantLeft(); if (IsTypeConversionImplicit(original_input->GetType(), result_type)) { @@ -1124,7 +1128,7 @@ void InstructionSimplifierVisitor::VisitTypeConversion(HTypeConversion* instruct void InstructionSimplifierVisitor::VisitAdd(HAdd* instruction) { HConstant* input_cst = instruction->GetConstantRight(); HInstruction* input_other = instruction->GetLeastConstantLeft(); - bool integral_type = Primitive::IsIntegralType(instruction->GetType()); + bool integral_type = DataType::IsIntegralType(instruction->GetType()); if ((input_cst != nullptr) && input_cst->IsArithmeticZero()) { // Replace code looking like // ADD dst, src, 0 @@ -1226,7 +1230,7 @@ void InstructionSimplifierVisitor::VisitAnd(HAnd* instruction) { // can be non-zero after UShr. Transform Shr+And to UShr if the And-mask // precisely clears the shifted-in sign bits. if ((input_other->IsUShr() || input_other->IsShr()) && input_other->InputAt(1)->IsConstant()) { - size_t reg_bits = (instruction->GetResultType() == Primitive::kPrimLong) ? 64 : 32; + size_t reg_bits = (instruction->GetResultType() == DataType::Type::kInt64) ? 64 : 32; size_t shift = Int64FromConstant(input_other->InputAt(1)->AsConstant()) & (reg_bits - 1); size_t num_tail_bits_set = CTZ(value + 1); if ((num_tail_bits_set >= reg_bits - shift) && input_other->IsUShr()) { @@ -1447,7 +1451,7 @@ static constexpr bool CanDivideByReciprocalMultiplyDouble(int64_t divisor) { void InstructionSimplifierVisitor::VisitDiv(HDiv* instruction) { HConstant* input_cst = instruction->GetConstantRight(); HInstruction* input_other = instruction->GetLeastConstantLeft(); - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); if ((input_cst != nullptr) && input_cst->IsOne()) { // Replace code looking like @@ -1471,19 +1475,19 @@ void InstructionSimplifierVisitor::VisitDiv(HDiv* instruction) { return; } - if ((input_cst != nullptr) && Primitive::IsFloatingPointType(type)) { + if ((input_cst != nullptr) && DataType::IsFloatingPointType(type)) { // Try replacing code looking like // DIV dst, src, constant // with // MUL dst, src, 1 / constant HConstant* reciprocal = nullptr; - if (type == Primitive::Primitive::kPrimDouble) { + if (type == DataType::Type::kFloat64) { double value = input_cst->AsDoubleConstant()->GetValue(); if (CanDivideByReciprocalMultiplyDouble(bit_cast<int64_t, double>(value))) { reciprocal = GetGraph()->GetDoubleConstant(1.0 / value); } } else { - DCHECK_EQ(type, Primitive::kPrimFloat); + DCHECK_EQ(type, DataType::Type::kFloat32); float value = input_cst->AsFloatConstant()->GetValue(); if (CanDivideByReciprocalMultiplyFloat(bit_cast<int32_t, float>(value))) { reciprocal = GetGraph()->GetFloatConstant(1.0f / value); @@ -1502,7 +1506,7 @@ void InstructionSimplifierVisitor::VisitDiv(HDiv* instruction) { void InstructionSimplifierVisitor::VisitMul(HMul* instruction) { HConstant* input_cst = instruction->GetConstantRight(); HInstruction* input_other = instruction->GetLeastConstantLeft(); - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); HBasicBlock* block = instruction->GetBlock(); ArenaAllocator* allocator = GetGraph()->GetArena(); @@ -1522,7 +1526,7 @@ void InstructionSimplifierVisitor::VisitMul(HMul* instruction) { } if (input_cst->IsMinusOne() && - (Primitive::IsFloatingPointType(type) || Primitive::IsIntOrLongType(type))) { + (DataType::IsFloatingPointType(type) || DataType::IsIntOrLongType(type))) { // Replace code looking like // 
MUL dst, src, -1 // with @@ -1533,7 +1537,7 @@ void InstructionSimplifierVisitor::VisitMul(HMul* instruction) { return; } - if (Primitive::IsFloatingPointType(type) && + if (DataType::IsFloatingPointType(type) && ((input_cst->IsFloatConstant() && input_cst->AsFloatConstant()->GetValue() == 2.0f) || (input_cst->IsDoubleConstant() && input_cst->AsDoubleConstant()->GetValue() == 2.0))) { // Replace code looking like @@ -1547,7 +1551,7 @@ void InstructionSimplifierVisitor::VisitMul(HMul* instruction) { return; } - if (Primitive::IsIntOrLongType(type)) { + if (DataType::IsIntOrLongType(type)) { int64_t factor = Int64FromConstant(input_cst); // Even though constant propagation also takes care of the zero case, other // optimizations can lead to having a zero multiplication. @@ -1630,7 +1634,7 @@ void InstructionSimplifierVisitor::VisitNeg(HNeg* instruction) { } if (input->IsSub() && input->HasOnlyOneNonEnvironmentUse() && - !Primitive::IsFloatingPointType(input->GetType())) { + !DataType::IsFloatingPointType(input->GetType())) { // Replace code looking like // SUB tmp, a, b // NEG dst, tmp @@ -1726,8 +1730,8 @@ void InstructionSimplifierVisitor::VisitSub(HSub* instruction) { HConstant* input_cst = instruction->GetConstantRight(); HInstruction* input_other = instruction->GetLeastConstantLeft(); - Primitive::Type type = instruction->GetType(); - if (Primitive::IsFloatingPointType(type)) { + DataType::Type type = instruction->GetType(); + if (DataType::IsFloatingPointType(type)) { return; } @@ -1818,7 +1822,7 @@ void InstructionSimplifierVisitor::VisitSub(HSub* instruction) { // SUB instruction is not needed in this case, we may use // one of inputs of ADD instead. // It is applicable to integral types only. - DCHECK(Primitive::IsIntegralType(type)); + DCHECK(DataType::IsIntegralType(type)); if (left->InputAt(1) == right) { instruction->ReplaceWith(left->InputAt(0)); RecordSimplification(); @@ -1853,7 +1857,7 @@ void InstructionSimplifierVisitor::VisitXor(HXor* instruction) { } if ((input_cst != nullptr) && input_cst->IsOne() - && input_other->GetType() == Primitive::kPrimBoolean) { + && input_other->GetType() == DataType::Type::kBool) { // Replace code looking like // XOR dst, src, 1 // with @@ -1930,7 +1934,7 @@ void InstructionSimplifierVisitor::SimplifyStringEquals(HInvoke* instruction) { void InstructionSimplifierVisitor::SimplifyRotate(HInvoke* invoke, bool is_left, - Primitive::Type type) { + DataType::Type type) { DCHECK(invoke->IsInvokeStaticOrDirect()); DCHECK_EQ(invoke->GetInvokeType(), InvokeType::kStatic); HInstruction* value = invoke->InputAt(0); @@ -1940,7 +1944,7 @@ void InstructionSimplifierVisitor::SimplifyRotate(HInvoke* invoke, // Unconditionally set the type of the negated distance to `int`, // as shift and rotate operations expect a 32-bit (or narrower) // value for their distance input. 
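The rotate-left handling described in the comment above relies on the identity rol(x, d) == ror(x, -d) modulo the value width, which is why rotate-left is lowered to HNeg plus HRor. A quick standalone check of that identity (not ART code):

#include <cassert>
#include <cstdint>

static uint32_t Ror32(uint32_t x, uint32_t d) {
  d &= 31;
  return (x >> d) | (x << ((32 - d) & 31));
}

int main() {
  uint32_t x = 0x12345678u;
  for (uint32_t d = 0; d < 32; ++d) {
    // Rotate-left by d equals rotate-right by the negated (wrapped) distance.
    uint32_t rol = (x << d) | (x >> ((32 - d) & 31));
    assert(rol == Ror32(x, 0u - d));
  }
  return 0;
}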
- distance = new (GetGraph()->GetArena()) HNeg(Primitive::kPrimInt, distance); + distance = new (GetGraph()->GetArena()) HNeg(DataType::Type::kInt32, distance); invoke->GetBlock()->InsertInstructionBefore(distance, invoke); } HRor* ror = new (GetGraph()->GetArena()) HRor(type, value, distance); @@ -1993,8 +1997,8 @@ void InstructionSimplifierVisitor::SimplifySystemArrayCopy(HInvoke* instruction) { ScopedObjectAccess soa(Thread::Current()); - Primitive::Type source_component_type = Primitive::kPrimVoid; - Primitive::Type destination_component_type = Primitive::kPrimVoid; + DataType::Type source_component_type = DataType::Type::kVoid; + DataType::Type destination_component_type = DataType::Type::kVoid; ReferenceTypeInfo destination_rti = destination->GetReferenceTypeInfo(); if (destination_rti.IsValid()) { if (destination_rti.IsObjectArray()) { @@ -2004,8 +2008,8 @@ void InstructionSimplifierVisitor::SimplifySystemArrayCopy(HInvoke* instruction) optimizations.SetDestinationIsTypedObjectArray(); } if (destination_rti.IsPrimitiveArrayClass()) { - destination_component_type = - destination_rti.GetTypeHandle()->GetComponentType()->GetPrimitiveType(); + destination_component_type = DataTypeFromPrimitive( + destination_rti.GetTypeHandle()->GetComponentType()->GetPrimitiveType()); optimizations.SetDestinationIsPrimitiveArray(); } else if (destination_rti.IsNonPrimitiveArrayClass()) { optimizations.SetDestinationIsNonPrimitiveArray(); @@ -2018,13 +2022,14 @@ void InstructionSimplifierVisitor::SimplifySystemArrayCopy(HInvoke* instruction) } if (source_rti.IsPrimitiveArrayClass()) { optimizations.SetSourceIsPrimitiveArray(); - source_component_type = source_rti.GetTypeHandle()->GetComponentType()->GetPrimitiveType(); + source_component_type = DataTypeFromPrimitive( + source_rti.GetTypeHandle()->GetComponentType()->GetPrimitiveType()); } else if (source_rti.IsNonPrimitiveArrayClass()) { optimizations.SetSourceIsNonPrimitiveArray(); } } // For primitive arrays, use their optimized ArtMethod implementations. 
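SimplifySystemArrayCopy above now goes through a DataTypeFromPrimitive() helper when reading component types out of the runtime's ReferenceTypeInfo, presumably provided by the data_type-inl.h header this change adds to the includes of this file. The following is only a sketch of its presumed shape, inferred from the one-to-one renaming visible throughout this diff; it assumes the ART primitive.h and data_type.h headers and the UNREACHABLE macro.

// Sketch only: the runtime still speaks Primitive::Type, the compiler now
// speaks DataType::Type, and the two enums correspond one to one.
static DataType::Type DataTypeFromPrimitiveSketch(Primitive::Type type) {
  switch (type) {
    case Primitive::kPrimNot:     return DataType::Type::kReference;
    case Primitive::kPrimBoolean: return DataType::Type::kBool;
    case Primitive::kPrimByte:    return DataType::Type::kInt8;
    case Primitive::kPrimChar:    return DataType::Type::kUint16;
    case Primitive::kPrimShort:   return DataType::Type::kInt16;
    case Primitive::kPrimInt:     return DataType::Type::kInt32;
    case Primitive::kPrimLong:    return DataType::Type::kInt64;
    case Primitive::kPrimFloat:   return DataType::Type::kFloat32;
    case Primitive::kPrimDouble:  return DataType::Type::kFloat64;
    case Primitive::kPrimVoid:    return DataType::Type::kVoid;
  }
  UNREACHABLE();
}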
- if ((source_component_type != Primitive::kPrimVoid) && + if ((source_component_type != DataType::Type::kVoid) && (source_component_type == destination_component_type)) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); PointerSize image_size = class_linker->GetImagePointerSize(); @@ -2032,28 +2037,28 @@ void InstructionSimplifierVisitor::SimplifySystemArrayCopy(HInvoke* instruction) mirror::Class* system = invoke->GetResolvedMethod()->GetDeclaringClass(); ArtMethod* method = nullptr; switch (source_component_type) { - case Primitive::kPrimBoolean: + case DataType::Type::kBool: method = system->FindClassMethod("arraycopy", "([ZI[ZII)V", image_size); break; - case Primitive::kPrimByte: + case DataType::Type::kInt8: method = system->FindClassMethod("arraycopy", "([BI[BII)V", image_size); break; - case Primitive::kPrimChar: + case DataType::Type::kUint16: method = system->FindClassMethod("arraycopy", "([CI[CII)V", image_size); break; - case Primitive::kPrimShort: + case DataType::Type::kInt16: method = system->FindClassMethod("arraycopy", "([SI[SII)V", image_size); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: method = system->FindClassMethod("arraycopy", "([II[III)V", image_size); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: method = system->FindClassMethod("arraycopy", "([FI[FII)V", image_size); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: method = system->FindClassMethod("arraycopy", "([JI[JII)V", image_size); break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: method = system->FindClassMethod("arraycopy", "([DI[DII)V", image_size); break; default: @@ -2074,14 +2079,14 @@ void InstructionSimplifierVisitor::SimplifySystemArrayCopy(HInvoke* instruction) void InstructionSimplifierVisitor::SimplifyCompare(HInvoke* invoke, bool is_signum, - Primitive::Type type) { + DataType::Type type) { DCHECK(invoke->IsInvokeStaticOrDirect()); uint32_t dex_pc = invoke->GetDexPc(); HInstruction* left = invoke->InputAt(0); HInstruction* right; if (!is_signum) { right = invoke->InputAt(1); - } else if (type == Primitive::kPrimLong) { + } else if (type == DataType::Type::kInt64) { right = GetGraph()->GetLongConstant(0); } else { right = GetGraph()->GetIntConstant(0); @@ -2105,17 +2110,17 @@ void InstructionSimplifierVisitor::SimplifyFP2Int(HInvoke* invoke) { DCHECK(invoke->IsInvokeStaticOrDirect()); uint32_t dex_pc = invoke->GetDexPc(); HInstruction* x = invoke->InputAt(0); - Primitive::Type type = x->GetType(); + DataType::Type type = x->GetType(); // Set proper bit pattern for NaN and replace intrinsic with raw version. 
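The two constants set up just below are the canonical quiet-NaN bit patterns, i.e. the values Double.doubleToLongBits() and Float.floatToIntBits() return for any NaN input. A standalone check of the NaN bit layout those constants encode (illustrative, not ART code):

#include <cassert>
#include <cstdint>
#include <cstring>
#include <limits>

int main() {
  // Any double NaN has an all-ones exponent and a non-zero mantissa; the
  // canonical quiet NaN used by doubleToLongBits() is 0x7ff8000000000000.
  double d = std::numeric_limits<double>::quiet_NaN();
  uint64_t dbits;
  std::memcpy(&dbits, &d, sizeof(dbits));
  assert((dbits & 0x7ff0000000000000ULL) == 0x7ff0000000000000ULL);  // exponent
  assert((dbits & 0x000fffffffffffffULL) != 0);                      // mantissa
  // Same shape for float; the canonical value is 0x7fc00000.
  float f = std::numeric_limits<float>::quiet_NaN();
  uint32_t fbits;
  std::memcpy(&fbits, &f, sizeof(fbits));
  assert((fbits & 0x7f800000u) == 0x7f800000u);
  assert((fbits & 0x007fffffu) != 0);
  return 0;
}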
HInstruction* nan; - if (type == Primitive::kPrimDouble) { + if (type == DataType::Type::kFloat64) { nan = GetGraph()->GetLongConstant(0x7ff8000000000000L); invoke->SetIntrinsic(Intrinsics::kDoubleDoubleToRawLongBits, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow); } else { - DCHECK_EQ(type, Primitive::kPrimFloat); + DCHECK_EQ(type, DataType::Type::kFloat32); nan = GetGraph()->GetIntConstant(0x7fc00000); invoke->SetIntrinsic(Intrinsics::kFloatFloatToRawIntBits, kNeedsEnvironmentOrCache, @@ -2145,7 +2150,7 @@ void InstructionSimplifierVisitor::SimplifyStringCharAt(HInvoke* invoke) { index, length, dex_pc, invoke->GetDexMethodIndex()); invoke->GetBlock()->InsertInstructionBefore(bounds_check, invoke); HArrayGet* array_get = new (arena) HArrayGet( - str, bounds_check, Primitive::kPrimChar, dex_pc, /* is_string_char_at */ true); + str, bounds_check, DataType::Type::kUint16, dex_pc, /* is_string_char_at */ true); invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, array_get); bounds_check->CopyEnvironmentFrom(invoke->GetEnvironment()); GetGraph()->SetHasBoundsChecks(true); @@ -2248,28 +2253,28 @@ void InstructionSimplifierVisitor::VisitInvoke(HInvoke* instruction) { SimplifySystemArrayCopy(instruction); break; case Intrinsics::kIntegerRotateRight: - SimplifyRotate(instruction, /* is_left */ false, Primitive::kPrimInt); + SimplifyRotate(instruction, /* is_left */ false, DataType::Type::kInt32); break; case Intrinsics::kLongRotateRight: - SimplifyRotate(instruction, /* is_left */ false, Primitive::kPrimLong); + SimplifyRotate(instruction, /* is_left */ false, DataType::Type::kInt64); break; case Intrinsics::kIntegerRotateLeft: - SimplifyRotate(instruction, /* is_left */ true, Primitive::kPrimInt); + SimplifyRotate(instruction, /* is_left */ true, DataType::Type::kInt32); break; case Intrinsics::kLongRotateLeft: - SimplifyRotate(instruction, /* is_left */ true, Primitive::kPrimLong); + SimplifyRotate(instruction, /* is_left */ true, DataType::Type::kInt64); break; case Intrinsics::kIntegerCompare: - SimplifyCompare(instruction, /* is_signum */ false, Primitive::kPrimInt); + SimplifyCompare(instruction, /* is_signum */ false, DataType::Type::kInt32); break; case Intrinsics::kLongCompare: - SimplifyCompare(instruction, /* is_signum */ false, Primitive::kPrimLong); + SimplifyCompare(instruction, /* is_signum */ false, DataType::Type::kInt64); break; case Intrinsics::kIntegerSignum: - SimplifyCompare(instruction, /* is_signum */ true, Primitive::kPrimInt); + SimplifyCompare(instruction, /* is_signum */ true, DataType::Type::kInt32); break; case Intrinsics::kLongSignum: - SimplifyCompare(instruction, /* is_signum */ true, Primitive::kPrimLong); + SimplifyCompare(instruction, /* is_signum */ true, DataType::Type::kInt64); break; case Intrinsics::kFloatIsNaN: case Intrinsics::kDoubleIsNaN: @@ -2337,7 +2342,7 @@ bool InstructionSimplifierVisitor::TryHandleAssociativeAndCommutativeOperation( HBinaryOperation* instruction) { DCHECK(instruction->IsCommutative()); - if (!Primitive::IsIntegralType(instruction->GetType())) { + if (!DataType::IsIntegralType(instruction->GetType())) { return false; } @@ -2387,12 +2392,12 @@ static HBinaryOperation* AsAddOrSub(HInstruction* binop) { } // Helper function that performs addition statically, considering the result type. -static int64_t ComputeAddition(Primitive::Type type, int64_t x, int64_t y) { +static int64_t ComputeAddition(DataType::Type type, int64_t x, int64_t y) { // Use the Compute() method for consistency with TryStaticEvaluation(). 
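// Aside, not part of the patch: the result type matters here because an int32 addition must wrap
// to 32 bits even though the helper returns int64_t. A standalone sketch of the same behaviour,
// using unsigned arithmetic so the wrap-around is well defined (HAdd::Compute is assumed to be
// equivalent for these inputs):
#include <cstdint>

static int64_t ComputeAdditionSketch(bool is_int32, int64_t x, int64_t y) {
  if (is_int32) {
    return static_cast<int32_t>(static_cast<uint32_t>(x) + static_cast<uint32_t>(y));
  }
  return static_cast<int64_t>(static_cast<uint64_t>(x) + static_cast<uint64_t>(y));
}
// Example: ComputeAdditionSketch(/* is_int32 */ true, INT32_MAX, 1) yields INT32_MIN,
// while the 64-bit variant yields 2147483648.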
- if (type == Primitive::kPrimInt) { + if (type == DataType::Type::kInt32) { return HAdd::Compute<int32_t>(x, y); } else { - DCHECK_EQ(type, Primitive::kPrimLong); + DCHECK_EQ(type, DataType::Type::kInt64); return HAdd::Compute<int64_t>(x, y); } } @@ -2414,8 +2419,8 @@ bool InstructionSimplifierVisitor::TrySubtractionChainSimplification( HBinaryOperation* instruction) { DCHECK(instruction->IsAdd() || instruction->IsSub()) << instruction->DebugName(); - Primitive::Type type = instruction->GetType(); - if (!Primitive::IsIntegralType(type)) { + DataType::Type type = instruction->GetType(); + if (!DataType::IsIntegralType(type)) { return false; } diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc index a32d0ce42b..efd7cb47fe 100644 --- a/compiler/optimizing/instruction_simplifier_arm.cc +++ b/compiler/optimizing/instruction_simplifier_arm.cc @@ -38,8 +38,8 @@ bool InstructionSimplifierArmVisitor::TryMergeIntoShifterOperand(HInstruction* u DCHECK(CanFitInShifterOperand(bitfield_op)); DCHECK(!bitfield_op->HasEnvironmentUses()); - Primitive::Type type = use->GetType(); - if (type != Primitive::kPrimInt && type != Primitive::kPrimLong) { + DataType::Type type = use->GetType(); + if (type != DataType::Type::kInt32 && type != DataType::Type::kInt64) { return false; } @@ -70,17 +70,17 @@ bool InstructionSimplifierArmVisitor::TryMergeIntoShifterOperand(HInstruction* u int shift_amount = 0; HDataProcWithShifterOp::GetOpInfoFromInstruction(bitfield_op, &op_kind, &shift_amount); - shift_amount &= use->GetType() == Primitive::kPrimInt + shift_amount &= use->GetType() == DataType::Type::kInt32 ? kMaxIntShiftDistance : kMaxLongShiftDistance; if (HDataProcWithShifterOp::IsExtensionOp(op_kind)) { - if (!use->IsAdd() && (!use->IsSub() || use->GetType() != Primitive::kPrimLong)) { + if (!use->IsAdd() && (!use->IsSub() || use->GetType() != DataType::Type::kInt64)) { return false; } // Shift by 1 is a special case that results in the same number and type of instructions // as this simplification, but potentially shorter code. - } else if (type == Primitive::kPrimLong && shift_amount == 1) { + } else if (type == DataType::Type::kInt64 && shift_amount == 1) { return false; } @@ -143,7 +143,7 @@ void InstructionSimplifierArmVisitor::VisitAnd(HAnd* instruction) { void InstructionSimplifierArmVisitor::VisitArrayGet(HArrayGet* instruction) { size_t data_offset = CodeGenerator::GetArrayDataOffset(instruction); - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); // TODO: Implement reading (length + compression) for String compression feature from // negative offset (count_offset - data_offset). Thumb2Assembler (now removed) did @@ -153,9 +153,9 @@ void InstructionSimplifierArmVisitor::VisitArrayGet(HArrayGet* instruction) { return; } - if (type == Primitive::kPrimLong - || type == Primitive::kPrimFloat - || type == Primitive::kPrimDouble) { + if (type == DataType::Type::kInt64 + || type == DataType::Type::kFloat32 + || type == DataType::Type::kFloat64) { // T32 doesn't support ShiftedRegOffset mem address mode for these types // to enable optimization. 
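// Aside, not part of the patch: the same three-type bail-out appears in both VisitArrayGet and
// VisitArraySet, so it can be read as a single predicate over the element type. Illustrative
// helper (the name is an assumption, not something this change introduces):
static bool ShiftedRegOffsetUsableOnT32(DataType::Type type) {
  // 64-bit and floating-point array accesses cannot use the ShiftedRegOffset addressing mode on
  // T32, so the intermediate-address optimization is skipped for them.
  return type != DataType::Type::kInt64 &&
         type != DataType::Type::kFloat32 &&
         type != DataType::Type::kFloat64;
}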
return; @@ -170,13 +170,13 @@ void InstructionSimplifierArmVisitor::VisitArrayGet(HArrayGet* instruction) { } void InstructionSimplifierArmVisitor::VisitArraySet(HArraySet* instruction) { - size_t access_size = Primitive::ComponentSize(instruction->GetComponentType()); + size_t access_size = DataType::Size(instruction->GetComponentType()); size_t data_offset = mirror::Array::DataOffset(access_size).Uint32Value(); - Primitive::Type type = instruction->GetComponentType(); + DataType::Type type = instruction->GetComponentType(); - if (type == Primitive::kPrimLong - || type == Primitive::kPrimFloat - || type == Primitive::kPrimDouble) { + if (type == DataType::Type::kInt64 + || type == DataType::Type::kFloat32 + || type == DataType::Type::kFloat64) { // T32 doesn't support ShiftedRegOffset mem address mode for these types // to enable optimization. return; @@ -215,15 +215,15 @@ void InstructionSimplifierArmVisitor::VisitShr(HShr* instruction) { } void InstructionSimplifierArmVisitor::VisitTypeConversion(HTypeConversion* instruction) { - Primitive::Type result_type = instruction->GetResultType(); - Primitive::Type input_type = instruction->GetInputType(); + DataType::Type result_type = instruction->GetResultType(); + DataType::Type input_type = instruction->GetInputType(); if (input_type == result_type) { // We let the arch-independent code handle this. return; } - if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) { + if (DataType::IsIntegralType(result_type) && DataType::IsIntegralType(input_type)) { TryMergeIntoUsersShifterOperand(instruction); } } diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc index 7c9bfb11b2..1c3b79dc03 100644 --- a/compiler/optimizing/instruction_simplifier_arm64.cc +++ b/compiler/optimizing/instruction_simplifier_arm64.cc @@ -38,8 +38,8 @@ bool InstructionSimplifierArm64Visitor::TryMergeIntoShifterOperand(HInstruction* DCHECK(CanFitInShifterOperand(bitfield_op)); DCHECK(!bitfield_op->HasEnvironmentUses()); - Primitive::Type type = use->GetType(); - if (type != Primitive::kPrimInt && type != Primitive::kPrimLong) { + DataType::Type type = use->GetType(); + if (type != DataType::Type::kInt32 && type != DataType::Type::kInt64) { return false; } @@ -150,7 +150,7 @@ void InstructionSimplifierArm64Visitor::VisitArrayGet(HArrayGet* instruction) { } void InstructionSimplifierArm64Visitor::VisitArraySet(HArraySet* instruction) { - size_t access_size = Primitive::ComponentSize(instruction->GetComponentType()); + size_t access_size = DataType::Size(instruction->GetComponentType()); size_t data_offset = mirror::Array::DataOffset(access_size).Uint32Value(); if (TryExtractArrayAccessAddress(instruction, instruction->GetArray(), @@ -185,15 +185,15 @@ void InstructionSimplifierArm64Visitor::VisitShr(HShr* instruction) { } void InstructionSimplifierArm64Visitor::VisitTypeConversion(HTypeConversion* instruction) { - Primitive::Type result_type = instruction->GetResultType(); - Primitive::Type input_type = instruction->GetInputType(); + DataType::Type result_type = instruction->GetResultType(); + DataType::Type input_type = instruction->GetInputType(); if (input_type == result_type) { // We let the arch-independent code handle this. 
return; } - if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) { + if (DataType::IsIntegralType(result_type) && DataType::IsIntegralType(input_type)) { TryMergeIntoUsersShifterOperand(instruction); } } diff --git a/compiler/optimizing/instruction_simplifier_shared.cc b/compiler/optimizing/instruction_simplifier_shared.cc index 7a759b9118..73d866fbea 100644 --- a/compiler/optimizing/instruction_simplifier_shared.cc +++ b/compiler/optimizing/instruction_simplifier_shared.cc @@ -25,7 +25,7 @@ namespace { bool TrySimpleMultiplyAccumulatePatterns(HMul* mul, HBinaryOperation* input_binop, HInstruction* input_other) { - DCHECK(Primitive::IsIntOrLongType(mul->GetType())); + DCHECK(DataType::IsIntOrLongType(mul->GetType())); DCHECK(input_binop->IsAdd() || input_binop->IsSub()); DCHECK_NE(input_binop, input_other); if (!input_binop->HasOnlyOneNonEnvironmentUse()) { @@ -88,16 +88,16 @@ bool TrySimpleMultiplyAccumulatePatterns(HMul* mul, } // namespace bool TryCombineMultiplyAccumulate(HMul* mul, InstructionSet isa) { - Primitive::Type type = mul->GetType(); + DataType::Type type = mul->GetType(); switch (isa) { case kArm: case kThumb2: - if (type != Primitive::kPrimInt) { + if (type != DataType::Type::kInt32) { return false; } break; case kArm64: - if (!Primitive::IsIntOrLongType(type)) { + if (!DataType::IsIntOrLongType(type)) { return false; } break; @@ -240,13 +240,13 @@ bool TryExtractArrayAccessAddress(HInstruction* access, return false; } if (access->IsArraySet() && - access->AsArraySet()->GetValue()->GetType() == Primitive::kPrimNot) { + access->AsArraySet()->GetValue()->GetType() == DataType::Type::kReference) { // The access may require a runtime call or the original array pointer. return false; } if (kEmitCompilerReadBarrier && access->IsArrayGet() && - access->GetType() == Primitive::kPrimNot) { + access->GetType() == DataType::Type::kReference) { // For object arrays, the read barrier instrumentation requires // the original array pointer. // TODO: This can be relaxed for Baker CC. @@ -290,10 +290,10 @@ bool TryExtractVecArrayAccessAddress(HVecMemoryOperation* access, HInstruction* HGraph* graph = access->GetBlock()->GetGraph(); ArenaAllocator* arena = graph->GetArena(); - Primitive::Type packed_type = access->GetPackedType(); + DataType::Type packed_type = access->GetPackedType(); uint32_t data_offset = mirror::Array::DataOffset( - Primitive::ComponentSize(packed_type)).Uint32Value(); - size_t component_shift = Primitive::ComponentSizeShift(packed_type); + DataType::Size(packed_type)).Uint32Value(); + size_t component_shift = DataType::SizeShift(packed_type); bool is_extracting_beneficial = false; // It is beneficial to extract index intermediate address only if there are at least 2 users. 
@@ -301,10 +301,10 @@ bool TryExtractVecArrayAccessAddress(HVecMemoryOperation* access, HInstruction* HInstruction* user = use.GetUser(); if (user->IsVecMemoryOperation() && user != access) { HVecMemoryOperation* another_access = user->AsVecMemoryOperation(); - Primitive::Type another_packed_type = another_access->GetPackedType(); + DataType::Type another_packed_type = another_access->GetPackedType(); uint32_t another_data_offset = mirror::Array::DataOffset( - Primitive::ComponentSize(another_packed_type)).Uint32Value(); - size_t another_component_shift = Primitive::ComponentSizeShift(another_packed_type); + DataType::Size(another_packed_type)).Uint32Value(); + size_t another_component_shift = DataType::SizeShift(another_packed_type); if (another_data_offset == data_offset && another_component_shift == component_shift) { is_extracting_beneficial = true; break; diff --git a/compiler/optimizing/instruction_simplifier_shared.h b/compiler/optimizing/instruction_simplifier_shared.h index 31e23833b1..b016a8769e 100644 --- a/compiler/optimizing/instruction_simplifier_shared.h +++ b/compiler/optimizing/instruction_simplifier_shared.h @@ -26,10 +26,10 @@ namespace helpers { inline bool CanFitInShifterOperand(HInstruction* instruction) { if (instruction->IsTypeConversion()) { HTypeConversion* conversion = instruction->AsTypeConversion(); - Primitive::Type result_type = conversion->GetResultType(); - Primitive::Type input_type = conversion->GetInputType(); + DataType::Type result_type = conversion->GetResultType(); + DataType::Type input_type = conversion->GetInputType(); // We don't expect to see the same type as input and result. - return Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type) && + return DataType::IsIntegralType(result_type) && DataType::IsIntegralType(input_type) && (result_type != input_type); } else { return (instruction->IsShl() && instruction->AsShl()->InputAt(1)->IsIntConstant()) || diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc index 96efe7f3b1..75a1ce7e6f 100644 --- a/compiler/optimizing/intrinsics_arm64.cc +++ b/compiler/optimizing/intrinsics_arm64.cc @@ -76,16 +76,16 @@ ArenaAllocator* IntrinsicCodeGeneratorARM64::GetAllocator() { #define __ codegen->GetVIXLAssembler()-> static void MoveFromReturnRegister(Location trg, - Primitive::Type type, + DataType::Type type, CodeGeneratorARM64* codegen) { if (!trg.IsValid()) { - DCHECK(type == Primitive::kPrimVoid); + DCHECK(type == DataType::Type::kVoid); return; } - DCHECK_NE(type, Primitive::kPrimVoid); + DCHECK_NE(type, DataType::Type::kVoid); - if (Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) { + if (DataType::IsIntegralType(type) || type == DataType::Type::kReference) { Register trg_reg = RegisterFrom(trg, type); Register res_reg = RegisterFrom(ARM64ReturnLocation(type), type); __ Mov(trg_reg, res_reg, kDiscardForSameWReg); @@ -173,7 +173,7 @@ class ReadBarrierSystemArrayCopySlowPathARM64 : public SlowPathCodeARM64 { DCHECK(instruction_->GetLocations()->Intrinsified()); DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kSystemArrayCopy); - const int32_t element_size = Primitive::ComponentSize(Primitive::kPrimNot); + const int32_t element_size = DataType::Size(DataType::Type::kReference); Register src_curr_addr = XRegisterFrom(locations->GetTemp(0)); Register dst_curr_addr = XRegisterFrom(locations->GetTemp(1)); @@ -303,18 +303,18 @@ static void CreateIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) { } static 
void GenReverseBytes(LocationSummary* locations, - Primitive::Type type, + DataType::Type type, MacroAssembler* masm) { Location in = locations->InAt(0); Location out = locations->Out(); switch (type) { - case Primitive::kPrimShort: + case DataType::Type::kInt16: __ Rev16(WRegisterFrom(out), WRegisterFrom(in)); __ Sxth(WRegisterFrom(out), WRegisterFrom(out)); break; - case Primitive::kPrimInt: - case Primitive::kPrimLong: + case DataType::Type::kInt32: + case DataType::Type::kInt64: __ Rev(RegisterFrom(out, type), RegisterFrom(in, type)); break; default: @@ -328,7 +328,7 @@ void IntrinsicLocationsBuilderARM64::VisitIntegerReverseBytes(HInvoke* invoke) { } void IntrinsicCodeGeneratorARM64::VisitIntegerReverseBytes(HInvoke* invoke) { - GenReverseBytes(invoke->GetLocations(), Primitive::kPrimInt, GetVIXLAssembler()); + GenReverseBytes(invoke->GetLocations(), DataType::Type::kInt32, GetVIXLAssembler()); } void IntrinsicLocationsBuilderARM64::VisitLongReverseBytes(HInvoke* invoke) { @@ -336,7 +336,7 @@ void IntrinsicLocationsBuilderARM64::VisitLongReverseBytes(HInvoke* invoke) { } void IntrinsicCodeGeneratorARM64::VisitLongReverseBytes(HInvoke* invoke) { - GenReverseBytes(invoke->GetLocations(), Primitive::kPrimLong, GetVIXLAssembler()); + GenReverseBytes(invoke->GetLocations(), DataType::Type::kInt64, GetVIXLAssembler()); } void IntrinsicLocationsBuilderARM64::VisitShortReverseBytes(HInvoke* invoke) { @@ -344,7 +344,7 @@ void IntrinsicLocationsBuilderARM64::VisitShortReverseBytes(HInvoke* invoke) { } void IntrinsicCodeGeneratorARM64::VisitShortReverseBytes(HInvoke* invoke) { - GenReverseBytes(invoke->GetLocations(), Primitive::kPrimShort, GetVIXLAssembler()); + GenReverseBytes(invoke->GetLocations(), DataType::Type::kInt16, GetVIXLAssembler()); } static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) { @@ -357,9 +357,9 @@ static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) { } static void GenNumberOfLeadingZeros(LocationSummary* locations, - Primitive::Type type, + DataType::Type type, MacroAssembler* masm) { - DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong); + DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64); Location in = locations->InAt(0); Location out = locations->Out(); @@ -372,7 +372,7 @@ void IntrinsicLocationsBuilderARM64::VisitIntegerNumberOfLeadingZeros(HInvoke* i } void IntrinsicCodeGeneratorARM64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) { - GenNumberOfLeadingZeros(invoke->GetLocations(), Primitive::kPrimInt, GetVIXLAssembler()); + GenNumberOfLeadingZeros(invoke->GetLocations(), DataType::Type::kInt32, GetVIXLAssembler()); } void IntrinsicLocationsBuilderARM64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) { @@ -380,13 +380,13 @@ void IntrinsicLocationsBuilderARM64::VisitLongNumberOfLeadingZeros(HInvoke* invo } void IntrinsicCodeGeneratorARM64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) { - GenNumberOfLeadingZeros(invoke->GetLocations(), Primitive::kPrimLong, GetVIXLAssembler()); + GenNumberOfLeadingZeros(invoke->GetLocations(), DataType::Type::kInt64, GetVIXLAssembler()); } static void GenNumberOfTrailingZeros(LocationSummary* locations, - Primitive::Type type, + DataType::Type type, MacroAssembler* masm) { - DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong); + DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64); Location in = locations->InAt(0); Location out = locations->Out(); @@ -400,7 +400,7 @@ void 
IntrinsicLocationsBuilderARM64::VisitIntegerNumberOfTrailingZeros(HInvoke* } void IntrinsicCodeGeneratorARM64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) { - GenNumberOfTrailingZeros(invoke->GetLocations(), Primitive::kPrimInt, GetVIXLAssembler()); + GenNumberOfTrailingZeros(invoke->GetLocations(), DataType::Type::kInt32, GetVIXLAssembler()); } void IntrinsicLocationsBuilderARM64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) { @@ -408,13 +408,13 @@ void IntrinsicLocationsBuilderARM64::VisitLongNumberOfTrailingZeros(HInvoke* inv } void IntrinsicCodeGeneratorARM64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) { - GenNumberOfTrailingZeros(invoke->GetLocations(), Primitive::kPrimLong, GetVIXLAssembler()); + GenNumberOfTrailingZeros(invoke->GetLocations(), DataType::Type::kInt64, GetVIXLAssembler()); } static void GenReverse(LocationSummary* locations, - Primitive::Type type, + DataType::Type type, MacroAssembler* masm) { - DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong); + DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64); Location in = locations->InAt(0); Location out = locations->Out(); @@ -427,7 +427,7 @@ void IntrinsicLocationsBuilderARM64::VisitIntegerReverse(HInvoke* invoke) { } void IntrinsicCodeGeneratorARM64::VisitIntegerReverse(HInvoke* invoke) { - GenReverse(invoke->GetLocations(), Primitive::kPrimInt, GetVIXLAssembler()); + GenReverse(invoke->GetLocations(), DataType::Type::kInt32, GetVIXLAssembler()); } void IntrinsicLocationsBuilderARM64::VisitLongReverse(HInvoke* invoke) { @@ -435,19 +435,19 @@ void IntrinsicLocationsBuilderARM64::VisitLongReverse(HInvoke* invoke) { } void IntrinsicCodeGeneratorARM64::VisitLongReverse(HInvoke* invoke) { - GenReverse(invoke->GetLocations(), Primitive::kPrimLong, GetVIXLAssembler()); + GenReverse(invoke->GetLocations(), DataType::Type::kInt64, GetVIXLAssembler()); } -static void GenBitCount(HInvoke* instr, Primitive::Type type, MacroAssembler* masm) { - DCHECK(Primitive::IsIntOrLongType(type)) << type; - DCHECK_EQ(instr->GetType(), Primitive::kPrimInt); - DCHECK_EQ(Primitive::PrimitiveKind(instr->InputAt(0)->GetType()), type); +static void GenBitCount(HInvoke* instr, DataType::Type type, MacroAssembler* masm) { + DCHECK(DataType::IsIntOrLongType(type)) << type; + DCHECK_EQ(instr->GetType(), DataType::Type::kInt32); + DCHECK_EQ(DataType::Kind(instr->InputAt(0)->GetType()), type); UseScratchRegisterScope temps(masm); Register src = InputRegisterAt(instr, 0); Register dst = RegisterFrom(instr->GetLocations()->Out(), type); - FPRegister fpr = (type == Primitive::kPrimLong) ? temps.AcquireD() : temps.AcquireS(); + FPRegister fpr = (type == DataType::Type::kInt64) ? 
temps.AcquireD() : temps.AcquireS(); __ Fmov(fpr, src); __ Cnt(fpr.V8B(), fpr.V8B()); @@ -460,7 +460,7 @@ void IntrinsicLocationsBuilderARM64::VisitLongBitCount(HInvoke* invoke) { } void IntrinsicCodeGeneratorARM64::VisitLongBitCount(HInvoke* invoke) { - GenBitCount(invoke, Primitive::kPrimLong, GetVIXLAssembler()); + GenBitCount(invoke, DataType::Type::kInt64, GetVIXLAssembler()); } void IntrinsicLocationsBuilderARM64::VisitIntegerBitCount(HInvoke* invoke) { @@ -468,19 +468,19 @@ void IntrinsicLocationsBuilderARM64::VisitIntegerBitCount(HInvoke* invoke) { } void IntrinsicCodeGeneratorARM64::VisitIntegerBitCount(HInvoke* invoke) { - GenBitCount(invoke, Primitive::kPrimInt, GetVIXLAssembler()); + GenBitCount(invoke, DataType::Type::kInt32, GetVIXLAssembler()); } -static void GenHighestOneBit(HInvoke* invoke, Primitive::Type type, MacroAssembler* masm) { - DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong); +static void GenHighestOneBit(HInvoke* invoke, DataType::Type type, MacroAssembler* masm) { + DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64); UseScratchRegisterScope temps(masm); Register src = InputRegisterAt(invoke, 0); Register dst = RegisterFrom(invoke->GetLocations()->Out(), type); - Register temp = (type == Primitive::kPrimLong) ? temps.AcquireX() : temps.AcquireW(); - size_t high_bit = (type == Primitive::kPrimLong) ? 63u : 31u; - size_t clz_high_bit = (type == Primitive::kPrimLong) ? 6u : 5u; + Register temp = (type == DataType::Type::kInt64) ? temps.AcquireX() : temps.AcquireW(); + size_t high_bit = (type == DataType::Type::kInt64) ? 63u : 31u; + size_t clz_high_bit = (type == DataType::Type::kInt64) ? 6u : 5u; __ Clz(temp, src); __ Mov(dst, UINT64_C(1) << high_bit); // MOV (bitmask immediate) @@ -493,7 +493,7 @@ void IntrinsicLocationsBuilderARM64::VisitIntegerHighestOneBit(HInvoke* invoke) } void IntrinsicCodeGeneratorARM64::VisitIntegerHighestOneBit(HInvoke* invoke) { - GenHighestOneBit(invoke, Primitive::kPrimInt, GetVIXLAssembler()); + GenHighestOneBit(invoke, DataType::Type::kInt32, GetVIXLAssembler()); } void IntrinsicLocationsBuilderARM64::VisitLongHighestOneBit(HInvoke* invoke) { @@ -501,17 +501,17 @@ void IntrinsicLocationsBuilderARM64::VisitLongHighestOneBit(HInvoke* invoke) { } void IntrinsicCodeGeneratorARM64::VisitLongHighestOneBit(HInvoke* invoke) { - GenHighestOneBit(invoke, Primitive::kPrimLong, GetVIXLAssembler()); + GenHighestOneBit(invoke, DataType::Type::kInt64, GetVIXLAssembler()); } -static void GenLowestOneBit(HInvoke* invoke, Primitive::Type type, MacroAssembler* masm) { - DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong); +static void GenLowestOneBit(HInvoke* invoke, DataType::Type type, MacroAssembler* masm) { + DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64); UseScratchRegisterScope temps(masm); Register src = InputRegisterAt(invoke, 0); Register dst = RegisterFrom(invoke->GetLocations()->Out(), type); - Register temp = (type == Primitive::kPrimLong) ? temps.AcquireX() : temps.AcquireW(); + Register temp = (type == DataType::Type::kInt64) ? 
temps.AcquireX() : temps.AcquireW(); __ Neg(temp, src); __ And(dst, temp, src); @@ -522,7 +522,7 @@ void IntrinsicLocationsBuilderARM64::VisitIntegerLowestOneBit(HInvoke* invoke) { } void IntrinsicCodeGeneratorARM64::VisitIntegerLowestOneBit(HInvoke* invoke) { - GenLowestOneBit(invoke, Primitive::kPrimInt, GetVIXLAssembler()); + GenLowestOneBit(invoke, DataType::Type::kInt32, GetVIXLAssembler()); } void IntrinsicLocationsBuilderARM64::VisitLongLowestOneBit(HInvoke* invoke) { @@ -530,7 +530,7 @@ void IntrinsicLocationsBuilderARM64::VisitLongLowestOneBit(HInvoke* invoke) { } void IntrinsicCodeGeneratorARM64::VisitLongLowestOneBit(HInvoke* invoke) { - GenLowestOneBit(invoke, Primitive::kPrimLong, GetVIXLAssembler()); + GenLowestOneBit(invoke, DataType::Type::kInt64, GetVIXLAssembler()); } static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) { @@ -902,18 +902,18 @@ void IntrinsicLocationsBuilderARM64::VisitThreadCurrentThread(HInvoke* invoke) { } void IntrinsicCodeGeneratorARM64::VisitThreadCurrentThread(HInvoke* invoke) { - codegen_->Load(Primitive::kPrimNot, WRegisterFrom(invoke->GetLocations()->Out()), + codegen_->Load(DataType::Type::kReference, WRegisterFrom(invoke->GetLocations()->Out()), MemOperand(tr, Thread::PeerOffset<kArm64PointerSize>().Int32Value())); } static void GenUnsafeGet(HInvoke* invoke, - Primitive::Type type, + DataType::Type type, bool is_volatile, CodeGeneratorARM64* codegen) { LocationSummary* locations = invoke->GetLocations(); - DCHECK((type == Primitive::kPrimInt) || - (type == Primitive::kPrimLong) || - (type == Primitive::kPrimNot)); + DCHECK((type == DataType::Type::kInt32) || + (type == DataType::Type::kInt64) || + (type == DataType::Type::kReference)); Location base_loc = locations->InAt(1); Register base = WRegisterFrom(base_loc); // Object pointer. Location offset_loc = locations->InAt(2); @@ -921,7 +921,7 @@ static void GenUnsafeGet(HInvoke* invoke, Location trg_loc = locations->Out(); Register trg = RegisterFrom(trg_loc, type); - if (type == Primitive::kPrimNot && kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + if (type == DataType::Type::kReference && kEmitCompilerReadBarrier && kUseBakerReadBarrier) { // UnsafeGetObject/UnsafeGetObjectVolatile with Baker's read barrier case. 
Register temp = WRegisterFrom(locations->GetTemp(0)); codegen->GenerateReferenceLoadWithBakerReadBarrier(invoke, @@ -942,7 +942,7 @@ static void GenUnsafeGet(HInvoke* invoke, codegen->Load(type, trg, mem_op); } - if (type == Primitive::kPrimNot) { + if (type == DataType::Type::kReference) { DCHECK(trg.IsW()); codegen->MaybeGenerateReadBarrierSlow(invoke, trg_loc, trg_loc, base_loc, 0u, offset_loc); } @@ -991,22 +991,22 @@ void IntrinsicLocationsBuilderARM64::VisitUnsafeGetObjectVolatile(HInvoke* invok } void IntrinsicCodeGeneratorARM64::VisitUnsafeGet(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimInt, /* is_volatile */ false, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_); } void IntrinsicCodeGeneratorARM64::VisitUnsafeGetVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimInt, /* is_volatile */ true, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_); } void IntrinsicCodeGeneratorARM64::VisitUnsafeGetLong(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimLong, /* is_volatile */ false, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_); } void IntrinsicCodeGeneratorARM64::VisitUnsafeGetLongVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimLong, /* is_volatile */ true, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_); } void IntrinsicCodeGeneratorARM64::VisitUnsafeGetObject(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimNot, /* is_volatile */ false, codegen_); + GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_); } void IntrinsicCodeGeneratorARM64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimNot, /* is_volatile */ true, codegen_); + GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_); } static void CreateIntIntIntIntToVoid(ArenaAllocator* arena, HInvoke* invoke) { @@ -1048,7 +1048,7 @@ void IntrinsicLocationsBuilderARM64::VisitUnsafePutLongVolatile(HInvoke* invoke) } static void GenUnsafePut(HInvoke* invoke, - Primitive::Type type, + DataType::Type type, bool is_volatile, bool is_ordered, CodeGeneratorARM64* codegen) { @@ -1066,7 +1066,7 @@ static void GenUnsafePut(HInvoke* invoke, // freeing the temporary registers so they can be used in `MarkGCCard`. UseScratchRegisterScope temps(masm); - if (kPoisonHeapReferences && type == Primitive::kPrimNot) { + if (kPoisonHeapReferences && type == DataType::Type::kReference) { DCHECK(value.IsW()); Register temp = temps.AcquireW(); __ Mov(temp.W(), value.W()); @@ -1081,7 +1081,7 @@ static void GenUnsafePut(HInvoke* invoke, } } - if (type == Primitive::kPrimNot) { + if (type == DataType::Type::kReference) { bool value_can_be_null = true; // TODO: Worth finding out this information? 
codegen->MarkGCCard(base, value, value_can_be_null); } @@ -1089,63 +1089,63 @@ static void GenUnsafePut(HInvoke* invoke, void IntrinsicCodeGeneratorARM64::VisitUnsafePut(HInvoke* invoke) { GenUnsafePut(invoke, - Primitive::kPrimInt, + DataType::Type::kInt32, /* is_volatile */ false, /* is_ordered */ false, codegen_); } void IntrinsicCodeGeneratorARM64::VisitUnsafePutOrdered(HInvoke* invoke) { GenUnsafePut(invoke, - Primitive::kPrimInt, + DataType::Type::kInt32, /* is_volatile */ false, /* is_ordered */ true, codegen_); } void IntrinsicCodeGeneratorARM64::VisitUnsafePutVolatile(HInvoke* invoke) { GenUnsafePut(invoke, - Primitive::kPrimInt, + DataType::Type::kInt32, /* is_volatile */ true, /* is_ordered */ false, codegen_); } void IntrinsicCodeGeneratorARM64::VisitUnsafePutObject(HInvoke* invoke) { GenUnsafePut(invoke, - Primitive::kPrimNot, + DataType::Type::kReference, /* is_volatile */ false, /* is_ordered */ false, codegen_); } void IntrinsicCodeGeneratorARM64::VisitUnsafePutObjectOrdered(HInvoke* invoke) { GenUnsafePut(invoke, - Primitive::kPrimNot, + DataType::Type::kReference, /* is_volatile */ false, /* is_ordered */ true, codegen_); } void IntrinsicCodeGeneratorARM64::VisitUnsafePutObjectVolatile(HInvoke* invoke) { GenUnsafePut(invoke, - Primitive::kPrimNot, + DataType::Type::kReference, /* is_volatile */ true, /* is_ordered */ false, codegen_); } void IntrinsicCodeGeneratorARM64::VisitUnsafePutLong(HInvoke* invoke) { GenUnsafePut(invoke, - Primitive::kPrimLong, + DataType::Type::kInt64, /* is_volatile */ false, /* is_ordered */ false, codegen_); } void IntrinsicCodeGeneratorARM64::VisitUnsafePutLongOrdered(HInvoke* invoke) { GenUnsafePut(invoke, - Primitive::kPrimLong, + DataType::Type::kInt64, /* is_volatile */ false, /* is_ordered */ true, codegen_); } void IntrinsicCodeGeneratorARM64::VisitUnsafePutLongVolatile(HInvoke* invoke) { GenUnsafePut(invoke, - Primitive::kPrimLong, + DataType::Type::kInt64, /* is_volatile */ true, /* is_ordered */ false, codegen_); @@ -1153,7 +1153,7 @@ void IntrinsicCodeGeneratorARM64::VisitUnsafePutLongVolatile(HInvoke* invoke) { static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena, HInvoke* invoke, - Primitive::Type type) { + DataType::Type type) { bool can_call = kEmitCompilerReadBarrier && kUseBakerReadBarrier && (invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject); @@ -1172,17 +1172,17 @@ static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena, // operations to potentially clobber the output. Likewise when // emitting a (Baker) read barrier, which may call. Location::OutputOverlap overlaps = - ((kPoisonHeapReferences && type == Primitive::kPrimNot) || can_call) + ((kPoisonHeapReferences && type == DataType::Type::kReference) || can_call) ? Location::kOutputOverlap : Location::kNoOutputOverlap; locations->SetOut(Location::RequiresRegister(), overlaps); - if (type == Primitive::kPrimNot && kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + if (type == DataType::Type::kReference && kEmitCompilerReadBarrier && kUseBakerReadBarrier) { // Temporary register for (Baker) read barrier. 
locations->AddTemp(Location::RequiresRegister()); } } -static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorARM64* codegen) { +static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorARM64* codegen) { MacroAssembler* masm = codegen->GetVIXLAssembler(); LocationSummary* locations = invoke->GetLocations(); @@ -1196,7 +1196,7 @@ static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorARM64* co Register value = RegisterFrom(locations->InAt(4), type); // Value. // This needs to be before the temp registers, as MarkGCCard also uses VIXL temps. - if (type == Primitive::kPrimNot) { + if (type == DataType::Type::kReference) { // Mark card for object assuming new value is stored. bool value_can_be_null = true; // TODO: Worth finding out this information? codegen->MarkGCCard(base, value, value_can_be_null); @@ -1228,7 +1228,7 @@ static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorARM64* co __ Add(tmp_ptr, base.X(), Operand(offset)); - if (kPoisonHeapReferences && type == Primitive::kPrimNot) { + if (kPoisonHeapReferences && type == DataType::Type::kReference) { codegen->GetAssembler()->PoisonHeapReference(expected); if (value.Is(expected)) { // Do not poison `value`, as it is the same register as @@ -1253,7 +1253,7 @@ static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorARM64* co __ Bind(&exit_loop); __ Cset(out, eq); - if (kPoisonHeapReferences && type == Primitive::kPrimNot) { + if (kPoisonHeapReferences && type == DataType::Type::kReference) { codegen->GetAssembler()->UnpoisonHeapReference(expected); if (value.Is(expected)) { // Do not unpoison `value`, as it is the same register as @@ -1265,10 +1265,10 @@ static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorARM64* co } void IntrinsicLocationsBuilderARM64::VisitUnsafeCASInt(HInvoke* invoke) { - CreateIntIntIntIntIntToInt(arena_, invoke, Primitive::kPrimInt); + CreateIntIntIntIntIntToInt(arena_, invoke, DataType::Type::kInt32); } void IntrinsicLocationsBuilderARM64::VisitUnsafeCASLong(HInvoke* invoke) { - CreateIntIntIntIntIntToInt(arena_, invoke, Primitive::kPrimLong); + CreateIntIntIntIntIntToInt(arena_, invoke, DataType::Type::kInt64); } void IntrinsicLocationsBuilderARM64::VisitUnsafeCASObject(HInvoke* invoke) { // The only read barrier implementation supporting the @@ -1277,21 +1277,21 @@ void IntrinsicLocationsBuilderARM64::VisitUnsafeCASObject(HInvoke* invoke) { return; } - CreateIntIntIntIntIntToInt(arena_, invoke, Primitive::kPrimNot); + CreateIntIntIntIntIntToInt(arena_, invoke, DataType::Type::kReference); } void IntrinsicCodeGeneratorARM64::VisitUnsafeCASInt(HInvoke* invoke) { - GenCas(invoke, Primitive::kPrimInt, codegen_); + GenCas(invoke, DataType::Type::kInt32, codegen_); } void IntrinsicCodeGeneratorARM64::VisitUnsafeCASLong(HInvoke* invoke) { - GenCas(invoke, Primitive::kPrimLong, codegen_); + GenCas(invoke, DataType::Type::kInt64, codegen_); } void IntrinsicCodeGeneratorARM64::VisitUnsafeCASObject(HInvoke* invoke) { // The only read barrier implementation supporting the // UnsafeCASObject intrinsic is the Baker-style read barriers. 
DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier); - GenCas(invoke, Primitive::kPrimNot, codegen_); + GenCas(invoke, DataType::Type::kReference, codegen_); } void IntrinsicLocationsBuilderARM64::VisitStringCompareTo(HInvoke* invoke) { @@ -1397,7 +1397,7 @@ void IntrinsicCodeGeneratorARM64::VisitStringCompareTo(HInvoke* invoke) { DCHECK_ALIGNED(value_offset, 8); static_assert(IsAligned<8>(kObjectAlignment), "String of odd length is not zero padded"); - const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar); + const size_t char_size = DataType::Size(DataType::Type::kUint16); DCHECK_EQ(char_size, 2u); // Promote temp2 to an X reg, ready for LDR. @@ -1457,7 +1457,7 @@ void IntrinsicCodeGeneratorARM64::VisitStringCompareTo(HInvoke* invoke) { __ Bind(&different_compression); // Comparison for different compression style. - const size_t c_char_size = Primitive::ComponentSize(Primitive::kPrimByte); + const size_t c_char_size = DataType::Size(DataType::Type::kInt8); DCHECK_EQ(c_char_size, 1u); temp1 = temp1.W(); temp2 = temp2.W(); @@ -1731,7 +1731,7 @@ static void GenerateVisitStringIndexOf(HInvoke* invoke, __ Bind(slow_path->GetExitLabel()); return; } - } else if (code_point->GetType() != Primitive::kPrimChar) { + } else if (code_point->GetType() != DataType::Type::kUint16) { Register char_reg = WRegisterFrom(locations->InAt(1)); __ Tst(char_reg, 0xFFFF0000); slow_path = new (allocator) IntrinsicSlowPathARM64(invoke); @@ -1762,7 +1762,7 @@ void IntrinsicLocationsBuilderARM64::VisitStringIndexOf(HInvoke* invoke) { InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1))); - locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimInt)); + locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kInt32)); // Need to send start_index=0. 
locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(2))); @@ -1783,7 +1783,7 @@ void IntrinsicLocationsBuilderARM64::VisitStringIndexOfAfter(HInvoke* invoke) { locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1))); locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2))); - locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimInt)); + locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kInt32)); } void IntrinsicCodeGeneratorARM64::VisitStringIndexOfAfter(HInvoke* invoke) { @@ -1800,7 +1800,7 @@ void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromBytes(HInvoke* invo locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1))); locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2))); locations->SetInAt(3, LocationFrom(calling_convention.GetRegisterAt(3))); - locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot)); + locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kReference)); } void IntrinsicCodeGeneratorARM64::VisitStringNewStringFromBytes(HInvoke* invoke) { @@ -1826,7 +1826,7 @@ void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromChars(HInvoke* invo locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1))); locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2))); - locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot)); + locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kReference)); } void IntrinsicCodeGeneratorARM64::VisitStringNewStringFromChars(HInvoke* invoke) { @@ -1846,7 +1846,7 @@ void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromString(HInvoke* inv kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); - locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot)); + locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kReference)); } void IntrinsicCodeGeneratorARM64::VisitStringNewStringFromString(HInvoke* invoke) { @@ -1866,8 +1866,8 @@ void IntrinsicCodeGeneratorARM64::VisitStringNewStringFromString(HInvoke* invoke static void CreateFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) { DCHECK_EQ(invoke->GetNumberOfArguments(), 1U); - DCHECK(Primitive::IsFloatingPointType(invoke->InputAt(0)->GetType())); - DCHECK(Primitive::IsFloatingPointType(invoke->GetType())); + DCHECK(DataType::IsFloatingPointType(invoke->InputAt(0)->GetType())); + DCHECK(DataType::IsFloatingPointType(invoke->GetType())); LocationSummary* const locations = new (arena) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, @@ -1880,9 +1880,9 @@ static void CreateFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) { static void CreateFPFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) { DCHECK_EQ(invoke->GetNumberOfArguments(), 2U); - DCHECK(Primitive::IsFloatingPointType(invoke->InputAt(0)->GetType())); - DCHECK(Primitive::IsFloatingPointType(invoke->InputAt(1)->GetType())); - DCHECK(Primitive::IsFloatingPointType(invoke->GetType())); + DCHECK(DataType::IsFloatingPointType(invoke->InputAt(0)->GetType())); + DCHECK(DataType::IsFloatingPointType(invoke->InputAt(1)->GetType())); + DCHECK(DataType::IsFloatingPointType(invoke->GetType())); LocationSummary* const 
locations = new (arena) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, @@ -2056,7 +2056,7 @@ void IntrinsicCodeGeneratorARM64::VisitStringGetCharsNoCheck(HInvoke* invoke) { LocationSummary* locations = invoke->GetLocations(); // Check assumption that sizeof(Char) is 2 (used in scaling below). - const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar); + const size_t char_size = DataType::Size(DataType::Type::kUint16); DCHECK_EQ(char_size, 2u); // Location of data in char array buffer. @@ -2135,7 +2135,7 @@ void IntrinsicCodeGeneratorARM64::VisitStringGetCharsNoCheck(HInvoke* invoke) { __ B(&done); if (mirror::kUseStringCompression) { - const size_t c_char_size = Primitive::ComponentSize(Primitive::kPrimByte); + const size_t c_char_size = DataType::Size(DataType::Type::kInt8); DCHECK_EQ(c_char_size, 1u); __ Bind(&compressed_string_preloop); __ Add(src_ptr, src_ptr, Operand(srcBegin)); @@ -2219,7 +2219,7 @@ static void CheckSystemArrayCopyPosition(MacroAssembler* masm, if (!length_is_input_length) { // Check that length(input) >= length. __ Ldr(temp, MemOperand(input, length_offset)); - __ Cmp(temp, OperandFrom(length, Primitive::kPrimInt)); + __ Cmp(temp, OperandFrom(length, DataType::Type::kInt32)); __ B(slow_path->GetEntryLabel(), lt); } } else { @@ -2229,7 +2229,7 @@ static void CheckSystemArrayCopyPosition(MacroAssembler* masm, __ B(slow_path->GetEntryLabel(), lt); // Check that (length(input) - pos) >= length. - __ Cmp(temp, OperandFrom(length, Primitive::kPrimInt)); + __ Cmp(temp, OperandFrom(length, DataType::Type::kInt32)); __ B(slow_path->GetEntryLabel(), lt); } } else if (length_is_input_length) { @@ -2244,7 +2244,7 @@ static void CheckSystemArrayCopyPosition(MacroAssembler* masm, __ Ldr(temp, MemOperand(input, length_offset)); __ Subs(temp, temp, pos_reg); // Ccmp if length(input) >= pos, else definitely bail to slow path (N!=V == lt). - __ Ccmp(temp, OperandFrom(length, Primitive::kPrimInt), NFlag, ge); + __ Ccmp(temp, OperandFrom(length, DataType::Type::kInt32), NFlag, ge); __ B(slow_path->GetEntryLabel(), lt); } } @@ -2253,7 +2253,7 @@ static void CheckSystemArrayCopyPosition(MacroAssembler* masm, // source address for System.arraycopy* intrinsics in `src_base`, // `dst_base` and `src_end` respectively. static void GenSystemArrayCopyAddresses(MacroAssembler* masm, - Primitive::Type type, + DataType::Type type, const Register& src, const Location& src_pos, const Register& dst, @@ -2263,10 +2263,10 @@ static void GenSystemArrayCopyAddresses(MacroAssembler* masm, const Register& dst_base, const Register& src_end) { // This routine is used by the SystemArrayCopy and the SystemArrayCopyChar intrinsics. 
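// Aside, not part of the patch: the addresses materialized by this routine follow the usual
// array layout, element i living at array + DataOffset(element_size) + (i << element_size_shift),
// with the end address adding (length << element_size_shift) on top of the base. Scalar sketch
// of that arithmetic (names are assumptions for the sketch):
#include <cstddef>
#include <cstdint>

static uintptr_t ArrayElementAddressSketch(uintptr_t array,
                                           uint32_t data_offset,
                                           size_t element_size_shift,
                                           uint32_t index) {
  return array + data_offset + (static_cast<uintptr_t>(index) << element_size_shift);
}
// src_base = ArrayElementAddressSketch(src, data_offset, shift, src_pos);
// src_end  = src_base + (static_cast<uintptr_t>(length) << shift);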
- DCHECK(type == Primitive::kPrimNot || type == Primitive::kPrimChar) + DCHECK(type == DataType::Type::kReference || type == DataType::Type::kUint16) << "Unexpected element type: " << type; - const int32_t element_size = Primitive::ComponentSize(type); - const int32_t element_size_shift = Primitive::ComponentSizeShift(type); + const int32_t element_size = DataType::Size(type); + const int32_t element_size_shift = DataType::SizeShift(type); const uint32_t data_offset = mirror::Array::DataOffset(element_size).Uint32Value(); if (src_pos.IsConstant()) { @@ -2353,7 +2353,7 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopyChar(HInvoke* invoke) { src_stop_addr = src_stop_addr.X(); GenSystemArrayCopyAddresses(masm, - Primitive::kPrimChar, + DataType::Type::kUint16, src, src_pos, dst, @@ -2364,7 +2364,7 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopyChar(HInvoke* invoke) { src_stop_addr); // Iterate over the arrays and do a raw copy of the chars. - const int32_t char_size = Primitive::ComponentSize(Primitive::kPrimChar); + const int32_t char_size = DataType::Size(DataType::Type::kUint16); UseScratchRegisterScope temps(masm); Register tmp = temps.AcquireW(); vixl::aarch64::Label loop, done; @@ -2781,8 +2781,8 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) { Register dst_curr_addr = temp2.X(); Register src_stop_addr = temp3.X(); vixl::aarch64::Label done; - const Primitive::Type type = Primitive::kPrimNot; - const int32_t element_size = Primitive::ComponentSize(type); + const DataType::Type type = DataType::Type::kReference; + const int32_t element_size = DataType::Size(type); if (length.IsRegister()) { // Don't enter the copy loop if the length is null. @@ -2957,7 +2957,7 @@ void IntrinsicLocationsBuilderARM64::VisitIntegerValueOf(HInvoke* invoke) { IntrinsicVisitor::ComputeIntegerValueOfLocations( invoke, codegen_, - calling_convention.GetReturnLocation(Primitive::kPrimNot), + calling_convention.GetReturnLocation(DataType::Type::kReference), Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode())); } @@ -2966,7 +2966,7 @@ void IntrinsicCodeGeneratorARM64::VisitIntegerValueOf(HInvoke* invoke) { LocationSummary* locations = invoke->GetLocations(); MacroAssembler* masm = GetVIXLAssembler(); - Register out = RegisterFrom(locations->Out(), Primitive::kPrimNot); + Register out = RegisterFrom(locations->Out(), DataType::Type::kReference); UseScratchRegisterScope temps(masm); Register temp = temps.AcquireW(); InvokeRuntimeCallingConvention calling_convention; @@ -2996,7 +2996,7 @@ void IntrinsicCodeGeneratorARM64::VisitIntegerValueOf(HInvoke* invoke) { codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore); } } else { - Register in = RegisterFrom(locations->InAt(0), Primitive::kPrimInt); + Register in = RegisterFrom(locations->InAt(0), DataType::Type::kInt32); // Check bounds of our cache. 
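// Aside, not part of the patch: the Add/Cmp pair that follows is the standard single-comparison
// range check, (in - low) compared unsigned against the cache size, which covers the lower and
// upper bound at once. Scalar sketch of the same test:
#include <cstdint>

static bool InIntegerValueOfCacheSketch(int32_t in, int32_t low, int32_t high) {
  uint32_t biased = static_cast<uint32_t>(in) - static_cast<uint32_t>(low);
  uint32_t cache_size = static_cast<uint32_t>(high - low + 1);
  return biased < cache_size;  // Unsigned compare: true iff low <= in <= high.
}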
__ Add(out.W(), in.W(), -info.low); __ Cmp(out.W(), info.high - info.low + 1); @@ -3007,8 +3007,8 @@ void IntrinsicCodeGeneratorARM64::VisitIntegerValueOf(HInvoke* invoke) { uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache)); __ Ldr(temp.W(), codegen_->DeduplicateBootImageAddressLiteral(data_offset + address)); MemOperand source = HeapOperand( - temp, out.X(), LSL, Primitive::ComponentSizeShift(Primitive::kPrimNot)); - codegen_->Load(Primitive::kPrimNot, out, source); + temp, out.X(), LSL, DataType::SizeShift(DataType::Type::kReference)); + codegen_->Load(DataType::Type::kReference, out, source); codegen_->GetAssembler()->MaybeUnpoisonHeapReference(out); __ B(&done); __ Bind(&allocate); @@ -3034,7 +3034,7 @@ void IntrinsicLocationsBuilderARM64::VisitThreadInterrupted(HInvoke* invoke) { void IntrinsicCodeGeneratorARM64::VisitThreadInterrupted(HInvoke* invoke) { MacroAssembler* masm = GetVIXLAssembler(); - Register out = RegisterFrom(invoke->GetLocations()->Out(), Primitive::kPrimInt); + Register out = RegisterFrom(invoke->GetLocations()->Out(), DataType::Type::kInt32); UseScratchRegisterScope temps(masm); Register temp = temps.AcquireX(); diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc index e2494f0ce8..7ce576c307 100644 --- a/compiler/optimizing/intrinsics_arm_vixl.cc +++ b/compiler/optimizing/intrinsics_arm_vixl.cc @@ -126,16 +126,16 @@ class IntrinsicSlowPathARMVIXL : public SlowPathCodeARMVIXL { // Compute base address for the System.arraycopy intrinsic in `base`. static void GenSystemArrayCopyBaseAddress(ArmVIXLAssembler* assembler, - Primitive::Type type, + DataType::Type type, const vixl32::Register& array, const Location& pos, const vixl32::Register& base) { // This routine is only used by the SystemArrayCopy intrinsic at the - // moment. We can allow Primitive::kPrimNot as `type` to implement + // moment. We can allow DataType::Type::kReference as `type` to implement // the SystemArrayCopyChar intrinsic. - DCHECK_EQ(type, Primitive::kPrimNot); - const int32_t element_size = Primitive::ComponentSize(type); - const uint32_t element_size_shift = Primitive::ComponentSizeShift(type); + DCHECK_EQ(type, DataType::Type::kReference); + const int32_t element_size = DataType::Size(type); + const uint32_t element_size_shift = DataType::SizeShift(type); const uint32_t data_offset = mirror::Array::DataOffset(element_size).Uint32Value(); if (pos.IsConstant()) { @@ -149,16 +149,16 @@ static void GenSystemArrayCopyBaseAddress(ArmVIXLAssembler* assembler, // Compute end address for the System.arraycopy intrinsic in `end`. static void GenSystemArrayCopyEndAddress(ArmVIXLAssembler* assembler, - Primitive::Type type, + DataType::Type type, const Location& copy_length, const vixl32::Register& base, const vixl32::Register& end) { // This routine is only used by the SystemArrayCopy intrinsic at the - // moment. We can allow Primitive::kPrimNot as `type` to implement + // moment. We can allow DataType::Type::kReference as `type` to implement // the SystemArrayCopyChar intrinsic. 
- DCHECK_EQ(type, Primitive::kPrimNot); - const int32_t element_size = Primitive::ComponentSize(type); - const uint32_t element_size_shift = Primitive::ComponentSizeShift(type); + DCHECK_EQ(type, DataType::Type::kReference); + const int32_t element_size = DataType::Size(type); + const uint32_t element_size_shift = DataType::SizeShift(type); if (copy_length.IsConstant()) { int32_t constant = Int32ConstantFrom(copy_length); @@ -188,8 +188,8 @@ class ReadBarrierSystemArrayCopySlowPathARMVIXL : public SlowPathCodeARMVIXL { DCHECK(instruction_->GetLocations()->Intrinsified()); DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kSystemArrayCopy); - Primitive::Type type = Primitive::kPrimNot; - const int32_t element_size = Primitive::ComponentSize(type); + DataType::Type type = DataType::Type::kReference; + const int32_t element_size = DataType::Size(type); vixl32::Register dest = InputRegisterAt(instruction_, 2); Location dest_pos = locations->InAt(3); @@ -349,16 +349,16 @@ static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) { } static void GenNumberOfLeadingZeros(HInvoke* invoke, - Primitive::Type type, + DataType::Type type, CodeGeneratorARMVIXL* codegen) { ArmVIXLAssembler* assembler = codegen->GetAssembler(); LocationSummary* locations = invoke->GetLocations(); Location in = locations->InAt(0); vixl32::Register out = RegisterFrom(locations->Out()); - DCHECK((type == Primitive::kPrimInt) || (type == Primitive::kPrimLong)); + DCHECK((type == DataType::Type::kInt32) || (type == DataType::Type::kInt64)); - if (type == Primitive::kPrimLong) { + if (type == DataType::Type::kInt64) { vixl32::Register in_reg_lo = LowRegisterFrom(in); vixl32::Register in_reg_hi = HighRegisterFrom(in); vixl32::Label end; @@ -380,7 +380,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitIntegerNumberOfLeadingZeros(HInvoke* } void IntrinsicCodeGeneratorARMVIXL::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) { - GenNumberOfLeadingZeros(invoke, Primitive::kPrimInt, codegen_); + GenNumberOfLeadingZeros(invoke, DataType::Type::kInt32, codegen_); } void IntrinsicLocationsBuilderARMVIXL::VisitLongNumberOfLeadingZeros(HInvoke* invoke) { @@ -388,19 +388,19 @@ void IntrinsicLocationsBuilderARMVIXL::VisitLongNumberOfLeadingZeros(HInvoke* in } void IntrinsicCodeGeneratorARMVIXL::VisitLongNumberOfLeadingZeros(HInvoke* invoke) { - GenNumberOfLeadingZeros(invoke, Primitive::kPrimLong, codegen_); + GenNumberOfLeadingZeros(invoke, DataType::Type::kInt64, codegen_); } static void GenNumberOfTrailingZeros(HInvoke* invoke, - Primitive::Type type, + DataType::Type type, CodeGeneratorARMVIXL* codegen) { - DCHECK((type == Primitive::kPrimInt) || (type == Primitive::kPrimLong)); + DCHECK((type == DataType::Type::kInt32) || (type == DataType::Type::kInt64)); ArmVIXLAssembler* assembler = codegen->GetAssembler(); LocationSummary* locations = invoke->GetLocations(); vixl32::Register out = RegisterFrom(locations->Out()); - if (type == Primitive::kPrimLong) { + if (type == DataType::Type::kInt64) { vixl32::Register in_reg_lo = LowRegisterFrom(locations->InAt(0)); vixl32::Register in_reg_hi = HighRegisterFrom(locations->InAt(0)); vixl32::Label end; @@ -426,7 +426,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitIntegerNumberOfTrailingZeros(HInvoke } void IntrinsicCodeGeneratorARMVIXL::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) { - GenNumberOfTrailingZeros(invoke, Primitive::kPrimInt, codegen_); + GenNumberOfTrailingZeros(invoke, DataType::Type::kInt32, codegen_); } void 
IntrinsicLocationsBuilderARMVIXL::VisitLongNumberOfTrailingZeros(HInvoke* invoke) { @@ -434,7 +434,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitLongNumberOfTrailingZeros(HInvoke* i } void IntrinsicCodeGeneratorARMVIXL::VisitLongNumberOfTrailingZeros(HInvoke* invoke) { - GenNumberOfTrailingZeros(invoke, Primitive::kPrimLong, codegen_); + GenNumberOfTrailingZeros(invoke, DataType::Type::kInt64, codegen_); } static void MathAbsFP(HInvoke* invoke, ArmVIXLAssembler* assembler) { @@ -963,7 +963,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitThreadCurrentThread(HInvoke* invoke) { } static void GenUnsafeGet(HInvoke* invoke, - Primitive::Type type, + DataType::Type type, bool is_volatile, CodeGeneratorARMVIXL* codegen) { LocationSummary* locations = invoke->GetLocations(); @@ -975,7 +975,7 @@ static void GenUnsafeGet(HInvoke* invoke, Location trg_loc = locations->Out(); switch (type) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { vixl32::Register trg = RegisterFrom(trg_loc); __ Ldr(trg, MemOperand(base, offset)); if (is_volatile) { @@ -984,7 +984,7 @@ static void GenUnsafeGet(HInvoke* invoke, break; } - case Primitive::kPrimNot: { + case DataType::Type::kReference: { vixl32::Register trg = RegisterFrom(trg_loc); if (kEmitCompilerReadBarrier) { if (kUseBakerReadBarrier) { @@ -1011,7 +1011,7 @@ static void GenUnsafeGet(HInvoke* invoke, break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { vixl32::Register trg_lo = LowRegisterFrom(trg_loc); vixl32::Register trg_hi = HighRegisterFrom(trg_loc); if (is_volatile && !codegen->GetInstructionSetFeatures().HasAtomicLdrdAndStrd()) { @@ -1036,7 +1036,7 @@ static void GenUnsafeGet(HInvoke* invoke, static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke, - Primitive::Type type) { + DataType::Type type) { bool can_call = kEmitCompilerReadBarrier && (invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject || invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile); @@ -1053,7 +1053,7 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, locations->SetInAt(2, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), (can_call ? Location::kOutputOverlap : Location::kNoOutputOverlap)); - if (type == Primitive::kPrimNot && kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + if (type == DataType::Type::kReference && kEmitCompilerReadBarrier && kUseBakerReadBarrier) { // We need a temporary register for the read barrier marking slow // path in CodeGeneratorARMVIXL::GenerateReferenceLoadWithBakerReadBarrier. 
locations->AddTemp(Location::RequiresRegister()); @@ -1061,46 +1061,46 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGet(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimInt); + CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimInt); + CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetLong(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimLong); + CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetLongVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimLong); + CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetObject(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimNot); + CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetObjectVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimNot); + CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference); } void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGet(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimInt, /* is_volatile */ false, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_); } void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimInt, /* is_volatile */ true, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_); } void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetLong(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimLong, /* is_volatile */ false, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_); } void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetLongVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimLong, /* is_volatile */ true, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_); } void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetObject(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimNot, /* is_volatile */ false, codegen_); + GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_); } void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetObjectVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimNot, /* is_volatile */ true, codegen_); + GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_); } static void CreateIntIntIntIntToVoid(ArenaAllocator* arena, const ArmInstructionSetFeatures& features, - Primitive::Type type, + DataType::Type type, bool is_volatile, HInvoke* invoke) { LocationSummary* locations = new (arena) LocationSummary(invoke, @@ -1111,13 +1111,13 @@ static void CreateIntIntIntIntToVoid(ArenaAllocator* arena, locations->SetInAt(2, Location::RequiresRegister()); locations->SetInAt(3, Location::RequiresRegister()); - if (type == Primitive::kPrimLong) { + if (type == DataType::Type::kInt64) { // Potentially need temps for ldrexd-strexd loop. 
if (is_volatile && !features.HasAtomicLdrdAndStrd()) { locations->AddTemp(Location::RequiresRegister()); // Temp_lo. locations->AddTemp(Location::RequiresRegister()); // Temp_hi. } - } else if (type == Primitive::kPrimNot) { + } else if (type == DataType::Type::kReference) { // Temps for card-marking. locations->AddTemp(Location::RequiresRegister()); // Temp. locations->AddTemp(Location::RequiresRegister()); // Card. @@ -1125,38 +1125,44 @@ static void CreateIntIntIntIntToVoid(ArenaAllocator* arena, } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePut(HInvoke* invoke) { - CreateIntIntIntIntToVoid(arena_, features_, Primitive::kPrimInt, /* is_volatile */ false, invoke); + CreateIntIntIntIntToVoid( + arena_, features_, DataType::Type::kInt32, /* is_volatile */ false, invoke); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutOrdered(HInvoke* invoke) { - CreateIntIntIntIntToVoid(arena_, features_, Primitive::kPrimInt, /* is_volatile */ false, invoke); + CreateIntIntIntIntToVoid( + arena_, features_, DataType::Type::kInt32, /* is_volatile */ false, invoke); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutVolatile(HInvoke* invoke) { - CreateIntIntIntIntToVoid(arena_, features_, Primitive::kPrimInt, /* is_volatile */ true, invoke); + CreateIntIntIntIntToVoid( + arena_, features_, DataType::Type::kInt32, /* is_volatile */ true, invoke); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObject(HInvoke* invoke) { - CreateIntIntIntIntToVoid(arena_, features_, Primitive::kPrimNot, /* is_volatile */ false, invoke); + CreateIntIntIntIntToVoid( + arena_, features_, DataType::Type::kReference, /* is_volatile */ false, invoke); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObjectOrdered(HInvoke* invoke) { - CreateIntIntIntIntToVoid(arena_, features_, Primitive::kPrimNot, /* is_volatile */ false, invoke); + CreateIntIntIntIntToVoid( + arena_, features_, DataType::Type::kReference, /* is_volatile */ false, invoke); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObjectVolatile(HInvoke* invoke) { - CreateIntIntIntIntToVoid(arena_, features_, Primitive::kPrimNot, /* is_volatile */ true, invoke); + CreateIntIntIntIntToVoid( + arena_, features_, DataType::Type::kReference, /* is_volatile */ true, invoke); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLong(HInvoke* invoke) { CreateIntIntIntIntToVoid( - arena_, features_, Primitive::kPrimLong, /* is_volatile */ false, invoke); + arena_, features_, DataType::Type::kInt64, /* is_volatile */ false, invoke); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLongOrdered(HInvoke* invoke) { CreateIntIntIntIntToVoid( - arena_, features_, Primitive::kPrimLong, /* is_volatile */ false, invoke); + arena_, features_, DataType::Type::kInt64, /* is_volatile */ false, invoke); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLongVolatile(HInvoke* invoke) { CreateIntIntIntIntToVoid( - arena_, features_, Primitive::kPrimLong, /* is_volatile */ true, invoke); + arena_, features_, DataType::Type::kInt64, /* is_volatile */ true, invoke); } static void GenUnsafePut(LocationSummary* locations, - Primitive::Type type, + DataType::Type type, bool is_volatile, bool is_ordered, CodeGeneratorARMVIXL* codegen) { @@ -1170,7 +1176,7 @@ static void GenUnsafePut(LocationSummary* locations, __ Dmb(vixl32::ISH); } - if (type == Primitive::kPrimLong) { + if (type == DataType::Type::kInt64) { vixl32::Register value_lo = LowRegisterFrom(locations->InAt(3)); vixl32::Register value_hi = HighRegisterFrom(locations->InAt(3)); value = value_lo; @@ 
-1193,7 +1199,7 @@ static void GenUnsafePut(LocationSummary* locations, } else { value = RegisterFrom(locations->InAt(3)); vixl32::Register source = value; - if (kPoisonHeapReferences && type == Primitive::kPrimNot) { + if (kPoisonHeapReferences && type == DataType::Type::kReference) { vixl32::Register temp = RegisterFrom(locations->GetTemp(0)); __ Mov(temp, value); assembler->PoisonHeapReference(temp); @@ -1206,7 +1212,7 @@ static void GenUnsafePut(LocationSummary* locations, __ Dmb(vixl32::ISH); } - if (type == Primitive::kPrimNot) { + if (type == DataType::Type::kReference) { vixl32::Register temp = RegisterFrom(locations->GetTemp(0)); vixl32::Register card = RegisterFrom(locations->GetTemp(1)); bool value_can_be_null = true; // TODO: Worth finding out this information? @@ -1216,63 +1222,63 @@ static void GenUnsafePut(LocationSummary* locations, void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePut(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), - Primitive::kPrimInt, + DataType::Type::kInt32, /* is_volatile */ false, /* is_ordered */ false, codegen_); } void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutOrdered(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), - Primitive::kPrimInt, + DataType::Type::kInt32, /* is_volatile */ false, /* is_ordered */ true, codegen_); } void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutVolatile(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), - Primitive::kPrimInt, + DataType::Type::kInt32, /* is_volatile */ true, /* is_ordered */ false, codegen_); } void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutObject(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), - Primitive::kPrimNot, + DataType::Type::kReference, /* is_volatile */ false, /* is_ordered */ false, codegen_); } void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutObjectOrdered(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), - Primitive::kPrimNot, + DataType::Type::kReference, /* is_volatile */ false, /* is_ordered */ true, codegen_); } void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutObjectVolatile(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), - Primitive::kPrimNot, + DataType::Type::kReference, /* is_volatile */ true, /* is_ordered */ false, codegen_); } void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutLong(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), - Primitive::kPrimLong, + DataType::Type::kInt64, /* is_volatile */ false, /* is_ordered */ false, codegen_); } void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutLongOrdered(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), - Primitive::kPrimLong, + DataType::Type::kInt64, /* is_volatile */ false, /* is_ordered */ true, codegen_); } void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutLongVolatile(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), - Primitive::kPrimLong, + DataType::Type::kInt64, /* is_volatile */ true, /* is_ordered */ false, codegen_); @@ -1280,7 +1286,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutLongVolatile(HInvoke* invoke) static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* arena, HInvoke* invoke, - Primitive::Type type) { + DataType::Type type) { bool can_call = kEmitCompilerReadBarrier && kUseBakerReadBarrier && (invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject); @@ -1299,7 +1305,7 @@ static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* arena, // operations to potentially clobber the output. Likewise when // emitting a (Baker) read barrier, which may call. 
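// For reference, an approximate portable model of the ordering GenUnsafePut
// above provides (illustrative: C++ fences standing in for the emitted
// Dmb(ISH) barriers; not the generated code and not an ART API):
#include <atomic>
#include <cstdint>
static void UnsafePutInt32Sketch(std::atomic<int32_t>* addr, int32_t value,
                                 bool is_volatile, bool is_ordered) {
  if (is_volatile || is_ordered) {
    std::atomic_thread_fence(std::memory_order_release);  // barrier before the store
  }
  addr->store(value, std::memory_order_relaxed);
  if (is_volatile) {
    std::atomic_thread_fence(std::memory_order_seq_cst);  // trailing barrier, volatile only
  }
}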
Location::OutputOverlap overlaps = - ((kPoisonHeapReferences && type == Primitive::kPrimNot) || can_call) + ((kPoisonHeapReferences && type == DataType::Type::kReference) || can_call) ? Location::kOutputOverlap : Location::kNoOutputOverlap; locations->SetOut(Location::RequiresRegister(), overlaps); @@ -1311,8 +1317,8 @@ static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* arena, locations->AddTemp(Location::RequiresRegister()); // Temp 1. } -static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorARMVIXL* codegen) { - DCHECK_NE(type, Primitive::kPrimLong); +static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorARMVIXL* codegen) { + DCHECK_NE(type, DataType::Type::kInt64); ArmVIXLAssembler* assembler = codegen->GetAssembler(); LocationSummary* locations = invoke->GetLocations(); @@ -1330,7 +1336,7 @@ static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorARMVIXL* vixl32::Register tmp_ptr = RegisterFrom(tmp_ptr_loc); // Pointer to actual memory. vixl32::Register tmp = RegisterFrom(locations->GetTemp(1)); // Value in memory. - if (type == Primitive::kPrimNot) { + if (type == DataType::Type::kReference) { // The only read barrier implementation supporting the // UnsafeCASObject intrinsic is the Baker-style read barriers. DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier); @@ -1362,7 +1368,7 @@ static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorARMVIXL* __ Add(tmp_ptr, base, offset); - if (kPoisonHeapReferences && type == Primitive::kPrimNot) { + if (kPoisonHeapReferences && type == DataType::Type::kReference) { codegen->GetAssembler()->PoisonHeapReference(expected); if (value.Is(expected)) { // Do not poison `value`, as it is the same register as @@ -1409,7 +1415,7 @@ static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorARMVIXL* __ mov(cc, out, 0); } - if (kPoisonHeapReferences && type == Primitive::kPrimNot) { + if (kPoisonHeapReferences && type == DataType::Type::kReference) { codegen->GetAssembler()->UnpoisonHeapReference(expected); if (value.Is(expected)) { // Do not unpoison `value`, as it is the same register as @@ -1421,7 +1427,7 @@ static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorARMVIXL* } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeCASInt(HInvoke* invoke) { - CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke, Primitive::kPrimInt); + CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke, DataType::Type::kInt32); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeCASObject(HInvoke* invoke) { // The only read barrier implementation supporting the @@ -1430,17 +1436,17 @@ void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeCASObject(HInvoke* invoke) { return; } - CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke, Primitive::kPrimNot); + CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke, DataType::Type::kReference); } void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeCASInt(HInvoke* invoke) { - GenCas(invoke, Primitive::kPrimInt, codegen_); + GenCas(invoke, DataType::Type::kInt32, codegen_); } void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeCASObject(HInvoke* invoke) { // The only read barrier implementation supporting the // UnsafeCASObject intrinsic is the Baker-style read barriers. 
DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier); - GenCas(invoke, Primitive::kPrimNot, codegen_); + GenCas(invoke, DataType::Type::kReference, codegen_); } void IntrinsicLocationsBuilderARMVIXL::VisitStringCompareTo(HInvoke* invoke) { @@ -1558,7 +1564,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringCompareTo(HInvoke* invoke) { static_assert(IsAligned<8>(kObjectAlignment), "String data must be 8-byte aligned for unrolled CompareTo loop."); - const unsigned char_size = Primitive::ComponentSize(Primitive::kPrimChar); + const unsigned char_size = DataType::Size(DataType::Type::kUint16); DCHECK_EQ(char_size, 2u); UseScratchRegisterScope temps(assembler->GetVIXLAssembler()); @@ -1645,7 +1651,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringCompareTo(HInvoke* invoke) { __ Bind(&different_compression); // Comparison for different compression style. - const size_t c_char_size = Primitive::ComponentSize(Primitive::kPrimByte); + const size_t c_char_size = DataType::Size(DataType::Type::kInt8); DCHECK_EQ(c_char_size, 1u); // We want to free up the temp3, currently holding `str.count`, for comparison. @@ -1943,7 +1949,7 @@ static void GenerateVisitStringIndexOf(HInvoke* invoke, __ Bind(slow_path->GetExitLabel()); return; } - } else if (code_point->GetType() != Primitive::kPrimChar) { + } else if (code_point->GetType() != DataType::Type::kUint16) { vixl32::Register char_reg = InputRegisterAt(invoke, 1); // 0xffff is not modified immediate but 0x10000 is, so use `>= 0x10000` instead of `> 0xffff`. __ Cmp(char_reg, static_cast<uint32_t>(std::numeric_limits<uint16_t>::max()) + 1); @@ -2450,8 +2456,8 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) { // Null constant length: not need to emit the loop code at all. } else { vixl32::Label done; - const Primitive::Type type = Primitive::kPrimNot; - const int32_t element_size = Primitive::ComponentSize(type); + const DataType::Type type = DataType::Type::kReference; + const int32_t element_size = DataType::Size(type); if (length.IsRegister()) { // Don't enter the copy loop if the length is null. 
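The renaming itself is mechanical; for orientation, a minimal self-contained model of the helpers the new calls rely on is sketched below. This is illustrative only and not the art::DataType header: the only sizes this diff confirms via DCHECKs are kUint16 == 2 and kInt8 == 1, the remaining widths are the usual Java ones, and the reference size is an assumption.

#include <cstddef>

// Old Primitive name -> new DataType::Type name, as used throughout this patch:
//   kPrimNot -> kReference, kPrimInt -> kInt32, kPrimLong -> kInt64,
//   kPrimShort -> kInt16, kPrimChar -> kUint16, kPrimByte -> kInt8,
//   kPrimFloat -> kFloat32, kPrimDouble -> kFloat64, kPrimVoid -> kVoid.
enum class Type { kReference, kInt8, kUint16, kInt16,
                  kInt32, kInt64, kFloat32, kFloat64, kVoid };

// Size() stands in for the former Primitive::ComponentSize().
constexpr size_t Size(Type type) {
  switch (type) {
    case Type::kInt8:      return 1;
    case Type::kUint16:
    case Type::kInt16:     return 2;
    case Type::kInt32:
    case Type::kFloat32:
    case Type::kReference: return 4;  // assumed: 32-bit heap references (kHeapReferenceSize)
    case Type::kInt64:
    case Type::kFloat64:   return 8;
    default:               return 0;  // kVoid
  }
}

// SizeShift() stands in for Primitive::ComponentSizeShift(): log2 of the size,
// used for scaled addressing of array elements.
constexpr size_t SizeShift(Type type) {
  switch (Size(type)) {
    case 2: return 1;
    case 4: return 2;
    case 8: return 3;
    default: return 0;  // 1-byte elements (and kVoid)
  }
}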
@@ -2576,8 +2582,8 @@ static void CreateFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) { } DCHECK_EQ(invoke->GetNumberOfArguments(), 1U); - DCHECK_EQ(invoke->InputAt(0)->GetType(), Primitive::kPrimDouble); - DCHECK_EQ(invoke->GetType(), Primitive::kPrimDouble); + DCHECK_EQ(invoke->InputAt(0)->GetType(), DataType::Type::kFloat64); + DCHECK_EQ(invoke->GetType(), DataType::Type::kFloat64); LocationSummary* const locations = new (arena) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, @@ -2602,9 +2608,9 @@ static void CreateFPFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) } DCHECK_EQ(invoke->GetNumberOfArguments(), 2U); - DCHECK_EQ(invoke->InputAt(0)->GetType(), Primitive::kPrimDouble); - DCHECK_EQ(invoke->InputAt(1)->GetType(), Primitive::kPrimDouble); - DCHECK_EQ(invoke->GetType(), Primitive::kPrimDouble); + DCHECK_EQ(invoke->InputAt(0)->GetType(), DataType::Type::kFloat64); + DCHECK_EQ(invoke->InputAt(1)->GetType(), DataType::Type::kFloat64); + DCHECK_EQ(invoke->GetType(), DataType::Type::kFloat64); LocationSummary* const locations = new (arena) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, @@ -2859,12 +2865,12 @@ void IntrinsicCodeGeneratorARMVIXL::VisitShortReverseBytes(HInvoke* invoke) { __ Revsh(OutputRegister(invoke), InputRegisterAt(invoke, 0)); } -static void GenBitCount(HInvoke* instr, Primitive::Type type, ArmVIXLAssembler* assembler) { - DCHECK(Primitive::IsIntOrLongType(type)) << type; - DCHECK_EQ(instr->GetType(), Primitive::kPrimInt); - DCHECK_EQ(Primitive::PrimitiveKind(instr->InputAt(0)->GetType()), type); +static void GenBitCount(HInvoke* instr, DataType::Type type, ArmVIXLAssembler* assembler) { + DCHECK(DataType::IsIntOrLongType(type)) << type; + DCHECK_EQ(instr->GetType(), DataType::Type::kInt32); + DCHECK_EQ(DataType::Kind(instr->InputAt(0)->GetType()), type); - bool is_long = type == Primitive::kPrimLong; + bool is_long = type == DataType::Type::kInt64; LocationSummary* locations = instr->GetLocations(); Location in = locations->InAt(0); vixl32::Register src_0 = is_long ? 
LowRegisterFrom(in) : RegisterFrom(in); @@ -2893,7 +2899,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitIntegerBitCount(HInvoke* invoke) { } void IntrinsicCodeGeneratorARMVIXL::VisitIntegerBitCount(HInvoke* invoke) { - GenBitCount(invoke, Primitive::kPrimInt, GetAssembler()); + GenBitCount(invoke, DataType::Type::kInt32, GetAssembler()); } void IntrinsicLocationsBuilderARMVIXL::VisitLongBitCount(HInvoke* invoke) { @@ -2901,19 +2907,19 @@ void IntrinsicLocationsBuilderARMVIXL::VisitLongBitCount(HInvoke* invoke) { } void IntrinsicCodeGeneratorARMVIXL::VisitLongBitCount(HInvoke* invoke) { - GenBitCount(invoke, Primitive::kPrimLong, GetAssembler()); + GenBitCount(invoke, DataType::Type::kInt64, GetAssembler()); } static void GenHighestOneBit(HInvoke* invoke, - Primitive::Type type, + DataType::Type type, CodeGeneratorARMVIXL* codegen) { - DCHECK(Primitive::IsIntOrLongType(type)); + DCHECK(DataType::IsIntOrLongType(type)); ArmVIXLAssembler* assembler = codegen->GetAssembler(); UseScratchRegisterScope temps(assembler->GetVIXLAssembler()); const vixl32::Register temp = temps.Acquire(); - if (type == Primitive::kPrimLong) { + if (type == DataType::Type::kInt64) { LocationSummary* locations = invoke->GetLocations(); Location in = locations->InAt(0); Location out = locations->Out(); @@ -2959,7 +2965,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitIntegerHighestOneBit(HInvoke* invoke } void IntrinsicCodeGeneratorARMVIXL::VisitIntegerHighestOneBit(HInvoke* invoke) { - GenHighestOneBit(invoke, Primitive::kPrimInt, codegen_); + GenHighestOneBit(invoke, DataType::Type::kInt32, codegen_); } void IntrinsicLocationsBuilderARMVIXL::VisitLongHighestOneBit(HInvoke* invoke) { @@ -2967,19 +2973,19 @@ void IntrinsicLocationsBuilderARMVIXL::VisitLongHighestOneBit(HInvoke* invoke) { } void IntrinsicCodeGeneratorARMVIXL::VisitLongHighestOneBit(HInvoke* invoke) { - GenHighestOneBit(invoke, Primitive::kPrimLong, codegen_); + GenHighestOneBit(invoke, DataType::Type::kInt64, codegen_); } static void GenLowestOneBit(HInvoke* invoke, - Primitive::Type type, + DataType::Type type, CodeGeneratorARMVIXL* codegen) { - DCHECK(Primitive::IsIntOrLongType(type)); + DCHECK(DataType::IsIntOrLongType(type)); ArmVIXLAssembler* assembler = codegen->GetAssembler(); UseScratchRegisterScope temps(assembler->GetVIXLAssembler()); const vixl32::Register temp = temps.Acquire(); - if (type == Primitive::kPrimLong) { + if (type == DataType::Type::kInt64) { LocationSummary* locations = invoke->GetLocations(); Location in = locations->InAt(0); Location out = locations->Out(); @@ -3024,7 +3030,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitIntegerLowestOneBit(HInvoke* invoke) } void IntrinsicCodeGeneratorARMVIXL::VisitIntegerLowestOneBit(HInvoke* invoke) { - GenLowestOneBit(invoke, Primitive::kPrimInt, codegen_); + GenLowestOneBit(invoke, DataType::Type::kInt32, codegen_); } void IntrinsicLocationsBuilderARMVIXL::VisitLongLowestOneBit(HInvoke* invoke) { @@ -3032,7 +3038,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitLongLowestOneBit(HInvoke* invoke) { } void IntrinsicCodeGeneratorARMVIXL::VisitLongLowestOneBit(HInvoke* invoke) { - GenLowestOneBit(invoke, Primitive::kPrimLong, codegen_); + GenLowestOneBit(invoke, DataType::Type::kInt64, codegen_); } void IntrinsicLocationsBuilderARMVIXL::VisitStringGetCharsNoCheck(HInvoke* invoke) { @@ -3056,7 +3062,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringGetCharsNoCheck(HInvoke* invoke) LocationSummary* locations = invoke->GetLocations(); // Check assumption that sizeof(Char) is 2 (used in 
scaling below). - const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar); + const size_t char_size = DataType::Size(DataType::Type::kUint16); DCHECK_EQ(char_size, 2u); // Location of data in char array buffer. @@ -3144,7 +3150,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringGetCharsNoCheck(HInvoke* invoke) if (mirror::kUseStringCompression) { __ B(final_label); - const size_t c_char_size = Primitive::ComponentSize(Primitive::kPrimByte); + const size_t c_char_size = DataType::Size(DataType::Type::kInt8); DCHECK_EQ(c_char_size, 1u); // Copy loop for compressed src, copying 1 character (8-bit) to (16-bit) at a time. __ Bind(&compressed_string_preloop); @@ -3285,7 +3291,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitIntegerValueOf(HInvoke* invoke) { uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value(); uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache)); __ Ldr(temp, codegen_->DeduplicateBootImageAddressLiteral(data_offset + address)); - codegen_->LoadFromShiftedRegOffset(Primitive::kPrimNot, locations->Out(), temp, out); + codegen_->LoadFromShiftedRegOffset(DataType::Type::kReference, locations->Out(), temp, out); assembler->MaybeUnpoisonHeapReference(out); __ B(&done); __ Bind(&allocate); diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc index fe5579c8be..8847256532 100644 --- a/compiler/optimizing/intrinsics_mips.cc +++ b/compiler/optimizing/intrinsics_mips.cc @@ -61,16 +61,16 @@ inline bool IntrinsicCodeGeneratorMIPS::Is32BitFPU() const { #define __ codegen->GetAssembler()-> static void MoveFromReturnRegister(Location trg, - Primitive::Type type, + DataType::Type type, CodeGeneratorMIPS* codegen) { if (!trg.IsValid()) { - DCHECK_EQ(type, Primitive::kPrimVoid); + DCHECK_EQ(type, DataType::Type::kVoid); return; } - DCHECK_NE(type, Primitive::kPrimVoid); + DCHECK_NE(type, DataType::Type::kVoid); - if (Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) { + if (DataType::IsIntegralType(type) || type == DataType::Type::kReference) { Register trg_reg = trg.AsRegister<Register>(); if (trg_reg != V0) { __ Move(V0, trg_reg); @@ -78,7 +78,7 @@ static void MoveFromReturnRegister(Location trg, } else { FRegister trg_reg = trg.AsFpuRegister<FRegister>(); if (trg_reg != F0) { - if (type == Primitive::kPrimFloat) { + if (type == DataType::Type::kFloat32) { __ MovS(F0, trg_reg); } else { __ MovD(F0, trg_reg); @@ -247,17 +247,17 @@ static void CreateIntToIntLocations(ArenaAllocator* arena, } static void GenReverse(LocationSummary* locations, - Primitive::Type type, + DataType::Type type, bool isR2OrNewer, bool isR6, bool reverseBits, MipsAssembler* assembler) { - DCHECK(type == Primitive::kPrimShort || - type == Primitive::kPrimInt || - type == Primitive::kPrimLong); - DCHECK(type != Primitive::kPrimShort || !reverseBits); + DCHECK(type == DataType::Type::kInt16 || + type == DataType::Type::kInt32 || + type == DataType::Type::kInt64); + DCHECK(type != DataType::Type::kInt16 || !reverseBits); - if (type == Primitive::kPrimShort) { + if (type == DataType::Type::kInt16) { Register in = locations->InAt(0).AsRegister<Register>(); Register out = locations->Out().AsRegister<Register>(); @@ -271,7 +271,7 @@ static void GenReverse(LocationSummary* locations, __ Srl(out, out, 24); __ Or(out, out, TMP); } - } else if (type == Primitive::kPrimInt) { + } else if (type == DataType::Type::kInt32) { Register in = locations->InAt(0).AsRegister<Register>(); Register out = 
locations->Out().AsRegister<Register>(); @@ -316,7 +316,7 @@ static void GenReverse(LocationSummary* locations, __ Or(out, TMP, out); } } - } else if (type == Primitive::kPrimLong) { + } else if (type == DataType::Type::kInt64) { Register in_lo = locations->InAt(0).AsRegisterPairLow<Register>(); Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>(); Register out_lo = locations->Out().AsRegisterPairLow<Register>(); @@ -407,7 +407,7 @@ void IntrinsicLocationsBuilderMIPS::VisitIntegerReverseBytes(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS::VisitIntegerReverseBytes(HInvoke* invoke) { GenReverse(invoke->GetLocations(), - Primitive::kPrimInt, + DataType::Type::kInt32, IsR2OrNewer(), IsR6(), /* reverseBits */ false, @@ -421,7 +421,7 @@ void IntrinsicLocationsBuilderMIPS::VisitLongReverseBytes(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS::VisitLongReverseBytes(HInvoke* invoke) { GenReverse(invoke->GetLocations(), - Primitive::kPrimLong, + DataType::Type::kInt64, IsR2OrNewer(), IsR6(), /* reverseBits */ false, @@ -435,7 +435,7 @@ void IntrinsicLocationsBuilderMIPS::VisitShortReverseBytes(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS::VisitShortReverseBytes(HInvoke* invoke) { GenReverse(invoke->GetLocations(), - Primitive::kPrimShort, + DataType::Type::kInt16, IsR2OrNewer(), IsR6(), /* reverseBits */ false, @@ -584,7 +584,7 @@ void IntrinsicLocationsBuilderMIPS::VisitIntegerReverse(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS::VisitIntegerReverse(HInvoke* invoke) { GenReverse(invoke->GetLocations(), - Primitive::kPrimInt, + DataType::Type::kInt32, IsR2OrNewer(), IsR6(), /* reverseBits */ true, @@ -598,7 +598,7 @@ void IntrinsicLocationsBuilderMIPS::VisitLongReverse(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS::VisitLongReverse(HInvoke* invoke) { GenReverse(invoke->GetLocations(), - Primitive::kPrimLong, + DataType::Type::kInt64, IsR2OrNewer(), IsR6(), /* reverseBits */ true, @@ -614,7 +614,7 @@ static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) { } static void GenBitCount(LocationSummary* locations, - Primitive::Type type, + DataType::Type type, bool isR6, MipsAssembler* assembler) { Register out = locations->Out().AsRegister<Register>(); @@ -641,7 +641,7 @@ static void GenBitCount(LocationSummary* locations, // instructions compared to a loop-based algorithm which required 47 // instructions. 
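// For reference, a portable C++ equivalent of the shift/mask/multiply sequence
// the kInt32 path below emits (an illustrative sketch, not the emitted MIPS code):
#include <cstdint>
static uint32_t PopCount32(uint32_t x) {
  x = x - ((x >> 1) & 0x55555555u);                  // 2-bit pair counts
  x = (x & 0x33333333u) + ((x >> 2) & 0x33333333u);  // 4-bit nibble counts
  x = (x + (x >> 4)) & 0x0F0F0F0Fu;                  // 8-bit byte counts
  return (x * 0x01010101u) >> 24;                    // sum the bytes into the top byte
}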
- if (type == Primitive::kPrimInt) { + if (type == DataType::Type::kInt32) { Register in = locations->InAt(0).AsRegister<Register>(); __ Srl(TMP, in, 1); @@ -665,7 +665,7 @@ static void GenBitCount(LocationSummary* locations, } __ Srl(out, out, 24); } else { - DCHECK_EQ(type, Primitive::kPrimLong); + DCHECK_EQ(type, DataType::Type::kInt64); Register in_lo = locations->InAt(0).AsRegisterPairLow<Register>(); Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>(); Register tmp_hi = locations->GetTemp(0).AsRegister<Register>(); @@ -729,7 +729,7 @@ void IntrinsicLocationsBuilderMIPS::VisitIntegerBitCount(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS::VisitIntegerBitCount(HInvoke* invoke) { - GenBitCount(invoke->GetLocations(), Primitive::kPrimInt, IsR6(), GetAssembler()); + GenBitCount(invoke->GetLocations(), DataType::Type::kInt32, IsR6(), GetAssembler()); } // int java.lang.Long.bitCount(int) @@ -744,7 +744,7 @@ void IntrinsicLocationsBuilderMIPS::VisitLongBitCount(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS::VisitLongBitCount(HInvoke* invoke) { - GenBitCount(invoke->GetLocations(), Primitive::kPrimLong, IsR6(), GetAssembler()); + GenBitCount(invoke->GetLocations(), DataType::Type::kInt64, IsR6(), GetAssembler()); } static void MathAbsFP(LocationSummary* locations, @@ -865,7 +865,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathAbsLong(HInvoke* invoke) { static void GenMinMaxFP(LocationSummary* locations, bool is_min, - Primitive::Type type, + DataType::Type type, bool is_R6, MipsAssembler* assembler) { FRegister out = locations->Out().AsFpuRegister<FRegister>(); @@ -884,7 +884,7 @@ static void GenMinMaxFP(LocationSummary* locations, // returned. This is why there is extra logic preceding the use of // the MIPS min.fmt/max.fmt instructions. If either a, or b holds a // NaN, return the NaN, otherwise return the min/max. - if (type == Primitive::kPrimDouble) { + if (type == DataType::Type::kFloat64) { __ CmpUnD(FTMP, a, b); __ Bc1eqz(FTMP, &noNaNs); @@ -907,7 +907,7 @@ static void GenMinMaxFP(LocationSummary* locations, __ MaxD(out, a, b); } } else { - DCHECK_EQ(type, Primitive::kPrimFloat); + DCHECK_EQ(type, DataType::Type::kFloat32); __ CmpUnS(FTMP, a, b); __ Bc1eqz(FTMP, &noNaNs); @@ -938,16 +938,16 @@ static void GenMinMaxFP(LocationSummary* locations, MipsLabel select; MipsLabel done; - if (type == Primitive::kPrimDouble) { + if (type == DataType::Type::kFloat64) { __ CunD(a, b); } else { - DCHECK_EQ(type, Primitive::kPrimFloat); + DCHECK_EQ(type, DataType::Type::kFloat32); __ CunS(a, b); } __ Bc1f(&ordered); // a or b (or both) is a NaN. Return one, which is a NaN. - if (type == Primitive::kPrimDouble) { + if (type == DataType::Type::kFloat64) { __ CeqD(b, b); } else { __ CeqS(b, b); @@ -959,7 +959,7 @@ static void GenMinMaxFP(LocationSummary* locations, // Neither is a NaN. // a == b? (-0.0 compares equal with +0.0) // If equal, handle zeroes, else compare further. - if (type == Primitive::kPrimDouble) { + if (type == DataType::Type::kFloat64) { __ CeqD(a, b); } else { __ CeqS(a, b); @@ -967,7 +967,7 @@ static void GenMinMaxFP(LocationSummary* locations, __ Bc1f(&compare); // a == b either bit for bit or one is -0.0 and the other is +0.0. 
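// For reference, the java.lang.Math.min contract these paths implement, as a
// portable sketch (illustrative; the max case is symmetric): NaN inputs produce
// NaN, and -0.0 is treated as smaller than +0.0 even though they compare equal.
#include <cmath>
static double MathMinSketch(double a, double b) {
  if (std::isnan(a) || std::isnan(b)) return std::nan("");  // NaN wins
  if (a == b) return std::signbit(a) ? a : b;               // picks -0.0 over +0.0
  return (a < b) ? a : b;
}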
- if (type == Primitive::kPrimDouble) { + if (type == DataType::Type::kFloat64) { __ MoveFromFpuHigh(TMP, a); __ MoveFromFpuHigh(AT, b); } else { @@ -983,7 +983,7 @@ static void GenMinMaxFP(LocationSummary* locations, __ And(TMP, TMP, AT); } - if (type == Primitive::kPrimDouble) { + if (type == DataType::Type::kFloat64) { __ Mfc1(AT, a); __ Mtc1(AT, out); __ MoveToFpuHigh(TMP, out); @@ -994,7 +994,7 @@ static void GenMinMaxFP(LocationSummary* locations, __ Bind(&compare); - if (type == Primitive::kPrimDouble) { + if (type == DataType::Type::kFloat64) { if (is_min) { // return (a <= b) ? a : b; __ ColeD(a, b); @@ -1014,7 +1014,7 @@ static void GenMinMaxFP(LocationSummary* locations, __ Bind(&select); - if (type == Primitive::kPrimDouble) { + if (type == DataType::Type::kFloat64) { __ MovtD(out, a); __ MovfD(out, b); } else { @@ -1043,7 +1043,7 @@ void IntrinsicLocationsBuilderMIPS::VisitMathMinDoubleDouble(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS::VisitMathMinDoubleDouble(HInvoke* invoke) { GenMinMaxFP(invoke->GetLocations(), /* is_min */ true, - Primitive::kPrimDouble, + DataType::Type::kFloat64, IsR6(), GetAssembler()); } @@ -1056,7 +1056,7 @@ void IntrinsicLocationsBuilderMIPS::VisitMathMinFloatFloat(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS::VisitMathMinFloatFloat(HInvoke* invoke) { GenMinMaxFP(invoke->GetLocations(), /* is_min */ true, - Primitive::kPrimFloat, + DataType::Type::kFloat32, IsR6(), GetAssembler()); } @@ -1069,7 +1069,7 @@ void IntrinsicLocationsBuilderMIPS::VisitMathMaxDoubleDouble(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS::VisitMathMaxDoubleDouble(HInvoke* invoke) { GenMinMaxFP(invoke->GetLocations(), /* is_min */ false, - Primitive::kPrimDouble, + DataType::Type::kFloat64, IsR6(), GetAssembler()); } @@ -1082,7 +1082,7 @@ void IntrinsicLocationsBuilderMIPS::VisitMathMaxFloatFloat(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS::VisitMathMaxFloatFloat(HInvoke* invoke) { GenMinMaxFP(invoke->GetLocations(), /* is_min */ false, - Primitive::kPrimFloat, + DataType::Type::kFloat32, IsR6(), GetAssembler()); } @@ -1098,7 +1098,7 @@ static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) { static void GenMinMax(LocationSummary* locations, bool is_min, - Primitive::Type type, + DataType::Type type, bool is_R6, MipsAssembler* assembler) { if (is_R6) { @@ -1125,7 +1125,7 @@ static void GenMinMax(LocationSummary* locations, // as the output register; the else clause also handles the case // where the output register is distinct from both the first, and the // second input registers. 
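// For reference, one branch-free formulation of out = min(a, b) using a compare
// followed by conditional selects, in the spirit of the R6 path below (a sketch
// only; not necessarily the exact instruction sequence emitted):
#include <cstdint>
static int32_t MinInt32Sketch(int32_t a, int32_t b) {
  const int32_t cond = (a < b) ? 1 : 0;  // slt-style compare
  const int32_t from_a = cond ? a : 0;   // selected when cond != 0
  const int32_t from_b = cond ? 0 : b;   // selected when cond == 0
  return from_a | from_b;                // one of the two terms is zero
}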
- if (type == Primitive::kPrimLong) { + if (type == DataType::Type::kInt64) { Register a_lo = locations->InAt(0).AsRegisterPairLow<Register>(); Register a_hi = locations->InAt(0).AsRegisterPairHigh<Register>(); Register b_lo = locations->InAt(1).AsRegisterPairLow<Register>(); @@ -1168,7 +1168,7 @@ static void GenMinMax(LocationSummary* locations, __ Or(out_hi, out_hi, AT); } } else { - DCHECK_EQ(type, Primitive::kPrimInt); + DCHECK_EQ(type, DataType::Type::kInt32); Register a = locations->InAt(0).AsRegister<Register>(); Register b = locations->InAt(1).AsRegister<Register>(); Register out = locations->Out().AsRegister<Register>(); @@ -1190,7 +1190,7 @@ static void GenMinMax(LocationSummary* locations, } } } else { - if (type == Primitive::kPrimLong) { + if (type == DataType::Type::kInt64) { Register a_lo = locations->InAt(0).AsRegisterPairLow<Register>(); Register a_hi = locations->InAt(0).AsRegisterPairHigh<Register>(); Register b_lo = locations->InAt(1).AsRegisterPairLow<Register>(); @@ -1234,7 +1234,7 @@ static void GenMinMax(LocationSummary* locations, } } } else { - DCHECK_EQ(type, Primitive::kPrimInt); + DCHECK_EQ(type, DataType::Type::kInt32); Register a = locations->InAt(0).AsRegister<Register>(); Register b = locations->InAt(1).AsRegister<Register>(); Register out = locations->Out().AsRegister<Register>(); @@ -1273,7 +1273,7 @@ void IntrinsicLocationsBuilderMIPS::VisitMathMinIntInt(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS::VisitMathMinIntInt(HInvoke* invoke) { GenMinMax(invoke->GetLocations(), /* is_min */ true, - Primitive::kPrimInt, + DataType::Type::kInt32, IsR6(), GetAssembler()); } @@ -1286,7 +1286,7 @@ void IntrinsicLocationsBuilderMIPS::VisitMathMinLongLong(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS::VisitMathMinLongLong(HInvoke* invoke) { GenMinMax(invoke->GetLocations(), /* is_min */ true, - Primitive::kPrimLong, + DataType::Type::kInt64, IsR6(), GetAssembler()); } @@ -1299,7 +1299,7 @@ void IntrinsicLocationsBuilderMIPS::VisitMathMaxIntInt(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS::VisitMathMaxIntInt(HInvoke* invoke) { GenMinMax(invoke->GetLocations(), /* is_min */ false, - Primitive::kPrimInt, + DataType::Type::kInt32, IsR6(), GetAssembler()); } @@ -1312,7 +1312,7 @@ void IntrinsicLocationsBuilderMIPS::VisitMathMaxLongLong(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS::VisitMathMaxLongLong(HInvoke* invoke) { GenMinMax(invoke->GetLocations(), /* is_min */ false, - Primitive::kPrimLong, + DataType::Type::kInt64, IsR6(), GetAssembler()); } @@ -1519,7 +1519,7 @@ void IntrinsicCodeGeneratorMIPS::VisitThreadCurrentThread(HInvoke* invoke) { static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke, - Primitive::Type type) { + DataType::Type type) { bool can_call = kEmitCompilerReadBarrier && (invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject || invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile); @@ -1536,7 +1536,7 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, locations->SetInAt(2, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), (can_call ? Location::kOutputOverlap : Location::kNoOutputOverlap)); - if (type == Primitive::kPrimNot && kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + if (type == DataType::Type::kReference && kEmitCompilerReadBarrier && kUseBakerReadBarrier) { // We need a temporary register for the read barrier marking slow // path in InstructionCodeGeneratorMIPS::GenerateReferenceLoadWithBakerReadBarrier. 
locations->AddTemp(Location::RequiresRegister()); @@ -1546,14 +1546,14 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, // Note that the caller must supply a properly aligned memory address. // If they do not, the behavior is undefined (atomicity not guaranteed, exception may occur). static void GenUnsafeGet(HInvoke* invoke, - Primitive::Type type, + DataType::Type type, bool is_volatile, bool is_R6, CodeGeneratorMIPS* codegen) { LocationSummary* locations = invoke->GetLocations(); - DCHECK((type == Primitive::kPrimInt) || - (type == Primitive::kPrimLong) || - (type == Primitive::kPrimNot)) << type; + DCHECK((type == DataType::Type::kInt32) || + (type == DataType::Type::kInt64) || + (type == DataType::Type::kReference)) << type; MipsAssembler* assembler = codegen->GetAssembler(); // Target register. Location trg_loc = locations->Out(); @@ -1566,12 +1566,12 @@ static void GenUnsafeGet(HInvoke* invoke, Location offset_loc = locations->InAt(2); Register offset_lo = offset_loc.AsRegisterPairLow<Register>(); - if (!(kEmitCompilerReadBarrier && kUseBakerReadBarrier && (type == Primitive::kPrimNot))) { + if (!(kEmitCompilerReadBarrier && kUseBakerReadBarrier && (type == DataType::Type::kReference))) { __ Addu(TMP, base, offset_lo); } switch (type) { - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { Register trg_lo = trg_loc.AsRegisterPairLow<Register>(); Register trg_hi = trg_loc.AsRegisterPairHigh<Register>(); CHECK(!is_volatile); // TODO: support atomic 8-byte volatile loads. @@ -1587,7 +1587,7 @@ static void GenUnsafeGet(HInvoke* invoke, break; } - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { Register trg = trg_loc.AsRegister<Register>(); if (is_R6) { __ Lw(trg, TMP, 0); @@ -1601,7 +1601,7 @@ static void GenUnsafeGet(HInvoke* invoke, break; } - case Primitive::kPrimNot: { + case DataType::Type::kReference: { Register trg = trg_loc.AsRegister<Register>(); if (kEmitCompilerReadBarrier) { if (kUseBakerReadBarrier) { @@ -1657,47 +1657,47 @@ static void GenUnsafeGet(HInvoke* invoke, // int sun.misc.Unsafe.getInt(Object o, long offset) void IntrinsicLocationsBuilderMIPS::VisitUnsafeGet(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimInt); + CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32); } void IntrinsicCodeGeneratorMIPS::VisitUnsafeGet(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimInt, /* is_volatile */ false, IsR6(), codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, IsR6(), codegen_); } // int sun.misc.Unsafe.getIntVolatile(Object o, long offset) void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimInt); + CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32); } void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimInt, /* is_volatile */ true, IsR6(), codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, IsR6(), codegen_); } // long sun.misc.Unsafe.getLong(Object o, long offset) void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetLong(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimLong); + CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64); } void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetLong(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimLong, /* is_volatile */ false, IsR6(), codegen_); + 
GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, IsR6(), codegen_); } // Object sun.misc.Unsafe.getObject(Object o, long offset) void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetObject(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimNot); + CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference); } void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObject(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimNot, /* is_volatile */ false, IsR6(), codegen_); + GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, IsR6(), codegen_); } // Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset) void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetObjectVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimNot); + CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference); } void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObjectVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimNot, /* is_volatile */ true, IsR6(), codegen_); + GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, IsR6(), codegen_); } static void CreateIntIntIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) { @@ -1713,14 +1713,14 @@ static void CreateIntIntIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* in // Note that the caller must supply a properly aligned memory address. // If they do not, the behavior is undefined (atomicity not guaranteed, exception may occur). static void GenUnsafePut(LocationSummary* locations, - Primitive::Type type, + DataType::Type type, bool is_volatile, bool is_ordered, bool is_R6, CodeGeneratorMIPS* codegen) { - DCHECK((type == Primitive::kPrimInt) || - (type == Primitive::kPrimLong) || - (type == Primitive::kPrimNot)) << type; + DCHECK((type == DataType::Type::kInt32) || + (type == DataType::Type::kInt64) || + (type == DataType::Type::kReference)) << type; MipsAssembler* assembler = codegen->GetAssembler(); // Object pointer. Register base = locations->InAt(1).AsRegister<Register>(); @@ -1733,10 +1733,10 @@ static void GenUnsafePut(LocationSummary* locations, if (is_volatile || is_ordered) { __ Sync(0); } - if ((type == Primitive::kPrimInt) || (type == Primitive::kPrimNot)) { + if ((type == DataType::Type::kInt32) || (type == DataType::Type::kReference)) { Register value = locations->InAt(3).AsRegister<Register>(); - if (kPoisonHeapReferences && type == Primitive::kPrimNot) { + if (kPoisonHeapReferences && type == DataType::Type::kReference) { __ PoisonHeapReference(AT, value); value = AT; } @@ -1766,7 +1766,7 @@ static void GenUnsafePut(LocationSummary* locations, __ Sync(0); } - if (type == Primitive::kPrimNot) { + if (type == DataType::Type::kReference) { bool value_can_be_null = true; // TODO: Worth finding out this information? 
codegen->MarkGCCard(base, locations->InAt(3).AsRegister<Register>(), value_can_be_null); } @@ -1779,7 +1779,7 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafePut(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS::VisitUnsafePut(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), - Primitive::kPrimInt, + DataType::Type::kInt32, /* is_volatile */ false, /* is_ordered */ false, IsR6(), @@ -1793,7 +1793,7 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafePutOrdered(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS::VisitUnsafePutOrdered(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), - Primitive::kPrimInt, + DataType::Type::kInt32, /* is_volatile */ false, /* is_ordered */ true, IsR6(), @@ -1807,7 +1807,7 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafePutVolatile(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS::VisitUnsafePutVolatile(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), - Primitive::kPrimInt, + DataType::Type::kInt32, /* is_volatile */ true, /* is_ordered */ false, IsR6(), @@ -1821,7 +1821,7 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObject(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObject(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), - Primitive::kPrimNot, + DataType::Type::kReference, /* is_volatile */ false, /* is_ordered */ false, IsR6(), @@ -1835,7 +1835,7 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObjectOrdered(HInvoke* invoke) void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectOrdered(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), - Primitive::kPrimNot, + DataType::Type::kReference, /* is_volatile */ false, /* is_ordered */ true, IsR6(), @@ -1849,7 +1849,7 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObjectVolatile(HInvoke* invoke void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectVolatile(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), - Primitive::kPrimNot, + DataType::Type::kReference, /* is_volatile */ true, /* is_ordered */ false, IsR6(), @@ -1863,7 +1863,7 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafePutLong(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLong(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), - Primitive::kPrimLong, + DataType::Type::kInt64, /* is_volatile */ false, /* is_ordered */ false, IsR6(), @@ -1877,7 +1877,7 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafePutLongOrdered(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLongOrdered(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), - Primitive::kPrimLong, + DataType::Type::kInt64, /* is_volatile */ false, /* is_ordered */ true, IsR6(), @@ -1908,7 +1908,7 @@ static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* arena, HInvoke* // Note that the caller must supply a properly aligned memory address. // If they do not, the behavior is undefined (atomicity not guaranteed, exception may occur). 
-static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorMIPS* codegen) { +static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorMIPS* codegen) { MipsAssembler* assembler = codegen->GetAssembler(); LocationSummary* locations = invoke->GetLocations(); bool isR6 = codegen->GetInstructionSetFeatures().IsR6(); @@ -1924,7 +1924,7 @@ static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorMIPS* cod DCHECK_NE(offset_lo, out); DCHECK_NE(expected, out); - if (type == Primitive::kPrimNot) { + if (type == DataType::Type::kReference) { // The only read barrier implementation supporting the // UnsafeCASObject intrinsic is the Baker-style read barriers. DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier); @@ -1954,7 +1954,7 @@ static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorMIPS* cod MipsLabel loop_head, exit_loop; __ Addu(TMP, base, offset_lo); - if (kPoisonHeapReferences && type == Primitive::kPrimNot) { + if (kPoisonHeapReferences && type == DataType::Type::kReference) { __ PoisonHeapReference(expected); // Do not poison `value`, if it is the same register as // `expected`, which has just been poisoned. @@ -1970,7 +1970,7 @@ static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorMIPS* cod __ Sync(0); __ Bind(&loop_head); - if ((type == Primitive::kPrimInt) || (type == Primitive::kPrimNot)) { + if ((type == DataType::Type::kInt32) || (type == DataType::Type::kReference)) { if (isR6) { __ LlR6(out, TMP); } else { @@ -1988,11 +1988,11 @@ static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorMIPS* cod // in the case that the store fails. Whether the // store succeeds, or fails, it will load the // correct Boolean value into the 'out' register. - // This test isn't really necessary. We only support Primitive::kPrimInt, - // Primitive::kPrimNot, and we already verified that we're working on one + // This test isn't really necessary. We only support DataType::Type::kInt, + // DataType::Type::kReference, and we already verified that we're working on one // of those two types. It's left here in case the code needs to support // other types in the future. - if ((type == Primitive::kPrimInt) || (type == Primitive::kPrimNot)) { + if ((type == DataType::Type::kInt32) || (type == DataType::Type::kReference)) { if (isR6) { __ ScR6(out, TMP); } else { @@ -2004,7 +2004,7 @@ static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorMIPS* cod __ Bind(&exit_loop); __ Sync(0); - if (kPoisonHeapReferences && type == Primitive::kPrimNot) { + if (kPoisonHeapReferences && type == DataType::Type::kReference) { __ UnpoisonHeapReference(expected); // Do not unpoison `value`, if it is the same register as // `expected`, which has just been unpoisoned. @@ -2020,7 +2020,7 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafeCASInt(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS::VisitUnsafeCASInt(HInvoke* invoke) { - GenCas(invoke, Primitive::kPrimInt, codegen_); + GenCas(invoke, DataType::Type::kInt32, codegen_); } // boolean sun.misc.Unsafe.compareAndSwapObject(Object o, long offset, Object expected, Object x) @@ -2039,7 +2039,7 @@ void IntrinsicCodeGeneratorMIPS::VisitUnsafeCASObject(HInvoke* invoke) { // UnsafeCASObject intrinsic is the Baker-style read barriers. 
DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier); - GenCas(invoke, Primitive::kPrimNot, codegen_); + GenCas(invoke, DataType::Type::kReference, codegen_); } // int java.lang.String.compareTo(String anotherString) @@ -2050,7 +2050,7 @@ void IntrinsicLocationsBuilderMIPS::VisitStringCompareTo(HInvoke* invoke) { InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); - Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt); + Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32); locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<Register>())); } @@ -2218,7 +2218,7 @@ static void GenerateStringIndexOf(HInvoke* invoke, __ Bind(slow_path->GetExitLabel()); return; } - } else if (code_point->GetType() != Primitive::kPrimChar) { + } else if (code_point->GetType() != DataType::Type::kUint16) { Register char_reg = locations->InAt(1).AsRegister<Register>(); // The "bltu" conditional branch tests to see if the character value // fits in a valid 16-bit (MIPS halfword) value. If it doesn't then @@ -2256,7 +2256,7 @@ void IntrinsicLocationsBuilderMIPS::VisitStringIndexOf(HInvoke* invoke) { InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); - Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt); + Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32); locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<Register>())); // Need a temp for slow-path codepoint compare, and need to send start-index=0. @@ -2282,7 +2282,7 @@ void IntrinsicLocationsBuilderMIPS::VisitStringIndexOfAfter(HInvoke* invoke) { locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2))); - Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt); + Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32); locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<Register>())); // Need a temp for slow-path codepoint compare. 
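Both GenCas implementations above (exclusive load/store on ARM, ll/sc on MIPS) realize the same boolean compare-and-swap contract as sun.misc.Unsafe.compareAndSwapInt. A minimal portable model of that contract, using std::atomic (a sketch of the semantics only, not the generated retry loop):

#include <atomic>
#include <cstdint>
// Returns true iff *addr still held `expected` and was atomically replaced by `desired`.
static bool CompareAndSwapInt32(std::atomic<int32_t>* addr, int32_t expected, int32_t desired) {
  return addr->compare_exchange_strong(expected, desired);
}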
@@ -2307,7 +2307,7 @@ void IntrinsicLocationsBuilderMIPS::VisitStringNewStringFromBytes(HInvoke* invok locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2))); locations->SetInAt(3, Location::RegisterLocation(calling_convention.GetRegisterAt(3))); - Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt); + Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32); locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<Register>())); } @@ -2332,7 +2332,7 @@ void IntrinsicLocationsBuilderMIPS::VisitStringNewStringFromChars(HInvoke* invok locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2))); - Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt); + Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32); locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<Register>())); } @@ -2353,7 +2353,7 @@ void IntrinsicLocationsBuilderMIPS::VisitStringNewStringFromString(HInvoke* invo kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); - Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt); + Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32); locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<Register>())); } @@ -2370,16 +2370,16 @@ void IntrinsicCodeGeneratorMIPS::VisitStringNewStringFromString(HInvoke* invoke) } static void GenIsInfinite(LocationSummary* locations, - const Primitive::Type type, + const DataType::Type type, const bool isR6, MipsAssembler* assembler) { FRegister in = locations->InAt(0).AsFpuRegister<FRegister>(); Register out = locations->Out().AsRegister<Register>(); - DCHECK(type == Primitive::kPrimFloat || type == Primitive::kPrimDouble); + DCHECK(type == DataType::Type::kFloat32 || type == DataType::Type::kFloat64); if (isR6) { - if (type == Primitive::kPrimDouble) { + if (type == DataType::Type::kFloat64) { __ ClassD(FTMP, in); } else { __ ClassS(FTMP, in); @@ -2389,7 +2389,7 @@ static void GenIsInfinite(LocationSummary* locations, __ Sltu(out, ZERO, out); } else { // If one, or more, of the exponent bits is zero, then the number can't be infinite. 
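// For reference, a portable model of the check the non-R6 path below performs
// (illustrative sketch): a double is infinite iff, once the sign bit is shifted
// out, its bits equal the positive-infinity pattern (all exponent bits set,
// zero mantissa).
#include <cstdint>
#include <cstring>
static bool IsInfiniteDouble(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return (bits << 1) == (UINT64_C(0x7FF0000000000000) << 1);
}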
- if (type == Primitive::kPrimDouble) { + if (type == DataType::Type::kFloat64) { __ MoveFromFpuHigh(TMP, in); __ LoadConst32(AT, High32Bits(kPositiveInfinityDouble)); } else { @@ -2400,7 +2400,7 @@ static void GenIsInfinite(LocationSummary* locations, __ Sll(TMP, TMP, 1); - if (type == Primitive::kPrimDouble) { + if (type == DataType::Type::kFloat64) { __ Mfc1(AT, in); __ Or(TMP, TMP, AT); } @@ -2415,7 +2415,7 @@ void IntrinsicLocationsBuilderMIPS::VisitFloatIsInfinite(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS::VisitFloatIsInfinite(HInvoke* invoke) { - GenIsInfinite(invoke->GetLocations(), Primitive::kPrimFloat, IsR6(), GetAssembler()); + GenIsInfinite(invoke->GetLocations(), DataType::Type::kFloat32, IsR6(), GetAssembler()); } // boolean java.lang.Double.isInfinite(double) @@ -2424,16 +2424,16 @@ void IntrinsicLocationsBuilderMIPS::VisitDoubleIsInfinite(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS::VisitDoubleIsInfinite(HInvoke* invoke) { - GenIsInfinite(invoke->GetLocations(), Primitive::kPrimDouble, IsR6(), GetAssembler()); + GenIsInfinite(invoke->GetLocations(), DataType::Type::kFloat64, IsR6(), GetAssembler()); } static void GenHighestOneBit(LocationSummary* locations, - const Primitive::Type type, + const DataType::Type type, bool isR6, MipsAssembler* assembler) { - DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong); + DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64); - if (type == Primitive::kPrimLong) { + if (type == DataType::Type::kInt64) { Register in_lo = locations->InAt(0).AsRegisterPairLow<Register>(); Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>(); Register out_lo = locations->Out().AsRegisterPairLow<Register>(); @@ -2480,7 +2480,7 @@ void IntrinsicLocationsBuilderMIPS::VisitIntegerHighestOneBit(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS::VisitIntegerHighestOneBit(HInvoke* invoke) { - GenHighestOneBit(invoke->GetLocations(), Primitive::kPrimInt, IsR6(), GetAssembler()); + GenHighestOneBit(invoke->GetLocations(), DataType::Type::kInt32, IsR6(), GetAssembler()); } // long java.lang.Long.highestOneBit(long) @@ -2489,16 +2489,16 @@ void IntrinsicLocationsBuilderMIPS::VisitLongHighestOneBit(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS::VisitLongHighestOneBit(HInvoke* invoke) { - GenHighestOneBit(invoke->GetLocations(), Primitive::kPrimLong, IsR6(), GetAssembler()); + GenHighestOneBit(invoke->GetLocations(), DataType::Type::kInt64, IsR6(), GetAssembler()); } static void GenLowestOneBit(LocationSummary* locations, - const Primitive::Type type, + const DataType::Type type, bool isR6, MipsAssembler* assembler) { - DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong); + DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64); - if (type == Primitive::kPrimLong) { + if (type == DataType::Type::kInt64) { Register in_lo = locations->InAt(0).AsRegisterPairLow<Register>(); Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>(); Register out_lo = locations->Out().AsRegisterPairLow<Register>(); @@ -2528,7 +2528,7 @@ void IntrinsicLocationsBuilderMIPS::VisitIntegerLowestOneBit(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS::VisitIntegerLowestOneBit(HInvoke* invoke) { - GenLowestOneBit(invoke->GetLocations(), Primitive::kPrimInt, IsR6(), GetAssembler()); + GenLowestOneBit(invoke->GetLocations(), DataType::Type::kInt32, IsR6(), GetAssembler()); } // long java.lang.Long.lowestOneBit(long) @@ -2537,7 +2537,7 @@ void 
IntrinsicLocationsBuilderMIPS::VisitLongLowestOneBit(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS::VisitLongLowestOneBit(HInvoke* invoke) { - GenLowestOneBit(invoke->GetLocations(), Primitive::kPrimLong, IsR6(), GetAssembler()); + GenLowestOneBit(invoke->GetLocations(), DataType::Type::kInt64, IsR6(), GetAssembler()); } // int java.lang.Math.round(float) @@ -2686,9 +2686,9 @@ void IntrinsicCodeGeneratorMIPS::VisitStringGetCharsNoCheck(HInvoke* invoke) { LocationSummary* locations = invoke->GetLocations(); // Check assumption that sizeof(Char) is 2 (used in scaling below). - const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar); + const size_t char_size = DataType::Size(DataType::Type::kUint16); DCHECK_EQ(char_size, 2u); - const size_t char_shift = Primitive::ComponentSizeShift(Primitive::kPrimChar); + const size_t char_shift = DataType::SizeShift(DataType::Type::kUint16); Register srcObj = locations->InAt(0).AsRegister<Register>(); Register srcBegin = locations->InAt(1).AsRegister<Register>(); @@ -2764,7 +2764,7 @@ static void CreateFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) { InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0))); - locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimDouble)); + locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kFloat64)); } static void CreateFPFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) { @@ -2775,7 +2775,7 @@ static void CreateFPFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0))); locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1))); - locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimDouble)); + locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kFloat64)); } static void GenFPToFPCall(HInvoke* invoke, CodeGeneratorMIPS* codegen, QuickEntrypointEnum entry) { @@ -3112,10 +3112,10 @@ void IntrinsicCodeGeneratorMIPS::VisitSystemArrayCopyChar(HInvoke* invoke) { // Okay, everything checks out. Finally time to do the copy. // Check assumption that sizeof(Char) is 2 (used in scaling below). 
- const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar); + const size_t char_size = DataType::Size(DataType::Type::kUint16); DCHECK_EQ(char_size, 2u); - const size_t char_shift = Primitive::ComponentSizeShift(Primitive::kPrimChar); + const size_t char_shift = DataType::SizeShift(DataType::Type::kUint16); const uint32_t data_offset = mirror::Array::DataOffset(char_size).Uint32Value(); @@ -3154,7 +3154,7 @@ void IntrinsicLocationsBuilderMIPS::VisitIntegerValueOf(HInvoke* invoke) { IntrinsicVisitor::ComputeIntegerValueOfLocations( invoke, codegen_, - calling_convention.GetReturnLocation(Primitive::kPrimNot), + calling_convention.GetReturnLocation(DataType::Type::kReference), Location::RegisterLocation(calling_convention.GetRegisterAt(0))); } diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc index 80448f1389..d0234d8271 100644 --- a/compiler/optimizing/intrinsics_mips64.cc +++ b/compiler/optimizing/intrinsics_mips64.cc @@ -49,16 +49,16 @@ ArenaAllocator* IntrinsicCodeGeneratorMIPS64::GetAllocator() { #define __ codegen->GetAssembler()-> static void MoveFromReturnRegister(Location trg, - Primitive::Type type, + DataType::Type type, CodeGeneratorMIPS64* codegen) { if (!trg.IsValid()) { - DCHECK_EQ(type, Primitive::kPrimVoid); + DCHECK_EQ(type, DataType::Type::kVoid); return; } - DCHECK_NE(type, Primitive::kPrimVoid); + DCHECK_NE(type, DataType::Type::kVoid); - if (Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) { + if (DataType::IsIntegralType(type) || type == DataType::Type::kReference) { GpuRegister trg_reg = trg.AsRegister<GpuRegister>(); if (trg_reg != V0) { __ Move(V0, trg_reg); @@ -66,7 +66,7 @@ static void MoveFromReturnRegister(Location trg, } else { FpuRegister trg_reg = trg.AsFpuRegister<FpuRegister>(); if (trg_reg != F0) { - if (type == Primitive::kPrimFloat) { + if (type == DataType::Type::kFloat32) { __ MovS(F0, trg_reg); } else { __ MovD(F0, trg_reg); @@ -224,21 +224,21 @@ static void CreateIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) { } static void GenReverseBytes(LocationSummary* locations, - Primitive::Type type, + DataType::Type type, Mips64Assembler* assembler) { GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>(); GpuRegister out = locations->Out().AsRegister<GpuRegister>(); switch (type) { - case Primitive::kPrimShort: + case DataType::Type::kInt16: __ Dsbh(out, in); __ Seh(out, out); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: __ Rotr(out, in, 16); __ Wsbh(out, out); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: __ Dsbh(out, in); __ Dshd(out, out); break; @@ -254,7 +254,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitIntegerReverseBytes(HInvoke* invoke) } void IntrinsicCodeGeneratorMIPS64::VisitIntegerReverseBytes(HInvoke* invoke) { - GenReverseBytes(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler()); + GenReverseBytes(invoke->GetLocations(), DataType::Type::kInt32, GetAssembler()); } // long java.lang.Long.reverseBytes(long) @@ -263,7 +263,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitLongReverseBytes(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS64::VisitLongReverseBytes(HInvoke* invoke) { - GenReverseBytes(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler()); + GenReverseBytes(invoke->GetLocations(), DataType::Type::kInt64, GetAssembler()); } // short java.lang.Short.reverseBytes(short) @@ -272,7 +272,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitShortReverseBytes(HInvoke* invoke) { } void 
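The Dsbh/Seh, Rotr/Wsbh and Dsbh/Dshd sequences in the MIPS64 GenReverseBytes above compute ordinary byte reversals. As a sketch, the same results in portable C++, relying on the GCC/Clang byte-swap builtins (function names are illustrative only):

#include <cstdint>

int32_t ReverseBytesInt(int32_t v) {
  // Rotr 16 + Wsbh: swap the two halfwords, then the bytes inside each halfword.
  return static_cast<int32_t>(__builtin_bswap32(static_cast<uint32_t>(v)));
}

int64_t ReverseBytesLong(int64_t v) {
  // Dsbh + Dshd: swap bytes within halfwords, then reverse the halfword order.
  return static_cast<int64_t>(__builtin_bswap64(static_cast<uint64_t>(v)));
}

int32_t ReverseBytesShort(int32_t v) {
  // Dsbh + Seh: swap the bytes of the low halfword, then sign-extend.
  return static_cast<int16_t>(__builtin_bswap16(static_cast<uint16_t>(v)));
}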
IntrinsicCodeGeneratorMIPS64::VisitShortReverseBytes(HInvoke* invoke) { - GenReverseBytes(invoke->GetLocations(), Primitive::kPrimShort, GetAssembler()); + GenReverseBytes(invoke->GetLocations(), DataType::Type::kInt16, GetAssembler()); } static void GenNumberOfLeadingZeroes(LocationSummary* locations, @@ -344,14 +344,14 @@ void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfTrailingZeros(HInvoke* invok } static void GenReverse(LocationSummary* locations, - Primitive::Type type, + DataType::Type type, Mips64Assembler* assembler) { - DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong); + DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64); GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>(); GpuRegister out = locations->Out().AsRegister<GpuRegister>(); - if (type == Primitive::kPrimInt) { + if (type == DataType::Type::kInt32) { __ Rotr(out, in, 16); __ Wsbh(out, out); __ Bitswap(out, out); @@ -368,7 +368,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitIntegerReverse(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS64::VisitIntegerReverse(HInvoke* invoke) { - GenReverse(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler()); + GenReverse(invoke->GetLocations(), DataType::Type::kInt32, GetAssembler()); } // long java.lang.Long.reverse(long) @@ -377,7 +377,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitLongReverse(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS64::VisitLongReverse(HInvoke* invoke) { - GenReverse(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler()); + GenReverse(invoke->GetLocations(), DataType::Type::kInt64, GetAssembler()); } static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) { @@ -389,12 +389,12 @@ static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) { } static void GenBitCount(LocationSummary* locations, - const Primitive::Type type, + const DataType::Type type, Mips64Assembler* assembler) { GpuRegister out = locations->Out().AsRegister<GpuRegister>(); GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>(); - DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong); + DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64); // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel // @@ -419,7 +419,7 @@ static void GenBitCount(LocationSummary* locations, // number of instructions executed even when a large number of bits // are set. 
- if (type == Primitive::kPrimInt) { + if (type == DataType::Type::kInt32) { __ Srl(TMP, in, 1); __ LoadConst32(AT, 0x55555555); __ And(TMP, TMP, AT); @@ -436,7 +436,7 @@ static void GenBitCount(LocationSummary* locations, __ LoadConst32(TMP, 0x01010101); __ MulR6(out, out, TMP); __ Srl(out, out, 24); - } else if (type == Primitive::kPrimLong) { + } else if (type == DataType::Type::kInt64) { __ Dsrl(TMP, in, 1); __ LoadConst64(AT, 0x5555555555555555L); __ And(TMP, TMP, AT); @@ -462,7 +462,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitIntegerBitCount(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS64::VisitIntegerBitCount(HInvoke* invoke) { - GenBitCount(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler()); + GenBitCount(invoke->GetLocations(), DataType::Type::kInt32, GetAssembler()); } // int java.lang.Long.bitCount(long) @@ -471,7 +471,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitLongBitCount(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS64::VisitLongBitCount(HInvoke* invoke) { - GenBitCount(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler()); + GenBitCount(invoke->GetLocations(), DataType::Type::kInt64, GetAssembler()); } static void MathAbsFP(LocationSummary* locations, bool is64bit, Mips64Assembler* assembler) { @@ -546,7 +546,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathAbsLong(HInvoke* invoke) { static void GenMinMaxFP(LocationSummary* locations, bool is_min, - Primitive::Type type, + DataType::Type type, Mips64Assembler* assembler) { FpuRegister a = locations->InAt(0).AsFpuRegister<FpuRegister>(); FpuRegister b = locations->InAt(1).AsFpuRegister<FpuRegister>(); @@ -563,7 +563,7 @@ static void GenMinMaxFP(LocationSummary* locations, // returned. This is why there is extra logic preceding the use of // the MIPS min.fmt/max.fmt instructions. If either a, or b holds a // NaN, return the NaN, otherwise return the min/max. 
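The masking sequence above is the classic parallel (SWAR) population count from the bit-hacks page cited in the code comment. The same arithmetic, with the same constants, in plain C++ (a sketch for reference, not code taken from the patch):

#include <cstdint>

uint32_t BitCount32(uint32_t v) {
  v = v - ((v >> 1) & 0x55555555u);                  // 2-bit sums
  v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u);  // 4-bit sums
  v = (v + (v >> 4)) & 0x0F0F0F0Fu;                  // 8-bit sums
  return (v * 0x01010101u) >> 24;                    // add the four byte sums
}

uint64_t BitCount64(uint64_t v) {
  v = v - ((v >> 1) & 0x5555555555555555ull);
  v = (v & 0x3333333333333333ull) + ((v >> 2) & 0x3333333333333333ull);
  v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0Full;
  return (v * 0x0101010101010101ull) >> 56;
}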
- if (type == Primitive::kPrimDouble) { + if (type == DataType::Type::kFloat64) { __ CmpUnD(FTMP, a, b); __ Bc1eqz(FTMP, &noNaNs); @@ -586,7 +586,7 @@ static void GenMinMaxFP(LocationSummary* locations, __ MaxD(out, a, b); } } else { - DCHECK_EQ(type, Primitive::kPrimFloat); + DCHECK_EQ(type, DataType::Type::kFloat32); __ CmpUnS(FTMP, a, b); __ Bc1eqz(FTMP, &noNaNs); @@ -628,7 +628,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitMathMinDoubleDouble(HInvoke* invoke) } void IntrinsicCodeGeneratorMIPS64::VisitMathMinDoubleDouble(HInvoke* invoke) { - GenMinMaxFP(invoke->GetLocations(), /* is_min */ true, Primitive::kPrimDouble, GetAssembler()); + GenMinMaxFP(invoke->GetLocations(), /* is_min */ true, DataType::Type::kFloat64, GetAssembler()); } // float java.lang.Math.min(float, float) @@ -637,7 +637,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitMathMinFloatFloat(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS64::VisitMathMinFloatFloat(HInvoke* invoke) { - GenMinMaxFP(invoke->GetLocations(), /* is_min */ true, Primitive::kPrimFloat, GetAssembler()); + GenMinMaxFP(invoke->GetLocations(), /* is_min */ true, DataType::Type::kFloat32, GetAssembler()); } // double java.lang.Math.max(double, double) @@ -646,7 +646,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitMathMaxDoubleDouble(HInvoke* invoke) } void IntrinsicCodeGeneratorMIPS64::VisitMathMaxDoubleDouble(HInvoke* invoke) { - GenMinMaxFP(invoke->GetLocations(), /* is_min */ false, Primitive::kPrimDouble, GetAssembler()); + GenMinMaxFP(invoke->GetLocations(), /* is_min */ false, DataType::Type::kFloat64, GetAssembler()); } // float java.lang.Math.max(float, float) @@ -655,7 +655,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitMathMaxFloatFloat(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS64::VisitMathMaxFloatFloat(HInvoke* invoke) { - GenMinMaxFP(invoke->GetLocations(), /* is_min */ false, Primitive::kPrimFloat, GetAssembler()); + GenMinMaxFP(invoke->GetLocations(), /* is_min */ false, DataType::Type::kFloat32, GetAssembler()); } static void GenMinMax(LocationSummary* locations, @@ -885,12 +885,12 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathCeil(HInvoke* invoke) { GenRoundingMode(invoke->GetLocations(), kCeil, GetAssembler()); } -static void GenRound(LocationSummary* locations, Mips64Assembler* assembler, Primitive::Type type) { +static void GenRound(LocationSummary* locations, Mips64Assembler* assembler, DataType::Type type) { FpuRegister in = locations->InAt(0).AsFpuRegister<FpuRegister>(); FpuRegister half = locations->GetTemp(0).AsFpuRegister<FpuRegister>(); GpuRegister out = locations->Out().AsRegister<GpuRegister>(); - DCHECK(type == Primitive::kPrimFloat || type == Primitive::kPrimDouble); + DCHECK(type == DataType::Type::kFloat32 || type == DataType::Type::kFloat64); Mips64Label done; @@ -903,7 +903,7 @@ static void GenRound(LocationSummary* locations, Mips64Assembler* assembler, Pri // return out; // out = floor(in); - if (type == Primitive::kPrimDouble) { + if (type == DataType::Type::kFloat64) { __ FloorLD(FTMP, in); __ Dmfc1(out, FTMP); } else { @@ -912,7 +912,7 @@ static void GenRound(LocationSummary* locations, Mips64Assembler* assembler, Pri } // if (out != MAX_VALUE && out != MIN_VALUE) - if (type == Primitive::kPrimDouble) { + if (type == DataType::Type::kFloat64) { __ Daddiu(TMP, out, 1); __ Dati(TMP, 0x8000); // TMP = out + 0x8000 0000 0000 0001 // or out - 0x7FFF FFFF FFFF FFFF. @@ -933,7 +933,7 @@ static void GenRound(LocationSummary* locations, Mips64Assembler* assembler, Pri } // TMP = (0.5 <= (in - out)) ? 
-1 : 0; - if (type == Primitive::kPrimDouble) { + if (type == DataType::Type::kFloat64) { __ Cvtdl(FTMP, FTMP); // Convert output of floor.l.d back to "double". __ LoadConst64(AT, bit_cast<int64_t, double>(0.5)); __ SubD(FTMP, in, FTMP); @@ -950,7 +950,7 @@ static void GenRound(LocationSummary* locations, Mips64Assembler* assembler, Pri } // Return out -= TMP. - if (type == Primitive::kPrimDouble) { + if (type == DataType::Type::kFloat64) { __ Dsubu(out, out, TMP); } else { __ Subu(out, out, TMP); @@ -970,7 +970,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitMathRoundFloat(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS64::VisitMathRoundFloat(HInvoke* invoke) { - GenRound(invoke->GetLocations(), GetAssembler(), Primitive::kPrimFloat); + GenRound(invoke->GetLocations(), GetAssembler(), DataType::Type::kFloat32); } // long java.lang.Math.round(double) @@ -984,7 +984,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitMathRoundDouble(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS64::VisitMathRoundDouble(HInvoke* invoke) { - GenRound(invoke->GetLocations(), GetAssembler(), Primitive::kPrimDouble); + GenRound(invoke->GetLocations(), GetAssembler(), DataType::Type::kFloat64); } // byte libcore.io.Memory.peekByte(long address) @@ -1119,7 +1119,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitThreadCurrentThread(HInvoke* invoke) { static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke, - Primitive::Type type) { + DataType::Type type) { bool can_call = kEmitCompilerReadBarrier && (invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject || invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile); @@ -1136,7 +1136,7 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, locations->SetInAt(2, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), (can_call ? Location::kOutputOverlap : Location::kNoOutputOverlap)); - if (type == Primitive::kPrimNot && kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + if (type == DataType::Type::kReference && kEmitCompilerReadBarrier && kUseBakerReadBarrier) { // We need a temporary register for the read barrier marking slow // path in InstructionCodeGeneratorMIPS64::GenerateReferenceLoadWithBakerReadBarrier. locations->AddTemp(Location::RequiresRegister()); @@ -1146,13 +1146,13 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, // Note that the caller must supply a properly aligned memory address. // If they do not, the behavior is undefined (atomicity not guaranteed, exception may occur). static void GenUnsafeGet(HInvoke* invoke, - Primitive::Type type, + DataType::Type type, bool is_volatile, CodeGeneratorMIPS64* codegen) { LocationSummary* locations = invoke->GetLocations(); - DCHECK((type == Primitive::kPrimInt) || - (type == Primitive::kPrimLong) || - (type == Primitive::kPrimNot)) << type; + DCHECK((type == DataType::Type::kInt32) || + (type == DataType::Type::kInt64) || + (type == DataType::Type::kReference)) << type; Mips64Assembler* assembler = codegen->GetAssembler(); // Target register. 
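Putting the commented steps of GenRound above together: the intrinsic floors the input, then adds one when the remaining fraction is at least 0.5, unless the floor conversion already saturated to the integer minimum or maximum. A control-flow sketch in C++ (a plain cast does not reproduce the saturating behaviour of floor.l.d / floor.w.s, so treat this only as an outline of the logic):

#include <cmath>
#include <cstdint>
#include <limits>

int64_t RoundDouble(double in) {
  double floored = std::floor(in);
  int64_t out = static_cast<int64_t>(floored);  // floor.l.d saturates; this cast does not.
  if (out != std::numeric_limits<int64_t>::max() &&
      out != std::numeric_limits<int64_t>::min() &&
      (in - floored) >= 0.5) {
    ++out;  // corresponds to: TMP = (0.5 <= (in - out)) ? -1 : 0;  out -= TMP;
  }
  return out;
}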
Location trg_loc = locations->Out(); @@ -1164,26 +1164,26 @@ static void GenUnsafeGet(HInvoke* invoke, Location offset_loc = locations->InAt(2); GpuRegister offset = offset_loc.AsRegister<GpuRegister>(); - if (!(kEmitCompilerReadBarrier && kUseBakerReadBarrier && (type == Primitive::kPrimNot))) { + if (!(kEmitCompilerReadBarrier && kUseBakerReadBarrier && (type == DataType::Type::kReference))) { __ Daddu(TMP, base, offset); } switch (type) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: __ Ld(trg, TMP, 0); if (is_volatile) { __ Sync(0); } break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: __ Lw(trg, TMP, 0); if (is_volatile) { __ Sync(0); } break; - case Primitive::kPrimNot: + case DataType::Type::kReference: if (kEmitCompilerReadBarrier) { if (kUseBakerReadBarrier) { Location temp = locations->GetTemp(0); @@ -1227,56 +1227,56 @@ static void GenUnsafeGet(HInvoke* invoke, // int sun.misc.Unsafe.getInt(Object o, long offset) void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGet(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimInt); + CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32); } void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGet(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimInt, /* is_volatile */ false, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_); } // int sun.misc.Unsafe.getIntVolatile(Object o, long offset) void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimInt); + CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32); } void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimInt, /* is_volatile */ true, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_); } // long sun.misc.Unsafe.getLong(Object o, long offset) void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetLong(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimLong); + CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64); } void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLong(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimLong, /* is_volatile */ false, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_); } // long sun.misc.Unsafe.getLongVolatile(Object o, long offset) void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimLong); + CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64); } void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimLong, /* is_volatile */ true, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_); } // Object sun.misc.Unsafe.getObject(Object o, long offset) void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetObject(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimNot); + CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference); } void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObject(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimNot, /* is_volatile */ false, codegen_); + GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_); } // Object 
sun.misc.Unsafe.getObjectVolatile(Object o, long offset) void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimNot); + CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference); } void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimNot, /* is_volatile */ true, codegen_); + GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_); } static void CreateIntIntIntIntToVoid(ArenaAllocator* arena, HInvoke* invoke) { @@ -1292,13 +1292,13 @@ static void CreateIntIntIntIntToVoid(ArenaAllocator* arena, HInvoke* invoke) { // Note that the caller must supply a properly aligned memory address. // If they do not, the behavior is undefined (atomicity not guaranteed, exception may occur). static void GenUnsafePut(LocationSummary* locations, - Primitive::Type type, + DataType::Type type, bool is_volatile, bool is_ordered, CodeGeneratorMIPS64* codegen) { - DCHECK((type == Primitive::kPrimInt) || - (type == Primitive::kPrimLong) || - (type == Primitive::kPrimNot)); + DCHECK((type == DataType::Type::kInt32) || + (type == DataType::Type::kInt64) || + (type == DataType::Type::kReference)); Mips64Assembler* assembler = codegen->GetAssembler(); // Object pointer. GpuRegister base = locations->InAt(1).AsRegister<GpuRegister>(); @@ -1311,9 +1311,9 @@ static void GenUnsafePut(LocationSummary* locations, __ Sync(0); } switch (type) { - case Primitive::kPrimInt: - case Primitive::kPrimNot: - if (kPoisonHeapReferences && type == Primitive::kPrimNot) { + case DataType::Type::kInt32: + case DataType::Type::kReference: + if (kPoisonHeapReferences && type == DataType::Type::kReference) { __ PoisonHeapReference(AT, value); __ Sw(AT, TMP, 0); } else { @@ -1321,7 +1321,7 @@ static void GenUnsafePut(LocationSummary* locations, } break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: __ Sd(value, TMP, 0); break; @@ -1333,7 +1333,7 @@ static void GenUnsafePut(LocationSummary* locations, __ Sync(0); } - if (type == Primitive::kPrimNot) { + if (type == DataType::Type::kReference) { bool value_can_be_null = true; // TODO: Worth finding out this information? 
codegen->MarkGCCard(base, value, value_can_be_null); } @@ -1346,7 +1346,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePut(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS64::VisitUnsafePut(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), - Primitive::kPrimInt, + DataType::Type::kInt32, /* is_volatile */ false, /* is_ordered */ false, codegen_); @@ -1359,7 +1359,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), - Primitive::kPrimInt, + DataType::Type::kInt32, /* is_volatile */ false, /* is_ordered */ true, codegen_); @@ -1372,7 +1372,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), - Primitive::kPrimInt, + DataType::Type::kInt32, /* is_volatile */ true, /* is_ordered */ false, codegen_); @@ -1385,7 +1385,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObject(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObject(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), - Primitive::kPrimNot, + DataType::Type::kReference, /* is_volatile */ false, /* is_ordered */ false, codegen_); @@ -1398,7 +1398,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invok void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), - Primitive::kPrimNot, + DataType::Type::kReference, /* is_volatile */ false, /* is_ordered */ true, codegen_); @@ -1411,7 +1411,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invo void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), - Primitive::kPrimNot, + DataType::Type::kReference, /* is_volatile */ true, /* is_ordered */ false, codegen_); @@ -1424,7 +1424,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLong(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLong(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), - Primitive::kPrimLong, + DataType::Type::kInt64, /* is_volatile */ false, /* is_ordered */ false, codegen_); @@ -1437,7 +1437,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke) void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), - Primitive::kPrimLong, + DataType::Type::kInt64, /* is_volatile */ false, /* is_ordered */ true, codegen_); @@ -1450,7 +1450,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), - Primitive::kPrimLong, + DataType::Type::kInt64, /* is_volatile */ true, /* is_ordered */ false, codegen_); @@ -1480,7 +1480,7 @@ static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* arena, HInvoke* // Note that the caller must supply a properly aligned memory address. // If they do not, the behavior is undefined (atomicity not guaranteed, exception may occur). 
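The volatile variants of GenUnsafePut above bracket the plain store with Sync(0) barriers. Expressed with C++ fences, the ordering (not the generated code) looks roughly as follows; the wrapper name and the use of std::atomic are illustrative assumptions:

#include <atomic>
#include <cstdint>

void UnsafePutIntVolatile(std::atomic<int32_t>* field, int32_t value) {
  std::atomic_thread_fence(std::memory_order_seq_cst);  // Sync(0) before the store
  field->store(value, std::memory_order_relaxed);       // Sw / Sd
  std::atomic_thread_fence(std::memory_order_seq_cst);  // Sync(0) after the store
}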
-static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorMIPS64* codegen) { +static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorMIPS64* codegen) { Mips64Assembler* assembler = codegen->GetAssembler(); LocationSummary* locations = invoke->GetLocations(); GpuRegister base = locations->InAt(1).AsRegister<GpuRegister>(); @@ -1495,7 +1495,7 @@ static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorMIPS64* c DCHECK_NE(offset, out); DCHECK_NE(expected, out); - if (type == Primitive::kPrimNot) { + if (type == DataType::Type::kReference) { // The only read barrier implementation supporting the // UnsafeCASObject intrinsic is the Baker-style read barriers. DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier); @@ -1525,7 +1525,7 @@ static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorMIPS64* c Mips64Label loop_head, exit_loop; __ Daddu(TMP, base, offset); - if (kPoisonHeapReferences && type == Primitive::kPrimNot) { + if (kPoisonHeapReferences && type == DataType::Type::kReference) { __ PoisonHeapReference(expected); // Do not poison `value`, if it is the same register as // `expected`, which has just been poisoned. @@ -1541,13 +1541,13 @@ static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorMIPS64* c __ Sync(0); __ Bind(&loop_head); - if (type == Primitive::kPrimLong) { + if (type == DataType::Type::kInt64) { __ Lld(out, TMP); } else { // Note: We will need a read barrier here, when read barrier // support is added to the MIPS64 back end. __ Ll(out, TMP); - if (type == Primitive::kPrimNot) { + if (type == DataType::Type::kReference) { // The LL instruction sign-extends the 32-bit value, but // 32-bit references must be zero-extended. Zero-extend `out`. __ Dext(out, out, 0, 32); @@ -1561,7 +1561,7 @@ static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorMIPS64* c // in the case that the store fails. Whether the // store succeeds, or fails, it will load the // correct Boolean value into the 'out' register. - if (type == Primitive::kPrimLong) { + if (type == DataType::Type::kInt64) { __ Scd(out, TMP); } else { __ Sc(out, TMP); @@ -1571,7 +1571,7 @@ static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorMIPS64* c __ Bind(&exit_loop); __ Sync(0); - if (kPoisonHeapReferences && type == Primitive::kPrimNot) { + if (kPoisonHeapReferences && type == DataType::Type::kReference) { __ UnpoisonHeapReference(expected); // Do not unpoison `value`, if it is the same register as // `expected`, which has just been unpoisoned. @@ -1587,7 +1587,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafeCASInt(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASInt(HInvoke* invoke) { - GenCas(invoke, Primitive::kPrimInt, codegen_); + GenCas(invoke, DataType::Type::kInt32, codegen_); } // boolean sun.misc.Unsafe.compareAndSwapLong(Object o, long offset, long expected, long x) @@ -1596,7 +1596,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafeCASLong(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASLong(HInvoke* invoke) { - GenCas(invoke, Primitive::kPrimLong, codegen_); + GenCas(invoke, DataType::Type::kInt64, codegen_); } // boolean sun.misc.Unsafe.compareAndSwapObject(Object o, long offset, Object expected, Object x) @@ -1615,7 +1615,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASObject(HInvoke* invoke) { // UnsafeCASObject intrinsic is the Baker-style read barriers. 
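GenCas above implements sun.misc.Unsafe.compareAndSwap* with an Lld/Ll ... Scd/Sc loop bracketed by Sync(0). In C++ terms the Java operation being implemented is a strong, sequentially consistent compare-and-exchange; a sketch of the equivalent observable behaviour (the wrapper name is assumed, and this says nothing about code generation):

#include <atomic>
#include <cstdint>

bool UnsafeCasInt64(std::atomic<int64_t>* field, int64_t expected, int64_t desired) {
  // The seq_cst ordering stands in for the Sync(0) barriers around the loop.
  return field->compare_exchange_strong(expected, desired, std::memory_order_seq_cst);
}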
DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier); - GenCas(invoke, Primitive::kPrimNot, codegen_); + GenCas(invoke, DataType::Type::kReference, codegen_); } // int java.lang.String.compareTo(String anotherString) @@ -1626,7 +1626,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitStringCompareTo(HInvoke* invoke) { InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); - Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt); + Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32); locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>())); } @@ -1790,7 +1790,7 @@ static void GenerateStringIndexOf(HInvoke* invoke, __ Bind(slow_path->GetExitLabel()); return; } - } else if (code_point->GetType() != Primitive::kPrimChar) { + } else if (code_point->GetType() != DataType::Type::kUint16) { GpuRegister char_reg = locations->InAt(1).AsRegister<GpuRegister>(); __ LoadConst32(tmp_reg, std::numeric_limits<uint16_t>::max()); slow_path = new (allocator) IntrinsicSlowPathMIPS64(invoke); @@ -1822,7 +1822,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOf(HInvoke* invoke) { InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); - Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt); + Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32); locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>())); // Need a temp for slow-path codepoint compare, and need to send start-index=0. 
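In GenerateStringIndexOf above, when the code-point argument is not statically known to be a char (kUint16), values that do not fit in 16 bits are routed to the slow path instead of the fast char scan. The guard amounts to an unsigned range check against std::numeric_limits<uint16_t>::max(); a small sketch (helper name assumed, and how negative inputs are routed is an assumption here):

#include <cstdint>
#include <limits>

bool NeedsSlowPathIndexOf(int32_t code_point) {
  // Treat the value as unsigned: anything outside [0, 0xFFFF] takes the runtime call.
  return static_cast<uint32_t>(code_point) > std::numeric_limits<uint16_t>::max();
}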
@@ -1844,7 +1844,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) { locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2))); - Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt); + Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32); locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>())); } @@ -1863,7 +1863,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromBytes(HInvoke* inv locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2))); locations->SetInAt(3, Location::RegisterLocation(calling_convention.GetRegisterAt(3))); - Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt); + Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32); locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>())); } @@ -1890,7 +1890,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromChars(HInvoke* inv locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2))); - Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt); + Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32); locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>())); } @@ -1912,7 +1912,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromString(HInvoke* in kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); - Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt); + Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32); locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>())); } @@ -1985,9 +1985,9 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringGetCharsNoCheck(HInvoke* invoke) { LocationSummary* locations = invoke->GetLocations(); // Check assumption that sizeof(Char) is 2 (used in scaling below). - const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar); + const size_t char_size = DataType::Size(DataType::Type::kUint16); DCHECK_EQ(char_size, 2u); - const size_t char_shift = Primitive::ComponentSizeShift(Primitive::kPrimChar); + const size_t char_shift = DataType::SizeShift(DataType::Type::kUint16); GpuRegister srcObj = locations->InAt(0).AsRegister<GpuRegister>(); GpuRegister srcBegin = locations->InAt(1).AsRegister<GpuRegister>(); @@ -2213,10 +2213,10 @@ void IntrinsicCodeGeneratorMIPS64::VisitSystemArrayCopyChar(HInvoke* invoke) { // Okay, everything checks out. Finally time to do the copy. // Check assumption that sizeof(Char) is 2 (used in scaling below). 
- const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar); + const size_t char_size = DataType::Size(DataType::Type::kUint16); DCHECK_EQ(char_size, 2u); - const size_t char_shift = Primitive::ComponentSizeShift(Primitive::kPrimChar); + const size_t char_shift = DataType::SizeShift(DataType::Type::kUint16); const uint32_t data_offset = mirror::Array::DataOffset(char_size).Uint32Value(); @@ -2250,14 +2250,14 @@ void IntrinsicCodeGeneratorMIPS64::VisitSystemArrayCopyChar(HInvoke* invoke) { } static void GenHighestOneBit(LocationSummary* locations, - Primitive::Type type, + DataType::Type type, Mips64Assembler* assembler) { - DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong) << PrettyDescriptor(type); + DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64) << type; GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>(); GpuRegister out = locations->Out().AsRegister<GpuRegister>(); - if (type == Primitive::kPrimLong) { + if (type == DataType::Type::kInt64) { __ Dclz(TMP, in); __ LoadConst64(AT, INT64_C(0x8000000000000000)); __ Dsrlv(AT, AT, TMP); @@ -2281,7 +2281,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitIntegerHighestOneBit(HInvoke* invoke) } void IntrinsicCodeGeneratorMIPS64::VisitIntegerHighestOneBit(HInvoke* invoke) { - GenHighestOneBit(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler()); + GenHighestOneBit(invoke->GetLocations(), DataType::Type::kInt32, GetAssembler()); } // long java.lang.Long.highestOneBit(long) @@ -2290,18 +2290,18 @@ void IntrinsicLocationsBuilderMIPS64::VisitLongHighestOneBit(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS64::VisitLongHighestOneBit(HInvoke* invoke) { - GenHighestOneBit(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler()); + GenHighestOneBit(invoke->GetLocations(), DataType::Type::kInt64, GetAssembler()); } static void GenLowestOneBit(LocationSummary* locations, - Primitive::Type type, + DataType::Type type, Mips64Assembler* assembler) { - DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong) << PrettyDescriptor(type); + DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64) << type; GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>(); GpuRegister out = locations->Out().AsRegister<GpuRegister>(); - if (type == Primitive::kPrimLong) { + if (type == DataType::Type::kInt64) { __ Dsubu(TMP, ZERO, in); } else { __ Subu(TMP, ZERO, in); @@ -2315,7 +2315,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitIntegerLowestOneBit(HInvoke* invoke) } void IntrinsicCodeGeneratorMIPS64::VisitIntegerLowestOneBit(HInvoke* invoke) { - GenLowestOneBit(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler()); + GenLowestOneBit(invoke->GetLocations(), DataType::Type::kInt32, GetAssembler()); } // long java.lang.Long.lowestOneBit(long) @@ -2324,7 +2324,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitLongLowestOneBit(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS64::VisitLongLowestOneBit(HInvoke* invoke) { - GenLowestOneBit(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler()); + GenLowestOneBit(invoke->GetLocations(), DataType::Type::kInt64, GetAssembler()); } static void CreateFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) { @@ -2334,7 +2334,7 @@ static void CreateFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) { InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0))); - 
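GenHighestOneBit and GenLowestOneBit above lean on two small identities: the highest set bit is the top bit shifted right by the count of leading zeros (Dclz + Dsrlv), and the lowest set bit is x & -x (Dsubu from zero followed by an AND). A C++ sketch of the identities, with illustrative names and an explicit zero guard:

#include <cstdint>

uint64_t HighestOneBit(uint64_t x) {
  if (x == 0) return 0;  // __builtin_clzll(0) is undefined, so guard it here
  return UINT64_C(0x8000000000000000) >> __builtin_clzll(x);
}

uint64_t LowestOneBit(uint64_t x) {
  return x & (~x + 1);  // equivalently x & -x: isolates the lowest set bit
}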
locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimDouble)); + locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kFloat64)); } static void CreateFPFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) { @@ -2345,7 +2345,7 @@ static void CreateFPFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0))); locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1))); - locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimDouble)); + locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kFloat64)); } static void GenFPToFPCall(HInvoke* invoke, @@ -2533,7 +2533,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitIntegerValueOf(HInvoke* invoke) { IntrinsicVisitor::ComputeIntegerValueOfLocations( invoke, codegen_, - calling_convention.GetReturnLocation(Primitive::kPrimNot), + calling_convention.GetReturnLocation(DataType::Type::kReference), Location::RegisterLocation(calling_convention.GetRegisterAt(0))); } diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc index abd9014438..a5916228a8 100644 --- a/compiler/optimizing/intrinsics_x86.cc +++ b/compiler/optimizing/intrinsics_x86.cc @@ -97,7 +97,7 @@ class ReadBarrierSystemArrayCopySlowPathX86 : public SlowPathCode { DCHECK(instruction_->GetLocations()->Intrinsified()); DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kSystemArrayCopy); - int32_t element_size = Primitive::ComponentSize(Primitive::kPrimNot); + int32_t element_size = DataType::Size(DataType::Type::kReference); uint32_t offset = mirror::Array::DataOffset(element_size).Uint32Value(); Register src = locations->InAt(0).AsRegister<Register>(); @@ -282,17 +282,17 @@ static void CreateLongToLongLocations(ArenaAllocator* arena, HInvoke* invoke) { } static void GenReverseBytes(LocationSummary* locations, - Primitive::Type size, + DataType::Type size, X86Assembler* assembler) { Register out = locations->Out().AsRegister<Register>(); switch (size) { - case Primitive::kPrimShort: + case DataType::Type::kInt16: // TODO: Can be done with an xchg of 8b registers. This is straight from Quick. __ bswapl(out); __ sarl(out, Immediate(16)); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: __ bswapl(out); break; default: @@ -306,7 +306,7 @@ void IntrinsicLocationsBuilderX86::VisitIntegerReverseBytes(HInvoke* invoke) { } void IntrinsicCodeGeneratorX86::VisitIntegerReverseBytes(HInvoke* invoke) { - GenReverseBytes(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler()); + GenReverseBytes(invoke->GetLocations(), DataType::Type::kInt32, GetAssembler()); } void IntrinsicLocationsBuilderX86::VisitLongReverseBytes(HInvoke* invoke) { @@ -335,7 +335,7 @@ void IntrinsicLocationsBuilderX86::VisitShortReverseBytes(HInvoke* invoke) { } void IntrinsicCodeGeneratorX86::VisitShortReverseBytes(HInvoke* invoke) { - GenReverseBytes(invoke->GetLocations(), Primitive::kPrimShort, GetAssembler()); + GenReverseBytes(invoke->GetLocations(), DataType::Type::kInt16, GetAssembler()); } @@ -1307,7 +1307,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopyChar(HInvoke* invoke) { // Okay, everything checks out. Finally time to do the copy. // Check assumption that sizeof(Char) is 2 (used in scaling below). 
- const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar); + const size_t char_size = DataType::Size(DataType::Type::kUint16); DCHECK_EQ(char_size, 2u); const uint32_t data_offset = mirror::Array::DataOffset(char_size).Uint32Value(); @@ -1540,7 +1540,7 @@ static void GenerateStringIndexOf(HInvoke* invoke, __ Bind(slow_path->GetExitLabel()); return; } - } else if (code_point->GetType() != Primitive::kPrimChar) { + } else if (code_point->GetType() != DataType::Type::kUint16) { __ cmpl(search_value, Immediate(std::numeric_limits<uint16_t>::max())); slow_path = new (allocator) IntrinsicSlowPathX86(invoke); codegen->AddSlowPath(slow_path); @@ -1766,7 +1766,7 @@ void IntrinsicCodeGeneratorX86::VisitStringGetCharsNoCheck(HInvoke* invoke) { X86Assembler* assembler = GetAssembler(); LocationSummary* locations = invoke->GetLocations(); - size_t char_component_size = Primitive::ComponentSize(Primitive::kPrimChar); + size_t char_component_size = DataType::Size(DataType::Type::kUint16); // Location of data in char array buffer. const uint32_t data_offset = mirror::Array::DataOffset(char_component_size).Uint32Value(); // Location of char array data in string. @@ -1782,7 +1782,7 @@ void IntrinsicCodeGeneratorX86::VisitStringGetCharsNoCheck(HInvoke* invoke) { Register dstBegin = locations->InAt(4).AsRegister<Register>(); // Check assumption that sizeof(Char) is 2 (used in scaling below). - const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar); + const size_t char_size = DataType::Size(DataType::Type::kUint16); DCHECK_EQ(char_size, 2u); // Compute the number of chars (words) to move. @@ -1802,7 +1802,7 @@ void IntrinsicCodeGeneratorX86::VisitStringGetCharsNoCheck(HInvoke* invoke) { if (mirror::kUseStringCompression) { // Location of count in string const uint32_t count_offset = mirror::String::CountOffset().Uint32Value(); - const size_t c_char_size = Primitive::ComponentSize(Primitive::kPrimByte); + const size_t c_char_size = DataType::Size(DataType::Type::kInt8); DCHECK_EQ(c_char_size, 1u); __ pushl(EAX); __ cfi().AdjustCFAOffset(stack_adjust); @@ -1849,22 +1849,22 @@ void IntrinsicCodeGeneratorX86::VisitStringGetCharsNoCheck(HInvoke* invoke) { __ cfi().AdjustCFAOffset(-stack_adjust); } -static void GenPeek(LocationSummary* locations, Primitive::Type size, X86Assembler* assembler) { +static void GenPeek(LocationSummary* locations, DataType::Type size, X86Assembler* assembler) { Register address = locations->InAt(0).AsRegisterPairLow<Register>(); Location out_loc = locations->Out(); // x86 allows unaligned access. We do not have to check the input or use specific instructions // to avoid a SIGBUS. 
switch (size) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: __ movsxb(out_loc.AsRegister<Register>(), Address(address, 0)); break; - case Primitive::kPrimShort: + case DataType::Type::kInt16: __ movsxw(out_loc.AsRegister<Register>(), Address(address, 0)); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: __ movl(out_loc.AsRegister<Register>(), Address(address, 0)); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: __ movl(out_loc.AsRegisterPairLow<Register>(), Address(address, 0)); __ movl(out_loc.AsRegisterPairHigh<Register>(), Address(address, 4)); break; @@ -1879,7 +1879,7 @@ void IntrinsicLocationsBuilderX86::VisitMemoryPeekByte(HInvoke* invoke) { } void IntrinsicCodeGeneratorX86::VisitMemoryPeekByte(HInvoke* invoke) { - GenPeek(invoke->GetLocations(), Primitive::kPrimByte, GetAssembler()); + GenPeek(invoke->GetLocations(), DataType::Type::kInt8, GetAssembler()); } void IntrinsicLocationsBuilderX86::VisitMemoryPeekIntNative(HInvoke* invoke) { @@ -1887,7 +1887,7 @@ void IntrinsicLocationsBuilderX86::VisitMemoryPeekIntNative(HInvoke* invoke) { } void IntrinsicCodeGeneratorX86::VisitMemoryPeekIntNative(HInvoke* invoke) { - GenPeek(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler()); + GenPeek(invoke->GetLocations(), DataType::Type::kInt32, GetAssembler()); } void IntrinsicLocationsBuilderX86::VisitMemoryPeekLongNative(HInvoke* invoke) { @@ -1895,7 +1895,7 @@ void IntrinsicLocationsBuilderX86::VisitMemoryPeekLongNative(HInvoke* invoke) { } void IntrinsicCodeGeneratorX86::VisitMemoryPeekLongNative(HInvoke* invoke) { - GenPeek(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler()); + GenPeek(invoke->GetLocations(), DataType::Type::kInt64, GetAssembler()); } void IntrinsicLocationsBuilderX86::VisitMemoryPeekShortNative(HInvoke* invoke) { @@ -1903,30 +1903,30 @@ void IntrinsicLocationsBuilderX86::VisitMemoryPeekShortNative(HInvoke* invoke) { } void IntrinsicCodeGeneratorX86::VisitMemoryPeekShortNative(HInvoke* invoke) { - GenPeek(invoke->GetLocations(), Primitive::kPrimShort, GetAssembler()); + GenPeek(invoke->GetLocations(), DataType::Type::kInt16, GetAssembler()); } -static void CreateLongIntToVoidLocations(ArenaAllocator* arena, Primitive::Type size, +static void CreateLongIntToVoidLocations(ArenaAllocator* arena, DataType::Type size, HInvoke* invoke) { LocationSummary* locations = new (arena) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); HInstruction* value = invoke->InputAt(1); - if (size == Primitive::kPrimByte) { + if (size == DataType::Type::kInt8) { locations->SetInAt(1, Location::ByteRegisterOrConstant(EDX, value)); } else { locations->SetInAt(1, Location::RegisterOrConstant(value)); } } -static void GenPoke(LocationSummary* locations, Primitive::Type size, X86Assembler* assembler) { +static void GenPoke(LocationSummary* locations, DataType::Type size, X86Assembler* assembler) { Register address = locations->InAt(0).AsRegisterPairLow<Register>(); Location value_loc = locations->InAt(1); // x86 allows unaligned access. We do not have to check the input or use specific instructions // to avoid a SIGBUS. 
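GenPeek above relies on x86 tolerating unaligned loads, so the Memory.peek* intrinsics become plain moves from an arbitrary native address (with sign extension for the narrow types). In portable C++ the equivalent of the int case is a memcpy-style load; the wrapper name below is assumed:

#include <cstdint>
#include <cstring>

int32_t PeekIntNative(int64_t address) {
  int32_t result;
  // memcpy expresses an unaligned load without invoking undefined behaviour.
  std::memcpy(&result,
              reinterpret_cast<const void*>(static_cast<uintptr_t>(address)),
              sizeof(result));
  return result;
}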
switch (size) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: if (value_loc.IsConstant()) { __ movb(Address(address, 0), Immediate(value_loc.GetConstant()->AsIntConstant()->GetValue())); @@ -1934,7 +1934,7 @@ static void GenPoke(LocationSummary* locations, Primitive::Type size, X86Assembl __ movb(Address(address, 0), value_loc.AsRegister<ByteRegister>()); } break; - case Primitive::kPrimShort: + case DataType::Type::kInt16: if (value_loc.IsConstant()) { __ movw(Address(address, 0), Immediate(value_loc.GetConstant()->AsIntConstant()->GetValue())); @@ -1942,7 +1942,7 @@ static void GenPoke(LocationSummary* locations, Primitive::Type size, X86Assembl __ movw(Address(address, 0), value_loc.AsRegister<Register>()); } break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: if (value_loc.IsConstant()) { __ movl(Address(address, 0), Immediate(value_loc.GetConstant()->AsIntConstant()->GetValue())); @@ -1950,7 +1950,7 @@ static void GenPoke(LocationSummary* locations, Primitive::Type size, X86Assembl __ movl(Address(address, 0), value_loc.AsRegister<Register>()); } break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: if (value_loc.IsConstant()) { int64_t value = value_loc.GetConstant()->AsLongConstant()->GetValue(); __ movl(Address(address, 0), Immediate(Low32Bits(value))); @@ -1967,35 +1967,35 @@ static void GenPoke(LocationSummary* locations, Primitive::Type size, X86Assembl } void IntrinsicLocationsBuilderX86::VisitMemoryPokeByte(HInvoke* invoke) { - CreateLongIntToVoidLocations(arena_, Primitive::kPrimByte, invoke); + CreateLongIntToVoidLocations(arena_, DataType::Type::kInt8, invoke); } void IntrinsicCodeGeneratorX86::VisitMemoryPokeByte(HInvoke* invoke) { - GenPoke(invoke->GetLocations(), Primitive::kPrimByte, GetAssembler()); + GenPoke(invoke->GetLocations(), DataType::Type::kInt8, GetAssembler()); } void IntrinsicLocationsBuilderX86::VisitMemoryPokeIntNative(HInvoke* invoke) { - CreateLongIntToVoidLocations(arena_, Primitive::kPrimInt, invoke); + CreateLongIntToVoidLocations(arena_, DataType::Type::kInt32, invoke); } void IntrinsicCodeGeneratorX86::VisitMemoryPokeIntNative(HInvoke* invoke) { - GenPoke(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler()); + GenPoke(invoke->GetLocations(), DataType::Type::kInt32, GetAssembler()); } void IntrinsicLocationsBuilderX86::VisitMemoryPokeLongNative(HInvoke* invoke) { - CreateLongIntToVoidLocations(arena_, Primitive::kPrimLong, invoke); + CreateLongIntToVoidLocations(arena_, DataType::Type::kInt64, invoke); } void IntrinsicCodeGeneratorX86::VisitMemoryPokeLongNative(HInvoke* invoke) { - GenPoke(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler()); + GenPoke(invoke->GetLocations(), DataType::Type::kInt64, GetAssembler()); } void IntrinsicLocationsBuilderX86::VisitMemoryPokeShortNative(HInvoke* invoke) { - CreateLongIntToVoidLocations(arena_, Primitive::kPrimShort, invoke); + CreateLongIntToVoidLocations(arena_, DataType::Type::kInt16, invoke); } void IntrinsicCodeGeneratorX86::VisitMemoryPokeShortNative(HInvoke* invoke) { - GenPoke(invoke->GetLocations(), Primitive::kPrimShort, GetAssembler()); + GenPoke(invoke->GetLocations(), DataType::Type::kInt16, GetAssembler()); } void IntrinsicLocationsBuilderX86::VisitThreadCurrentThread(HInvoke* invoke) { @@ -2011,7 +2011,7 @@ void IntrinsicCodeGeneratorX86::VisitThreadCurrentThread(HInvoke* invoke) { } static void GenUnsafeGet(HInvoke* invoke, - Primitive::Type type, + DataType::Type type, bool is_volatile, CodeGeneratorX86* codegen) { X86Assembler* 
assembler = down_cast<X86Assembler*>(codegen->GetAssembler()); @@ -2023,13 +2023,13 @@ static void GenUnsafeGet(HInvoke* invoke, Location output_loc = locations->Out(); switch (type) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { Register output = output_loc.AsRegister<Register>(); __ movl(output, Address(base, offset, ScaleFactor::TIMES_1, 0)); break; } - case Primitive::kPrimNot: { + case DataType::Type::kReference: { Register output = output_loc.AsRegister<Register>(); if (kEmitCompilerReadBarrier) { if (kUseBakerReadBarrier) { @@ -2048,7 +2048,7 @@ static void GenUnsafeGet(HInvoke* invoke, break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { Register output_lo = output_loc.AsRegisterPairLow<Register>(); Register output_hi = output_loc.AsRegisterPairHigh<Register>(); if (is_volatile) { @@ -2073,7 +2073,7 @@ static void GenUnsafeGet(HInvoke* invoke, static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke, - Primitive::Type type, + DataType::Type type, bool is_volatile) { bool can_call = kEmitCompilerReadBarrier && (invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject || @@ -2089,7 +2089,7 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, locations->SetInAt(0, Location::NoLocation()); // Unused receiver. locations->SetInAt(1, Location::RequiresRegister()); locations->SetInAt(2, Location::RequiresRegister()); - if (type == Primitive::kPrimLong) { + if (type == DataType::Type::kInt64) { if (is_volatile) { // Need to use XMM to read volatile. locations->AddTemp(Location::RequiresFpuRegister()); @@ -2104,47 +2104,48 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, } void IntrinsicLocationsBuilderX86::VisitUnsafeGet(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimInt, /* is_volatile */ false); + CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32, /* is_volatile */ false); } void IntrinsicLocationsBuilderX86::VisitUnsafeGetVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimInt, /* is_volatile */ true); + CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32, /* is_volatile */ true); } void IntrinsicLocationsBuilderX86::VisitUnsafeGetLong(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimLong, /* is_volatile */ false); + CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64, /* is_volatile */ false); } void IntrinsicLocationsBuilderX86::VisitUnsafeGetLongVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimLong, /* is_volatile */ true); + CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64, /* is_volatile */ true); } void IntrinsicLocationsBuilderX86::VisitUnsafeGetObject(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimNot, /* is_volatile */ false); + CreateIntIntIntToIntLocations( + arena_, invoke, DataType::Type::kReference, /* is_volatile */ false); } void IntrinsicLocationsBuilderX86::VisitUnsafeGetObjectVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimNot, /* is_volatile */ true); + CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference, /* is_volatile */ true); } void IntrinsicCodeGeneratorX86::VisitUnsafeGet(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimInt, /* is_volatile */ false, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, 
codegen_); } void IntrinsicCodeGeneratorX86::VisitUnsafeGetVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimInt, /* is_volatile */ true, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_); } void IntrinsicCodeGeneratorX86::VisitUnsafeGetLong(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimLong, /* is_volatile */ false, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_); } void IntrinsicCodeGeneratorX86::VisitUnsafeGetLongVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimLong, /* is_volatile */ true, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_); } void IntrinsicCodeGeneratorX86::VisitUnsafeGetObject(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimNot, /* is_volatile */ false, codegen_); + GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_); } void IntrinsicCodeGeneratorX86::VisitUnsafeGetObjectVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimNot, /* is_volatile */ true, codegen_); + GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_); } static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* arena, - Primitive::Type type, + DataType::Type type, HInvoke* invoke, bool is_volatile) { LocationSummary* locations = new (arena) LocationSummary(invoke, @@ -2154,12 +2155,12 @@ static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* arena, locations->SetInAt(1, Location::RequiresRegister()); locations->SetInAt(2, Location::RequiresRegister()); locations->SetInAt(3, Location::RequiresRegister()); - if (type == Primitive::kPrimNot) { + if (type == DataType::Type::kReference) { // Need temp registers for card-marking. locations->AddTemp(Location::RequiresRegister()); // Possibly used for reference poisoning too. // Ensure the value is in a byte register. 
locations->AddTemp(Location::RegisterLocation(ECX)); - } else if (type == Primitive::kPrimLong && is_volatile) { + } else if (type == DataType::Type::kInt64 && is_volatile) { locations->AddTemp(Location::RequiresFpuRegister()); locations->AddTemp(Location::RequiresFpuRegister()); } @@ -2167,45 +2168,45 @@ static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* arena, void IntrinsicLocationsBuilderX86::VisitUnsafePut(HInvoke* invoke) { CreateIntIntIntIntToVoidPlusTempsLocations( - arena_, Primitive::kPrimInt, invoke, /* is_volatile */ false); + arena_, DataType::Type::kInt32, invoke, /* is_volatile */ false); } void IntrinsicLocationsBuilderX86::VisitUnsafePutOrdered(HInvoke* invoke) { CreateIntIntIntIntToVoidPlusTempsLocations( - arena_, Primitive::kPrimInt, invoke, /* is_volatile */ false); + arena_, DataType::Type::kInt32, invoke, /* is_volatile */ false); } void IntrinsicLocationsBuilderX86::VisitUnsafePutVolatile(HInvoke* invoke) { CreateIntIntIntIntToVoidPlusTempsLocations( - arena_, Primitive::kPrimInt, invoke, /* is_volatile */ true); + arena_, DataType::Type::kInt32, invoke, /* is_volatile */ true); } void IntrinsicLocationsBuilderX86::VisitUnsafePutObject(HInvoke* invoke) { CreateIntIntIntIntToVoidPlusTempsLocations( - arena_, Primitive::kPrimNot, invoke, /* is_volatile */ false); + arena_, DataType::Type::kReference, invoke, /* is_volatile */ false); } void IntrinsicLocationsBuilderX86::VisitUnsafePutObjectOrdered(HInvoke* invoke) { CreateIntIntIntIntToVoidPlusTempsLocations( - arena_, Primitive::kPrimNot, invoke, /* is_volatile */ false); + arena_, DataType::Type::kReference, invoke, /* is_volatile */ false); } void IntrinsicLocationsBuilderX86::VisitUnsafePutObjectVolatile(HInvoke* invoke) { CreateIntIntIntIntToVoidPlusTempsLocations( - arena_, Primitive::kPrimNot, invoke, /* is_volatile */ true); + arena_, DataType::Type::kReference, invoke, /* is_volatile */ true); } void IntrinsicLocationsBuilderX86::VisitUnsafePutLong(HInvoke* invoke) { CreateIntIntIntIntToVoidPlusTempsLocations( - arena_, Primitive::kPrimLong, invoke, /* is_volatile */ false); + arena_, DataType::Type::kInt64, invoke, /* is_volatile */ false); } void IntrinsicLocationsBuilderX86::VisitUnsafePutLongOrdered(HInvoke* invoke) { CreateIntIntIntIntToVoidPlusTempsLocations( - arena_, Primitive::kPrimLong, invoke, /* is_volatile */ false); + arena_, DataType::Type::kInt64, invoke, /* is_volatile */ false); } void IntrinsicLocationsBuilderX86::VisitUnsafePutLongVolatile(HInvoke* invoke) { CreateIntIntIntIntToVoidPlusTempsLocations( - arena_, Primitive::kPrimLong, invoke, /* is_volatile */ true); + arena_, DataType::Type::kInt64, invoke, /* is_volatile */ true); } // We don't care for ordered: it requires an AnyStore barrier, which is already given by the x86 // memory model. 
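The comment above about ordered puts can be restated in C++ memory-model terms: Unsafe.putOrdered* behaves like a release store, an ordinary x86 store already provides release semantics, so only the volatile variants need the extra fence that GenUnsafePut emits via MemoryFence(). A sketch of that distinction (function names assumed):

#include <atomic>
#include <cstdint>

void UnsafePutOrderedInt(std::atomic<int32_t>* field, int32_t value) {
  field->store(value, std::memory_order_release);  // compiles to a plain MOV on x86
}

void UnsafePutVolatileInt(std::atomic<int32_t>* field, int32_t value) {
  field->store(value, std::memory_order_seq_cst);  // MOV plus a full fence (or XCHG)
}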
static void GenUnsafePut(LocationSummary* locations, - Primitive::Type type, + DataType::Type type, bool is_volatile, CodeGeneratorX86* codegen) { X86Assembler* assembler = down_cast<X86Assembler*>(codegen->GetAssembler()); @@ -2213,7 +2214,7 @@ static void GenUnsafePut(LocationSummary* locations, Register offset = locations->InAt(2).AsRegisterPairLow<Register>(); Location value_loc = locations->InAt(3); - if (type == Primitive::kPrimLong) { + if (type == DataType::Type::kInt64) { Register value_lo = value_loc.AsRegisterPairLow<Register>(); Register value_hi = value_loc.AsRegisterPairHigh<Register>(); if (is_volatile) { @@ -2227,7 +2228,7 @@ static void GenUnsafePut(LocationSummary* locations, __ movl(Address(base, offset, ScaleFactor::TIMES_1, 0), value_lo); __ movl(Address(base, offset, ScaleFactor::TIMES_1, 4), value_hi); } - } else if (kPoisonHeapReferences && type == Primitive::kPrimNot) { + } else if (kPoisonHeapReferences && type == DataType::Type::kReference) { Register temp = locations->GetTemp(0).AsRegister<Register>(); __ movl(temp, value_loc.AsRegister<Register>()); __ PoisonHeapReference(temp); @@ -2240,7 +2241,7 @@ static void GenUnsafePut(LocationSummary* locations, codegen->MemoryFence(); } - if (type == Primitive::kPrimNot) { + if (type == DataType::Type::kReference) { bool value_can_be_null = true; // TODO: Worth finding out this information? codegen->MarkGCCard(locations->GetTemp(0).AsRegister<Register>(), locations->GetTemp(1).AsRegister<Register>(), @@ -2251,35 +2252,38 @@ static void GenUnsafePut(LocationSummary* locations, } void IntrinsicCodeGeneratorX86::VisitUnsafePut(HInvoke* invoke) { - GenUnsafePut(invoke->GetLocations(), Primitive::kPrimInt, /* is_volatile */ false, codegen_); + GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ false, codegen_); } void IntrinsicCodeGeneratorX86::VisitUnsafePutOrdered(HInvoke* invoke) { - GenUnsafePut(invoke->GetLocations(), Primitive::kPrimInt, /* is_volatile */ false, codegen_); + GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ false, codegen_); } void IntrinsicCodeGeneratorX86::VisitUnsafePutVolatile(HInvoke* invoke) { - GenUnsafePut(invoke->GetLocations(), Primitive::kPrimInt, /* is_volatile */ true, codegen_); + GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ true, codegen_); } void IntrinsicCodeGeneratorX86::VisitUnsafePutObject(HInvoke* invoke) { - GenUnsafePut(invoke->GetLocations(), Primitive::kPrimNot, /* is_volatile */ false, codegen_); + GenUnsafePut( + invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ false, codegen_); } void IntrinsicCodeGeneratorX86::VisitUnsafePutObjectOrdered(HInvoke* invoke) { - GenUnsafePut(invoke->GetLocations(), Primitive::kPrimNot, /* is_volatile */ false, codegen_); + GenUnsafePut( + invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ false, codegen_); } void IntrinsicCodeGeneratorX86::VisitUnsafePutObjectVolatile(HInvoke* invoke) { - GenUnsafePut(invoke->GetLocations(), Primitive::kPrimNot, /* is_volatile */ true, codegen_); + GenUnsafePut( + invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ true, codegen_); } void IntrinsicCodeGeneratorX86::VisitUnsafePutLong(HInvoke* invoke) { - GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, /* is_volatile */ false, codegen_); + GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ false, codegen_); } void IntrinsicCodeGeneratorX86::VisitUnsafePutLongOrdered(HInvoke* 
invoke) { - GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, /* is_volatile */ false, codegen_); + GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ false, codegen_); } void IntrinsicCodeGeneratorX86::VisitUnsafePutLongVolatile(HInvoke* invoke) { - GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, /* is_volatile */ true, codegen_); + GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ true, codegen_); } static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena, - Primitive::Type type, + DataType::Type type, HInvoke* invoke) { bool can_call = kEmitCompilerReadBarrier && kUseBakerReadBarrier && @@ -2296,7 +2300,7 @@ static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena, locations->SetInAt(2, Location::RequiresRegister()); // Expected value must be in EAX or EDX:EAX. // For long, new value must be in ECX:EBX. - if (type == Primitive::kPrimLong) { + if (type == DataType::Type::kInt64) { locations->SetInAt(3, Location::RegisterPairLocation(EAX, EDX)); locations->SetInAt(4, Location::RegisterPairLocation(EBX, ECX)); } else { @@ -2306,7 +2310,7 @@ static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena, // Force a byte register for the output. locations->SetOut(Location::RegisterLocation(EAX)); - if (type == Primitive::kPrimNot) { + if (type == DataType::Type::kReference) { // Need temporary registers for card-marking, and possibly for // (Baker) read barrier. locations->AddTemp(Location::RequiresRegister()); // Possibly used for reference poisoning too. @@ -2316,11 +2320,11 @@ static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena, } void IntrinsicLocationsBuilderX86::VisitUnsafeCASInt(HInvoke* invoke) { - CreateIntIntIntIntIntToInt(arena_, Primitive::kPrimInt, invoke); + CreateIntIntIntIntIntToInt(arena_, DataType::Type::kInt32, invoke); } void IntrinsicLocationsBuilderX86::VisitUnsafeCASLong(HInvoke* invoke) { - CreateIntIntIntIntIntToInt(arena_, Primitive::kPrimLong, invoke); + CreateIntIntIntIntIntToInt(arena_, DataType::Type::kInt64, invoke); } void IntrinsicLocationsBuilderX86::VisitUnsafeCASObject(HInvoke* invoke) { @@ -2330,10 +2334,10 @@ void IntrinsicLocationsBuilderX86::VisitUnsafeCASObject(HInvoke* invoke) { return; } - CreateIntIntIntIntIntToInt(arena_, Primitive::kPrimNot, invoke); + CreateIntIntIntIntIntToInt(arena_, DataType::Type::kReference, invoke); } -static void GenCAS(Primitive::Type type, HInvoke* invoke, CodeGeneratorX86* codegen) { +static void GenCAS(DataType::Type type, HInvoke* invoke, CodeGeneratorX86* codegen) { X86Assembler* assembler = down_cast<X86Assembler*>(codegen->GetAssembler()); LocationSummary* locations = invoke->GetLocations(); @@ -2345,7 +2349,7 @@ static void GenCAS(Primitive::Type type, HInvoke* invoke, CodeGeneratorX86* code // The address of the field within the holding object. Address field_addr(base, offset, ScaleFactor::TIMES_1, 0); - if (type == Primitive::kPrimNot) { + if (type == DataType::Type::kReference) { // The only read barrier implementation supporting the // UnsafeCASObject intrinsic is the Baker-style read barriers. DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier); @@ -2426,12 +2430,12 @@ static void GenCAS(Primitive::Type type, HInvoke* invoke, CodeGeneratorX86* code // `expected`, as it is the same as register `out` (EAX). } } else { - if (type == Primitive::kPrimInt) { + if (type == DataType::Type::kInt32) { // Ensure the expected value is in EAX (required by the CMPXCHG // instruction). 
DCHECK_EQ(locations->InAt(3).AsRegister<Register>(), EAX); __ LockCmpxchgl(field_addr, locations->InAt(4).AsRegister<Register>()); - } else if (type == Primitive::kPrimLong) { + } else if (type == DataType::Type::kInt64) { // Ensure the expected value is in EAX:EDX and that the new // value is in EBX:ECX (required by the CMPXCHG8B instruction). DCHECK_EQ(locations->InAt(3).AsRegisterPairLow<Register>(), EAX); @@ -2453,11 +2457,11 @@ static void GenCAS(Primitive::Type type, HInvoke* invoke, CodeGeneratorX86* code } void IntrinsicCodeGeneratorX86::VisitUnsafeCASInt(HInvoke* invoke) { - GenCAS(Primitive::kPrimInt, invoke, codegen_); + GenCAS(DataType::Type::kInt32, invoke, codegen_); } void IntrinsicCodeGeneratorX86::VisitUnsafeCASLong(HInvoke* invoke) { - GenCAS(Primitive::kPrimLong, invoke, codegen_); + GenCAS(DataType::Type::kInt64, invoke, codegen_); } void IntrinsicCodeGeneratorX86::VisitUnsafeCASObject(HInvoke* invoke) { @@ -2465,7 +2469,7 @@ void IntrinsicCodeGeneratorX86::VisitUnsafeCASObject(HInvoke* invoke) { // UnsafeCASObject intrinsic is the Baker-style read barriers. DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier); - GenCAS(Primitive::kPrimNot, invoke, codegen_); + GenCAS(DataType::Type::kReference, invoke, codegen_); } void IntrinsicLocationsBuilderX86::VisitIntegerReverse(HInvoke* invoke) { @@ -2824,16 +2828,16 @@ static bool IsSameInput(HInstruction* instruction, size_t input0, size_t input1) // Compute base address for the System.arraycopy intrinsic in `base`. static void GenSystemArrayCopyBaseAddress(X86Assembler* assembler, - Primitive::Type type, + DataType::Type type, const Register& array, const Location& pos, const Register& base) { // This routine is only used by the SystemArrayCopy intrinsic at the - // moment. We can allow Primitive::kPrimNot as `type` to implement + // moment. We can allow DataType::Type::kReference as `type` to implement // the SystemArrayCopyChar intrinsic. - DCHECK_EQ(type, Primitive::kPrimNot); - const int32_t element_size = Primitive::ComponentSize(type); - const ScaleFactor scale_factor = static_cast<ScaleFactor>(Primitive::ComponentSizeShift(type)); + DCHECK_EQ(type, DataType::Type::kReference); + const int32_t element_size = DataType::Size(type); + const ScaleFactor scale_factor = static_cast<ScaleFactor>(DataType::SizeShift(type)); const uint32_t data_offset = mirror::Array::DataOffset(element_size).Uint32Value(); if (pos.IsConstant()) { @@ -2846,16 +2850,16 @@ static void GenSystemArrayCopyBaseAddress(X86Assembler* assembler, // Compute end source address for the System.arraycopy intrinsic in `end`. static void GenSystemArrayCopyEndAddress(X86Assembler* assembler, - Primitive::Type type, + DataType::Type type, const Location& copy_length, const Register& base, const Register& end) { // This routine is only used by the SystemArrayCopy intrinsic at the - // moment. We can allow Primitive::kPrimNot as `type` to implement + // moment. We can allow DataType::Type::kReference as `type` to implement // the SystemArrayCopyChar intrinsic. 
- DCHECK_EQ(type, Primitive::kPrimNot); - const int32_t element_size = Primitive::ComponentSize(type); - const ScaleFactor scale_factor = static_cast<ScaleFactor>(Primitive::ComponentSizeShift(type)); + DCHECK_EQ(type, DataType::Type::kReference); + const int32_t element_size = DataType::Size(type); + const ScaleFactor scale_factor = static_cast<ScaleFactor>(DataType::SizeShift(type)); if (copy_length.IsConstant()) { int32_t constant = copy_length.GetConstant()->AsIntConstant()->GetValue(); @@ -3169,8 +3173,8 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) { __ j(kNotEqual, intrinsic_slow_path->GetEntryLabel()); } - const Primitive::Type type = Primitive::kPrimNot; - const int32_t element_size = Primitive::ComponentSize(type); + const DataType::Type type = DataType::Type::kReference; + const int32_t element_size = DataType::Size(type); // Compute the base source address in `temp1`. GenSystemArrayCopyBaseAddress(GetAssembler(), type, src, src_pos, temp1); diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc index 7798c0d99e..a2545ee3d8 100644 --- a/compiler/optimizing/intrinsics_x86_64.cc +++ b/compiler/optimizing/intrinsics_x86_64.cc @@ -90,7 +90,7 @@ class ReadBarrierSystemArrayCopySlowPathX86_64 : public SlowPathCode { DCHECK(instruction_->GetLocations()->Intrinsified()); DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kSystemArrayCopy); - int32_t element_size = Primitive::ComponentSize(Primitive::kPrimNot); + int32_t element_size = DataType::Size(DataType::Type::kReference); CpuRegister src_curr_addr = locations->GetTemp(0).AsRegister<CpuRegister>(); CpuRegister dst_curr_addr = locations->GetTemp(1).AsRegister<CpuRegister>(); @@ -193,20 +193,20 @@ static void CreateIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) { } static void GenReverseBytes(LocationSummary* locations, - Primitive::Type size, + DataType::Type size, X86_64Assembler* assembler) { CpuRegister out = locations->Out().AsRegister<CpuRegister>(); switch (size) { - case Primitive::kPrimShort: + case DataType::Type::kInt16: // TODO: Can be done with an xchg of 8b registers. This is straight from Quick. 
__ bswapl(out); __ sarl(out, Immediate(16)); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: __ bswapl(out); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: __ bswapq(out); break; default: @@ -220,7 +220,7 @@ void IntrinsicLocationsBuilderX86_64::VisitIntegerReverseBytes(HInvoke* invoke) } void IntrinsicCodeGeneratorX86_64::VisitIntegerReverseBytes(HInvoke* invoke) { - GenReverseBytes(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler()); + GenReverseBytes(invoke->GetLocations(), DataType::Type::kInt32, GetAssembler()); } void IntrinsicLocationsBuilderX86_64::VisitLongReverseBytes(HInvoke* invoke) { @@ -228,7 +228,7 @@ void IntrinsicLocationsBuilderX86_64::VisitLongReverseBytes(HInvoke* invoke) { } void IntrinsicCodeGeneratorX86_64::VisitLongReverseBytes(HInvoke* invoke) { - GenReverseBytes(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler()); + GenReverseBytes(invoke->GetLocations(), DataType::Type::kInt64, GetAssembler()); } void IntrinsicLocationsBuilderX86_64::VisitShortReverseBytes(HInvoke* invoke) { @@ -236,7 +236,7 @@ void IntrinsicLocationsBuilderX86_64::VisitShortReverseBytes(HInvoke* invoke) { } void IntrinsicCodeGeneratorX86_64::VisitShortReverseBytes(HInvoke* invoke) { - GenReverseBytes(invoke->GetLocations(), Primitive::kPrimShort, GetAssembler()); + GenReverseBytes(invoke->GetLocations(), DataType::Type::kInt16, GetAssembler()); } @@ -1084,7 +1084,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopyChar(HInvoke* invoke) { // Okay, everything checks out. Finally time to do the copy. // Check assumption that sizeof(Char) is 2 (used in scaling below). - const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar); + const size_t char_size = DataType::Size(DataType::Type::kUint16); DCHECK_EQ(char_size, 2u); const uint32_t data_offset = mirror::Array::DataOffset(char_size).Uint32Value(); @@ -1125,7 +1125,7 @@ void IntrinsicLocationsBuilderX86_64::VisitSystemArrayCopy(HInvoke* invoke) { // source address for the System.arraycopy intrinsic in `src_base`, // `dst_base` and `src_end` respectively. static void GenSystemArrayCopyAddresses(X86_64Assembler* assembler, - Primitive::Type type, + DataType::Type type, const CpuRegister& src, const Location& src_pos, const CpuRegister& dst, @@ -1135,9 +1135,9 @@ static void GenSystemArrayCopyAddresses(X86_64Assembler* assembler, const CpuRegister& dst_base, const CpuRegister& src_end) { // This routine is only used by the SystemArrayCopy intrinsic. - DCHECK_EQ(type, Primitive::kPrimNot); - const int32_t element_size = Primitive::ComponentSize(type); - const ScaleFactor scale_factor = static_cast<ScaleFactor>(Primitive::ComponentSizeShift(type)); + DCHECK_EQ(type, DataType::Type::kReference); + const int32_t element_size = DataType::Size(type); + const ScaleFactor scale_factor = static_cast<ScaleFactor>(DataType::SizeShift(type)); const uint32_t data_offset = mirror::Array::DataOffset(element_size).Uint32Value(); if (src_pos.IsConstant()) { @@ -1410,8 +1410,8 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) { __ j(kNotEqual, intrinsic_slow_path->GetEntryLabel()); } - const Primitive::Type type = Primitive::kPrimNot; - const int32_t element_size = Primitive::ComponentSize(type); + const DataType::Type type = DataType::Type::kReference; + const int32_t element_size = DataType::Size(type); // Compute base source address, base destination address, and end // source address in `temp1`, `temp2` and `temp3` respectively. 
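The SystemArrayCopy address helpers above now derive the element size and the addressing-mode scale from DataType::Size and DataType::SizeShift instead of Primitive::ComponentSize and Primitive::ComponentSizeShift. A rough sketch of the address arithmetic those helpers emit, under the assumption that element storage begins at `data_offset` (mirror::Array::DataOffset) inside the array object:

    // Illustrative arithmetic only; register allocation and the actual LEA
    // encodings live in the hunks above.
    #include <cstddef>
    #include <cstdint>

    namespace illustrative {

    inline uintptr_t ArrayCopyBaseAddress(uintptr_t array, int32_t pos,
                                          size_t element_size, uint32_t data_offset) {
      // base = &array[pos]: start of the region to copy from (or into).
      return array + data_offset + static_cast<size_t>(pos) * element_size;
    }

    inline uintptr_t ArrayCopyEndAddress(uintptr_t base, int32_t copy_length,
                                         size_t element_size) {
      // end = base + copy_length elements: one past the last element to copy.
      return base + static_cast<size_t>(copy_length) * element_size;
    }

    }  // namespace illustrative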
@@ -1705,7 +1705,7 @@ static void GenerateStringIndexOf(HInvoke* invoke, __ Bind(slow_path->GetExitLabel()); return; } - } else if (code_point->GetType() != Primitive::kPrimChar) { + } else if (code_point->GetType() != DataType::Type::kUint16) { __ cmpl(search_value, Immediate(std::numeric_limits<uint16_t>::max())); slow_path = new (allocator) IntrinsicSlowPathX86_64(invoke); codegen->AddSlowPath(slow_path); @@ -1922,7 +1922,7 @@ void IntrinsicCodeGeneratorX86_64::VisitStringGetCharsNoCheck(HInvoke* invoke) { X86_64Assembler* assembler = GetAssembler(); LocationSummary* locations = invoke->GetLocations(); - size_t char_component_size = Primitive::ComponentSize(Primitive::kPrimChar); + size_t char_component_size = DataType::Size(DataType::Type::kUint16); // Location of data in char array buffer. const uint32_t data_offset = mirror::Array::DataOffset(char_component_size).Uint32Value(); // Location of char array data in string. @@ -1938,7 +1938,7 @@ void IntrinsicCodeGeneratorX86_64::VisitStringGetCharsNoCheck(HInvoke* invoke) { CpuRegister dstBegin = locations->InAt(4).AsRegister<CpuRegister>(); // Check assumption that sizeof(Char) is 2 (used in scaling below). - const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar); + const size_t char_size = DataType::Size(DataType::Type::kUint16); DCHECK_EQ(char_size, 2u); NearLabel done; @@ -1952,7 +1952,7 @@ void IntrinsicCodeGeneratorX86_64::VisitStringGetCharsNoCheck(HInvoke* invoke) { } if (mirror::kUseStringCompression) { NearLabel copy_uncompressed, copy_loop; - const size_t c_char_size = Primitive::ComponentSize(Primitive::kPrimByte); + const size_t c_char_size = DataType::Size(DataType::Type::kInt8); DCHECK_EQ(c_char_size, 1u); // Location of count in string. const uint32_t count_offset = mirror::String::CountOffset().Uint32Value(); @@ -1993,22 +1993,22 @@ void IntrinsicCodeGeneratorX86_64::VisitStringGetCharsNoCheck(HInvoke* invoke) { __ Bind(&done); } -static void GenPeek(LocationSummary* locations, Primitive::Type size, X86_64Assembler* assembler) { +static void GenPeek(LocationSummary* locations, DataType::Type size, X86_64Assembler* assembler) { CpuRegister address = locations->InAt(0).AsRegister<CpuRegister>(); CpuRegister out = locations->Out().AsRegister<CpuRegister>(); // == address, here for clarity. // x86 allows unaligned access. We do not have to check the input or use specific instructions // to avoid a SIGBUS. 
switch (size) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: __ movsxb(out, Address(address, 0)); break; - case Primitive::kPrimShort: + case DataType::Type::kInt16: __ movsxw(out, Address(address, 0)); break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: __ movl(out, Address(address, 0)); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: __ movq(out, Address(address, 0)); break; default: @@ -2022,7 +2022,7 @@ void IntrinsicLocationsBuilderX86_64::VisitMemoryPeekByte(HInvoke* invoke) { } void IntrinsicCodeGeneratorX86_64::VisitMemoryPeekByte(HInvoke* invoke) { - GenPeek(invoke->GetLocations(), Primitive::kPrimByte, GetAssembler()); + GenPeek(invoke->GetLocations(), DataType::Type::kInt8, GetAssembler()); } void IntrinsicLocationsBuilderX86_64::VisitMemoryPeekIntNative(HInvoke* invoke) { @@ -2030,7 +2030,7 @@ void IntrinsicLocationsBuilderX86_64::VisitMemoryPeekIntNative(HInvoke* invoke) } void IntrinsicCodeGeneratorX86_64::VisitMemoryPeekIntNative(HInvoke* invoke) { - GenPeek(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler()); + GenPeek(invoke->GetLocations(), DataType::Type::kInt32, GetAssembler()); } void IntrinsicLocationsBuilderX86_64::VisitMemoryPeekLongNative(HInvoke* invoke) { @@ -2038,7 +2038,7 @@ void IntrinsicLocationsBuilderX86_64::VisitMemoryPeekLongNative(HInvoke* invoke) } void IntrinsicCodeGeneratorX86_64::VisitMemoryPeekLongNative(HInvoke* invoke) { - GenPeek(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler()); + GenPeek(invoke->GetLocations(), DataType::Type::kInt64, GetAssembler()); } void IntrinsicLocationsBuilderX86_64::VisitMemoryPeekShortNative(HInvoke* invoke) { @@ -2046,7 +2046,7 @@ void IntrinsicLocationsBuilderX86_64::VisitMemoryPeekShortNative(HInvoke* invoke } void IntrinsicCodeGeneratorX86_64::VisitMemoryPeekShortNative(HInvoke* invoke) { - GenPeek(invoke->GetLocations(), Primitive::kPrimShort, GetAssembler()); + GenPeek(invoke->GetLocations(), DataType::Type::kInt16, GetAssembler()); } static void CreateIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) { @@ -2057,13 +2057,13 @@ static void CreateIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) locations->SetInAt(1, Location::RegisterOrInt32Constant(invoke->InputAt(1))); } -static void GenPoke(LocationSummary* locations, Primitive::Type size, X86_64Assembler* assembler) { +static void GenPoke(LocationSummary* locations, DataType::Type size, X86_64Assembler* assembler) { CpuRegister address = locations->InAt(0).AsRegister<CpuRegister>(); Location value = locations->InAt(1); // x86 allows unaligned access. We do not have to check the input or use specific instructions // to avoid a SIGBUS. 
switch (size) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: if (value.IsConstant()) { __ movb(Address(address, 0), Immediate(CodeGenerator::GetInt32ValueOf(value.GetConstant()))); @@ -2071,7 +2071,7 @@ static void GenPoke(LocationSummary* locations, Primitive::Type size, X86_64Asse __ movb(Address(address, 0), value.AsRegister<CpuRegister>()); } break; - case Primitive::kPrimShort: + case DataType::Type::kInt16: if (value.IsConstant()) { __ movw(Address(address, 0), Immediate(CodeGenerator::GetInt32ValueOf(value.GetConstant()))); @@ -2079,7 +2079,7 @@ static void GenPoke(LocationSummary* locations, Primitive::Type size, X86_64Asse __ movw(Address(address, 0), value.AsRegister<CpuRegister>()); } break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: if (value.IsConstant()) { __ movl(Address(address, 0), Immediate(CodeGenerator::GetInt32ValueOf(value.GetConstant()))); @@ -2087,7 +2087,7 @@ static void GenPoke(LocationSummary* locations, Primitive::Type size, X86_64Asse __ movl(Address(address, 0), value.AsRegister<CpuRegister>()); } break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: if (value.IsConstant()) { int64_t v = value.GetConstant()->AsLongConstant()->GetValue(); DCHECK(IsInt<32>(v)); @@ -2108,7 +2108,7 @@ void IntrinsicLocationsBuilderX86_64::VisitMemoryPokeByte(HInvoke* invoke) { } void IntrinsicCodeGeneratorX86_64::VisitMemoryPokeByte(HInvoke* invoke) { - GenPoke(invoke->GetLocations(), Primitive::kPrimByte, GetAssembler()); + GenPoke(invoke->GetLocations(), DataType::Type::kInt8, GetAssembler()); } void IntrinsicLocationsBuilderX86_64::VisitMemoryPokeIntNative(HInvoke* invoke) { @@ -2116,7 +2116,7 @@ void IntrinsicLocationsBuilderX86_64::VisitMemoryPokeIntNative(HInvoke* invoke) } void IntrinsicCodeGeneratorX86_64::VisitMemoryPokeIntNative(HInvoke* invoke) { - GenPoke(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler()); + GenPoke(invoke->GetLocations(), DataType::Type::kInt32, GetAssembler()); } void IntrinsicLocationsBuilderX86_64::VisitMemoryPokeLongNative(HInvoke* invoke) { @@ -2124,7 +2124,7 @@ void IntrinsicLocationsBuilderX86_64::VisitMemoryPokeLongNative(HInvoke* invoke) } void IntrinsicCodeGeneratorX86_64::VisitMemoryPokeLongNative(HInvoke* invoke) { - GenPoke(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler()); + GenPoke(invoke->GetLocations(), DataType::Type::kInt64, GetAssembler()); } void IntrinsicLocationsBuilderX86_64::VisitMemoryPokeShortNative(HInvoke* invoke) { @@ -2132,7 +2132,7 @@ void IntrinsicLocationsBuilderX86_64::VisitMemoryPokeShortNative(HInvoke* invoke } void IntrinsicCodeGeneratorX86_64::VisitMemoryPokeShortNative(HInvoke* invoke) { - GenPoke(invoke->GetLocations(), Primitive::kPrimShort, GetAssembler()); + GenPoke(invoke->GetLocations(), DataType::Type::kInt16, GetAssembler()); } void IntrinsicLocationsBuilderX86_64::VisitThreadCurrentThread(HInvoke* invoke) { @@ -2149,7 +2149,7 @@ void IntrinsicCodeGeneratorX86_64::VisitThreadCurrentThread(HInvoke* invoke) { } static void GenUnsafeGet(HInvoke* invoke, - Primitive::Type type, + DataType::Type type, bool is_volatile ATTRIBUTE_UNUSED, CodeGeneratorX86_64* codegen) { X86_64Assembler* assembler = down_cast<X86_64Assembler*>(codegen->GetAssembler()); @@ -2162,11 +2162,11 @@ static void GenUnsafeGet(HInvoke* invoke, CpuRegister output = output_loc.AsRegister<CpuRegister>(); switch (type) { - case Primitive::kPrimInt: + case DataType::Type::kInt32: __ movl(output, Address(base, offset, ScaleFactor::TIMES_1, 0)); break; - case Primitive::kPrimNot: { 
+ case DataType::Type::kReference: { if (kEmitCompilerReadBarrier) { if (kUseBakerReadBarrier) { Address src(base, offset, ScaleFactor::TIMES_1, 0); @@ -2184,7 +2184,7 @@ static void GenUnsafeGet(HInvoke* invoke, break; } - case Primitive::kPrimLong: + case DataType::Type::kInt64: __ movq(output, Address(base, offset, ScaleFactor::TIMES_1, 0)); break; @@ -2234,27 +2234,27 @@ void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetObjectVolatile(HInvoke* invo void IntrinsicCodeGeneratorX86_64::VisitUnsafeGet(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimInt, /* is_volatile */ false, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimInt, /* is_volatile */ true, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetLong(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimLong, /* is_volatile */ false, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetLongVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimLong, /* is_volatile */ true, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetObject(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimNot, /* is_volatile */ false, codegen_); + GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, Primitive::kPrimNot, /* is_volatile */ true, codegen_); + GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_); } static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* arena, - Primitive::Type type, + DataType::Type type, HInvoke* invoke) { LocationSummary* locations = new (arena) LocationSummary(invoke, LocationSummary::kNoCall, @@ -2263,7 +2263,7 @@ static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* arena, locations->SetInAt(1, Location::RequiresRegister()); locations->SetInAt(2, Location::RequiresRegister()); locations->SetInAt(3, Location::RequiresRegister()); - if (type == Primitive::kPrimNot) { + if (type == DataType::Type::kReference) { // Need temp registers for card-marking. locations->AddTemp(Location::RequiresRegister()); // Possibly used for reference poisoning too. 
locations->AddTemp(Location::RequiresRegister()); @@ -2271,45 +2271,45 @@ static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* arena, } void IntrinsicLocationsBuilderX86_64::VisitUnsafePut(HInvoke* invoke) { - CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimInt, invoke); + CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kInt32, invoke); } void IntrinsicLocationsBuilderX86_64::VisitUnsafePutOrdered(HInvoke* invoke) { - CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimInt, invoke); + CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kInt32, invoke); } void IntrinsicLocationsBuilderX86_64::VisitUnsafePutVolatile(HInvoke* invoke) { - CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimInt, invoke); + CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kInt32, invoke); } void IntrinsicLocationsBuilderX86_64::VisitUnsafePutObject(HInvoke* invoke) { - CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimNot, invoke); + CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kReference, invoke); } void IntrinsicLocationsBuilderX86_64::VisitUnsafePutObjectOrdered(HInvoke* invoke) { - CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimNot, invoke); + CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kReference, invoke); } void IntrinsicLocationsBuilderX86_64::VisitUnsafePutObjectVolatile(HInvoke* invoke) { - CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimNot, invoke); + CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kReference, invoke); } void IntrinsicLocationsBuilderX86_64::VisitUnsafePutLong(HInvoke* invoke) { - CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimLong, invoke); + CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kInt64, invoke); } void IntrinsicLocationsBuilderX86_64::VisitUnsafePutLongOrdered(HInvoke* invoke) { - CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimLong, invoke); + CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kInt64, invoke); } void IntrinsicLocationsBuilderX86_64::VisitUnsafePutLongVolatile(HInvoke* invoke) { - CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimLong, invoke); + CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kInt64, invoke); } // We don't care for ordered: it requires an AnyStore barrier, which is already given by the x86 // memory model. 
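As the comment above notes, an ordered put needs only an AnyStore barrier, which the x86 memory model already provides, so GenUnsafePut (next hunk) emits codegen->MemoryFence() only for the volatile flavor. A hedged C++ analogue of that shape, illustrating the ordering contract rather than the generated assembly:

    #include <atomic>
    #include <cstdint>

    namespace illustrative {

    inline void UnsafePutInt32(std::atomic<int32_t>* field, int32_t value, bool is_volatile) {
      // The generated code is a plain 32-bit store; on x86 that already carries
      // release semantics, which is all the "ordered" variant requires.
      field->store(value, std::memory_order_release);
      if (is_volatile) {
        // Corresponds to codegen->MemoryFence() after the store: only the volatile
        // variant pays for a full (StoreLoad) barrier.
        std::atomic_thread_fence(std::memory_order_seq_cst);
      }
    }

    }  // namespace illustrative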
-static void GenUnsafePut(LocationSummary* locations, Primitive::Type type, bool is_volatile, +static void GenUnsafePut(LocationSummary* locations, DataType::Type type, bool is_volatile, CodeGeneratorX86_64* codegen) { X86_64Assembler* assembler = down_cast<X86_64Assembler*>(codegen->GetAssembler()); CpuRegister base = locations->InAt(1).AsRegister<CpuRegister>(); CpuRegister offset = locations->InAt(2).AsRegister<CpuRegister>(); CpuRegister value = locations->InAt(3).AsRegister<CpuRegister>(); - if (type == Primitive::kPrimLong) { + if (type == DataType::Type::kInt64) { __ movq(Address(base, offset, ScaleFactor::TIMES_1, 0), value); - } else if (kPoisonHeapReferences && type == Primitive::kPrimNot) { + } else if (kPoisonHeapReferences && type == DataType::Type::kReference) { CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>(); __ movl(temp, value); __ PoisonHeapReference(temp); @@ -2322,7 +2322,7 @@ static void GenUnsafePut(LocationSummary* locations, Primitive::Type type, bool codegen->MemoryFence(); } - if (type == Primitive::kPrimNot) { + if (type == DataType::Type::kReference) { bool value_can_be_null = true; // TODO: Worth finding out this information? codegen->MarkGCCard(locations->GetTemp(0).AsRegister<CpuRegister>(), locations->GetTemp(1).AsRegister<CpuRegister>(), @@ -2333,35 +2333,38 @@ static void GenUnsafePut(LocationSummary* locations, Primitive::Type type, bool } void IntrinsicCodeGeneratorX86_64::VisitUnsafePut(HInvoke* invoke) { - GenUnsafePut(invoke->GetLocations(), Primitive::kPrimInt, /* is_volatile */ false, codegen_); + GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ false, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitUnsafePutOrdered(HInvoke* invoke) { - GenUnsafePut(invoke->GetLocations(), Primitive::kPrimInt, /* is_volatile */ false, codegen_); + GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ false, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitUnsafePutVolatile(HInvoke* invoke) { - GenUnsafePut(invoke->GetLocations(), Primitive::kPrimInt, /* is_volatile */ true, codegen_); + GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ true, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitUnsafePutObject(HInvoke* invoke) { - GenUnsafePut(invoke->GetLocations(), Primitive::kPrimNot, /* is_volatile */ false, codegen_); + GenUnsafePut( + invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ false, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitUnsafePutObjectOrdered(HInvoke* invoke) { - GenUnsafePut(invoke->GetLocations(), Primitive::kPrimNot, /* is_volatile */ false, codegen_); + GenUnsafePut( + invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ false, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitUnsafePutObjectVolatile(HInvoke* invoke) { - GenUnsafePut(invoke->GetLocations(), Primitive::kPrimNot, /* is_volatile */ true, codegen_); + GenUnsafePut( + invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ true, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitUnsafePutLong(HInvoke* invoke) { - GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, /* is_volatile */ false, codegen_); + GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ false, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitUnsafePutLongOrdered(HInvoke* invoke) { - GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, /* is_volatile */ false, codegen_); + 
GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ false, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitUnsafePutLongVolatile(HInvoke* invoke) { - GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, /* is_volatile */ true, codegen_); + GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ true, codegen_); } static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena, - Primitive::Type type, + DataType::Type type, HInvoke* invoke) { bool can_call = kEmitCompilerReadBarrier && kUseBakerReadBarrier && @@ -2379,7 +2382,7 @@ static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena, locations->SetInAt(4, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister()); - if (type == Primitive::kPrimNot) { + if (type == DataType::Type::kReference) { // Need temporary registers for card-marking, and possibly for // (Baker) read barrier. locations->AddTemp(Location::RequiresRegister()); // Possibly used for reference poisoning too. @@ -2388,11 +2391,11 @@ static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena, } void IntrinsicLocationsBuilderX86_64::VisitUnsafeCASInt(HInvoke* invoke) { - CreateIntIntIntIntIntToInt(arena_, Primitive::kPrimInt, invoke); + CreateIntIntIntIntIntToInt(arena_, DataType::Type::kInt32, invoke); } void IntrinsicLocationsBuilderX86_64::VisitUnsafeCASLong(HInvoke* invoke) { - CreateIntIntIntIntIntToInt(arena_, Primitive::kPrimLong, invoke); + CreateIntIntIntIntIntToInt(arena_, DataType::Type::kInt64, invoke); } void IntrinsicLocationsBuilderX86_64::VisitUnsafeCASObject(HInvoke* invoke) { @@ -2402,10 +2405,10 @@ void IntrinsicLocationsBuilderX86_64::VisitUnsafeCASObject(HInvoke* invoke) { return; } - CreateIntIntIntIntIntToInt(arena_, Primitive::kPrimNot, invoke); + CreateIntIntIntIntIntToInt(arena_, DataType::Type::kReference, invoke); } -static void GenCAS(Primitive::Type type, HInvoke* invoke, CodeGeneratorX86_64* codegen) { +static void GenCAS(DataType::Type type, HInvoke* invoke, CodeGeneratorX86_64* codegen) { X86_64Assembler* assembler = down_cast<X86_64Assembler*>(codegen->GetAssembler()); LocationSummary* locations = invoke->GetLocations(); @@ -2418,7 +2421,7 @@ static void GenCAS(Primitive::Type type, HInvoke* invoke, CodeGeneratorX86_64* c Location out_loc = locations->Out(); CpuRegister out = out_loc.AsRegister<CpuRegister>(); - if (type == Primitive::kPrimNot) { + if (type == DataType::Type::kReference) { // The only read barrier implementation supporting the // UnsafeCASObject intrinsic is the Baker-style read barriers. 
DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier); @@ -2500,9 +2503,9 @@ static void GenCAS(Primitive::Type type, HInvoke* invoke, CodeGeneratorX86_64* c __ UnpoisonHeapReference(expected); } } else { - if (type == Primitive::kPrimInt) { + if (type == DataType::Type::kInt32) { __ LockCmpxchgl(Address(base, offset, TIMES_1, 0), value); - } else if (type == Primitive::kPrimLong) { + } else if (type == DataType::Type::kInt64) { __ LockCmpxchgq(Address(base, offset, TIMES_1, 0), value); } else { LOG(FATAL) << "Unexpected CAS type " << type; @@ -2518,11 +2521,11 @@ static void GenCAS(Primitive::Type type, HInvoke* invoke, CodeGeneratorX86_64* c } void IntrinsicCodeGeneratorX86_64::VisitUnsafeCASInt(HInvoke* invoke) { - GenCAS(Primitive::kPrimInt, invoke, codegen_); + GenCAS(DataType::Type::kInt32, invoke, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitUnsafeCASLong(HInvoke* invoke) { - GenCAS(Primitive::kPrimLong, invoke, codegen_); + GenCAS(DataType::Type::kInt64, invoke, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitUnsafeCASObject(HInvoke* invoke) { @@ -2530,7 +2533,7 @@ void IntrinsicCodeGeneratorX86_64::VisitUnsafeCASObject(HInvoke* invoke) { // UnsafeCASObject intrinsic is the Baker-style read barriers. DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier); - GenCAS(Primitive::kPrimNot, invoke, codegen_); + GenCAS(DataType::Type::kReference, invoke, codegen_); } void IntrinsicLocationsBuilderX86_64::VisitIntegerReverse(HInvoke* invoke) { diff --git a/compiler/optimizing/licm_test.cc b/compiler/optimizing/licm_test.cc index 8967d7cef2..0617e60cfe 100644 --- a/compiler/optimizing/licm_test.cc +++ b/compiler/optimizing/licm_test.cc @@ -78,7 +78,7 @@ class LICMTest : public CommonCompilerTest { parameter_ = new (&allocator_) HParameterValue(graph_->GetDexFile(), dex::TypeIndex(0), 0, - Primitive::kPrimNot); + DataType::Type::kReference); entry_->AddInstruction(parameter_); int_constant_ = graph_->GetIntConstant(42); float_constant_ = graph_->GetFloatConstant(42.0f); @@ -125,7 +125,7 @@ TEST_F(LICMTest, FieldHoisting) { // Populate the loop with instructions: set/get field with different types. HInstruction* get_field = new (&allocator_) HInstanceFieldGet(parameter_, nullptr, - Primitive::kPrimLong, + DataType::Type::kInt64, MemberOffset(10), false, kUnknownFieldIndex, @@ -134,7 +134,7 @@ TEST_F(LICMTest, FieldHoisting) { 0); loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction()); HInstruction* set_field = new (&allocator_) HInstanceFieldSet( - parameter_, int_constant_, nullptr, Primitive::kPrimInt, MemberOffset(20), + parameter_, int_constant_, nullptr, DataType::Type::kInt32, MemberOffset(20), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph_->GetDexFile(), 0); loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction()); @@ -152,7 +152,7 @@ TEST_F(LICMTest, NoFieldHoisting) { ScopedNullHandle<mirror::DexCache> dex_cache; HInstruction* get_field = new (&allocator_) HInstanceFieldGet(parameter_, nullptr, - Primitive::kPrimLong, + DataType::Type::kInt64, MemberOffset(10), false, kUnknownFieldIndex, @@ -163,7 +163,7 @@ TEST_F(LICMTest, NoFieldHoisting) { HInstruction* set_field = new (&allocator_) HInstanceFieldSet(parameter_, get_field, nullptr, - Primitive::kPrimLong, + DataType::Type::kInt64, MemberOffset(10), false, kUnknownFieldIndex, @@ -184,10 +184,10 @@ TEST_F(LICMTest, ArrayHoisting) { // Populate the loop with instructions: set/get array with different types. 
HInstruction* get_array = new (&allocator_) HArrayGet( - parameter_, int_constant_, Primitive::kPrimInt, 0); + parameter_, int_constant_, DataType::Type::kInt32, 0); loop_body_->InsertInstructionBefore(get_array, loop_body_->GetLastInstruction()); HInstruction* set_array = new (&allocator_) HArraySet( - parameter_, int_constant_, float_constant_, Primitive::kPrimFloat, 0); + parameter_, int_constant_, float_constant_, DataType::Type::kFloat32, 0); loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction()); EXPECT_EQ(get_array->GetBlock(), loop_body_); @@ -202,10 +202,10 @@ TEST_F(LICMTest, NoArrayHoisting) { // Populate the loop with instructions: set/get array with same types. HInstruction* get_array = new (&allocator_) HArrayGet( - parameter_, int_constant_, Primitive::kPrimFloat, 0); + parameter_, int_constant_, DataType::Type::kFloat32, 0); loop_body_->InsertInstructionBefore(get_array, loop_body_->GetLastInstruction()); HInstruction* set_array = new (&allocator_) HArraySet( - parameter_, get_array, float_constant_, Primitive::kPrimFloat, 0); + parameter_, get_array, float_constant_, DataType::Type::kFloat32, 0); loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction()); EXPECT_EQ(get_array->GetBlock(), loop_body_); diff --git a/compiler/optimizing/load_store_analysis.h b/compiler/optimizing/load_store_analysis.h index 02bc254729..d46b904c9e 100644 --- a/compiler/optimizing/load_store_analysis.h +++ b/compiler/optimizing/load_store_analysis.h @@ -369,7 +369,7 @@ class HeapLocationCollector : public HGraphVisitor { } void CreateReferenceInfoForReferenceType(HInstruction* instruction) { - if (instruction->GetType() != Primitive::kPrimNot) { + if (instruction->GetType() != DataType::Type::kReference) { return; } DCHECK(FindReferenceInfoOf(instruction) == nullptr); diff --git a/compiler/optimizing/load_store_analysis_test.cc b/compiler/optimizing/load_store_analysis_test.cc index 81344b52f6..0df2f27e82 100644 --- a/compiler/optimizing/load_store_analysis_test.cc +++ b/compiler/optimizing/load_store_analysis_test.cc @@ -49,16 +49,17 @@ TEST_F(LoadStoreAnalysisTest, ArrayHeapLocations) { // array_set1 ArraySet [array, c1, c3] // array_set2 ArraySet [array, index, c3] HInstruction* array = new (&allocator_) HParameterValue( - graph_->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimNot); + graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); HInstruction* index = new (&allocator_) HParameterValue( - graph_->GetDexFile(), dex::TypeIndex(1), 1, Primitive::kPrimInt); + graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kInt32); HInstruction* c1 = graph_->GetIntConstant(1); HInstruction* c2 = graph_->GetIntConstant(2); HInstruction* c3 = graph_->GetIntConstant(3); - HInstruction* array_get1 = new (&allocator_) HArrayGet(array, c1, Primitive::kPrimInt, 0); - HInstruction* array_get2 = new (&allocator_) HArrayGet(array, c2, Primitive::kPrimInt, 0); - HInstruction* array_set1 = new (&allocator_) HArraySet(array, c1, c3, Primitive::kPrimInt, 0); - HInstruction* array_set2 = new (&allocator_) HArraySet(array, index, c3, Primitive::kPrimInt, 0); + HInstruction* array_get1 = new (&allocator_) HArrayGet(array, c1, DataType::Type::kInt32, 0); + HInstruction* array_get2 = new (&allocator_) HArrayGet(array, c2, DataType::Type::kInt32, 0); + HInstruction* array_set1 = new (&allocator_) HArraySet(array, c1, c3, DataType::Type::kInt32, 0); + HInstruction* array_set2 = + new (&allocator_) HArraySet(array, index, c3, 
DataType::Type::kInt32, 0); entry->AddInstruction(array); entry->AddInstruction(index); entry->AddInstruction(array_get1); @@ -121,11 +122,11 @@ TEST_F(LoadStoreAnalysisTest, FieldHeapLocations) { HInstruction* object = new (&allocator_) HParameterValue(graph_->GetDexFile(), dex::TypeIndex(0), 0, - Primitive::kPrimNot); + DataType::Type::kReference); HInstanceFieldSet* set_field10 = new (&allocator_) HInstanceFieldSet(object, c1, nullptr, - Primitive::kPrimInt, + DataType::Type::kInt32, MemberOffset(10), false, kUnknownFieldIndex, @@ -134,7 +135,7 @@ TEST_F(LoadStoreAnalysisTest, FieldHeapLocations) { 0); HInstanceFieldGet* get_field10 = new (&allocator_) HInstanceFieldGet(object, nullptr, - Primitive::kPrimInt, + DataType::Type::kInt32, MemberOffset(10), false, kUnknownFieldIndex, @@ -143,7 +144,7 @@ TEST_F(LoadStoreAnalysisTest, FieldHeapLocations) { 0); HInstanceFieldGet* get_field20 = new (&allocator_) HInstanceFieldGet(object, nullptr, - Primitive::kPrimInt, + DataType::Type::kInt32, MemberOffset(20), false, kUnknownFieldIndex, @@ -191,26 +192,28 @@ TEST_F(LoadStoreAnalysisTest, ArrayIndexAliasingTest) { graph_->BuildDominatorTree(); HInstruction* array = new (&allocator_) HParameterValue( - graph_->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimNot); + graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); HInstruction* index = new (&allocator_) HParameterValue( - graph_->GetDexFile(), dex::TypeIndex(1), 1, Primitive::kPrimInt); + graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kInt32); HInstruction* c0 = graph_->GetIntConstant(0); HInstruction* c1 = graph_->GetIntConstant(1); HInstruction* c_neg1 = graph_->GetIntConstant(-1); - HInstruction* add0 = new (&allocator_) HAdd(Primitive::kPrimInt, index, c0); - HInstruction* add1 = new (&allocator_) HAdd(Primitive::kPrimInt, index, c1); - HInstruction* sub0 = new (&allocator_) HSub(Primitive::kPrimInt, index, c0); - HInstruction* sub1 = new (&allocator_) HSub(Primitive::kPrimInt, index, c1); - HInstruction* sub_neg1 = new (&allocator_) HSub(Primitive::kPrimInt, index, c_neg1); - HInstruction* rev_sub1 = new (&allocator_) HSub(Primitive::kPrimInt, c1, index); - HInstruction* arr_set1 = new (&allocator_) HArraySet(array, c0, c0, Primitive::kPrimInt, 0); - HInstruction* arr_set2 = new (&allocator_) HArraySet(array, c1, c0, Primitive::kPrimInt, 0); - HInstruction* arr_set3 = new (&allocator_) HArraySet(array, add0, c0, Primitive::kPrimInt, 0); - HInstruction* arr_set4 = new (&allocator_) HArraySet(array, add1, c0, Primitive::kPrimInt, 0); - HInstruction* arr_set5 = new (&allocator_) HArraySet(array, sub0, c0, Primitive::kPrimInt, 0); - HInstruction* arr_set6 = new (&allocator_) HArraySet(array, sub1, c0, Primitive::kPrimInt, 0); - HInstruction* arr_set7 = new (&allocator_) HArraySet(array, rev_sub1, c0, Primitive::kPrimInt, 0); - HInstruction* arr_set8 = new (&allocator_) HArraySet(array, sub_neg1, c0, Primitive::kPrimInt, 0); + HInstruction* add0 = new (&allocator_) HAdd(DataType::Type::kInt32, index, c0); + HInstruction* add1 = new (&allocator_) HAdd(DataType::Type::kInt32, index, c1); + HInstruction* sub0 = new (&allocator_) HSub(DataType::Type::kInt32, index, c0); + HInstruction* sub1 = new (&allocator_) HSub(DataType::Type::kInt32, index, c1); + HInstruction* sub_neg1 = new (&allocator_) HSub(DataType::Type::kInt32, index, c_neg1); + HInstruction* rev_sub1 = new (&allocator_) HSub(DataType::Type::kInt32, c1, index); + HInstruction* arr_set1 = new (&allocator_) HArraySet(array, c0, c0, 
DataType::Type::kInt32, 0); + HInstruction* arr_set2 = new (&allocator_) HArraySet(array, c1, c0, DataType::Type::kInt32, 0); + HInstruction* arr_set3 = new (&allocator_) HArraySet(array, add0, c0, DataType::Type::kInt32, 0); + HInstruction* arr_set4 = new (&allocator_) HArraySet(array, add1, c0, DataType::Type::kInt32, 0); + HInstruction* arr_set5 = new (&allocator_) HArraySet(array, sub0, c0, DataType::Type::kInt32, 0); + HInstruction* arr_set6 = new (&allocator_) HArraySet(array, sub1, c0, DataType::Type::kInt32, 0); + HInstruction* arr_set7 = + new (&allocator_) HArraySet(array, rev_sub1, c0, DataType::Type::kInt32, 0); + HInstruction* arr_set8 = + new (&allocator_) HArraySet(array, sub_neg1, c0, DataType::Type::kInt32, 0); entry->AddInstruction(array); entry->AddInstruction(index); @@ -275,9 +278,9 @@ TEST_F(LoadStoreAnalysisTest, ArrayIndexCalculationOverflowTest) { graph_->BuildDominatorTree(); HInstruction* array = new (&allocator_) HParameterValue( - graph_->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimNot); + graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); HInstruction* index = new (&allocator_) HParameterValue( - graph_->GetDexFile(), dex::TypeIndex(1), 1, Primitive::kPrimInt); + graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kInt32); HInstruction* c0 = graph_->GetIntConstant(0); HInstruction* c_0x80000000 = graph_->GetIntConstant(0x80000000); @@ -287,34 +290,41 @@ TEST_F(LoadStoreAnalysisTest, ArrayIndexCalculationOverflowTest) { HInstruction* c_0x80000001 = graph_->GetIntConstant(0x80000001); // `index+0x80000000` and `index-0x80000000` array indices MAY alias. - HInstruction* add_0x80000000 = new (&allocator_) HAdd(Primitive::kPrimInt, index, c_0x80000000); - HInstruction* sub_0x80000000 = new (&allocator_) HSub(Primitive::kPrimInt, index, c_0x80000000); + HInstruction* add_0x80000000 = new (&allocator_) HAdd( + DataType::Type::kInt32, index, c_0x80000000); + HInstruction* sub_0x80000000 = new (&allocator_) HSub( + DataType::Type::kInt32, index, c_0x80000000); HInstruction* arr_set_1 = new (&allocator_) HArraySet( - array, add_0x80000000, c0, Primitive::kPrimInt, 0); + array, add_0x80000000, c0, DataType::Type::kInt32, 0); HInstruction* arr_set_2 = new (&allocator_) HArraySet( - array, sub_0x80000000, c0, Primitive::kPrimInt, 0); + array, sub_0x80000000, c0, DataType::Type::kInt32, 0); // `index+0x10` and `index-0xFFFFFFF0` array indices MAY alias. - HInstruction* add_0x10 = new (&allocator_) HAdd(Primitive::kPrimInt, index, c_0x10); - HInstruction* sub_0xFFFFFFF0 = new (&allocator_) HSub(Primitive::kPrimInt, index, c_0xFFFFFFF0); + HInstruction* add_0x10 = new (&allocator_) HAdd(DataType::Type::kInt32, index, c_0x10); + HInstruction* sub_0xFFFFFFF0 = new (&allocator_) HSub( + DataType::Type::kInt32, index, c_0xFFFFFFF0); HInstruction* arr_set_3 = new (&allocator_) HArraySet( - array, add_0x10, c0, Primitive::kPrimInt, 0); + array, add_0x10, c0, DataType::Type::kInt32, 0); HInstruction* arr_set_4 = new (&allocator_) HArraySet( - array, sub_0xFFFFFFF0, c0, Primitive::kPrimInt, 0); + array, sub_0xFFFFFFF0, c0, DataType::Type::kInt32, 0); // `index+0x7FFFFFFF` and `index-0x80000001` array indices MAY alias. 
- HInstruction* add_0x7FFFFFFF = new (&allocator_) HAdd(Primitive::kPrimInt, index, c_0x7FFFFFFF); - HInstruction* sub_0x80000001 = new (&allocator_) HSub(Primitive::kPrimInt, index, c_0x80000001); + HInstruction* add_0x7FFFFFFF = new (&allocator_) HAdd( + DataType::Type::kInt32, index, c_0x7FFFFFFF); + HInstruction* sub_0x80000001 = new (&allocator_) HSub( + DataType::Type::kInt32, index, c_0x80000001); HInstruction* arr_set_5 = new (&allocator_) HArraySet( - array, add_0x7FFFFFFF, c0, Primitive::kPrimInt, 0); + array, add_0x7FFFFFFF, c0, DataType::Type::kInt32, 0); HInstruction* arr_set_6 = new (&allocator_) HArraySet( - array, sub_0x80000001, c0, Primitive::kPrimInt, 0); + array, sub_0x80000001, c0, DataType::Type::kInt32, 0); // `index+0` and `index-0` array indices MAY alias. - HInstruction* add_0 = new (&allocator_) HAdd(Primitive::kPrimInt, index, c0); - HInstruction* sub_0 = new (&allocator_) HSub(Primitive::kPrimInt, index, c0); - HInstruction* arr_set_7 = new (&allocator_) HArraySet(array, add_0, c0, Primitive::kPrimInt, 0); - HInstruction* arr_set_8 = new (&allocator_) HArraySet(array, sub_0, c0, Primitive::kPrimInt, 0); + HInstruction* add_0 = new (&allocator_) HAdd(DataType::Type::kInt32, index, c0); + HInstruction* sub_0 = new (&allocator_) HSub(DataType::Type::kInt32, index, c0); + HInstruction* arr_set_7 = new (&allocator_) HArraySet( + array, add_0, c0, DataType::Type::kInt32, 0); + HInstruction* arr_set_8 = new (&allocator_) HArraySet( + array, sub_0, c0, DataType::Type::kInt32, 0); entry->AddInstruction(array); entry->AddInstruction(index); diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc index 8a9acf108c..bd14f2b142 100644 --- a/compiler/optimizing/load_store_elimination.cc +++ b/compiler/optimizing/load_store_elimination.cc @@ -271,21 +271,21 @@ class LSEVisitor : public HGraphVisitor { } } - HInstruction* GetDefaultValue(Primitive::Type type) { + HInstruction* GetDefaultValue(DataType::Type type) { switch (type) { - case Primitive::kPrimNot: + case DataType::Type::kReference: return GetGraph()->GetNullConstant(); - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: return GetGraph()->GetIntConstant(0); - case Primitive::kPrimLong: + case DataType::Type::kInt64: return GetGraph()->GetLongConstant(0); - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: return GetGraph()->GetFloatConstant(0); - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: return GetGraph()->GetDoubleConstant(0); default: UNREACHABLE(); @@ -328,8 +328,7 @@ class LSEVisitor : public HGraphVisitor { // This acts like GVN but with better aliasing analysis. 
heap_values[idx] = instruction; } else { - if (Primitive::PrimitiveKind(heap_value->GetType()) - != Primitive::PrimitiveKind(instruction->GetType())) { + if (DataType::Kind(heap_value->GetType()) != DataType::Kind(instruction->GetType())) { // The only situation where the same heap location has different type is when // we do an array get on an instruction that originates from the null constant // (the null could be behind a field access, an array access, a null check or diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc index 6f8743bd53..6c918a3ac2 100644 --- a/compiler/optimizing/loop_optimization.cc +++ b/compiler/optimizing/loop_optimization.cc @@ -75,7 +75,7 @@ static bool IsEarlyExit(HLoopInformation* loop_info) { // denotes if result is long, and thus sign extension from int can be included. // Returns the promoted operand on success. static bool IsSignExtensionAndGet(HInstruction* instruction, - Primitive::Type type, + DataType::Type type, /*out*/ HInstruction** operand, bool to64 = false) { // Accept any already wider constant that would be handled properly by sign @@ -84,20 +84,20 @@ static bool IsSignExtensionAndGet(HInstruction* instruction, int64_t value = 0; if (IsInt64AndGet(instruction, /*out*/ &value)) { switch (type) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: if (IsInt<8>(value)) { *operand = instruction; return true; } return false; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: if (IsInt<16>(value)) { *operand = instruction; return true; } return false; - case Primitive::kPrimInt: + case DataType::Type::kInt32: if (IsInt<32>(value)) { *operand = instruction; return to64; @@ -113,11 +113,11 @@ static bool IsSignExtensionAndGet(HInstruction* instruction, instruction->IsStaticFieldGet() || instruction->IsInstanceFieldGet())) { switch (type) { - case Primitive::kPrimByte: - case Primitive::kPrimShort: + case DataType::Type::kInt8: + case DataType::Type::kInt16: *operand = instruction; return true; - case Primitive::kPrimInt: + case DataType::Type::kInt32: *operand = instruction; return to64; default: @@ -125,7 +125,7 @@ static bool IsSignExtensionAndGet(HInstruction* instruction, } } // Explicit type conversion to long. - if (instruction->IsTypeConversion() && instruction->GetType() == Primitive::kPrimLong) { + if (instruction->IsTypeConversion() && instruction->GetType() == DataType::Type::kInt64) { return IsSignExtensionAndGet(instruction->InputAt(0), type, /*out*/ operand, /*to64*/ true); } return false; @@ -135,7 +135,7 @@ static bool IsSignExtensionAndGet(HInstruction* instruction, // denotes if result is long, and thus zero extension from int can be included. // Returns the promoted operand on success. 
static bool IsZeroExtensionAndGet(HInstruction* instruction, - Primitive::Type type, + DataType::Type type, /*out*/ HInstruction** operand, bool to64 = false) { // Accept any already wider constant that would be handled properly by zero @@ -144,20 +144,20 @@ static bool IsZeroExtensionAndGet(HInstruction* instruction, int64_t value = 0; if (IsInt64AndGet(instruction, /*out*/ &value)) { switch (type) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: if (IsUint<8>(value)) { *operand = instruction; return true; } return false; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: if (IsUint<16>(value)) { *operand = instruction; return true; } return false; - case Primitive::kPrimInt: + case DataType::Type::kInt32: if (IsUint<32>(value)) { *operand = instruction; return to64; @@ -172,7 +172,7 @@ static bool IsZeroExtensionAndGet(HInstruction* instruction, if (instruction->GetType() == type && (instruction->IsArrayGet() || instruction->IsStaticFieldGet() || instruction->IsInstanceFieldGet())) { - if (type == Primitive::kPrimChar) { + if (type == DataType::Type::kUint16) { *operand = instruction; return true; } @@ -189,19 +189,19 @@ static bool IsZeroExtensionAndGet(HInstruction* instruction, (IsInt64AndGet(b, /*out*/ &mask) && (IsSignExtensionAndGet(a, type, /*out*/ operand) || IsZeroExtensionAndGet(a, type, /*out*/ operand)))) { switch ((*operand)->GetType()) { - case Primitive::kPrimByte: + case DataType::Type::kInt8: return mask == std::numeric_limits<uint8_t>::max(); - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: return mask == std::numeric_limits<uint16_t>::max(); - case Primitive::kPrimInt: + case DataType::Type::kInt32: return mask == std::numeric_limits<uint32_t>::max() && to64; default: return false; } } } // Explicit type conversion to long. - if (instruction->IsTypeConversion() && instruction->GetType() == Primitive::kPrimLong) { + if (instruction->IsTypeConversion() && instruction->GetType() == DataType::Type::kInt64) { return IsZeroExtensionAndGet(instruction->InputAt(0), type, /*out*/ operand, /*to64*/ true); } return false; @@ -211,7 +211,7 @@ static bool IsZeroExtensionAndGet(HInstruction* instruction, // Returns true on success and sets is_unsigned accordingly. static bool IsNarrowerOperands(HInstruction* a, HInstruction* b, - Primitive::Type type, + DataType::Type type, /*out*/ HInstruction** r, /*out*/ HInstruction** s, /*out*/ bool* is_unsigned) { @@ -227,7 +227,7 @@ static bool IsNarrowerOperands(HInstruction* a, // As above, single operand. static bool IsNarrowerOperand(HInstruction* a, - Primitive::Type type, + DataType::Type type, /*out*/ HInstruction** r, /*out*/ bool* is_unsigned) { if (IsSignExtensionAndGet(a, type, r)) { @@ -241,44 +241,44 @@ static bool IsNarrowerOperand(HInstruction* a, } // Compute relative vector length based on type difference. 
-static size_t GetOtherVL(Primitive::Type other_type, Primitive::Type vector_type, size_t vl) { +static size_t GetOtherVL(DataType::Type other_type, DataType::Type vector_type, size_t vl) { switch (other_type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: + case DataType::Type::kBool: + case DataType::Type::kInt8: switch (vector_type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: return vl; + case DataType::Type::kBool: + case DataType::Type::kInt8: return vl; default: break; } return vl; - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: switch (vector_type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: return vl >> 1; - case Primitive::kPrimChar: - case Primitive::kPrimShort: return vl; + case DataType::Type::kBool: + case DataType::Type::kInt8: return vl >> 1; + case DataType::Type::kUint16: + case DataType::Type::kInt16: return vl; default: break; } break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: switch (vector_type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: return vl >> 2; - case Primitive::kPrimChar: - case Primitive::kPrimShort: return vl >> 1; - case Primitive::kPrimInt: return vl; + case DataType::Type::kBool: + case DataType::Type::kInt8: return vl >> 2; + case DataType::Type::kUint16: + case DataType::Type::kInt16: return vl >> 1; + case DataType::Type::kInt32: return vl; default: break; } break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: switch (vector_type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: return vl >> 3; - case Primitive::kPrimChar: - case Primitive::kPrimShort: return vl >> 2; - case Primitive::kPrimInt: return vl >> 1; - case Primitive::kPrimLong: return vl; + case DataType::Type::kBool: + case DataType::Type::kInt8: return vl >> 3; + case DataType::Type::kUint16: + case DataType::Type::kInt16: return vl >> 2; + case DataType::Type::kInt32: return vl >> 1; + case DataType::Type::kInt64: return vl; default: break; } break; @@ -815,8 +815,9 @@ void HLoopOptimization::Vectorize(LoopNode* node, vector_body_ = block; // Loop induction type. - Primitive::Type induc_type = main_phi->GetType(); - DCHECK(induc_type == Primitive::kPrimInt || induc_type == Primitive::kPrimLong) << induc_type; + DataType::Type induc_type = main_phi->GetType(); + DCHECK(induc_type == DataType::Type::kInt32 || induc_type == DataType::Type::kInt64) + << induc_type; // Generate dynamic loop peeling trip count, if needed, under the assumption // that the Android runtime guarantees at least "component size" alignment: @@ -939,7 +940,7 @@ void HLoopOptimization::GenerateNewLoop(LoopNode* node, HInstruction* step, uint32_t unroll) { DCHECK(unroll == 1 || vector_mode_ == kVector); - Primitive::Type induc_type = lo->GetType(); + DataType::Type induc_type = lo->GetType(); // Prepare new loop. vector_preheader_ = new_preheader, vector_header_ = vector_preheader_->GetSingleSuccessor(); @@ -1003,7 +1004,7 @@ bool HLoopOptimization::VectorizeDef(LoopNode* node, // (4) vectorizable right-hand-side value. 
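GetOtherVL above reduces to a size-ratio computation: with a fixed SIMD register width, a packed type that is 2^k times wider than the vector's packed type holds 2^k times fewer lanes. A standalone sketch of that relationship, assuming other_size >= vector_size as the switch does:

  #include <cstddef>

  // Relative vector length: shift the current length right by the log2 ratio of
  // the element sizes, e.g. vl = 16 kInt8 lanes corresponds to 4 kInt32 lanes.
  size_t OtherVL(size_t other_size_bytes, size_t vector_size_bytes, size_t vl) {
    size_t shift = 0;
    while ((vector_size_bytes << shift) < other_size_bytes) {
      ++shift;
    }
    return vl >> shift;
  }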
uint64_t restrictions = kNone; if (instruction->IsArraySet()) { - Primitive::Type type = instruction->AsArraySet()->GetComponentType(); + DataType::Type type = instruction->AsArraySet()->GetComponentType(); HInstruction* base = instruction->InputAt(0); HInstruction* index = instruction->InputAt(1); HInstruction* value = instruction->InputAt(2); @@ -1027,7 +1028,7 @@ bool HLoopOptimization::VectorizeDef(LoopNode* node, // (2) vectorizable right-hand-side value. auto redit = reductions_->find(instruction); if (redit != reductions_->end()) { - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); // Recognize SAD idiom or direct reduction. if (VectorizeSADIdiom(node, instruction, generate_code, type, restrictions) || (TrySetVectorType(type, &restrictions) && @@ -1054,7 +1055,7 @@ bool HLoopOptimization::VectorizeDef(LoopNode* node, bool HLoopOptimization::VectorizeUse(LoopNode* node, HInstruction* instruction, bool generate_code, - Primitive::Type type, + DataType::Type type, uint64_t restrictions) { // Accept anything for which code has already been generated. if (generate_code) { @@ -1115,12 +1116,12 @@ bool HLoopOptimization::VectorizeUse(LoopNode* node, // Accept particular type conversions. HTypeConversion* conversion = instruction->AsTypeConversion(); HInstruction* opa = conversion->InputAt(0); - Primitive::Type from = conversion->GetInputType(); - Primitive::Type to = conversion->GetResultType(); - if (Primitive::IsIntegralType(from) && Primitive::IsIntegralType(to)) { - size_t size_vec = Primitive::ComponentSize(type); - size_t size_from = Primitive::ComponentSize(from); - size_t size_to = Primitive::ComponentSize(to); + DataType::Type from = conversion->GetInputType(); + DataType::Type to = conversion->GetResultType(); + if (DataType::IsIntegralType(from) && DataType::IsIntegralType(to)) { + size_t size_vec = DataType::Size(type); + size_t size_from = DataType::Size(from); + size_t size_to = DataType::Size(to); // Accept an integral conversion // (1a) narrowing into vector type, "wider" operations cannot bring in higher order bits, or // (1b) widening from at least vector type, and @@ -1140,7 +1141,7 @@ bool HLoopOptimization::VectorizeUse(LoopNode* node, } return true; } - } else if (to == Primitive::kPrimFloat && from == Primitive::kPrimInt) { + } else if (to == DataType::Type::kFloat32 && from == DataType::Type::kInt32) { DCHECK_EQ(to, type); // Accept int to float conversion for // (1) supported int, @@ -1215,7 +1216,7 @@ bool HLoopOptimization::VectorizeUse(LoopNode* node, if (VectorizeUse(node, r, generate_code, type, restrictions) && IsInt64AndGet(opb, /*out*/ &distance)) { // Restrict shift distance to packed data type width. 
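The conversion test above compares element widths, with DataType::Size taking over from Primitive::ComponentSize. A standalone sketch of the byte widths assumed for the types in this hunk (mirroring the old component sizes; the authoritative mapping is in data_type.h):

  #include <cstddef>

  enum class Type { kBool, kInt8, kUint16, kInt16, kInt32, kInt64, kFloat32, kFloat64 };

  // Boolean and byte occupy one byte, char/short two, int/float four,
  // long/double eight.
  size_t SizeOf(Type t) {
    switch (t) {
      case Type::kBool:
      case Type::kInt8:    return 1;
      case Type::kUint16:
      case Type::kInt16:   return 2;
      case Type::kInt32:
      case Type::kFloat32: return 4;
      case Type::kInt64:
      case Type::kFloat64: return 8;
    }
    return 0;
  }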
- int64_t max_distance = Primitive::ComponentSize(type) * 8; + int64_t max_distance = DataType::Size(type) * 8; if (0 <= distance && distance < max_distance) { if (generate_code) { GenerateVecOp(instruction, vector_map_->Get(r), opb, type); @@ -1298,7 +1299,7 @@ bool HLoopOptimization::VectorizeUse(LoopNode* node, return false; } -bool HLoopOptimization::TrySetVectorType(Primitive::Type type, uint64_t* restrictions) { +bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrictions) { const InstructionSetFeatures* features = compiler_driver_->GetInstructionSetFeatures(); switch (compiler_driver_->GetInstructionSet()) { case kArm: @@ -1306,15 +1307,15 @@ bool HLoopOptimization::TrySetVectorType(Primitive::Type type, uint64_t* restric // Allow vectorization for all ARM devices, because Android assumes that // ARM 32-bit always supports advanced SIMD (64-bit SIMD). switch (type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: + case DataType::Type::kBool: + case DataType::Type::kInt8: *restrictions |= kNoDiv | kNoReduction; return TrySetVectorLength(8); - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: *restrictions |= kNoDiv | kNoStringCharAt | kNoReduction; return TrySetVectorLength(4); - case Primitive::kPrimInt: + case DataType::Type::kInt32: *restrictions |= kNoDiv | kNoReduction; return TrySetVectorLength(2); default: @@ -1325,24 +1326,24 @@ bool HLoopOptimization::TrySetVectorType(Primitive::Type type, uint64_t* restric // Allow vectorization for all ARM devices, because Android assumes that // ARMv8 AArch64 always supports advanced SIMD (128-bit SIMD). switch (type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: + case DataType::Type::kBool: + case DataType::Type::kInt8: *restrictions |= kNoDiv; return TrySetVectorLength(16); - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: *restrictions |= kNoDiv; return TrySetVectorLength(8); - case Primitive::kPrimInt: + case DataType::Type::kInt32: *restrictions |= kNoDiv; return TrySetVectorLength(4); - case Primitive::kPrimLong: + case DataType::Type::kInt64: *restrictions |= kNoDiv | kNoMul | kNoMinMax; return TrySetVectorLength(2); - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: *restrictions |= kNoReduction; return TrySetVectorLength(4); - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: *restrictions |= kNoReduction; return TrySetVectorLength(2); default: @@ -1353,25 +1354,25 @@ bool HLoopOptimization::TrySetVectorType(Primitive::Type type, uint64_t* restric // Allow vectorization for SSE4.1-enabled X86 devices only (128-bit SIMD). 
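The vector lengths requested in the kArm and kArm64 cases above are simply the SIMD register width divided by the element width (64-bit NEON on 32-bit ARM, 128-bit ASIMD on AArch64); a one-line sketch:

  #include <cstddef>

  // 8-byte registers give 8/4/2 lanes for 1/2/4-byte types; 16-byte registers
  // give 16/8/4/2 lanes, plus 4 float or 2 double lanes.
  size_t Lanes(size_t simd_register_bytes, size_t element_bytes) {
    return simd_register_bytes / element_bytes;
  }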
if (features->AsX86InstructionSetFeatures()->HasSSE4_1()) { switch (type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: + case DataType::Type::kBool: + case DataType::Type::kInt8: *restrictions |= kNoMul | kNoDiv | kNoShift | kNoAbs | kNoSignedHAdd | kNoUnroundedHAdd | kNoSAD; return TrySetVectorLength(16); - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: *restrictions |= kNoDiv | kNoAbs | kNoSignedHAdd | kNoUnroundedHAdd | kNoSAD; return TrySetVectorLength(8); - case Primitive::kPrimInt: + case DataType::Type::kInt32: *restrictions |= kNoDiv | kNoSAD; return TrySetVectorLength(4); - case Primitive::kPrimLong: + case DataType::Type::kInt64: *restrictions |= kNoMul | kNoDiv | kNoShr | kNoAbs | kNoMinMax | kNoSAD; return TrySetVectorLength(2); - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: *restrictions |= kNoMinMax | kNoReduction; // minmax: -0.0 vs +0.0 return TrySetVectorLength(4); - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: *restrictions |= kNoMinMax | kNoReduction; // minmax: -0.0 vs +0.0 return TrySetVectorLength(2); default: @@ -1382,24 +1383,24 @@ bool HLoopOptimization::TrySetVectorType(Primitive::Type type, uint64_t* restric case kMips: if (features->AsMipsInstructionSetFeatures()->HasMsa()) { switch (type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: + case DataType::Type::kBool: + case DataType::Type::kInt8: *restrictions |= kNoDiv | kNoReduction | kNoSAD; return TrySetVectorLength(16); - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: *restrictions |= kNoDiv | kNoStringCharAt | kNoReduction | kNoSAD; return TrySetVectorLength(8); - case Primitive::kPrimInt: + case DataType::Type::kInt32: *restrictions |= kNoDiv | kNoReduction | kNoSAD; return TrySetVectorLength(4); - case Primitive::kPrimLong: + case DataType::Type::kInt64: *restrictions |= kNoDiv | kNoReduction | kNoSAD; return TrySetVectorLength(2); - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: *restrictions |= kNoMinMax | kNoReduction; // min/max(x, NaN) return TrySetVectorLength(4); - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: *restrictions |= kNoMinMax | kNoReduction; // min/max(x, NaN) return TrySetVectorLength(2); default: @@ -1410,24 +1411,24 @@ bool HLoopOptimization::TrySetVectorType(Primitive::Type type, uint64_t* restric case kMips64: if (features->AsMips64InstructionSetFeatures()->HasMsa()) { switch (type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: + case DataType::Type::kBool: + case DataType::Type::kInt8: *restrictions |= kNoDiv | kNoReduction | kNoSAD; return TrySetVectorLength(16); - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kUint16: + case DataType::Type::kInt16: *restrictions |= kNoDiv | kNoStringCharAt | kNoReduction | kNoSAD; return TrySetVectorLength(8); - case Primitive::kPrimInt: + case DataType::Type::kInt32: *restrictions |= kNoDiv | kNoReduction | kNoSAD; return TrySetVectorLength(4); - case Primitive::kPrimLong: + case DataType::Type::kInt64: *restrictions |= kNoDiv | kNoReduction | kNoSAD; return TrySetVectorLength(2); - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: *restrictions |= kNoMinMax | kNoReduction; // min/max(x, NaN) return TrySetVectorLength(4); - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: *restrictions |= kNoMinMax | kNoReduction; // min/max(x, NaN) 
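The *restrictions |= ... lines above collect per-type, per-ISA limitations as a bitmask that later vectorization checks consult. A standalone sketch of that bookkeeping; the bit assignments here are illustrative, not the values from loop_optimization.h:

  #include <cstdint>

  // Illustrative restriction bits (the real enumerators live in HLoopOptimization).
  enum Restriction : uint64_t {
    kNone        = 0,
    kNoDiv       = 1u << 0,
    kNoMul       = 1u << 1,
    kNoMinMax    = 1u << 2,
    kNoReduction = 1u << 3,
    kNoSAD       = 1u << 4,
  };

  // An operation class is vectorizable only if none of its restriction bits are set.
  bool HasAnyRestriction(uint64_t restrictions, uint64_t bits) {
    return (restrictions & bits) != 0;
  }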
return TrySetVectorLength(2); default: @@ -1452,7 +1453,7 @@ bool HLoopOptimization::TrySetVectorLength(uint32_t length) { return vector_length_ == length; } -void HLoopOptimization::GenerateVecInv(HInstruction* org, Primitive::Type type) { +void HLoopOptimization::GenerateVecInv(HInstruction* org, DataType::Type type) { if (vector_map_->find(org) == vector_map_->end()) { // In scalar code, just use a self pass-through for scalar invariants // (viz. expression remains itself). @@ -1468,9 +1469,9 @@ void HLoopOptimization::GenerateVecInv(HInstruction* org, Primitive::Type type) } else { // Generates ReplicateScalar( (optional_type_conv) org ). HInstruction* input = org; - Primitive::Type input_type = input->GetType(); - if (type != input_type && (type == Primitive::kPrimLong || - input_type == Primitive::kPrimLong)) { + DataType::Type input_type = input->GetType(); + if (type != input_type && (type == DataType::Type::kInt64 || + input_type == DataType::Type::kInt64)) { input = Insert(vector_preheader_, new (global_allocator_) HTypeConversion(type, input, kNoDexPc)); } @@ -1487,7 +1488,7 @@ void HLoopOptimization::GenerateVecSub(HInstruction* org, HInstruction* offset) HInstruction* subscript = vector_index_; int64_t value = 0; if (!IsInt64AndGet(offset, &value) || value != 0) { - subscript = new (global_allocator_) HAdd(Primitive::kPrimInt, subscript, offset); + subscript = new (global_allocator_) HAdd(DataType::Type::kInt32, subscript, offset); if (org->IsPhi()) { Insert(vector_body_, subscript); // lacks layout placeholder } @@ -1500,7 +1501,7 @@ void HLoopOptimization::GenerateVecMem(HInstruction* org, HInstruction* opa, HInstruction* opb, HInstruction* offset, - Primitive::Type type) { + DataType::Type type) { HInstruction* vector = nullptr; if (vector_mode_ == kVector) { // Vector store or load. @@ -1570,7 +1571,7 @@ void HLoopOptimization::GenerateVecReductionPhiInputs(HPhi* phi, HInstruction* r // Generate a [initial, 0, .., 0] vector. 
HVecOperation* red_vector = new_red->AsVecOperation(); size_t vector_length = red_vector->GetVectorLength(); - Primitive::Type type = red_vector->GetPackedType(); + DataType::Type type = red_vector->GetPackedType(); new_init = Insert(vector_preheader_, new (global_allocator_) HVecSetScalars(global_allocator_, &new_init, @@ -1594,7 +1595,7 @@ HInstruction* HLoopOptimization::ReduceAndExtractIfNeeded(HInstruction* instruct if (input->IsVecOperation()) { HVecOperation* input_vector = input->AsVecOperation(); size_t vector_length = input_vector->GetVectorLength(); - Primitive::Type type = input_vector->GetPackedType(); + DataType::Type type = input_vector->GetPackedType(); HVecReduce::ReductionKind kind = GetReductionKind(input_vector); HBasicBlock* exit = instruction->GetBlock()->GetSuccessors()[0]; // Generate a vector reduction and scalar extract @@ -1624,10 +1625,10 @@ HInstruction* HLoopOptimization::ReduceAndExtractIfNeeded(HInstruction* instruct void HLoopOptimization::GenerateVecOp(HInstruction* org, HInstruction* opa, HInstruction* opb, - Primitive::Type type, + DataType::Type type, bool is_unsigned) { HInstruction* vector = nullptr; - Primitive::Type org_type = org->GetType(); + DataType::Type org_type = org->GetType(); switch (org->GetKind()) { case HInstruction::kNeg: DCHECK(opb == nullptr); @@ -1779,7 +1780,7 @@ void HLoopOptimization::GenerateVecOp(HInstruction* org, bool HLoopOptimization::VectorizeHalvingAddIdiom(LoopNode* node, HInstruction* instruction, bool generate_code, - Primitive::Type type, + DataType::Type type, uint64_t restrictions) { // Test for top level arithmetic shift right x >> 1 or logical shift right x >>> 1 // (note whether the sign bit in wider precision is shifted in has no effect @@ -1853,12 +1854,12 @@ bool HLoopOptimization::VectorizeHalvingAddIdiom(LoopNode* node, bool HLoopOptimization::VectorizeSADIdiom(LoopNode* node, HInstruction* instruction, bool generate_code, - Primitive::Type reduction_type, + DataType::Type reduction_type, uint64_t restrictions) { // Filter integral "q += ABS(a - b);" reduction, where ABS and SUB // are done in the same precision (either int or long). if (!instruction->IsAdd() || - (reduction_type != Primitive::kPrimInt && reduction_type != Primitive::kPrimLong)) { + (reduction_type != DataType::Type::kInt32 && reduction_type != DataType::Type::kInt64)) { return false; } HInstruction* q = instruction->InputAt(0); @@ -1882,7 +1883,7 @@ bool HLoopOptimization::VectorizeSADIdiom(LoopNode* node, HInstruction* r = a; HInstruction* s = b; bool is_unsigned = false; - Primitive::Type sub_type = a->GetType(); + DataType::Type sub_type = a->GetType(); if (a->IsTypeConversion()) { sub_type = a->InputAt(0)->GetType(); } else if (b->IsTypeConversion()) { diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h index ae2ea76f47..6e6e3873f9 100644 --- a/compiler/optimizing/loop_optimization.h +++ b/compiler/optimizing/loop_optimization.h @@ -91,7 +91,7 @@ class HLoopOptimization : public HOptimization { * Representation of a unit-stride array reference. 
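VectorizeSADIdiom above filters for the scalar reduction shape q += ABS(a - b) computed in int or long precision. A standalone sketch of one step of that scalar idiom:

  #include <cstdint>

  // One step of a sum-of-absolute-differences reduction accumulated in 64-bit
  // precision; the vectorizer replaces a loop of these with a SAD instruction
  // where the target supports it.
  int64_t SadStep(int64_t q, int32_t a, int32_t b) {
    const int64_t diff = static_cast<int64_t>(a) - static_cast<int64_t>(b);
    return q + (diff < 0 ? -diff : diff);
  }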
*/ struct ArrayReference { - ArrayReference(HInstruction* b, HInstruction* o, Primitive::Type t, bool l) + ArrayReference(HInstruction* b, HInstruction* o, DataType::Type t, bool l) : base(b), offset(o), type(t), lhs(l) { } bool operator<(const ArrayReference& other) const { return @@ -103,7 +103,7 @@ class HLoopOptimization : public HOptimization { } HInstruction* base; // base address HInstruction* offset; // offset + i - Primitive::Type type; // component type + DataType::Type type; // component type bool lhs; // def/use }; @@ -147,36 +147,36 @@ class HLoopOptimization : public HOptimization { bool VectorizeUse(LoopNode* node, HInstruction* instruction, bool generate_code, - Primitive::Type type, + DataType::Type type, uint64_t restrictions); - bool TrySetVectorType(Primitive::Type type, /*out*/ uint64_t* restrictions); + bool TrySetVectorType(DataType::Type type, /*out*/ uint64_t* restrictions); bool TrySetVectorLength(uint32_t length); - void GenerateVecInv(HInstruction* org, Primitive::Type type); + void GenerateVecInv(HInstruction* org, DataType::Type type); void GenerateVecSub(HInstruction* org, HInstruction* offset); void GenerateVecMem(HInstruction* org, HInstruction* opa, HInstruction* opb, HInstruction* offset, - Primitive::Type type); + DataType::Type type); void GenerateVecReductionPhi(HPhi* phi); void GenerateVecReductionPhiInputs(HPhi* phi, HInstruction* reduction); HInstruction* ReduceAndExtractIfNeeded(HInstruction* instruction); void GenerateVecOp(HInstruction* org, HInstruction* opa, HInstruction* opb, - Primitive::Type type, + DataType::Type type, bool is_unsigned = false); // Vectorization idioms. bool VectorizeHalvingAddIdiom(LoopNode* node, HInstruction* instruction, bool generate_code, - Primitive::Type type, + DataType::Type type, uint64_t restrictions); bool VectorizeSADIdiom(LoopNode* node, HInstruction* instruction, bool generate_code, - Primitive::Type type, + DataType::Type type, uint64_t restrictions); // Vectorization heuristics. diff --git a/compiler/optimizing/loop_optimization_test.cc b/compiler/optimizing/loop_optimization_test.cc index 1c5603d00f..95718ae388 100644 --- a/compiler/optimizing/loop_optimization_test.cc +++ b/compiler/optimizing/loop_optimization_test.cc @@ -51,7 +51,7 @@ class LoopOptimizationTest : public CommonCompilerTest { parameter_ = new (&allocator_) HParameterValue(graph_->GetDexFile(), dex::TypeIndex(0), 0, - Primitive::kPrimInt); + DataType::Type::kInt32); entry_block_->AddInstruction(parameter_); return_block_->AddInstruction(new (&allocator_) HReturnVoid()); exit_block_->AddInstruction(new (&allocator_) HExit()); @@ -216,8 +216,8 @@ TEST_F(LoopOptimizationTest, SimplifyLoop) { header->AddInstruction(new (&allocator_) HIf(parameter_)); body->AddInstruction(new (&allocator_) HGoto()); - HPhi* phi = new (&allocator_) HPhi(&allocator_, 0, 0, Primitive::kPrimInt); - HInstruction* add = new (&allocator_) HAdd(Primitive::kPrimInt, phi, parameter_); + HPhi* phi = new (&allocator_) HPhi(&allocator_, 0, 0, DataType::Type::kInt32); + HInstruction* add = new (&allocator_) HAdd(DataType::Type::kInt32, phi, parameter_); header->AddPhi(phi); body->AddInstruction(add); diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc index 9cff6b005b..41ea998a8c 100644 --- a/compiler/optimizing/nodes.cc +++ b/compiler/optimizing/nodes.cc @@ -564,7 +564,7 @@ HCurrentMethod* HGraph::GetCurrentMethod() { // id and/or any invariants the graph is assuming when adding new instructions. 
if ((cached_current_method_ == nullptr) || (cached_current_method_->GetBlock() == nullptr)) { cached_current_method_ = new (arena_) HCurrentMethod( - Is64BitInstructionSet(instruction_set_) ? Primitive::kPrimLong : Primitive::kPrimInt, + Is64BitInstructionSet(instruction_set_) ? DataType::Type::kInt64 : DataType::Type::kInt32, entry_block_->GetDexPc()); if (entry_block_->GetFirstInstruction() == nullptr) { entry_block_->AddInstruction(cached_current_method_); @@ -585,19 +585,19 @@ std::string HGraph::PrettyMethod(bool with_signature) const { return dex_file_.PrettyMethod(method_idx_, with_signature); } -HConstant* HGraph::GetConstant(Primitive::Type type, int64_t value, uint32_t dex_pc) { +HConstant* HGraph::GetConstant(DataType::Type type, int64_t value, uint32_t dex_pc) { switch (type) { - case Primitive::Type::kPrimBoolean: + case DataType::Type::kBool: DCHECK(IsUint<1>(value)); FALLTHROUGH_INTENDED; - case Primitive::Type::kPrimByte: - case Primitive::Type::kPrimChar: - case Primitive::Type::kPrimShort: - case Primitive::Type::kPrimInt: - DCHECK(IsInt(Primitive::ComponentSize(type) * kBitsPerByte, value)); + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + DCHECK(IsInt(DataType::Size(type) * kBitsPerByte, value)); return GetIntConstant(static_cast<int32_t>(value), dex_pc); - case Primitive::Type::kPrimLong: + case DataType::Type::kInt64: return GetLongConstant(value, dex_pc); default: @@ -838,9 +838,9 @@ void HBasicBlock::ReplaceAndRemoveInstructionWith(HInstruction* initial, // We can only replace a control flow instruction with another control flow instruction. DCHECK(replacement->IsControlFlow()); DCHECK_EQ(replacement->GetId(), -1); - DCHECK_EQ(replacement->GetType(), Primitive::kPrimVoid); + DCHECK_EQ(replacement->GetType(), DataType::Type::kVoid); DCHECK_EQ(initial->GetBlock(), this); - DCHECK_EQ(initial->GetType(), Primitive::kPrimVoid); + DCHECK_EQ(initial->GetType(), DataType::Type::kVoid); DCHECK(initial->GetUses().empty()); DCHECK(initial->GetEnvUses().empty()); replacement->SetBlock(this); @@ -1219,7 +1219,7 @@ void HVariableInputSizeInstruction::RemoveAllInputs() { size_t HConstructorFence::RemoveConstructorFences(HInstruction* instruction) { DCHECK(instruction->GetBlock() != nullptr); // Removing constructor fences only makes sense for instructions with an object return type. - DCHECK_EQ(Primitive::kPrimNot, instruction->GetType()); + DCHECK_EQ(DataType::Type::kReference, instruction->GetType()); // Return how many instructions were removed for statistic purposes. 
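The DCHECK in HGraph::GetConstant above verifies that an integral constant fits in Size(type) * 8 bits. A standalone sketch of that signed-range check (the IsInt(bits, value) helper is assumed to behave like this):

  #include <cstdint>

  // A value fits a signed field of `bits` width when it lies in
  // [-2^(bits-1), 2^(bits-1) - 1].
  bool FitsInSignedBits(int64_t value, int bits) {
    if (bits >= 64) return true;
    const int64_t lo = -(int64_t{1} << (bits - 1));
    const int64_t hi = (int64_t{1} << (bits - 1)) - 1;
    return lo <= value && value <= hi;
  }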
size_t remove_count = 0; @@ -1382,11 +1382,11 @@ HConstant* HTypeConversion::TryStaticEvaluation() const { if (GetInput()->IsIntConstant()) { int32_t value = GetInput()->AsIntConstant()->GetValue(); switch (GetResultType()) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: return graph->GetLongConstant(static_cast<int64_t>(value), GetDexPc()); - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: return graph->GetFloatConstant(static_cast<float>(value), GetDexPc()); - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: return graph->GetDoubleConstant(static_cast<double>(value), GetDexPc()); default: return nullptr; @@ -1394,11 +1394,11 @@ HConstant* HTypeConversion::TryStaticEvaluation() const { } else if (GetInput()->IsLongConstant()) { int64_t value = GetInput()->AsLongConstant()->GetValue(); switch (GetResultType()) { - case Primitive::kPrimInt: + case DataType::Type::kInt32: return graph->GetIntConstant(static_cast<int32_t>(value), GetDexPc()); - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: return graph->GetFloatConstant(static_cast<float>(value), GetDexPc()); - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: return graph->GetDoubleConstant(static_cast<double>(value), GetDexPc()); default: return nullptr; @@ -1406,7 +1406,7 @@ HConstant* HTypeConversion::TryStaticEvaluation() const { } else if (GetInput()->IsFloatConstant()) { float value = GetInput()->AsFloatConstant()->GetValue(); switch (GetResultType()) { - case Primitive::kPrimInt: + case DataType::Type::kInt32: if (std::isnan(value)) return graph->GetIntConstant(0, GetDexPc()); if (value >= kPrimIntMax) @@ -1414,7 +1414,7 @@ HConstant* HTypeConversion::TryStaticEvaluation() const { if (value <= kPrimIntMin) return graph->GetIntConstant(kPrimIntMin, GetDexPc()); return graph->GetIntConstant(static_cast<int32_t>(value), GetDexPc()); - case Primitive::kPrimLong: + case DataType::Type::kInt64: if (std::isnan(value)) return graph->GetLongConstant(0, GetDexPc()); if (value >= kPrimLongMax) @@ -1422,7 +1422,7 @@ HConstant* HTypeConversion::TryStaticEvaluation() const { if (value <= kPrimLongMin) return graph->GetLongConstant(kPrimLongMin, GetDexPc()); return graph->GetLongConstant(static_cast<int64_t>(value), GetDexPc()); - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: return graph->GetDoubleConstant(static_cast<double>(value), GetDexPc()); default: return nullptr; @@ -1430,7 +1430,7 @@ HConstant* HTypeConversion::TryStaticEvaluation() const { } else if (GetInput()->IsDoubleConstant()) { double value = GetInput()->AsDoubleConstant()->GetValue(); switch (GetResultType()) { - case Primitive::kPrimInt: + case DataType::Type::kInt32: if (std::isnan(value)) return graph->GetIntConstant(0, GetDexPc()); if (value >= kPrimIntMax) @@ -1438,7 +1438,7 @@ HConstant* HTypeConversion::TryStaticEvaluation() const { if (value <= kPrimLongMin) return graph->GetIntConstant(kPrimIntMin, GetDexPc()); return graph->GetIntConstant(static_cast<int32_t>(value), GetDexPc()); - case Primitive::kPrimLong: + case DataType::Type::kInt64: if (std::isnan(value)) return graph->GetLongConstant(0, GetDexPc()); if (value >= kPrimLongMax) @@ -1446,7 +1446,7 @@ HConstant* HTypeConversion::TryStaticEvaluation() const { if (value <= kPrimLongMin) return graph->GetLongConstant(kPrimLongMin, GetDexPc()); return graph->GetLongConstant(static_cast<int64_t>(value), GetDexPc()); - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: return graph->GetFloatConstant(static_cast<float>(value), 
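The float-to-int and double-to-int cases of TryStaticEvaluation above fold with saturating semantics: NaN becomes 0, out-of-range values clamp to the integer bounds. A standalone sketch of that rule for the kInt32 result type:

  #include <cmath>
  #include <cstdint>
  #include <limits>

  // NaN -> 0; values beyond the int32 range saturate; everything else is a
  // plain truncating cast.
  int32_t FoldFloatToInt32(float value) {
    if (std::isnan(value)) return 0;
    if (value >= static_cast<float>(std::numeric_limits<int32_t>::max()))
      return std::numeric_limits<int32_t>::max();
    if (value <= static_cast<float>(std::numeric_limits<int32_t>::min()))
      return std::numeric_limits<int32_t>::min();
    return static_cast<int32_t>(value);
  }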
GetDexPc()); default: return nullptr; @@ -2604,7 +2604,7 @@ static void CheckAgainstUpperBound(ReferenceTypeInfo rti, ReferenceTypeInfo uppe void HInstruction::SetReferenceTypeInfo(ReferenceTypeInfo rti) { if (kIsDebugBuild) { - DCHECK_EQ(GetType(), Primitive::kPrimNot); + DCHECK_EQ(GetType(), DataType::Type::kReference); ScopedObjectAccess soa(Thread::Current()); DCHECK(rti.IsValid()) << "Invalid RTI for " << DebugName(); if (IsBoundType()) { @@ -2893,7 +2893,7 @@ HInstruction* HGraph::InsertOppositeCondition(HInstruction* cond, HInstruction* ArenaAllocator* allocator = GetArena(); if (cond->IsCondition() && - !Primitive::IsFloatingPointType(cond->InputAt(0)->GetType())) { + !DataType::IsFloatingPointType(cond->InputAt(0)->GetType())) { // Can't reverse floating point conditions. We have to use HBooleanNot in that case. HInstruction* lhs = cond->InputAt(0); HInstruction* rhs = cond->InputAt(1); diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index 6bc5111de2..c49cee3284 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -28,6 +28,7 @@ #include "base/iteration_range.h" #include "base/stl_util.h" #include "base/transform_array_ref.h" +#include "data_type.h" #include "deoptimization_kind.h" #include "dex_file.h" #include "dex_file_types.h" @@ -40,7 +41,6 @@ #include "method_reference.h" #include "mirror/class.h" #include "offsets.h" -#include "primitive.h" #include "utils/intrusive_forward_list.h" namespace art { @@ -511,7 +511,7 @@ class HGraph : public ArenaObject<kArenaAllocGraph> { // Returns a constant of the given type and value. If it does not exist // already, it is created and inserted into the graph. This method is only for // integral types. - HConstant* GetConstant(Primitive::Type type, int64_t value, uint32_t dex_pc = kNoDexPc); + HConstant* GetConstant(DataType::Type type, int64_t value, uint32_t dex_pc = kNoDexPc); // TODO: This is problematic for the consistency of reference type propagation // because it can be created anytime after the pass and thus it will be left @@ -1567,7 +1567,7 @@ using HConstInputsRef = TransformArrayRef<const HUserRecord<HInstruction*>, HInp * The internal representation uses 38-bit and is described in the table below. * The first line indicates the side effect, and for field/array accesses the * second line indicates the type of the access (in the order of the - * Primitive::Type enum). + * DataType::Type enum). * The two numbered lines below indicate the bit position in the bitfield (read * vertically). * @@ -1616,23 +1616,23 @@ class SideEffects : public ValueObject { return SideEffects(kAllReads); } - static SideEffects FieldWriteOfType(Primitive::Type type, bool is_volatile) { + static SideEffects FieldWriteOfType(DataType::Type type, bool is_volatile) { return is_volatile ? AllWritesAndReads() : SideEffects(TypeFlag(type, kFieldWriteOffset)); } - static SideEffects ArrayWriteOfType(Primitive::Type type) { + static SideEffects ArrayWriteOfType(DataType::Type type) { return SideEffects(TypeFlag(type, kArrayWriteOffset)); } - static SideEffects FieldReadOfType(Primitive::Type type, bool is_volatile) { + static SideEffects FieldReadOfType(DataType::Type type, bool is_volatile) { return is_volatile ? 
AllWritesAndReads() : SideEffects(TypeFlag(type, kFieldReadOffset)); } - static SideEffects ArrayReadOfType(Primitive::Type type) { + static SideEffects ArrayReadOfType(DataType::Type type) { return SideEffects(TypeFlag(type, kArrayReadOffset)); } @@ -1761,13 +1761,13 @@ class SideEffects : public ValueObject { ((1ULL << (kLastBitForReads + 1 - kFieldReadOffset)) - 1) << kFieldReadOffset; // Translates type to bit flag. - static uint64_t TypeFlag(Primitive::Type type, int offset) { - CHECK_NE(type, Primitive::kPrimVoid); + static uint64_t TypeFlag(DataType::Type type, int offset) { + CHECK_NE(type, DataType::Type::kVoid); const uint64_t one = 1; - const int shift = type; // 0-based consecutive enum + const int shift = static_cast<int>(type); // 0-based consecutive enum DCHECK_LE(kFieldWriteOffset, shift); DCHECK_LT(shift, kArrayWriteOffset); - return one << (type + offset); + return one << (shift + offset); } // Private constructor on direct flags value. @@ -1956,7 +1956,7 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> { virtual void Accept(HGraphVisitor* visitor) = 0; virtual const char* DebugName() const = 0; - virtual Primitive::Type GetType() const { return Primitive::kPrimVoid; } + virtual DataType::Type GetType() const { return DataType::Type::kVoid; } virtual bool NeedsEnvironment() const { return false; } @@ -1977,7 +1977,7 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> { // simplifies the null check elimination. // TODO: Consider merging can_be_null into ReferenceTypeInfo. virtual bool CanBeNull() const { - DCHECK_EQ(GetType(), Primitive::kPrimNot) << "CanBeNull only applies to reference types"; + DCHECK_EQ(GetType(), DataType::Type::kReference) << "CanBeNull only applies to reference types"; return true; } @@ -1986,13 +1986,13 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> { } virtual bool IsActualObject() const { - return GetType() == Primitive::kPrimNot; + return GetType() == DataType::Type::kReference; } void SetReferenceTypeInfo(ReferenceTypeInfo rti); ReferenceTypeInfo GetReferenceTypeInfo() const { - DCHECK_EQ(GetType(), Primitive::kPrimNot); + DCHECK_EQ(GetType(), DataType::Type::kReference); return ReferenceTypeInfo::CreateUnchecked(reference_type_handle_, GetPackedFlag<kFlagReferenceTypeIsExact>()); } @@ -2505,24 +2505,24 @@ class HTemplateInstruction<0>: public HInstruction { template<intptr_t N> class HExpression : public HTemplateInstruction<N> { public: - HExpression<N>(Primitive::Type type, SideEffects side_effects, uint32_t dex_pc) + HExpression<N>(DataType::Type type, SideEffects side_effects, uint32_t dex_pc) : HTemplateInstruction<N>(side_effects, dex_pc) { this->template SetPackedField<TypeField>(type); } virtual ~HExpression() {} - Primitive::Type GetType() const OVERRIDE { + DataType::Type GetType() const OVERRIDE { return TypeField::Decode(this->GetPackedFields()); } protected: static constexpr size_t kFieldType = HInstruction::kNumberOfGenericPackedBits; static constexpr size_t kFieldTypeSize = - MinimumBitsToStore(static_cast<size_t>(Primitive::kPrimLast)); + MinimumBitsToStore(static_cast<size_t>(DataType::Type::kLast)); static constexpr size_t kNumberOfExpressionPackedBits = kFieldType + kFieldTypeSize; static_assert(kNumberOfExpressionPackedBits <= HInstruction::kMaxNumberOfPackedBits, "Too many packed fields."); - using TypeField = BitField<Primitive::Type, kFieldType, kFieldTypeSize>; + using TypeField = BitField<DataType::Type, kFieldType, kFieldTypeSize>; }; // Represents dex's RETURN_VOID 
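TypeFlag above gives each (access kind, data type) pair its own bit; because DataType::Type is a scoped enum, the diff adds an explicit cast before using it as a shift amount. A standalone sketch (the enumerator order shown here is illustrative only):

  #include <cstdint>

  enum class Type { kReference, kBool, kInt8, kUint16, kInt16, kInt32, kInt64, kFloat32, kFloat64 };

  // One bit per (offset, type): the 0-based enum value selects the bit within
  // the field/array read/write group starting at `offset`.
  uint64_t TypeFlag(Type type, int offset) {
    const int shift = static_cast<int>(type);  // 0-based consecutive enum
    return uint64_t{1} << (shift + offset);
  }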
opcode. A HReturnVoid is a control flow @@ -2562,7 +2562,7 @@ class HPhi FINAL : public HVariableInputSizeInstruction { HPhi(ArenaAllocator* arena, uint32_t reg_number, size_t number_of_inputs, - Primitive::Type type, + DataType::Type type, uint32_t dex_pc = kNoDexPc) : HVariableInputSizeInstruction( SideEffects::None(), @@ -2572,7 +2572,7 @@ class HPhi FINAL : public HVariableInputSizeInstruction { kArenaAllocPhiInputs), reg_number_(reg_number) { SetPackedField<TypeField>(ToPhiType(type)); - DCHECK_NE(GetType(), Primitive::kPrimVoid); + DCHECK_NE(GetType(), DataType::Type::kVoid); // Phis are constructed live and marked dead if conflicting or unused. // Individual steps of SsaBuilder should assume that if a phi has been // marked dead, it can be ignored and will be removed by SsaPhiElimination. @@ -2581,21 +2581,21 @@ class HPhi FINAL : public HVariableInputSizeInstruction { } // Returns a type equivalent to the given `type`, but that a `HPhi` can hold. - static Primitive::Type ToPhiType(Primitive::Type type) { - return Primitive::PrimitiveKind(type); + static DataType::Type ToPhiType(DataType::Type type) { + return DataType::Kind(type); } bool IsCatchPhi() const { return GetBlock()->IsCatchBlock(); } - Primitive::Type GetType() const OVERRIDE { return GetPackedField<TypeField>(); } - void SetType(Primitive::Type new_type) { + DataType::Type GetType() const OVERRIDE { return GetPackedField<TypeField>(); } + void SetType(DataType::Type new_type) { // Make sure that only valid type changes occur. The following are allowed: // (1) int -> float/ref (primitive type propagation), // (2) long -> double (primitive type propagation). DCHECK(GetType() == new_type || - (GetType() == Primitive::kPrimInt && new_type == Primitive::kPrimFloat) || - (GetType() == Primitive::kPrimInt && new_type == Primitive::kPrimNot) || - (GetType() == Primitive::kPrimLong && new_type == Primitive::kPrimDouble)); + (GetType() == DataType::Type::kInt32 && new_type == DataType::Type::kFloat32) || + (GetType() == DataType::Type::kInt32 && new_type == DataType::Type::kReference) || + (GetType() == DataType::Type::kInt64 && new_type == DataType::Type::kFloat64)); SetPackedField<TypeField>(new_type); } @@ -2645,12 +2645,12 @@ class HPhi FINAL : public HVariableInputSizeInstruction { private: static constexpr size_t kFieldType = HInstruction::kNumberOfGenericPackedBits; static constexpr size_t kFieldTypeSize = - MinimumBitsToStore(static_cast<size_t>(Primitive::kPrimLast)); + MinimumBitsToStore(static_cast<size_t>(DataType::Type::kLast)); static constexpr size_t kFlagIsLive = kFieldType + kFieldTypeSize; static constexpr size_t kFlagCanBeNull = kFlagIsLive + 1; static constexpr size_t kNumberOfPhiPackedBits = kFlagCanBeNull + 1; static_assert(kNumberOfPhiPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields."); - using TypeField = BitField<Primitive::Type, kFieldType, kFieldTypeSize>; + using TypeField = BitField<DataType::Type, kFieldType, kFieldTypeSize>; const uint32_t reg_number_; @@ -2691,7 +2691,7 @@ class HGoto FINAL : public HTemplateInstruction<0> { class HConstant : public HExpression<0> { public: - explicit HConstant(Primitive::Type type, uint32_t dex_pc = kNoDexPc) + explicit HConstant(DataType::Type type, uint32_t dex_pc = kNoDexPc) : HExpression(type, SideEffects::None(), dex_pc) {} bool CanBeMoved() const OVERRIDE { return true; } @@ -2729,7 +2729,8 @@ class HNullConstant FINAL : public HConstant { DECLARE_INSTRUCTION(NullConstant); private: - explicit HNullConstant(uint32_t dex_pc = kNoDexPc) : 
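HPhi::SetType above only permits the type changes made by primitive type propagation. A standalone sketch of the allowed transitions:

  enum class Type { kInt32, kInt64, kFloat32, kFloat64, kReference };

  // int may be re-typed as float or reference, long as double; anything else
  // must leave the phi's type unchanged.
  bool PhiTypeChangeAllowed(Type from, Type to) {
    return from == to ||
           (from == Type::kInt32 && (to == Type::kFloat32 || to == Type::kReference)) ||
           (from == Type::kInt64 && to == Type::kFloat64);
  }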
HConstant(Primitive::kPrimNot, dex_pc) {} + explicit HNullConstant(uint32_t dex_pc = kNoDexPc) + : HConstant(DataType::Type::kReference, dex_pc) {} friend class HGraph; DISALLOW_COPY_AND_ASSIGN(HNullConstant); @@ -2766,9 +2767,9 @@ class HIntConstant FINAL : public HConstant { private: explicit HIntConstant(int32_t value, uint32_t dex_pc = kNoDexPc) - : HConstant(Primitive::kPrimInt, dex_pc), value_(value) {} + : HConstant(DataType::Type::kInt32, dex_pc), value_(value) {} explicit HIntConstant(bool value, uint32_t dex_pc = kNoDexPc) - : HConstant(Primitive::kPrimInt, dex_pc), value_(value ? 1 : 0) {} + : HConstant(DataType::Type::kInt32, dex_pc), value_(value ? 1 : 0) {} const int32_t value_; @@ -2800,7 +2801,7 @@ class HLongConstant FINAL : public HConstant { private: explicit HLongConstant(int64_t value, uint32_t dex_pc = kNoDexPc) - : HConstant(Primitive::kPrimLong, dex_pc), value_(value) {} + : HConstant(DataType::Type::kInt64, dex_pc), value_(value) {} const int64_t value_; @@ -2849,9 +2850,9 @@ class HFloatConstant FINAL : public HConstant { private: explicit HFloatConstant(float value, uint32_t dex_pc = kNoDexPc) - : HConstant(Primitive::kPrimFloat, dex_pc), value_(value) {} + : HConstant(DataType::Type::kFloat32, dex_pc), value_(value) {} explicit HFloatConstant(int32_t value, uint32_t dex_pc = kNoDexPc) - : HConstant(Primitive::kPrimFloat, dex_pc), value_(bit_cast<float, int32_t>(value)) {} + : HConstant(DataType::Type::kFloat32, dex_pc), value_(bit_cast<float, int32_t>(value)) {} const float value_; @@ -2900,9 +2901,9 @@ class HDoubleConstant FINAL : public HConstant { private: explicit HDoubleConstant(double value, uint32_t dex_pc = kNoDexPc) - : HConstant(Primitive::kPrimDouble, dex_pc), value_(value) {} + : HConstant(DataType::Type::kFloat64, dex_pc), value_(value) {} explicit HDoubleConstant(int64_t value, uint32_t dex_pc = kNoDexPc) - : HConstant(Primitive::kPrimDouble, dex_pc), value_(bit_cast<double, int64_t>(value)) {} + : HConstant(DataType::Type::kFloat64, dex_pc), value_(bit_cast<double, int64_t>(value)) {} const double value_; @@ -3051,8 +3052,8 @@ class HDeoptimize FINAL : public HVariableInputSizeInstruction { DeoptimizationKind GetDeoptimizationKind() const { return GetPackedField<DeoptimizeKindField>(); } - Primitive::Type GetType() const OVERRIDE { - return GuardsAnInput() ? GuardedInput()->GetType() : Primitive::kPrimVoid; + DataType::Type GetType() const OVERRIDE { + return GuardsAnInput() ? GuardedInput()->GetType() : DataType::Type::kVoid; } bool GuardsAnInput() const { @@ -3098,7 +3099,7 @@ class HShouldDeoptimizeFlag FINAL : public HVariableInputSizeInstruction { : HVariableInputSizeInstruction(SideEffects::None(), dex_pc, arena, 0, kArenaAllocCHA) { } - Primitive::Type GetType() const OVERRIDE { return Primitive::kPrimInt; } + DataType::Type GetType() const OVERRIDE { return DataType::Type::kInt32; } // We do all CHA guard elimination/motion in a single pass, after which there is no // further guard elimination/motion since a guard might have been used for justification @@ -3117,7 +3118,7 @@ class HShouldDeoptimizeFlag FINAL : public HVariableInputSizeInstruction { // instructions that work with the dex cache. 
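The HFloatConstant / HDoubleConstant constructors above that take integer arguments reinterpret the raw literal bits rather than converting the value. A standalone sketch of that bit_cast using memcpy:

  #include <cstdint>
  #include <cstring>

  // Reinterpret a 32-bit literal as an IEEE-754 float without a value conversion.
  float FloatFromBits(int32_t bits) {
    static_assert(sizeof(float) == sizeof(int32_t), "bit_cast requires equal sizes");
    float result;
    std::memcpy(&result, &bits, sizeof(result));
    return result;
  }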
class HCurrentMethod FINAL : public HExpression<0> { public: - explicit HCurrentMethod(Primitive::Type type, uint32_t dex_pc = kNoDexPc) + explicit HCurrentMethod(DataType::Type type, uint32_t dex_pc = kNoDexPc) : HExpression(type, SideEffects::None(), dex_pc) {} DECLARE_INSTRUCTION(CurrentMethod); @@ -3136,7 +3137,7 @@ class HClassTableGet FINAL : public HExpression<1> { kLast = kIMTable }; HClassTableGet(HInstruction* cls, - Primitive::Type type, + DataType::Type type, TableKind kind, size_t index, uint32_t dex_pc) @@ -3208,13 +3209,13 @@ class HPackedSwitch FINAL : public HTemplateInstruction<1> { class HUnaryOperation : public HExpression<1> { public: - HUnaryOperation(Primitive::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc) + HUnaryOperation(DataType::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc) : HExpression(result_type, SideEffects::None(), dex_pc) { SetRawInputAt(0, input); } HInstruction* GetInput() const { return InputAt(0); } - Primitive::Type GetResultType() const { return GetType(); } + DataType::Type GetResultType() const { return GetType(); } bool CanBeMoved() const OVERRIDE { return true; } bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { @@ -3240,7 +3241,7 @@ class HUnaryOperation : public HExpression<1> { class HBinaryOperation : public HExpression<2> { public: - HBinaryOperation(Primitive::Type result_type, + HBinaryOperation(DataType::Type result_type, HInstruction* left, HInstruction* right, SideEffects side_effects = SideEffects::None(), @@ -3252,7 +3253,7 @@ class HBinaryOperation : public HExpression<2> { HInstruction* GetLeft() const { return InputAt(0); } HInstruction* GetRight() const { return InputAt(1); } - Primitive::Type GetResultType() const { return GetType(); } + DataType::Type GetResultType() const { return GetType(); } virtual bool IsCommutative() const { return false; } @@ -3342,7 +3343,7 @@ std::ostream& operator<<(std::ostream& os, const ComparisonBias& rhs); class HCondition : public HBinaryOperation { public: HCondition(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc) - : HBinaryOperation(Primitive::kPrimBoolean, first, second, SideEffects::None(), dex_pc) { + : HBinaryOperation(DataType::Type::kBool, first, second, SideEffects::None(), dex_pc) { SetPackedField<ComparisonBiasField>(ComparisonBias::kNoBias); } @@ -3367,7 +3368,7 @@ class HCondition : public HBinaryOperation { } bool IsFPConditionTrueIfNaN() const { - DCHECK(Primitive::IsFloatingPointType(InputAt(0)->GetType())) << InputAt(0)->GetType(); + DCHECK(DataType::IsFloatingPointType(InputAt(0)->GetType())) << InputAt(0)->GetType(); IfCondition if_cond = GetCondition(); if (if_cond == kCondNE) { return true; @@ -3378,7 +3379,7 @@ class HCondition : public HBinaryOperation { } bool IsFPConditionFalseIfNaN() const { - DCHECK(Primitive::IsFloatingPointType(InputAt(0)->GetType())) << InputAt(0)->GetType(); + DCHECK(DataType::IsFloatingPointType(InputAt(0)->GetType())) << InputAt(0)->GetType(); IfCondition if_cond = GetCondition(); if (if_cond == kCondEQ) { return true; @@ -3404,7 +3405,7 @@ class HCondition : public HBinaryOperation { template <typename T> int32_t CompareFP(T x, T y) const { - DCHECK(Primitive::IsFloatingPointType(InputAt(0)->GetType())) << InputAt(0)->GetType(); + DCHECK(DataType::IsFloatingPointType(InputAt(0)->GetType())) << InputAt(0)->GetType(); DCHECK_NE(GetBias(), ComparisonBias::kNoBias); // Handle the bias. return std::isunordered(x, y) ? (IsGtBias() ? 
1 : -1) : Compare(x, y); @@ -3821,20 +3822,20 @@ class HCompare FINAL : public HBinaryOperation { public: // Note that `comparison_type` is the type of comparison performed // between the comparison's inputs, not the type of the instantiated - // HCompare instruction (which is always Primitive::kPrimInt). - HCompare(Primitive::Type comparison_type, + // HCompare instruction (which is always DataType::Type::kInt). + HCompare(DataType::Type comparison_type, HInstruction* first, HInstruction* second, ComparisonBias bias, uint32_t dex_pc) - : HBinaryOperation(Primitive::kPrimInt, + : HBinaryOperation(DataType::Type::kInt32, first, second, SideEffectsForArchRuntimeCalls(comparison_type), dex_pc) { SetPackedField<ComparisonBiasField>(bias); - DCHECK_EQ(comparison_type, Primitive::PrimitiveKind(first->GetType())); - DCHECK_EQ(comparison_type, Primitive::PrimitiveKind(second->GetType())); + DCHECK_EQ(comparison_type, DataType::Kind(first->GetType())); + DCHECK_EQ(comparison_type, DataType::Kind(second->GetType())); } template <typename T> @@ -3842,7 +3843,7 @@ class HCompare FINAL : public HBinaryOperation { template <typename T> int32_t ComputeFP(T x, T y) const { - DCHECK(Primitive::IsFloatingPointType(InputAt(0)->GetType())) << InputAt(0)->GetType(); + DCHECK(DataType::IsFloatingPointType(InputAt(0)->GetType())) << InputAt(0)->GetType(); DCHECK_NE(GetBias(), ComparisonBias::kNoBias); // Handle the bias. return std::isunordered(x, y) ? (IsGtBias() ? 1 : -1) : Compute(x, y); @@ -3875,11 +3876,11 @@ class HCompare FINAL : public HBinaryOperation { // Does this compare instruction have a "gt bias" (vs an "lt bias")? // Only meaningful for floating-point comparisons. bool IsGtBias() const { - DCHECK(Primitive::IsFloatingPointType(InputAt(0)->GetType())) << InputAt(0)->GetType(); + DCHECK(DataType::IsFloatingPointType(InputAt(0)->GetType())) << InputAt(0)->GetType(); return GetBias() == ComparisonBias::kGtBias; } - static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type type ATTRIBUTE_UNUSED) { + static SideEffects SideEffectsForArchRuntimeCalls(DataType::Type type ATTRIBUTE_UNUSED) { // Comparisons do not require a runtime call in any back end. return SideEffects::None(); } @@ -3914,7 +3915,7 @@ class HNewInstance FINAL : public HExpression<1> { const DexFile& dex_file, bool finalizable, QuickEntrypointEnum entrypoint) - : HExpression(Primitive::kPrimNot, SideEffects::CanTriggerGC(), dex_pc), + : HExpression(DataType::Type::kReference, SideEffects::CanTriggerGC(), dex_pc), type_index_(type_index), dex_file_(dex_file), entrypoint_(entrypoint) { @@ -4002,7 +4003,7 @@ class HInvoke : public HVariableInputSizeInstruction { // inputs at the end of their list of inputs. 
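CompareFP above (and HCompare::ComputeFP) resolves unordered comparisons according to the gt/lt bias and otherwise falls back to an ordinary three-way compare. A standalone sketch:

  #include <cmath>

  // If either input is NaN the comparison is unordered and the bias decides the
  // result; otherwise return the usual -1 / 0 / 1.
  int CompareFpWithBias(double x, double y, bool gt_bias) {
    if (std::isunordered(x, y)) {
      return gt_bias ? 1 : -1;
    }
    return x == y ? 0 : (x < y ? -1 : 1);
  }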
uint32_t GetNumberOfArguments() const { return number_of_arguments_; } - Primitive::Type GetType() const OVERRIDE { return GetPackedField<ReturnTypeField>(); } + DataType::Type GetType() const OVERRIDE { return GetPackedField<ReturnTypeField>(); } uint32_t GetDexMethodIndex() const { return dex_method_index_; } @@ -4055,17 +4056,17 @@ class HInvoke : public HVariableInputSizeInstruction { static constexpr size_t kFieldReturnType = kFieldInvokeType + kFieldInvokeTypeSize; static constexpr size_t kFieldReturnTypeSize = - MinimumBitsToStore(static_cast<size_t>(Primitive::kPrimLast)); + MinimumBitsToStore(static_cast<size_t>(DataType::Type::kLast)); static constexpr size_t kFlagCanThrow = kFieldReturnType + kFieldReturnTypeSize; static constexpr size_t kNumberOfInvokePackedBits = kFlagCanThrow + 1; static_assert(kNumberOfInvokePackedBits <= kMaxNumberOfPackedBits, "Too many packed fields."); using InvokeTypeField = BitField<InvokeType, kFieldInvokeType, kFieldInvokeTypeSize>; - using ReturnTypeField = BitField<Primitive::Type, kFieldReturnType, kFieldReturnTypeSize>; + using ReturnTypeField = BitField<DataType::Type, kFieldReturnType, kFieldReturnTypeSize>; HInvoke(ArenaAllocator* arena, uint32_t number_of_arguments, uint32_t number_of_other_inputs, - Primitive::Type return_type, + DataType::Type return_type, uint32_t dex_pc, uint32_t dex_method_index, ArtMethod* resolved_method, @@ -4102,7 +4103,7 @@ class HInvokeUnresolved FINAL : public HInvoke { public: HInvokeUnresolved(ArenaAllocator* arena, uint32_t number_of_arguments, - Primitive::Type return_type, + DataType::Type return_type, uint32_t dex_pc, uint32_t dex_method_index, InvokeType invoke_type) @@ -4126,7 +4127,7 @@ class HInvokePolymorphic FINAL : public HInvoke { public: HInvokePolymorphic(ArenaAllocator* arena, uint32_t number_of_arguments, - Primitive::Type return_type, + DataType::Type return_type, uint32_t dex_pc, uint32_t dex_method_index) : HInvoke(arena, @@ -4203,7 +4204,7 @@ class HInvokeStaticOrDirect FINAL : public HInvoke { HInvokeStaticOrDirect(ArenaAllocator* arena, uint32_t number_of_arguments, - Primitive::Type return_type, + DataType::Type return_type, uint32_t dex_pc, uint32_t method_index, ArtMethod* resolved_method, @@ -4281,7 +4282,7 @@ class HInvokeStaticOrDirect FINAL : public HInvoke { } bool CanBeNull() const OVERRIDE { - return GetPackedField<ReturnTypeField>() == Primitive::kPrimNot && !IsStringInit(); + return GetPackedField<ReturnTypeField>() == DataType::Type::kReference && !IsStringInit(); } // Get the index of the special input, if any. 
@@ -4398,7 +4399,7 @@ class HInvokeVirtual FINAL : public HInvoke { public: HInvokeVirtual(ArenaAllocator* arena, uint32_t number_of_arguments, - Primitive::Type return_type, + DataType::Type return_type, uint32_t dex_pc, uint32_t dex_method_index, ArtMethod* resolved_method, @@ -4446,7 +4447,7 @@ class HInvokeInterface FINAL : public HInvoke { public: HInvokeInterface(ArenaAllocator* arena, uint32_t number_of_arguments, - Primitive::Type return_type, + DataType::Type return_type, uint32_t dex_pc, uint32_t dex_method_index, ArtMethod* resolved_method, @@ -4485,9 +4486,9 @@ class HInvokeInterface FINAL : public HInvoke { class HNeg FINAL : public HUnaryOperation { public: - HNeg(Primitive::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc) + HNeg(DataType::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc) : HUnaryOperation(result_type, input, dex_pc) { - DCHECK_EQ(result_type, Primitive::PrimitiveKind(input->GetType())); + DCHECK_EQ(result_type, DataType::Kind(input->GetType())); } template <typename T> static T Compute(T x) { return -x; } @@ -4514,7 +4515,7 @@ class HNeg FINAL : public HUnaryOperation { class HNewArray FINAL : public HExpression<2> { public: HNewArray(HInstruction* cls, HInstruction* length, uint32_t dex_pc) - : HExpression(Primitive::kPrimNot, SideEffects::CanTriggerGC(), dex_pc) { + : HExpression(DataType::Type::kReference, SideEffects::CanTriggerGC(), dex_pc) { SetRawInputAt(0, cls); SetRawInputAt(1, length); } @@ -4544,7 +4545,7 @@ class HNewArray FINAL : public HExpression<2> { class HAdd FINAL : public HBinaryOperation { public: - HAdd(Primitive::Type result_type, + HAdd(DataType::Type result_type, HInstruction* left, HInstruction* right, uint32_t dex_pc = kNoDexPc) @@ -4579,7 +4580,7 @@ class HAdd FINAL : public HBinaryOperation { class HSub FINAL : public HBinaryOperation { public: - HSub(Primitive::Type result_type, + HSub(DataType::Type result_type, HInstruction* left, HInstruction* right, uint32_t dex_pc = kNoDexPc) @@ -4612,7 +4613,7 @@ class HSub FINAL : public HBinaryOperation { class HMul FINAL : public HBinaryOperation { public: - HMul(Primitive::Type result_type, + HMul(DataType::Type result_type, HInstruction* left, HInstruction* right, uint32_t dex_pc = kNoDexPc) @@ -4647,7 +4648,7 @@ class HMul FINAL : public HBinaryOperation { class HDiv FINAL : public HBinaryOperation { public: - HDiv(Primitive::Type result_type, + HDiv(DataType::Type result_type, HInstruction* left, HInstruction* right, uint32_t dex_pc) @@ -4655,7 +4656,7 @@ class HDiv FINAL : public HBinaryOperation { template <typename T> T ComputeIntegral(T x, T y) const { - DCHECK(!Primitive::IsFloatingPointType(GetType())) << GetType(); + DCHECK(!DataType::IsFloatingPointType(GetType())) << GetType(); // Our graph structure ensures we never have 0 for `y` during // constant folding. 
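The DCHECKs above in HNeg (and in the shift and rotate constructors below) compare operand types through DataType::Kind, which replaces Primitive::PrimitiveKind. A standalone sketch of the assumed behaviour, mirroring the old helper:

  enum class Type { kBool, kInt8, kUint16, kInt16, kInt32, kInt64, kFloat32, kFloat64, kReference, kVoid };

  // Sub-word integral types live in 32-bit registers, so they collapse to
  // kInt32 for kind comparisons; all other types map to themselves.
  Type KindOf(Type type) {
    switch (type) {
      case Type::kBool:
      case Type::kInt8:
      case Type::kUint16:
      case Type::kInt16: return Type::kInt32;
      default:           return type;
    }
  }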
DCHECK_NE(y, 0); @@ -4665,7 +4666,7 @@ class HDiv FINAL : public HBinaryOperation { template <typename T> T ComputeFP(T x, T y) const { - DCHECK(Primitive::IsFloatingPointType(GetType())) << GetType(); + DCHECK(DataType::IsFloatingPointType(GetType())) << GetType(); return x / y; } @@ -4694,7 +4695,7 @@ class HDiv FINAL : public HBinaryOperation { class HRem FINAL : public HBinaryOperation { public: - HRem(Primitive::Type result_type, + HRem(DataType::Type result_type, HInstruction* left, HInstruction* right, uint32_t dex_pc) @@ -4702,7 +4703,7 @@ class HRem FINAL : public HBinaryOperation { template <typename T> T ComputeIntegral(T x, T y) const { - DCHECK(!Primitive::IsFloatingPointType(GetType())) << GetType(); + DCHECK(!DataType::IsFloatingPointType(GetType())) << GetType(); // Our graph structure ensures we never have 0 for `y` during // constant folding. DCHECK_NE(y, 0); @@ -4712,7 +4713,7 @@ class HRem FINAL : public HBinaryOperation { template <typename T> T ComputeFP(T x, T y) const { - DCHECK(Primitive::IsFloatingPointType(GetType())) << GetType(); + DCHECK(DataType::IsFloatingPointType(GetType())) << GetType(); return std::fmod(x, y); } @@ -4748,7 +4749,7 @@ class HDivZeroCheck FINAL : public HExpression<1> { SetRawInputAt(0, value); } - Primitive::Type GetType() const OVERRIDE { return InputAt(0)->GetType(); } + DataType::Type GetType() const OVERRIDE { return InputAt(0)->GetType(); } bool CanBeMoved() const OVERRIDE { return true; } @@ -4767,13 +4768,13 @@ class HDivZeroCheck FINAL : public HExpression<1> { class HShl FINAL : public HBinaryOperation { public: - HShl(Primitive::Type result_type, + HShl(DataType::Type result_type, HInstruction* value, HInstruction* distance, uint32_t dex_pc = kNoDexPc) : HBinaryOperation(result_type, value, distance, SideEffects::None(), dex_pc) { - DCHECK_EQ(result_type, Primitive::PrimitiveKind(value->GetType())); - DCHECK_EQ(Primitive::kPrimInt, Primitive::PrimitiveKind(distance->GetType())); + DCHECK_EQ(result_type, DataType::Kind(value->GetType())); + DCHECK_EQ(DataType::Type::kInt32, DataType::Kind(distance->GetType())); } template <typename T> @@ -4813,13 +4814,13 @@ class HShl FINAL : public HBinaryOperation { class HShr FINAL : public HBinaryOperation { public: - HShr(Primitive::Type result_type, + HShr(DataType::Type result_type, HInstruction* value, HInstruction* distance, uint32_t dex_pc = kNoDexPc) : HBinaryOperation(result_type, value, distance, SideEffects::None(), dex_pc) { - DCHECK_EQ(result_type, Primitive::PrimitiveKind(value->GetType())); - DCHECK_EQ(Primitive::kPrimInt, Primitive::PrimitiveKind(distance->GetType())); + DCHECK_EQ(result_type, DataType::Kind(value->GetType())); + DCHECK_EQ(DataType::Type::kInt32, DataType::Kind(distance->GetType())); } template <typename T> @@ -4859,13 +4860,13 @@ class HShr FINAL : public HBinaryOperation { class HUShr FINAL : public HBinaryOperation { public: - HUShr(Primitive::Type result_type, + HUShr(DataType::Type result_type, HInstruction* value, HInstruction* distance, uint32_t dex_pc = kNoDexPc) : HBinaryOperation(result_type, value, distance, SideEffects::None(), dex_pc) { - DCHECK_EQ(result_type, Primitive::PrimitiveKind(value->GetType())); - DCHECK_EQ(Primitive::kPrimInt, Primitive::PrimitiveKind(distance->GetType())); + DCHECK_EQ(result_type, DataType::Kind(value->GetType())); + DCHECK_EQ(DataType::Type::kInt32, DataType::Kind(distance->GetType())); } template <typename T> @@ -4907,7 +4908,7 @@ class HUShr FINAL : public HBinaryOperation { class HAnd FINAL : public 
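HRem above splits its evaluation the same way HDiv does: the integral path uses the C++ % operator (constant folding never sees a zero divisor, hence the DCHECK), while the floating-point path uses std::fmod. A standalone sketch of the two paths:

  #include <cmath>
  #include <cstdint>

  // Integral remainder; callers guarantee y != 0 during constant folding.
  int32_t RemIntegral(int32_t x, int32_t y) { return x % y; }

  // Floating-point remainder keeps the sign of x, matching std::fmod semantics.
  double RemFP(double x, double y) { return std::fmod(x, y); }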
HBinaryOperation { public: - HAnd(Primitive::Type result_type, + HAnd(DataType::Type result_type, HInstruction* left, HInstruction* right, uint32_t dex_pc = kNoDexPc) @@ -4944,7 +4945,7 @@ class HAnd FINAL : public HBinaryOperation { class HOr FINAL : public HBinaryOperation { public: - HOr(Primitive::Type result_type, + HOr(DataType::Type result_type, HInstruction* left, HInstruction* right, uint32_t dex_pc = kNoDexPc) @@ -4981,7 +4982,7 @@ class HOr FINAL : public HBinaryOperation { class HXor FINAL : public HBinaryOperation { public: - HXor(Primitive::Type result_type, + HXor(DataType::Type result_type, HInstruction* left, HInstruction* right, uint32_t dex_pc = kNoDexPc) @@ -5018,10 +5019,10 @@ class HXor FINAL : public HBinaryOperation { class HRor FINAL : public HBinaryOperation { public: - HRor(Primitive::Type result_type, HInstruction* value, HInstruction* distance) + HRor(DataType::Type result_type, HInstruction* value, HInstruction* distance) : HBinaryOperation(result_type, value, distance) { - DCHECK_EQ(result_type, Primitive::PrimitiveKind(value->GetType())); - DCHECK_EQ(Primitive::kPrimInt, Primitive::PrimitiveKind(distance->GetType())); + DCHECK_EQ(result_type, DataType::Kind(value->GetType())); + DCHECK_EQ(DataType::Type::kInt32, DataType::Kind(distance->GetType())); } template <typename T> @@ -5074,7 +5075,7 @@ class HParameterValue FINAL : public HExpression<0> { HParameterValue(const DexFile& dex_file, dex::TypeIndex type_index, uint8_t index, - Primitive::Type parameter_type, + DataType::Type parameter_type, bool is_this = false) : HExpression(parameter_type, SideEffects::None(), kNoDexPc), dex_file_(dex_file), @@ -5113,7 +5114,7 @@ class HParameterValue FINAL : public HExpression<0> { class HNot FINAL : public HUnaryOperation { public: - HNot(Primitive::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc) + HNot(DataType::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc) : HUnaryOperation(result_type, input, dex_pc) {} bool CanBeMoved() const OVERRIDE { return true; } @@ -5147,7 +5148,7 @@ class HNot FINAL : public HUnaryOperation { class HBooleanNot FINAL : public HUnaryOperation { public: explicit HBooleanNot(HInstruction* input, uint32_t dex_pc = kNoDexPc) - : HUnaryOperation(Primitive::Type::kPrimBoolean, input, dex_pc) {} + : HUnaryOperation(DataType::Type::kBool, input, dex_pc) {} bool CanBeMoved() const OVERRIDE { return true; } bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { @@ -5184,16 +5185,16 @@ class HBooleanNot FINAL : public HUnaryOperation { class HTypeConversion FINAL : public HExpression<1> { public: // Instantiate a type conversion of `input` to `result_type`. - HTypeConversion(Primitive::Type result_type, HInstruction* input, uint32_t dex_pc) + HTypeConversion(DataType::Type result_type, HInstruction* input, uint32_t dex_pc) : HExpression(result_type, SideEffects::None(), dex_pc) { SetRawInputAt(0, input); // Invariant: We should never generate a conversion to a Boolean value. 
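HRor introduced above models a rotate-right; its Compute is not shown in this hunk, so the sketch below assumes the usual rotate semantics with the distance taken modulo the value width:

  #include <cstdint>

  // Rotate a 32-bit value right by `distance` bits; masking the distance avoids
  // the undefined shift-by-32 case.
  uint32_t RotateRight32(uint32_t value, uint32_t distance) {
    distance &= 31u;
    if (distance == 0u) {
      return value;
    }
    return (value >> distance) | (value << (32u - distance));
  }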
- DCHECK_NE(Primitive::kPrimBoolean, result_type); + DCHECK_NE(DataType::Type::kBool, result_type); } HInstruction* GetInput() const { return InputAt(0); } - Primitive::Type GetInputType() const { return GetInput()->GetType(); } - Primitive::Type GetResultType() const { return GetType(); } + DataType::Type GetInputType() const { return GetInput()->GetType(); } + DataType::Type GetResultType() const { return GetType(); } bool CanBeMoved() const OVERRIDE { return true; } bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { @@ -5245,7 +5246,7 @@ class FieldInfo : public ValueObject { public: FieldInfo(ArtField* field, MemberOffset field_offset, - Primitive::Type field_type, + DataType::Type field_type, bool is_volatile, uint32_t index, uint16_t declaring_class_def_index, @@ -5260,7 +5261,7 @@ class FieldInfo : public ValueObject { ArtField* GetField() const { return field_; } MemberOffset GetFieldOffset() const { return field_offset_; } - Primitive::Type GetFieldType() const { return field_type_; } + DataType::Type GetFieldType() const { return field_type_; } uint32_t GetFieldIndex() const { return index_; } uint16_t GetDeclaringClassDefIndex() const { return declaring_class_def_index_;} const DexFile& GetDexFile() const { return dex_file_; } @@ -5269,7 +5270,7 @@ class FieldInfo : public ValueObject { private: ArtField* const field_; const MemberOffset field_offset_; - const Primitive::Type field_type_; + const DataType::Type field_type_; const bool is_volatile_; const uint32_t index_; const uint16_t declaring_class_def_index_; @@ -5280,7 +5281,7 @@ class HInstanceFieldGet FINAL : public HExpression<1> { public: HInstanceFieldGet(HInstruction* value, ArtField* field, - Primitive::Type field_type, + DataType::Type field_type, MemberOffset field_offset, bool is_volatile, uint32_t field_idx, @@ -5315,7 +5316,7 @@ class HInstanceFieldGet FINAL : public HExpression<1> { const FieldInfo& GetFieldInfo() const { return field_info_; } MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); } - Primitive::Type GetFieldType() const { return field_info_.GetFieldType(); } + DataType::Type GetFieldType() const { return field_info_.GetFieldType(); } bool IsVolatile() const { return field_info_.IsVolatile(); } DECLARE_INSTRUCTION(InstanceFieldGet); @@ -5331,7 +5332,7 @@ class HInstanceFieldSet FINAL : public HTemplateInstruction<2> { HInstanceFieldSet(HInstruction* object, HInstruction* value, ArtField* field, - Primitive::Type field_type, + DataType::Type field_type, MemberOffset field_offset, bool is_volatile, uint32_t field_idx, @@ -5357,7 +5358,7 @@ class HInstanceFieldSet FINAL : public HTemplateInstruction<2> { const FieldInfo& GetFieldInfo() const { return field_info_; } MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); } - Primitive::Type GetFieldType() const { return field_info_.GetFieldType(); } + DataType::Type GetFieldType() const { return field_info_.GetFieldType(); } bool IsVolatile() const { return field_info_.IsVolatile(); } HInstruction* GetValue() const { return InputAt(1); } bool GetValueCanBeNull() const { return GetPackedFlag<kFlagValueCanBeNull>(); } @@ -5380,7 +5381,7 @@ class HArrayGet FINAL : public HExpression<2> { public: HArrayGet(HInstruction* array, HInstruction* index, - Primitive::Type type, + DataType::Type type, uint32_t dex_pc, bool is_string_char_at = false) : HExpression(type, SideEffects::ArrayReadOfType(type), dex_pc) { @@ -5414,11 +5415,11 @@ class HArrayGet FINAL : public HExpression<2> { 
DCHECK_EQ(GetBlock(), other->GetBlock()); DCHECK_EQ(GetArray(), other->GetArray()); DCHECK_EQ(GetIndex(), other->GetIndex()); - if (Primitive::IsIntOrLongType(GetType())) { - DCHECK(Primitive::IsFloatingPointType(other->GetType())) << other->GetType(); + if (DataType::IsIntOrLongType(GetType())) { + DCHECK(DataType::IsFloatingPointType(other->GetType())) << other->GetType(); } else { - DCHECK(Primitive::IsFloatingPointType(GetType())) << GetType(); - DCHECK(Primitive::IsIntOrLongType(other->GetType())) << other->GetType(); + DCHECK(DataType::IsFloatingPointType(GetType())) << GetType(); + DCHECK(DataType::IsIntOrLongType(other->GetType())) << other->GetType(); } } return result; @@ -5450,11 +5451,11 @@ class HArraySet FINAL : public HTemplateInstruction<3> { HArraySet(HInstruction* array, HInstruction* index, HInstruction* value, - Primitive::Type expected_component_type, + DataType::Type expected_component_type, uint32_t dex_pc) : HTemplateInstruction(SideEffects::None(), dex_pc) { SetPackedField<ExpectedComponentTypeField>(expected_component_type); - SetPackedFlag<kFlagNeedsTypeCheck>(value->GetType() == Primitive::kPrimNot); + SetPackedFlag<kFlagNeedsTypeCheck>(value->GetType() == DataType::Type::kReference); SetPackedFlag<kFlagValueCanBeNull>(true); SetPackedFlag<kFlagStaticTypeOfArrayIsObjectArray>(false); SetRawInputAt(0, array); @@ -5499,29 +5500,30 @@ class HArraySet FINAL : public HTemplateInstruction<3> { HInstruction* GetIndex() const { return InputAt(1); } HInstruction* GetValue() const { return InputAt(2); } - Primitive::Type GetComponentType() const { + DataType::Type GetComponentType() const { // The Dex format does not type floating point index operations. Since the // `expected_component_type_` is set during building and can therefore not // be correct, we also check what is the value type. If it is a floating // point type, we must use that type. - Primitive::Type value_type = GetValue()->GetType(); - return ((value_type == Primitive::kPrimFloat) || (value_type == Primitive::kPrimDouble)) + DataType::Type value_type = GetValue()->GetType(); + return ((value_type == DataType::Type::kFloat32) || (value_type == DataType::Type::kFloat64)) ? value_type : GetRawExpectedComponentType(); } - Primitive::Type GetRawExpectedComponentType() const { + DataType::Type GetRawExpectedComponentType() const { return GetPackedField<ExpectedComponentTypeField>(); } void ComputeSideEffects() { - Primitive::Type type = GetComponentType(); + DataType::Type type = GetComponentType(); SetSideEffects(SideEffects::ArrayWriteOfType(type).Union( SideEffectsForArchRuntimeCalls(type))); } - static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type value_type) { - return (value_type == Primitive::kPrimNot) ? SideEffects::CanTriggerGC() : SideEffects::None(); + static SideEffects SideEffectsForArchRuntimeCalls(DataType::Type value_type) { + return (value_type == DataType::Type::kReference) ? 
SideEffects::CanTriggerGC() + : SideEffects::None(); } DECLARE_INSTRUCTION(ArraySet); @@ -5529,7 +5531,7 @@ class HArraySet FINAL : public HTemplateInstruction<3> { private: static constexpr size_t kFieldExpectedComponentType = kNumberOfGenericPackedBits; static constexpr size_t kFieldExpectedComponentTypeSize = - MinimumBitsToStore(static_cast<size_t>(Primitive::kPrimLast)); + MinimumBitsToStore(static_cast<size_t>(DataType::Type::kLast)); static constexpr size_t kFlagNeedsTypeCheck = kFieldExpectedComponentType + kFieldExpectedComponentTypeSize; static constexpr size_t kFlagValueCanBeNull = kFlagNeedsTypeCheck + 1; @@ -5540,7 +5542,7 @@ class HArraySet FINAL : public HTemplateInstruction<3> { kFlagStaticTypeOfArrayIsObjectArray + 1; static_assert(kNumberOfArraySetPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields."); using ExpectedComponentTypeField = - BitField<Primitive::Type, kFieldExpectedComponentType, kFieldExpectedComponentTypeSize>; + BitField<DataType::Type, kFieldExpectedComponentType, kFieldExpectedComponentTypeSize>; DISALLOW_COPY_AND_ASSIGN(HArraySet); }; @@ -5548,7 +5550,7 @@ class HArraySet FINAL : public HTemplateInstruction<3> { class HArrayLength FINAL : public HExpression<1> { public: HArrayLength(HInstruction* array, uint32_t dex_pc, bool is_string_length = false) - : HExpression(Primitive::kPrimInt, SideEffects::None(), dex_pc) { + : HExpression(DataType::Type::kInt32, SideEffects::None(), dex_pc) { SetPackedFlag<kFlagIsStringLength>(is_string_length); // Note that arrays do not change length, so the instruction does not // depend on any write. @@ -5590,7 +5592,7 @@ class HBoundsCheck FINAL : public HExpression<2> { uint32_t dex_pc, bool string_char_at = false) : HExpression(index->GetType(), SideEffects::CanTriggerGC(), dex_pc) { - DCHECK_EQ(Primitive::kPrimInt, Primitive::PrimitiveKind(index->GetType())); + DCHECK_EQ(DataType::Type::kInt32, DataType::Kind(index->GetType())); SetPackedFlag<kFlagIsStringCharAt>(string_char_at); SetRawInputAt(0, index); SetRawInputAt(1, length); @@ -5800,8 +5802,8 @@ class HLoadClass FINAL : public HInstruction { &special_input_, (special_input_.GetInstruction() != nullptr) ? 1u : 0u); } - Primitive::Type GetType() const OVERRIDE { - return Primitive::kPrimNot; + DataType::Type GetType() const OVERRIDE { + return DataType::Type::kReference; } Handle<mirror::Class> GetClass() const { @@ -5968,8 +5970,8 @@ class HLoadString FINAL : public HInstruction { &special_input_, (special_input_.GetInstruction() != nullptr) ? 1u : 0u); } - Primitive::Type GetType() const OVERRIDE { - return Primitive::kPrimNot; + DataType::Type GetType() const OVERRIDE { + return DataType::Type::kReference; } DECLARE_INSTRUCTION(LoadString); @@ -6020,7 +6022,7 @@ class HClinitCheck FINAL : public HExpression<1> { public: HClinitCheck(HLoadClass* constant, uint32_t dex_pc) : HExpression( - Primitive::kPrimNot, + DataType::Type::kReference, SideEffects::AllChanges(), // Assume write/read on all fields/arrays. 
dex_pc) { SetRawInputAt(0, constant); @@ -6053,7 +6055,7 @@ class HStaticFieldGet FINAL : public HExpression<1> { public: HStaticFieldGet(HInstruction* cls, ArtField* field, - Primitive::Type field_type, + DataType::Type field_type, MemberOffset field_offset, bool is_volatile, uint32_t field_idx, @@ -6085,7 +6087,7 @@ class HStaticFieldGet FINAL : public HExpression<1> { const FieldInfo& GetFieldInfo() const { return field_info_; } MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); } - Primitive::Type GetFieldType() const { return field_info_.GetFieldType(); } + DataType::Type GetFieldType() const { return field_info_.GetFieldType(); } bool IsVolatile() const { return field_info_.IsVolatile(); } DECLARE_INSTRUCTION(StaticFieldGet); @@ -6101,7 +6103,7 @@ class HStaticFieldSet FINAL : public HTemplateInstruction<2> { HStaticFieldSet(HInstruction* cls, HInstruction* value, ArtField* field, - Primitive::Type field_type, + DataType::Type field_type, MemberOffset field_offset, bool is_volatile, uint32_t field_idx, @@ -6123,7 +6125,7 @@ class HStaticFieldSet FINAL : public HTemplateInstruction<2> { const FieldInfo& GetFieldInfo() const { return field_info_; } MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); } - Primitive::Type GetFieldType() const { return field_info_.GetFieldType(); } + DataType::Type GetFieldType() const { return field_info_.GetFieldType(); } bool IsVolatile() const { return field_info_.IsVolatile(); } HInstruction* GetValue() const { return InputAt(1); } @@ -6146,7 +6148,7 @@ class HStaticFieldSet FINAL : public HTemplateInstruction<2> { class HUnresolvedInstanceFieldGet FINAL : public HExpression<1> { public: HUnresolvedInstanceFieldGet(HInstruction* obj, - Primitive::Type field_type, + DataType::Type field_type, uint32_t field_index, uint32_t dex_pc) : HExpression(field_type, SideEffects::AllExceptGCDependency(), dex_pc), @@ -6157,7 +6159,7 @@ class HUnresolvedInstanceFieldGet FINAL : public HExpression<1> { bool NeedsEnvironment() const OVERRIDE { return true; } bool CanThrow() const OVERRIDE { return true; } - Primitive::Type GetFieldType() const { return GetType(); } + DataType::Type GetFieldType() const { return GetType(); } uint32_t GetFieldIndex() const { return field_index_; } DECLARE_INSTRUCTION(UnresolvedInstanceFieldGet); @@ -6172,13 +6174,13 @@ class HUnresolvedInstanceFieldSet FINAL : public HTemplateInstruction<2> { public: HUnresolvedInstanceFieldSet(HInstruction* obj, HInstruction* value, - Primitive::Type field_type, + DataType::Type field_type, uint32_t field_index, uint32_t dex_pc) : HTemplateInstruction(SideEffects::AllExceptGCDependency(), dex_pc), field_index_(field_index) { SetPackedField<FieldTypeField>(field_type); - DCHECK_EQ(Primitive::PrimitiveKind(field_type), Primitive::PrimitiveKind(value->GetType())); + DCHECK_EQ(DataType::Kind(field_type), DataType::Kind(value->GetType())); SetRawInputAt(0, obj); SetRawInputAt(1, value); } @@ -6186,7 +6188,7 @@ class HUnresolvedInstanceFieldSet FINAL : public HTemplateInstruction<2> { bool NeedsEnvironment() const OVERRIDE { return true; } bool CanThrow() const OVERRIDE { return true; } - Primitive::Type GetFieldType() const { return GetPackedField<FieldTypeField>(); } + DataType::Type GetFieldType() const { return GetPackedField<FieldTypeField>(); } uint32_t GetFieldIndex() const { return field_index_; } DECLARE_INSTRUCTION(UnresolvedInstanceFieldSet); @@ -6194,12 +6196,12 @@ class HUnresolvedInstanceFieldSet FINAL : public HTemplateInstruction<2> { private: 
static constexpr size_t kFieldFieldType = HInstruction::kNumberOfGenericPackedBits; static constexpr size_t kFieldFieldTypeSize = - MinimumBitsToStore(static_cast<size_t>(Primitive::kPrimLast)); + MinimumBitsToStore(static_cast<size_t>(DataType::Type::kLast)); static constexpr size_t kNumberOfUnresolvedStaticFieldSetPackedBits = kFieldFieldType + kFieldFieldTypeSize; static_assert(kNumberOfUnresolvedStaticFieldSetPackedBits <= HInstruction::kMaxNumberOfPackedBits, "Too many packed fields."); - using FieldTypeField = BitField<Primitive::Type, kFieldFieldType, kFieldFieldTypeSize>; + using FieldTypeField = BitField<DataType::Type, kFieldFieldType, kFieldFieldTypeSize>; const uint32_t field_index_; @@ -6208,7 +6210,7 @@ class HUnresolvedInstanceFieldSet FINAL : public HTemplateInstruction<2> { class HUnresolvedStaticFieldGet FINAL : public HExpression<0> { public: - HUnresolvedStaticFieldGet(Primitive::Type field_type, + HUnresolvedStaticFieldGet(DataType::Type field_type, uint32_t field_index, uint32_t dex_pc) : HExpression(field_type, SideEffects::AllExceptGCDependency(), dex_pc), @@ -6218,7 +6220,7 @@ class HUnresolvedStaticFieldGet FINAL : public HExpression<0> { bool NeedsEnvironment() const OVERRIDE { return true; } bool CanThrow() const OVERRIDE { return true; } - Primitive::Type GetFieldType() const { return GetType(); } + DataType::Type GetFieldType() const { return GetType(); } uint32_t GetFieldIndex() const { return field_index_; } DECLARE_INSTRUCTION(UnresolvedStaticFieldGet); @@ -6232,20 +6234,20 @@ class HUnresolvedStaticFieldGet FINAL : public HExpression<0> { class HUnresolvedStaticFieldSet FINAL : public HTemplateInstruction<1> { public: HUnresolvedStaticFieldSet(HInstruction* value, - Primitive::Type field_type, + DataType::Type field_type, uint32_t field_index, uint32_t dex_pc) : HTemplateInstruction(SideEffects::AllExceptGCDependency(), dex_pc), field_index_(field_index) { SetPackedField<FieldTypeField>(field_type); - DCHECK_EQ(Primitive::PrimitiveKind(field_type), Primitive::PrimitiveKind(value->GetType())); + DCHECK_EQ(DataType::Kind(field_type), DataType::Kind(value->GetType())); SetRawInputAt(0, value); } bool NeedsEnvironment() const OVERRIDE { return true; } bool CanThrow() const OVERRIDE { return true; } - Primitive::Type GetFieldType() const { return GetPackedField<FieldTypeField>(); } + DataType::Type GetFieldType() const { return GetPackedField<FieldTypeField>(); } uint32_t GetFieldIndex() const { return field_index_; } DECLARE_INSTRUCTION(UnresolvedStaticFieldSet); @@ -6253,12 +6255,12 @@ class HUnresolvedStaticFieldSet FINAL : public HTemplateInstruction<1> { private: static constexpr size_t kFieldFieldType = HInstruction::kNumberOfGenericPackedBits; static constexpr size_t kFieldFieldTypeSize = - MinimumBitsToStore(static_cast<size_t>(Primitive::kPrimLast)); + MinimumBitsToStore(static_cast<size_t>(DataType::Type::kLast)); static constexpr size_t kNumberOfUnresolvedStaticFieldSetPackedBits = kFieldFieldType + kFieldFieldTypeSize; static_assert(kNumberOfUnresolvedStaticFieldSetPackedBits <= HInstruction::kMaxNumberOfPackedBits, "Too many packed fields."); - using FieldTypeField = BitField<Primitive::Type, kFieldFieldType, kFieldFieldTypeSize>; + using FieldTypeField = BitField<DataType::Type, kFieldFieldType, kFieldFieldTypeSize>; const uint32_t field_index_; @@ -6269,7 +6271,7 @@ class HUnresolvedStaticFieldSet FINAL : public HTemplateInstruction<1> { class HLoadException FINAL : public HExpression<0> { public: explicit HLoadException(uint32_t dex_pc = 
kNoDexPc) - : HExpression(Primitive::kPrimNot, SideEffects::None(), dex_pc) {} + : HExpression(DataType::Type::kReference, SideEffects::None(), dex_pc) {} bool CanBeNull() const OVERRIDE { return false; } @@ -6335,7 +6337,7 @@ class HInstanceOf FINAL : public HExpression<2> { HLoadClass* constant, TypeCheckKind check_kind, uint32_t dex_pc) - : HExpression(Primitive::kPrimBoolean, + : HExpression(DataType::Type::kBool, SideEffectsForArchRuntimeCalls(check_kind), dex_pc) { SetPackedField<TypeCheckKindField>(check_kind); @@ -6386,11 +6388,11 @@ class HInstanceOf FINAL : public HExpression<2> { class HBoundType FINAL : public HExpression<1> { public: explicit HBoundType(HInstruction* input, uint32_t dex_pc = kNoDexPc) - : HExpression(Primitive::kPrimNot, SideEffects::None(), dex_pc), + : HExpression(DataType::Type::kReference, SideEffects::None(), dex_pc), upper_bound_(ReferenceTypeInfo::CreateInvalid()) { SetPackedFlag<kFlagUpperCanBeNull>(true); SetPackedFlag<kFlagCanBeNull>(true); - DCHECK_EQ(input->GetType(), Primitive::kPrimNot); + DCHECK_EQ(input->GetType(), DataType::Type::kReference); SetRawInputAt(0, input); } @@ -6761,7 +6763,7 @@ class MoveOperands : public ArenaObject<kArenaAllocMoveOperands> { public: MoveOperands(Location source, Location destination, - Primitive::Type type, + DataType::Type type, HInstruction* instruction) : source_(source), destination_(destination), type_(type), instruction_(instruction) {} @@ -6811,10 +6813,10 @@ class MoveOperands : public ArenaObject<kArenaAllocMoveOperands> { return source_.IsInvalid(); } - Primitive::Type GetType() const { return type_; } + DataType::Type GetType() const { return type_; } bool Is64BitMove() const { - return Primitive::Is64BitType(type_); + return DataType::Is64BitType(type_); } HInstruction* GetInstruction() const { return instruction_; } @@ -6823,7 +6825,7 @@ class MoveOperands : public ArenaObject<kArenaAllocMoveOperands> { Location source_; Location destination_; // The type this move is for. - Primitive::Type type_; + DataType::Type type_; // The instruction this move is assocatied with. Null when this move is // for moving an input in the expected locations of user (including a phi user). // This is only used in debug mode, to ensure we do not connect interval siblings @@ -6845,7 +6847,7 @@ class HParallelMove FINAL : public HTemplateInstruction<0> { void AddMove(Location source, Location destination, - Primitive::Type type, + DataType::Type type, HInstruction* instruction) { DCHECK(source.IsValid()); DCHECK(destination.IsValid()); diff --git a/compiler/optimizing/nodes_mips.h b/compiler/optimizing/nodes_mips.h index 8e439d9621..80e652eaa7 100644 --- a/compiler/optimizing/nodes_mips.h +++ b/compiler/optimizing/nodes_mips.h @@ -24,7 +24,7 @@ class HMipsComputeBaseMethodAddress : public HExpression<0> { public: // Treat the value as an int32_t, but it is really a 32 bit native pointer. 
HMipsComputeBaseMethodAddress() - : HExpression(Primitive::kPrimInt, SideEffects::None(), kNoDexPc) {} + : HExpression(DataType::Type::kInt32, SideEffects::None(), kNoDexPc) {} bool CanBeMoved() const OVERRIDE { return true; } diff --git a/compiler/optimizing/nodes_shared.cc b/compiler/optimizing/nodes_shared.cc index f6d33f015f..f982523634 100644 --- a/compiler/optimizing/nodes_shared.cc +++ b/compiler/optimizing/nodes_shared.cc @@ -42,20 +42,20 @@ void HDataProcWithShifterOp::GetOpInfoFromInstruction(HInstruction* instruction, *shift_amount = instruction->AsUShr()->GetRight()->AsIntConstant()->GetValue(); } else { DCHECK(instruction->IsTypeConversion()); - Primitive::Type result_type = instruction->AsTypeConversion()->GetResultType(); - Primitive::Type input_type = instruction->AsTypeConversion()->GetInputType(); - int result_size = Primitive::ComponentSize(result_type); - int input_size = Primitive::ComponentSize(input_type); + DataType::Type result_type = instruction->AsTypeConversion()->GetResultType(); + DataType::Type input_type = instruction->AsTypeConversion()->GetInputType(); + int result_size = DataType::Size(result_type); + int input_size = DataType::Size(input_type); int min_size = std::min(result_size, input_size); - if (result_type == Primitive::kPrimInt && input_type == Primitive::kPrimLong) { + if (result_type == DataType::Type::kInt32 && input_type == DataType::Type::kInt64) { // There is actually nothing to do. On ARM the high register from the // pair will be ignored. On ARM64 the register will be used as a W // register, discarding the top bits. This is represented by the // default encoding 'LSL 0'. *op_kind = kLSL; *shift_amount = 0; - } else if (result_type == Primitive::kPrimChar || - (input_type == Primitive::kPrimChar && input_size < result_size)) { + } else if (result_type == DataType::Type::kUint16 || + (input_type == DataType::Type::kUint16 && input_size < result_size)) { *op_kind = kUXTH; } else { switch (min_size) { diff --git a/compiler/optimizing/nodes_shared.h b/compiler/optimizing/nodes_shared.h index 075a816f3f..14cbf85c3f 100644 --- a/compiler/optimizing/nodes_shared.h +++ b/compiler/optimizing/nodes_shared.h @@ -26,7 +26,7 @@ namespace art { class HMultiplyAccumulate FINAL : public HExpression<3> { public: - HMultiplyAccumulate(Primitive::Type type, + HMultiplyAccumulate(DataType::Type type, InstructionKind op, HInstruction* accumulator, HInstruction* mul_left, @@ -60,11 +60,11 @@ class HMultiplyAccumulate FINAL : public HExpression<3> { class HBitwiseNegatedRight FINAL : public HBinaryOperation { public: - HBitwiseNegatedRight(Primitive::Type result_type, - InstructionKind op, - HInstruction* left, - HInstruction* right, - uint32_t dex_pc = kNoDexPc) + HBitwiseNegatedRight(DataType::Type result_type, + InstructionKind op, + HInstruction* left, + HInstruction* right, + uint32_t dex_pc = kNoDexPc) : HBinaryOperation(result_type, left, right, SideEffects::None(), dex_pc), op_kind_(op) { DCHECK(op == HInstruction::kAnd || op == HInstruction::kOr || op == HInstruction::kXor) << op; @@ -122,14 +122,14 @@ class HBitwiseNegatedRight FINAL : public HBinaryOperation { // This instruction computes an intermediate address pointing in the 'middle' of an object. The // result pointer cannot be handled by GC, so extra care is taken to make sure that this value is // never used across anything that can trigger GC. -// The result of this instruction is not a pointer in the sense of `Primitive::kPrimNot`. So we -// represent it by the type `Primitive::kPrimInt`. 
+// The result of this instruction is not a pointer in the sense of `DataType::Type::kReference`. +// So we represent it by the type `DataType::Type::kInt32`. class HIntermediateAddress FINAL : public HExpression<2> { public: HIntermediateAddress(HInstruction* base_address, HInstruction* offset, uint32_t dex_pc) - : HExpression(Primitive::kPrimInt, SideEffects::DependsOnGC(), dex_pc) { - DCHECK_EQ(Primitive::ComponentSize(Primitive::kPrimInt), - Primitive::ComponentSize(Primitive::kPrimNot)) + : HExpression(DataType::Type::kInt32, SideEffects::DependsOnGC(), dex_pc) { + DCHECK_EQ(DataType::Size(DataType::Type::kInt32), + DataType::Size(DataType::Type::kReference)) << "kPrimInt and kPrimNot have different sizes."; SetRawInputAt(0, base_address); SetRawInputAt(1, offset); @@ -171,7 +171,7 @@ class HIntermediateAddressIndex FINAL : public HExpression<3> { public: HIntermediateAddressIndex( HInstruction* index, HInstruction* offset, HInstruction* shift, uint32_t dex_pc) - : HExpression(Primitive::kPrimInt, SideEffects::None(), dex_pc) { + : HExpression(DataType::Type::kInt32, SideEffects::None(), dex_pc) { SetRawInputAt(0, index); SetRawInputAt(1, offset); SetRawInputAt(2, shift); @@ -222,7 +222,7 @@ class HDataProcWithShifterOp FINAL : public HExpression<2> { uint32_t dex_pc = kNoDexPc) : HExpression(instr->GetType(), SideEffects::None(), dex_pc), instr_kind_(instr->GetKind()), op_kind_(op), - shift_amount_(shift & (instr->GetType() == Primitive::kPrimInt + shift_amount_(shift & (instr->GetType() == DataType::Type::kInt32 ? kMaxIntShiftDistance : kMaxLongShiftDistance)) { DCHECK(!instr->HasSideEffects()); diff --git a/compiler/optimizing/nodes_test.cc b/compiler/optimizing/nodes_test.cc index f3a78a064e..ada6177bfb 100644 --- a/compiler/optimizing/nodes_test.cc +++ b/compiler/optimizing/nodes_test.cc @@ -36,7 +36,7 @@ TEST(Node, RemoveInstruction) { graph->AddBlock(entry); graph->SetEntryBlock(entry); HInstruction* parameter = new (&allocator) HParameterValue( - graph->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimNot); + graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); entry->AddInstruction(parameter); entry->AddInstruction(new (&allocator) HGoto()); @@ -79,9 +79,9 @@ TEST(Node, InsertInstruction) { graph->AddBlock(entry); graph->SetEntryBlock(entry); HInstruction* parameter1 = new (&allocator) HParameterValue( - graph->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimNot); + graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); HInstruction* parameter2 = new (&allocator) HParameterValue( - graph->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimNot); + graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); entry->AddInstruction(parameter1); entry->AddInstruction(parameter2); entry->AddInstruction(new (&allocator) HExit()); @@ -107,7 +107,7 @@ TEST(Node, AddInstruction) { graph->AddBlock(entry); graph->SetEntryBlock(entry); HInstruction* parameter = new (&allocator) HParameterValue( - graph->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimNot); + graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); entry->AddInstruction(parameter); ASSERT_FALSE(parameter->HasUses()); @@ -128,7 +128,7 @@ TEST(Node, ParentEnvironment) { graph->AddBlock(entry); graph->SetEntryBlock(entry); HInstruction* parameter1 = new (&allocator) HParameterValue( - graph->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimNot); + graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); HInstruction* with_environment =
new (&allocator) HNullCheck(parameter1, 0); entry->AddInstruction(parameter1); entry->AddInstruction(with_environment); diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h index 1488b7086a..0aac260839 100644 --- a/compiler/optimizing/nodes_vector.h +++ b/compiler/optimizing/nodes_vector.h @@ -65,10 +65,10 @@ class HVecOperation : public HVariableInputSizeInstruction { public: // A SIMD operation looks like a FPU location. // TODO: we could introduce SIMD types in HIR. - static constexpr Primitive::Type kSIMDType = Primitive::kPrimDouble; + static constexpr DataType::Type kSIMDType = DataType::Type::kFloat64; HVecOperation(ArenaAllocator* arena, - Primitive::Type packed_type, + DataType::Type packed_type, SideEffects side_effects, size_t number_of_inputs, size_t vector_length, @@ -90,16 +90,16 @@ class HVecOperation : public HVariableInputSizeInstruction { // Returns the number of bytes in a full vector. size_t GetVectorNumberOfBytes() const { - return vector_length_ * Primitive::ComponentSize(GetPackedType()); + return vector_length_ * DataType::Size(GetPackedType()); } // Returns the type of the vector operation. - Primitive::Type GetType() const OVERRIDE { + DataType::Type GetType() const OVERRIDE { return kSIMDType; } // Returns the true component type packed in a vector. - Primitive::Type GetPackedType() const { + DataType::Type GetPackedType() const { return GetPackedField<TypeField>(); } @@ -122,10 +122,10 @@ class HVecOperation : public HVariableInputSizeInstruction { // Additional packed bits. static constexpr size_t kFieldType = HInstruction::kNumberOfGenericPackedBits; static constexpr size_t kFieldTypeSize = - MinimumBitsToStore(static_cast<size_t>(Primitive::kPrimLast)); + MinimumBitsToStore(static_cast<size_t>(DataType::Type::kLast)); static constexpr size_t kNumberOfVectorOpPackedBits = kFieldType + kFieldTypeSize; static_assert(kNumberOfVectorOpPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields."); - using TypeField = BitField<Primitive::Type, kFieldType, kFieldTypeSize>; + using TypeField = BitField<DataType::Type, kFieldType, kFieldTypeSize>; private: const size_t vector_length_; @@ -138,7 +138,7 @@ class HVecUnaryOperation : public HVecOperation { public: HVecUnaryOperation(ArenaAllocator* arena, HInstruction* input, - Primitive::Type packed_type, + DataType::Type packed_type, size_t vector_length, uint32_t dex_pc) : HVecOperation(arena, @@ -164,7 +164,7 @@ class HVecBinaryOperation : public HVecOperation { HVecBinaryOperation(ArenaAllocator* arena, HInstruction* left, HInstruction* right, - Primitive::Type packed_type, + DataType::Type packed_type, size_t vector_length, uint32_t dex_pc) : HVecOperation(arena, @@ -192,13 +192,13 @@ class HVecBinaryOperation : public HVecOperation { class HVecMemoryOperation : public HVecOperation { public: HVecMemoryOperation(ArenaAllocator* arena, - Primitive::Type packed_type, + DataType::Type packed_type, SideEffects side_effects, size_t number_of_inputs, size_t vector_length, uint32_t dex_pc) : HVecOperation(arena, packed_type, side_effects, number_of_inputs, vector_length, dex_pc), - alignment_(Primitive::ComponentSize(packed_type), 0) { + alignment_(DataType::Size(packed_type), 0) { DCHECK_GE(number_of_inputs, 2u); } @@ -224,21 +224,21 @@ class HVecMemoryOperation : public HVecOperation { }; // Packed type consistency checker ("same vector length" integral types may mix freely). 
-inline static bool HasConsistentPackedTypes(HInstruction* input, Primitive::Type type) { +inline static bool HasConsistentPackedTypes(HInstruction* input, DataType::Type type) { if (input->IsPhi()) { return input->GetType() == HVecOperation::kSIMDType; // carries SIMD } DCHECK(input->IsVecOperation()); - Primitive::Type input_type = input->AsVecOperation()->GetPackedType(); + DataType::Type input_type = input->AsVecOperation()->GetPackedType(); switch (input_type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - return type == Primitive::kPrimBoolean || - type == Primitive::kPrimByte; - case Primitive::kPrimChar: - case Primitive::kPrimShort: - return type == Primitive::kPrimChar || - type == Primitive::kPrimShort; + case DataType::Type::kBool: + case DataType::Type::kInt8: + return type == DataType::Type::kBool || + type == DataType::Type::kInt8; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + return type == DataType::Type::kUint16 || + type == DataType::Type::kInt16; default: return type == input_type; } @@ -254,7 +254,7 @@ class HVecReplicateScalar FINAL : public HVecUnaryOperation { public: HVecReplicateScalar(ArenaAllocator* arena, HInstruction* scalar, - Primitive::Type packed_type, + DataType::Type packed_type, size_t vector_length, uint32_t dex_pc = kNoDexPc) : HVecUnaryOperation(arena, scalar, packed_type, vector_length, dex_pc) { @@ -279,7 +279,7 @@ class HVecExtractScalar FINAL : public HVecUnaryOperation { public: HVecExtractScalar(ArenaAllocator* arena, HInstruction* input, - Primitive::Type packed_type, + DataType::Type packed_type, size_t vector_length, size_t index, uint32_t dex_pc = kNoDexPc) @@ -290,7 +290,7 @@ class HVecExtractScalar FINAL : public HVecUnaryOperation { } // Yields a single component in the vector. 
- Primitive::Type GetType() const OVERRIDE { + DataType::Type GetType() const OVERRIDE { return GetPackedType(); } @@ -317,7 +317,7 @@ class HVecReduce FINAL : public HVecUnaryOperation { HVecReduce(ArenaAllocator* arena, HInstruction* input, - Primitive::Type packed_type, + DataType::Type packed_type, size_t vector_length, ReductionKind kind, uint32_t dex_pc = kNoDexPc) @@ -350,7 +350,7 @@ class HVecCnv FINAL : public HVecUnaryOperation { public: HVecCnv(ArenaAllocator* arena, HInstruction* input, - Primitive::Type packed_type, + DataType::Type packed_type, size_t vector_length, uint32_t dex_pc = kNoDexPc) : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc) { @@ -358,8 +358,8 @@ class HVecCnv FINAL : public HVecUnaryOperation { DCHECK_NE(GetInputType(), GetResultType()); // actual convert } - Primitive::Type GetInputType() const { return InputAt(0)->AsVecOperation()->GetPackedType(); } - Primitive::Type GetResultType() const { return GetPackedType(); } + DataType::Type GetInputType() const { return InputAt(0)->AsVecOperation()->GetPackedType(); } + DataType::Type GetResultType() const { return GetPackedType(); } bool CanBeMoved() const OVERRIDE { return true; } @@ -375,7 +375,7 @@ class HVecNeg FINAL : public HVecUnaryOperation { public: HVecNeg(ArenaAllocator* arena, HInstruction* input, - Primitive::Type packed_type, + DataType::Type packed_type, size_t vector_length, uint32_t dex_pc = kNoDexPc) : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc) { @@ -396,7 +396,7 @@ class HVecAbs FINAL : public HVecUnaryOperation { public: HVecAbs(ArenaAllocator* arena, HInstruction* input, - Primitive::Type packed_type, + DataType::Type packed_type, size_t vector_length, uint32_t dex_pc = kNoDexPc) : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc) { @@ -418,7 +418,7 @@ class HVecNot FINAL : public HVecUnaryOperation { public: HVecNot(ArenaAllocator* arena, HInstruction* input, - Primitive::Type packed_type, + DataType::Type packed_type, size_t vector_length, uint32_t dex_pc = kNoDexPc) : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc) { @@ -444,7 +444,7 @@ class HVecAdd FINAL : public HVecBinaryOperation { HVecAdd(ArenaAllocator* arena, HInstruction* left, HInstruction* right, - Primitive::Type packed_type, + DataType::Type packed_type, size_t vector_length, uint32_t dex_pc = kNoDexPc) : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) { @@ -469,7 +469,7 @@ class HVecHalvingAdd FINAL : public HVecBinaryOperation { HVecHalvingAdd(ArenaAllocator* arena, HInstruction* left, HInstruction* right, - Primitive::Type packed_type, + DataType::Type packed_type, size_t vector_length, bool is_unsigned, bool is_rounded, @@ -513,7 +513,7 @@ class HVecSub FINAL : public HVecBinaryOperation { HVecSub(ArenaAllocator* arena, HInstruction* left, HInstruction* right, - Primitive::Type packed_type, + DataType::Type packed_type, size_t vector_length, uint32_t dex_pc = kNoDexPc) : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) { @@ -536,7 +536,7 @@ class HVecMul FINAL : public HVecBinaryOperation { HVecMul(ArenaAllocator* arena, HInstruction* left, HInstruction* right, - Primitive::Type packed_type, + DataType::Type packed_type, size_t vector_length, uint32_t dex_pc = kNoDexPc) : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) { @@ -559,7 +559,7 @@ class HVecDiv FINAL : public HVecBinaryOperation { HVecDiv(ArenaAllocator* arena, HInstruction* left, HInstruction* 
right, - Primitive::Type packed_type, + DataType::Type packed_type, size_t vector_length, uint32_t dex_pc = kNoDexPc) : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) { @@ -582,7 +582,7 @@ class HVecMin FINAL : public HVecBinaryOperation { HVecMin(ArenaAllocator* arena, HInstruction* left, HInstruction* right, - Primitive::Type packed_type, + DataType::Type packed_type, size_t vector_length, bool is_unsigned, uint32_t dex_pc = kNoDexPc) @@ -620,7 +620,7 @@ class HVecMax FINAL : public HVecBinaryOperation { HVecMax(ArenaAllocator* arena, HInstruction* left, HInstruction* right, - Primitive::Type packed_type, + DataType::Type packed_type, size_t vector_length, bool is_unsigned, uint32_t dex_pc = kNoDexPc) @@ -658,7 +658,7 @@ class HVecAnd FINAL : public HVecBinaryOperation { HVecAnd(ArenaAllocator* arena, HInstruction* left, HInstruction* right, - Primitive::Type packed_type, + DataType::Type packed_type, size_t vector_length, uint32_t dex_pc = kNoDexPc) : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) { @@ -680,7 +680,7 @@ class HVecAndNot FINAL : public HVecBinaryOperation { HVecAndNot(ArenaAllocator* arena, HInstruction* left, HInstruction* right, - Primitive::Type packed_type, + DataType::Type packed_type, size_t vector_length, uint32_t dex_pc = kNoDexPc) : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) { @@ -702,7 +702,7 @@ class HVecOr FINAL : public HVecBinaryOperation { HVecOr(ArenaAllocator* arena, HInstruction* left, HInstruction* right, - Primitive::Type packed_type, + DataType::Type packed_type, size_t vector_length, uint32_t dex_pc = kNoDexPc) : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) { @@ -724,7 +724,7 @@ class HVecXor FINAL : public HVecBinaryOperation { HVecXor(ArenaAllocator* arena, HInstruction* left, HInstruction* right, - Primitive::Type packed_type, + DataType::Type packed_type, size_t vector_length, uint32_t dex_pc = kNoDexPc) : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) { @@ -746,7 +746,7 @@ class HVecShl FINAL : public HVecBinaryOperation { HVecShl(ArenaAllocator* arena, HInstruction* left, HInstruction* right, - Primitive::Type packed_type, + DataType::Type packed_type, size_t vector_length, uint32_t dex_pc = kNoDexPc) : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) { @@ -768,7 +768,7 @@ class HVecShr FINAL : public HVecBinaryOperation { HVecShr(ArenaAllocator* arena, HInstruction* left, HInstruction* right, - Primitive::Type packed_type, + DataType::Type packed_type, size_t vector_length, uint32_t dex_pc = kNoDexPc) : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) { @@ -790,7 +790,7 @@ class HVecUShr FINAL : public HVecBinaryOperation { HVecUShr(ArenaAllocator* arena, HInstruction* left, HInstruction* right, - Primitive::Type packed_type, + DataType::Type packed_type, size_t vector_length, uint32_t dex_pc = kNoDexPc) : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) { @@ -816,7 +816,7 @@ class HVecSetScalars FINAL : public HVecOperation { public: HVecSetScalars(ArenaAllocator* arena, HInstruction* scalars[], - Primitive::Type packed_type, + DataType::Type packed_type, size_t vector_length, size_t number_of_scalars, uint32_t dex_pc = kNoDexPc) @@ -851,7 +851,7 @@ class HVecMultiplyAccumulate FINAL : public HVecOperation { HInstruction* accumulator, HInstruction* mul_left, HInstruction* mul_right, - Primitive::Type packed_type, + 
DataType::Type packed_type, size_t vector_length, uint32_t dex_pc = kNoDexPc) : HVecOperation(arena, @@ -900,7 +900,7 @@ class HVecSADAccumulate FINAL : public HVecOperation { HInstruction* accumulator, HInstruction* sad_left, HInstruction* sad_right, - Primitive::Type packed_type, + DataType::Type packed_type, size_t vector_length, uint32_t dex_pc = kNoDexPc) : HVecOperation(arena, @@ -932,7 +932,7 @@ class HVecLoad FINAL : public HVecMemoryOperation { HVecLoad(ArenaAllocator* arena, HInstruction* base, HInstruction* index, - Primitive::Type packed_type, + DataType::Type packed_type, size_t vector_length, bool is_string_char_at, uint32_t dex_pc = kNoDexPc) @@ -976,7 +976,7 @@ class HVecStore FINAL : public HVecMemoryOperation { HInstruction* base, HInstruction* index, HInstruction* value, - Primitive::Type packed_type, + DataType::Type packed_type, size_t vector_length, uint32_t dex_pc = kNoDexPc) : HVecMemoryOperation(arena, diff --git a/compiler/optimizing/nodes_vector_test.cc b/compiler/optimizing/nodes_vector_test.cc index 5a56a2c210..3acdb20b32 100644 --- a/compiler/optimizing/nodes_vector_test.cc +++ b/compiler/optimizing/nodes_vector_test.cc @@ -45,7 +45,7 @@ class NodesVectorTest : public CommonCompilerTest { parameter_ = new (&allocator_) HParameterValue(graph_->GetDexFile(), dex::TypeIndex(0), 0, - Primitive::kPrimInt); + DataType::Type::kInt32); entry_block_->AddInstruction(parameter_); } @@ -119,15 +119,15 @@ TEST(NodesVector, AlignmentString) { TEST_F(NodesVectorTest, VectorOperationProperties) { HVecOperation* v0 = new (&allocator_) - HVecReplicateScalar(&allocator_, parameter_, Primitive::kPrimInt, 4); + HVecReplicateScalar(&allocator_, parameter_, DataType::Type::kInt32, 4); HVecOperation* v1 = new (&allocator_) - HVecReplicateScalar(&allocator_, parameter_, Primitive::kPrimInt, 4); + HVecReplicateScalar(&allocator_, parameter_, DataType::Type::kInt32, 4); HVecOperation* v2 = new (&allocator_) - HVecReplicateScalar(&allocator_, parameter_, Primitive::kPrimInt, 2); + HVecReplicateScalar(&allocator_, parameter_, DataType::Type::kInt32, 2); HVecOperation* v3 = new (&allocator_) - HVecReplicateScalar(&allocator_, parameter_, Primitive::kPrimShort, 4); + HVecReplicateScalar(&allocator_, parameter_, DataType::Type::kInt16, 4); HVecOperation* v4 = new (&allocator_) - HVecStore(&allocator_, parameter_, parameter_, v0, Primitive::kPrimInt, 4); + HVecStore(&allocator_, parameter_, parameter_, v0, DataType::Type::kInt32, 4); EXPECT_TRUE(v0->Equals(v0)); EXPECT_TRUE(v1->Equals(v1)); @@ -149,17 +149,17 @@ TEST_F(NodesVectorTest, VectorOperationProperties) { EXPECT_EQ(4u, v3->GetVectorLength()); EXPECT_EQ(4u, v4->GetVectorLength()); - EXPECT_EQ(Primitive::kPrimDouble, v0->GetType()); - EXPECT_EQ(Primitive::kPrimDouble, v1->GetType()); - EXPECT_EQ(Primitive::kPrimDouble, v2->GetType()); - EXPECT_EQ(Primitive::kPrimDouble, v3->GetType()); - EXPECT_EQ(Primitive::kPrimDouble, v4->GetType()); + EXPECT_EQ(DataType::Type::kFloat64, v0->GetType()); + EXPECT_EQ(DataType::Type::kFloat64, v1->GetType()); + EXPECT_EQ(DataType::Type::kFloat64, v2->GetType()); + EXPECT_EQ(DataType::Type::kFloat64, v3->GetType()); + EXPECT_EQ(DataType::Type::kFloat64, v4->GetType()); - EXPECT_EQ(Primitive::kPrimInt, v0->GetPackedType()); - EXPECT_EQ(Primitive::kPrimInt, v1->GetPackedType()); - EXPECT_EQ(Primitive::kPrimInt, v2->GetPackedType()); - EXPECT_EQ(Primitive::kPrimShort, v3->GetPackedType()); - EXPECT_EQ(Primitive::kPrimInt, v4->GetPackedType()); + EXPECT_EQ(DataType::Type::kInt32, v0->GetPackedType()); + 
EXPECT_EQ(DataType::Type::kInt32, v1->GetPackedType()); + EXPECT_EQ(DataType::Type::kInt32, v2->GetPackedType()); + EXPECT_EQ(DataType::Type::kInt16, v3->GetPackedType()); + EXPECT_EQ(DataType::Type::kInt32, v4->GetPackedType()); EXPECT_EQ(16u, v0->GetVectorNumberOfBytes()); EXPECT_EQ(16u, v1->GetVectorNumberOfBytes()); @@ -175,12 +175,12 @@ TEST_F(NodesVectorTest, VectorOperationProperties) { } TEST_F(NodesVectorTest, VectorAlignmentAndStringCharAtMatterOnLoad) { - HVecLoad* v0 = new (&allocator_) - HVecLoad(&allocator_, parameter_, parameter_, Primitive::kPrimInt, 4, /*is_string_char_at*/ false); - HVecLoad* v1 = new (&allocator_) - HVecLoad(&allocator_, parameter_, parameter_, Primitive::kPrimInt, 4, /*is_string_char_at*/ false); - HVecLoad* v2 = new (&allocator_) - HVecLoad(&allocator_, parameter_, parameter_, Primitive::kPrimInt, 4, /*is_string_char_at*/ true); + HVecLoad* v0 = new (&allocator_) HVecLoad( + &allocator_, parameter_, parameter_, DataType::Type::kInt32, 4, /*is_string_char_at*/ false); + HVecLoad* v1 = new (&allocator_) HVecLoad( + &allocator_, parameter_, parameter_, DataType::Type::kInt32, 4, /*is_string_char_at*/ false); + HVecLoad* v2 = new (&allocator_) HVecLoad( + &allocator_, parameter_, parameter_, DataType::Type::kInt32, 4, /*is_string_char_at*/ true); EXPECT_TRUE(v0->CanBeMoved()); EXPECT_TRUE(v1->CanBeMoved()); @@ -210,14 +210,14 @@ TEST_F(NodesVectorTest, VectorAlignmentAndStringCharAtMatterOnLoad) { TEST_F(NodesVectorTest, VectorSignMattersOnMin) { HVecOperation* v0 = new (&allocator_) - HVecReplicateScalar(&allocator_, parameter_, Primitive::kPrimInt, 4); + HVecReplicateScalar(&allocator_, parameter_, DataType::Type::kInt32, 4); HVecMin* v1 = new (&allocator_) - HVecMin(&allocator_, v0, v0, Primitive::kPrimInt, 4, /*is_unsigned*/ true); + HVecMin(&allocator_, v0, v0, DataType::Type::kInt32, 4, /*is_unsigned*/ true); HVecMin* v2 = new (&allocator_) - HVecMin(&allocator_, v0, v0, Primitive::kPrimInt, 4, /*is_unsigned*/ false); + HVecMin(&allocator_, v0, v0, DataType::Type::kInt32, 4, /*is_unsigned*/ false); HVecMin* v3 = new (&allocator_) - HVecMin(&allocator_, v0, v0, Primitive::kPrimInt, 2, /*is_unsigned*/ true); + HVecMin(&allocator_, v0, v0, DataType::Type::kInt32, 2, /*is_unsigned*/ true); EXPECT_FALSE(v0->CanBeMoved()); EXPECT_TRUE(v1->CanBeMoved()); @@ -238,14 +238,14 @@ TEST_F(NodesVectorTest, VectorSignMattersOnMin) { TEST_F(NodesVectorTest, VectorSignMattersOnMax) { HVecOperation* v0 = new (&allocator_) - HVecReplicateScalar(&allocator_, parameter_, Primitive::kPrimInt, 4); + HVecReplicateScalar(&allocator_, parameter_, DataType::Type::kInt32, 4); HVecMax* v1 = new (&allocator_) - HVecMax(&allocator_, v0, v0, Primitive::kPrimInt, 4, /*is_unsigned*/ true); + HVecMax(&allocator_, v0, v0, DataType::Type::kInt32, 4, /*is_unsigned*/ true); HVecMax* v2 = new (&allocator_) - HVecMax(&allocator_, v0, v0, Primitive::kPrimInt, 4, /*is_unsigned*/ false); + HVecMax(&allocator_, v0, v0, DataType::Type::kInt32, 4, /*is_unsigned*/ false); HVecMax* v3 = new (&allocator_) - HVecMax(&allocator_, v0, v0, Primitive::kPrimInt, 2, /*is_unsigned*/ true); + HVecMax(&allocator_, v0, v0, DataType::Type::kInt32, 2, /*is_unsigned*/ true); EXPECT_FALSE(v0->CanBeMoved()); EXPECT_TRUE(v1->CanBeMoved()); @@ -266,18 +266,18 @@ TEST_F(NodesVectorTest, VectorSignMattersOnMax) { TEST_F(NodesVectorTest, VectorAttributesMatterOnHalvingAdd) { HVecOperation* v0 = new (&allocator_) - HVecReplicateScalar(&allocator_, parameter_, Primitive::kPrimInt, 4); + HVecReplicateScalar(&allocator_, 
parameter_, DataType::Type::kInt32, 4); HVecHalvingAdd* v1 = new (&allocator_) HVecHalvingAdd( - &allocator_, v0, v0, Primitive::kPrimInt, 4, /*is_unsigned*/ true, /*is_rounded*/ true); + &allocator_, v0, v0, DataType::Type::kInt32, 4, /*is_unsigned*/ true, /*is_rounded*/ true); HVecHalvingAdd* v2 = new (&allocator_) HVecHalvingAdd( - &allocator_, v0, v0, Primitive::kPrimInt, 4, /*is_unsigned*/ true, /*is_rounded*/ false); + &allocator_, v0, v0, DataType::Type::kInt32, 4, /*is_unsigned*/ true, /*is_rounded*/ false); HVecHalvingAdd* v3 = new (&allocator_) HVecHalvingAdd( - &allocator_, v0, v0, Primitive::kPrimInt, 4, /*is_unsigned*/ false, /*is_rounded*/ true); + &allocator_, v0, v0, DataType::Type::kInt32, 4, /*is_unsigned*/ false, /*is_rounded*/ true); HVecHalvingAdd* v4 = new (&allocator_) HVecHalvingAdd( - &allocator_, v0, v0, Primitive::kPrimInt, 4, /*is_unsigned*/ false, /*is_rounded*/ false); + &allocator_, v0, v0, DataType::Type::kInt32, 4, /*is_unsigned*/ false, /*is_rounded*/ false); HVecHalvingAdd* v5 = new (&allocator_) HVecHalvingAdd( - &allocator_, v0, v0, Primitive::kPrimInt, 2, /*is_unsigned*/ true, /*is_rounded*/ true); + &allocator_, v0, v0, DataType::Type::kInt32, 2, /*is_unsigned*/ true, /*is_rounded*/ true); EXPECT_FALSE(v0->CanBeMoved()); EXPECT_TRUE(v1->CanBeMoved()); @@ -306,14 +306,14 @@ TEST_F(NodesVectorTest, VectorAttributesMatterOnHalvingAdd) { TEST_F(NodesVectorTest, VectorOperationMattersOnMultiplyAccumulate) { HVecOperation* v0 = new (&allocator_) - HVecReplicateScalar(&allocator_, parameter_, Primitive::kPrimInt, 4); + HVecReplicateScalar(&allocator_, parameter_, DataType::Type::kInt32, 4); - HVecMultiplyAccumulate* v1 = new (&allocator_) - HVecMultiplyAccumulate(&allocator_, HInstruction::kAdd, v0, v0, v0, Primitive::kPrimInt, 4); - HVecMultiplyAccumulate* v2 = new (&allocator_) - HVecMultiplyAccumulate(&allocator_, HInstruction::kSub, v0, v0, v0, Primitive::kPrimInt, 4); - HVecMultiplyAccumulate* v3 = new (&allocator_) - HVecMultiplyAccumulate(&allocator_, HInstruction::kAdd, v0, v0, v0, Primitive::kPrimInt, 2); + HVecMultiplyAccumulate* v1 = new (&allocator_) HVecMultiplyAccumulate( + &allocator_, HInstruction::kAdd, v0, v0, v0, DataType::Type::kInt32, 4); + HVecMultiplyAccumulate* v2 = new (&allocator_) HVecMultiplyAccumulate( + &allocator_, HInstruction::kSub, v0, v0, v0, DataType::Type::kInt32, 4); + HVecMultiplyAccumulate* v3 = new (&allocator_) HVecMultiplyAccumulate( + &allocator_, HInstruction::kAdd, v0, v0, v0, DataType::Type::kInt32, 2); EXPECT_FALSE(v0->CanBeMoved()); EXPECT_TRUE(v1->CanBeMoved()); @@ -334,14 +334,14 @@ TEST_F(NodesVectorTest, VectorOperationMattersOnMultiplyAccumulate) { TEST_F(NodesVectorTest, VectorKindMattersOnReduce) { HVecOperation* v0 = new (&allocator_) - HVecReplicateScalar(&allocator_, parameter_, Primitive::kPrimInt, 4); + HVecReplicateScalar(&allocator_, parameter_, DataType::Type::kInt32, 4); HVecReduce* v1 = new (&allocator_) HVecReduce( - &allocator_, v0, Primitive::kPrimInt, 4, HVecReduce::kSum); + &allocator_, v0, DataType::Type::kInt32, 4, HVecReduce::kSum); HVecReduce* v2 = new (&allocator_) HVecReduce( - &allocator_, v0, Primitive::kPrimInt, 4, HVecReduce::kMin); + &allocator_, v0, DataType::Type::kInt32, 4, HVecReduce::kMin); HVecReduce* v3 = new (&allocator_) HVecReduce( - &allocator_, v0, Primitive::kPrimInt, 4, HVecReduce::kMax); + &allocator_, v0, DataType::Type::kInt32, 4, HVecReduce::kMax); EXPECT_FALSE(v0->CanBeMoved()); EXPECT_TRUE(v1->CanBeMoved()); diff --git a/compiler/optimizing/nodes_x86.h 
b/compiler/optimizing/nodes_x86.h index 75893c3129..22e92eab31 100644 --- a/compiler/optimizing/nodes_x86.h +++ b/compiler/optimizing/nodes_x86.h @@ -24,7 +24,7 @@ class HX86ComputeBaseMethodAddress FINAL : public HExpression<0> { public: // Treat the value as an int32_t, but it is really a 32 bit native pointer. HX86ComputeBaseMethodAddress() - : HExpression(Primitive::kPrimInt, SideEffects::None(), kNoDexPc) {} + : HExpression(DataType::Type::kInt32, SideEffects::None(), kNoDexPc) {} bool CanBeMoved() const OVERRIDE { return true; } @@ -61,12 +61,12 @@ class HX86LoadFromConstantTable FINAL : public HExpression<2> { // Version of HNeg with access to the constant table for FP types. class HX86FPNeg FINAL : public HExpression<2> { public: - HX86FPNeg(Primitive::Type result_type, + HX86FPNeg(DataType::Type result_type, HInstruction* input, HX86ComputeBaseMethodAddress* method_base, uint32_t dex_pc) : HExpression(result_type, SideEffects::None(), dex_pc) { - DCHECK(Primitive::IsFloatingPointType(result_type)); + DCHECK(DataType::IsFloatingPointType(result_type)); SetRawInputAt(0, input); SetRawInputAt(1, method_base); } diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc index 7451196677..aeb2b03787 100644 --- a/compiler/optimizing/optimizing_compiler.cc +++ b/compiler/optimizing/optimizing_compiler.cc @@ -990,8 +990,6 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena, HGraphBuilder builder(graph, &dex_compilation_unit, &dex_compilation_unit, - &dex_file, - *code_item, compiler_driver, codegen.get(), compilation_stats_.get(), diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h index 08493fa177..33f1a4affe 100644 --- a/compiler/optimizing/optimizing_unit_test.h +++ b/compiler/optimizing/optimizing_unit_test.h @@ -50,7 +50,8 @@ LiveInterval* BuildInterval(const size_t ranges[][2], ArenaAllocator* allocator, int reg = -1, HInstruction* defined_by = nullptr) { - LiveInterval* interval = LiveInterval::MakeInterval(allocator, Primitive::kPrimInt, defined_by); + LiveInterval* interval = + LiveInterval::MakeInterval(allocator, DataType::Type::kInt32, defined_by); if (defined_by != nullptr) { defined_by->SetLiveInterval(interval); } @@ -88,7 +89,7 @@ inline HGraph* CreateGraph(ArenaAllocator* allocator) { // Create a control-flow graph from Dex instructions. inline HGraph* CreateCFG(ArenaAllocator* allocator, const uint16_t* data, - Primitive::Type return_type = Primitive::kPrimInt) { + DataType::Type return_type = DataType::Type::kInt32) { const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data); HGraph* graph = CreateGraph(allocator); diff --git a/compiler/optimizing/parallel_move_resolver.cc b/compiler/optimizing/parallel_move_resolver.cc index be470ccb7d..2036b4a370 100644 --- a/compiler/optimizing/parallel_move_resolver.cc +++ b/compiler/optimizing/parallel_move_resolver.cc @@ -457,7 +457,7 @@ void ParallelMoveResolverNoSwap::PerformMove(size_t index) { DCHECK_NE(kind, Location::kConstant); Location scratch = AllocateScratchLocationFor(kind); // We only care about the move size. - Primitive::Type type = move->Is64BitMove() ? Primitive::kPrimLong : Primitive::kPrimInt; + DataType::Type type = move->Is64BitMove() ? 
DataType::Type::kInt64 : DataType::Type::kInt32; // Perform (C -> scratch) move->SetDestination(scratch); EmitMove(index); @@ -521,7 +521,8 @@ void ParallelMoveResolverNoSwap::UpdateMoveSource(Location from, Location to) { } void ParallelMoveResolverNoSwap::AddPendingMove(Location source, - Location destination, Primitive::Type type) { + Location destination, + DataType::Type type) { pending_moves_.push_back(new (allocator_) MoveOperands(source, destination, type, nullptr)); } diff --git a/compiler/optimizing/parallel_move_resolver.h b/compiler/optimizing/parallel_move_resolver.h index 4278861690..e6e069f96e 100644 --- a/compiler/optimizing/parallel_move_resolver.h +++ b/compiler/optimizing/parallel_move_resolver.h @@ -19,8 +19,8 @@ #include "base/arena_containers.h" #include "base/value_object.h" +#include "data_type.h" #include "locations.h" -#include "primitive.h" namespace art { @@ -177,7 +177,7 @@ class ParallelMoveResolverNoSwap : public ParallelMoveResolver { void UpdateMoveSource(Location from, Location to); - void AddPendingMove(Location source, Location destination, Primitive::Type type); + void AddPendingMove(Location source, Location destination, DataType::Type type); void DeletePendingMove(MoveOperands* move); diff --git a/compiler/optimizing/parallel_move_test.cc b/compiler/optimizing/parallel_move_test.cc index 50620f0e7b..cb87cabe1c 100644 --- a/compiler/optimizing/parallel_move_test.cc +++ b/compiler/optimizing/parallel_move_test.cc @@ -158,7 +158,7 @@ static HParallelMove* BuildParallelMove(ArenaAllocator* allocator, moves->AddMove( Location::RegisterLocation(operands[i][0]), Location::RegisterLocation(operands[i][1]), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); } return moves; @@ -264,12 +264,12 @@ TYPED_TEST(ParallelMoveTest, ConstantLast) { moves->AddMove( Location::ConstantLocation(new (&allocator) HIntConstant(0)), Location::RegisterLocation(0), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); moves->AddMove( Location::RegisterLocation(1), Location::RegisterLocation(2), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); resolver.EmitNativeCode(moves); ASSERT_STREQ("(1 -> 2) (C -> 0)", resolver.GetMessage().c_str()); @@ -285,12 +285,12 @@ TYPED_TEST(ParallelMoveTest, Pairs) { moves->AddMove( Location::RegisterLocation(2), Location::RegisterLocation(4), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); moves->AddMove( Location::RegisterPairLocation(0, 1), Location::RegisterPairLocation(2, 3), - Primitive::kPrimLong, + DataType::Type::kInt64, nullptr); resolver.EmitNativeCode(moves); ASSERT_STREQ("(2 -> 4) (0,1 -> 2,3)", resolver.GetMessage().c_str()); @@ -302,12 +302,12 @@ TYPED_TEST(ParallelMoveTest, Pairs) { moves->AddMove( Location::RegisterPairLocation(0, 1), Location::RegisterPairLocation(2, 3), - Primitive::kPrimLong, + DataType::Type::kInt64, nullptr); moves->AddMove( Location::RegisterLocation(2), Location::RegisterLocation(4), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); resolver.EmitNativeCode(moves); ASSERT_STREQ("(2 -> 4) (0,1 -> 2,3)", resolver.GetMessage().c_str()); @@ -319,12 +319,12 @@ TYPED_TEST(ParallelMoveTest, Pairs) { moves->AddMove( Location::RegisterPairLocation(0, 1), Location::RegisterPairLocation(2, 3), - Primitive::kPrimLong, + DataType::Type::kInt64, nullptr); moves->AddMove( Location::RegisterLocation(2), Location::RegisterLocation(0), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); resolver.EmitNativeCode(moves); if (TestFixture::has_swap) { @@ -339,17 +339,17 @@ 
TYPED_TEST(ParallelMoveTest, Pairs) { moves->AddMove( Location::RegisterLocation(2), Location::RegisterLocation(7), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); moves->AddMove( Location::RegisterLocation(7), Location::RegisterLocation(1), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); moves->AddMove( Location::RegisterPairLocation(0, 1), Location::RegisterPairLocation(2, 3), - Primitive::kPrimLong, + DataType::Type::kInt64, nullptr); resolver.EmitNativeCode(moves); if (TestFixture::has_swap) { @@ -365,17 +365,17 @@ TYPED_TEST(ParallelMoveTest, Pairs) { moves->AddMove( Location::RegisterLocation(2), Location::RegisterLocation(7), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); moves->AddMove( Location::RegisterPairLocation(0, 1), Location::RegisterPairLocation(2, 3), - Primitive::kPrimLong, + DataType::Type::kInt64, nullptr); moves->AddMove( Location::RegisterLocation(7), Location::RegisterLocation(1), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); resolver.EmitNativeCode(moves); if (TestFixture::has_swap) { @@ -391,17 +391,17 @@ TYPED_TEST(ParallelMoveTest, Pairs) { moves->AddMove( Location::RegisterPairLocation(0, 1), Location::RegisterPairLocation(2, 3), - Primitive::kPrimLong, + DataType::Type::kInt64, nullptr); moves->AddMove( Location::RegisterLocation(2), Location::RegisterLocation(7), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); moves->AddMove( Location::RegisterLocation(7), Location::RegisterLocation(1), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); resolver.EmitNativeCode(moves); if (TestFixture::has_swap) { @@ -416,12 +416,12 @@ TYPED_TEST(ParallelMoveTest, Pairs) { moves->AddMove( Location::RegisterPairLocation(0, 1), Location::RegisterPairLocation(2, 3), - Primitive::kPrimLong, + DataType::Type::kInt64, nullptr); moves->AddMove( Location::RegisterPairLocation(2, 3), Location::RegisterPairLocation(0, 1), - Primitive::kPrimLong, + DataType::Type::kInt64, nullptr); resolver.EmitNativeCode(moves); if (TestFixture::has_swap) { @@ -436,12 +436,12 @@ TYPED_TEST(ParallelMoveTest, Pairs) { moves->AddMove( Location::RegisterPairLocation(2, 3), Location::RegisterPairLocation(0, 1), - Primitive::kPrimLong, + DataType::Type::kInt64, nullptr); moves->AddMove( Location::RegisterPairLocation(0, 1), Location::RegisterPairLocation(2, 3), - Primitive::kPrimLong, + DataType::Type::kInt64, nullptr); resolver.EmitNativeCode(moves); if (TestFixture::has_swap) { @@ -473,17 +473,17 @@ TYPED_TEST(ParallelMoveTest, MultiCycles) { moves->AddMove( Location::RegisterPairLocation(0, 1), Location::RegisterPairLocation(2, 3), - Primitive::kPrimLong, + DataType::Type::kInt64, nullptr); moves->AddMove( Location::RegisterLocation(2), Location::RegisterLocation(0), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); moves->AddMove( Location::RegisterLocation(3), Location::RegisterLocation(1), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); resolver.EmitNativeCode(moves); if (TestFixture::has_swap) { @@ -499,17 +499,17 @@ TYPED_TEST(ParallelMoveTest, MultiCycles) { moves->AddMove( Location::RegisterLocation(2), Location::RegisterLocation(0), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); moves->AddMove( Location::RegisterLocation(3), Location::RegisterLocation(1), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); moves->AddMove( Location::RegisterPairLocation(0, 1), Location::RegisterPairLocation(2, 3), - Primitive::kPrimLong, + DataType::Type::kInt64, nullptr); resolver.EmitNativeCode(moves); if 
(TestFixture::has_swap) { @@ -527,17 +527,17 @@ TYPED_TEST(ParallelMoveTest, MultiCycles) { moves->AddMove( Location::RegisterLocation(10), Location::RegisterLocation(5), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); moves->AddMove( Location::RegisterPairLocation(4, 5), Location::DoubleStackSlot(32), - Primitive::kPrimLong, + DataType::Type::kInt64, nullptr); moves->AddMove( Location::DoubleStackSlot(32), Location::RegisterPairLocation(10, 11), - Primitive::kPrimLong, + DataType::Type::kInt64, nullptr); resolver.EmitNativeCode(moves); if (TestFixture::has_swap) { @@ -560,17 +560,17 @@ TYPED_TEST(ParallelMoveTest, CyclesWith64BitsMoves) { moves->AddMove( Location::RegisterLocation(0), Location::RegisterLocation(1), - Primitive::kPrimLong, + DataType::Type::kInt64, nullptr); moves->AddMove( Location::RegisterLocation(1), Location::StackSlot(48), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); moves->AddMove( Location::StackSlot(48), Location::RegisterLocation(0), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); resolver.EmitNativeCode(moves); if (TestFixture::has_swap) { @@ -587,17 +587,17 @@ TYPED_TEST(ParallelMoveTest, CyclesWith64BitsMoves) { moves->AddMove( Location::RegisterPairLocation(0, 1), Location::RegisterPairLocation(2, 3), - Primitive::kPrimLong, + DataType::Type::kInt64, nullptr); moves->AddMove( Location::RegisterPairLocation(2, 3), Location::DoubleStackSlot(32), - Primitive::kPrimLong, + DataType::Type::kInt64, nullptr); moves->AddMove( Location::DoubleStackSlot(32), Location::RegisterPairLocation(0, 1), - Primitive::kPrimLong, + DataType::Type::kInt64, nullptr); resolver.EmitNativeCode(moves); if (TestFixture::has_swap) { @@ -619,17 +619,17 @@ TYPED_TEST(ParallelMoveTest, CyclesWith64BitsMoves2) { moves->AddMove( Location::RegisterLocation(0), Location::RegisterLocation(3), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); moves->AddMove( Location::RegisterPairLocation(2, 3), Location::RegisterPairLocation(0, 1), - Primitive::kPrimLong, + DataType::Type::kInt64, nullptr); moves->AddMove( Location::RegisterLocation(7), Location::RegisterLocation(2), - Primitive::kPrimInt, + DataType::Type::kInt32, nullptr); resolver.EmitNativeCode(moves); if (TestFixture::has_swap) { diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc index 9877e10474..a114e78eb4 100644 --- a/compiler/optimizing/pc_relative_fixups_x86.cc +++ b/compiler/optimizing/pc_relative_fixups_x86.cc @@ -63,7 +63,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor { void VisitReturn(HReturn* ret) OVERRIDE { HConstant* value = ret->InputAt(0)->AsConstant(); - if ((value != nullptr && Primitive::IsFloatingPointType(value->GetType()))) { + if ((value != nullptr && DataType::IsFloatingPointType(value->GetType()))) { ReplaceInput(ret, value, 0, true); } } @@ -102,7 +102,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor { void BinaryFP(HBinaryOperation* bin) { HConstant* rhs = bin->InputAt(1)->AsConstant(); - if (rhs != nullptr && Primitive::IsFloatingPointType(rhs->GetType())) { + if (rhs != nullptr && DataType::IsFloatingPointType(rhs->GetType())) { ReplaceInput(bin, rhs, 1, false); } } @@ -132,7 +132,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor { } void VisitNeg(HNeg* neg) OVERRIDE { - if (Primitive::IsFloatingPointType(neg->GetType())) { + if (DataType::IsFloatingPointType(neg->GetType())) { // We need to replace the HNeg with a HX86FPNeg in order to address the constant area. 
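The x86 fixup hunks above gate every rewrite on DataType::IsFloatingPointType(), so it helps to see the shape of the new type class in one place. The following is a minimal standalone sketch, assembled only from the renamings visible in this diff (kPrimNot to kReference, kPrimInt to kInt32, kPrimChar to kUint16, and so on); it is not the real optimizing/data_type.h, which also carries size, signedness and conversion helpers.

#include <cstdint>

// Illustrative sketch only, not the actual ART data_type.h.
class DataType {
 public:
  enum class Type : uint8_t {
    kReference,  // was Primitive::kPrimNot
    kBool,       // was Primitive::kPrimBoolean
    kInt8,       // was Primitive::kPrimByte
    kUint16,     // was Primitive::kPrimChar
    kInt16,      // was Primitive::kPrimShort
    kInt32,      // was Primitive::kPrimInt
    kInt64,      // was Primitive::kPrimLong
    kFloat32,    // was Primitive::kPrimFloat
    kFloat64,    // was Primitive::kPrimDouble
    kVoid,       // was Primitive::kPrimVoid
  };

  // Predicate used by the fixup pass above to decide whether a constant
  // input must be routed through the constant area.
  static constexpr bool IsFloatingPointType(Type type) {
    return type == Type::kFloat32 || type == Type::kFloat64;
  }

  static constexpr bool IsIntegralType(Type type) {
    return type == Type::kBool || type == Type::kInt8 || type == Type::kUint16 ||
           type == Type::kInt16 || type == Type::kInt32 || type == Type::kInt64;
  }
};

Call sites then change mechanically, for example Primitive::IsFloatingPointType(t) becomes DataType::IsFloatingPointType(t), which is exactly the pattern repeated throughout the hunks in this file.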
HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(neg); HGraph* graph = GetGraph(); @@ -225,7 +225,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor { HInputsRef inputs = invoke->GetInputs(); for (size_t i = 0; i < inputs.size(); i++) { HConstant* input = inputs[i]->AsConstant(); - if (input != nullptr && Primitive::IsFloatingPointType(input->GetType())) { + if (input != nullptr && DataType::IsFloatingPointType(input->GetType())) { ReplaceInput(invoke, input, i, true); } } diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc index 2c856cd3d9..b52de367d1 100644 --- a/compiler/optimizing/prepare_for_register_allocation.cc +++ b/compiler/optimizing/prepare_for_register_allocation.cc @@ -77,7 +77,7 @@ void PrepareForRegisterAllocation::VisitArraySet(HArraySet* instruction) { // BoundType (as value input of this ArraySet) with a NullConstant. // If so, this ArraySet no longer needs a type check. if (value->IsNullConstant()) { - DCHECK_EQ(value->GetType(), Primitive::kPrimNot); + DCHECK_EQ(value->GetType(), DataType::Type::kReference); if (instruction->NeedsTypeCheck()) { instruction->ClearNeedsTypeCheck(); } diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc index 93613a5542..f5064c3057 100644 --- a/compiler/optimizing/reference_type_propagation.cc +++ b/compiler/optimizing/reference_type_propagation.cc @@ -133,7 +133,7 @@ void ReferenceTypePropagation::ValidateTypes() { for (HBasicBlock* block : graph_->GetReversePostOrder()) { for (HInstructionIterator iti(block->GetInstructions()); !iti.Done(); iti.Advance()) { HInstruction* instr = iti.Current(); - if (instr->GetType() == Primitive::kPrimNot) { + if (instr->GetType() == DataType::Type::kReference) { DCHECK(instr->GetReferenceTypeInfo().IsValid()) << "Invalid RTI for instruction: " << instr->DebugName(); if (instr->IsBoundType()) { @@ -555,7 +555,7 @@ void ReferenceTypePropagation::RTPVisitor::UpdateReferenceTypeInfo(HInstruction* dex::TypeIndex type_idx, const DexFile& dex_file, bool is_exact) { - DCHECK_EQ(instr->GetType(), Primitive::kPrimNot); + DCHECK_EQ(instr->GetType(), DataType::Type::kReference); ScopedObjectAccess soa(Thread::Current()); ObjPtr<mirror::DexCache> dex_cache = FindDexCacheWithHint(soa.Self(), dex_file, hint_dex_cache_); @@ -576,7 +576,7 @@ void ReferenceTypePropagation::RTPVisitor::VisitNewArray(HNewArray* instr) { void ReferenceTypePropagation::RTPVisitor::VisitParameterValue(HParameterValue* instr) { // We check if the existing type is valid: the inliner may have set it. - if (instr->GetType() == Primitive::kPrimNot && !instr->GetReferenceTypeInfo().IsValid()) { + if (instr->GetType() == DataType::Type::kReference && !instr->GetReferenceTypeInfo().IsValid()) { UpdateReferenceTypeInfo(instr, instr->GetTypeIndex(), instr->GetDexFile(), @@ -586,7 +586,7 @@ void ReferenceTypePropagation::RTPVisitor::VisitParameterValue(HParameterValue* void ReferenceTypePropagation::RTPVisitor::UpdateFieldAccessTypeInfo(HInstruction* instr, const FieldInfo& info) { - if (instr->GetType() != Primitive::kPrimNot) { + if (instr->GetType() != DataType::Type::kReference) { return; } @@ -612,7 +612,7 @@ void ReferenceTypePropagation::RTPVisitor::VisitStaticFieldGet(HStaticFieldGet* void ReferenceTypePropagation::RTPVisitor::VisitUnresolvedInstanceFieldGet( HUnresolvedInstanceFieldGet* instr) { // TODO: Use descriptor to get the actual type. 
- if (instr->GetFieldType() == Primitive::kPrimNot) { + if (instr->GetFieldType() == DataType::Type::kReference) { instr->SetReferenceTypeInfo(instr->GetBlock()->GetGraph()->GetInexactObjectRti()); } } @@ -620,7 +620,7 @@ void ReferenceTypePropagation::RTPVisitor::VisitUnresolvedInstanceFieldGet( void ReferenceTypePropagation::RTPVisitor::VisitUnresolvedStaticFieldGet( HUnresolvedStaticFieldGet* instr) { // TODO: Use descriptor to get the actual type. - if (instr->GetFieldType() == Primitive::kPrimNot) { + if (instr->GetFieldType() == DataType::Type::kReference) { instr->SetReferenceTypeInfo(instr->GetBlock()->GetGraph()->GetInexactObjectRti()); } } @@ -729,7 +729,7 @@ void ReferenceTypePropagation::RTPVisitor::VisitCheckCast(HCheckCast* check_cast } void ReferenceTypePropagation::VisitPhi(HPhi* phi) { - if (phi->IsDead() || phi->GetType() != Primitive::kPrimNot) { + if (phi->IsDead() || phi->GetType() != DataType::Type::kReference) { return; } @@ -813,7 +813,7 @@ ReferenceTypeInfo ReferenceTypePropagation::MergeTypes(const ReferenceTypeInfo& } void ReferenceTypePropagation::UpdateArrayGet(HArrayGet* instr, HandleCache* handle_cache) { - DCHECK_EQ(Primitive::kPrimNot, instr->GetType()); + DCHECK_EQ(DataType::Type::kReference, instr->GetType()); ReferenceTypeInfo parent_rti = instr->InputAt(0)->GetReferenceTypeInfo(); if (!parent_rti.IsValid()) { @@ -857,7 +857,7 @@ bool ReferenceTypePropagation::UpdateReferenceTypeInfo(HInstruction* instr) { } void ReferenceTypePropagation::RTPVisitor::VisitInvoke(HInvoke* instr) { - if (instr->GetType() != Primitive::kPrimNot) { + if (instr->GetType() != DataType::Type::kReference) { return; } @@ -868,7 +868,7 @@ void ReferenceTypePropagation::RTPVisitor::VisitInvoke(HInvoke* instr) { } void ReferenceTypePropagation::RTPVisitor::VisitArrayGet(HArrayGet* instr) { - if (instr->GetType() != Primitive::kPrimNot) { + if (instr->GetType() != DataType::Type::kReference) { return; } @@ -989,7 +989,7 @@ void ReferenceTypePropagation::ProcessWorklist() { } void ReferenceTypePropagation::AddToWorklist(HInstruction* instruction) { - DCHECK_EQ(instruction->GetType(), Primitive::kPrimNot) + DCHECK_EQ(instruction->GetType(), DataType::Type::kReference) << instruction->DebugName() << ":" << instruction->GetType(); worklist_.push_back(instruction); } @@ -1000,7 +1000,7 @@ void ReferenceTypePropagation::AddDependentInstructionsToWorklist(HInstruction* if ((user->IsPhi() && user->AsPhi()->IsLive()) || user->IsBoundType() || user->IsNullCheck() - || (user->IsArrayGet() && (user->GetType() == Primitive::kPrimNot))) { + || (user->IsArrayGet() && (user->GetType() == DataType::Type::kReference))) { AddToWorklist(user); } } diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc index ce3a4966aa..f0057c3095 100644 --- a/compiler/optimizing/register_allocation_resolver.cc +++ b/compiler/optimizing/register_allocation_resolver.cc @@ -100,24 +100,24 @@ void RegisterAllocationResolver::Resolve(ArrayRef<HInstruction* const> safepoint // [art method ]. 
size_t slot = current->GetSpillSlot(); switch (current->GetType()) { - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: slot += long_spill_slots; FALLTHROUGH_INTENDED; - case Primitive::kPrimLong: + case DataType::Type::kInt64: slot += float_spill_slots; FALLTHROUGH_INTENDED; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: slot += int_spill_slots; FALLTHROUGH_INTENDED; - case Primitive::kPrimNot: - case Primitive::kPrimInt: - case Primitive::kPrimChar: - case Primitive::kPrimByte: - case Primitive::kPrimBoolean: - case Primitive::kPrimShort: + case DataType::Type::kReference: + case DataType::Type::kInt32: + case DataType::Type::kUint16: + case DataType::Type::kInt8: + case DataType::Type::kBool: + case DataType::Type::kInt16: slot += reserved_out_slots; break; - case Primitive::kPrimVoid: + case DataType::Type::kVoid: LOG(FATAL) << "Unexpected type for interval " << current->GetType(); } current->SetSpillSlot(slot * kVRegSize); @@ -205,12 +205,12 @@ void RegisterAllocationResolver::Resolve(ArrayRef<HInstruction* const> safepoint size_t temp_index = liveness_.GetTempIndex(temp); LocationSummary* locations = at->GetLocations(); switch (temp->GetType()) { - case Primitive::kPrimInt: + case DataType::Type::kInt32: locations->SetTempAt(temp_index, Location::RegisterLocation(temp->GetRegister())); break; - case Primitive::kPrimDouble: - if (codegen_->NeedsTwoRegisters(Primitive::kPrimDouble)) { + case DataType::Type::kFloat64: + if (codegen_->NeedsTwoRegisters(DataType::Type::kFloat64)) { Location location = Location::FpuRegisterPairLocation( temp->GetRegister(), temp->GetHighInterval()->GetRegister()); locations->SetTempAt(temp_index, location); @@ -383,7 +383,7 @@ void RegisterAllocationResolver::ConnectSiblings(LiveInterval* interval) { safepoint_position = safepoint_position->GetNext()) { DCHECK(current->CoversSlow(safepoint_position->GetPosition())); - if (current->GetType() == Primitive::kPrimNot) { + if (current->GetType() == DataType::Type::kReference) { DCHECK(interval->GetDefinedBy()->IsActualObject()) << interval->GetDefinedBy()->DebugName() << '(' << interval->GetDefinedBy()->GetId() << ')' @@ -507,13 +507,13 @@ void RegisterAllocationResolver::AddMove(HParallelMove* move, Location source, Location destination, HInstruction* instruction, - Primitive::Type type) const { - if (type == Primitive::kPrimLong + DataType::Type type) const { + if (type == DataType::Type::kInt64 && codegen_->ShouldSplitLongMoves() // The parallel move resolver knows how to deal with long constants. 
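The Resolve() hunk above stacks spill slots by category, doubles first, then longs, floats, and finally the integer/reference group, all sitting on top of the reserved out slots, which is why the switch intentionally falls through. A hedged sketch of that offset computation, with placeholder names rather than the ART classes:

#include <cstddef>
#include <stdexcept>

// Stand-in for DataType::Type; see the enum sketch above.
enum class Type { kBool, kInt8, kInt16, kUint16, kInt32, kReference,
                  kFloat32, kInt64, kFloat64, kVoid };

std::size_t AdjustSpillSlot(std::size_t slot, Type type,
                            std::size_t long_spill_slots,
                            std::size_t float_spill_slots,
                            std::size_t int_spill_slots,
                            std::size_t reserved_out_slots) {
  switch (type) {
    case Type::kFloat64:
      slot += long_spill_slots;
      [[fallthrough]];  // FALLTHROUGH_INTENDED in the ART sources
    case Type::kInt64:
      slot += float_spill_slots;
      [[fallthrough]];
    case Type::kFloat32:
      slot += int_spill_slots;
      [[fallthrough]];
    case Type::kReference:
    case Type::kInt32:
    case Type::kUint16:
    case Type::kInt8:
    case Type::kBool:
    case Type::kInt16:
      slot += reserved_out_slots;
      break;
    case Type::kVoid:
      throw std::logic_error("Unexpected type for interval");
  }
  // The caller then scales the index, as in SetSpillSlot(slot * kVRegSize).
  return slot;
}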
&& !source.IsConstant()) { - move->AddMove(source.ToLow(), destination.ToLow(), Primitive::kPrimInt, instruction); - move->AddMove(source.ToHigh(), destination.ToHigh(), Primitive::kPrimInt, nullptr); + move->AddMove(source.ToLow(), destination.ToLow(), DataType::Type::kInt32, instruction); + move->AddMove(source.ToHigh(), destination.ToHigh(), DataType::Type::kInt32, nullptr); } else { move->AddMove(source, destination, type, instruction); } diff --git a/compiler/optimizing/register_allocation_resolver.h b/compiler/optimizing/register_allocation_resolver.h index d48b1a0bb9..4a148e0abf 100644 --- a/compiler/optimizing/register_allocation_resolver.h +++ b/compiler/optimizing/register_allocation_resolver.h @@ -20,7 +20,7 @@ #include "base/arena_containers.h" #include "base/array_ref.h" #include "base/value_object.h" -#include "primitive.h" +#include "data_type.h" namespace art { @@ -88,7 +88,7 @@ class RegisterAllocationResolver : ValueObject { Location source, Location destination, HInstruction* instruction, - Primitive::Type type) const; + DataType::Type type) const; ArenaAllocator* const allocator_; CodeGenerator* const codegen_; diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h index 7e1fff8e2b..4375d6851a 100644 --- a/compiler/optimizing/register_allocator.h +++ b/compiler/optimizing/register_allocator.h @@ -21,7 +21,6 @@ #include "base/arena_containers.h" #include "base/arena_object.h" #include "base/macros.h" -#include "primitive.h" namespace art { diff --git a/compiler/optimizing/register_allocator_graph_color.cc b/compiler/optimizing/register_allocator_graph_color.cc index 5e22772844..4ff7315045 100644 --- a/compiler/optimizing/register_allocator_graph_color.cc +++ b/compiler/optimizing/register_allocator_graph_color.cc @@ -540,7 +540,7 @@ class ColoringIteration { }; static bool IsCoreInterval(LiveInterval* interval) { - return !Primitive::IsFloatingPointType(interval->GetType()); + return !DataType::IsFloatingPointType(interval->GetType()); } static size_t ComputeReservedArtMethodSlots(const CodeGenerator& codegen) { @@ -573,7 +573,7 @@ RegisterAllocatorGraphColor::RegisterAllocatorGraphColor(ArenaAllocator* allocat // This includes globally blocked registers, such as the stack pointer. physical_core_nodes_.resize(codegen_->GetNumberOfCoreRegisters(), nullptr); for (size_t i = 0; i < codegen_->GetNumberOfCoreRegisters(); ++i) { - LiveInterval* interval = LiveInterval::MakeFixedInterval(allocator_, i, Primitive::kPrimInt); + LiveInterval* interval = LiveInterval::MakeFixedInterval(allocator_, i, DataType::Type::kInt32); physical_core_nodes_[i] = new (allocator_) InterferenceNode(allocator_, interval, liveness); physical_core_nodes_[i]->stage = NodeStage::kPrecolored; @@ -585,7 +585,8 @@ RegisterAllocatorGraphColor::RegisterAllocatorGraphColor(ArenaAllocator* allocat // Initialize physical floating point register live intervals and blocked registers. 
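The AddMove() hunk just above shows the other recurring pattern in this change: when the code generator reports ShouldSplitLongMoves(), a 64-bit move is emitted as two 32-bit moves on the low and high halves, and only the low half keeps the pointer back to the originating instruction. A simplified stand-in (not the ART ParallelMove API) for that split:

#include <vector>

enum class Ty { kInt32, kInt64 };

struct PendingMove {
  int src_reg;
  int dst_reg;
  Ty type;
  const void* instruction;  // nullptr for the synthetic high-half move
};

// Emits the two half-moves for a 64-bit value; 'instruction' stays attached
// only to the low half, matching the hunk above.
void AddSplitInt64Move(std::vector<PendingMove>* moves,
                       int src_lo, int src_hi, int dst_lo, int dst_hi,
                       const void* instruction) {
  moves->push_back({src_lo, dst_lo, Ty::kInt32, instruction});
  moves->push_back({src_hi, dst_hi, Ty::kInt32, nullptr});
}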
physical_fp_nodes_.resize(codegen_->GetNumberOfFloatingPointRegisters(), nullptr); for (size_t i = 0; i < codegen_->GetNumberOfFloatingPointRegisters(); ++i) { - LiveInterval* interval = LiveInterval::MakeFixedInterval(allocator_, i, Primitive::kPrimFloat); + LiveInterval* interval = + LiveInterval::MakeFixedInterval(allocator_, i, DataType::Type::kFloat32); physical_fp_nodes_[i] = new (allocator_) InterferenceNode(allocator_, interval, liveness); physical_fp_nodes_[i]->stage = NodeStage::kPrecolored; @@ -936,7 +937,7 @@ void RegisterAllocatorGraphColor::CheckForTempLiveIntervals(HInstruction* instru switch (temp.GetPolicy()) { case Location::kRequiresRegister: { LiveInterval* interval = - LiveInterval::MakeTempInterval(allocator_, Primitive::kPrimInt); + LiveInterval::MakeTempInterval(allocator_, DataType::Type::kInt32); interval->AddTempUse(instruction, i); core_intervals_.push_back(interval); temp_intervals_.push_back(interval); @@ -945,11 +946,11 @@ void RegisterAllocatorGraphColor::CheckForTempLiveIntervals(HInstruction* instru case Location::kRequiresFpuRegister: { LiveInterval* interval = - LiveInterval::MakeTempInterval(allocator_, Primitive::kPrimDouble); + LiveInterval::MakeTempInterval(allocator_, DataType::Type::kFloat64); interval->AddTempUse(instruction, i); fp_intervals_.push_back(interval); temp_intervals_.push_back(interval); - if (codegen_->NeedsTwoRegisters(Primitive::kPrimDouble)) { + if (codegen_->NeedsTwoRegisters(DataType::Type::kFloat64)) { interval->AddHighInterval(/*is_temp*/ true); temp_intervals_.push_back(interval->GetHighInterval()); } @@ -1927,24 +1928,24 @@ void RegisterAllocatorGraphColor::AllocateSpillSlots(const ArenaVector<Interfere // We need to find a spill slot for this interval. Place it in the correct // worklist to be processed later. 
switch (node->GetInterval()->GetType()) { - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: double_intervals.push_back(parent); break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: long_intervals.push_back(parent); break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: float_intervals.push_back(parent); break; - case Primitive::kPrimNot: - case Primitive::kPrimInt: - case Primitive::kPrimChar: - case Primitive::kPrimByte: - case Primitive::kPrimBoolean: - case Primitive::kPrimShort: + case DataType::Type::kReference: + case DataType::Type::kInt32: + case DataType::Type::kUint16: + case DataType::Type::kInt8: + case DataType::Type::kBool: + case DataType::Type::kInt16: int_intervals.push_back(parent); break; - case Primitive::kPrimVoid: + case DataType::Type::kVoid: LOG(FATAL) << "Unexpected type for interval " << node->GetInterval()->GetType(); UNREACHABLE(); } diff --git a/compiler/optimizing/register_allocator_graph_color.h b/compiler/optimizing/register_allocator_graph_color.h index 548687f784..3f6d674905 100644 --- a/compiler/optimizing/register_allocator_graph_color.h +++ b/compiler/optimizing/register_allocator_graph_color.h @@ -21,7 +21,6 @@ #include "base/arena_containers.h" #include "base/arena_object.h" #include "base/macros.h" -#include "primitive.h" #include "register_allocator.h" namespace art { diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc index ab8d540359..2012cd5847 100644 --- a/compiler/optimizing/register_allocator_linear_scan.cc +++ b/compiler/optimizing/register_allocator_linear_scan.cc @@ -83,8 +83,8 @@ RegisterAllocatorLinearScan::RegisterAllocatorLinearScan(ArenaAllocator* allocat static bool ShouldProcess(bool processing_core_registers, LiveInterval* interval) { if (interval == nullptr) return false; - bool is_core_register = (interval->GetType() != Primitive::kPrimDouble) - && (interval->GetType() != Primitive::kPrimFloat); + bool is_core_register = (interval->GetType() != DataType::Type::kFloat64) + && (interval->GetType() != DataType::Type::kFloat32); return processing_core_registers == is_core_register; } @@ -132,9 +132,9 @@ void RegisterAllocatorLinearScan::BlockRegister(Location location, size_t start, LiveInterval* interval = location.IsRegister() ? physical_core_register_intervals_[reg] : physical_fp_register_intervals_[reg]; - Primitive::Type type = location.IsRegister() - ? Primitive::kPrimInt - : Primitive::kPrimFloat; + DataType::Type type = location.IsRegister() + ? 
DataType::Type::kInt32 + : DataType::Type::kFloat32; if (interval == nullptr) { interval = LiveInterval::MakeFixedInterval(allocator_, reg, type); if (location.IsRegister()) { @@ -237,7 +237,7 @@ void RegisterAllocatorLinearScan::ProcessInstruction(HInstruction* instruction) switch (temp.GetPolicy()) { case Location::kRequiresRegister: { LiveInterval* interval = - LiveInterval::MakeTempInterval(allocator_, Primitive::kPrimInt); + LiveInterval::MakeTempInterval(allocator_, DataType::Type::kInt32); temp_intervals_.push_back(interval); interval->AddTempUse(instruction, i); unhandled_core_intervals_.push_back(interval); @@ -246,10 +246,10 @@ void RegisterAllocatorLinearScan::ProcessInstruction(HInstruction* instruction) case Location::kRequiresFpuRegister: { LiveInterval* interval = - LiveInterval::MakeTempInterval(allocator_, Primitive::kPrimDouble); + LiveInterval::MakeTempInterval(allocator_, DataType::Type::kFloat64); temp_intervals_.push_back(interval); interval->AddTempUse(instruction, i); - if (codegen_->NeedsTwoRegisters(Primitive::kPrimDouble)) { + if (codegen_->NeedsTwoRegisters(DataType::Type::kFloat64)) { interval->AddHighInterval(/* is_temp */ true); LiveInterval* high = interval->GetHighInterval(); temp_intervals_.push_back(high); @@ -266,8 +266,8 @@ void RegisterAllocatorLinearScan::ProcessInstruction(HInstruction* instruction) } } - bool core_register = (instruction->GetType() != Primitive::kPrimDouble) - && (instruction->GetType() != Primitive::kPrimFloat); + bool core_register = (instruction->GetType() != DataType::Type::kFloat64) + && (instruction->GetType() != DataType::Type::kFloat32); if (locations->NeedsSafepoint()) { if (codegen_->IsLeafMethod()) { @@ -1104,24 +1104,24 @@ void RegisterAllocatorLinearScan::AllocateSpillSlotFor(LiveInterval* interval) { ArenaVector<size_t>* spill_slots = nullptr; switch (interval->GetType()) { - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: spill_slots = &double_spill_slots_; break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: spill_slots = &long_spill_slots_; break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: spill_slots = &float_spill_slots_; break; - case Primitive::kPrimNot: - case Primitive::kPrimInt: - case Primitive::kPrimChar: - case Primitive::kPrimByte: - case Primitive::kPrimBoolean: - case Primitive::kPrimShort: + case DataType::Type::kReference: + case DataType::Type::kInt32: + case DataType::Type::kUint16: + case DataType::Type::kInt8: + case DataType::Type::kBool: + case DataType::Type::kInt16: spill_slots = &int_spill_slots_; break; - case Primitive::kPrimVoid: + case DataType::Type::kVoid: LOG(FATAL) << "Unexpected type for interval " << interval->GetType(); } diff --git a/compiler/optimizing/register_allocator_linear_scan.h b/compiler/optimizing/register_allocator_linear_scan.h index b3834f45e4..9c650a44d2 100644 --- a/compiler/optimizing/register_allocator_linear_scan.h +++ b/compiler/optimizing/register_allocator_linear_scan.h @@ -20,7 +20,6 @@ #include "arch/instruction_set.h" #include "base/arena_containers.h" #include "base/macros.h" -#include "primitive.h" #include "register_allocator.h" namespace art { diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc index bcdd7f9cd8..59987e26b6 100644 --- a/compiler/optimizing/register_allocator_test.cc +++ b/compiler/optimizing/register_allocator_test.cc @@ -461,15 +461,15 @@ TEST_F(RegisterAllocatorTest, FreeUntil) { // Add three temps holding the same register, and 
starting at different positions. // Put the one that should be picked in the middle of the inactive list to ensure // we do not depend on an order. - LiveInterval* interval = LiveInterval::MakeFixedInterval(&allocator, 0, Primitive::kPrimInt); + LiveInterval* interval = LiveInterval::MakeFixedInterval(&allocator, 0, DataType::Type::kInt32); interval->AddRange(40, 50); register_allocator.inactive_.push_back(interval); - interval = LiveInterval::MakeFixedInterval(&allocator, 0, Primitive::kPrimInt); + interval = LiveInterval::MakeFixedInterval(&allocator, 0, DataType::Type::kInt32); interval->AddRange(20, 30); register_allocator.inactive_.push_back(interval); - interval = LiveInterval::MakeFixedInterval(&allocator, 0, Primitive::kPrimInt); + interval = LiveInterval::MakeFixedInterval(&allocator, 0, DataType::Type::kInt32); interval->AddRange(60, 70); register_allocator.inactive_.push_back(interval); @@ -496,7 +496,7 @@ static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator, graph->AddBlock(entry); graph->SetEntryBlock(entry); HInstruction* parameter = new (allocator) HParameterValue( - graph->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimNot); + graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); entry->AddInstruction(parameter); HBasicBlock* block = new (allocator) HBasicBlock(graph); @@ -505,7 +505,7 @@ static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator, HInstruction* test = new (allocator) HInstanceFieldGet(parameter, nullptr, - Primitive::kPrimBoolean, + DataType::Type::kBool, MemberOffset(22), false, kUnknownFieldIndex, @@ -528,11 +528,11 @@ static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator, then->AddInstruction(new (allocator) HGoto()); else_->AddInstruction(new (allocator) HGoto()); - *phi = new (allocator) HPhi(allocator, 0, 0, Primitive::kPrimInt); + *phi = new (allocator) HPhi(allocator, 0, 0, DataType::Type::kInt32); join->AddPhi(*phi); *input1 = new (allocator) HInstanceFieldGet(parameter, nullptr, - Primitive::kPrimInt, + DataType::Type::kInt32, MemberOffset(42), false, kUnknownFieldIndex, @@ -541,7 +541,7 @@ static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator, 0); *input2 = new (allocator) HInstanceFieldGet(parameter, nullptr, - Primitive::kPrimInt, + DataType::Type::kInt32, MemberOffset(42), false, kUnknownFieldIndex, @@ -658,7 +658,7 @@ static HGraph* BuildFieldReturn(ArenaAllocator* allocator, graph->AddBlock(entry); graph->SetEntryBlock(entry); HInstruction* parameter = new (allocator) HParameterValue( - graph->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimNot); + graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); entry->AddInstruction(parameter); HBasicBlock* block = new (allocator) HBasicBlock(graph); @@ -667,7 +667,7 @@ static HGraph* BuildFieldReturn(ArenaAllocator* allocator, *field = new (allocator) HInstanceFieldGet(parameter, nullptr, - Primitive::kPrimInt, + DataType::Type::kInt32, MemberOffset(42), false, kUnknownFieldIndex, @@ -742,7 +742,7 @@ static HGraph* BuildTwoSubs(ArenaAllocator* allocator, graph->AddBlock(entry); graph->SetEntryBlock(entry); HInstruction* parameter = new (allocator) HParameterValue( - graph->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimInt); + graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); entry->AddInstruction(parameter); HInstruction* constant1 = graph->GetIntConstant(1); @@ -752,9 +752,9 @@ static HGraph* BuildTwoSubs(ArenaAllocator* allocator, graph->AddBlock(block); entry->AddSuccessor(block); - *first_sub = new 
(allocator) HSub(Primitive::kPrimInt, parameter, constant1); + *first_sub = new (allocator) HSub(DataType::Type::kInt32, parameter, constant1); block->AddInstruction(*first_sub); - *second_sub = new (allocator) HSub(Primitive::kPrimInt, *first_sub, constant2); + *second_sub = new (allocator) HSub(DataType::Type::kInt32, *first_sub, constant2); block->AddInstruction(*second_sub); block->AddInstruction(new (allocator) HExit()); @@ -821,9 +821,9 @@ static HGraph* BuildDiv(ArenaAllocator* allocator, graph->AddBlock(entry); graph->SetEntryBlock(entry); HInstruction* first = new (allocator) HParameterValue( - graph->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimInt); + graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); HInstruction* second = new (allocator) HParameterValue( - graph->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimInt); + graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); entry->AddInstruction(first); entry->AddInstruction(second); @@ -831,7 +831,8 @@ static HGraph* BuildDiv(ArenaAllocator* allocator, graph->AddBlock(block); entry->AddSuccessor(block); - *div = new (allocator) HDiv(Primitive::kPrimInt, first, second, 0); // don't care about dex_pc. + *div = + new (allocator) HDiv(DataType::Type::kInt32, first, second, 0); // don't care about dex_pc. block->AddInstruction(*div); block->AddInstruction(new (allocator) HExit()); @@ -883,13 +884,13 @@ TEST_F(RegisterAllocatorTest, SpillInactive) { graph->AddBlock(entry); graph->SetEntryBlock(entry); HInstruction* one = new (&allocator) HParameterValue( - graph->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimInt); + graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); HInstruction* two = new (&allocator) HParameterValue( - graph->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimInt); + graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); HInstruction* three = new (&allocator) HParameterValue( - graph->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimInt); + graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); HInstruction* four = new (&allocator) HParameterValue( - graph->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimInt); + graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); entry->AddInstruction(one); entry->AddInstruction(two); entry->AddInstruction(three); @@ -902,7 +903,7 @@ TEST_F(RegisterAllocatorTest, SpillInactive) { // We create a synthesized user requesting a register, to avoid just spilling the // intervals. 
- HPhi* user = new (&allocator) HPhi(&allocator, 0, 1, Primitive::kPrimInt); + HPhi* user = new (&allocator) HPhi(&allocator, 0, 1, DataType::Type::kInt32); user->AddInput(one); user->SetBlock(block); LocationSummary* locations = new (&allocator) LocationSummary(user, LocationSummary::kNoCall); diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc index 38cd51bef6..5212e866cf 100644 --- a/compiler/optimizing/scheduler.cc +++ b/compiler/optimizing/scheduler.cc @@ -16,9 +16,11 @@ #include <string> -#include "prepare_for_register_allocation.h" #include "scheduler.h" +#include "data_type-inl.h" +#include "prepare_for_register_allocation.h" + #ifdef ART_ENABLE_CODEGEN_arm64 #include "scheduler_arm64.h" #endif @@ -399,17 +401,7 @@ bool SchedulingGraph::HasImmediateOtherDependency(const HInstruction* instructio } static const std::string InstructionTypeId(const HInstruction* instruction) { - std::string id; - Primitive::Type type = instruction->GetType(); - if (type == Primitive::kPrimNot) { - id.append("l"); - } else { - id.append(Primitive::Descriptor(instruction->GetType())); - } - // Use lower-case to be closer to the `HGraphVisualizer` output. - id[0] = std::tolower(id[0]); - id.append(std::to_string(instruction->GetId())); - return id; + return DataType::TypeId(instruction->GetType()) + std::to_string(instruction->GetId()); } // Ideally we would reuse the graph visualizer code, but it is not available diff --git a/compiler/optimizing/scheduler_arm.cc b/compiler/optimizing/scheduler_arm.cc index 66756a5fc7..110db47eb5 100644 --- a/compiler/optimizing/scheduler_arm.cc +++ b/compiler/optimizing/scheduler_arm.cc @@ -31,15 +31,15 @@ using helpers::Uint64ConstantFrom; void SchedulingLatencyVisitorARM::HandleBinaryOperationLantencies(HBinaryOperation* instr) { switch (instr->GetResultType()) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: // HAdd and HSub long operations translate to ADDS+ADC or SUBS+SBC pairs, // so a bubble (kArmNopLatency) is added to represent the internal carry flag // dependency inside these pairs. 
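The scheduler.cc hunk above collapses InstructionTypeId() into DataType::TypeId(type) + std::to_string(id). A sketch of what such a TypeId() helper could look like; the shorthand letters are an assumption for illustration (lower-cased descriptor letters, with "l" for references, mirroring the removed code), not a copy of data_type-inl.h:

#include <string>

enum class Ty { kReference, kBool, kInt8, kUint16, kInt16, kInt32, kInt64,
                kFloat32, kFloat64, kVoid };

std::string TypeId(Ty type) {
  switch (type) {
    case Ty::kReference: return "l";
    case Ty::kBool:      return "z";
    case Ty::kInt8:      return "b";
    case Ty::kUint16:    return "c";
    case Ty::kInt16:     return "s";
    case Ty::kInt32:     return "i";
    case Ty::kInt64:     return "j";
    case Ty::kFloat32:   return "f";
    case Ty::kFloat64:   return "d";
    case Ty::kVoid:      return "v";
  }
  return "?";
}

// Matches the behaviour of the replaced code: shorthand plus the HInstruction
// id, e.g. "i12" for a 32-bit integer instruction with id 12.
std::string InstructionTypeId(Ty type, int instruction_id) {
  return TypeId(type) + std::to_string(instruction_id);
}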
last_visited_internal_latency_ = kArmIntegerOpLatency + kArmNopLatency; last_visited_latency_ = kArmIntegerOpLatency; break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: last_visited_latency_ = kArmFloatingPointOpLatency; break; default: @@ -58,12 +58,12 @@ void SchedulingLatencyVisitorARM::VisitSub(HSub* instr) { void SchedulingLatencyVisitorARM::VisitMul(HMul* instr) { switch (instr->GetResultType()) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: last_visited_internal_latency_ = 3 * kArmMulIntegerLatency; last_visited_latency_ = kArmIntegerOpLatency; break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: last_visited_latency_ = kArmMulFloatingPointLatency; break; default: @@ -74,12 +74,12 @@ void SchedulingLatencyVisitorARM::VisitMul(HMul* instr) { void SchedulingLatencyVisitorARM::HandleBitwiseOperationLantencies(HBinaryOperation* instr) { switch (instr->GetResultType()) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: last_visited_internal_latency_ = kArmIntegerOpLatency; last_visited_latency_ = kArmIntegerOpLatency; break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: last_visited_latency_ = kArmFloatingPointOpLatency; break; default: @@ -102,10 +102,10 @@ void SchedulingLatencyVisitorARM::VisitXor(HXor* instr) { void SchedulingLatencyVisitorARM::VisitRor(HRor* instr) { switch (instr->GetResultType()) { - case Primitive::kPrimInt: + case DataType::Type::kInt32: last_visited_latency_ = kArmIntegerOpLatency; break; - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { // HandleLongRotate HInstruction* rhs = instr->GetRight(); if (rhs->IsConstant()) { @@ -130,16 +130,16 @@ void SchedulingLatencyVisitorARM::VisitRor(HRor* instr) { } void SchedulingLatencyVisitorARM::HandleShiftLatencies(HBinaryOperation* instr) { - Primitive::Type type = instr->GetResultType(); + DataType::Type type = instr->GetResultType(); HInstruction* rhs = instr->GetRight(); switch (type) { - case Primitive::kPrimInt: + case DataType::Type::kInt32: if (!rhs->IsConstant()) { last_visited_internal_latency_ = kArmIntegerOpLatency; } last_visited_latency_ = kArmIntegerOpLatency; break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: if (!rhs->IsConstant()) { last_visited_internal_latency_ = 8 * kArmIntegerOpLatency; } else { @@ -204,7 +204,7 @@ void SchedulingLatencyVisitorARM::HandleGenerateConditionWithZero(IfCondition co } void SchedulingLatencyVisitorARM::HandleGenerateLongTestConstant(HCondition* condition) { - DCHECK_EQ(condition->GetLeft()->GetType(), Primitive::kPrimLong); + DCHECK_EQ(condition->GetLeft()->GetType(), DataType::Type::kInt64); IfCondition cond = condition->GetCondition(); @@ -270,7 +270,7 @@ void SchedulingLatencyVisitorARM::HandleGenerateLongTestConstant(HCondition* con } void SchedulingLatencyVisitorARM::HandleGenerateLongTest(HCondition* condition) { - DCHECK_EQ(condition->GetLeft()->GetType(), Primitive::kPrimLong); + DCHECK_EQ(condition->GetLeft()->GetType(), DataType::Type::kInt64); IfCondition cond = condition->GetCondition(); @@ -301,13 +301,13 @@ void SchedulingLatencyVisitorARM::HandleGenerateLongTest(HCondition* condition) // The GenerateTest series of function all counted as internal latency. 
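The scheduler_arm.cc hunks all follow one pattern: classify the instruction by its DataType::Type, then record an internal latency for the hidden expansion plus the latency of the final defining instruction. A minimal sketch of that visitor shape, using placeholder latency constants rather than the ARM figures:

#include <cstdint>

enum class Ty { kInt32, kInt64, kFloat32, kFloat64 };

class LatencyVisitorSketch {
 public:
  void VisitMul(Ty result_type) {
    switch (result_type) {
      case Ty::kInt64:
        // A long multiply expands into several 32-bit multiplies and adds.
        last_visited_internal_latency_ = 3 * kMulIntegerLatency;
        last_visited_latency_ = kIntegerOpLatency;
        break;
      case Ty::kFloat32:
      case Ty::kFloat64:
        last_visited_latency_ = kMulFloatingPointLatency;
        break;
      default:
        last_visited_latency_ = kMulIntegerLatency;
        break;
    }
  }

  uint32_t internal_latency() const { return last_visited_internal_latency_; }
  uint32_t latency() const { return last_visited_latency_; }

 private:
  // Placeholder figures; the real values live in scheduler_arm.h.
  static constexpr uint32_t kIntegerOpLatency = 1;
  static constexpr uint32_t kMulIntegerLatency = 3;
  static constexpr uint32_t kMulFloatingPointLatency = 5;

  uint32_t last_visited_internal_latency_ = 0;
  uint32_t last_visited_latency_ = 0;
};

The VisitMul hunk above is the concrete instance: a 64-bit multiply pays three integer-multiply latencies internally before the final integer op, while float and double multiplies take a single floating-point multiply latency.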
void SchedulingLatencyVisitorARM::HandleGenerateTest(HCondition* condition) { - const Primitive::Type type = condition->GetLeft()->GetType(); + const DataType::Type type = condition->GetLeft()->GetType(); - if (type == Primitive::kPrimLong) { + if (type == DataType::Type::kInt64) { condition->InputAt(1)->IsConstant() ? HandleGenerateLongTestConstant(condition) : HandleGenerateLongTest(condition); - } else if (Primitive::IsFloatingPointType(type)) { + } else if (DataType::IsFloatingPointType(type)) { // GenerateVcmp + Vmrs last_visited_internal_latency_ += 2 * kArmFloatingPointOpLatency; } else { @@ -317,7 +317,7 @@ void SchedulingLatencyVisitorARM::HandleGenerateTest(HCondition* condition) { } bool SchedulingLatencyVisitorARM::CanGenerateTest(HCondition* condition) { - if (condition->GetLeft()->GetType() == Primitive::kPrimLong) { + if (condition->GetLeft()->GetType() == DataType::Type::kInt64) { HInstruction* right = condition->InputAt(1); if (right->IsConstant()) { @@ -353,7 +353,7 @@ void SchedulingLatencyVisitorARM::HandleGenerateConditionGeneric(HCondition* con } void SchedulingLatencyVisitorARM::HandleGenerateEqualLong(HCondition* cond) { - DCHECK_EQ(cond->GetLeft()->GetType(), Primitive::kPrimLong); + DCHECK_EQ(cond->GetLeft()->GetType(), DataType::Type::kInt64); IfCondition condition = cond->GetCondition(); @@ -374,7 +374,7 @@ void SchedulingLatencyVisitorARM::HandleGenerateLongComparesAndJumps() { } void SchedulingLatencyVisitorARM::HandleGenerateConditionLong(HCondition* cond) { - DCHECK_EQ(cond->GetLeft()->GetType(), Primitive::kPrimLong); + DCHECK_EQ(cond->GetLeft()->GetType(), DataType::Type::kInt64); IfCondition condition = cond->GetCondition(); HInstruction* right = cond->InputAt(1); @@ -424,11 +424,11 @@ void SchedulingLatencyVisitorARM::HandleGenerateConditionLong(HCondition* cond) } void SchedulingLatencyVisitorARM::HandleGenerateConditionIntegralOrNonPrimitive(HCondition* cond) { - const Primitive::Type type = cond->GetLeft()->GetType(); + const DataType::Type type = cond->GetLeft()->GetType(); - DCHECK(Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) << type; + DCHECK(DataType::IsIntegralType(type) || type == DataType::Type::kReference) << type; - if (type == Primitive::kPrimLong) { + if (type == DataType::Type::kInt64) { HandleGenerateConditionLong(cond); return; } @@ -482,19 +482,19 @@ void SchedulingLatencyVisitorARM::HandleCondition(HCondition* cond) { return; } - const Primitive::Type type = cond->GetLeft()->GetType(); + const DataType::Type type = cond->GetLeft()->GetType(); - if (Primitive::IsFloatingPointType(type)) { + if (DataType::IsFloatingPointType(type)) { HandleGenerateConditionGeneric(cond); return; } - DCHECK(Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) << type; + DCHECK(DataType::IsIntegralType(type) || type == DataType::Type::kReference) << type; const IfCondition condition = cond->GetCondition(); - if (type == Primitive::kPrimBoolean && - cond->GetRight()->GetType() == Primitive::kPrimBoolean && + if (type == DataType::Type::kBool && + cond->GetRight()->GetType() == DataType::Type::kBool && (condition == kCondEQ || condition == kCondNE)) { if (condition == kCondEQ) { last_visited_internal_latency_ = kArmIntegerOpLatency; @@ -511,20 +511,20 @@ void SchedulingLatencyVisitorARM::VisitCondition(HCondition* instr) { } void SchedulingLatencyVisitorARM::VisitCompare(HCompare* instr) { - Primitive::Type type = instr->InputAt(0)->GetType(); + DataType::Type type = instr->InputAt(0)->GetType(); switch (type) { - case 
Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimChar: - case Primitive::kPrimInt: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kUint16: + case DataType::Type::kInt32: last_visited_internal_latency_ = 2 * kArmIntegerOpLatency; break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: last_visited_internal_latency_ = 2 * kArmIntegerOpLatency + 3 * kArmBranchLatency; break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: last_visited_internal_latency_ = kArmIntegerOpLatency + 2 * kArmFloatingPointOpLatency; break; default: @@ -535,7 +535,7 @@ void SchedulingLatencyVisitorARM::VisitCompare(HCompare* instr) { } void SchedulingLatencyVisitorARM::VisitBitwiseNegatedRight(HBitwiseNegatedRight* instruction) { - if (instruction->GetResultType() == Primitive::kPrimInt) { + if (instruction->GetResultType() == DataType::Type::kInt32) { last_visited_latency_ = kArmIntegerOpLatency; } else { last_visited_internal_latency_ = kArmIntegerOpLatency; @@ -566,7 +566,7 @@ void SchedulingLatencyVisitorARM::HandleGenerateDataProc(HDataProcWithShifterOp* } void SchedulingLatencyVisitorARM::HandleGenerateLongDataProc(HDataProcWithShifterOp* instruction) { - DCHECK_EQ(instruction->GetType(), Primitive::kPrimLong); + DCHECK_EQ(instruction->GetType(), DataType::Type::kInt64); DCHECK(HDataProcWithShifterOp::IsShiftOp(instruction->GetOpKind())); const uint32_t shift_value = instruction->GetShiftAmount(); @@ -595,10 +595,10 @@ void SchedulingLatencyVisitorARM::HandleGenerateLongDataProc(HDataProcWithShifte void SchedulingLatencyVisitorARM::VisitDataProcWithShifterOp(HDataProcWithShifterOp* instruction) { const HDataProcWithShifterOp::OpKind op_kind = instruction->GetOpKind(); - if (instruction->GetType() == Primitive::kPrimInt) { + if (instruction->GetType() == DataType::Type::kInt32) { HandleGenerateDataProcInstruction(); } else { - DCHECK_EQ(instruction->GetType(), Primitive::kPrimLong); + DCHECK_EQ(instruction->GetType(), DataType::Type::kInt64); if (HDataProcWithShifterOp::IsExtensionOp(op_kind)) { HandleGenerateDataProc(instruction); } else { @@ -624,7 +624,7 @@ void SchedulingLatencyVisitorARM::VisitMultiplyAccumulate(HMultiplyAccumulate* A } void SchedulingLatencyVisitorARM::VisitArrayGet(HArrayGet* instruction) { - Primitive::Type type = instruction->GetType(); + DataType::Type type = instruction->GetType(); const bool maybe_compressed_char_at = mirror::kUseStringCompression && instruction->IsStringCharAt(); HInstruction* array_instr = instruction->GetArray(); @@ -632,11 +632,11 @@ void SchedulingLatencyVisitorARM::VisitArrayGet(HArrayGet* instruction) { HInstruction* index = instruction->InputAt(1); switch (type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimChar: - case Primitive::kPrimInt: { + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kUint16: + case DataType::Type::kInt32: { if (maybe_compressed_char_at) { last_visited_internal_latency_ += kArmMemoryLoadLatency; } @@ -664,7 +664,7 @@ void SchedulingLatencyVisitorARM::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimNot: { + case DataType::Type::kReference: { if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { last_visited_latency_ = kArmLoadWithBakerReadBarrierLatency; } else { @@ 
-681,7 +681,7 @@ void SchedulingLatencyVisitorARM::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { if (index->IsConstant()) { last_visited_latency_ = kArmMemoryLoadLatency; } else { @@ -691,7 +691,7 @@ void SchedulingLatencyVisitorARM::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { if (index->IsConstant()) { last_visited_latency_ = kArmMemoryLoadLatency; } else { @@ -701,7 +701,7 @@ void SchedulingLatencyVisitorARM::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { if (index->IsConstant()) { last_visited_latency_ = kArmMemoryLoadLatency; } else { @@ -727,16 +727,16 @@ void SchedulingLatencyVisitorARM::VisitArrayLength(HArrayLength* instruction) { void SchedulingLatencyVisitorARM::VisitArraySet(HArraySet* instruction) { HInstruction* index = instruction->InputAt(1); - Primitive::Type value_type = instruction->GetComponentType(); + DataType::Type value_type = instruction->GetComponentType(); HInstruction* array_instr = instruction->GetArray(); bool has_intermediate_address = array_instr->IsIntermediateAddress(); switch (value_type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimChar: - case Primitive::kPrimInt: { + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kUint16: + case DataType::Type::kInt32: { if (index->IsConstant()) { last_visited_latency_ = kArmMemoryStoreLatency; } else { @@ -749,7 +749,7 @@ void SchedulingLatencyVisitorARM::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimNot: { + case DataType::Type::kReference: { if (instruction->InputAt(2)->IsNullConstant()) { if (index->IsConstant()) { last_visited_latency_ = kArmMemoryStoreLatency; @@ -765,7 +765,7 @@ void SchedulingLatencyVisitorARM::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimLong: { + case DataType::Type::kInt64: { if (index->IsConstant()) { last_visited_latency_ = kArmMemoryLoadLatency; } else { @@ -775,7 +775,7 @@ void SchedulingLatencyVisitorARM::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimFloat: { + case DataType::Type::kFloat32: { if (index->IsConstant()) { last_visited_latency_ = kArmMemoryLoadLatency; } else { @@ -785,7 +785,7 @@ void SchedulingLatencyVisitorARM::VisitArraySet(HArraySet* instruction) { break; } - case Primitive::kPrimDouble: { + case DataType::Type::kFloat64: { if (index->IsConstant()) { last_visited_latency_ = kArmMemoryLoadLatency; } else { @@ -823,9 +823,9 @@ void SchedulingLatencyVisitorARM::HandleDivRemConstantIntegralLatencies(int32_t } void SchedulingLatencyVisitorARM::VisitDiv(HDiv* instruction) { - Primitive::Type type = instruction->GetResultType(); + DataType::Type type = instruction->GetResultType(); switch (type) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { HInstruction* rhs = instruction->GetRight(); if (rhs->IsConstant()) { int32_t imm = Int32ConstantFrom(rhs->AsConstant()); @@ -835,10 +835,10 @@ void SchedulingLatencyVisitorARM::VisitDiv(HDiv* instruction) { } break; } - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: last_visited_latency_ = kArmDivFloatLatency; break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: last_visited_latency_ = kArmDivDoubleLatency; break; default: @@ -886,9 +886,9 @@ void 
SchedulingLatencyVisitorARM::VisitNewInstance(HNewInstance* instruction) { } void SchedulingLatencyVisitorARM::VisitRem(HRem* instruction) { - Primitive::Type type = instruction->GetResultType(); + DataType::Type type = instruction->GetResultType(); switch (type) { - case Primitive::kPrimInt: { + case DataType::Type::kInt32: { HInstruction* rhs = instruction->GetRight(); if (rhs->IsConstant()) { int32_t imm = Int32ConstantFrom(rhs->AsConstant()); @@ -911,19 +911,19 @@ void SchedulingLatencyVisitorARM::HandleFieldGetLatencies(HInstruction* instruct DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet()); DCHECK(codegen_ != nullptr); bool is_volatile = field_info.IsVolatile(); - Primitive::Type field_type = field_info.GetFieldType(); + DataType::Type field_type = field_info.GetFieldType(); bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd(); switch (field_type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimChar: - case Primitive::kPrimInt: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kUint16: + case DataType::Type::kInt32: last_visited_latency_ = kArmMemoryLoadLatency; break; - case Primitive::kPrimNot: + case DataType::Type::kReference: if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { last_visited_internal_latency_ = kArmMemoryLoadLatency + kArmIntegerOpLatency; last_visited_latency_ = kArmMemoryLoadLatency; @@ -932,7 +932,7 @@ void SchedulingLatencyVisitorARM::HandleFieldGetLatencies(HInstruction* instruct } break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: if (is_volatile && !atomic_ldrd_strd) { last_visited_internal_latency_ = kArmMemoryLoadLatency + kArmIntegerOpLatency; last_visited_latency_ = kArmMemoryLoadLatency; @@ -941,11 +941,11 @@ void SchedulingLatencyVisitorARM::HandleFieldGetLatencies(HInstruction* instruct } break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: last_visited_latency_ = kArmMemoryLoadLatency; break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: if (is_volatile && !atomic_ldrd_strd) { last_visited_internal_latency_ = kArmMemoryLoadLatency + kArmIntegerOpLatency + kArmMemoryLoadLatency; @@ -970,16 +970,16 @@ void SchedulingLatencyVisitorARM::HandleFieldSetLatencies(HInstruction* instruct DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet()); DCHECK(codegen_ != nullptr); bool is_volatile = field_info.IsVolatile(); - Primitive::Type field_type = field_info.GetFieldType(); + DataType::Type field_type = field_info.GetFieldType(); bool needs_write_barrier = CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1)); bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd(); switch (field_type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimShort: - case Primitive::kPrimChar: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kUint16: if (is_volatile) { last_visited_internal_latency_ = kArmMemoryBarrierLatency + kArmMemoryStoreLatency; last_visited_latency_ = kArmMemoryBarrierLatency; @@ -988,15 +988,15 @@ void SchedulingLatencyVisitorARM::HandleFieldSetLatencies(HInstruction* instruct } break; - case Primitive::kPrimInt: - case Primitive::kPrimNot: + case DataType::Type::kInt32: + case DataType::Type::kReference: if (kPoisonHeapReferences && 
needs_write_barrier) { last_visited_internal_latency_ += kArmIntegerOpLatency * 2; } last_visited_latency_ = kArmMemoryStoreLatency; break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: if (is_volatile && !atomic_ldrd_strd) { last_visited_internal_latency_ = kArmIntegerOpLatency + kArmMemoryLoadLatency + kArmMemoryStoreLatency; @@ -1006,11 +1006,11 @@ void SchedulingLatencyVisitorARM::HandleFieldSetLatencies(HInstruction* instruct } break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: last_visited_latency_ = kArmMemoryStoreLatency; break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: if (is_volatile && !atomic_ldrd_strd) { last_visited_internal_latency_ = kArmIntegerOpLatency + kArmIntegerOpLatency + kArmMemoryLoadLatency + kArmMemoryStoreLatency; @@ -1043,23 +1043,23 @@ void SchedulingLatencyVisitorARM::VisitSuspendCheck(HSuspendCheck* instruction) } void SchedulingLatencyVisitorARM::VisitTypeConversion(HTypeConversion* instr) { - Primitive::Type result_type = instr->GetResultType(); - Primitive::Type input_type = instr->GetInputType(); + DataType::Type result_type = instr->GetResultType(); + DataType::Type input_type = instr->GetInputType(); switch (result_type) { - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: last_visited_latency_ = kArmIntegerOpLatency; // SBFX or UBFX break; - case Primitive::kPrimInt: + case DataType::Type::kInt32: switch (input_type) { - case Primitive::kPrimLong: + case DataType::Type::kInt64: last_visited_latency_ = kArmIntegerOpLatency; // MOV break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: last_visited_internal_latency_ = kArmTypeConversionFloatingPointIntegerLatency; last_visited_latency_ = kArmFloatingPointOpLatency; break; @@ -1069,19 +1069,19 @@ void SchedulingLatencyVisitorARM::VisitTypeConversion(HTypeConversion* instr) { } break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: switch (input_type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: // MOV and extension last_visited_internal_latency_ = kArmIntegerOpLatency; last_visited_latency_ = kArmIntegerOpLatency; break; - case Primitive::kPrimFloat: - case Primitive::kPrimDouble: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: // invokes runtime last_visited_internal_latency_ = kArmCallInternalLatency; break; @@ -1092,21 +1092,21 @@ void SchedulingLatencyVisitorARM::VisitTypeConversion(HTypeConversion* instr) { } break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: switch (input_type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: last_visited_internal_latency_ = kArmTypeConversionFloatingPointIntegerLatency; last_visited_latency_ = kArmFloatingPointOpLatency; break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: // invokes runtime last_visited_internal_latency_ = 
kArmCallInternalLatency; break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: last_visited_latency_ = kArmFloatingPointOpLatency; break; default: @@ -1115,21 +1115,21 @@ void SchedulingLatencyVisitorARM::VisitTypeConversion(HTypeConversion* instr) { } break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: switch (input_type) { - case Primitive::kPrimBoolean: - case Primitive::kPrimByte: - case Primitive::kPrimChar: - case Primitive::kPrimShort: - case Primitive::kPrimInt: + case DataType::Type::kBool: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: last_visited_internal_latency_ = kArmTypeConversionFloatingPointIntegerLatency; last_visited_latency_ = kArmFloatingPointOpLatency; break; - case Primitive::kPrimLong: + case DataType::Type::kInt64: last_visited_internal_latency_ = 5 * kArmFloatingPointOpLatency; last_visited_latency_ = kArmFloatingPointOpLatency; break; - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: last_visited_latency_ = kArmFloatingPointOpLatency; break; default: diff --git a/compiler/optimizing/scheduler_arm64.cc b/compiler/optimizing/scheduler_arm64.cc index 1d9d28ab24..7bcf4e75a9 100644 --- a/compiler/optimizing/scheduler_arm64.cc +++ b/compiler/optimizing/scheduler_arm64.cc @@ -24,7 +24,7 @@ namespace art { namespace arm64 { void SchedulingLatencyVisitorARM64::VisitBinaryOperation(HBinaryOperation* instr) { - last_visited_latency_ = Primitive::IsFloatingPointType(instr->GetResultType()) + last_visited_latency_ = DataType::IsFloatingPointType(instr->GetResultType()) ? kArm64FloatingPointOpLatency : kArm64IntegerOpLatency; } @@ -80,12 +80,12 @@ void SchedulingLatencyVisitorARM64::VisitBoundsCheck(HBoundsCheck* ATTRIBUTE_UNU } void SchedulingLatencyVisitorARM64::VisitDiv(HDiv* instr) { - Primitive::Type type = instr->GetResultType(); + DataType::Type type = instr->GetResultType(); switch (type) { - case Primitive::kPrimFloat: + case DataType::Type::kFloat32: last_visited_latency_ = kArm64DivFloatLatency; break; - case Primitive::kPrimDouble: + case DataType::Type::kFloat64: last_visited_latency_ = kArm64DivDoubleLatency; break; default: @@ -133,7 +133,7 @@ void SchedulingLatencyVisitorARM64::VisitLoadString(HLoadString* ATTRIBUTE_UNUSE } void SchedulingLatencyVisitorARM64::VisitMul(HMul* instr) { - last_visited_latency_ = Primitive::IsFloatingPointType(instr->GetResultType()) + last_visited_latency_ = DataType::IsFloatingPointType(instr->GetResultType()) ? 
kArm64MulFloatingPointLatency : kArm64MulIntegerLatency; } @@ -153,7 +153,7 @@ void SchedulingLatencyVisitorARM64::VisitNewInstance(HNewInstance* instruction) } void SchedulingLatencyVisitorARM64::VisitRem(HRem* instruction) { - if (Primitive::IsFloatingPointType(instruction->GetResultType())) { + if (DataType::IsFloatingPointType(instruction->GetResultType())) { last_visited_internal_latency_ = kArm64CallInternalLatency; last_visited_latency_ = kArm64CallLatency; } else { @@ -194,8 +194,8 @@ void SchedulingLatencyVisitorARM64::VisitSuspendCheck(HSuspendCheck* instruction } void SchedulingLatencyVisitorARM64::VisitTypeConversion(HTypeConversion* instr) { - if (Primitive::IsFloatingPointType(instr->GetResultType()) || - Primitive::IsFloatingPointType(instr->GetInputType())) { + if (DataType::IsFloatingPointType(instr->GetResultType()) || + DataType::IsFloatingPointType(instr->GetInputType())) { last_visited_latency_ = kArm64TypeConversionFloatingPointIntegerLatency; } else { last_visited_latency_ = kArm64IntegerOpLatency; @@ -203,7 +203,7 @@ void SchedulingLatencyVisitorARM64::VisitTypeConversion(HTypeConversion* instr) } void SchedulingLatencyVisitorARM64::HandleSimpleArithmeticSIMD(HVecOperation *instr) { - if (Primitive::IsFloatingPointType(instr->GetPackedType())) { + if (DataType::IsFloatingPointType(instr->GetPackedType())) { last_visited_latency_ = kArm64SIMDFloatingPointOpLatency; } else { last_visited_latency_ = kArm64SIMDIntegerOpLatency; @@ -236,7 +236,7 @@ void SchedulingLatencyVisitorARM64::VisitVecAbs(HVecAbs* instr) { } void SchedulingLatencyVisitorARM64::VisitVecNot(HVecNot* instr) { - if (instr->GetPackedType() == Primitive::kPrimBoolean) { + if (instr->GetPackedType() == DataType::Type::kBool) { last_visited_internal_latency_ = kArm64SIMDIntegerOpLatency; } last_visited_latency_ = kArm64SIMDIntegerOpLatency; @@ -255,7 +255,7 @@ void SchedulingLatencyVisitorARM64::VisitVecSub(HVecSub* instr) { } void SchedulingLatencyVisitorARM64::VisitVecMul(HVecMul* instr) { - if (Primitive::IsFloatingPointType(instr->GetPackedType())) { + if (DataType::IsFloatingPointType(instr->GetPackedType())) { last_visited_latency_ = kArm64SIMDMulFloatingPointLatency; } else { last_visited_latency_ = kArm64SIMDMulIntegerLatency; @@ -263,10 +263,10 @@ void SchedulingLatencyVisitorARM64::VisitVecMul(HVecMul* instr) { } void SchedulingLatencyVisitorARM64::VisitVecDiv(HVecDiv* instr) { - if (instr->GetPackedType() == Primitive::kPrimFloat) { + if (instr->GetPackedType() == DataType::Type::kFloat32) { last_visited_latency_ = kArm64SIMDDivFloatLatency; } else { - DCHECK(instr->GetPackedType() == Primitive::kPrimDouble); + DCHECK(instr->GetPackedType() == DataType::Type::kFloat64); last_visited_latency_ = kArm64SIMDDivDoubleLatency; } } @@ -327,9 +327,9 @@ void SchedulingLatencyVisitorARM64::HandleVecAddress( void SchedulingLatencyVisitorARM64::VisitVecLoad(HVecLoad* instr) { last_visited_internal_latency_ = 0; - size_t size = Primitive::ComponentSize(instr->GetPackedType()); + size_t size = DataType::Size(instr->GetPackedType()); - if (instr->GetPackedType() == Primitive::kPrimChar + if (instr->GetPackedType() == DataType::Type::kUint16 && mirror::kUseStringCompression && instr->IsStringCharAt()) { // Set latencies for the uncompressed case. 
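The vector load/store hunks above swap Primitive::ComponentSize() for DataType::Size() when computing element addresses. A sketch of such a Size() query; the element sizes follow the Java element sizes implied by the old names, and the 4-byte figure for kReference is an assumption for illustration only:

#include <cstddef>

enum class Ty { kBool, kInt8, kUint16, kInt16, kInt32, kFloat32,
                kReference, kInt64, kFloat64, kVoid };

std::size_t Size(Ty type) {
  switch (type) {
    case Ty::kBool:
    case Ty::kInt8:      return 1;
    case Ty::kUint16:
    case Ty::kInt16:     return 2;
    case Ty::kInt32:
    case Ty::kFloat32:
    case Ty::kReference: return 4;  // assumption: 32-bit heap references
    case Ty::kInt64:
    case Ty::kFloat64:   return 8;
    case Ty::kVoid:      return 0;
  }
  return 0;
}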
@@ -344,7 +344,7 @@ void SchedulingLatencyVisitorARM64::VisitVecLoad(HVecLoad* instr) { void SchedulingLatencyVisitorARM64::VisitVecStore(HVecStore* instr) { last_visited_internal_latency_ = 0; - size_t size = Primitive::ComponentSize(instr->GetPackedType()); + size_t size = DataType::Size(instr->GetPackedType()); HandleVecAddress(instr, size); last_visited_latency_ = kArm64SIMDMemoryStoreLatency; } diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc index cdb6666f83..0e6e0c5a3d 100644 --- a/compiler/optimizing/scheduler_test.cc +++ b/compiler/optimizing/scheduler_test.cc @@ -103,18 +103,20 @@ class SchedulerTest : public CommonCompilerTest { HInstruction* array = new (&allocator_) HParameterValue(graph_->GetDexFile(), dex::TypeIndex(0), 0, - Primitive::kPrimNot); + DataType::Type::kReference); HInstruction* c1 = graph_->GetIntConstant(1); HInstruction* c2 = graph_->GetIntConstant(10); - HInstruction* add1 = new (&allocator_) HAdd(Primitive::kPrimInt, c1, c2); - HInstruction* add2 = new (&allocator_) HAdd(Primitive::kPrimInt, add1, c2); - HInstruction* mul = new (&allocator_) HMul(Primitive::kPrimInt, add1, add2); + HInstruction* add1 = new (&allocator_) HAdd(DataType::Type::kInt32, c1, c2); + HInstruction* add2 = new (&allocator_) HAdd(DataType::Type::kInt32, add1, c2); + HInstruction* mul = new (&allocator_) HMul(DataType::Type::kInt32, add1, add2); HInstruction* div_check = new (&allocator_) HDivZeroCheck(add2, 0); - HInstruction* div = new (&allocator_) HDiv(Primitive::kPrimInt, add1, div_check, 0); - HInstruction* array_get1 = new (&allocator_) HArrayGet(array, add1, Primitive::kPrimInt, 0); - HInstruction* array_set1 = new (&allocator_) HArraySet(array, add1, add2, Primitive::kPrimInt, 0); - HInstruction* array_get2 = new (&allocator_) HArrayGet(array, add1, Primitive::kPrimInt, 0); - HInstruction* array_set2 = new (&allocator_) HArraySet(array, add1, add2, Primitive::kPrimInt, 0); + HInstruction* div = new (&allocator_) HDiv(DataType::Type::kInt32, add1, div_check, 0); + HInstruction* array_get1 = new (&allocator_) HArrayGet(array, add1, DataType::Type::kInt32, 0); + HInstruction* array_set1 = + new (&allocator_) HArraySet(array, add1, add2, DataType::Type::kInt32, 0); + HInstruction* array_get2 = new (&allocator_) HArrayGet(array, add1, DataType::Type::kInt32, 0); + HInstruction* array_set2 = + new (&allocator_) HArraySet(array, add1, add2, DataType::Type::kInt32, 0); DCHECK(div_check->CanThrow()); @@ -204,37 +206,41 @@ class SchedulerTest : public CommonCompilerTest { HInstruction* arr = new (&allocator_) HParameterValue(graph_->GetDexFile(), dex::TypeIndex(0), 0, - Primitive::kPrimNot); + DataType::Type::kReference); HInstruction* i = new (&allocator_) HParameterValue(graph_->GetDexFile(), dex::TypeIndex(1), 1, - Primitive::kPrimInt); + DataType::Type::kInt32); HInstruction* j = new (&allocator_) HParameterValue(graph_->GetDexFile(), dex::TypeIndex(1), 1, - Primitive::kPrimInt); + DataType::Type::kInt32); HInstruction* object = new (&allocator_) HParameterValue(graph_->GetDexFile(), dex::TypeIndex(0), 0, - Primitive::kPrimNot); + DataType::Type::kReference); HInstruction* c0 = graph_->GetIntConstant(0); HInstruction* c1 = graph_->GetIntConstant(1); - HInstruction* add0 = new (&allocator_) HAdd(Primitive::kPrimInt, i, c0); - HInstruction* add1 = new (&allocator_) HAdd(Primitive::kPrimInt, i, c1); - HInstruction* sub0 = new (&allocator_) HSub(Primitive::kPrimInt, i, c0); - HInstruction* sub1 = new (&allocator_) HSub(Primitive::kPrimInt, i, 
c1); - HInstruction* arr_set_0 = new (&allocator_) HArraySet(arr, c0, c0, Primitive::kPrimInt, 0); - HInstruction* arr_set_1 = new (&allocator_) HArraySet(arr, c1, c0, Primitive::kPrimInt, 0); - HInstruction* arr_set_i = new (&allocator_) HArraySet(arr, i, c0, Primitive::kPrimInt, 0); - HInstruction* arr_set_add0 = new (&allocator_) HArraySet(arr, add0, c0, Primitive::kPrimInt, 0); - HInstruction* arr_set_add1 = new (&allocator_) HArraySet(arr, add1, c0, Primitive::kPrimInt, 0); - HInstruction* arr_set_sub0 = new (&allocator_) HArraySet(arr, sub0, c0, Primitive::kPrimInt, 0); - HInstruction* arr_set_sub1 = new (&allocator_) HArraySet(arr, sub1, c0, Primitive::kPrimInt, 0); - HInstruction* arr_set_j = new (&allocator_) HArraySet(arr, j, c0, Primitive::kPrimInt, 0); + HInstruction* add0 = new (&allocator_) HAdd(DataType::Type::kInt32, i, c0); + HInstruction* add1 = new (&allocator_) HAdd(DataType::Type::kInt32, i, c1); + HInstruction* sub0 = new (&allocator_) HSub(DataType::Type::kInt32, i, c0); + HInstruction* sub1 = new (&allocator_) HSub(DataType::Type::kInt32, i, c1); + HInstruction* arr_set_0 = new (&allocator_) HArraySet(arr, c0, c0, DataType::Type::kInt32, 0); + HInstruction* arr_set_1 = new (&allocator_) HArraySet(arr, c1, c0, DataType::Type::kInt32, 0); + HInstruction* arr_set_i = new (&allocator_) HArraySet(arr, i, c0, DataType::Type::kInt32, 0); + HInstruction* arr_set_add0 = + new (&allocator_) HArraySet(arr, add0, c0, DataType::Type::kInt32, 0); + HInstruction* arr_set_add1 = + new (&allocator_) HArraySet(arr, add1, c0, DataType::Type::kInt32, 0); + HInstruction* arr_set_sub0 = + new (&allocator_) HArraySet(arr, sub0, c0, DataType::Type::kInt32, 0); + HInstruction* arr_set_sub1 = + new (&allocator_) HArraySet(arr, sub1, c0, DataType::Type::kInt32, 0); + HInstruction* arr_set_j = new (&allocator_) HArraySet(arr, j, c0, DataType::Type::kInt32, 0); HInstanceFieldSet* set_field10 = new (&allocator_) HInstanceFieldSet(object, c1, nullptr, - Primitive::kPrimInt, + DataType::Type::kInt32, MemberOffset(10), false, kUnknownFieldIndex, diff --git a/compiler/optimizing/select_generator.cc b/compiler/optimizing/select_generator.cc index e220d32344..827b5913af 100644 --- a/compiler/optimizing/select_generator.cc +++ b/compiler/optimizing/select_generator.cc @@ -140,11 +140,11 @@ void HSelectGenerator::Run() { false_value, if_instruction->GetDexPc()); if (both_successors_return) { - if (true_value->GetType() == Primitive::kPrimNot) { - DCHECK(false_value->GetType() == Primitive::kPrimNot); + if (true_value->GetType() == DataType::Type::kReference) { + DCHECK(false_value->GetType() == DataType::Type::kReference); ReferenceTypePropagation::FixUpInstructionType(select, handle_scope_); } - } else if (phi->GetType() == Primitive::kPrimNot) { + } else if (phi->GetType() == DataType::Type::kReference) { select->SetReferenceTypeInfo(phi->GetReferenceTypeInfo()); } block->InsertInstructionBefore(select, if_instruction); diff --git a/compiler/optimizing/side_effects_test.cc b/compiler/optimizing/side_effects_test.cc index b01bc1ca0d..ac5eb15228 100644 --- a/compiler/optimizing/side_effects_test.cc +++ b/compiler/optimizing/side_effects_test.cc @@ -14,9 +14,10 @@ * limitations under the License. */ -#include "gtest/gtest.h" +#include <gtest/gtest.h> + +#include "data_type.h" #include "nodes.h" -#include "primitive.h" namespace art { @@ -89,18 +90,18 @@ TEST(SideEffectsTest, None) { } TEST(SideEffectsTest, DependencesAndNoDependences) { - // Apply test to each individual primitive type. 
- for (Primitive::Type type = Primitive::kPrimNot; - type < Primitive::kPrimVoid; - type = Primitive::Type(type + 1)) { - // Same primitive type and access type: proper write/read dep. + // Apply test to each individual data type. + for (DataType::Type type = DataType::Type::kReference; + type < DataType::Type::kVoid; + type = static_cast<DataType::Type>(static_cast<uint8_t>(type) + 1u)) { + // Same data type and access type: proper write/read dep. testWriteAndReadDependence( SideEffects::FieldWriteOfType(type, false), SideEffects::FieldReadOfType(type, false)); testWriteAndReadDependence( SideEffects::ArrayWriteOfType(type), SideEffects::ArrayReadOfType(type)); - // Same primitive type but different access type: no write/read dep. + // Same data type but different access type: no write/read dep. testNoWriteAndReadDependence( SideEffects::FieldWriteOfType(type, false), SideEffects::ArrayReadOfType(type)); @@ -111,31 +112,31 @@ TEST(SideEffectsTest, DependencesAndNoDependences) { } TEST(SideEffectsTest, NoDependences) { - // Different primitive type, same access type: no write/read dep. + // Different data type, same access type: no write/read dep. testNoWriteAndReadDependence( - SideEffects::FieldWriteOfType(Primitive::kPrimInt, false), - SideEffects::FieldReadOfType(Primitive::kPrimDouble, false)); + SideEffects::FieldWriteOfType(DataType::Type::kInt32, false), + SideEffects::FieldReadOfType(DataType::Type::kFloat64, false)); testNoWriteAndReadDependence( - SideEffects::ArrayWriteOfType(Primitive::kPrimInt), - SideEffects::ArrayReadOfType(Primitive::kPrimDouble)); + SideEffects::ArrayWriteOfType(DataType::Type::kInt32), + SideEffects::ArrayReadOfType(DataType::Type::kFloat64)); // Everything different: no write/read dep. testNoWriteAndReadDependence( - SideEffects::FieldWriteOfType(Primitive::kPrimInt, false), - SideEffects::ArrayReadOfType(Primitive::kPrimDouble)); + SideEffects::FieldWriteOfType(DataType::Type::kInt32, false), + SideEffects::ArrayReadOfType(DataType::Type::kFloat64)); testNoWriteAndReadDependence( - SideEffects::ArrayWriteOfType(Primitive::kPrimInt), - SideEffects::FieldReadOfType(Primitive::kPrimDouble, false)); + SideEffects::ArrayWriteOfType(DataType::Type::kInt32), + SideEffects::FieldReadOfType(DataType::Type::kFloat64, false)); } TEST(SideEffectsTest, VolatileDependences) { SideEffects volatile_write = - SideEffects::FieldWriteOfType(Primitive::kPrimInt, /* is_volatile */ true); + SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile */ true); SideEffects any_write = - SideEffects::FieldWriteOfType(Primitive::kPrimInt, /* is_volatile */ false); + SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile */ false); SideEffects volatile_read = - SideEffects::FieldReadOfType(Primitive::kPrimByte, /* is_volatile */ true); + SideEffects::FieldReadOfType(DataType::Type::kInt8, /* is_volatile */ true); SideEffects any_read = - SideEffects::FieldReadOfType(Primitive::kPrimByte, /* is_volatile */ false); + SideEffects::FieldReadOfType(DataType::Type::kInt8, /* is_volatile */ false); EXPECT_FALSE(volatile_write.MayDependOn(any_read)); EXPECT_TRUE(any_read.MayDependOn(volatile_write)); @@ -151,26 +152,26 @@ TEST(SideEffectsTest, VolatileDependences) { TEST(SideEffectsTest, SameWidthTypesNoAlias) { // Type I/F. 
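The rewritten loops in side_effects_test.cc are not just a rename: Primitive::Type is an ordinary unscoped enum, so Primitive::Type(type + 1) compiles, while DataType::Type appears to be a scoped enum with a uint8_t underlying type (judging from the casts), which has no implicit integer conversions. Stepping through its enumerators therefore needs an explicit round-trip through the underlying type, as in this generic sketch over a stand-in enum:

#include <cstdint>
#include <iostream>

// Stand-in scoped enum; the real DataType::Type lives in compiler/optimizing/data_type.h.
enum class Type : uint8_t {
  kReference, kBool, kInt8, kUint16, kInt16, kInt32, kInt64, kFloat32, kFloat64, kVoid
};

int main() {
  // Same shape as the test loops: start at kReference, stop before kVoid,
  // and advance by casting through the underlying integer type.
  for (Type type = Type::kReference;
       type < Type::kVoid;
       type = static_cast<Type>(static_cast<uint8_t>(type) + 1u)) {
    std::cout << static_cast<int>(type) << '\n';
  }
  return 0;
}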
testNoWriteAndReadDependence( - SideEffects::FieldWriteOfType(Primitive::kPrimInt, /* is_volatile */ false), - SideEffects::FieldReadOfType(Primitive::kPrimFloat, /* is_volatile */ false)); + SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile */ false), + SideEffects::FieldReadOfType(DataType::Type::kFloat32, /* is_volatile */ false)); testNoWriteAndReadDependence( - SideEffects::ArrayWriteOfType(Primitive::kPrimInt), - SideEffects::ArrayReadOfType(Primitive::kPrimFloat)); + SideEffects::ArrayWriteOfType(DataType::Type::kInt32), + SideEffects::ArrayReadOfType(DataType::Type::kFloat32)); // Type L/D. testNoWriteAndReadDependence( - SideEffects::FieldWriteOfType(Primitive::kPrimLong, /* is_volatile */ false), - SideEffects::FieldReadOfType(Primitive::kPrimDouble, /* is_volatile */ false)); + SideEffects::FieldWriteOfType(DataType::Type::kInt64, /* is_volatile */ false), + SideEffects::FieldReadOfType(DataType::Type::kFloat64, /* is_volatile */ false)); testNoWriteAndReadDependence( - SideEffects::ArrayWriteOfType(Primitive::kPrimLong), - SideEffects::ArrayReadOfType(Primitive::kPrimDouble)); + SideEffects::ArrayWriteOfType(DataType::Type::kInt64), + SideEffects::ArrayReadOfType(DataType::Type::kFloat64)); } TEST(SideEffectsTest, AllWritesAndReads) { SideEffects s = SideEffects::None(); // Keep taking the union of different writes and reads. - for (Primitive::Type type = Primitive::kPrimNot; - type < Primitive::kPrimVoid; - type = Primitive::Type(type + 1)) { + for (DataType::Type type = DataType::Type::kReference; + type < DataType::Type::kVoid; + type = static_cast<DataType::Type>(static_cast<uint8_t>(type) + 1u)) { s = s.Union(SideEffects::FieldWriteOfType(type, /* is_volatile */ false)); s = s.Union(SideEffects::ArrayWriteOfType(type)); s = s.Union(SideEffects::FieldReadOfType(type, /* is_volatile */ false)); @@ -214,41 +215,41 @@ TEST(SideEffectsTest, BitStrings) { SideEffects::AllReads().ToString().c_str()); EXPECT_STREQ( "||||||L|", - SideEffects::FieldWriteOfType(Primitive::kPrimNot, false).ToString().c_str()); + SideEffects::FieldWriteOfType(DataType::Type::kReference, false).ToString().c_str()); EXPECT_STREQ( "||DFJISCBZL|DFJISCBZL||DFJISCBZL|DFJISCBZL|", - SideEffects::FieldWriteOfType(Primitive::kPrimNot, true).ToString().c_str()); + SideEffects::FieldWriteOfType(DataType::Type::kReference, true).ToString().c_str()); EXPECT_STREQ( "|||||Z||", - SideEffects::ArrayWriteOfType(Primitive::kPrimBoolean).ToString().c_str()); + SideEffects::ArrayWriteOfType(DataType::Type::kBool).ToString().c_str()); EXPECT_STREQ( "|||||C||", - SideEffects::ArrayWriteOfType(Primitive::kPrimChar).ToString().c_str()); + SideEffects::ArrayWriteOfType(DataType::Type::kUint16).ToString().c_str()); EXPECT_STREQ( "|||||S||", - SideEffects::ArrayWriteOfType(Primitive::kPrimShort).ToString().c_str()); + SideEffects::ArrayWriteOfType(DataType::Type::kInt16).ToString().c_str()); EXPECT_STREQ( "|||B||||", - SideEffects::FieldReadOfType(Primitive::kPrimByte, false).ToString().c_str()); + SideEffects::FieldReadOfType(DataType::Type::kInt8, false).ToString().c_str()); EXPECT_STREQ( "||D|||||", - SideEffects::ArrayReadOfType(Primitive::kPrimDouble).ToString().c_str()); + SideEffects::ArrayReadOfType(DataType::Type::kFloat64).ToString().c_str()); EXPECT_STREQ( "||J|||||", - SideEffects::ArrayReadOfType(Primitive::kPrimLong).ToString().c_str()); + SideEffects::ArrayReadOfType(DataType::Type::kInt64).ToString().c_str()); EXPECT_STREQ( "||F|||||", - 
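The dependence tests above are easier to read with a toy model of SideEffects in mind: conceptually, each (access kind, data type) pair owns one bit, writes and reads are tracked in separate masks, and B.MayDependOn(A) asks whether A writes something that B reads; the volatile variants effectively touch every bit, which is what the all-letters strings in the BitStrings test visualize. The following is a simplified stand-in for illustration only, not ART's actual SideEffects encoding.

#include <cstdint>

// Toy model: one bit per (access kind, type) pair. The layout and kNumTypes
// are assumptions of this sketch, not the real SideEffects bit assignment.
constexpr int kNumTypes = 9;  // reference, bool, int8, uint16, int16, int32, int64, float32, float64
constexpr int kFieldBase = 0;
constexpr int kArrayBase = kNumTypes;

struct ToySideEffects {
  uint32_t writes;
  uint32_t reads;

  static ToySideEffects FieldWriteOfType(int type) { return {1u << (kFieldBase + type), 0u}; }
  static ToySideEffects FieldReadOfType(int type)  { return {0u, 1u << (kFieldBase + type)}; }
  static ToySideEffects ArrayWriteOfType(int type) { return {1u << (kArrayBase + type), 0u}; }
  static ToySideEffects ArrayReadOfType(int type)  { return {0u, 1u << (kArrayBase + type)}; }

  ToySideEffects Union(ToySideEffects other) const {
    return {writes | other.writes, reads | other.reads};
  }

  // "this" may depend on "other" when "other" writes something "this" reads.
  bool MayDependOn(ToySideEffects other) const { return (reads & other.writes) != 0u; }
};

// e.g. ArrayReadOfType(3).MayDependOn(ArrayWriteOfType(3)) is true (same kind, same type),
//      ArrayReadOfType(3).MayDependOn(FieldWriteOfType(3)) is false (different access kind).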
SideEffects::ArrayReadOfType(Primitive::kPrimFloat).ToString().c_str()); + SideEffects::ArrayReadOfType(DataType::Type::kFloat32).ToString().c_str()); EXPECT_STREQ( "||I|||||", - SideEffects::ArrayReadOfType(Primitive::kPrimInt).ToString().c_str()); + SideEffects::ArrayReadOfType(DataType::Type::kInt32).ToString().c_str()); SideEffects s = SideEffects::None(); - s = s.Union(SideEffects::FieldWriteOfType(Primitive::kPrimChar, /* is_volatile */ false)); - s = s.Union(SideEffects::FieldWriteOfType(Primitive::kPrimLong, /* is_volatile */ false)); - s = s.Union(SideEffects::ArrayWriteOfType(Primitive::kPrimShort)); - s = s.Union(SideEffects::FieldReadOfType(Primitive::kPrimInt, /* is_volatile */ false)); - s = s.Union(SideEffects::ArrayReadOfType(Primitive::kPrimFloat)); - s = s.Union(SideEffects::ArrayReadOfType(Primitive::kPrimDouble)); + s = s.Union(SideEffects::FieldWriteOfType(DataType::Type::kUint16, /* is_volatile */ false)); + s = s.Union(SideEffects::FieldWriteOfType(DataType::Type::kInt64, /* is_volatile */ false)); + s = s.Union(SideEffects::ArrayWriteOfType(DataType::Type::kInt16)); + s = s.Union(SideEffects::FieldReadOfType(DataType::Type::kInt32, /* is_volatile */ false)); + s = s.Union(SideEffects::ArrayReadOfType(DataType::Type::kFloat32)); + s = s.Union(SideEffects::ArrayReadOfType(DataType::Type::kFloat64)); EXPECT_STREQ("||DF|I||S|JC|", s.ToString().c_str()); } diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc index 50ab11bc23..77b7a228dc 100644 --- a/compiler/optimizing/ssa_builder.cc +++ b/compiler/optimizing/ssa_builder.cc @@ -17,6 +17,7 @@ #include "ssa_builder.h" #include "bytecode_utils.h" +#include "data_type-inl.h" #include "mirror/class-inl.h" #include "nodes.h" #include "reference_type_propagation.h" @@ -37,10 +38,11 @@ void SsaBuilder::FixNullConstantType() { HInstruction* right = equality_instr->InputAt(1); HInstruction* int_operand = nullptr; - if ((left->GetType() == Primitive::kPrimNot) && (right->GetType() == Primitive::kPrimInt)) { + if ((left->GetType() == DataType::Type::kReference) && + (right->GetType() == DataType::Type::kInt32)) { int_operand = right; - } else if ((right->GetType() == Primitive::kPrimNot) - && (left->GetType() == Primitive::kPrimInt)) { + } else if ((right->GetType() == DataType::Type::kReference) && + (left->GetType() == DataType::Type::kInt32)) { int_operand = left; } else { continue; @@ -122,7 +124,7 @@ static void AddDependentInstructionsToWorklist(HInstruction* instruction, // Find a candidate primitive type for `phi` by merging the type of its inputs. // Return false if conflict is identified. static bool TypePhiFromInputs(HPhi* phi) { - Primitive::Type common_type = phi->GetType(); + DataType::Type common_type = phi->GetType(); for (HInstruction* input : phi->GetInputs()) { if (input->IsPhi() && input->AsPhi()->IsDead()) { @@ -131,26 +133,29 @@ static bool TypePhiFromInputs(HPhi* phi) { return false; } - Primitive::Type input_type = HPhi::ToPhiType(input->GetType()); + DataType::Type input_type = HPhi::ToPhiType(input->GetType()); if (common_type == input_type) { // No change in type. - } else if (Primitive::Is64BitType(common_type) != Primitive::Is64BitType(input_type)) { + } else if (DataType::Is64BitType(common_type) != DataType::Is64BitType(input_type)) { // Types are of different sizes, e.g. int vs. long. Must be a conflict. 
return false; - } else if (Primitive::IsIntegralType(common_type)) { + } else if (DataType::IsIntegralType(common_type)) { // Previous inputs were integral, this one is not but is of the same size. // This does not imply conflict since some bytecode instruction types are // ambiguous. TypeInputsOfPhi will either type them or detect a conflict. - DCHECK(Primitive::IsFloatingPointType(input_type) || input_type == Primitive::kPrimNot); + DCHECK(DataType::IsFloatingPointType(input_type) || + input_type == DataType::Type::kReference); common_type = input_type; - } else if (Primitive::IsIntegralType(input_type)) { + } else if (DataType::IsIntegralType(input_type)) { // Input is integral, common type is not. Same as in the previous case, if // there is a conflict, it will be detected during TypeInputsOfPhi. - DCHECK(Primitive::IsFloatingPointType(common_type) || common_type == Primitive::kPrimNot); + DCHECK(DataType::IsFloatingPointType(common_type) || + common_type == DataType::Type::kReference); } else { // Combining float and reference types. Clearly a conflict. - DCHECK((common_type == Primitive::kPrimFloat && input_type == Primitive::kPrimNot) || - (common_type == Primitive::kPrimNot && input_type == Primitive::kPrimFloat)); + DCHECK( + (common_type == DataType::Type::kFloat32 && input_type == DataType::Type::kReference) || + (common_type == DataType::Type::kReference && input_type == DataType::Type::kFloat32)); return false; } } @@ -163,8 +168,8 @@ static bool TypePhiFromInputs(HPhi* phi) { // Replace inputs of `phi` to match its type. Return false if conflict is identified. bool SsaBuilder::TypeInputsOfPhi(HPhi* phi, ArenaVector<HPhi*>* worklist) { - Primitive::Type common_type = phi->GetType(); - if (Primitive::IsIntegralType(common_type)) { + DataType::Type common_type = phi->GetType(); + if (DataType::IsIntegralType(common_type)) { // We do not need to retype ambiguous inputs because they are always constructed // with the integral type candidate. if (kIsDebugBuild) { @@ -175,14 +180,15 @@ bool SsaBuilder::TypeInputsOfPhi(HPhi* phi, ArenaVector<HPhi*>* worklist) { // Inputs did not need to be replaced, hence no conflict. Report success. return true; } else { - DCHECK(common_type == Primitive::kPrimNot || Primitive::IsFloatingPointType(common_type)); + DCHECK(common_type == DataType::Type::kReference || + DataType::IsFloatingPointType(common_type)); HInputsRef inputs = phi->GetInputs(); for (size_t i = 0; i < inputs.size(); ++i) { HInstruction* input = inputs[i]; if (input->GetType() != common_type) { // Input type does not match phi's type. Try to retype the input or // generate a suitably typed equivalent. - HInstruction* equivalent = (common_type == Primitive::kPrimNot) + HInstruction* equivalent = (common_type == DataType::Type::kReference) ? GetReferenceTypeEquivalent(input) : GetFloatOrDoubleEquivalent(input, common_type); if (equivalent == nullptr) { @@ -209,7 +215,7 @@ bool SsaBuilder::TypeInputsOfPhi(HPhi* phi, ArenaVector<HPhi*>* worklist) { // it was changed by the algorithm or not. 
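The retyped TypePhiFromInputs() above implements a small merge rule over phi input types: identical types merge trivially, mixing 32-bit and 64-bit types is always a conflict, an integral candidate yields to a same-sized float/reference input (the ambiguity is resolved later by TypeInputsOfPhi()), and float versus reference is a hard conflict. A compact stand-alone restatement of that rule, using stand-in helpers rather than the real DataType predicates:

#include <cstdint>

enum class Type : uint8_t {
  kReference, kBool, kInt8, kUint16, kInt16, kInt32, kInt64, kFloat32, kFloat64
};

constexpr bool Is64BitType(Type t) { return t == Type::kInt64 || t == Type::kFloat64; }
constexpr bool IsFloatingPointType(Type t) { return t == Type::kFloat32 || t == Type::kFloat64; }
constexpr bool IsIntegralType(Type t) {
  return t == Type::kBool || t == Type::kInt8 || t == Type::kUint16 ||
         t == Type::kInt16 || t == Type::kInt32 || t == Type::kInt64;
}

// Merge one input type into the running candidate. Returns false on conflict,
// mirroring the control flow of TypePhiFromInputs().
bool MergePhiInputType(Type input, Type* common) {
  if (*common == input) {
    return true;                                   // No change in type.
  }
  if (Is64BitType(*common) != Is64BitType(input)) {
    return false;                                  // e.g. int vs. long: sizes differ, conflict.
  }
  if (IsIntegralType(*common)) {
    *common = input;                               // Same-sized float/reference candidate wins for now.
    return true;
  }
  if (IsIntegralType(input)) {
    return true;                                   // Keep the float/reference candidate.
  }
  return false;                                    // float vs. reference: a real conflict.
}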
bool SsaBuilder::UpdatePrimitiveType(HPhi* phi, ArenaVector<HPhi*>* worklist) { DCHECK(phi->IsLive()); - Primitive::Type original_type = phi->GetType(); + DataType::Type original_type = phi->GetType(); // Try to type the phi in two stages: // (1) find a candidate type for the phi by merging types of all its inputs, @@ -270,8 +276,8 @@ void SsaBuilder::ProcessPrimitiveTypePropagationWorklist(ArenaVector<HPhi*>* wor } static HArrayGet* FindFloatOrDoubleEquivalentOfArrayGet(HArrayGet* aget) { - Primitive::Type type = aget->GetType(); - DCHECK(Primitive::IsIntOrLongType(type)); + DataType::Type type = aget->GetType(); + DCHECK(DataType::IsIntOrLongType(type)); HInstruction* next = aget->GetNext(); if (next != nullptr && next->IsArrayGet()) { HArrayGet* next_aget = next->AsArrayGet(); @@ -283,24 +289,25 @@ static HArrayGet* FindFloatOrDoubleEquivalentOfArrayGet(HArrayGet* aget) { } static HArrayGet* CreateFloatOrDoubleEquivalentOfArrayGet(HArrayGet* aget) { - Primitive::Type type = aget->GetType(); - DCHECK(Primitive::IsIntOrLongType(type)); + DataType::Type type = aget->GetType(); + DCHECK(DataType::IsIntOrLongType(type)); DCHECK(FindFloatOrDoubleEquivalentOfArrayGet(aget) == nullptr); HArrayGet* equivalent = new (aget->GetBlock()->GetGraph()->GetArena()) HArrayGet( aget->GetArray(), aget->GetIndex(), - type == Primitive::kPrimInt ? Primitive::kPrimFloat : Primitive::kPrimDouble, + type == DataType::Type::kInt32 ? DataType::Type::kFloat32 : DataType::Type::kFloat64, aget->GetDexPc()); aget->GetBlock()->InsertInstructionAfter(equivalent, aget); return equivalent; } -static Primitive::Type GetPrimitiveArrayComponentType(HInstruction* array) +static DataType::Type GetPrimitiveArrayComponentType(HInstruction* array) REQUIRES_SHARED(Locks::mutator_lock_) { ReferenceTypeInfo array_type = array->GetReferenceTypeInfo(); DCHECK(array_type.IsPrimitiveArrayClass()); - return array_type.GetTypeHandle()->GetComponentType()->GetPrimitiveType(); + return DataTypeFromPrimitive( + array_type.GetTypeHandle()->GetComponentType()->GetPrimitiveType()); } bool SsaBuilder::FixAmbiguousArrayOps() { @@ -325,10 +332,10 @@ bool SsaBuilder::FixAmbiguousArrayOps() { } HArrayGet* aget_float = FindFloatOrDoubleEquivalentOfArrayGet(aget_int); - Primitive::Type array_type = GetPrimitiveArrayComponentType(array); - DCHECK_EQ(Primitive::Is64BitType(aget_int->GetType()), Primitive::Is64BitType(array_type)); + DataType::Type array_type = GetPrimitiveArrayComponentType(array); + DCHECK_EQ(DataType::Is64BitType(aget_int->GetType()), DataType::Is64BitType(array_type)); - if (Primitive::IsIntOrLongType(array_type)) { + if (DataType::IsIntOrLongType(array_type)) { if (aget_float != nullptr) { // There is a float/double equivalent. We must replace it and re-run // primitive type propagation on all dependent instructions. @@ -337,7 +344,7 @@ bool SsaBuilder::FixAmbiguousArrayOps() { AddDependentInstructionsToWorklist(aget_int, &worklist); } } else { - DCHECK(Primitive::IsFloatingPointType(array_type)); + DCHECK(DataType::IsFloatingPointType(array_type)); if (aget_float == nullptr) { // This is a float/double ArrayGet but there were no typed uses which // would create the typed equivalent. Create it now. 
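One detail worth noting in the equivalent-creation hunk above: the float/double "equivalent" of an integral ArrayGet preserves the bit width, which is what the ternary in CreateFloatOrDoubleEquivalentOfArrayGet() now spells as kInt32 -> kFloat32 and, for the 64-bit case, kInt64 -> kFloat64. As a one-liner over a stand-in enum:

#include <cstdint>

enum class Type : uint8_t { kInt32, kInt64, kFloat32, kFloat64 };

// Width-preserving floating-point twin of an int/long typed value,
// mirroring the ternary in CreateFloatOrDoubleEquivalentOfArrayGet().
constexpr Type FloatEquivalentOf(Type type) {
  return type == Type::kInt32 ? Type::kFloat32 : Type::kFloat64;
}

static_assert(FloatEquivalentOf(Type::kInt64) == Type::kFloat64, "width is preserved");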
@@ -365,13 +372,13 @@ bool SsaBuilder::FixAmbiguousArrayOps() { } HInstruction* value = aset->GetValue(); - Primitive::Type value_type = value->GetType(); - Primitive::Type array_type = GetPrimitiveArrayComponentType(array); - DCHECK_EQ(Primitive::Is64BitType(value_type), Primitive::Is64BitType(array_type)); + DataType::Type value_type = value->GetType(); + DataType::Type array_type = GetPrimitiveArrayComponentType(array); + DCHECK_EQ(DataType::Is64BitType(value_type), DataType::Is64BitType(array_type)); - if (Primitive::IsFloatingPointType(array_type)) { - if (!Primitive::IsFloatingPointType(value_type)) { - DCHECK(Primitive::IsIntegralType(value_type)); + if (DataType::IsFloatingPointType(array_type)) { + if (!DataType::IsFloatingPointType(value_type)) { + DCHECK(DataType::IsIntegralType(value_type)); // Array elements are floating-point but the value has not been replaced // with its floating-point equivalent. The replacement must always // succeed in code validated by the verifier. @@ -390,8 +397,8 @@ bool SsaBuilder::FixAmbiguousArrayOps() { } else { // Array elements are integral and the value assigned to it initially // was integral too. Nothing to do. - DCHECK(Primitive::IsIntegralType(array_type)); - DCHECK(Primitive::IsIntegralType(value_type)); + DCHECK(DataType::IsIntegralType(array_type)); + DCHECK(DataType::IsIntegralType(value_type)); } } } @@ -599,7 +606,7 @@ HDoubleConstant* SsaBuilder::GetDoubleEquivalent(HLongConstant* constant) { * floating point registers and core registers), we need to create a copy of the * phi with a floating point / reference type. */ -HPhi* SsaBuilder::GetFloatDoubleOrReferenceEquivalentOfPhi(HPhi* phi, Primitive::Type type) { +HPhi* SsaBuilder::GetFloatDoubleOrReferenceEquivalentOfPhi(HPhi* phi, DataType::Type type) { DCHECK(phi->IsLive()) << "Cannot get equivalent of a dead phi since it would create a live one."; // We place the floating point /reference phi next to this phi. @@ -637,9 +644,9 @@ HPhi* SsaBuilder::GetFloatDoubleOrReferenceEquivalentOfPhi(HPhi* phi, Primitive: } HArrayGet* SsaBuilder::GetFloatOrDoubleEquivalentOfArrayGet(HArrayGet* aget) { - DCHECK(Primitive::IsIntegralType(aget->GetType())); + DCHECK(DataType::IsIntegralType(aget->GetType())); - if (!Primitive::IsIntOrLongType(aget->GetType())) { + if (!DataType::IsIntOrLongType(aget->GetType())) { // Cannot type boolean, char, byte, short to float/double. return nullptr; } @@ -650,7 +657,7 @@ HArrayGet* SsaBuilder::GetFloatOrDoubleEquivalentOfArrayGet(HArrayGet* aget) { // int/long. Requesting a float/double equivalent should lead to a conflict. 
if (kIsDebugBuild) { ScopedObjectAccess soa(Thread::Current()); - DCHECK(Primitive::IsIntOrLongType(GetPrimitiveArrayComponentType(aget->GetArray()))); + DCHECK(DataType::IsIntOrLongType(GetPrimitiveArrayComponentType(aget->GetArray()))); } return nullptr; } else { @@ -661,7 +668,7 @@ HArrayGet* SsaBuilder::GetFloatOrDoubleEquivalentOfArrayGet(HArrayGet* aget) { } } -HInstruction* SsaBuilder::GetFloatOrDoubleEquivalent(HInstruction* value, Primitive::Type type) { +HInstruction* SsaBuilder::GetFloatOrDoubleEquivalent(HInstruction* value, DataType::Type type) { if (value->IsArrayGet()) { return GetFloatOrDoubleEquivalentOfArrayGet(value->AsArrayGet()); } else if (value->IsLongConstant()) { @@ -679,7 +686,7 @@ HInstruction* SsaBuilder::GetReferenceTypeEquivalent(HInstruction* value) { if (value->IsIntConstant() && value->AsIntConstant()->GetValue() == 0) { return graph_->GetNullConstant(); } else if (value->IsPhi()) { - return GetFloatDoubleOrReferenceEquivalentOfPhi(value->AsPhi(), Primitive::kPrimNot); + return GetFloatDoubleOrReferenceEquivalentOfPhi(value->AsPhi(), DataType::Type::kReference); } else { return nullptr; } diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h index 978f113ec4..1819ee568e 100644 --- a/compiler/optimizing/ssa_builder.h +++ b/compiler/optimizing/ssa_builder.h @@ -64,20 +64,20 @@ class SsaBuilder : public ValueObject { GraphAnalysisResult BuildSsa(); - HInstruction* GetFloatOrDoubleEquivalent(HInstruction* instruction, Primitive::Type type); + HInstruction* GetFloatOrDoubleEquivalent(HInstruction* instruction, DataType::Type type); HInstruction* GetReferenceTypeEquivalent(HInstruction* instruction); void MaybeAddAmbiguousArrayGet(HArrayGet* aget) { - Primitive::Type type = aget->GetType(); - DCHECK(!Primitive::IsFloatingPointType(type)); - if (Primitive::IsIntOrLongType(type)) { + DataType::Type type = aget->GetType(); + DCHECK(!DataType::IsFloatingPointType(type)); + if (DataType::IsIntOrLongType(type)) { ambiguous_agets_.push_back(aget); } } void MaybeAddAmbiguousArraySet(HArraySet* aset) { - Primitive::Type type = aset->GetValue()->GetType(); - if (Primitive::IsIntOrLongType(type)) { + DataType::Type type = aset->GetValue()->GetType(); + if (DataType::IsIntOrLongType(type)) { ambiguous_asets_.push_back(aset); } } @@ -111,7 +111,7 @@ class SsaBuilder : public ValueObject { HFloatConstant* GetFloatEquivalent(HIntConstant* constant); HDoubleConstant* GetDoubleEquivalent(HLongConstant* constant); - HPhi* GetFloatDoubleOrReferenceEquivalentOfPhi(HPhi* phi, Primitive::Type type); + HPhi* GetFloatDoubleOrReferenceEquivalentOfPhi(HPhi* phi, DataType::Type type); HArrayGet* GetFloatOrDoubleEquivalentOfArrayGet(HArrayGet* aget); void RemoveRedundantUninitializedStrings(); diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc index 754a762214..f1f1be25d7 100644 --- a/compiler/optimizing/ssa_liveness_analysis.cc +++ b/compiler/optimizing/ssa_liveness_analysis.cc @@ -480,7 +480,7 @@ size_t LiveInterval::NumberOfSpillSlotsNeeded() const { return definition->AsVecOperation()->GetVectorNumberOfBytes() / kVRegSize; } // Return number of needed spill slots based on type. - return (type_ == Primitive::kPrimLong || type_ == Primitive::kPrimDouble) ? 2 : 1; + return (type_ == DataType::Type::kInt64 || type_ == DataType::Type::kFloat64) ? 
2 : 1; } Location LiveInterval::ToLocation() const { diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h index a6681575a2..ec4ab31d61 100644 --- a/compiler/optimizing/ssa_liveness_analysis.h +++ b/compiler/optimizing/ssa_liveness_analysis.h @@ -262,16 +262,16 @@ class SafepointPosition : public ArenaObject<kArenaAllocSsaLiveness> { class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> { public: static LiveInterval* MakeInterval(ArenaAllocator* allocator, - Primitive::Type type, + DataType::Type type, HInstruction* instruction = nullptr) { return new (allocator) LiveInterval(allocator, type, instruction); } - static LiveInterval* MakeFixedInterval(ArenaAllocator* allocator, int reg, Primitive::Type type) { + static LiveInterval* MakeFixedInterval(ArenaAllocator* allocator, int reg, DataType::Type type) { return new (allocator) LiveInterval(allocator, type, nullptr, true, reg, false); } - static LiveInterval* MakeTempInterval(ArenaAllocator* allocator, Primitive::Type type) { + static LiveInterval* MakeTempInterval(ArenaAllocator* allocator, DataType::Type type) { return new (allocator) LiveInterval(allocator, type, nullptr, false, kNoRegister, true); } @@ -608,7 +608,7 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> { return parent_->env_uses_; } - Primitive::Type GetType() const { + DataType::Type GetType() const { return type_; } @@ -783,7 +783,7 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> { size_t NumberOfSpillSlotsNeeded() const; bool IsFloatingPoint() const { - return type_ == Primitive::kPrimFloat || type_ == Primitive::kPrimDouble; + return type_ == DataType::Type::kFloat32 || type_ == DataType::Type::kFloat64; } // Converts the location of the interval to a `Location` object. @@ -970,7 +970,7 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> { private: LiveInterval(ArenaAllocator* allocator, - Primitive::Type type, + DataType::Type type, HInstruction* defined_by = nullptr, bool is_fixed = false, int reg = kNoRegister, @@ -1102,7 +1102,7 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> { EnvUsePositionList env_uses_; // The instruction type this interval corresponds to. - const Primitive::Type type_; + const DataType::Type type_; // Live interval that is the result of a split. LiveInterval* next_sibling_; @@ -1262,7 +1262,7 @@ class SsaLivenessAnalysis : public ValueObject { // the exception handler to its location at the top of the catch block. 
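The liveness hunks keep the existing width rules under the new names: an interval needs two virtual-register-sized spill slots exactly when its type is 64 bits wide, and it lives in a floating-point register exactly when its type is kFloat32 or kFloat64. A tiny sketch of the two predicates, matching NumberOfSpillSlotsNeeded() and IsFloatingPoint() above (stand-in enum, not the real DataType):

#include <cstddef>
#include <cstdint>

enum class Type : uint8_t {
  kReference, kBool, kInt8, kUint16, kInt16, kInt32, kInt64, kFloat32, kFloat64
};

// Mirrors LiveInterval::IsFloatingPoint().
constexpr bool IsFloatingPoint(Type type) {
  return type == Type::kFloat32 || type == Type::kFloat64;
}

// Mirrors the scalar branch of LiveInterval::NumberOfSpillSlotsNeeded():
// 64-bit values occupy two vreg-sized slots, everything else occupies one.
constexpr size_t NumberOfSpillSlotsNeeded(Type type) {
  return (type == Type::kInt64 || type == Type::kFloat64) ? 2u : 1u;
}

static_assert(NumberOfSpillSlotsNeeded(Type::kFloat64) == 2u, "doubles take two slots");
static_assert(NumberOfSpillSlotsNeeded(Type::kInt32) == 1u, "ints take one slot");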
if (env_holder->CanThrowIntoCatchBlock()) return true; if (instruction->GetBlock()->GetGraph()->IsDebuggable()) return true; - return instruction->GetType() == Primitive::kPrimNot; + return instruction->GetType() == DataType::Type::kReference; } void CheckNoLiveInIrreducibleLoop(const HBasicBlock& block) const { diff --git a/compiler/optimizing/ssa_liveness_analysis_test.cc b/compiler/optimizing/ssa_liveness_analysis_test.cc index b46060a24a..e89bf6d801 100644 --- a/compiler/optimizing/ssa_liveness_analysis_test.cc +++ b/compiler/optimizing/ssa_liveness_analysis_test.cc @@ -70,7 +70,7 @@ class SsaLivenessAnalysisTest : public testing::Test { TEST_F(SsaLivenessAnalysisTest, TestReturnArg) { HInstruction* arg = new (&allocator_) HParameterValue( - graph_->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimInt); + graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); entry_->AddInstruction(arg); HBasicBlock* block = CreateSuccessor(entry_); @@ -90,15 +90,15 @@ TEST_F(SsaLivenessAnalysisTest, TestReturnArg) { TEST_F(SsaLivenessAnalysisTest, TestAput) { HInstruction* array = new (&allocator_) HParameterValue( - graph_->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimNot); + graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); HInstruction* index = new (&allocator_) HParameterValue( - graph_->GetDexFile(), dex::TypeIndex(1), 1, Primitive::kPrimInt); + graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kInt32); HInstruction* value = new (&allocator_) HParameterValue( - graph_->GetDexFile(), dex::TypeIndex(2), 2, Primitive::kPrimInt); + graph_->GetDexFile(), dex::TypeIndex(2), 2, DataType::Type::kInt32); HInstruction* extra_arg1 = new (&allocator_) HParameterValue( - graph_->GetDexFile(), dex::TypeIndex(3), 3, Primitive::kPrimInt); + graph_->GetDexFile(), dex::TypeIndex(3), 3, DataType::Type::kInt32); HInstruction* extra_arg2 = new (&allocator_) HParameterValue( - graph_->GetDexFile(), dex::TypeIndex(4), 4, Primitive::kPrimNot); + graph_->GetDexFile(), dex::TypeIndex(4), 4, DataType::Type::kReference); ArenaVector<HInstruction*> args({ array, index, value, extra_arg1, extra_arg2 }, allocator_.Adapter()); for (HInstruction* insn : args) { @@ -127,7 +127,7 @@ TEST_F(SsaLivenessAnalysisTest, TestAput) { bounds_check_env->CopyFrom(args); bounds_check->SetRawEnvironment(bounds_check_env); HInstruction* array_set = - new (&allocator_) HArraySet(array, index, value, Primitive::kPrimInt, /* dex_pc */ 0); + new (&allocator_) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0); block->AddInstruction(array_set); graph_->BuildDominatorTree(); @@ -160,15 +160,15 @@ TEST_F(SsaLivenessAnalysisTest, TestAput) { TEST_F(SsaLivenessAnalysisTest, TestDeoptimize) { HInstruction* array = new (&allocator_) HParameterValue( - graph_->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimNot); + graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); HInstruction* index = new (&allocator_) HParameterValue( - graph_->GetDexFile(), dex::TypeIndex(1), 1, Primitive::kPrimInt); + graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kInt32); HInstruction* value = new (&allocator_) HParameterValue( - graph_->GetDexFile(), dex::TypeIndex(2), 2, Primitive::kPrimInt); + graph_->GetDexFile(), dex::TypeIndex(2), 2, DataType::Type::kInt32); HInstruction* extra_arg1 = new (&allocator_) HParameterValue( - graph_->GetDexFile(), dex::TypeIndex(3), 3, Primitive::kPrimInt); + graph_->GetDexFile(), dex::TypeIndex(3), 3, DataType::Type::kInt32); 
HInstruction* extra_arg2 = new (&allocator_) HParameterValue( - graph_->GetDexFile(), dex::TypeIndex(4), 4, Primitive::kPrimNot); + graph_->GetDexFile(), dex::TypeIndex(4), 4, DataType::Type::kReference); ArenaVector<HInstruction*> args({ array, index, value, extra_arg1, extra_arg2 }, allocator_.Adapter()); for (HInstruction* insn : args) { @@ -201,7 +201,7 @@ TEST_F(SsaLivenessAnalysisTest, TestDeoptimize) { deoptimize_env->CopyFrom(args); deoptimize->SetRawEnvironment(deoptimize_env); HInstruction* array_set = - new (&allocator_) HArraySet(array, index, value, Primitive::kPrimInt, /* dex_pc */ 0); + new (&allocator_) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0); block->AddInstruction(array_set); graph_->BuildDominatorTree(); diff --git a/compiler/optimizing/ssa_test.cc b/compiler/optimizing/ssa_test.cc index f69f417efc..ac998dbcab 100644 --- a/compiler/optimizing/ssa_test.cc +++ b/compiler/optimizing/ssa_test.cc @@ -89,7 +89,7 @@ static void TestCode(const uint16_t* data, const char* expected) { // Test that phis had their type set. for (HBasicBlock* block : graph->GetBlocks()) { for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { - ASSERT_NE(it.Current()->GetType(), Primitive::kPrimVoid); + ASSERT_NE(it.Current()->GetType(), DataType::Type::kVoid); } } diff --git a/compiler/optimizing/x86_memory_gen.cc b/compiler/optimizing/x86_memory_gen.cc index 4e256832a2..0271850f29 100644 --- a/compiler/optimizing/x86_memory_gen.cc +++ b/compiler/optimizing/x86_memory_gen.cc @@ -41,7 +41,7 @@ class MemoryOperandVisitor : public HGraphVisitor { } HInstruction* array = array_len->InputAt(0); - DCHECK_EQ(array->GetType(), Primitive::kPrimNot); + DCHECK_EQ(array->GetType(), DataType::Type::kReference); // Don't apply this optimization when the array is nullptr. 
if (array->IsConstant() || (array->IsNullCheck() && array->InputAt(0)->IsConstant())) { diff --git a/runtime/primitive.cc b/runtime/primitive.cc index 1ec345a359..6f3571c78c 100644 --- a/runtime/primitive.cc +++ b/runtime/primitive.cc @@ -60,9 +60,9 @@ const char* Primitive::BoxedDescriptor(Primitive::Type type) { return kBoxedDescriptors[type]; } -std::ostream& operator<<(std::ostream& os, const Primitive::Type& type) { - int32_t int_type = static_cast<int32_t>(type); - if (type >= Primitive::kPrimNot && type <= Primitive::kPrimVoid) { +std::ostream& operator<<(std::ostream& os, Primitive::Type type) { + uint32_t int_type = static_cast<uint32_t>(type); + if (type <= Primitive::kPrimLast) { os << kTypeNames[int_type]; } else { os << "Type[" << int_type << "]"; diff --git a/runtime/primitive.h b/runtime/primitive.h index a0edaee6fe..a429914d5c 100644 --- a/runtime/primitive.h +++ b/runtime/primitive.h @@ -49,7 +49,7 @@ class Primitive { kPrimLast = kPrimVoid }; - static Type GetType(char type) { + static constexpr Type GetType(char type) { switch (type) { case 'B': return kPrimByte; @@ -74,7 +74,7 @@ class Primitive { } } - static size_t ComponentSizeShift(Type type) { + static constexpr size_t ComponentSizeShift(Type type) { switch (type) { case kPrimVoid: case kPrimBoolean: @@ -86,13 +86,12 @@ class Primitive { case kPrimLong: case kPrimDouble: return 3; case kPrimNot: return ComponentSizeShiftWidth(kObjectReferenceSize); - default: - LOG(FATAL) << "Invalid type " << static_cast<int>(type); - return 0; } + LOG(FATAL) << "Invalid type " << static_cast<int>(type); + UNREACHABLE(); } - static size_t ComponentSize(Type type) { + static constexpr size_t ComponentSize(Type type) { switch (type) { case kPrimVoid: return 0; case kPrimBoolean: @@ -104,10 +103,9 @@ class Primitive { case kPrimLong: case kPrimDouble: return 8; case kPrimNot: return kObjectReferenceSize; - default: - LOG(FATAL) << "Invalid type " << static_cast<int>(type); - return 0; } + LOG(FATAL) << "Invalid type " << static_cast<int>(type); + UNREACHABLE(); } static const char* Descriptor(Type type) { @@ -141,26 +139,6 @@ class Primitive { // Returns the descriptor corresponding to the boxed type of |type|. static const char* BoxedDescriptor(Type type); - static bool IsFloatingPointType(Type type) { - return type == kPrimFloat || type == kPrimDouble; - } - - static bool IsIntegralType(Type type) { - // The Java language does not allow treating boolean as an integral type but - // our bit representation makes it safe. - switch (type) { - case kPrimBoolean: - case kPrimByte: - case kPrimChar: - case kPrimShort: - case kPrimInt: - case kPrimLong: - return true; - default: - return false; - } - } - // Return true if |type| is an numeric type. static constexpr bool IsNumericType(Type type) { switch (type) { @@ -175,6 +153,8 @@ class Primitive { case Primitive::Type::kPrimDouble: return true; case Primitive::Type::kPrimVoid: return false; } + LOG(FATAL) << "Invalid type " << static_cast<int>(type); + UNREACHABLE(); } // Returns true if it is possible to widen type |from| to type |to|. Both |from| and @@ -190,73 +170,15 @@ class Primitive { return IsNumericType(from) && IsNumericType(to) && from <= to; } - static bool IsIntOrLongType(Type type) { - return type == kPrimInt || type == kPrimLong; - } - static bool Is64BitType(Type type) { return type == kPrimLong || type == kPrimDouble; } - // Return the general kind of `type`, fusing integer-like types as kPrimInt. 
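The runtime/primitive.h hunk above does more than delete the helpers that moved to DataType: the remaining switch-based helpers become constexpr and lose their default: cases, with the LOG(FATAL) + UNREACHABLE() fallback moved after the switch. That way the compiler's enum-coverage warning flags any newly added enumerator at build time instead of deferring to a runtime fatal. The same pattern in a self-contained sketch, with assert() standing in for ART's LOG(FATAL)/UNREACHABLE macros:

#include <cassert>
#include <cstddef>

// Stand-in for Primitive::Type; the real enum lives in runtime/primitive.h.
enum class PrimType { kNot, kBoolean, kByte, kChar, kShort, kInt, kLong, kFloat, kDouble, kVoid };

// Exhaustive switch with no default: -Wswitch now reports, at compile time,
// any enumerator that is added later but not handled here.
constexpr size_t ComponentSize(PrimType type) {
  switch (type) {
    case PrimType::kVoid:    return 0;
    case PrimType::kBoolean:
    case PrimType::kByte:    return 1;
    case PrimType::kChar:
    case PrimType::kShort:   return 2;
    case PrimType::kInt:
    case PrimType::kFloat:   return 4;
    case PrimType::kLong:
    case PrimType::kDouble:  return 8;
    case PrimType::kNot:     return 4;  // Placeholder for kObjectReferenceSize.
  }
  // Reached only for an out-of-range value; ART uses LOG(FATAL) + UNREACHABLE() here.
  assert(false && "Invalid type");
  return 0;
}

static_assert(ComponentSize(PrimType::kDouble) == 8, "usable in constant expressions");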
- static Type PrimitiveKind(Type type) { - switch (type) { - case kPrimBoolean: - case kPrimByte: - case kPrimShort: - case kPrimChar: - case kPrimInt: - return kPrimInt; - default: - return type; - } - } - - static int64_t MinValueOfIntegralType(Type type) { - switch (type) { - case kPrimBoolean: - return std::numeric_limits<bool>::min(); - case kPrimByte: - return std::numeric_limits<int8_t>::min(); - case kPrimChar: - return std::numeric_limits<uint16_t>::min(); - case kPrimShort: - return std::numeric_limits<int16_t>::min(); - case kPrimInt: - return std::numeric_limits<int32_t>::min(); - case kPrimLong: - return std::numeric_limits<int64_t>::min(); - default: - LOG(FATAL) << "non integral type"; - } - return 0; - } - - static int64_t MaxValueOfIntegralType(Type type) { - switch (type) { - case kPrimBoolean: - return std::numeric_limits<bool>::max(); - case kPrimByte: - return std::numeric_limits<int8_t>::max(); - case kPrimChar: - return std::numeric_limits<uint16_t>::max(); - case kPrimShort: - return std::numeric_limits<int16_t>::max(); - case kPrimInt: - return std::numeric_limits<int32_t>::max(); - case kPrimLong: - return std::numeric_limits<int64_t>::max(); - default: - LOG(FATAL) << "non integral type"; - } - return 0; - } - private: DISALLOW_IMPLICIT_CONSTRUCTORS(Primitive); }; -std::ostream& operator<<(std::ostream& os, const Primitive::Type& state); +std::ostream& operator<<(std::ostream& os, Primitive::Type state); } // namespace art diff --git a/test/529-checker-unresolved/src/Main.java b/test/529-checker-unresolved/src/Main.java index c2683acb30..255ce7859b 100644 --- a/test/529-checker-unresolved/src/Main.java +++ b/test/529-checker-unresolved/src/Main.java @@ -45,21 +45,21 @@ public class Main extends UnresolvedSuperClass { } /// CHECK-START: void Main.callUnresolvedStaticFieldAccess() register (before) - /// CHECK: UnresolvedStaticFieldSet field_type:PrimByte - /// CHECK: UnresolvedStaticFieldSet field_type:PrimChar - /// CHECK: UnresolvedStaticFieldSet field_type:PrimInt - /// CHECK: UnresolvedStaticFieldSet field_type:PrimLong - /// CHECK: UnresolvedStaticFieldSet field_type:PrimFloat - /// CHECK: UnresolvedStaticFieldSet field_type:PrimDouble - /// CHECK: UnresolvedStaticFieldSet field_type:PrimNot - - /// CHECK: UnresolvedStaticFieldGet field_type:PrimByte - /// CHECK: UnresolvedStaticFieldGet field_type:PrimChar - /// CHECK: UnresolvedStaticFieldGet field_type:PrimInt - /// CHECK: UnresolvedStaticFieldGet field_type:PrimLong - /// CHECK: UnresolvedStaticFieldGet field_type:PrimFloat - /// CHECK: UnresolvedStaticFieldGet field_type:PrimDouble - /// CHECK: UnresolvedStaticFieldGet field_type:PrimNot + /// CHECK: UnresolvedStaticFieldSet field_type:Int8 + /// CHECK: UnresolvedStaticFieldSet field_type:Uint16 + /// CHECK: UnresolvedStaticFieldSet field_type:Int32 + /// CHECK: UnresolvedStaticFieldSet field_type:Int64 + /// CHECK: UnresolvedStaticFieldSet field_type:Float32 + /// CHECK: UnresolvedStaticFieldSet field_type:Float64 + /// CHECK: UnresolvedStaticFieldSet field_type:Reference + + /// CHECK: UnresolvedStaticFieldGet field_type:Int8 + /// CHECK: UnresolvedStaticFieldGet field_type:Uint16 + /// CHECK: UnresolvedStaticFieldGet field_type:Int32 + /// CHECK: UnresolvedStaticFieldGet field_type:Int64 + /// CHECK: UnresolvedStaticFieldGet field_type:Float32 + /// CHECK: UnresolvedStaticFieldGet field_type:Float64 + /// CHECK: UnresolvedStaticFieldGet field_type:Reference static public void callUnresolvedStaticFieldAccess() { Object o = new Object(); 
UnresolvedClass.staticByte = (byte)1; @@ -90,21 +90,21 @@ public class Main extends UnresolvedSuperClass { } /// CHECK-START: void Main.callUnresolvedInstanceFieldAccess(UnresolvedClass) register (before) - /// CHECK: UnresolvedInstanceFieldSet field_type:PrimByte - /// CHECK: UnresolvedInstanceFieldSet field_type:PrimChar - /// CHECK: UnresolvedInstanceFieldSet field_type:PrimInt - /// CHECK: UnresolvedInstanceFieldSet field_type:PrimLong - /// CHECK: UnresolvedInstanceFieldSet field_type:PrimFloat - /// CHECK: UnresolvedInstanceFieldSet field_type:PrimDouble - /// CHECK: UnresolvedInstanceFieldSet field_type:PrimNot - - /// CHECK: UnresolvedInstanceFieldGet field_type:PrimByte - /// CHECK: UnresolvedInstanceFieldGet field_type:PrimChar - /// CHECK: UnresolvedInstanceFieldGet field_type:PrimInt - /// CHECK: UnresolvedInstanceFieldGet field_type:PrimLong - /// CHECK: UnresolvedInstanceFieldGet field_type:PrimFloat - /// CHECK: UnresolvedInstanceFieldGet field_type:PrimDouble - /// CHECK: UnresolvedInstanceFieldGet field_type:PrimNot + /// CHECK: UnresolvedInstanceFieldSet field_type:Int8 + /// CHECK: UnresolvedInstanceFieldSet field_type:Uint16 + /// CHECK: UnresolvedInstanceFieldSet field_type:Int32 + /// CHECK: UnresolvedInstanceFieldSet field_type:Int64 + /// CHECK: UnresolvedInstanceFieldSet field_type:Float32 + /// CHECK: UnresolvedInstanceFieldSet field_type:Float64 + /// CHECK: UnresolvedInstanceFieldSet field_type:Reference + + /// CHECK: UnresolvedInstanceFieldGet field_type:Int8 + /// CHECK: UnresolvedInstanceFieldGet field_type:Uint16 + /// CHECK: UnresolvedInstanceFieldGet field_type:Int32 + /// CHECK: UnresolvedInstanceFieldGet field_type:Int64 + /// CHECK: UnresolvedInstanceFieldGet field_type:Float32 + /// CHECK: UnresolvedInstanceFieldGet field_type:Float64 + /// CHECK: UnresolvedInstanceFieldGet field_type:Reference static public void callUnresolvedInstanceFieldAccess(UnresolvedClass c) { Object o = new Object(); c.instanceByte = (byte)1; |