Diffstat (limited to 'compiler')
-rw-r--r--  compiler/common_compiler_test.h                        |   2
-rwxr-xr-x  compiler/dex/quick/gen_invoke.cc                       |   2
-rw-r--r--  compiler/dex/ssa_transformation.cc                     |  89
-rw-r--r--  compiler/optimizing/bounds_check_elimination.cc        |   3
-rw-r--r--  compiler/optimizing/bounds_check_elimination_test.cc   |  56
-rw-r--r--  compiler/optimizing/builder.cc                         | 161
-rw-r--r--  compiler/optimizing/builder.h                          |   7
-rw-r--r--  compiler/optimizing/code_generator.cc                  | 145
-rw-r--r--  compiler/optimizing/code_generator.h                   |  36
-rw-r--r--  compiler/optimizing/code_generator_arm.cc              | 139
-rw-r--r--  compiler/optimizing/code_generator_arm.h               |  35
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc            | 177
-rw-r--r--  compiler/optimizing/code_generator_arm64.h             |  36
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc           | 147
-rw-r--r--  compiler/optimizing/code_generator_mips64.h            |  31
-rw-r--r--  compiler/optimizing/code_generator_x86.cc              | 132
-rw-r--r--  compiler/optimizing/code_generator_x86.h               |  33
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc           | 112
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h            |  32
-rw-r--r--  compiler/optimizing/graph_visualizer.cc                |  21
-rw-r--r--  compiler/optimizing/gvn_test.cc                        |  51
-rw-r--r--  compiler/optimizing/inliner.cc                         |  19
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc          |   6
-rw-r--r--  compiler/optimizing/intrinsics_mips64.cc               |  40
-rw-r--r--  compiler/optimizing/licm_test.cc                       |  12
-rw-r--r--  compiler/optimizing/nodes.h                            | 145
-rw-r--r--  compiler/optimizing/nodes_x86.h                        |   5
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc             |  34
-rw-r--r--  compiler/optimizing/optimizing_compiler_stats.h        |  11
-rw-r--r--  compiler/optimizing/reference_type_propagation.cc      |  54
-rw-r--r--  compiler/optimizing/register_allocator_test.cc         |  12
-rw-r--r--  compiler/optimizing/stack_map_stream.cc                |   1
-rw-r--r--  compiler/utils/arm/assembler_arm32_test.cc             |   2
-rw-r--r--  compiler/utils/assembler_test.h                        | 202
-rw-r--r--  compiler/utils/assembler_test_base.h                   |  53
-rw-r--r--  compiler/utils/mips64/assembler_mips64.h               |  14
-rw-r--r--  compiler/utils/mips64/assembler_mips64_test.cc         | 380
37 files changed, 2108 insertions(+), 329 deletions(-)
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index dc2bc5c3f4..67b4428324 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -92,7 +92,7 @@ class CommonCompilerTest : public CommonRuntimeTest {
void UnreserveImageSpace();
- Compiler::Kind compiler_kind_ = kUseOptimizingCompiler ? Compiler::kOptimizing : Compiler::kQuick;
+ Compiler::Kind compiler_kind_ = Compiler::kOptimizing;
std::unique_ptr<CompilerOptions> compiler_options_;
std::unique_ptr<VerificationResults> verification_results_;
std::unique_ptr<DexFileToMethodInlinerMap> method_inliner_map_;
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 1f114cf336..3c5c2fe010 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -148,7 +148,7 @@ void Mir2Lir::CallRuntimeHelperImmRegLocation(QuickEntrypointEnum trampoline, in
if (arg1.wide == 0) {
LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
} else {
- RegStorage r_tmp = TargetReg(cu_->instruction_set == kMips ? kArg2 : kArg1, kWide);
+ RegStorage r_tmp = TargetReg(kArg2, kWide);
LoadValueDirectWideFixed(arg1, r_tmp);
}
LoadConstant(TargetReg(kArg0, kNotWide), arg0);
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index 939bf40564..6ed666b9f7 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -535,37 +535,76 @@ void MIRGraph::DoDFSPreOrderSSARename(BasicBlock* block) {
if (block->visited || block->hidden) {
return;
}
- block->visited = true;
- /* Process this block */
- DoSSAConversion(block);
+ typedef struct {
+ BasicBlock* bb;
+ int32_t* ssa_map;
+ } BasicBlockInfo;
+ BasicBlockInfo temp;
- /* Save SSA map snapshot */
ScopedArenaAllocator allocator(&cu_->arena_stack);
+ ScopedArenaVector<BasicBlockInfo> bi_stack(allocator.Adapter());
+ ScopedArenaVector<BasicBlock*> succ_stack(allocator.Adapter());
+
uint32_t num_vregs = GetNumOfCodeAndTempVRs();
- int32_t* saved_ssa_map = allocator.AllocArray<int32_t>(num_vregs, kArenaAllocDalvikToSSAMap);
- size_t map_size = sizeof(saved_ssa_map[0]) * num_vregs;
- memcpy(saved_ssa_map, vreg_to_ssa_map_, map_size);
-
- if (block->fall_through != NullBasicBlockId) {
- DoDFSPreOrderSSARename(GetBasicBlock(block->fall_through));
- /* Restore SSA map snapshot */
- memcpy(vreg_to_ssa_map_, saved_ssa_map, map_size);
- }
- if (block->taken != NullBasicBlockId) {
- DoDFSPreOrderSSARename(GetBasicBlock(block->taken));
- /* Restore SSA map snapshot */
- memcpy(vreg_to_ssa_map_, saved_ssa_map, map_size);
- }
- if (block->successor_block_list_type != kNotUsed) {
- for (SuccessorBlockInfo* successor_block_info : block->successor_blocks) {
- BasicBlock* succ_bb = GetBasicBlock(successor_block_info->block);
- DoDFSPreOrderSSARename(succ_bb);
- /* Restore SSA map snapshot */
- memcpy(vreg_to_ssa_map_, saved_ssa_map, map_size);
+ size_t map_size = sizeof(int32_t) * num_vregs;
+ temp.bb = block;
+ temp.ssa_map = vreg_to_ssa_map_;
+ bi_stack.push_back(temp);
+
+ while (!bi_stack.empty()) {
+ temp = bi_stack.back();
+ bi_stack.pop_back();
+ BasicBlock* b = temp.bb;
+
+ if (b->visited || b->hidden) {
+ continue;
+ }
+ b->visited = true;
+
+ /* Restore SSA map snapshot, except for the first block */
+ if (b != block) {
+ memcpy(vreg_to_ssa_map_, temp.ssa_map, map_size);
+ }
+
+ /* Process this block */
+ DoSSAConversion(b);
+
+    /* If there are no successor, taken, or fall-through blocks, continue */
+ if (b->successor_block_list_type == kNotUsed &&
+ b->taken == NullBasicBlockId &&
+ b->fall_through == NullBasicBlockId) {
+ continue;
+ }
+
+ /* Save SSA map snapshot */
+ int32_t* saved_ssa_map =
+ allocator.AllocArray<int32_t>(num_vregs, kArenaAllocDalvikToSSAMap);
+ memcpy(saved_ssa_map, vreg_to_ssa_map_, map_size);
+
+ if (b->successor_block_list_type != kNotUsed) {
+ for (SuccessorBlockInfo* successor_block_info : b->successor_blocks) {
+ BasicBlock* succ_bb = GetBasicBlock(successor_block_info->block);
+ succ_stack.push_back(succ_bb);
+ }
+ while (!succ_stack.empty()) {
+ temp.bb = succ_stack.back();
+ succ_stack.pop_back();
+ temp.ssa_map = saved_ssa_map;
+ bi_stack.push_back(temp);
+ }
+ }
+ if (b->taken != NullBasicBlockId) {
+ temp.bb = GetBasicBlock(b->taken);
+ temp.ssa_map = saved_ssa_map;
+ bi_stack.push_back(temp);
+ }
+ if (b->fall_through != NullBasicBlockId) {
+ temp.bb = GetBasicBlock(b->fall_through);
+ temp.ssa_map = saved_ssa_map;
+ bi_stack.push_back(temp);
}
}
- return;
}
} // namespace art
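
A minimal sketch of the pattern applied in this hunk, with a hypothetical Node type standing in for BasicBlock: a recursive pre-order DFS that snapshots and restores per-path state becomes an explicit work stack whose entries carry the snapshot to restore, so deeply nested CFGs can no longer overflow the native call stack. Entries are pushed in reverse of the desired visit order so the traversal order of the recursive version is preserved.

    #include <memory>
    #include <vector>

    struct Node {
      std::vector<Node*> successors;
      bool visited = false;
    };

    using State = std::vector<int>;  // stands in for vreg_to_ssa_map_

    void IterativePreOrder(Node* root, State& state) {
      struct WorkItem { Node* node; std::shared_ptr<State> snapshot; };
      std::vector<WorkItem> stack;
      stack.push_back({root, nullptr});  // the root keeps the current state
      while (!stack.empty()) {
        WorkItem item = stack.back();
        stack.pop_back();
        if (item.node->visited) continue;
        item.node->visited = true;
        if (item.snapshot != nullptr) state = *item.snapshot;  // restore path state
        // ... process item.node, mutating `state` (cf. DoSSAConversion) ...
        if (item.node->successors.empty()) continue;
        // One snapshot is shared by all successors, mirroring the single
        // saved_ssa_map allocation per block in the hunk above.
        auto snapshot = std::make_shared<State>(state);
        for (auto it = item.node->successors.rbegin();
             it != item.node->successors.rend(); ++it) {
          stack.push_back({*it, snapshot});
        }
      }
    }
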
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index 960f4d9b7c..2c7c127474 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -965,7 +965,8 @@ class MonotonicValueRange : public ValueRange {
suspend_check->GetEnvironment(), header);
}
- HArrayLength* new_array_length = new (graph->GetArena()) HArrayLength(array);
+ HArrayLength* new_array_length
+ = new (graph->GetArena()) HArrayLength(array, array->GetDexPc());
deopt_block->InsertInstructionBefore(new_array_length, deopt_block->GetLastInstruction());
if (loop_entry_test_block_added) {
diff --git a/compiler/optimizing/bounds_check_elimination_test.cc b/compiler/optimizing/bounds_check_elimination_test.cc
index 08e1e3682b..ce6dc75741 100644
--- a/compiler/optimizing/bounds_check_elimination_test.cc
+++ b/compiler/optimizing/bounds_check_elimination_test.cc
@@ -91,7 +91,7 @@ TEST_F(BoundsCheckEliminationTest, NarrowingRangeArrayBoundsElimination) {
HBasicBlock* block2 = new (&allocator_) HBasicBlock(graph_);
graph_->AddBlock(block2);
HNullCheck* null_check = new (&allocator_) HNullCheck(parameter1, 0);
- HArrayLength* array_length = new (&allocator_) HArrayLength(null_check);
+ HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0);
HBoundsCheck* bounds_check2 = new (&allocator_)
HBoundsCheck(parameter2, array_length, 0);
HArraySet* array_set = new (&allocator_) HArraySet(
@@ -104,7 +104,7 @@ TEST_F(BoundsCheckEliminationTest, NarrowingRangeArrayBoundsElimination) {
HBasicBlock* block3 = new (&allocator_) HBasicBlock(graph_);
graph_->AddBlock(block3);
null_check = new (&allocator_) HNullCheck(parameter1, 0);
- array_length = new (&allocator_) HArrayLength(null_check);
+ array_length = new (&allocator_) HArrayLength(null_check, 0);
cmp = new (&allocator_) HLessThan(parameter2, array_length);
if_inst = new (&allocator_) HIf(cmp);
block3->AddInstruction(null_check);
@@ -115,7 +115,7 @@ TEST_F(BoundsCheckEliminationTest, NarrowingRangeArrayBoundsElimination) {
HBasicBlock* block4 = new (&allocator_) HBasicBlock(graph_);
graph_->AddBlock(block4);
null_check = new (&allocator_) HNullCheck(parameter1, 0);
- array_length = new (&allocator_) HArrayLength(null_check);
+ array_length = new (&allocator_) HArrayLength(null_check, 0);
HBoundsCheck* bounds_check4 = new (&allocator_)
HBoundsCheck(parameter2, array_length, 0);
array_set = new (&allocator_) HArraySet(
@@ -128,7 +128,7 @@ TEST_F(BoundsCheckEliminationTest, NarrowingRangeArrayBoundsElimination) {
HBasicBlock* block5 = new (&allocator_) HBasicBlock(graph_);
graph_->AddBlock(block5);
null_check = new (&allocator_) HNullCheck(parameter1, 0);
- array_length = new (&allocator_) HArrayLength(null_check);
+ array_length = new (&allocator_) HArrayLength(null_check, 0);
HBoundsCheck* bounds_check5 = new (&allocator_)
HBoundsCheck(parameter2, array_length, 0);
array_set = new (&allocator_) HArraySet(
@@ -190,7 +190,7 @@ TEST_F(BoundsCheckEliminationTest, OverflowArrayBoundsElimination) {
graph_->AddBlock(block2);
HInstruction* add = new (&allocator_) HAdd(Primitive::kPrimInt, parameter2, constant_max_int);
HNullCheck* null_check = new (&allocator_) HNullCheck(parameter1, 0);
- HArrayLength* array_length = new (&allocator_) HArrayLength(null_check);
+ HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0);
HInstruction* cmp2 = new (&allocator_) HGreaterThanOrEqual(add, array_length);
if_inst = new (&allocator_) HIf(cmp2);
block2->AddInstruction(add);
@@ -245,7 +245,7 @@ TEST_F(BoundsCheckEliminationTest, UnderflowArrayBoundsElimination) {
HBasicBlock* block1 = new (&allocator_) HBasicBlock(graph_);
graph_->AddBlock(block1);
HNullCheck* null_check = new (&allocator_) HNullCheck(parameter1, 0);
- HArrayLength* array_length = new (&allocator_) HArrayLength(null_check);
+ HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0);
HInstruction* cmp = new (&allocator_) HGreaterThanOrEqual(parameter2, array_length);
HIf* if_inst = new (&allocator_) HIf(cmp);
block1->AddInstruction(null_check);
@@ -308,7 +308,7 @@ TEST_F(BoundsCheckEliminationTest, ConstantArrayBoundsElimination) {
entry->AddSuccessor(block);
HNullCheck* null_check = new (&allocator_) HNullCheck(parameter, 0);
- HArrayLength* array_length = new (&allocator_) HArrayLength(null_check);
+ HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0);
HBoundsCheck* bounds_check6 = new (&allocator_)
HBoundsCheck(constant_6, array_length, 0);
HInstruction* array_set = new (&allocator_) HArraySet(
@@ -319,7 +319,7 @@ TEST_F(BoundsCheckEliminationTest, ConstantArrayBoundsElimination) {
block->AddInstruction(array_set);
null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check);
+ array_length = new (&allocator_) HArrayLength(null_check, 0);
HBoundsCheck* bounds_check5 = new (&allocator_)
HBoundsCheck(constant_5, array_length, 0);
array_set = new (&allocator_) HArraySet(
@@ -330,7 +330,7 @@ TEST_F(BoundsCheckEliminationTest, ConstantArrayBoundsElimination) {
block->AddInstruction(array_set);
null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check);
+ array_length = new (&allocator_) HArrayLength(null_check, 0);
HBoundsCheck* bounds_check4 = new (&allocator_)
HBoundsCheck(constant_4, array_length, 0);
array_set = new (&allocator_) HArraySet(
@@ -389,7 +389,7 @@ static HInstruction* BuildSSAGraph1(HGraph* graph,
HPhi* phi = new (allocator) HPhi(allocator, 0, 0, Primitive::kPrimInt);
HInstruction* null_check = new (allocator) HNullCheck(parameter, 0);
- HInstruction* array_length = new (allocator) HArrayLength(null_check);
+ HInstruction* array_length = new (allocator) HArrayLength(null_check, 0);
HInstruction* cmp = nullptr;
if (cond == kCondGE) {
cmp = new (allocator) HGreaterThanOrEqual(phi, array_length);
@@ -406,7 +406,7 @@ static HInstruction* BuildSSAGraph1(HGraph* graph,
phi->AddInput(constant_initial);
null_check = new (allocator) HNullCheck(parameter, 0);
- array_length = new (allocator) HArrayLength(null_check);
+ array_length = new (allocator) HArrayLength(null_check, 0);
HInstruction* bounds_check = new (allocator) HBoundsCheck(phi, array_length, 0);
HInstruction* array_set = new (allocator) HArraySet(
null_check, bounds_check, constant_10, Primitive::kPrimInt, 0);
@@ -489,7 +489,7 @@ static HInstruction* BuildSSAGraph2(HGraph *graph,
graph->AddBlock(block);
entry->AddSuccessor(block);
HInstruction* null_check = new (allocator) HNullCheck(parameter, 0);
- HInstruction* array_length = new (allocator) HArrayLength(null_check);
+ HInstruction* array_length = new (allocator) HArrayLength(null_check, 0);
block->AddInstruction(null_check);
block->AddInstruction(array_length);
block->AddInstruction(new (allocator) HGoto());
@@ -522,7 +522,7 @@ static HInstruction* BuildSSAGraph2(HGraph *graph,
HInstruction* add = new (allocator) HAdd(Primitive::kPrimInt, phi, constant_minus_1);
null_check = new (allocator) HNullCheck(parameter, 0);
- array_length = new (allocator) HArrayLength(null_check);
+ array_length = new (allocator) HArrayLength(null_check, 0);
HInstruction* bounds_check = new (allocator) HBoundsCheck(add, array_length, 0);
HInstruction* array_set = new (allocator) HArraySet(
null_check, bounds_check, constant_10, Primitive::kPrimInt, 0);
@@ -631,7 +631,7 @@ static HInstruction* BuildSSAGraph3(HGraph* graph,
phi->AddInput(constant_initial);
HNullCheck* null_check = new (allocator) HNullCheck(new_array, 0);
- HArrayLength* array_length = new (allocator) HArrayLength(null_check);
+ HArrayLength* array_length = new (allocator) HArrayLength(null_check, 0);
HInstruction* bounds_check = new (allocator) HBoundsCheck(phi, array_length, 0);
HInstruction* array_set = new (allocator) HArraySet(
null_check, bounds_check, constant_10, Primitive::kPrimInt, 0);
@@ -716,7 +716,7 @@ static HInstruction* BuildSSAGraph4(HGraph* graph,
HPhi* phi = new (allocator) HPhi(allocator, 0, 0, Primitive::kPrimInt);
HInstruction* null_check = new (allocator) HNullCheck(parameter, 0);
- HInstruction* array_length = new (allocator) HArrayLength(null_check);
+ HInstruction* array_length = new (allocator) HArrayLength(null_check, 0);
HInstruction* cmp = nullptr;
if (cond == kCondGE) {
cmp = new (allocator) HGreaterThanOrEqual(phi, array_length);
@@ -732,7 +732,7 @@ static HInstruction* BuildSSAGraph4(HGraph* graph,
phi->AddInput(constant_initial);
null_check = new (allocator) HNullCheck(parameter, 0);
- array_length = new (allocator) HArrayLength(null_check);
+ array_length = new (allocator) HArrayLength(null_check, 0);
HInstruction* sub = new (allocator) HSub(Primitive::kPrimInt, array_length, phi);
HInstruction* add_minus_1 = new (allocator)
HAdd(Primitive::kPrimInt, sub, constant_minus_1);
@@ -811,7 +811,7 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) {
graph_->AddBlock(outer_header);
HPhi* phi_i = new (&allocator_) HPhi(&allocator_, 0, 0, Primitive::kPrimInt);
HNullCheck* null_check = new (&allocator_) HNullCheck(parameter, 0);
- HArrayLength* array_length = new (&allocator_) HArrayLength(null_check);
+ HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0);
HAdd* add = new (&allocator_) HAdd(Primitive::kPrimInt, array_length, constant_minus_1);
HInstruction* cmp = new (&allocator_) HGreaterThanOrEqual(phi_i, add);
HIf* if_inst = new (&allocator_) HIf(cmp);
@@ -827,7 +827,7 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) {
graph_->AddBlock(inner_header);
HPhi* phi_j = new (&allocator_) HPhi(&allocator_, 0, 0, Primitive::kPrimInt);
null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check);
+ array_length = new (&allocator_) HArrayLength(null_check, 0);
HSub* sub = new (&allocator_) HSub(Primitive::kPrimInt, array_length, phi_i);
add = new (&allocator_) HAdd(Primitive::kPrimInt, sub, constant_minus_1);
cmp = new (&allocator_) HGreaterThanOrEqual(phi_j, add);
@@ -844,20 +844,20 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) {
HBasicBlock* inner_body_compare = new (&allocator_) HBasicBlock(graph_);
graph_->AddBlock(inner_body_compare);
null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check);
+ array_length = new (&allocator_) HArrayLength(null_check, 0);
HBoundsCheck* bounds_check1 = new (&allocator_) HBoundsCheck(phi_j, array_length, 0);
HArrayGet* array_get_j = new (&allocator_)
- HArrayGet(null_check, bounds_check1, Primitive::kPrimInt);
+ HArrayGet(null_check, bounds_check1, Primitive::kPrimInt, 0);
inner_body_compare->AddInstruction(null_check);
inner_body_compare->AddInstruction(array_length);
inner_body_compare->AddInstruction(bounds_check1);
inner_body_compare->AddInstruction(array_get_j);
HInstruction* j_plus_1 = new (&allocator_) HAdd(Primitive::kPrimInt, phi_j, constant_1);
null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check);
+ array_length = new (&allocator_) HArrayLength(null_check, 0);
HBoundsCheck* bounds_check2 = new (&allocator_) HBoundsCheck(j_plus_1, array_length, 0);
HArrayGet* array_get_j_plus_1 = new (&allocator_)
- HArrayGet(null_check, bounds_check2, Primitive::kPrimInt);
+ HArrayGet(null_check, bounds_check2, Primitive::kPrimInt, 0);
cmp = new (&allocator_) HGreaterThanOrEqual(array_get_j, array_get_j_plus_1);
if_inst = new (&allocator_) HIf(cmp);
inner_body_compare->AddInstruction(j_plus_1);
@@ -873,10 +873,10 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) {
j_plus_1 = new (&allocator_) HAdd(Primitive::kPrimInt, phi_j, constant_1);
// temp = array[j+1]
null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check);
+ array_length = new (&allocator_) HArrayLength(null_check, 0);
HInstruction* bounds_check3 = new (&allocator_) HBoundsCheck(j_plus_1, array_length, 0);
array_get_j_plus_1 = new (&allocator_)
- HArrayGet(null_check, bounds_check3, Primitive::kPrimInt);
+ HArrayGet(null_check, bounds_check3, Primitive::kPrimInt, 0);
inner_body_swap->AddInstruction(j_plus_1);
inner_body_swap->AddInstruction(null_check);
inner_body_swap->AddInstruction(array_length);
@@ -884,16 +884,16 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) {
inner_body_swap->AddInstruction(array_get_j_plus_1);
// array[j+1] = array[j]
null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check);
+ array_length = new (&allocator_) HArrayLength(null_check, 0);
HInstruction* bounds_check4 = new (&allocator_) HBoundsCheck(phi_j, array_length, 0);
array_get_j = new (&allocator_)
- HArrayGet(null_check, bounds_check4, Primitive::kPrimInt);
+ HArrayGet(null_check, bounds_check4, Primitive::kPrimInt, 0);
inner_body_swap->AddInstruction(null_check);
inner_body_swap->AddInstruction(array_length);
inner_body_swap->AddInstruction(bounds_check4);
inner_body_swap->AddInstruction(array_get_j);
null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check);
+ array_length = new (&allocator_) HArrayLength(null_check, 0);
HInstruction* bounds_check5 = new (&allocator_) HBoundsCheck(j_plus_1, array_length, 0);
HArraySet* array_set_j_plus_1 = new (&allocator_)
HArraySet(null_check, bounds_check5, array_get_j, Primitive::kPrimInt, 0);
@@ -903,7 +903,7 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) {
inner_body_swap->AddInstruction(array_set_j_plus_1);
// array[j] = temp
null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check);
+ array_length = new (&allocator_) HArrayLength(null_check, 0);
HInstruction* bounds_check6 = new (&allocator_) HBoundsCheck(phi_j, array_length, 0);
HArraySet* array_set_j = new (&allocator_)
HArraySet(null_check, bounds_check6, array_get_j_plus_1, Primitive::kPrimInt, 0);
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index cb36f62235..5acc5fda71 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -940,7 +940,8 @@ HClinitCheck* HGraphBuilder::ProcessClinitCheckForInvoke(
storage_index,
*dex_compilation_unit_->GetDexFile(),
is_outer_class,
- dex_pc);
+ dex_pc,
+ /*needs_access_check*/ false);
current_block_->AddInstruction(load_class);
clinit_check = new (arena_) HClinitCheck(load_class, dex_pc);
current_block_->AddInstruction(clinit_check);
@@ -1186,6 +1187,12 @@ void HGraphBuilder::PotentiallySimplifyFakeString(uint16_t original_dex_register
}
}
+static Primitive::Type GetFieldAccessType(const DexFile& dex_file, uint16_t field_index) {
+ const DexFile::FieldId& field_id = dex_file.GetFieldId(field_index);
+ const char* type = dex_file.GetFieldTypeDescriptor(field_id);
+ return Primitive::GetType(type[0]);
+}
+
bool HGraphBuilder::BuildInstanceFieldAccess(const Instruction& instruction,
uint32_t dex_pc,
bool is_put) {
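
A self-contained sketch of the property GetFieldAccessType relies on, using a hypothetical Type enum: the first character of a dex field type descriptor fully determines the primitive kind, so the access type is known even when the field itself never resolves.

    #include <stdexcept>

    enum class Type {
      kBoolean, kByte, kChar, kShort, kInt, kLong, kFloat, kDouble, kReference
    };

    // 'L' (class) and '[' (array) descriptors are both reference types.
    Type TypeFromDescriptor(char c) {
      switch (c) {
        case 'Z': return Type::kBoolean;
        case 'B': return Type::kByte;
        case 'C': return Type::kChar;
        case 'S': return Type::kShort;
        case 'I': return Type::kInt;
        case 'J': return Type::kLong;
        case 'F': return Type::kFloat;
        case 'D': return Type::kDouble;
        case 'L':
        case '[': return Type::kReference;
        default: throw std::invalid_argument("not a dex type descriptor");
      }
    }
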
@@ -1205,44 +1212,61 @@ bool HGraphBuilder::BuildInstanceFieldAccess(const Instruction& instruction,
ArtField* resolved_field =
compiler_driver_->ComputeInstanceFieldInfo(field_index, dex_compilation_unit_, is_put, soa);
- if (resolved_field == nullptr) {
- MaybeRecordStat(MethodCompilationStat::kNotCompiledUnresolvedField);
- return false;
- }
-
- Primitive::Type field_type = resolved_field->GetTypeAsPrimitiveType();
HInstruction* object = LoadLocal(obj_reg, Primitive::kPrimNot, dex_pc);
- current_block_->AddInstruction(new (arena_) HNullCheck(object, dex_pc));
+ HInstruction* null_check = new (arena_) HNullCheck(object, dex_pc);
+ current_block_->AddInstruction(null_check);
+
+ Primitive::Type field_type = (resolved_field == nullptr)
+ ? GetFieldAccessType(*dex_file_, field_index)
+ : resolved_field->GetTypeAsPrimitiveType();
if (is_put) {
Temporaries temps(graph_);
- HInstruction* null_check = current_block_->GetLastInstruction();
// We need one temporary for the null check.
temps.Add(null_check);
HInstruction* value = LoadLocal(source_or_dest_reg, field_type, dex_pc);
- current_block_->AddInstruction(new (arena_) HInstanceFieldSet(
- null_check,
- value,
- field_type,
- resolved_field->GetOffset(),
- resolved_field->IsVolatile(),
- field_index,
- *dex_file_,
- dex_compilation_unit_->GetDexCache(),
- dex_pc));
+ HInstruction* field_set = nullptr;
+ if (resolved_field == nullptr) {
+ MaybeRecordStat(MethodCompilationStat::kUnresolvedField);
+ field_set = new (arena_) HUnresolvedInstanceFieldSet(null_check,
+ value,
+ field_type,
+ field_index,
+ dex_pc);
+ } else {
+ field_set = new (arena_) HInstanceFieldSet(null_check,
+ value,
+ field_type,
+ resolved_field->GetOffset(),
+ resolved_field->IsVolatile(),
+ field_index,
+ *dex_file_,
+ dex_compilation_unit_->GetDexCache(),
+ dex_pc);
+ }
+ current_block_->AddInstruction(field_set);
} else {
- current_block_->AddInstruction(new (arena_) HInstanceFieldGet(
- current_block_->GetLastInstruction(),
- field_type,
- resolved_field->GetOffset(),
- resolved_field->IsVolatile(),
- field_index,
- *dex_file_,
- dex_compilation_unit_->GetDexCache(),
- dex_pc));
-
- UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction(), dex_pc);
+ HInstruction* field_get = nullptr;
+ if (resolved_field == nullptr) {
+ MaybeRecordStat(MethodCompilationStat::kUnresolvedField);
+ field_get = new (arena_) HUnresolvedInstanceFieldGet(null_check,
+ field_type,
+ field_index,
+ dex_pc);
+ } else {
+ field_get = new (arena_) HInstanceFieldGet(null_check,
+ field_type,
+ resolved_field->GetOffset(),
+ resolved_field->IsVolatile(),
+ field_index,
+ *dex_file_,
+ dex_compilation_unit_->GetDexCache(),
+ dex_pc);
+ }
+ current_block_->AddInstruction(field_get);
+ UpdateLocal(source_or_dest_reg, field_get, dex_pc);
}
+
return true;
}
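
In miniature, with hypothetical node types, the control-flow change this hunk makes: a failed field resolution now selects a different IR node (lowered later to a runtime call) instead of aborting compilation of the whole method.

    #include <memory>

    struct Node { virtual ~Node() = default; };
    struct ResolvedFieldSet : Node { int offset = 0; };
    struct UnresolvedFieldSet : Node { int field_index = 0; };

    std::unique_ptr<Node> MakeFieldSet(bool resolved, int offset_or_index) {
      if (!resolved) {
        // Before this change: bail out and mark the method as not compiled.
        auto node = std::make_unique<UnresolvedFieldSet>();
        node->field_index = offset_or_index;
        return node;
      }
      auto node = std::make_unique<ResolvedFieldSet>();
      node->offset = offset_or_index;
      return node;
    }
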
@@ -1282,6 +1306,23 @@ bool HGraphBuilder::IsOutermostCompilingClass(uint16_t type_index) const {
return outer_class.Get() == cls.Get();
}
+void HGraphBuilder::BuildUnresolvedStaticFieldAccess(const Instruction& instruction,
+ uint32_t dex_pc,
+ bool is_put,
+ Primitive::Type field_type) {
+ uint32_t source_or_dest_reg = instruction.VRegA_21c();
+ uint16_t field_index = instruction.VRegB_21c();
+
+ if (is_put) {
+ HInstruction* value = LoadLocal(source_or_dest_reg, field_type, dex_pc);
+ current_block_->AddInstruction(
+ new (arena_) HUnresolvedStaticFieldSet(value, field_type, field_index, dex_pc));
+ } else {
+ current_block_->AddInstruction(
+ new (arena_) HUnresolvedStaticFieldGet(field_type, field_index, dex_pc));
+ UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction(), dex_pc);
+ }
+}
bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
uint32_t dex_pc,
bool is_put) {
@@ -1299,10 +1340,13 @@ bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
soa, dex_cache, class_loader, dex_compilation_unit_, field_index, true);
if (resolved_field == nullptr) {
- MaybeRecordStat(MethodCompilationStat::kNotCompiledUnresolvedField);
- return false;
+ MaybeRecordStat(MethodCompilationStat::kUnresolvedField);
+ Primitive::Type field_type = GetFieldAccessType(*dex_file_, field_index);
+ BuildUnresolvedStaticFieldAccess(instruction, dex_pc, is_put, field_type);
+ return true;
}
+ Primitive::Type field_type = resolved_field->GetTypeAsPrimitiveType();
const DexFile& outer_dex_file = *outer_compilation_unit_->GetDexFile();
Handle<mirror::DexCache> outer_dex_cache(hs.NewHandle(
outer_compilation_unit_->GetClassLinker()->FindDexCache(soa.Self(), outer_dex_file)));
@@ -1317,6 +1361,7 @@ bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
// The compiler driver cannot currently understand multiple dex caches involved. Just bailout.
return false;
} else {
+ // TODO: This is rather expensive. Perf it and cache the results if needed.
std::pair<bool, bool> pair = compiler_driver_->IsFastStaticField(
outer_dex_cache.Get(),
GetCompilingClass(),
@@ -1325,7 +1370,9 @@ bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
&storage_index);
bool can_easily_access = is_put ? pair.second : pair.first;
if (!can_easily_access) {
- return false;
+ MaybeRecordStat(MethodCompilationStat::kUnresolvedFieldNotAFastAccess);
+ BuildUnresolvedStaticFieldAccess(instruction, dex_pc, is_put, field_type);
+ return true;
}
}
@@ -1338,7 +1385,8 @@ bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
storage_index,
*dex_compilation_unit_->GetDexFile(),
is_outer_class,
- dex_pc);
+ dex_pc,
+ /*needs_access_check*/ false);
current_block_->AddInstruction(constant);
HInstruction* cls = constant;
@@ -1346,8 +1394,6 @@ bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
cls = new (arena_) HClinitCheck(constant, dex_pc);
current_block_->AddInstruction(cls);
}
-
- Primitive::Type field_type = resolved_field->GetTypeAsPrimitiveType();
if (is_put) {
// We need to keep the class alive before loading the value.
Temporaries temps(graph_);
@@ -1571,7 +1617,9 @@ void HGraphBuilder::BuildFillWideArrayData(HInstruction* object,
static TypeCheckKind ComputeTypeCheckKind(Handle<mirror::Class> cls)
SHARED_REQUIRES(Locks::mutator_lock_) {
- if (cls->IsInterface()) {
+ if (cls.Get() == nullptr) {
+ return TypeCheckKind::kUnresolvedCheck;
+ } else if (cls->IsInterface()) {
return TypeCheckKind::kInterfaceCheck;
} else if (cls->IsArrayClass()) {
if (cls->GetComponentType()->IsObjectClass()) {
@@ -1590,11 +1638,20 @@ static TypeCheckKind ComputeTypeCheckKind(Handle<mirror::Class> cls)
}
}
-bool HGraphBuilder::BuildTypeCheck(const Instruction& instruction,
+void HGraphBuilder::BuildTypeCheck(const Instruction& instruction,
uint8_t destination,
uint8_t reference,
uint16_t type_index,
uint32_t dex_pc) {
+ bool type_known_final, type_known_abstract, use_declaring_class;
+ bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
+ dex_compilation_unit_->GetDexMethodIndex(),
+ *dex_compilation_unit_->GetDexFile(),
+ type_index,
+ &type_known_final,
+ &type_known_abstract,
+ &use_declaring_class);
+
ScopedObjectAccess soa(Thread::Current());
StackHandleScope<2> hs(soa.Self());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(
@@ -1602,22 +1659,14 @@ bool HGraphBuilder::BuildTypeCheck(const Instruction& instruction,
soa.Self(), *dex_compilation_unit_->GetDexFile())));
Handle<mirror::Class> resolved_class(hs.NewHandle(dex_cache->GetResolvedType(type_index)));
- if ((resolved_class.Get() == nullptr) ||
- // TODO: Remove this check once the compiler actually knows which
- // ArtMethod it is compiling.
- (GetCompilingClass() == nullptr) ||
- !GetCompilingClass()->CanAccess(resolved_class.Get())) {
- MaybeRecordStat(MethodCompilationStat::kNotCompiledCantAccesType);
- return false;
- }
-
HInstruction* object = LoadLocal(reference, Primitive::kPrimNot, dex_pc);
HLoadClass* cls = new (arena_) HLoadClass(
graph_->GetCurrentMethod(),
type_index,
*dex_compilation_unit_->GetDexFile(),
IsOutermostCompilingClass(type_index),
- dex_pc);
+ dex_pc,
+ !can_access);
current_block_->AddInstruction(cls);
// The class needs a temporary before being used by the type check.
@@ -1632,7 +1681,6 @@ bool HGraphBuilder::BuildTypeCheck(const Instruction& instruction,
DCHECK_EQ(instruction.Opcode(), Instruction::CHECK_CAST);
current_block_->AddInstruction(new (arena_) HCheckCast(object, cls, check_kind, dex_pc));
}
- return true;
}
bool HGraphBuilder::NeedsAccessCheck(uint32_t type_index) const {
@@ -2747,16 +2795,13 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
dex_compilation_unit_->GetDexMethodIndex(), *dex_file_, type_index,
&type_known_final, &type_known_abstract, &dont_use_is_referrers_class);
- if (!can_access) {
- MaybeRecordStat(MethodCompilationStat::kNotCompiledCantAccesType);
- return false;
- }
current_block_->AddInstruction(new (arena_) HLoadClass(
graph_->GetCurrentMethod(),
type_index,
*dex_compilation_unit_->GetDexFile(),
IsOutermostCompilingClass(type_index),
- dex_pc));
+ dex_pc,
+ !can_access));
UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction(), dex_pc);
break;
}
@@ -2783,18 +2828,14 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
uint8_t destination = instruction.VRegA_22c();
uint8_t reference = instruction.VRegB_22c();
uint16_t type_index = instruction.VRegC_22c();
- if (!BuildTypeCheck(instruction, destination, reference, type_index, dex_pc)) {
- return false;
- }
+ BuildTypeCheck(instruction, destination, reference, type_index, dex_pc);
break;
}
case Instruction::CHECK_CAST: {
uint8_t reference = instruction.VRegA_21c();
uint16_t type_index = instruction.VRegB_21c();
- if (!BuildTypeCheck(instruction, -1, reference, type_index, dex_pc)) {
- return false;
- }
+ BuildTypeCheck(instruction, -1, reference, type_index, dex_pc);
break;
}
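
A tiny sketch of the recurring pattern in this file's changes, with hypothetical types: a condition that used to abort compilation (an inaccessible or unresolved class) is instead recorded on the IR node, and codegen later routes that case through a verifying runtime entrypoint such as pInitializeTypeAndVerifyAccess.

    #include <cstdint>

    struct LoadClassNode {
      uint16_t type_index;
      bool needs_access_check;  // true => codegen emits the runtime-call path
    };

    LoadClassNode BuildLoadClass(uint16_t type_index, bool can_access) {
      // Before: if (!can_access) { record a stat and fail the method. }
      return LoadClassNode{type_index, /*needs_access_check=*/!can_access};
    }
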
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 4c8e3d0442..6910d5195c 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -187,6 +187,10 @@ class HGraphBuilder : public ValueObject {
// Builds an instance field access node and returns whether the instruction is supported.
bool BuildInstanceFieldAccess(const Instruction& instruction, uint32_t dex_pc, bool is_put);
+ void BuildUnresolvedStaticFieldAccess(const Instruction& instruction,
+ uint32_t dex_pc,
+ bool is_put,
+ Primitive::Type field_type);
// Builds a static field access node and returns whether the instruction is supported.
bool BuildStaticFieldAccess(const Instruction& instruction, uint32_t dex_pc, bool is_put);
@@ -232,8 +236,7 @@ class HGraphBuilder : public ValueObject {
uint32_t dex_pc);
// Builds a `HInstanceOf`, or a `HCheckCast` instruction.
- // Returns whether we succeeded in building the instruction.
- bool BuildTypeCheck(const Instruction& instruction,
+ void BuildTypeCheck(const Instruction& instruction,
uint8_t destination,
uint8_t reference,
uint16_t type_index,
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index be05691741..1da2a07462 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -413,6 +413,151 @@ void CodeGenerator::GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invok
InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
}
+void CodeGenerator::CreateUnresolvedFieldLocationSummary(
+ HInstruction* field_access,
+ Primitive::Type field_type,
+ const FieldAccessCallingConvention& calling_convention) {
+ bool is_instance = field_access->IsUnresolvedInstanceFieldGet()
+ || field_access->IsUnresolvedInstanceFieldSet();
+ bool is_get = field_access->IsUnresolvedInstanceFieldGet()
+ || field_access->IsUnresolvedStaticFieldGet();
+
+ ArenaAllocator* allocator = field_access->GetBlock()->GetGraph()->GetArena();
+ LocationSummary* locations =
+ new (allocator) LocationSummary(field_access, LocationSummary::kCall);
+
+ locations->AddTemp(calling_convention.GetFieldIndexLocation());
+
+ if (is_instance) {
+ // Add the `this` object for instance field accesses.
+ locations->SetInAt(0, calling_convention.GetObjectLocation());
+ }
+
+  // Note that pSetXXStatic/pGetXXStatic always take/return an int or int64
+  // regardless of the type. Because of that we are forced to special-case
+  // the access to floating point values.
+ if (is_get) {
+ if (Primitive::IsFloatingPointType(field_type)) {
+ // The return value will be stored in regular registers while register
+ // allocator expects it in a floating point register.
+ // Note We don't need to request additional temps because the return
+ // register(s) are already blocked due the call and they may overlap with
+ // the input or field index.
+ // The transfer between the two will be done at codegen level.
+ locations->SetOut(calling_convention.GetFpuLocation(field_type));
+ } else {
+ locations->SetOut(calling_convention.GetReturnLocation(field_type));
+ }
+ } else {
+ size_t set_index = is_instance ? 1 : 0;
+ if (Primitive::IsFloatingPointType(field_type)) {
+ // The set value comes from a float location while the calling convention
+ // expects it in a regular register location. Allocate a temp for it and
+ // make the transfer at codegen.
+ AddLocationAsTemp(calling_convention.GetSetValueLocation(field_type, is_instance), locations);
+ locations->SetInAt(set_index, calling_convention.GetFpuLocation(field_type));
+ } else {
+ locations->SetInAt(set_index,
+ calling_convention.GetSetValueLocation(field_type, is_instance));
+ }
+ }
+}
+
+void CodeGenerator::GenerateUnresolvedFieldAccess(
+ HInstruction* field_access,
+ Primitive::Type field_type,
+ uint32_t field_index,
+ uint32_t dex_pc,
+ const FieldAccessCallingConvention& calling_convention) {
+ LocationSummary* locations = field_access->GetLocations();
+
+ MoveConstant(locations->GetTemp(0), field_index);
+
+ bool is_instance = field_access->IsUnresolvedInstanceFieldGet()
+ || field_access->IsUnresolvedInstanceFieldSet();
+ bool is_get = field_access->IsUnresolvedInstanceFieldGet()
+ || field_access->IsUnresolvedStaticFieldGet();
+
+ if (!is_get && Primitive::IsFloatingPointType(field_type)) {
+ // Copy the float value to be set into the calling convention register.
+    // Note that using the temp location directly is problematic as we don't
+ // support temp register pairs. To avoid boilerplate conversion code, use
+ // the location from the calling convention.
+ MoveLocation(calling_convention.GetSetValueLocation(field_type, is_instance),
+ locations->InAt(is_instance ? 1 : 0),
+ (Primitive::Is64BitType(field_type) ? Primitive::kPrimLong : Primitive::kPrimInt));
+ }
+
+ QuickEntrypointEnum entrypoint = kQuickSet8Static; // Initialize to anything to avoid warnings.
+ switch (field_type) {
+ case Primitive::kPrimBoolean:
+ entrypoint = is_instance
+ ? (is_get ? kQuickGetBooleanInstance : kQuickSet8Instance)
+ : (is_get ? kQuickGetBooleanStatic : kQuickSet8Static);
+ break;
+ case Primitive::kPrimByte:
+ entrypoint = is_instance
+ ? (is_get ? kQuickGetByteInstance : kQuickSet8Instance)
+ : (is_get ? kQuickGetByteStatic : kQuickSet8Static);
+ break;
+ case Primitive::kPrimShort:
+ entrypoint = is_instance
+ ? (is_get ? kQuickGetShortInstance : kQuickSet16Instance)
+ : (is_get ? kQuickGetShortStatic : kQuickSet16Static);
+ break;
+ case Primitive::kPrimChar:
+ entrypoint = is_instance
+ ? (is_get ? kQuickGetCharInstance : kQuickSet16Instance)
+ : (is_get ? kQuickGetCharStatic : kQuickSet16Static);
+ break;
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ entrypoint = is_instance
+ ? (is_get ? kQuickGet32Instance : kQuickSet32Instance)
+ : (is_get ? kQuickGet32Static : kQuickSet32Static);
+ break;
+ case Primitive::kPrimNot:
+ entrypoint = is_instance
+ ? (is_get ? kQuickGetObjInstance : kQuickSetObjInstance)
+ : (is_get ? kQuickGetObjStatic : kQuickSetObjStatic);
+ break;
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ entrypoint = is_instance
+ ? (is_get ? kQuickGet64Instance : kQuickSet64Instance)
+ : (is_get ? kQuickGet64Static : kQuickSet64Static);
+ break;
+ default:
+ LOG(FATAL) << "Invalid type " << field_type;
+ }
+ InvokeRuntime(entrypoint, field_access, dex_pc, nullptr);
+
+ if (is_get && Primitive::IsFloatingPointType(field_type)) {
+ MoveLocation(locations->Out(), calling_convention.GetReturnLocation(field_type), field_type);
+ }
+}
+
+void CodeGenerator::CreateLoadClassLocationSummary(HLoadClass* cls,
+ Location runtime_type_index_location,
+ Location runtime_return_location) {
+ ArenaAllocator* allocator = cls->GetBlock()->GetGraph()->GetArena();
+ LocationSummary::CallKind call_kind = cls->NeedsAccessCheck()
+ ? LocationSummary::kCall
+ : (cls->CanCallRuntime()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
+ LocationSummary* locations = new (allocator) LocationSummary(cls, call_kind);
+ if (cls->NeedsAccessCheck()) {
+ locations->SetInAt(0, Location::NoLocation());
+ locations->AddTemp(runtime_type_index_location);
+ locations->SetOut(runtime_return_location);
+ } else {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+ }
+}
+
+
void CodeGenerator::BlockIfInRegister(Location location, bool is_out) const {
// The DCHECKS below check that a register is not specified twice in
// the summary. The out location can overlap with an input, so we need
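
The switch above follows a naming scheme worth spelling out: reads need the exact field type so the runtime can sign- or zero-extend the loaded value (GetBoolean vs. GetByte), while writes only need the store width (both map to Set8), and float/double reuse the 32/64-bit integer helpers. A sketch of that mapping, not ART code, keyed on dex descriptor characters:

    #include <string>

    std::string QuickFieldEntrypoint(char descriptor, bool is_get, bool is_instance) {
      std::string op;
      switch (descriptor) {
        case 'Z': op = is_get ? "GetBoolean" : "Set8";  break;
        case 'B': op = is_get ? "GetByte"    : "Set8";  break;
        case 'S': op = is_get ? "GetShort"   : "Set16"; break;
        case 'C': op = is_get ? "GetChar"    : "Set16"; break;
        case 'I':
        case 'F': op = is_get ? "Get32"      : "Set32"; break;
        case 'J':
        case 'D': op = is_get ? "Get64"      : "Set64"; break;
        default:  op = is_get ? "GetObj"     : "SetObj"; break;  // 'L' and '['
      }
      return "kQuick" + op + (is_instance ? "Instance" : "Static");
    }
    // QuickFieldEntrypoint('Z', /*is_get=*/false, /*is_instance=*/false) == "kQuickSet8Static"
    // QuickFieldEntrypoint('J', /*is_get=*/true,  /*is_instance=*/true)  == "kQuickGet64Instance"
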
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 5da0e59187..0a3698946e 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -143,6 +143,22 @@ class InvokeDexCallingConventionVisitor {
DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitor);
};
+class FieldAccessCallingConvention {
+ public:
+ virtual Location GetObjectLocation() const = 0;
+ virtual Location GetFieldIndexLocation() const = 0;
+ virtual Location GetReturnLocation(Primitive::Type type) const = 0;
+ virtual Location GetSetValueLocation(Primitive::Type type, bool is_instance) const = 0;
+ virtual Location GetFpuLocation(Primitive::Type type) const = 0;
+ virtual ~FieldAccessCallingConvention() {}
+
+ protected:
+ FieldAccessCallingConvention() {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConvention);
+};
+
class CodeGenerator {
public:
// Compiles the graph to executable instructions. Returns whether the compilation
@@ -177,6 +193,9 @@ class CodeGenerator {
virtual void Bind(HBasicBlock* block) = 0;
virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) = 0;
virtual void MoveConstant(Location destination, int32_t value) = 0;
+ virtual void MoveLocation(Location dst, Location src, Primitive::Type dst_type) = 0;
+ virtual void AddLocationAsTemp(Location location, LocationSummary* locations) = 0;
+
virtual Assembler* GetAssembler() = 0;
virtual const Assembler& GetAssembler() const = 0;
virtual size_t GetWordSize() const = 0;
@@ -385,6 +404,23 @@ class CodeGenerator {
void GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invoke);
+ void CreateUnresolvedFieldLocationSummary(
+ HInstruction* field_access,
+ Primitive::Type field_type,
+ const FieldAccessCallingConvention& calling_convention);
+
+ void GenerateUnresolvedFieldAccess(
+ HInstruction* field_access,
+ Primitive::Type field_type,
+ uint32_t field_index,
+ uint32_t dex_pc,
+ const FieldAccessCallingConvention& calling_convention);
+
+ // TODO: This overlaps a bit with MoveFromReturnRegister. Refactor for a better design.
+ static void CreateLoadClassLocationSummary(HLoadClass* cls,
+ Location runtime_type_index_location,
+ Location runtime_return_location);
+
void SetDisassemblyInformation(DisassemblyInformation* info) { disasm_info_ = info; }
DisassemblyInformation* GetDisassemblyInformation() const { return disasm_info_; }
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index a7dbb53382..08d8d88ca6 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -473,12 +473,8 @@ CodeGeneratorARM::CodeGeneratorARM(HGraph* graph,
kNumberOfRegisterPairs,
ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
arraysize(kCoreCalleeSaves)),
- graph->IsDebuggable()
- // If the graph is debuggable, we need to save the fpu registers ourselves,
- // as the stubs do not do it.
- ? 0
- : ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
- arraysize(kFpuCalleeSaves)),
+ ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
+ arraysize(kFpuCalleeSaves)),
compiler_options,
stats),
block_labels_(nullptr),
@@ -611,7 +607,12 @@ void CodeGeneratorARM::SetupBlockedRegisters(bool is_baseline) const {
}
blocked_core_registers_[kCoreSavedRegisterForBaseline] = false;
+ }
+ if (is_baseline || GetGraph()->IsDebuggable()) {
+ // Stubs do not save callee-save floating point registers. If the graph
+ // is debuggable, we need to deal with these registers differently. For
+ // now, just block them.
for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
}
@@ -906,6 +907,10 @@ void CodeGeneratorARM::Move64(Location destination, Location source) {
Primitive::kPrimInt);
} else if (source.IsFpuRegister()) {
UNIMPLEMENTED(FATAL);
+ } else if (source.IsFpuRegisterPair()) {
+ __ vmovrrd(destination.AsRegisterPairLow<Register>(),
+ destination.AsRegisterPairHigh<Register>(),
+ FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()));
} else {
DCHECK(source.IsDoubleStackSlot());
DCHECK(ExpectedPairLayout(destination));
@@ -917,6 +922,10 @@ void CodeGeneratorARM::Move64(Location destination, Location source) {
__ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
SP,
source.GetStackIndex());
+ } else if (source.IsRegisterPair()) {
+ __ vmovdrr(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
+ source.AsRegisterPairLow<Register>(),
+ source.AsRegisterPairHigh<Register>());
} else {
UNIMPLEMENTED(FATAL);
}
@@ -1038,6 +1047,25 @@ void CodeGeneratorARM::MoveConstant(Location location, int32_t value) {
__ LoadImmediate(location.AsRegister<Register>(), value);
}
+void CodeGeneratorARM::MoveLocation(Location dst, Location src, Primitive::Type dst_type) {
+ if (Primitive::Is64BitType(dst_type)) {
+ Move64(dst, src);
+ } else {
+ Move32(dst, src);
+ }
+}
+
+void CodeGeneratorARM::AddLocationAsTemp(Location location, LocationSummary* locations) {
+ if (location.IsRegister()) {
+ locations->AddTemp(location);
+ } else if (location.IsRegisterPair()) {
+ locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairLow<Register>()));
+ locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairHigh<Register>()));
+ } else {
+ UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
+ }
+}
+
void CodeGeneratorARM::InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
@@ -3605,6 +3633,74 @@ void InstructionCodeGeneratorARM::VisitStaticFieldSet(HStaticFieldSet* instructi
HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
+void LocationsBuilderARM::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionARM calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionARM calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderARM::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionARM calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionARM calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderARM::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionARM calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionARM calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderARM::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionARM calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionARM calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
void LocationsBuilderARM::VisitNullCheck(HNullCheck* instruction) {
LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
? LocationSummary::kCallOnSlowPath
@@ -4372,17 +4468,24 @@ void ParallelMoveResolverARM::RestoreScratch(int reg) {
}
void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
- LocationSummary::CallKind call_kind = cls->CanCallRuntime()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister());
+ InvokeRuntimeCallingConvention calling_convention;
+ CodeGenerator::CreateLoadClassLocationSummary(
+ cls,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Location::RegisterLocation(R0));
}
void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
LocationSummary* locations = cls->GetLocations();
+ if (cls->NeedsAccessCheck()) {
+ codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex());
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInitializeTypeAndVerifyAccess),
+ cls,
+ cls->GetDexPc(),
+ nullptr);
+ return;
+ }
+
Register out = locations->Out().AsRegister<Register>();
Register current_method = locations->InAt(0).AsRegister<Register>();
if (cls->IsReferrersClass()) {
@@ -4508,6 +4611,7 @@ void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) {
case TypeCheckKind::kArrayObjectCheck:
call_kind = LocationSummary::kNoCall;
break;
+ case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
call_kind = LocationSummary::kCall;
break;
@@ -4548,10 +4652,11 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
__ CompareAndBranchIfZero(obj, &zero);
}
- // In case of an interface check, we put the object class into the object register.
+ // In case of an interface/unresolved check, we put the object class into the object register.
// This is safe, as the register is caller-save, and the object must be in another
// register if it survives the runtime call.
- Register target = (instruction->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck)
+ Register target = (instruction->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck) ||
+ (instruction->GetTypeCheckKind() == TypeCheckKind::kUnresolvedCheck)
? obj
: out;
__ LoadFromOffset(kLoadWord, target, obj, class_offset);
@@ -4632,7 +4737,7 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
}
break;
}
-
+ case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
default: {
codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
@@ -4673,6 +4778,7 @@ void LocationsBuilderARM::VisitCheckCast(HCheckCast* instruction) {
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
break;
+ case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
call_kind = LocationSummary::kCall;
break;
@@ -4777,6 +4883,7 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
__ CompareAndBranchIfNonZero(temp, slow_path->GetEntryLabel());
break;
}
+ case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
default:
codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
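
On the core<->FP pair moves added to Move64 above: vmovdrr packs two 32-bit core registers into one double-precision D register and vmovrrd unpacks it, a pure bit transfer with no numeric conversion. A host-side sketch of the semantics (the low core register supplies the low word):

    #include <cstdint>
    #include <cstring>

    double PackCorePair(uint32_t lo, uint32_t hi) {  // ~ vmovdrr d, lo, hi
      uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
      double d;
      std::memcpy(&d, &bits, sizeof d);  // reinterpret, do not convert
      return d;
    }

    void UnpackToCorePair(double d, uint32_t* lo, uint32_t* hi) {  // ~ vmovrrd lo, hi, d
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      *lo = static_cast<uint32_t>(bits);
      *hi = static_cast<uint32_t>(bits >> 32);
    }
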
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 111112e9b2..16d1d383b4 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -96,6 +96,38 @@ class InvokeDexCallingConventionVisitorARM : public InvokeDexCallingConventionVi
DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorARM);
};
+class FieldAccessCallingConventionARM : public FieldAccessCallingConvention {
+ public:
+ FieldAccessCallingConventionARM() {}
+
+ Location GetObjectLocation() const OVERRIDE {
+ return Location::RegisterLocation(R1);
+ }
+ Location GetFieldIndexLocation() const OVERRIDE {
+ return Location::RegisterLocation(R0);
+ }
+ Location GetReturnLocation(Primitive::Type type) const OVERRIDE {
+ return Primitive::Is64BitType(type)
+ ? Location::RegisterPairLocation(R0, R1)
+ : Location::RegisterLocation(R0);
+ }
+ Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
+ return Primitive::Is64BitType(type)
+ ? Location::RegisterPairLocation(R2, R3)
+ : (is_instance
+ ? Location::RegisterLocation(R2)
+ : Location::RegisterLocation(R1));
+ }
+ Location GetFpuLocation(Primitive::Type type) const OVERRIDE {
+ return Primitive::Is64BitType(type)
+ ? Location::FpuRegisterPairLocation(S0, S1)
+ : Location::FpuRegisterLocation(S0);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionARM);
+};
+
class ParallelMoveResolverARM : public ParallelMoveResolverWithSwap {
public:
ParallelMoveResolverARM(ArenaAllocator* allocator, CodeGeneratorARM* codegen)
@@ -225,6 +257,9 @@ class CodeGeneratorARM : public CodeGenerator {
void Bind(HBasicBlock* block) OVERRIDE;
void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
void MoveConstant(Location destination, int32_t value) OVERRIDE;
+ void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
+ void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+
size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
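
Reading FieldAccessCallingConventionARM above concretely: for an unresolved 64-bit instance field set (iput-wide), the field index travels in R0, the receiver in R1, and the value in the pair R2/R3; a double arrives in S0/S1 and is shuffled into R2/R3 at codegen time because the entrypoints take core registers. A runnable toy illustrating that register plan (register names only, not ART code):

    #include <cstdio>

    struct RegPlan { const char* field_index; const char* receiver; const char* value; };

    RegPlan InstanceSetPlan(bool wide) {
      // R0: field index temp, R1: `this` (GetObjectLocation),
      // R2 or the pair R2/R3: the value to store (GetSetValueLocation).
      return wide ? RegPlan{"R0", "R1", "R2/R3"} : RegPlan{"R0", "R1", "R2"};
    }

    int main() {
      RegPlan p = InstanceSetPlan(/*wide=*/true);
      std::printf("iput-wide: index=%s this=%s value=%s\n",
                  p.field_index, p.receiver, p.value);
      return 0;
    }
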
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 78ecfdec10..415b37ddcf 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -19,7 +19,6 @@
#include "arch/arm64/instruction_set_features_arm64.h"
#include "art_method.h"
#include "code_generator_utils.h"
-#include "common_arm64.h"
#include "compiled_method.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
@@ -119,11 +118,8 @@ static void SaveRestoreLiveRegistersHelper(CodeGenerator* codegen,
CPURegList core_list = CPURegList(CPURegister::kRegister, kXRegSize,
register_set->GetCoreRegisters() & (~callee_saved_core_registers.list()));
- CPURegList fp_list = CPURegList(
- CPURegister::kFPRegister,
- kDRegSize,
- register_set->GetFloatingPointRegisters()
- & (~(codegen->GetGraph()->IsDebuggable() ? 0 : callee_saved_fp_registers.list())));
+ CPURegList fp_list = CPURegList(CPURegister::kFPRegister, kDRegSize,
+ register_set->GetFloatingPointRegisters() & (~callee_saved_fp_registers.list()));
MacroAssembler* masm = down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler();
UseScratchRegisterScope temps(masm);
@@ -583,9 +579,7 @@ CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph,
kNumberOfAllocatableFPRegisters,
kNumberOfAllocatableRegisterPairs,
callee_saved_core_registers.list(),
- // If the graph is debuggable, we need to save the fpu registers ourselves,
- // as the stubs do not do it.
- graph->IsDebuggable() ? 0 : callee_saved_fp_registers.list(),
+ callee_saved_fp_registers.list(),
compiler_options,
stats),
block_labels_(nullptr),
@@ -666,7 +660,7 @@ void ParallelMoveResolverARM64::FreeScratchLocation(Location loc) {
void ParallelMoveResolverARM64::EmitMove(size_t index) {
DCHECK_LT(index, moves_.size());
MoveOperands* move = moves_[index];
- codegen_->MoveLocation(move->GetDestination(), move->GetSource());
+ codegen_->MoveLocation(move->GetDestination(), move->GetSource(), Primitive::kPrimVoid);
}
void CodeGeneratorARM64::GenerateFrameEntry() {
@@ -750,7 +744,9 @@ void CodeGeneratorARM64::Move(HInstruction* instruction,
}
if (instruction->IsCurrentMethod()) {
- MoveLocation(location, Location::DoubleStackSlot(kCurrentMethodStackOffset));
+ MoveLocation(location,
+ Location::DoubleStackSlot(kCurrentMethodStackOffset),
+ Primitive::kPrimVoid);
} else if (locations != nullptr && locations->Out().Equals(location)) {
return;
} else if (instruction->IsIntConstant()
@@ -793,6 +789,14 @@ void CodeGeneratorARM64::MoveConstant(Location location, int32_t value) {
__ Mov(RegisterFrom(location, Primitive::kPrimInt), value);
}
+void CodeGeneratorARM64::AddLocationAsTemp(Location location, LocationSummary* locations) {
+ if (location.IsRegister()) {
+ locations->AddTemp(location);
+ } else {
+ UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
+ }
+}
+
Location CodeGeneratorARM64::GetStackLocation(HLoadLocal* load) const {
Primitive::Type type = load->GetType();
@@ -860,7 +864,12 @@ void CodeGeneratorARM64::SetupBlockedRegisters(bool is_baseline) const {
while (!reserved_core_baseline_registers.IsEmpty()) {
blocked_core_registers_[reserved_core_baseline_registers.PopLowestIndex().code()] = true;
}
+ }
+ if (is_baseline || GetGraph()->IsDebuggable()) {
+ // Stubs do not save callee-save floating point registers. If the graph
+ // is debuggable, we need to deal with these registers differently. For
+ // now, just block them.
CPURegList reserved_fp_baseline_registers = callee_saved_fp_registers;
while (!reserved_fp_baseline_registers.IsEmpty()) {
blocked_fpu_registers_[reserved_fp_baseline_registers.PopLowestIndex().code()] = true;
@@ -943,7 +952,9 @@ static bool CoherentConstantAndType(Location constant, Primitive::Type type) {
(cst->IsDoubleConstant() && type == Primitive::kPrimDouble);
}
-void CodeGeneratorARM64::MoveLocation(Location destination, Location source, Primitive::Type type) {
+void CodeGeneratorARM64::MoveLocation(Location destination,
+ Location source,
+ Primitive::Type dst_type) {
if (source.Equals(destination)) {
return;
}
@@ -952,7 +963,7 @@ void CodeGeneratorARM64::MoveLocation(Location destination, Location source, Pri
// locations. When moving from and to a register, the argument type can be
// used to generate 32bit instead of 64bit moves. In debug mode we also
  // check the coherency of the locations and the type.
- bool unspecified_type = (type == Primitive::kPrimVoid);
+ bool unspecified_type = (dst_type == Primitive::kPrimVoid);
if (destination.IsRegister() || destination.IsFpuRegister()) {
if (unspecified_type) {
@@ -962,30 +973,44 @@ void CodeGeneratorARM64::MoveLocation(Location destination, Location source, Pri
|| src_cst->IsFloatConstant()
|| src_cst->IsNullConstant()))) {
      // For stack slots and 32bit constants, a 32bit type is appropriate.
- type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
+ dst_type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
} else {
// If the source is a double stack slot or a 64bit constant, a 64bit
// type is appropriate. Else the source is a register, and since the
      // type has not been specified, we choose a 64bit type to force a 64bit
// move.
- type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
+ dst_type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
}
}
- DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(type)) ||
- (destination.IsRegister() && !Primitive::IsFloatingPointType(type)));
- CPURegister dst = CPURegisterFrom(destination, type);
+ DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(dst_type)) ||
+ (destination.IsRegister() && !Primitive::IsFloatingPointType(dst_type)));
+ CPURegister dst = CPURegisterFrom(destination, dst_type);
if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
DCHECK(dst.Is64Bits() == source.IsDoubleStackSlot());
__ Ldr(dst, StackOperandFrom(source));
} else if (source.IsConstant()) {
- DCHECK(CoherentConstantAndType(source, type));
+ DCHECK(CoherentConstantAndType(source, dst_type));
MoveConstant(dst, source.GetConstant());
+ } else if (source.IsRegister()) {
+ if (destination.IsRegister()) {
+ __ Mov(Register(dst), RegisterFrom(source, dst_type));
+ } else {
+ DCHECK(destination.IsFpuRegister());
+ Primitive::Type source_type = Primitive::Is64BitType(dst_type)
+ ? Primitive::kPrimLong
+ : Primitive::kPrimInt;
+ __ Fmov(FPRegisterFrom(destination, dst_type), RegisterFrom(source, source_type));
+ }
} else {
+ DCHECK(source.IsFpuRegister());
if (destination.IsRegister()) {
- __ Mov(Register(dst), RegisterFrom(source, type));
+ Primitive::Type source_type = Primitive::Is64BitType(dst_type)
+ ? Primitive::kPrimDouble
+ : Primitive::kPrimFloat;
+ __ Fmov(RegisterFrom(destination, dst_type), FPRegisterFrom(source, source_type));
} else {
DCHECK(destination.IsFpuRegister());
- __ Fmov(FPRegister(dst), FPRegisterFrom(source, type));
+ __ Fmov(FPRegister(dst), FPRegisterFrom(source, dst_type));
}
}
} else { // The destination is not a register. It must be a stack slot.
@@ -993,16 +1018,17 @@ void CodeGeneratorARM64::MoveLocation(Location destination, Location source, Pri
if (source.IsRegister() || source.IsFpuRegister()) {
if (unspecified_type) {
if (source.IsRegister()) {
- type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
+ dst_type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
} else {
- type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
+ dst_type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
}
}
- DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(type)) &&
- (source.IsFpuRegister() == Primitive::IsFloatingPointType(type)));
- __ Str(CPURegisterFrom(source, type), StackOperandFrom(destination));
+ DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(dst_type)) &&
+ (source.IsFpuRegister() == Primitive::IsFloatingPointType(dst_type)));
+ __ Str(CPURegisterFrom(source, dst_type), StackOperandFrom(destination));
} else if (source.IsConstant()) {
- DCHECK(unspecified_type || CoherentConstantAndType(source, type)) << source << " " << type;
+ DCHECK(unspecified_type || CoherentConstantAndType(source, dst_type))
+ << source << " " << dst_type;
UseScratchRegisterScope temps(GetVIXLAssembler());
HConstant* src_cst = source.GetConstant();
CPURegister temp;
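
The inference comments above boil down to a small decision rule. As a rough standalone model (the enum names below are simplified stand-ins for illustration, not ART's real Location/Primitive types):

  // Minimal model of MoveLocation's type inference when dst_type is
  // unspecified (kPrimVoid). Simplified enums; illustrative only.
  #include <cassert>

  enum class Kind { kRegister, kFpuRegister, kStackSlot, kDoubleStackSlot };
  enum class Type { kInt, kLong, kFloat, kDouble };

  Type InferMoveType(Kind dst, Kind src_kind, bool src_is_32bit_constant) {
    if (src_kind == Kind::kStackSlot || src_is_32bit_constant) {
      // 32-bit sources (stack slots, int/float/null constants) get a 32-bit type.
      return dst == Kind::kRegister ? Type::kInt : Type::kFloat;
    }
    // 64-bit stack slots, 64-bit constants and register sources force a 64-bit move.
    return dst == Kind::kRegister ? Type::kLong : Type::kDouble;
  }

  int main() {
    assert(InferMoveType(Kind::kRegister, Kind::kStackSlot, false) == Type::kInt);
    assert(InferMoveType(Kind::kFpuRegister, Kind::kDoubleStackSlot, false) == Type::kDouble);
    return 0;
  }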
@@ -2362,6 +2388,7 @@ void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) {
case TypeCheckKind::kArrayObjectCheck:
call_kind = LocationSummary::kNoCall;
break;
+ case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
call_kind = LocationSummary::kCall;
break;
@@ -2403,10 +2430,11 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
__ Cbz(obj, &zero);
}
- // In case of an interface check, we put the object class into the object register.
+ // In case of an interface/unresolved check, we put the object class into the object register.
// This is safe, as the register is caller-save, and the object must be in another
// register if it survives the runtime call.
- Register target = (instruction->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck)
+ Register target = (instruction->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck) ||
+ (instruction->GetTypeCheckKind() == TypeCheckKind::kUnresolvedCheck)
? obj
: out;
__ Ldr(target, HeapOperand(obj.W(), class_offset));
@@ -2487,7 +2515,7 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
}
break;
}
-
+ case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
default: {
codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
@@ -2528,6 +2556,7 @@ void LocationsBuilderARM64::VisitCheckCast(HCheckCast* instruction) {
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
break;
+ case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
call_kind = LocationSummary::kCall;
break;
@@ -2633,6 +2662,7 @@ void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
__ Cbnz(temp, slow_path->GetEntryLabel());
break;
}
+ case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
default:
codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
@@ -2988,14 +3018,23 @@ void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
}
void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
- LocationSummary::CallKind call_kind = cls->CanCallRuntime() ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister());
+ InvokeRuntimeCallingConvention calling_convention;
+ CodeGenerator::CreateLoadClassLocationSummary(
+ cls,
+ LocationFrom(calling_convention.GetRegisterAt(0)),
+ LocationFrom(vixl::x0));
}
void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
+ if (cls->NeedsAccessCheck()) {
+ codegen_->MoveConstant(cls->GetLocations()->GetTemp(0), cls->GetTypeIndex());
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInitializeTypeAndVerifyAccess),
+ cls,
+ cls->GetDexPc(),
+ nullptr);
+ return;
+ }
+
Register out = OutputRegister(cls);
Register current_method = InputRegisterAt(cls, 0);
if (cls->IsReferrersClass()) {
@@ -3508,6 +3547,74 @@ void InstructionCodeGeneratorARM64::VisitStaticFieldSet(HStaticFieldSet* instruc
HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
+void LocationsBuilderARM64::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionARM64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM64::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionARM64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderARM64::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionARM64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM64::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionARM64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderARM64::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionARM64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM64::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionARM64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderARM64::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionARM64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM64::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionARM64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 7178081bf8..a068b48797 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_
#include "code_generator.h"
+#include "common_arm64.h"
#include "dex/compiler_enums.h"
#include "driver/compiler_options.h"
#include "nodes.h"
@@ -141,6 +142,34 @@ class InvokeDexCallingConventionVisitorARM64 : public InvokeDexCallingConvention
DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorARM64);
};
+class FieldAccessCallingConventionARM64 : public FieldAccessCallingConvention {
+ public:
+ FieldAccessCallingConventionARM64() {}
+
+ Location GetObjectLocation() const OVERRIDE {
+ return helpers::LocationFrom(vixl::x1);
+ }
+ Location GetFieldIndexLocation() const OVERRIDE {
+ return helpers::LocationFrom(vixl::x0);
+ }
+ Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ return helpers::LocationFrom(vixl::x0);
+ }
+ Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
+ return Primitive::Is64BitType(type)
+ ? helpers::LocationFrom(vixl::x2)
+ : (is_instance
+ ? helpers::LocationFrom(vixl::x2)
+ : helpers::LocationFrom(vixl::x1));
+ }
+ Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ return helpers::LocationFrom(vixl::d0);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionARM64);
+};
+
class InstructionCodeGeneratorARM64 : public HGraphVisitor {
public:
InstructionCodeGeneratorARM64(HGraph* graph, CodeGeneratorARM64* codegen);
@@ -334,10 +363,9 @@ class CodeGeneratorARM64 : public CodeGenerator {
// Code generation helpers.
void MoveConstant(vixl::CPURegister destination, HConstant* constant);
void MoveConstant(Location destination, int32_t value) OVERRIDE;
- // The type is optional. When specified it must be coherent with the
- // locations, and is used for optimisation and debugging.
- void MoveLocation(Location destination, Location source,
- Primitive::Type type = Primitive::kPrimVoid);
+ void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
+ void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+
void Load(Primitive::Type type, vixl::CPURegister dst, const vixl::MemOperand& src);
void Store(Primitive::Type type, vixl::CPURegister rt, const vixl::MemOperand& dst);
void LoadAcquire(HInstruction* instruction, vixl::CPURegister dst, const vixl::MemOperand& src);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index ad0a39c753..756336d0ee 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -617,7 +617,7 @@ void CodeGeneratorMIPS64::Bind(HBasicBlock* block) {
void CodeGeneratorMIPS64::MoveLocation(Location destination,
Location source,
- Primitive::Type type) {
+ Primitive::Type dst_type) {
if (source.Equals(destination)) {
return;
}
@@ -625,7 +625,7 @@ void CodeGeneratorMIPS64::MoveLocation(Location destination,
// A valid move can always be inferred from the destination and source
// locations. When moving from and to a register, the argument type can be
// used to generate 32bit instead of 64bit moves.
- bool unspecified_type = (type == Primitive::kPrimVoid);
+ bool unspecified_type = (dst_type == Primitive::kPrimVoid);
DCHECK_EQ(unspecified_type, false);
if (destination.IsRegister() || destination.IsFpuRegister()) {
@@ -636,21 +636,21 @@ void CodeGeneratorMIPS64::MoveLocation(Location destination,
|| src_cst->IsFloatConstant()
|| src_cst->IsNullConstant()))) {
      // For stack slots and 32bit constants, a 32bit type is appropriate.
- type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
+ dst_type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
} else {
// If the source is a double stack slot or a 64bit constant, a 64bit
// type is appropriate. Else the source is a register, and since the
      // type has not been specified, we choose a 64bit type to force a 64bit
// move.
- type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
+ dst_type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
}
}
- DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(type)) ||
- (destination.IsRegister() && !Primitive::IsFloatingPointType(type)));
+ DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(dst_type)) ||
+ (destination.IsRegister() && !Primitive::IsFloatingPointType(dst_type)));
if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
// Move to GPR/FPR from stack
LoadOperandType load_type = source.IsStackSlot() ? kLoadWord : kLoadDoubleword;
- if (Primitive::IsFloatingPointType(type)) {
+ if (Primitive::IsFloatingPointType(dst_type)) {
__ LoadFpuFromOffset(load_type,
destination.AsFpuRegister<FpuRegister>(),
SP,
@@ -665,31 +665,47 @@ void CodeGeneratorMIPS64::MoveLocation(Location destination,
} else if (source.IsConstant()) {
// Move to GPR/FPR from constant
GpuRegister gpr = AT;
- if (!Primitive::IsFloatingPointType(type)) {
+ if (!Primitive::IsFloatingPointType(dst_type)) {
gpr = destination.AsRegister<GpuRegister>();
}
- if (type == Primitive::kPrimInt || type == Primitive::kPrimFloat) {
+ if (dst_type == Primitive::kPrimInt || dst_type == Primitive::kPrimFloat) {
__ LoadConst32(gpr, GetInt32ValueOf(source.GetConstant()->AsConstant()));
} else {
__ LoadConst64(gpr, GetInt64ValueOf(source.GetConstant()->AsConstant()));
}
- if (type == Primitive::kPrimFloat) {
+ if (dst_type == Primitive::kPrimFloat) {
__ Mtc1(gpr, destination.AsFpuRegister<FpuRegister>());
- } else if (type == Primitive::kPrimDouble) {
+ } else if (dst_type == Primitive::kPrimDouble) {
__ Dmtc1(gpr, destination.AsFpuRegister<FpuRegister>());
}
- } else {
+ } else if (source.IsRegister()) {
if (destination.IsRegister()) {
// Move to GPR from GPR
__ Move(destination.AsRegister<GpuRegister>(), source.AsRegister<GpuRegister>());
} else {
+ DCHECK(destination.IsFpuRegister());
+ if (Primitive::Is64BitType(dst_type)) {
+ __ Dmtc1(source.AsRegister<GpuRegister>(), destination.AsFpuRegister<FpuRegister>());
+ } else {
+ __ Mtc1(source.AsRegister<GpuRegister>(), destination.AsFpuRegister<FpuRegister>());
+ }
+ }
+ } else if (source.IsFpuRegister()) {
+ if (destination.IsFpuRegister()) {
// Move to FPR from FPR
- if (type == Primitive::kPrimFloat) {
+ if (dst_type == Primitive::kPrimFloat) {
__ MovS(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
} else {
- DCHECK_EQ(type, Primitive::kPrimDouble);
+ DCHECK_EQ(dst_type, Primitive::kPrimDouble);
__ MovD(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
}
+ } else {
+ DCHECK(destination.IsRegister());
+ if (Primitive::Is64BitType(dst_type)) {
+ __ Dmfc1(destination.AsRegister<GpuRegister>(), source.AsFpuRegister<FpuRegister>());
+ } else {
+ __ Mfc1(destination.AsRegister<GpuRegister>(), source.AsFpuRegister<FpuRegister>());
+ }
}
}
} else { // The destination is not a register. It must be a stack slot.
@@ -697,13 +713,13 @@ void CodeGeneratorMIPS64::MoveLocation(Location destination,
if (source.IsRegister() || source.IsFpuRegister()) {
if (unspecified_type) {
if (source.IsRegister()) {
- type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
+ dst_type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
} else {
- type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
+ dst_type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
}
}
- DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(type)) &&
- (source.IsFpuRegister() == Primitive::IsFloatingPointType(type)));
+ DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(dst_type)) &&
+ (source.IsFpuRegister() == Primitive::IsFloatingPointType(dst_type)));
// Move to stack from GPR/FPR
StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
if (source.IsRegister()) {
@@ -861,6 +877,14 @@ void CodeGeneratorMIPS64::MoveConstant(Location location, int32_t value) {
__ LoadConst32(location.AsRegister<GpuRegister>(), value);
}
+void CodeGeneratorMIPS64::AddLocationAsTemp(Location location, LocationSummary* locations) {
+ if (location.IsRegister()) {
+ locations->AddTemp(location);
+ } else {
+ UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
+ }
+}
+
Location CodeGeneratorMIPS64::GetStackLocation(HLoadLocal* load) const {
Primitive::Type type = load->GetType();
@@ -2566,15 +2590,24 @@ void InstructionCodeGeneratorMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke)
}
void LocationsBuilderMIPS64::VisitLoadClass(HLoadClass* cls) {
- LocationSummary::CallKind call_kind = cls->CanCallRuntime() ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister());
+ InvokeRuntimeCallingConvention calling_convention;
+ CodeGenerator::CreateLoadClassLocationSummary(
+ cls,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Location::RegisterLocation(A0));
}
void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) {
LocationSummary* locations = cls->GetLocations();
+ if (cls->NeedsAccessCheck()) {
+ codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex());
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInitializeTypeAndVerifyAccess),
+ cls,
+ cls->GetDexPc(),
+ nullptr);
+ return;
+ }
+
GpuRegister out = locations->Out().AsRegister<GpuRegister>();
GpuRegister current_method = locations->InAt(0).AsRegister<GpuRegister>();
if (cls->IsReferrersClass()) {
@@ -3118,6 +3151,74 @@ void InstructionCodeGeneratorMIPS64::VisitStaticFieldSet(HStaticFieldSet* instru
HandleFieldSet(instruction, instruction->GetFieldInfo());
}
+void LocationsBuilderMIPS64::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionMIPS64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionMIPS64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderMIPS64::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionMIPS64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionMIPS64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderMIPS64::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionMIPS64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionMIPS64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderMIPS64::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionMIPS64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionMIPS64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
void LocationsBuilderMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 16461d6c04..5e8f9e7f30 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -106,6 +106,31 @@ class InvokeRuntimeCallingConvention : public CallingConvention<GpuRegister, Fpu
DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
};
+class FieldAccessCallingConventionMIPS64 : public FieldAccessCallingConvention {
+ public:
+ FieldAccessCallingConventionMIPS64() {}
+
+ Location GetObjectLocation() const OVERRIDE {
+ return Location::RegisterLocation(A1);
+ }
+ Location GetFieldIndexLocation() const OVERRIDE {
+ return Location::RegisterLocation(A0);
+ }
+ Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ return Location::RegisterLocation(A0);
+ }
+ Location GetSetValueLocation(
+ Primitive::Type type ATTRIBUTE_UNUSED, bool is_instance) const OVERRIDE {
+ return is_instance ? Location::RegisterLocation(A2) : Location::RegisterLocation(A1);
+ }
+ Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ return Location::FpuRegisterLocation(F0);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionMIPS64);
+};
+
class ParallelMoveResolverMIPS64 : public ParallelMoveResolverWithSwap {
public:
ParallelMoveResolverMIPS64(ArenaAllocator* allocator, CodeGeneratorMIPS64* codegen)
@@ -280,11 +305,13 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
void Finalize(CodeAllocator* allocator) OVERRIDE;
// Code generation helpers.
-
- void MoveLocation(Location destination, Location source, Primitive::Type type);
+ void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
void MoveConstant(Location destination, int32_t value) OVERRIDE;
+ void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+
+
void SwapLocations(Location loc1, Location loc2, Primitive::Type type);
// Generate code to invoke a runtime entry point.
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 3d97132d9b..5ef7de01e1 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -827,7 +827,10 @@ void CodeGeneratorX86::Move64(Location destination, Location source) {
Location::RegisterLocation(destination.AsRegisterPairLow<Register>()),
Primitive::kPrimInt);
} else if (source.IsFpuRegister()) {
- LOG(FATAL) << "Unimplemented";
+ XmmRegister src_reg = source.AsFpuRegister<XmmRegister>();
+ __ movd(destination.AsRegisterPairLow<Register>(), src_reg);
+ __ psrlq(src_reg, Immediate(32));
+ __ movd(destination.AsRegisterPairHigh<Register>(), src_reg);
} else {
// No conflict possible, so just do the moves.
DCHECK(source.IsDoubleStackSlot());
@@ -840,6 +843,15 @@ void CodeGeneratorX86::Move64(Location destination, Location source) {
__ movaps(destination.AsFpuRegister<XmmRegister>(), source.AsFpuRegister<XmmRegister>());
} else if (source.IsDoubleStackSlot()) {
__ movsd(destination.AsFpuRegister<XmmRegister>(), Address(ESP, source.GetStackIndex()));
+ } else if (source.IsRegisterPair()) {
+ size_t elem_size = Primitive::ComponentSize(Primitive::kPrimInt);
+ // Create stack space for 2 elements.
+ __ subl(ESP, Immediate(2 * elem_size));
+ __ movl(Address(ESP, 0), source.AsRegisterPairLow<Register>());
+ __ movl(Address(ESP, elem_size), source.AsRegisterPairHigh<Register>());
+ __ movsd(destination.AsFpuRegister<XmmRegister>(), Address(ESP, 0));
+ // And remove the temporary stack space we allocated.
+ __ addl(ESP, Immediate(2 * elem_size));
} else {
LOG(FATAL) << "Unimplemented";
}
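
The new XMM-to-register-pair and register-pair-to-XMM paths above shuffle a 64-bit value through two 32-bit halves; the movd/psrlq(32)/movd sequence extracts exactly the low and high words of the double's bit pattern, while the reverse direction round-trips through a temporary stack slot. A standalone C++ sketch of that bit traffic, with memcpy and shifts standing in for the SSE instructions:

  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  int main() {
    double d = 3.5;
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof(bits));             // reinterpret the double's bit pattern
    uint32_t lo = static_cast<uint32_t>(bits);        // movd: low word -> pair low
    uint32_t hi = static_cast<uint32_t>(bits >> 32);  // psrlq 32 + movd: high word -> pair high
    std::printf("lo=0x%08x hi=0x%08x\n", lo, hi);
    return 0;
  }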
@@ -966,6 +978,25 @@ void CodeGeneratorX86::MoveConstant(Location location, int32_t value) {
__ movl(location.AsRegister<Register>(), Immediate(value));
}
+void CodeGeneratorX86::MoveLocation(Location dst, Location src, Primitive::Type dst_type) {
+ if (Primitive::Is64BitType(dst_type)) {
+ Move64(dst, src);
+ } else {
+ Move32(dst, src);
+ }
+}
+
+void CodeGeneratorX86::AddLocationAsTemp(Location location, LocationSummary* locations) {
+ if (location.IsRegister()) {
+ locations->AddTemp(location);
+ } else if (location.IsRegisterPair()) {
+ locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairLow<Register>()));
+ locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairHigh<Register>()));
+ } else {
+ UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
+ }
+}
+
void InstructionCodeGeneratorX86::HandleGoto(HInstruction* got, HBasicBlock* successor) {
DCHECK(!successor->IsExitBlock());
@@ -4085,6 +4116,74 @@ void InstructionCodeGeneratorX86::VisitInstanceFieldGet(HInstanceFieldGet* instr
HandleFieldGet(instruction, instruction->GetFieldInfo());
}
+void LocationsBuilderX86::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionX86 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionX86 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderX86::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionX86 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionX86 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderX86::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionX86 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionX86 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderX86::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionX86 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionX86 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
void LocationsBuilderX86::VisitNullCheck(HNullCheck* instruction) {
LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
? LocationSummary::kCallOnSlowPath
@@ -4890,17 +4989,24 @@ void ParallelMoveResolverX86::RestoreScratch(int reg) {
}
void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) {
- LocationSummary::CallKind call_kind = cls->CanCallRuntime()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister());
+ InvokeRuntimeCallingConvention calling_convention;
+ CodeGenerator::CreateLoadClassLocationSummary(
+ cls,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Location::RegisterLocation(EAX));
}
void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) {
LocationSummary* locations = cls->GetLocations();
+ if (cls->NeedsAccessCheck()) {
+ codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex());
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInitializeTypeAndVerifyAccess),
+ cls,
+ cls->GetDexPc(),
+ nullptr);
+ return;
+ }
+
Register out = locations->Out().AsRegister<Register>();
Register current_method = locations->InAt(0).AsRegister<Register>();
if (cls->IsReferrersClass()) {
@@ -5022,6 +5128,7 @@ void LocationsBuilderX86::VisitInstanceOf(HInstanceOf* instruction) {
case TypeCheckKind::kArrayObjectCheck:
call_kind = LocationSummary::kNoCall;
break;
+ case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
call_kind = LocationSummary::kCall;
break;
@@ -5062,10 +5169,11 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
__ j(kEqual, &zero);
}
- // In case of an interface check, we put the object class into the object register.
+ // In case of an interface/unresolved check, we put the object class into the object register.
// This is safe, as the register is caller-save, and the object must be in another
// register if it survives the runtime call.
- Register target = (instruction->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck)
+ Register target = (instruction->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck) ||
+ (instruction->GetTypeCheckKind() == TypeCheckKind::kUnresolvedCheck)
? obj
: out;
__ movl(target, Address(obj, class_offset));
@@ -5174,7 +5282,7 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
}
break;
}
-
+ case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
default: {
codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
@@ -5216,6 +5324,7 @@ void LocationsBuilderX86::VisitCheckCast(HCheckCast* instruction) {
: LocationSummary::kNoCall;
break;
case TypeCheckKind::kInterfaceCheck:
+ case TypeCheckKind::kUnresolvedCheck:
call_kind = LocationSummary::kCall;
break;
case TypeCheckKind::kArrayCheck:
@@ -5342,6 +5451,7 @@ void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) {
__ j(kNotEqual, slow_path->GetEntryLabel());
break;
}
+ case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
default:
codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 2c2fc65444..ae2d84f945 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -91,6 +91,36 @@ class InvokeDexCallingConventionVisitorX86 : public InvokeDexCallingConventionVi
DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorX86);
};
+class FieldAccessCallingConventionX86 : public FieldAccessCallingConvention {
+ public:
+ FieldAccessCallingConventionX86() {}
+
+ Location GetObjectLocation() const OVERRIDE {
+ return Location::RegisterLocation(ECX);
+ }
+ Location GetFieldIndexLocation() const OVERRIDE {
+ return Location::RegisterLocation(EAX);
+ }
+ Location GetReturnLocation(Primitive::Type type) const OVERRIDE {
+ return Primitive::Is64BitType(type)
+ ? Location::RegisterPairLocation(EAX, EDX)
+ : Location::RegisterLocation(EAX);
+ }
+ Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
+ return Primitive::Is64BitType(type)
+ ? Location::RegisterPairLocation(EDX, EBX)
+ : (is_instance
+ ? Location::RegisterLocation(EDX)
+ : Location::RegisterLocation(ECX));
+ }
+ Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ return Location::FpuRegisterLocation(XMM0);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionX86);
+};
+
class ParallelMoveResolverX86 : public ParallelMoveResolverWithSwap {
public:
ParallelMoveResolverX86(ArenaAllocator* allocator, CodeGeneratorX86* codegen)
@@ -228,6 +258,9 @@ class CodeGeneratorX86 : public CodeGenerator {
void Bind(HBasicBlock* block) OVERRIDE;
void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
void MoveConstant(Location destination, int32_t value) OVERRIDE;
+ void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
+ void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+
size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 6ea6138668..272d86fe94 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -990,6 +990,19 @@ void CodeGeneratorX86_64::MoveConstant(Location location, int32_t value) {
Load64BitValue(location.AsRegister<CpuRegister>(), static_cast<int64_t>(value));
}
+void CodeGeneratorX86_64::MoveLocation(
+ Location dst, Location src, Primitive::Type dst_type ATTRIBUTE_UNUSED) {
+ Move(dst, src);
+}
+
+void CodeGeneratorX86_64::AddLocationAsTemp(Location location, LocationSummary* locations) {
+ if (location.IsRegister()) {
+ locations->AddTemp(location);
+ } else {
+ UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
+ }
+}
+
void InstructionCodeGeneratorX86_64::HandleGoto(HInstruction* got, HBasicBlock* successor) {
DCHECK(!successor->IsExitBlock());
@@ -3849,6 +3862,74 @@ void InstructionCodeGeneratorX86_64::VisitStaticFieldSet(HStaticFieldSet* instru
HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
+void LocationsBuilderX86_64::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionX86_64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86_64::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionX86_64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderX86_64::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionX86_64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86_64::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionX86_64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderX86_64::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionX86_64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86_64::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionX86_64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderX86_64::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionX86_64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86_64::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionX86_64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
void LocationsBuilderX86_64::VisitNullCheck(HNullCheck* instruction) {
LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
? LocationSummary::kCallOnSlowPath
@@ -4613,17 +4694,24 @@ void InstructionCodeGeneratorX86_64::GenerateClassInitializationCheck(
}
void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) {
- LocationSummary::CallKind call_kind = cls->CanCallRuntime()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister());
+ InvokeRuntimeCallingConvention calling_convention;
+ CodeGenerator::CreateLoadClassLocationSummary(
+ cls,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Location::RegisterLocation(RAX));
}
void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) {
LocationSummary* locations = cls->GetLocations();
+ if (cls->NeedsAccessCheck()) {
+ codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex());
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInitializeTypeAndVerifyAccess),
+ cls,
+ cls->GetDexPc(),
+ nullptr);
+ return;
+ }
+
CpuRegister out = locations->Out().AsRegister<CpuRegister>();
CpuRegister current_method = locations->InAt(0).AsRegister<CpuRegister>();
if (cls->IsReferrersClass()) {
@@ -4736,6 +4824,7 @@ void LocationsBuilderX86_64::VisitInstanceOf(HInstanceOf* instruction) {
case TypeCheckKind::kArrayObjectCheck:
call_kind = LocationSummary::kNoCall;
break;
+ case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
call_kind = LocationSummary::kCall;
break;
@@ -4776,10 +4865,11 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
__ j(kEqual, &zero);
}
- // In case of an interface check, we put the object class into the object register.
+ // In case of an interface/unresolved check, we put the object class into the object register.
// This is safe, as the register is caller-save, and the object must be in another
// register if it survives the runtime call.
- CpuRegister target = (instruction->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck)
+ CpuRegister target = (instruction->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck) ||
+ (instruction->GetTypeCheckKind() == TypeCheckKind::kUnresolvedCheck)
? obj
: out;
__ movl(target, Address(obj, class_offset));
@@ -4893,7 +4983,7 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
}
break;
}
-
+ case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
default: {
codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
@@ -4934,6 +5024,7 @@ void LocationsBuilderX86_64::VisitCheckCast(HCheckCast* instruction) {
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
break;
+ case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
call_kind = LocationSummary::kCall;
break;
@@ -5061,6 +5152,7 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
__ j(kNotEqual, slow_path->GetEntryLabel());
break;
}
+ case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
default:
codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 197ce63847..ecc8630e6b 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -70,6 +70,35 @@ class InvokeDexCallingConvention : public CallingConvention<Register, FloatRegis
DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
};
+class FieldAccessCallingConventionX86_64 : public FieldAccessCallingConvention {
+ public:
+ FieldAccessCallingConventionX86_64() {}
+
+ Location GetObjectLocation() const OVERRIDE {
+ return Location::RegisterLocation(RSI);
+ }
+ Location GetFieldIndexLocation() const OVERRIDE {
+ return Location::RegisterLocation(RDI);
+ }
+ Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ return Location::RegisterLocation(RAX);
+ }
+ Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
+ return Primitive::Is64BitType(type)
+ ? Location::RegisterLocation(RDX)
+ : (is_instance
+ ? Location::RegisterLocation(RDX)
+ : Location::RegisterLocation(RSI));
+ }
+ Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ return Location::FpuRegisterLocation(XMM0);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionX86_64);
+};
+
+
class InvokeDexCallingConventionVisitorX86_64 : public InvokeDexCallingConventionVisitor {
public:
InvokeDexCallingConventionVisitorX86_64() {}
@@ -215,6 +244,9 @@ class CodeGeneratorX86_64 : public CodeGenerator {
void Bind(HBasicBlock* block) OVERRIDE;
void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
void MoveConstant(Location destination, int32_t value) OVERRIDE;
+ void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
+ void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+
size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 2c6c3b726a..d38f4c862f 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -398,6 +398,22 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
StartAttributeStream("intrinsic") << invoke->GetIntrinsic();
}
+ void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* field_access) OVERRIDE {
+ StartAttributeStream("field_type") << field_access->GetFieldType();
+ }
+
+ void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* field_access) OVERRIDE {
+ StartAttributeStream("field_type") << field_access->GetFieldType();
+ }
+
+ void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* field_access) OVERRIDE {
+ StartAttributeStream("field_type") << field_access->GetFieldType();
+ }
+
+ void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* field_access) OVERRIDE {
+ StartAttributeStream("field_type") << field_access->GetFieldType();
+ }
+
void VisitTryBoundary(HTryBoundary* try_boundary) OVERRIDE {
StartAttributeStream("kind") << (try_boundary->IsEntry() ? "entry" : "exit");
}
@@ -485,8 +501,11 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
StartAttributeStream("can_be_null")
<< std::boolalpha << instruction->CanBeNull() << std::noboolalpha;
StartAttributeStream("exact") << std::boolalpha << info.IsExact() << std::noboolalpha;
+ } else if (instruction->IsLoadClass()) {
+ StartAttributeStream("klass") << "unresolved";
} else {
- DCHECK(!is_after_pass_) << "Type info should be valid after reference type propagation";
+ DCHECK(!is_after_pass_)
+ << "Expected a valid rti after reference type propagation";
}
}
if (disasm_info_ != nullptr) {
diff --git a/compiler/optimizing/gvn_test.cc b/compiler/optimizing/gvn_test.cc
index 32f45b5669..56f2718264 100644
--- a/compiler/optimizing/gvn_test.cc
+++ b/compiler/optimizing/gvn_test.cc
@@ -47,14 +47,16 @@ TEST(GVNTest, LocalFieldElimination) {
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache));
+ dex_cache,
+ 0));
block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
Primitive::kPrimNot,
MemberOffset(42),
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache));
+ dex_cache,
+ 0));
HInstruction* to_remove = block->GetLastInstruction();
block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
Primitive::kPrimNot,
@@ -62,7 +64,8 @@ TEST(GVNTest, LocalFieldElimination) {
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache));
+ dex_cache,
+ 0));
HInstruction* different_offset = block->GetLastInstruction();
// Kill the value.
block->AddInstruction(new (&allocator) HInstanceFieldSet(parameter,
@@ -72,14 +75,16 @@ TEST(GVNTest, LocalFieldElimination) {
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache));
+ dex_cache,
+ 0));
block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
Primitive::kPrimNot,
MemberOffset(42),
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache));
+ dex_cache,
+ 0));
HInstruction* use_after_kill = block->GetLastInstruction();
block->AddInstruction(new (&allocator) HExit());
@@ -118,7 +123,8 @@ TEST(GVNTest, GlobalFieldElimination) {
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache));
+ dex_cache,
+ 0));
block->AddInstruction(new (&allocator) HIf(block->GetLastInstruction()));
HBasicBlock* then = new (&allocator) HBasicBlock(graph);
@@ -139,7 +145,8 @@ TEST(GVNTest, GlobalFieldElimination) {
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache));
+ dex_cache,
+ 0));
then->AddInstruction(new (&allocator) HGoto());
else_->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
Primitive::kPrimBoolean,
@@ -147,7 +154,8 @@ TEST(GVNTest, GlobalFieldElimination) {
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache));
+ dex_cache,
+ 0));
else_->AddInstruction(new (&allocator) HGoto());
join->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
Primitive::kPrimBoolean,
@@ -155,7 +163,8 @@ TEST(GVNTest, GlobalFieldElimination) {
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache));
+ dex_cache,
+ 0));
join->AddInstruction(new (&allocator) HExit());
graph->TryBuildingSsa();
@@ -191,7 +200,8 @@ TEST(GVNTest, LoopFieldElimination) {
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache));
+ dex_cache,
+ 0));
block->AddInstruction(new (&allocator) HGoto());
HBasicBlock* loop_header = new (&allocator) HBasicBlock(graph);
@@ -212,7 +222,8 @@ TEST(GVNTest, LoopFieldElimination) {
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache));
+ dex_cache,
+ 0));
HInstruction* field_get_in_loop_header = loop_header->GetLastInstruction();
loop_header->AddInstruction(new (&allocator) HIf(block->GetLastInstruction()));
@@ -225,7 +236,8 @@ TEST(GVNTest, LoopFieldElimination) {
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache));
+ dex_cache,
+ 0));
HInstruction* field_set = loop_body->GetLastInstruction();
loop_body->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
Primitive::kPrimBoolean,
@@ -233,7 +245,8 @@ TEST(GVNTest, LoopFieldElimination) {
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache));
+ dex_cache,
+ 0));
HInstruction* field_get_in_loop_body = loop_body->GetLastInstruction();
loop_body->AddInstruction(new (&allocator) HGoto());
@@ -243,7 +256,8 @@ TEST(GVNTest, LoopFieldElimination) {
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache));
+ dex_cache,
+ 0));
HInstruction* field_get_in_exit = exit->GetLastInstruction();
exit->AddInstruction(new (&allocator) HExit());
@@ -339,7 +353,8 @@ TEST(GVNTest, LoopSideEffects) {
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache));
+ dex_cache,
+ 0));
SideEffectsAnalysis side_effects(graph);
side_effects.Run();
@@ -362,7 +377,8 @@ TEST(GVNTest, LoopSideEffects) {
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache),
+ dex_cache,
+ 0),
outer_loop_body->GetLastInstruction());
SideEffectsAnalysis side_effects(graph);
@@ -386,7 +402,8 @@ TEST(GVNTest, LoopSideEffects) {
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache),
+ dex_cache,
+ 0),
inner_loop_body->GetLastInstruction());
SideEffectsAnalysis side_effects(graph);
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 039029aa52..f3b5f08c7e 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -43,6 +43,11 @@ namespace art {
static constexpr size_t kMaximumNumberOfHInstructions = 12;
void HInliner::Run() {
+ const CompilerOptions& compiler_options = compiler_driver_->GetCompilerOptions();
+ if ((compiler_options.GetInlineDepthLimit() == 0)
+ || (compiler_options.GetInlineMaxCodeUnits() == 0)) {
+ return;
+ }
if (graph_->IsDebuggable()) {
// For simplicity, we currently never inline when the graph is debuggable. This avoids
// doing some logic in the runtime to discover if a method could have been inlined.
@@ -247,12 +252,14 @@ bool HInliner::TryInline(HInvoke* invoke_instruction) {
return false;
}
- uint16_t class_def_idx = resolved_method->GetDeclaringClass()->GetDexClassDefIndex();
- if (!compiler_driver_->IsMethodVerifiedWithoutFailures(
- resolved_method->GetDexMethodIndex(), class_def_idx, *resolved_method->GetDexFile())) {
- VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
- << " couldn't be verified, so it cannot be inlined";
- return false;
+ if (!resolved_method->GetDeclaringClass()->IsVerified()) {
+ uint16_t class_def_idx = resolved_method->GetDeclaringClass()->GetDexClassDefIndex();
+ if (!compiler_driver_->IsMethodVerifiedWithoutFailures(
+ resolved_method->GetDexMethodIndex(), class_def_idx, *resolved_method->GetDexFile())) {
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
+ << " couldn't be verified, so it cannot be inlined";
+ return false;
+ }
}
if (invoke_instruction->IsInvokeStaticOrDirect() &&
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 3287a0a119..86a3ad98b4 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -216,7 +216,11 @@ static bool TypeCheckHasKnownOutcome(HLoadClass* klass, HInstruction* object, bo
}
ReferenceTypeInfo class_rti = klass->GetLoadedClassRTI();
- DCHECK(class_rti.IsValid() && class_rti.IsExact());
+ if (!class_rti.IsValid()) {
+ // Happens when the loaded class is unresolved.
+ return false;
+ }
+ DCHECK(class_rti.IsExact());
if (class_rti.IsSupertypeOf(obj_rti)) {
*outcome = true;
return true;
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 1b4d1614f1..b60905d682 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -404,6 +404,29 @@ static void GenMinMax(LocationSummary* locations,
GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+ // Some architectures, such as ARM and MIPS (prior to r6), have a
+ // conditional move instruction which only changes the target
+ // (output) register if the condition is true (MIPS prior to r6 had
+ // MOVF, MOVT, and MOVZ). The SELEQZ and SELNEZ instructions always
+ // change the target (output) register. If the condition is true the
+ // output register gets the contents of the "rs" register; otherwise,
+ // the output register is set to zero. One consequence of this is
+ // that to implement something like "rd = c==0 ? rs : rt" MIPS64r6
+ // needs to use a pair of SELEQZ/SELNEZ instructions. After
+ // executing this pair of instructions one of the output registers
+ // from the pair will necessarily contain zero. Then the code ORs the
+ // output registers from the SELEQZ/SELNEZ instructions to get the
+ // final result.
+ //
+  // The initial test to see if the output register is the same as the
+  // first input register is needed to make sure that the value in the
+  // first input register isn't clobbered before we've finished
+  // computing the output value. The logic in the corresponding else
+  // clause performs the same task but makes sure the second input
+  // register isn't clobbered in the event that it's the same register
+  // as the output register; the else clause also handles the case
+  // where the output register is distinct from both the first and the
+  // second input registers.
if (out == lhs) {
__ Slt(AT, rhs, lhs);
if (is_min) {
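
Per the comment above, each of SELEQZ/SELNEZ yields either its source register or zero, so ORing a complementary pair reconstructs a full conditional move. A standalone model (the function names mirror the instructions; this is an illustration, not the ART assembler API):

  #include <cassert>
  #include <cstdint>

  uint64_t seleqz(uint64_t rs, uint64_t rt) { return rt == 0 ? rs : 0; }  // rd = (rt == 0) ? rs : 0
  uint64_t selnez(uint64_t rs, uint64_t rt) { return rt != 0 ? rs : 0; }  // rd = (rt != 0) ? rs : 0

  // rd = cond ? a : b -- at most one term is nonzero, so OR merges them.
  uint64_t cond_move(uint64_t cond, uint64_t a, uint64_t b) {
    return selnez(a, cond) | seleqz(b, cond);
  }

  int main() {
    assert(cond_move(1, 7, 9) == 7);
    assert(cond_move(0, 7, 9) == 9);
    return 0;
  }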
@@ -512,13 +535,12 @@ void IntrinsicLocationsBuilderMIPS64::VisitMathFloor(HInvoke* invoke) {
CreateFPToFP(arena_, invoke);
}
-// 0x200 - +zero
-// 0x040 - +infinity
-// 0x020 - -zero
-// 0x004 - -infinity
-// 0x002 - quiet NaN
-// 0x001 - signaling NaN
-const constexpr uint16_t CLASS_MASK = 0x267;
+const constexpr uint16_t kFPLeaveUnchanged = kPositiveZero |
+ kPositiveInfinity |
+ kNegativeZero |
+ kNegativeInfinity |
+ kQuietNaN |
+ kSignalingNaN;
void IntrinsicCodeGeneratorMIPS64::VisitMathFloor(HInvoke* invoke) {
LocationSummary* locations = invoke->GetLocations();
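
The bit values come from the comment being removed here (0x200 = +zero, 0x040 = +infinity, 0x020 = -zero, 0x004 = -infinity, 0x002 = quiet NaN, 0x001 = signaling NaN), so the named constants should still OR to the old 0x267 mask. A standalone check, assuming those values for the k* constants:

  #include <cstdint>

  // Assumed values, taken from the removed CLASS_MASK comment.
  constexpr uint16_t kPositiveZero     = 0x200;
  constexpr uint16_t kPositiveInfinity = 0x040;
  constexpr uint16_t kNegativeZero     = 0x020;
  constexpr uint16_t kNegativeInfinity = 0x004;
  constexpr uint16_t kQuietNaN         = 0x002;
  constexpr uint16_t kSignalingNaN     = 0x001;

  static_assert((kPositiveZero | kPositiveInfinity | kNegativeZero |
                 kNegativeInfinity | kQuietNaN | kSignalingNaN) == 0x267,
                "named class bits must match the old CLASS_MASK");

  int main() { return 0; }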
@@ -534,7 +556,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathFloor(HInvoke* invoke) {
// }
__ ClassD(out, in);
__ Dmfc1(AT, out);
- __ Andi(AT, AT, CLASS_MASK); // +0.0 | +Inf | -0.0 | -Inf | qNaN | sNaN
+ __ Andi(AT, AT, kFPLeaveUnchanged); // +0.0 | +Inf | -0.0 | -Inf | qNaN | sNaN
__ MovD(out, in);
__ Bnezc(AT, &done);
@@ -583,7 +605,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathCeil(HInvoke* invoke) {
// }
__ ClassD(out, in);
__ Dmfc1(AT, out);
- __ Andi(AT, AT, CLASS_MASK); // +0.0 | +Inf | -0.0 | -Inf | qNaN | sNaN
+ __ Andi(AT, AT, kFPLeaveUnchanged); // +0.0 | +Inf | -0.0 | -Inf | qNaN | sNaN
__ MovD(out, in);
__ Bnezc(AT, &done);
diff --git a/compiler/optimizing/licm_test.cc b/compiler/optimizing/licm_test.cc
index ec4a9ec916..558892d01c 100644
--- a/compiler/optimizing/licm_test.cc
+++ b/compiler/optimizing/licm_test.cc
@@ -106,11 +106,11 @@ TEST_F(LICMTest, FieldHoisting) {
NullHandle<mirror::DexCache> dex_cache;
HInstruction* get_field = new (&allocator_) HInstanceFieldGet(
parameter_, Primitive::kPrimLong, MemberOffset(10),
- false, kUnknownFieldIndex, graph_->GetDexFile(), dex_cache);
+ false, kUnknownFieldIndex, graph_->GetDexFile(), dex_cache, 0);
loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction());
HInstruction* set_field = new (&allocator_) HInstanceFieldSet(
parameter_, constant_, Primitive::kPrimInt, MemberOffset(20),
- false, kUnknownFieldIndex, graph_->GetDexFile(), dex_cache);
+ false, kUnknownFieldIndex, graph_->GetDexFile(), dex_cache, 0);
loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction());
EXPECT_EQ(get_field->GetBlock(), loop_body_);
@@ -127,11 +127,11 @@ TEST_F(LICMTest, NoFieldHoisting) {
NullHandle<mirror::DexCache> dex_cache;
HInstruction* get_field = new (&allocator_) HInstanceFieldGet(
parameter_, Primitive::kPrimLong, MemberOffset(10),
- false, kUnknownFieldIndex, graph_->GetDexFile(), dex_cache);
+ false, kUnknownFieldIndex, graph_->GetDexFile(), dex_cache, 0);
loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction());
HInstruction* set_field = new (&allocator_) HInstanceFieldSet(
parameter_, get_field, Primitive::kPrimLong, MemberOffset(10),
- false, kUnknownFieldIndex, graph_->GetDexFile(), dex_cache);
+ false, kUnknownFieldIndex, graph_->GetDexFile(), dex_cache, 0);
loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction());
EXPECT_EQ(get_field->GetBlock(), loop_body_);
@@ -146,7 +146,7 @@ TEST_F(LICMTest, ArrayHoisting) {
// Populate the loop with instructions: set/get array with different types.
HInstruction* get_array = new (&allocator_) HArrayGet(
- parameter_, constant_, Primitive::kPrimLong);
+ parameter_, constant_, Primitive::kPrimLong, 0);
loop_body_->InsertInstructionBefore(get_array, loop_body_->GetLastInstruction());
HInstruction* set_array = new (&allocator_) HArraySet(
parameter_, constant_, constant_, Primitive::kPrimInt, 0);
@@ -164,7 +164,7 @@ TEST_F(LICMTest, NoArrayHoisting) {
// Populate the loop with instructions: set/get array with same types.
HInstruction* get_array = new (&allocator_) HArrayGet(
- parameter_, constant_, Primitive::kPrimLong);
+ parameter_, constant_, Primitive::kPrimLong, 0);
loop_body_->InsertInstructionBefore(get_array, loop_body_->GetLastInstruction());
HInstruction* set_array = new (&allocator_) HArraySet(
parameter_, get_array, constant_, Primitive::kPrimLong, 0);
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index d52f5927de..dbf46ce3f4 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1067,6 +1067,10 @@ class HLoopInformationOutwardIterator : public ValueObject {
M(Shr, BinaryOperation) \
M(StaticFieldGet, Instruction) \
M(StaticFieldSet, Instruction) \
+ M(UnresolvedInstanceFieldGet, Instruction) \
+ M(UnresolvedInstanceFieldSet, Instruction) \
+ M(UnresolvedStaticFieldGet, Instruction) \
+ M(UnresolvedStaticFieldSet, Instruction) \
M(StoreLocal, Instruction) \
M(Sub, BinaryOperation) \
M(SuspendCheck, Instruction) \
@@ -1711,7 +1715,7 @@ std::ostream& operator<<(std::ostream& os, const ReferenceTypeInfo& rhs);
class HInstruction : public ArenaObject<kArenaAllocInstruction> {
public:
- HInstruction(SideEffects side_effects, uint32_t dex_pc = kNoDexPc)
+ HInstruction(SideEffects side_effects, uint32_t dex_pc)
: previous_(nullptr),
next_(nullptr),
block_(nullptr),
@@ -2068,7 +2072,7 @@ class HBackwardInstructionIterator : public ValueObject {
template<size_t N>
class HTemplateInstruction: public HInstruction {
public:
- HTemplateInstruction<N>(SideEffects side_effects, uint32_t dex_pc = kNoDexPc)
+ HTemplateInstruction<N>(SideEffects side_effects, uint32_t dex_pc)
: HInstruction(side_effects, dex_pc), inputs_() {}
virtual ~HTemplateInstruction() {}
@@ -2095,7 +2099,7 @@ class HTemplateInstruction: public HInstruction {
template<>
class HTemplateInstruction<0>: public HInstruction {
public:
- explicit HTemplateInstruction<0>(SideEffects side_effects, uint32_t dex_pc = kNoDexPc)
+ explicit HTemplateInstruction<0>(SideEffects side_effects, uint32_t dex_pc)
: HInstruction(side_effects, dex_pc) {}
virtual ~HTemplateInstruction() {}
@@ -2121,7 +2125,7 @@ class HTemplateInstruction<0>: public HInstruction {
template<intptr_t N>
class HExpression : public HTemplateInstruction<N> {
public:
- HExpression<N>(Primitive::Type type, SideEffects side_effects, uint32_t dex_pc = kNoDexPc)
+ HExpression<N>(Primitive::Type type, SideEffects side_effects, uint32_t dex_pc)
: HTemplateInstruction<N>(side_effects, dex_pc), type_(type) {}
virtual ~HExpression() {}
@@ -4206,7 +4210,7 @@ class HInstanceFieldGet : public HExpression<1> {
uint32_t field_idx,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache,
- uint32_t dex_pc = kNoDexPc)
+ uint32_t dex_pc)
: HExpression(
field_type,
SideEffects::FieldReadOfType(field_type, is_volatile), dex_pc),
@@ -4252,7 +4256,7 @@ class HInstanceFieldSet : public HTemplateInstruction<2> {
uint32_t field_idx,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache,
- uint32_t dex_pc = kNoDexPc)
+ uint32_t dex_pc)
: HTemplateInstruction(
SideEffects::FieldWriteOfType(field_type, is_volatile), dex_pc),
field_info_(field_offset, field_type, is_volatile, field_idx, dex_file, dex_cache),
@@ -4287,7 +4291,7 @@ class HArrayGet : public HExpression<2> {
HArrayGet(HInstruction* array,
HInstruction* index,
Primitive::Type type,
- uint32_t dex_pc = kNoDexPc)
+ uint32_t dex_pc)
: HExpression(type, SideEffects::ArrayReadOfType(type), dex_pc) {
SetRawInputAt(0, array);
SetRawInputAt(1, index);
@@ -4403,7 +4407,7 @@ class HArraySet : public HTemplateInstruction<3> {
class HArrayLength : public HExpression<1> {
public:
- explicit HArrayLength(HInstruction* array, uint32_t dex_pc = kNoDexPc)
+ explicit HArrayLength(HInstruction* array, uint32_t dex_pc)
: HExpression(Primitive::kPrimInt, SideEffects::None(), dex_pc) {
// Note that arrays do not change length, so the instruction does not
// depend on any write.
@@ -4509,12 +4513,14 @@ class HLoadClass : public HExpression<1> {
uint16_t type_index,
const DexFile& dex_file,
bool is_referrers_class,
- uint32_t dex_pc)
+ uint32_t dex_pc,
+ bool needs_access_check)
: HExpression(Primitive::kPrimNot, SideEffectsForArchRuntimeCalls(), dex_pc),
type_index_(type_index),
dex_file_(dex_file),
is_referrers_class_(is_referrers_class),
generate_clinit_check_(false),
+ needs_access_check_(needs_access_check),
loaded_class_rti_(ReferenceTypeInfo::CreateInvalid()) {
SetRawInputAt(0, current_method);
}
@@ -4534,19 +4540,22 @@ class HLoadClass : public HExpression<1> {
bool NeedsEnvironment() const OVERRIDE {
// Will call runtime and load the class if the class is not loaded yet.
// TODO: finer grain decision.
- return !is_referrers_class_;
+ return !is_referrers_class_ || needs_access_check_;
}
bool MustGenerateClinitCheck() const {
return generate_clinit_check_;
}
-
void SetMustGenerateClinitCheck(bool generate_clinit_check) {
generate_clinit_check_ = generate_clinit_check;
}
bool CanCallRuntime() const {
- return MustGenerateClinitCheck() || !is_referrers_class_;
+ return MustGenerateClinitCheck() || !is_referrers_class_ || needs_access_check_;
+ }
+
+ bool NeedsAccessCheck() const {
+ return needs_access_check_;
}
bool CanThrow() const OVERRIDE {
@@ -4582,6 +4591,7 @@ class HLoadClass : public HExpression<1> {
// Whether this instruction must generate the initialization check.
// Used for code generation.
bool generate_clinit_check_;
+ bool needs_access_check_;
ReferenceTypeInfo loaded_class_rti_;
@@ -4665,7 +4675,7 @@ class HStaticFieldGet : public HExpression<1> {
uint32_t field_idx,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache,
- uint32_t dex_pc = kNoDexPc)
+ uint32_t dex_pc)
: HExpression(
field_type,
SideEffects::FieldReadOfType(field_type, is_volatile), dex_pc),
@@ -4708,7 +4718,7 @@ class HStaticFieldSet : public HTemplateInstruction<2> {
uint32_t field_idx,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache,
- uint32_t dex_pc = kNoDexPc)
+ uint32_t dex_pc)
: HTemplateInstruction(
SideEffects::FieldWriteOfType(field_type, is_volatile), dex_pc),
field_info_(field_offset, field_type, is_volatile, field_idx, dex_file, dex_cache),
@@ -4735,6 +4745,112 @@ class HStaticFieldSet : public HTemplateInstruction<2> {
DISALLOW_COPY_AND_ASSIGN(HStaticFieldSet);
};
+class HUnresolvedInstanceFieldGet : public HExpression<1> {
+ public:
+ HUnresolvedInstanceFieldGet(HInstruction* obj,
+ Primitive::Type field_type,
+ uint32_t field_index,
+ uint32_t dex_pc)
+ : HExpression(field_type, SideEffects::AllExceptGCDependency(), dex_pc),
+ field_index_(field_index) {
+ SetRawInputAt(0, obj);
+ }
+
+ bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool CanThrow() const OVERRIDE { return true; }
+
+ Primitive::Type GetFieldType() const { return GetType(); }
+ uint32_t GetFieldIndex() const { return field_index_; }
+
+ DECLARE_INSTRUCTION(UnresolvedInstanceFieldGet);
+
+ private:
+ const uint32_t field_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(HUnresolvedInstanceFieldGet);
+};
+
+class HUnresolvedInstanceFieldSet : public HTemplateInstruction<2> {
+ public:
+ HUnresolvedInstanceFieldSet(HInstruction* obj,
+ HInstruction* value,
+ Primitive::Type field_type,
+ uint32_t field_index,
+ uint32_t dex_pc)
+ : HTemplateInstruction(SideEffects::AllExceptGCDependency(), dex_pc),
+ field_type_(field_type),
+ field_index_(field_index) {
+ DCHECK_EQ(field_type, value->GetType());
+ SetRawInputAt(0, obj);
+ SetRawInputAt(1, value);
+ }
+
+ bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool CanThrow() const OVERRIDE { return true; }
+
+ Primitive::Type GetFieldType() const { return field_type_; }
+ uint32_t GetFieldIndex() const { return field_index_; }
+
+ DECLARE_INSTRUCTION(UnresolvedInstanceFieldSet);
+
+ private:
+ const Primitive::Type field_type_;
+ const uint32_t field_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(HUnresolvedInstanceFieldSet);
+};
+
+class HUnresolvedStaticFieldGet : public HExpression<0> {
+ public:
+ HUnresolvedStaticFieldGet(Primitive::Type field_type,
+ uint32_t field_index,
+ uint32_t dex_pc)
+ : HExpression(field_type, SideEffects::AllExceptGCDependency(), dex_pc),
+ field_index_(field_index) {
+ }
+
+ bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool CanThrow() const OVERRIDE { return true; }
+
+ Primitive::Type GetFieldType() const { return GetType(); }
+ uint32_t GetFieldIndex() const { return field_index_; }
+
+ DECLARE_INSTRUCTION(UnresolvedStaticFieldGet);
+
+ private:
+ const uint32_t field_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(HUnresolvedStaticFieldGet);
+};
+
+class HUnresolvedStaticFieldSet : public HTemplateInstruction<1> {
+ public:
+ HUnresolvedStaticFieldSet(HInstruction* value,
+ Primitive::Type field_type,
+ uint32_t field_index,
+ uint32_t dex_pc)
+ : HTemplateInstruction(SideEffects::AllExceptGCDependency(), dex_pc),
+ field_type_(field_type),
+ field_index_(field_index) {
+ DCHECK_EQ(field_type, value->GetType());
+ SetRawInputAt(0, value);
+ }
+
+ bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool CanThrow() const OVERRIDE { return true; }
+
+ Primitive::Type GetFieldType() const { return field_type_; }
+ uint32_t GetFieldIndex() const { return field_index_; }
+
+ DECLARE_INSTRUCTION(UnresolvedStaticFieldSet);
+
+ private:
+ const Primitive::Type field_type_;
+ const uint32_t field_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(HUnresolvedStaticFieldSet);
+};
+
// Implement the move-exception DEX instruction.
class HLoadException : public HExpression<0> {
public:
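
A hedged usage sketch of the new unresolved field nodes above (my construction, not from the patch; arena, obj, resolved_field, and the other locals are hypothetical): the graph builder can fall back to the unresolved variant whenever field resolution fails, instead of rejecting the whole method:

    HInstruction* load = nullptr;
    if (resolved_field == nullptr) {
      // Resolution failed: emit a node that looks the field up at runtime.
      load = new (arena) HUnresolvedInstanceFieldGet(obj, field_type, field_idx, dex_pc);
    } else {
      load = new (arena) HInstanceFieldGet(obj, field_type, resolved_field->GetOffset(),
                                           is_volatile, field_idx, dex_file, dex_cache, dex_pc);
    }
    current_block->AddInstruction(load);
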
@@ -4787,6 +4903,7 @@ class HThrow : public HTemplateInstruction<1> {
* or `HCheckCast`.
*/
enum class TypeCheckKind {
+ kUnresolvedCheck, // Check against an unresolved type.
kExactCheck, // Can do a single class compare.
kClassHierarchyCheck, // Can just walk the super class chain.
kAbstractClassCheck, // Can just walk the super class chain, starting one up.
diff --git a/compiler/optimizing/nodes_x86.h b/compiler/optimizing/nodes_x86.h
index ddc5730215..f7cc872419 100644
--- a/compiler/optimizing/nodes_x86.h
+++ b/compiler/optimizing/nodes_x86.h
@@ -23,7 +23,8 @@ namespace art {
class HX86ComputeBaseMethodAddress : public HExpression<0> {
public:
// Treat the value as an int32_t, but it is really a 32 bit native pointer.
- HX86ComputeBaseMethodAddress() : HExpression(Primitive::kPrimInt, SideEffects::None()) {}
+ HX86ComputeBaseMethodAddress()
+ : HExpression(Primitive::kPrimInt, SideEffects::None(), kNoDexPc) {}
DECLARE_INSTRUCTION(X86ComputeBaseMethodAddress);
@@ -37,7 +38,7 @@ class HX86LoadFromConstantTable : public HExpression<2> {
HX86LoadFromConstantTable(HX86ComputeBaseMethodAddress* method_base,
HConstant* constant,
bool needs_materialization = true)
- : HExpression(constant->GetType(), SideEffects::None()),
+ : HExpression(constant->GetType(), SideEffects::None(), kNoDexPc),
needs_materialization_(needs_materialization) {
SetRawInputAt(0, method_base);
SetRawInputAt(1, constant);
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index dbfbd96e39..12d6b03a12 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -310,9 +310,6 @@ class OptimizingCompiler FINAL : public Compiler {
std::unique_ptr<std::ostream> visualizer_output_;
- // Delegate to Quick in case the optimizing compiler cannot compile a method.
- std::unique_ptr<Compiler> delegate_;
-
DISALLOW_COPY_AND_ASSIGN(OptimizingCompiler);
};
@@ -321,11 +318,9 @@ static const int kMaximumCompilationTimeBeforeWarning = 100; /* ms */
OptimizingCompiler::OptimizingCompiler(CompilerDriver* driver)
: Compiler(driver, kMaximumCompilationTimeBeforeWarning),
run_optimizations_(
- driver->GetCompilerOptions().GetCompilerFilter() != CompilerOptions::kTime),
- delegate_(Create(driver, Compiler::Kind::kQuick)) {}
+ driver->GetCompilerOptions().GetCompilerFilter() != CompilerOptions::kTime) {}
void OptimizingCompiler::Init() {
- delegate_->Init();
// Enable C1visualizer output. Must be done in Init() because the compiler
// driver is not fully initialized when passed to the compiler's constructor.
CompilerDriver* driver = GetCompilerDriver();
@@ -344,7 +339,6 @@ void OptimizingCompiler::Init() {
}
void OptimizingCompiler::UnInit() const {
- delegate_->UnInit();
}
OptimizingCompiler::~OptimizingCompiler() {
@@ -353,8 +347,7 @@ OptimizingCompiler::~OptimizingCompiler() {
}
}
-void OptimizingCompiler::InitCompilationUnit(CompilationUnit& cu) const {
- delegate_->InitCompilationUnit(cu);
+void OptimizingCompiler::InitCompilationUnit(CompilationUnit& cu ATTRIBUTE_UNUSED) const {
}
bool OptimizingCompiler::CanCompileMethod(uint32_t method_idx ATTRIBUTE_UNUSED,
@@ -569,6 +562,9 @@ static ArenaVector<LinkerPatch> EmitAndSortLinkerPatches(CodeGenerator* codegen)
return linker_patches;
}
+// TODO: The function below uses too much stack space. Bug: 24698147
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wframe-larger-than="
CompiledMethod* OptimizingCompiler::CompileOptimized(HGraph* graph,
CodeGenerator* codegen,
CompilerDriver* compiler_driver,
@@ -618,6 +614,7 @@ CompiledMethod* OptimizingCompiler::CompileOptimized(HGraph* graph,
soa.Self()->TransitionFromSuspendedToRunnable();
return compiled_method;
}
+#pragma GCC diagnostic pop
CompiledMethod* OptimizingCompiler::CompileBaseline(
CodeGenerator* codegen,
@@ -829,8 +826,12 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
return compiled_method;
}
-static bool HasOnlyUnresolvedFailures(const VerifiedMethod* verified_method) {
- uint32_t unresolved_mask = verifier::VerifyError::VERIFY_ERROR_NO_CLASS;
+static bool CanHandleVerificationFailure(const VerifiedMethod* verified_method) {
+ // For access errors the compiler will use the unresolved helpers (e.g. HInvokeUnresolved).
+ uint32_t unresolved_mask = verifier::VerifyError::VERIFY_ERROR_NO_CLASS
+ | verifier::VerifyError::VERIFY_ERROR_ACCESS_CLASS
+ | verifier::VerifyError::VERIFY_ERROR_ACCESS_FIELD
+ | verifier::VerifyError::VERIFY_ERROR_ACCESS_METHOD;
return (verified_method->GetEncounteredVerificationFailures() & (~unresolved_mask)) == 0;
}
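
A quick worked check of the mask logic with illustrative values: a method whose only recorded failure is an access error now passes the filter, while any other verification error still leaves a bit set and rejects it:

    uint32_t failures = verifier::VerifyError::VERIFY_ERROR_ACCESS_FIELD;
    bool compilable = (failures & ~unresolved_mask) == 0;  // true: handled via unresolved helpers
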
@@ -847,7 +848,7 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
const VerifiedMethod* verified_method = compiler_driver->GetVerifiedMethod(&dex_file, method_idx);
DCHECK(!verified_method->HasRuntimeThrow());
if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file)
- || HasOnlyUnresolvedFailures(verified_method)) {
+ || CanHandleVerificationFailure(verified_method)) {
method = TryCompile(code_item, access_flags, invoke_type, class_def_idx,
method_idx, jclass_loader, dex_file, dex_cache);
} else {
@@ -858,15 +859,6 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
}
}
- if (method != nullptr) {
- return method;
- }
- method = delegate_->Compile(code_item, access_flags, invoke_type, class_def_idx, method_idx,
- jclass_loader, dex_file, dex_cache);
-
- if (method != nullptr) {
- MaybeRecordStat(MethodCompilationStat::kCompiledQuick);
- }
return method;
}
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index c7701b70ad..6375cf1a56 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -29,11 +29,12 @@ enum MethodCompilationStat {
kAttemptCompilation = 0,
kCompiledBaseline,
kCompiledOptimized,
- kCompiledQuick,
kInlinedInvoke,
kInstructionSimplifications,
kInstructionSimplificationsArch,
kUnresolvedMethod,
+ kUnresolvedField,
+ kUnresolvedFieldNotAFastAccess,
kNotCompiledBranchOutsideMethodCode,
kNotCompiledCannotBuildSSA,
kNotCompiledCantAccesType,
@@ -45,7 +46,6 @@ enum MethodCompilationStat {
kNotCompiledPathological,
kNotCompiledSpaceFilter,
kNotCompiledUnhandledInstruction,
- kNotCompiledUnresolvedField,
kNotCompiledUnsupportedIsa,
kNotCompiledVerifyAtRuntime,
kNotOptimizedDisabled,
@@ -73,14 +73,11 @@ class OptimizingCompilerStats {
compile_stats_[kCompiledBaseline] * 100 / compile_stats_[kAttemptCompilation];
size_t optimized_percent =
compile_stats_[kCompiledOptimized] * 100 / compile_stats_[kAttemptCompilation];
- size_t quick_percent =
- compile_stats_[kCompiledQuick] * 100 / compile_stats_[kAttemptCompilation];
std::ostringstream oss;
oss << "Attempted compilation of " << compile_stats_[kAttemptCompilation] << " methods: ";
oss << unoptimized_percent << "% (" << compile_stats_[kCompiledBaseline] << ") unoptimized, ";
oss << optimized_percent << "% (" << compile_stats_[kCompiledOptimized] << ") optimized, ";
- oss << quick_percent << "% (" << compile_stats_[kCompiledQuick] << ") quick.";
LOG(INFO) << oss.str();
@@ -99,11 +96,12 @@ class OptimizingCompilerStats {
case kAttemptCompilation : return "kAttemptCompilation";
case kCompiledBaseline : return "kCompiledBaseline";
case kCompiledOptimized : return "kCompiledOptimized";
- case kCompiledQuick : return "kCompiledQuick";
case kInlinedInvoke : return "kInlinedInvoke";
case kInstructionSimplifications: return "kInstructionSimplifications";
case kInstructionSimplificationsArch: return "kInstructionSimplificationsArch";
case kUnresolvedMethod : return "kUnresolvedMethod";
+ case kUnresolvedField : return "kUnresolvedField";
+ case kUnresolvedFieldNotAFastAccess : return "kUnresolvedFieldNotAFastAccess";
case kNotCompiledBranchOutsideMethodCode: return "kNotCompiledBranchOutsideMethodCode";
case kNotCompiledCannotBuildSSA : return "kNotCompiledCannotBuildSSA";
case kNotCompiledCantAccesType : return "kNotCompiledCantAccesType";
@@ -115,7 +113,6 @@ class OptimizingCompilerStats {
case kNotCompiledPathological : return "kNotCompiledPathological";
case kNotCompiledSpaceFilter : return "kNotCompiledSpaceFilter";
case kNotCompiledUnhandledInstruction : return "kNotCompiledUnhandledInstruction";
- case kNotCompiledUnresolvedField : return "kNotCompiledUnresolvedField";
case kNotCompiledUnsupportedIsa : return "kNotCompiledUnsupportedIsa";
case kNotCompiledVerifyAtRuntime : return "kNotCompiledVerifyAtRuntime";
case kNotOptimizedDisabled : return "kNotOptimizedDisabled";
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index fe837e4545..f7a7e420bb 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -52,6 +52,8 @@ class RTPVisitor : public HGraphDelegateVisitor {
void SetClassAsTypeInfo(HInstruction* instr, mirror::Class* klass, bool is_exact);
void VisitInstanceFieldGet(HInstanceFieldGet* instr) OVERRIDE;
void VisitStaticFieldGet(HStaticFieldGet* instr) OVERRIDE;
+ void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instr) OVERRIDE;
+ void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instr) OVERRIDE;
void VisitInvoke(HInvoke* instr) OVERRIDE;
void VisitArrayGet(HArrayGet* instr) OVERRIDE;
void VisitCheckCast(HCheckCast* instr) OVERRIDE;
@@ -119,8 +121,9 @@ void ReferenceTypePropagation::Run() {
if (instr->IsBoundType()) {
DCHECK(instr->AsBoundType()->GetUpperBound().IsValid());
} else if (instr->IsLoadClass()) {
- DCHECK(instr->AsLoadClass()->GetReferenceTypeInfo().IsExact());
- DCHECK(instr->AsLoadClass()->GetLoadedClassRTI().IsValid());
+ HLoadClass* cls = instr->AsLoadClass();
+ DCHECK(cls->GetReferenceTypeInfo().IsExact());
+ DCHECK(!cls->GetLoadedClassRTI().IsValid() || cls->GetLoadedClassRTI().IsExact());
} else if (instr->IsNullCheck()) {
DCHECK(instr->GetReferenceTypeInfo().IsEqual(instr->InputAt(0)->GetReferenceTypeInfo()))
<< "NullCheck " << instr->GetReferenceTypeInfo()
@@ -166,6 +169,7 @@ static HBoundType* CreateBoundType(ArenaAllocator* arena,
SHARED_REQUIRES(Locks::mutator_lock_) {
ReferenceTypeInfo obj_rti = obj->GetReferenceTypeInfo();
ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI();
+ DCHECK(class_rti.IsValid());
HBoundType* bound_type = new (arena) HBoundType(obj, class_rti, upper_can_be_null);
// Narrow the type as much as possible.
if (class_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes()) {
@@ -314,6 +318,15 @@ void ReferenceTypePropagation::BoundTypeForIfInstanceOf(HBasicBlock* block) {
return;
}
+ HLoadClass* load_class = instanceOf->InputAt(1)->AsLoadClass();
+ ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI();
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ if (!class_rti.IsValid()) {
+ // We have loaded an unresolved class. Don't bother bounding the type.
+ return;
+ }
+ }
// We only need to bound the type if we have uses in the relevant block.
// So start with null and create the HBoundType lazily, only if it's needed.
HBoundType* bound_type = nullptr;
@@ -334,8 +347,6 @@ void ReferenceTypePropagation::BoundTypeForIfInstanceOf(HBasicBlock* block) {
if (instanceOfTrueBlock->Dominates(user->GetBlock())) {
if (bound_type == nullptr) {
ScopedObjectAccess soa(Thread::Current());
- HLoadClass* load_class = instanceOf->InputAt(1)->AsLoadClass();
- ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI();
HInstruction* insert_point = instanceOfTrueBlock->GetFirstInstruction();
if (ShouldCreateBoundType(insert_point, obj, class_rti, nullptr, instanceOfTrueBlock)) {
bound_type = CreateBoundType(
@@ -450,6 +461,22 @@ void RTPVisitor::VisitStaticFieldGet(HStaticFieldGet* instr) {
UpdateFieldAccessTypeInfo(instr, instr->GetFieldInfo());
}
+void RTPVisitor::VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instr) {
+ // TODO: Use descriptor to get the actual type.
+ if (instr->GetFieldType() == Primitive::kPrimNot) {
+ instr->SetReferenceTypeInfo(
+ ReferenceTypeInfo::Create(object_class_handle_, /* is_exact */ false));
+ }
+}
+
+void RTPVisitor::VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instr) {
+ // TODO: Use descriptor to get the actual type.
+ if (instr->GetFieldType() == Primitive::kPrimNot) {
+ instr->SetReferenceTypeInfo(
+ ReferenceTypeInfo::Create(object_class_handle_, /* is_exact */ false));
+ }
+}
+
void RTPVisitor::VisitLoadClass(HLoadClass* instr) {
ScopedObjectAccess soa(Thread::Current());
mirror::DexCache* dex_cache =
@@ -457,10 +484,10 @@ void RTPVisitor::VisitLoadClass(HLoadClass* instr) {
// Get type from dex cache assuming it was populated by the verifier.
mirror::Class* resolved_class = dex_cache->GetResolvedType(instr->GetTypeIndex());
// TODO: investigating why we are still getting unresolved classes: b/22821472.
- ReferenceTypeInfo::TypeHandle handle = (resolved_class != nullptr)
- ? handles_->NewHandle(resolved_class)
- : object_class_handle_;
- instr->SetLoadedClassRTI(ReferenceTypeInfo::Create(handle, /* is_exact */ true));
+ if (resolved_class != nullptr) {
+ instr->SetLoadedClassRTI(ReferenceTypeInfo::Create(
+ handles_->NewHandle(resolved_class), /* is_exact */ true));
+ }
instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(class_class_handle_, /* is_exact */ true));
}
@@ -499,6 +526,15 @@ void RTPVisitor::VisitFakeString(HFakeString* instr) {
}
void RTPVisitor::VisitCheckCast(HCheckCast* check_cast) {
+ HLoadClass* load_class = check_cast->InputAt(1)->AsLoadClass();
+ ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI();
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ if (!class_rti.IsValid()) {
+ // We have loaded an unresolved class. Don't bother bounding the type.
+ return;
+ }
+ }
HInstruction* obj = check_cast->InputAt(0);
HBoundType* bound_type = nullptr;
for (HUseIterator<HInstruction*> it(obj->GetUses()); !it.Done(); it.Advance()) {
@@ -506,8 +542,6 @@ void RTPVisitor::VisitCheckCast(HCheckCast* check_cast) {
if (check_cast->StrictlyDominates(user)) {
if (bound_type == nullptr) {
ScopedObjectAccess soa(Thread::Current());
- HLoadClass* load_class = check_cast->InputAt(1)->AsLoadClass();
- ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI();
if (ShouldCreateBoundType(check_cast->GetNext(), obj, class_rti, check_cast, nullptr)) {
bound_type = CreateBoundType(
GetGraph()->GetArena(),
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 2bb5a8bb08..21b36cb54b 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -488,7 +488,8 @@ static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator,
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache);
+ dex_cache,
+ 0);
block->AddInstruction(test);
block->AddInstruction(new (allocator) HIf(test));
HBasicBlock* then = new (allocator) HBasicBlock(graph);
@@ -513,14 +514,16 @@ static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator,
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache);
+ dex_cache,
+ 0);
*input2 = new (allocator) HInstanceFieldGet(parameter,
Primitive::kPrimInt,
MemberOffset(42),
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache);
+ dex_cache,
+ 0);
then->AddInstruction(*input1);
else_->AddInstruction(*input2);
join->AddInstruction(new (allocator) HExit());
@@ -634,7 +637,8 @@ static HGraph* BuildFieldReturn(ArenaAllocator* allocator,
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache);
+ dex_cache,
+ 0);
block->AddInstruction(*field);
*ret = new (allocator) HReturn(*field);
block->AddInstruction(*ret);
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index f27cecc8fa..a095809ce1 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -24,6 +24,7 @@ void StackMapStream::BeginStackMapEntry(uint32_t dex_pc,
uint32_t num_dex_registers,
uint8_t inlining_depth) {
DCHECK_EQ(0u, current_entry_.dex_pc) << "EndStackMapEntry not called after BeginStackMapEntry";
+ DCHECK_NE(dex_pc, static_cast<uint32_t>(-1)) << "invalid dex_pc";
current_entry_.dex_pc = dex_pc;
current_entry_.native_pc_offset = native_pc_offset;
current_entry_.register_mask = register_mask;
diff --git a/compiler/utils/arm/assembler_arm32_test.cc b/compiler/utils/arm/assembler_arm32_test.cc
index 2a0912e02d..43805966a9 100644
--- a/compiler/utils/arm/assembler_arm32_test.cc
+++ b/compiler/utils/arm/assembler_arm32_test.cc
@@ -287,7 +287,7 @@ class AssemblerArm32Test : public AssemblerArmTest<arm::Arm32Assembler,
case 1:
return Base::REG2_TOKEN;
case 2:
- return REG3_TOKEN;
+ return Base::REG3_TOKEN;
case 3:
return REG4_TOKEN;
default:
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index 017402dbd3..bd994f46fc 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -92,6 +92,17 @@ class AssemblerTest : public testing::Test {
fmt);
}
+ std::string RepeatRRR(void (Ass::*f)(Reg, Reg, Reg), std::string fmt) {
+ return RepeatTemplatedRegisters<Reg, Reg, Reg>(f,
+ GetRegisters(),
+ GetRegisters(),
+ GetRegisters(),
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ fmt);
+ }
+
std::string Repeatrb(void (Ass::*f)(Reg, Reg), std::string fmt) {
return RepeatTemplatedRegisters<Reg, Reg>(f,
GetRegisters(),
@@ -118,6 +129,66 @@ class AssemblerTest : public testing::Test {
return RepeatRegisterImm<RegisterView::kUseSecondaryName>(f, imm_bytes, fmt);
}
+ template <typename Reg1Type, typename Reg2Type, typename ImmType,
+ RegisterView Reg1View, RegisterView Reg2View>
+ std::string RepeatRegRegImmBits(void (Ass::*f)(Reg1Type, Reg2Type, ImmType),
+ int imm_bits,
+ std::string fmt) {
+ const std::vector<Reg1Type*> reg1_registers = GetRegisters();
+ const std::vector<Reg2Type*> reg2_registers = GetRegisters();
+ std::string str;
+ std::vector<int64_t> imms = CreateImmediateValuesBits(abs(imm_bits), imm_bits > 0);
+
+ for (auto reg1 : reg1_registers) {
+ for (auto reg2 : reg2_registers) {
+ for (int64_t imm : imms) {
+ ImmType new_imm = CreateImmediate(imm);
+ (assembler_.get()->*f)(*reg1, *reg2, new_imm);
+ std::string base = fmt;
+
+ std::string reg1_string = GetRegName<Reg1View>(*reg1);
+ size_t reg1_index;
+ while ((reg1_index = base.find(REG1_TOKEN)) != std::string::npos) {
+ base.replace(reg1_index, ConstexprStrLen(REG1_TOKEN), reg1_string);
+ }
+
+ std::string reg2_string = GetRegName<Reg2View>(*reg2);
+ size_t reg2_index;
+ while ((reg2_index = base.find(REG2_TOKEN)) != std::string::npos) {
+ base.replace(reg2_index, ConstexprStrLen(REG2_TOKEN), reg2_string);
+ }
+
+ size_t imm_index = base.find(IMM_TOKEN);
+ if (imm_index != std::string::npos) {
+ std::ostringstream sreg;
+ sreg << imm;
+ std::string imm_string = sreg.str();
+ base.replace(imm_index, ConstexprStrLen(IMM_TOKEN), imm_string);
+ }
+
+ if (str.size() > 0) {
+ str += "\n";
+ }
+ str += base;
+ }
+ }
+ }
+ // Add a newline at the end.
+ str += "\n";
+ return str;
+ }
+
+ template <typename Reg1Type, typename Reg2Type, typename ImmType>
+ std::string RepeatRRIb(void (Ass::*f)(Reg1Type, Reg2Type, ImmType),
+ int imm_bits,
+ std::string fmt) {
+ return RepeatRegRegImmBits<Reg1Type,
+ Reg2Type,
+ ImmType,
+ RegisterView::kUsePrimaryName,
+ RegisterView::kUsePrimaryName>(f, imm_bits, fmt);
+ }
+
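
Note the sign convention threaded through to CreateImmediateValuesBits below: a positive imm_bits requests unsigned immediates, a negative one requests signed immediates of |imm_bits| width. The MIPS64 tests added later in this patch use both forms:

    // 5-bit unsigned rotate amounts, 0 .. 31:
    DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Rotr, 5, "rotr ${reg1}, ${reg2}, {imm}"), "rotr");
    // 9-bit signed memory offsets, -256 .. 255:
    DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Sc, -9, "sc ${reg1}, {imm}(${reg2})"), "sc");
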
std::string RepeatFF(void (Ass::*f)(FPReg, FPReg), std::string fmt) {
return RepeatTemplatedRegisters<FPReg, FPReg>(f,
GetFPRegisters(),
@@ -127,14 +198,27 @@ class AssemblerTest : public testing::Test {
fmt);
}
- std::string RepeatFFI(void (Ass::*f)(FPReg, FPReg, const Imm&), size_t imm_bytes, std::string fmt) {
+ std::string RepeatFFF(void (Ass::*f)(FPReg, FPReg, FPReg), std::string fmt) {
+ return RepeatTemplatedRegisters<FPReg, FPReg, FPReg>(f,
+ GetFPRegisters(),
+ GetFPRegisters(),
+ GetFPRegisters(),
+ &AssemblerTest::GetFPRegName,
+ &AssemblerTest::GetFPRegName,
+ &AssemblerTest::GetFPRegName,
+ fmt);
+ }
+
+ std::string RepeatFFI(void (Ass::*f)(FPReg, FPReg, const Imm&),
+ size_t imm_bytes,
+ std::string fmt) {
return RepeatTemplatedRegistersImm<FPReg, FPReg>(f,
- GetFPRegisters(),
- GetFPRegisters(),
- &AssemblerTest::GetFPRegName,
- &AssemblerTest::GetFPRegName,
- imm_bytes,
- fmt);
+ GetFPRegisters(),
+ GetFPRegisters(),
+ &AssemblerTest::GetFPRegName,
+ &AssemblerTest::GetFPRegName,
+ imm_bytes,
+ fmt);
}
std::string RepeatFR(void (Ass::*f)(FPReg, Reg), std::string fmt) {
@@ -339,6 +423,63 @@ class AssemblerTest : public testing::Test {
return res;
}
+ const int kMaxBitsExhaustiveTest = 8;
+
+ // Create a set of representative immediate values for the given bit width;
+ // exhaustive for widths up to kMaxBitsExhaustiveTest, sampled beyond that.
+ virtual std::vector<int64_t> CreateImmediateValuesBits(const int imm_bits, bool as_uint = false) {
+ CHECK_GT(imm_bits, 0);
+ CHECK_LE(imm_bits, 64);
+ std::vector<int64_t> res;
+
+ if (imm_bits <= kMaxBitsExhaustiveTest) {
+ if (as_uint) {
+ for (uint64_t i = MinInt<uint64_t>(imm_bits); i <= MaxInt<uint64_t>(imm_bits); i++) {
+ res.push_back(static_cast<int64_t>(i));
+ }
+ } else {
+ for (int64_t i = MinInt<int64_t>(imm_bits); i <= MaxInt<int64_t>(imm_bits); i++) {
+ res.push_back(i);
+ }
+ }
+ } else {
+ if (as_uint) {
+ for (uint64_t i = MinInt<uint64_t>(kMaxBitsExhaustiveTest);
+ i <= MaxInt<uint64_t>(kMaxBitsExhaustiveTest);
+ i++) {
+ res.push_back(static_cast<int64_t>(i));
+ }
+ for (int i = 0; i <= imm_bits; i++) {
+ uint64_t j = (MaxInt<uint64_t>(kMaxBitsExhaustiveTest) + 1) +
+ ((MaxInt<uint64_t>(imm_bits) -
+ (MaxInt<uint64_t>(kMaxBitsExhaustiveTest) + 1))
+ * i / imm_bits);
+ res.push_back(static_cast<int64_t>(j));
+ }
+ } else {
+ for (int i = 0; i <= imm_bits; i++) {
+ int64_t j = MinInt<int64_t>(imm_bits) +
+ ((((MinInt<int64_t>(kMaxBitsExhaustiveTest) - 1) -
+ MinInt<int64_t>(imm_bits))
+ * i) / imm_bits);
+ res.push_back(static_cast<int64_t>(j));
+ }
+ for (int64_t i = MinInt<int64_t>(kMaxBitsExhaustiveTest);
+ i <= MaxInt<int64_t>(kMaxBitsExhaustiveTest);
+ i++) {
+ res.push_back(static_cast<int64_t>(i));
+ }
+ for (int i = 0; i <= imm_bits; i++) {
+ int64_t j = (MaxInt<int64_t>(kMaxBitsExhaustiveTest) + 1) +
+ ((MaxInt<int64_t>(imm_bits) - (MaxInt<int64_t>(kMaxBitsExhaustiveTest) + 1))
+ * i / imm_bits);
+ res.push_back(static_cast<int64_t>(j));
+ }
+ }
+ }
+
+ return res;
+ }
+
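
A worked trace of the sampling (my arithmetic, not from the patch) for a signed 9-bit request, where 9 exceeds kMaxBitsExhaustiveTest: the first loop emits ten points stepping from -256 up to -129, the middle loop covers every value in [-128, 127] exhaustively, and the last loop emits ten points stepping from 128 up to 255, so both endpoints of the full range are always exercised.
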
// Create an immediate from the specific value.
virtual Imm CreateImmediate(int64_t imm_value) = 0;
@@ -406,6 +547,52 @@ class AssemblerTest : public testing::Test {
return str;
}
+ template <typename Reg1, typename Reg2, typename Reg3>
+ std::string RepeatTemplatedRegisters(void (Ass::*f)(Reg1, Reg2, Reg3),
+ const std::vector<Reg1*> reg1_registers,
+ const std::vector<Reg2*> reg2_registers,
+ const std::vector<Reg3*> reg3_registers,
+ std::string (AssemblerTest::*GetName1)(const Reg1&),
+ std::string (AssemblerTest::*GetName2)(const Reg2&),
+ std::string (AssemblerTest::*GetName3)(const Reg3&),
+ std::string fmt) {
+ std::string str;
+ for (auto reg1 : reg1_registers) {
+ for (auto reg2 : reg2_registers) {
+ for (auto reg3 : reg3_registers) {
+ (assembler_.get()->*f)(*reg1, *reg2, *reg3);
+ std::string base = fmt;
+
+ std::string reg1_string = (this->*GetName1)(*reg1);
+ size_t reg1_index;
+ while ((reg1_index = base.find(REG1_TOKEN)) != std::string::npos) {
+ base.replace(reg1_index, ConstexprStrLen(REG1_TOKEN), reg1_string);
+ }
+
+ std::string reg2_string = (this->*GetName2)(*reg2);
+ size_t reg2_index;
+ while ((reg2_index = base.find(REG2_TOKEN)) != std::string::npos) {
+ base.replace(reg2_index, ConstexprStrLen(REG2_TOKEN), reg2_string);
+ }
+
+ std::string reg3_string = (this->*GetName3)(*reg3);
+ size_t reg3_index;
+ while ((reg3_index = base.find(REG3_TOKEN)) != std::string::npos) {
+ base.replace(reg3_index, ConstexprStrLen(REG3_TOKEN), reg3_string);
+ }
+
+ if (str.size() > 0) {
+ str += "\n";
+ }
+ str += base;
+ }
+ }
+ }
+ // Add a newline at the end.
+ str += "\n";
+ return str;
+ }
+
template <typename Reg1, typename Reg2>
std::string RepeatTemplatedRegistersImm(void (Ass::*f)(Reg1, Reg2, const Imm&),
const std::vector<Reg1*> reg1_registers,
@@ -500,6 +687,7 @@ class AssemblerTest : public testing::Test {
static constexpr const char* REG_TOKEN = "{reg}";
static constexpr const char* REG1_TOKEN = "{reg1}";
static constexpr const char* REG2_TOKEN = "{reg2}";
+ static constexpr const char* REG3_TOKEN = "{reg3}";
static constexpr const char* IMM_TOKEN = "{imm}";
private:
diff --git a/compiler/utils/assembler_test_base.h b/compiler/utils/assembler_test_base.h
index c8b3fe58a8..43c9d942ed 100644
--- a/compiler/utils/assembler_test_base.h
+++ b/compiler/utils/assembler_test_base.h
@@ -67,12 +67,20 @@ class AssemblerTestInfrastructure {
// This is intended to be run as a test.
bool CheckTools() {
- if (!FileExists(FindTool(assembler_cmd_name_))) {
+ std::string asm_tool = FindTool(assembler_cmd_name_);
+ if (!FileExists(asm_tool)) {
+ LOG(ERROR) << "Could not find assembler from " << assembler_cmd_name_;
+ LOG(ERROR) << "FindTool returned " << asm_tool;
+ FindToolDump(assembler_cmd_name_);
return false;
}
LOG(INFO) << "Chosen assembler command: " << GetAssemblerCommand();
- if (!FileExists(FindTool(objdump_cmd_name_))) {
+ std::string objdump_tool = FindTool(objdump_cmd_name_);
+ if (!FileExists(objdump_tool)) {
+ LOG(ERROR) << "Could not find objdump from " << objdump_cmd_name_;
+ LOG(ERROR) << "FindTool returned " << objdump_tool;
+ FindToolDump(objdump_cmd_name_);
return false;
}
LOG(INFO) << "Chosen objdump command: " << GetObjdumpCommand();
@@ -80,7 +88,11 @@ class AssemblerTestInfrastructure {
// Disassembly is optional.
std::string disassembler = GetDisassembleCommand();
if (disassembler.length() != 0) {
- if (!FileExists(FindTool(disassembler_cmd_name_))) {
+ std::string disassembler_tool = FindTool(disassembler_cmd_name_);
+ if (!FileExists(disassembler_tool)) {
+ LOG(ERROR) << "Could not find disassembler from " << disassembler_cmd_name_;
+ LOG(ERROR) << "FindTool returned " << disassembler_tool;
+ FindToolDump(disassembler_cmd_name_);
return false;
}
LOG(INFO) << "Chosen disassemble command: " << GetDisassembleCommand();
@@ -493,7 +505,7 @@ class AssemblerTestInfrastructure {
std::string error_msg;
if (!Exec(args, &error_msg)) {
EXPECT_TRUE(false) << error_msg;
- return "";
+ UNREACHABLE();
}
std::ifstream in(tmp_file.c_str());
@@ -508,6 +520,39 @@ class AssemblerTestInfrastructure {
return line;
}
+ // For debugging purposes: dump the candidate tools found on the search path.
+ void FindToolDump(std::string tool_name) {
+ // Find the current tool. Wild-card pattern is "arch-string*tool-name".
+ std::string gcc_path = GetRootPath() + GetGCCRootPath();
+ std::vector<std::string> args;
+ args.push_back("find");
+ args.push_back(gcc_path);
+ args.push_back("-name");
+ args.push_back(architecture_string_ + "*" + tool_name);
+ args.push_back("|");
+ args.push_back("sort");
+ std::string tmp_file = GetTmpnam();
+ args.push_back(">");
+ args.push_back(tmp_file);
+ std::string sh_args = Join(args, ' ');
+
+ args.clear();
+ args.push_back("/bin/sh");
+ args.push_back("-c");
+ args.push_back(sh_args);
+
+ std::string error_msg;
+ if (!Exec(args, &error_msg)) {
+ EXPECT_TRUE(false) << error_msg;
+ UNREACHABLE();
+ }
+
+ std::ifstream in(tmp_file.c_str());
+ if (in) {
+ LOG(ERROR) << in.rdbuf();
+ }
+ }
+
// Use a consistent tmpnam, so store it.
std::string GetTmpnam() {
if (tmpnam_.length() == 0) {
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index c170313728..d083eb4306 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -46,6 +46,20 @@ enum StoreOperandType {
kStoreDoubleword
};
+// Used to test the values returned by ClassS/ClassD.
+enum FPClassMaskType {
+ kSignalingNaN = 0x001,
+ kQuietNaN = 0x002,
+ kNegativeInfinity = 0x004,
+ kNegativeNormal = 0x008,
+ kNegativeSubnormal = 0x010,
+ kNegativeZero = 0x020,
+ kPositiveInfinity = 0x040,
+ kPositiveNormal = 0x080,
+ kPositiveSubnormal = 0x100,
+ kPositiveZero = 0x200,
+};
+
class Mips64Assembler FINAL : public Assembler {
public:
Mips64Assembler() {}
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
new file mode 100644
index 0000000000..2071aca546
--- /dev/null
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -0,0 +1,380 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "assembler_mips64.h"
+
+#include <inttypes.h>
+#include <map>
+#include <random>
+
+#include "base/bit_utils.h"
+#include "base/stl_util.h"
+#include "utils/assembler_test.h"
+
+namespace art {
+
+struct MIPS64CpuRegisterCompare {
+ bool operator()(const mips64::GpuRegister& a, const mips64::GpuRegister& b) const {
+ return a < b;
+ }
+};
+
+class AssemblerMIPS64Test : public AssemblerTest<mips64::Mips64Assembler,
+ mips64::GpuRegister,
+ mips64::FpuRegister,
+ uint32_t> {
+ public:
+ typedef AssemblerTest<mips64::Mips64Assembler,
+ mips64::GpuRegister,
+ mips64::FpuRegister,
+ uint32_t> Base;
+
+ protected:
+ // Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
+ std::string GetArchitectureString() OVERRIDE {
+ return "mips64";
+ }
+
+ std::string GetAssemblerParameters() OVERRIDE {
+ return " --no-warn -march=mips64r6";
+ }
+
+ std::string GetDisassembleParameters() OVERRIDE {
+ return " -D -bbinary -mmips:isa64r6";
+ }
+
+ void SetUpHelpers() OVERRIDE {
+ if (registers_.size() == 0) {
+ registers_.push_back(new mips64::GpuRegister(mips64::ZERO));
+ registers_.push_back(new mips64::GpuRegister(mips64::AT));
+ registers_.push_back(new mips64::GpuRegister(mips64::V0));
+ registers_.push_back(new mips64::GpuRegister(mips64::V1));
+ registers_.push_back(new mips64::GpuRegister(mips64::A0));
+ registers_.push_back(new mips64::GpuRegister(mips64::A1));
+ registers_.push_back(new mips64::GpuRegister(mips64::A2));
+ registers_.push_back(new mips64::GpuRegister(mips64::A3));
+ registers_.push_back(new mips64::GpuRegister(mips64::A4));
+ registers_.push_back(new mips64::GpuRegister(mips64::A5));
+ registers_.push_back(new mips64::GpuRegister(mips64::A6));
+ registers_.push_back(new mips64::GpuRegister(mips64::A7));
+ registers_.push_back(new mips64::GpuRegister(mips64::T0));
+ registers_.push_back(new mips64::GpuRegister(mips64::T1));
+ registers_.push_back(new mips64::GpuRegister(mips64::T2));
+ registers_.push_back(new mips64::GpuRegister(mips64::T3));
+ registers_.push_back(new mips64::GpuRegister(mips64::S0));
+ registers_.push_back(new mips64::GpuRegister(mips64::S1));
+ registers_.push_back(new mips64::GpuRegister(mips64::S2));
+ registers_.push_back(new mips64::GpuRegister(mips64::S3));
+ registers_.push_back(new mips64::GpuRegister(mips64::S4));
+ registers_.push_back(new mips64::GpuRegister(mips64::S5));
+ registers_.push_back(new mips64::GpuRegister(mips64::S6));
+ registers_.push_back(new mips64::GpuRegister(mips64::S7));
+ registers_.push_back(new mips64::GpuRegister(mips64::T8));
+ registers_.push_back(new mips64::GpuRegister(mips64::T9));
+ registers_.push_back(new mips64::GpuRegister(mips64::K0));
+ registers_.push_back(new mips64::GpuRegister(mips64::K1));
+ registers_.push_back(new mips64::GpuRegister(mips64::GP));
+ registers_.push_back(new mips64::GpuRegister(mips64::SP));
+ registers_.push_back(new mips64::GpuRegister(mips64::S8));
+ registers_.push_back(new mips64::GpuRegister(mips64::RA));
+
+ secondary_register_names_.emplace(mips64::GpuRegister(mips64::ZERO), "zero");
+ secondary_register_names_.emplace(mips64::GpuRegister(mips64::AT), "at");
+ secondary_register_names_.emplace(mips64::GpuRegister(mips64::V0), "v0");
+ secondary_register_names_.emplace(mips64::GpuRegister(mips64::V1), "v1");
+ secondary_register_names_.emplace(mips64::GpuRegister(mips64::A0), "a0");
+ secondary_register_names_.emplace(mips64::GpuRegister(mips64::A1), "a1");
+ secondary_register_names_.emplace(mips64::GpuRegister(mips64::A2), "a2");
+ secondary_register_names_.emplace(mips64::GpuRegister(mips64::A3), "a3");
+ secondary_register_names_.emplace(mips64::GpuRegister(mips64::A4), "a4");
+ secondary_register_names_.emplace(mips64::GpuRegister(mips64::A5), "a5");
+ secondary_register_names_.emplace(mips64::GpuRegister(mips64::A6), "a6");
+ secondary_register_names_.emplace(mips64::GpuRegister(mips64::A7), "a7");
+ secondary_register_names_.emplace(mips64::GpuRegister(mips64::T0), "t0");
+ secondary_register_names_.emplace(mips64::GpuRegister(mips64::T1), "t1");
+ secondary_register_names_.emplace(mips64::GpuRegister(mips64::T2), "t2");
+ secondary_register_names_.emplace(mips64::GpuRegister(mips64::T3), "t3");
+ secondary_register_names_.emplace(mips64::GpuRegister(mips64::S0), "s0");
+ secondary_register_names_.emplace(mips64::GpuRegister(mips64::S1), "s1");
+ secondary_register_names_.emplace(mips64::GpuRegister(mips64::S2), "s2");
+ secondary_register_names_.emplace(mips64::GpuRegister(mips64::S3), "s3");
+ secondary_register_names_.emplace(mips64::GpuRegister(mips64::S4), "s4");
+ secondary_register_names_.emplace(mips64::GpuRegister(mips64::S5), "s5");
+ secondary_register_names_.emplace(mips64::GpuRegister(mips64::S6), "s6");
+ secondary_register_names_.emplace(mips64::GpuRegister(mips64::S7), "s7");
+ secondary_register_names_.emplace(mips64::GpuRegister(mips64::T8), "t8");
+ secondary_register_names_.emplace(mips64::GpuRegister(mips64::T9), "t9");
+ secondary_register_names_.emplace(mips64::GpuRegister(mips64::K0), "k0");
+ secondary_register_names_.emplace(mips64::GpuRegister(mips64::K1), "k1");
+ secondary_register_names_.emplace(mips64::GpuRegister(mips64::GP), "gp");
+ secondary_register_names_.emplace(mips64::GpuRegister(mips64::SP), "sp");
+ secondary_register_names_.emplace(mips64::GpuRegister(mips64::S8), "s8");
+ secondary_register_names_.emplace(mips64::GpuRegister(mips64::RA), "ra");
+
+ fp_registers_.push_back(new mips64::FpuRegister(mips64::F0));
+ fp_registers_.push_back(new mips64::FpuRegister(mips64::F1));
+ fp_registers_.push_back(new mips64::FpuRegister(mips64::F2));
+ fp_registers_.push_back(new mips64::FpuRegister(mips64::F3));
+ fp_registers_.push_back(new mips64::FpuRegister(mips64::F4));
+ fp_registers_.push_back(new mips64::FpuRegister(mips64::F5));
+ fp_registers_.push_back(new mips64::FpuRegister(mips64::F6));
+ fp_registers_.push_back(new mips64::FpuRegister(mips64::F7));
+ fp_registers_.push_back(new mips64::FpuRegister(mips64::F8));
+ fp_registers_.push_back(new mips64::FpuRegister(mips64::F9));
+ fp_registers_.push_back(new mips64::FpuRegister(mips64::F10));
+ fp_registers_.push_back(new mips64::FpuRegister(mips64::F11));
+ fp_registers_.push_back(new mips64::FpuRegister(mips64::F12));
+ fp_registers_.push_back(new mips64::FpuRegister(mips64::F13));
+ fp_registers_.push_back(new mips64::FpuRegister(mips64::F14));
+ fp_registers_.push_back(new mips64::FpuRegister(mips64::F15));
+ fp_registers_.push_back(new mips64::FpuRegister(mips64::F16));
+ fp_registers_.push_back(new mips64::FpuRegister(mips64::F17));
+ fp_registers_.push_back(new mips64::FpuRegister(mips64::F18));
+ fp_registers_.push_back(new mips64::FpuRegister(mips64::F19));
+ fp_registers_.push_back(new mips64::FpuRegister(mips64::F20));
+ fp_registers_.push_back(new mips64::FpuRegister(mips64::F21));
+ fp_registers_.push_back(new mips64::FpuRegister(mips64::F22));
+ fp_registers_.push_back(new mips64::FpuRegister(mips64::F23));
+ fp_registers_.push_back(new mips64::FpuRegister(mips64::F24));
+ fp_registers_.push_back(new mips64::FpuRegister(mips64::F25));
+ fp_registers_.push_back(new mips64::FpuRegister(mips64::F26));
+ fp_registers_.push_back(new mips64::FpuRegister(mips64::F27));
+ fp_registers_.push_back(new mips64::FpuRegister(mips64::F28));
+ fp_registers_.push_back(new mips64::FpuRegister(mips64::F29));
+ fp_registers_.push_back(new mips64::FpuRegister(mips64::F30));
+ fp_registers_.push_back(new mips64::FpuRegister(mips64::F31));
+ }
+ }
+
+ void TearDown() OVERRIDE {
+ AssemblerTest::TearDown();
+ STLDeleteElements(&registers_);
+ STLDeleteElements(&fp_registers_);
+ }
+
+ std::vector<mips64::GpuRegister*> GetRegisters() OVERRIDE {
+ return registers_;
+ }
+
+ std::vector<mips64::FpuRegister*> GetFPRegisters() OVERRIDE {
+ return fp_registers_;
+ }
+
+ uint32_t CreateImmediate(int64_t imm_value) OVERRIDE {
+ return imm_value;
+ }
+
+ std::string GetSecondaryRegisterName(const mips64::GpuRegister& reg) OVERRIDE {
+ CHECK(secondary_register_names_.find(reg) != secondary_register_names_.end());
+ return secondary_register_names_[reg];
+ }
+
+ private:
+ std::vector<mips64::GpuRegister*> registers_;
+ std::map<mips64::GpuRegister, std::string, MIPS64CpuRegisterCompare> secondary_register_names_;
+
+ std::vector<mips64::FpuRegister*> fp_registers_;
+};
+
+
+TEST_F(AssemblerMIPS64Test, Toolchain) {
+ EXPECT_TRUE(CheckTools());
+}
+
+
+///////////////////
+// FP Operations //
+///////////////////
+
+TEST_F(AssemblerMIPS64Test, SqrtS) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::SqrtS, "sqrt.s ${reg1}, ${reg2}"), "sqrt.s");
+}
+
+TEST_F(AssemblerMIPS64Test, SqrtD) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::SqrtD, "sqrt.d ${reg1}, ${reg2}"), "sqrt.d");
+}
+
+TEST_F(AssemblerMIPS64Test, AbsS) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::AbsS, "abs.s ${reg1}, ${reg2}"), "abs.s");
+}
+
+TEST_F(AssemblerMIPS64Test, AbsD) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::AbsD, "abs.d ${reg1}, ${reg2}"), "abs.d");
+}
+
+TEST_F(AssemblerMIPS64Test, RoundLS) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::RoundLS, "round.l.s ${reg1}, ${reg2}"), "round.l.s");
+}
+
+TEST_F(AssemblerMIPS64Test, RoundLD) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::RoundLD, "round.l.d ${reg1}, ${reg2}"), "round.l.d");
+}
+
+TEST_F(AssemblerMIPS64Test, RoundWS) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::RoundWS, "round.w.s ${reg1}, ${reg2}"), "round.w.s");
+}
+
+TEST_F(AssemblerMIPS64Test, RoundWD) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::RoundWD, "round.w.d ${reg1}, ${reg2}"), "round.w.d");
+}
+
+TEST_F(AssemblerMIPS64Test, CeilLS) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::CeilLS, "ceil.l.s ${reg1}, ${reg2}"), "ceil.l.s");
+}
+
+TEST_F(AssemblerMIPS64Test, CeilLD) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::CeilLD, "ceil.l.d ${reg1}, ${reg2}"), "ceil.l.d");
+}
+
+TEST_F(AssemblerMIPS64Test, CeilWS) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::CeilWS, "ceil.w.s ${reg1}, ${reg2}"), "ceil.w.s");
+}
+
+TEST_F(AssemblerMIPS64Test, CeilWD) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::CeilWD, "ceil.w.d ${reg1}, ${reg2}"), "ceil.w.d");
+}
+
+TEST_F(AssemblerMIPS64Test, FloorLS) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::FloorLS, "floor.l.s ${reg1}, ${reg2}"), "floor.l.s");
+}
+
+TEST_F(AssemblerMIPS64Test, FloorLD) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::FloorLD, "floor.l.d ${reg1}, ${reg2}"), "floor.l.d");
+}
+
+TEST_F(AssemblerMIPS64Test, FloorWS) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::FloorWS, "floor.w.s ${reg1}, ${reg2}"), "floor.w.s");
+}
+
+TEST_F(AssemblerMIPS64Test, FloorWD) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::FloorWD, "floor.w.d ${reg1}, ${reg2}"), "floor.w.d");
+}
+
+TEST_F(AssemblerMIPS64Test, SelS) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::SelS, "sel.s ${reg1}, ${reg2}, ${reg3}"), "sel.s");
+}
+
+TEST_F(AssemblerMIPS64Test, SelD) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::SelD, "sel.d ${reg1}, ${reg2}, ${reg3}"), "sel.d");
+}
+
+TEST_F(AssemblerMIPS64Test, RintS) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::RintS, "rint.s ${reg1}, ${reg2}"), "rint.s");
+}
+
+TEST_F(AssemblerMIPS64Test, RintD) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::RintD, "rint.d ${reg1}, ${reg2}"), "rint.d");
+}
+
+TEST_F(AssemblerMIPS64Test, ClassS) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::ClassS, "class.s ${reg1}, ${reg2}"), "class.s");
+}
+
+TEST_F(AssemblerMIPS64Test, ClassD) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::ClassD, "class.d ${reg1}, ${reg2}"), "class.d");
+}
+
+TEST_F(AssemblerMIPS64Test, MinS) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::MinS, "min.s ${reg1}, ${reg2}, ${reg3}"), "min.s");
+}
+
+TEST_F(AssemblerMIPS64Test, MinD) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::MinD, "min.d ${reg1}, ${reg2}, ${reg3}"), "min.d");
+}
+
+TEST_F(AssemblerMIPS64Test, MaxS) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::MaxS, "max.s ${reg1}, ${reg2}, ${reg3}"), "max.s");
+}
+
+TEST_F(AssemblerMIPS64Test, MaxD) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::MaxD, "max.d ${reg1}, ${reg2}, ${reg3}"), "max.d");
+}
+
+TEST_F(AssemblerMIPS64Test, CvtDL) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::Cvtdl, "cvt.d.l ${reg1}, ${reg2}"), "cvt.d.l");
+}
+
+//////////
+// MISC //
+//////////
+
+TEST_F(AssemblerMIPS64Test, Bitswap) {
+ DriverStr(RepeatRR(&mips64::Mips64Assembler::Bitswap, "bitswap ${reg1}, ${reg2}"), "bitswap");
+}
+
+TEST_F(AssemblerMIPS64Test, Dbitswap) {
+ DriverStr(RepeatRR(&mips64::Mips64Assembler::Dbitswap, "dbitswap ${reg1}, ${reg2}"), "dbitswap");
+}
+
+TEST_F(AssemblerMIPS64Test, Dsbh) {
+ DriverStr(RepeatRR(&mips64::Mips64Assembler::Dsbh, "dsbh ${reg1}, ${reg2}"), "dsbh");
+}
+
+TEST_F(AssemblerMIPS64Test, Dshd) {
+ DriverStr(RepeatRR(&mips64::Mips64Assembler::Dshd, "dshd ${reg1}, ${reg2}"), "dshd");
+}
+
+TEST_F(AssemblerMIPS64Test, Wsbh) {
+ DriverStr(RepeatRR(&mips64::Mips64Assembler::Wsbh, "wsbh ${reg1}, ${reg2}"), "wsbh");
+}
+
+TEST_F(AssemblerMIPS64Test, Sc) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Sc, -9, "sc ${reg1}, {imm}(${reg2})"), "sc");
+}
+
+TEST_F(AssemblerMIPS64Test, Scd) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Scd, -9, "scd ${reg1}, {imm}(${reg2})"), "scd");
+}
+
+TEST_F(AssemblerMIPS64Test, Ll) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Ll, -9, "ll ${reg1}, {imm}(${reg2})"), "ll");
+}
+
+TEST_F(AssemblerMIPS64Test, Lld) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Lld, -9, "lld ${reg1}, {imm}(${reg2})"), "lld");
+}
+
+TEST_F(AssemblerMIPS64Test, Rotr) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Rotr, 5, "rotr ${reg1}, ${reg2}, {imm}"), "rotr");
+}
+
+TEST_F(AssemblerMIPS64Test, Seleqz) {
+ DriverStr(RepeatRRR(&mips64::Mips64Assembler::Seleqz, "seleqz ${reg1}, ${reg2}, ${reg3}"),
+ "seleqz");
+}
+
+TEST_F(AssemblerMIPS64Test, Selnez) {
+ DriverStr(RepeatRRR(&mips64::Mips64Assembler::Selnez, "selnez ${reg1}, ${reg2}, ${reg3}"),
+ "selnez");
+}
+
+TEST_F(AssemblerMIPS64Test, Clz) {
+ DriverStr(RepeatRR(&mips64::Mips64Assembler::Clz, "clz ${reg1}, ${reg2}"), "clz");
+}
+
+TEST_F(AssemblerMIPS64Test, Clo) {
+ DriverStr(RepeatRR(&mips64::Mips64Assembler::Clo, "clo ${reg1}, ${reg2}"), "clo");
+}
+
+TEST_F(AssemblerMIPS64Test, Dclz) {
+ DriverStr(RepeatRR(&mips64::Mips64Assembler::Dclz, "dclz ${reg1}, ${reg2}"), "dclz");
+}
+
+TEST_F(AssemblerMIPS64Test, Dclo) {
+ DriverStr(RepeatRR(&mips64::Mips64Assembler::Dclo, "dclo ${reg1}, ${reg2}"), "dclo");
+}
+
+} // namespace art