Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator.cc            |  7
-rw-r--r--  compiler/optimizing/code_generator_mips.cc       | 10
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc     | 10
-rw-r--r--  compiler/optimizing/code_generator_x86.cc        | 10
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc     | 10
-rw-r--r--  compiler/optimizing/instruction_builder.cc       | 14
-rw-r--r--  compiler/optimizing/load_store_analysis.h        |  6
-rw-r--r--  compiler/optimizing/load_store_analysis_test.cc  | 64
-rw-r--r--  compiler/optimizing/nodes.h                      | 34
-rw-r--r--  compiler/optimizing/nodes_shared.h               | 32
10 files changed, 153 insertions(+), 44 deletions(-)
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 5625f04726..015a6a04d3 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -919,10 +919,9 @@ static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
}
ArenaVector<size_t> covered(
loop_headers.size(), 0, graph.GetAllocator()->Adapter(kArenaAllocMisc));
- IterationRange<DexInstructionIterator> instructions = code_item.Instructions();
- for (auto it = instructions.begin(); it != instructions.end(); ++it) {
- const uint32_t dex_pc = it.GetDexPC(instructions.begin());
- const Instruction& instruction = *it;
+ for (const DexInstructionPcPair& pair : code_item.Instructions()) {
+ const uint32_t dex_pc = pair.DexPc();
+ const Instruction& instruction = pair.Inst();
if (instruction.IsBranch()) {
uint32_t target = dex_pc + instruction.GetTargetOffset();
CheckCovers(target, graph, code_info, loop_headers, &covered);
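For context, the rewritten loop above relies on DexInstructionPcPair exposing both the instruction and its dex pc, so the pc no longer has to be derived from iterator arithmetic. A minimal standalone sketch of the same pattern (an analog, not ART code; InsnPcPair is a hypothetical stand-in):

#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for DexInstructionPcPair: each element of the range
// carries the instruction together with the pc it was decoded at.
struct InsnPcPair {
  uint32_t DexPc() const { return pc; }
  uint16_t Inst() const { return opcode; }
  uint32_t pc;
  uint16_t opcode;
};

int main() {
  std::vector<InsnPcPair> insns{{0, 0x12}, {2, 0x28}, {4, 0x0e}};
  // The range-based loop gets both pieces from the pair, mirroring the
  // code_generator.cc change above.
  for (const InsnPcPair& pair : insns) {
    std::printf("pc=%u opcode=0x%02x\n",
                static_cast<unsigned>(pair.DexPc()),
                static_cast<unsigned>(pair.Inst()));
  }
  return 0;
}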
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index b3fed079d8..60c7a8f2ef 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -9245,6 +9245,16 @@ void InstructionCodeGeneratorMIPS::VisitClassTableGet(HClassTableGet* instructio
}
}
+void LocationsBuilderMIPS::VisitIntermediateAddress(HIntermediateAddress* instruction
+ ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unreachable";
+}
+
+void InstructionCodeGeneratorMIPS::VisitIntermediateAddress(HIntermediateAddress* instruction
+ ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unreachable";
+}
+
#undef __
#undef QUICK_ENTRY_POINT
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 53a7f26c81..5292638c07 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -7147,5 +7147,15 @@ void InstructionCodeGeneratorMIPS64::VisitClassTableGet(HClassTableGet* instruct
}
}
+void LocationsBuilderMIPS64::VisitIntermediateAddress(HIntermediateAddress* instruction
+ ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unreachable";
+}
+
+void InstructionCodeGeneratorMIPS64::VisitIntermediateAddress(HIntermediateAddress* instruction
+ ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unreachable";
+}
+
} // namespace mips64
} // namespace art
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index f84dd0045e..a1500825f8 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -7825,6 +7825,16 @@ void CodeGeneratorX86::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_da
}
}
+void LocationsBuilderX86::VisitIntermediateAddress(HIntermediateAddress* instruction
+ ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unreachable";
+}
+
+void InstructionCodeGeneratorX86::VisitIntermediateAddress(HIntermediateAddress* instruction
+ ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unreachable";
+}
+
#undef __
} // namespace x86
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 16d1f183a1..db7e53e74b 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -6833,6 +6833,16 @@ void InstructionCodeGeneratorX86_64::VisitPackedSwitch(HPackedSwitch* switch_ins
__ jmp(temp_reg);
}
+void LocationsBuilderX86_64::VisitIntermediateAddress(HIntermediateAddress* instruction
+ ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unreachable";
+}
+
+void InstructionCodeGeneratorX86_64::VisitIntermediateAddress(HIntermediateAddress* instruction
+ ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unreachable";
+}
+
void CodeGeneratorX86_64::Load32BitValue(CpuRegister dest, int32_t value) {
if (value == 0) {
__ xorl(dest, dest);
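The four hunks above install the same fatal stub in every backend that never generates HIntermediateAddress: moving the node out of the ARM-specific instruction list (see the nodes.h hunk below) means every code generator's visitor must now declare a handler for it. A minimal sketch of that pattern (not ART code; names are hypothetical):

#include <cstdio>
#include <cstdlib>

struct HIntermediateAddressStub {};  // hypothetical stand-in for the real node

// Once an instruction kind is declared for all backends, each visitor must
// provide a handler for it, even if that backend can never encounter it.
struct InstructionVisitor {
  virtual ~InstructionVisitor() {}
  virtual void VisitIntermediateAddress(HIntermediateAddressStub* node) = 0;
};

struct X86LikeVisitor : InstructionVisitor {
  void VisitIntermediateAddress(HIntermediateAddressStub*) override {
    // Stands in for LOG(FATAL) << "Unreachable": the node previously lived
    // in the ARM-specific list, so hitting this stub on another backend
    // signals a compiler bug.
    std::fprintf(stderr, "Unreachable\n");
    std::abort();
  }
};

int main() {
  X86LikeVisitor visitor;
  (void)visitor;  // calling VisitIntermediateAddress here would abort
  return 0;
}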
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 902985e4ee..0f0be20961 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -382,16 +382,18 @@ ArenaBitVector* HInstructionBuilder::FindNativeDebugInfoLocations() {
dex_file_->DecodeDebugPositionInfo(&code_item_, Callback::Position, locations);
// Instruction-specific tweaks.
IterationRange<DexInstructionIterator> instructions = code_item_.Instructions();
- for (const Instruction& inst : instructions) {
- switch (inst.Opcode()) {
+ for (DexInstructionIterator it = instructions.begin(); it != instructions.end(); ++it) {
+ switch (it->Opcode()) {
case Instruction::MOVE_EXCEPTION: {
// Stop in native debugger after the exception has been moved.
// The compiler also expects the move at the start of basic block so
// we do not want to interfere by inserting native-debug-info before it.
- locations->ClearBit(inst.GetDexPc(code_item_.insns_));
- const Instruction* next = inst.Next();
- if (DexInstructionIterator(next) != instructions.end()) {
- locations->SetBit(next->GetDexPc(code_item_.insns_));
+ locations->ClearBit(it.DexPc());
+ DexInstructionIterator next = it;
+ ++next;
+ DCHECK(next != it);
+ if (next != instructions.end()) {
+ locations->SetBit(next.DexPc());
}
break;
}
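The lookahead above copies the iterator and advances the copy, which peeks at the next instruction without re-deriving it from the raw insns_ pointer. A standalone sketch of the copy-and-advance idiom (an analog, not ART code):

#include <cstdio>
#include <vector>

int main() {
  std::vector<int> pcs{0, 2, 4};
  for (std::vector<int>::const_iterator it = pcs.begin(); it != pcs.end(); ++it) {
    // Copying the iterator and incrementing the copy looks one element
    // ahead; the end() check guards the final element, mirroring the
    // MOVE_EXCEPTION handling above.
    std::vector<int>::const_iterator next = it;
    ++next;
    if (next != pcs.end()) {
      std::printf("pc=%d is followed by pc=%d\n", *it, *next);
    }
  }
  return 0;
}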
diff --git a/compiler/optimizing/load_store_analysis.h b/compiler/optimizing/load_store_analysis.h
index 5940ee755f..5a1df45914 100644
--- a/compiler/optimizing/load_store_analysis.h
+++ b/compiler/optimizing/load_store_analysis.h
@@ -196,8 +196,12 @@ class HeapLocationCollector : public HGraphVisitor {
}
HInstruction* HuntForOriginalReference(HInstruction* ref) const {
+ // An original reference can be transformed by instructions like:
+ // i0 NewArray
+ // i1 HInstruction(i0) <-- NullCheck, BoundType, IntermediateAddress.
+ // i2 ArrayGet(i1, index)
DCHECK(ref != nullptr);
- while (ref->IsNullCheck() || ref->IsBoundType()) {
+ while (ref->IsNullCheck() || ref->IsBoundType() || ref->IsIntermediateAddress()) {
ref = ref->InputAt(0);
}
return ref;
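Since the hunt now also looks through HIntermediateAddress, the loop strips any chain of pass-through nodes before comparing references. A standalone analog of the unwrapping loop (not ART code; the types are hypothetical):

#include <cassert>

// Wrapper nodes forward to their single input; the hunt follows input 0
// until it reaches a node that is not a known pass-through.
enum class Kind { kNewArray, kNullCheck, kBoundType, kIntermediateAddress };

struct Node {
  Kind kind;
  Node* input;  // input 0; null for the original reference
};

Node* HuntForOriginalReference(Node* ref) {
  assert(ref != nullptr);
  while (ref->kind == Kind::kNullCheck ||
         ref->kind == Kind::kBoundType ||
         ref->kind == Kind::kIntermediateAddress) {
    ref = ref->input;
  }
  return ref;
}

int main() {
  // Mirrors the chain in the comment above: a NewArray wrapped by BoundType,
  // NullCheck, and IntermediateAddress hunts back to the same reference.
  Node array{Kind::kNewArray, nullptr};
  Node bound{Kind::kBoundType, &array};
  Node check{Kind::kNullCheck, &bound};
  Node addr{Kind::kIntermediateAddress, &check};
  assert(HuntForOriginalReference(&addr) == &array);
  return 0;
}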
diff --git a/compiler/optimizing/load_store_analysis_test.cc b/compiler/optimizing/load_store_analysis_test.cc
index 86696d02a1..b41e1e4d00 100644
--- a/compiler/optimizing/load_store_analysis_test.cc
+++ b/compiler/optimizing/load_store_analysis_test.cc
@@ -389,4 +389,68 @@ TEST_F(LoadStoreAnalysisTest, ArrayIndexCalculationOverflowTest) {
ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
}
+TEST_F(LoadStoreAnalysisTest, TestHuntOriginalRef) {
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
+ graph_->AddBlock(entry);
+ graph_->SetEntryBlock(entry);
+
+ // Different ways in which the original array reference is transformed and passed to ArrayGet.
+ // ParameterValue --> ArrayGet
+ // ParameterValue --> BoundType --> ArrayGet
+ // ParameterValue --> BoundType --> NullCheck --> ArrayGet
+ // ParameterValue --> BoundType --> NullCheck --> IntermediateAddress --> ArrayGet
+ HInstruction* c1 = graph_->GetIntConstant(1);
+ HInstruction* array = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kReference);
+ HInstruction* array_get1 = new (GetAllocator()) HArrayGet(array,
+ c1,
+ DataType::Type::kInt32,
+ 0);
+
+ HInstruction* bound_type = new (GetAllocator()) HBoundType(array);
+ HInstruction* array_get2 = new (GetAllocator()) HArrayGet(bound_type,
+ c1,
+ DataType::Type::kInt32,
+ 0);
+
+ HInstruction* null_check = new (GetAllocator()) HNullCheck(bound_type, 0);
+ HInstruction* array_get3 = new (GetAllocator()) HArrayGet(null_check,
+ c1,
+ DataType::Type::kInt32,
+ 0);
+
+ HInstruction* inter_addr = new (GetAllocator()) HIntermediateAddress(null_check, c1, 0);
+ HInstruction* array_get4 = new (GetAllocator()) HArrayGet(inter_addr,
+ c1,
+ DataType::Type::kInt32,
+ 0);
+ entry->AddInstruction(array);
+ entry->AddInstruction(array_get1);
+ entry->AddInstruction(bound_type);
+ entry->AddInstruction(array_get2);
+ entry->AddInstruction(null_check);
+ entry->AddInstruction(array_get3);
+ entry->AddInstruction(inter_addr);
+ entry->AddInstruction(array_get4);
+
+ HeapLocationCollector heap_location_collector(graph_);
+ heap_location_collector.VisitBasicBlock(entry);
+
+ // Test that the HeapLocationCollector finds only ONE array location,
+ // no matter how many times the original reference has been transformed
+ // by BoundType, NullCheck, IntermediateAddress, etc.
+ ASSERT_EQ(heap_location_collector.GetNumberOfHeapLocations(), 1U);
+ size_t loc1 = heap_location_collector.GetArrayAccessHeapLocation(array, c1);
+ size_t loc2 = heap_location_collector.GetArrayAccessHeapLocation(bound_type, c1);
+ size_t loc3 = heap_location_collector.GetArrayAccessHeapLocation(null_check, c1);
+ size_t loc4 = heap_location_collector.GetArrayAccessHeapLocation(inter_addr, c1);
+ ASSERT_TRUE(loc1 != HeapLocationCollector::kHeapLocationNotFound);
+ ASSERT_EQ(loc1, loc2);
+ ASSERT_EQ(loc1, loc3);
+ ASSERT_EQ(loc1, loc4);
+}
+
} // namespace art
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 88609ea790..29c78a1e34 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1334,6 +1334,7 @@ class HLoopInformationOutwardIterator : public ValueObject {
M(InstanceFieldSet, Instruction) \
M(InstanceOf, Instruction) \
M(IntConstant, Constant) \
+ M(IntermediateAddress, Instruction) \
M(InvokeUnresolved, Invoke) \
M(InvokeInterface, Invoke) \
M(InvokeStaticOrDirect, Invoke) \
@@ -1418,7 +1419,6 @@ class HLoopInformationOutwardIterator : public ValueObject {
M(BitwiseNegatedRight, Instruction) \
M(DataProcWithShifterOp, Instruction) \
M(MultiplyAccumulate, Instruction) \
- M(IntermediateAddress, Instruction) \
M(IntermediateAddressIndex, Instruction)
#endif
@@ -6966,6 +6966,38 @@ class HParallelMove FINAL : public HTemplateInstruction<0> {
DISALLOW_COPY_AND_ASSIGN(HParallelMove);
};
+// This instruction computes an intermediate address pointing in the 'middle' of an object. The
+// result pointer cannot be handled by GC, so extra care is taken to make sure that this value is
+// never used across anything that can trigger GC.
+// The result of this instruction is not a pointer in the sense of `DataType::Type::kReference`.
+// So we represent it with the type `DataType::Type::kInt32`.
+class HIntermediateAddress FINAL : public HExpression<2> {
+ public:
+ HIntermediateAddress(HInstruction* base_address, HInstruction* offset, uint32_t dex_pc)
+ : HExpression(DataType::Type::kInt32, SideEffects::DependsOnGC(), dex_pc) {
+ DCHECK_EQ(DataType::Size(DataType::Type::kInt32),
+ DataType::Size(DataType::Type::kReference))
+ << "kPrimInt and kPrimNot have different sizes.";
+ SetRawInputAt(0, base_address);
+ SetRawInputAt(1, offset);
+ }
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ return true;
+ }
+ bool IsActualObject() const OVERRIDE { return false; }
+
+ HInstruction* GetBaseAddress() const { return InputAt(0); }
+ HInstruction* GetOffset() const { return InputAt(1); }
+
+ DECLARE_INSTRUCTION(IntermediateAddress);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HIntermediateAddress);
+};
+
+
} // namespace art
#include "nodes_vector.h"
diff --git a/compiler/optimizing/nodes_shared.h b/compiler/optimizing/nodes_shared.h
index 14cbf85c3f..7b4f5f7cbb 100644
--- a/compiler/optimizing/nodes_shared.h
+++ b/compiler/optimizing/nodes_shared.h
@@ -118,38 +118,6 @@ class HBitwiseNegatedRight FINAL : public HBinaryOperation {
DISALLOW_COPY_AND_ASSIGN(HBitwiseNegatedRight);
};
-
-// This instruction computes an intermediate address pointing in the 'middle' of an object. The
-// result pointer cannot be handled by GC, so extra care is taken to make sure that this value is
-// never used across anything that can trigger GC.
-// The result of this instruction is not a pointer in the sense of `DataType::Type::kreference`.
-// So we represent it by the type `DataType::Type::kInt`.
-class HIntermediateAddress FINAL : public HExpression<2> {
- public:
- HIntermediateAddress(HInstruction* base_address, HInstruction* offset, uint32_t dex_pc)
- : HExpression(DataType::Type::kInt32, SideEffects::DependsOnGC(), dex_pc) {
- DCHECK_EQ(DataType::Size(DataType::Type::kInt32),
- DataType::Size(DataType::Type::kReference))
- << "kPrimInt and kPrimNot have different sizes.";
- SetRawInputAt(0, base_address);
- SetRawInputAt(1, offset);
- }
-
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
- return true;
- }
- bool IsActualObject() const OVERRIDE { return false; }
-
- HInstruction* GetBaseAddress() const { return InputAt(0); }
- HInstruction* GetOffset() const { return InputAt(1); }
-
- DECLARE_INSTRUCTION(IntermediateAddress);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(HIntermediateAddress);
-};
-
// This instruction computes part of the array access offset (data and index offset).
//
// For array accesses the element address has the following structure: