Recognize getClass() in RTP.
Also always keep around the resolved field
in related HInstructions to avoid resolving it again
and again.
Test: test-art-host, 631-checker-get-class
Change-Id: I3bc6be11f3eb175c635e746006f39865947e0669
diff --git a/compiler/optimizing/gvn_test.cc b/compiler/optimizing/gvn_test.cc
index 437d35c..f8d37bd 100644
--- a/compiler/optimizing/gvn_test.cc
+++ b/compiler/optimizing/gvn_test.cc
@@ -28,7 +28,6 @@
TEST_F(GVNTest, LocalFieldElimination) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- ScopedNullHandle<mirror::DexCache> dex_cache;
HGraph* graph = CreateGraph(&allocator);
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
@@ -45,53 +44,53 @@
entry->AddSuccessor(block);
block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimNot,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimNot,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
HInstruction* to_remove = block->GetLastInstruction();
block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimNot,
MemberOffset(43),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
HInstruction* different_offset = block->GetLastInstruction();
// Kill the value.
block->AddInstruction(new (&allocator) HInstanceFieldSet(parameter,
parameter,
+ nullptr,
Primitive::kPrimNot,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimNot,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
HInstruction* use_after_kill = block->GetLastInstruction();
block->AddInstruction(new (&allocator) HExit());
@@ -113,7 +112,6 @@
TEST_F(GVNTest, GlobalFieldElimination) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- ScopedNullHandle<mirror::DexCache> dex_cache;
HGraph* graph = CreateGraph(&allocator);
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
@@ -129,13 +127,13 @@
graph->AddBlock(block);
entry->AddSuccessor(block);
block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimBoolean,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
block->AddInstruction(new (&allocator) HIf(block->GetLastInstruction()));
@@ -152,33 +150,33 @@
else_->AddSuccessor(join);
then->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimBoolean,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
then->AddInstruction(new (&allocator) HGoto());
else_->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimBoolean,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
else_->AddInstruction(new (&allocator) HGoto());
join->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimBoolean,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
join->AddInstruction(new (&allocator) HExit());
@@ -196,7 +194,6 @@
TEST_F(GVNTest, LoopFieldElimination) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- ScopedNullHandle<mirror::DexCache> dex_cache;
HGraph* graph = CreateGraph(&allocator);
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
@@ -213,13 +210,13 @@
graph->AddBlock(block);
entry->AddSuccessor(block);
block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimBoolean,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
block->AddInstruction(new (&allocator) HGoto());
@@ -236,13 +233,13 @@
loop_body->AddSuccessor(loop_header);
loop_header->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimBoolean,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
HInstruction* field_get_in_loop_header = loop_header->GetLastInstruction();
loop_header->AddInstruction(new (&allocator) HIf(block->GetLastInstruction()));
@@ -251,35 +248,35 @@
// and the body to be GVN'ed.
loop_body->AddInstruction(new (&allocator) HInstanceFieldSet(parameter,
parameter,
+ nullptr,
Primitive::kPrimBoolean,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
HInstruction* field_set = loop_body->GetLastInstruction();
loop_body->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimBoolean,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
HInstruction* field_get_in_loop_body = loop_body->GetLastInstruction();
loop_body->AddInstruction(new (&allocator) HGoto());
exit->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimBoolean,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
HInstruction* field_get_in_exit = exit->GetLastInstruction();
exit->AddInstruction(new (&allocator) HExit());
@@ -319,7 +316,6 @@
TEST_F(GVNTest, LoopSideEffects) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- ScopedNullHandle<mirror::DexCache> dex_cache;
static const SideEffects kCanTriggerGC = SideEffects::CanTriggerGC();
@@ -376,13 +372,13 @@
// Make one block with a side effect.
entry->AddInstruction(new (&allocator) HInstanceFieldSet(parameter,
parameter,
+ nullptr,
Primitive::kPrimNot,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
SideEffectsAnalysis side_effects(graph);
@@ -401,13 +397,13 @@
outer_loop_body->InsertInstructionBefore(
new (&allocator) HInstanceFieldSet(parameter,
parameter,
+ nullptr,
Primitive::kPrimNot,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0),
outer_loop_body->GetLastInstruction());
@@ -427,13 +423,13 @@
inner_loop_body->InsertInstructionBefore(
new (&allocator) HInstanceFieldSet(parameter,
parameter,
+ nullptr,
Primitive::kPrimNot,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0),
inner_loop_body->GetLastInstruction());
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index d847879..1bf1fd4 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -428,13 +428,13 @@
DCHECK_EQ(std::string(field->GetName()), "shadow$_klass_");
HInstanceFieldGet* result = new (graph_->GetArena()) HInstanceFieldGet(
receiver,
+ field,
Primitive::kPrimNot,
field->GetOffset(),
field->IsVolatile(),
field->GetDexFieldIndex(),
field->GetDeclaringClass()->GetDexClassDefIndex(),
*field->GetDexFile(),
- handles_->NewHandle(field->GetDexCache()),
dex_pc);
// The class of a field is effectively final, and does not have any memory dependencies.
result->SetSideEffects(SideEffects::None());
@@ -613,6 +613,9 @@
} else {
one_target_inlined = true;
+ VLOG(compiler) << "Polymorphic call to " << ArtMethod::PrettyMethod(resolved_method)
+ << " has inlined " << ArtMethod::PrettyMethod(method);
+
// If we have inlined all targets before, and this receiver is the last seen,
// we deoptimize instead of keeping the original invoke instruction.
bool deoptimize = all_targets_inlined &&
@@ -650,6 +653,7 @@
<< " of its targets could be inlined";
return false;
}
+
MaybeRecordStat(kInlinedPolymorphicCall);
// Run type propagation to get the guards typed.
@@ -1150,13 +1154,13 @@
DCHECK(resolved_field != nullptr);
HInstanceFieldGet* iget = new (graph_->GetArena()) HInstanceFieldGet(
obj,
+ resolved_field,
resolved_field->GetTypeAsPrimitiveType(),
resolved_field->GetOffset(),
resolved_field->IsVolatile(),
field_index,
resolved_field->GetDeclaringClass()->GetDexClassDefIndex(),
*dex_cache->GetDexFile(),
- dex_cache,
// Read barrier generates a runtime call in slow path and we need a valid
// dex pc for the associated stack map. 0 is bogus but valid. Bug: 26854537.
/* dex_pc */ 0);
@@ -1179,13 +1183,13 @@
HInstanceFieldSet* iput = new (graph_->GetArena()) HInstanceFieldSet(
obj,
value,
+ resolved_field,
resolved_field->GetTypeAsPrimitiveType(),
resolved_field->GetOffset(),
resolved_field->IsVolatile(),
field_index,
resolved_field->GetDeclaringClass()->GetDexClassDefIndex(),
*dex_cache->GetDexFile(),
- dex_cache,
// Read barrier generates a runtime call in slow path and we need a valid
// dex pc for the associated stack map. 0 is bogus but valid. Bug: 26854537.
/* dex_pc */ 0);
@@ -1559,6 +1563,13 @@
/* declared_can_be_null */ true,
return_replacement)) {
return true;
+ } else if (return_replacement->IsInstanceFieldGet()) {
+ HInstanceFieldGet* field_get = return_replacement->AsInstanceFieldGet();
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ if (field_get->GetFieldInfo().GetField() ==
+ class_linker->GetClassRoot(ClassLinker::kJavaLangObject)->GetInstanceField(0)) {
+ return true;
+ }
}
} else if (return_replacement->IsInstanceOf()) {
// Inlining InstanceOf into an If may put a tighter bound on reference types.
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index af8e2c8..768b1d8 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -1235,13 +1235,13 @@
uint16_t class_def_index = resolved_field->GetDeclaringClass()->GetDexClassDefIndex();
field_set = new (arena_) HInstanceFieldSet(object,
value,
+ resolved_field,
field_type,
resolved_field->GetOffset(),
resolved_field->IsVolatile(),
field_index,
class_def_index,
*dex_file_,
- dex_compilation_unit_->GetDexCache(),
dex_pc);
}
AppendInstruction(field_set);
@@ -1256,13 +1256,13 @@
} else {
uint16_t class_def_index = resolved_field->GetDeclaringClass()->GetDexClassDefIndex();
field_get = new (arena_) HInstanceFieldGet(object,
+ resolved_field,
field_type,
resolved_field->GetOffset(),
resolved_field->IsVolatile(),
field_index,
class_def_index,
*dex_file_,
- dex_compilation_unit_->GetDexCache(),
dex_pc);
}
AppendInstruction(field_get);
@@ -1311,9 +1311,9 @@
}
void HInstructionBuilder::BuildUnresolvedStaticFieldAccess(const Instruction& instruction,
- uint32_t dex_pc,
- bool is_put,
- Primitive::Type field_type) {
+ uint32_t dex_pc,
+ bool is_put,
+ Primitive::Type field_type) {
uint32_t source_or_dest_reg = instruction.VRegA_21c();
uint16_t field_index = instruction.VRegB_21c();
@@ -1400,23 +1400,23 @@
DCHECK_EQ(HPhi::ToPhiType(value->GetType()), HPhi::ToPhiType(field_type));
AppendInstruction(new (arena_) HStaticFieldSet(cls,
value,
+ resolved_field,
field_type,
resolved_field->GetOffset(),
resolved_field->IsVolatile(),
field_index,
class_def_index,
*dex_file_,
- dex_cache_,
dex_pc));
} else {
AppendInstruction(new (arena_) HStaticFieldGet(cls,
+ resolved_field,
field_type,
resolved_field->GetOffset(),
resolved_field->IsVolatile(),
field_index,
class_def_index,
*dex_file_,
- dex_cache_,
dex_pc));
UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction());
}
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 439e3b6..1d3c5bf 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -1118,7 +1118,66 @@
VisitCondition(condition);
}
+// Recognize the following pattern:
+// obj.getClass() ==/!= Foo.class
+// And replace it with a constant value if the type of `obj` is statically known.
+static bool RecognizeAndSimplifyClassCheck(HCondition* condition) {
+ HInstruction* input_one = condition->InputAt(0);
+ HInstruction* input_two = condition->InputAt(1);
+ HLoadClass* load_class = input_one->IsLoadClass()
+ ? input_one->AsLoadClass()
+ : input_two->AsLoadClass();
+ if (load_class == nullptr) {
+ return false;
+ }
+
+ ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI();
+ if (!class_rti.IsValid()) {
+ // Unresolved class.
+ return false;
+ }
+
+ HInstanceFieldGet* field_get = (load_class == input_one)
+ ? input_two->AsInstanceFieldGet()
+ : input_one->AsInstanceFieldGet();
+ if (field_get == nullptr) {
+ return false;
+ }
+
+ HInstruction* receiver = field_get->InputAt(0);
+ ReferenceTypeInfo receiver_type = receiver->GetReferenceTypeInfo();
+ if (!receiver_type.IsExact()) {
+ return false;
+ }
+
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ ArtField* field = class_linker->GetClassRoot(ClassLinker::kJavaLangObject)->GetInstanceField(0);
+ DCHECK_EQ(std::string(field->GetName()), "shadow$_klass_");
+ if (field_get->GetFieldInfo().GetField() != field) {
+ return false;
+ }
+
+ // We can replace the compare.
+ int value = 0;
+ if (receiver_type.IsEqual(class_rti)) {
+ value = condition->IsEqual() ? 1 : 0;
+ } else {
+ value = condition->IsNotEqual() ? 1 : 0;
+ }
+ condition->ReplaceWith(condition->GetBlock()->GetGraph()->GetIntConstant(value));
+ return true;
+ }
+}
+
void InstructionSimplifierVisitor::VisitCondition(HCondition* condition) {
+ if (condition->IsEqual() || condition->IsNotEqual()) {
+ if (RecognizeAndSimplifyClassCheck(condition)) {
+ return;
+ }
+ }
+
// Reverse condition if left is constant. Our code generators prefer constant
// on the right hand side.
if (condition->GetLeft()->IsConstant() && !condition->GetRight()->IsConstant()) {
diff --git a/compiler/optimizing/licm_test.cc b/compiler/optimizing/licm_test.cc
index 8c34dc6..5bcfa4c 100644
--- a/compiler/optimizing/licm_test.cc
+++ b/compiler/optimizing/licm_test.cc
@@ -111,20 +111,19 @@
BuildLoop();
// Populate the loop with instructions: set/get field with different types.
- ScopedNullHandle<mirror::DexCache> dex_cache;
HInstruction* get_field = new (&allocator_) HInstanceFieldGet(parameter_,
+ nullptr,
Primitive::kPrimLong,
MemberOffset(10),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph_->GetDexFile(),
- dex_cache,
0);
loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction());
HInstruction* set_field = new (&allocator_) HInstanceFieldSet(
- parameter_, int_constant_, Primitive::kPrimInt, MemberOffset(20),
- false, kUnknownFieldIndex, kUnknownClassDefIndex, graph_->GetDexFile(), dex_cache, 0);
+ parameter_, int_constant_, nullptr, Primitive::kPrimInt, MemberOffset(20),
+ false, kUnknownFieldIndex, kUnknownClassDefIndex, graph_->GetDexFile(), 0);
loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction());
EXPECT_EQ(get_field->GetBlock(), loop_body_);
@@ -140,24 +139,24 @@
// Populate the loop with instructions: set/get field with same types.
ScopedNullHandle<mirror::DexCache> dex_cache;
HInstruction* get_field = new (&allocator_) HInstanceFieldGet(parameter_,
+ nullptr,
Primitive::kPrimLong,
MemberOffset(10),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph_->GetDexFile(),
- dex_cache,
0);
loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction());
HInstruction* set_field = new (&allocator_) HInstanceFieldSet(parameter_,
get_field,
+ nullptr,
Primitive::kPrimLong,
MemberOffset(10),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph_->GetDexFile(),
- dex_cache,
0);
loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction());
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index afa17ce..6c4f3dd 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -171,6 +171,7 @@
friend class HGraph;
friend class HInstruction;
friend class HInstructionIterator;
+ friend class HInstructionIteratorHandleChanges;
friend class HBackwardInstructionIterator;
DISALLOW_COPY_AND_ASSIGN(HInstructionList);
@@ -2312,6 +2313,9 @@
};
std::ostream& operator<<(std::ostream& os, const HInstruction::InstructionKind& rhs);
+// Iterates over the instructions, while preserving the next instruction
+// in case the current instruction gets removed from the list by the user
+// of this iterator.
class HInstructionIterator : public ValueObject {
public:
explicit HInstructionIterator(const HInstructionList& instructions)
@@ -2333,6 +2337,28 @@
DISALLOW_COPY_AND_ASSIGN(HInstructionIterator);
};
+// Iterates over the instructions without saving the next instruction,
+// therefore handling changes in the graph potentially made by the user
+// of this iterator.
+class HInstructionIteratorHandleChanges : public ValueObject {
+ public:
+ explicit HInstructionIteratorHandleChanges(const HInstructionList& instructions)
+ : instruction_(instructions.first_instruction_) {
+ }
+
+ bool Done() const { return instruction_ == nullptr; }
+ HInstruction* Current() const { return instruction_; }
+ void Advance() {
+ instruction_ = instruction_->GetNext();
+ }
+
+ private:
+ HInstruction* instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(HInstructionIteratorHandleChanges);
+};
+
+
class HBackwardInstructionIterator : public ValueObject {
public:
explicit HBackwardInstructionIterator(const HInstructionList& instructions)
@@ -5056,60 +5082,62 @@
DISALLOW_COPY_AND_ASSIGN(HNullCheck);
};
+// Embeds an ArtField and all the information required by the compiler. We cache
+// that information to avoid requiring the mutator lock every time we need it.
class FieldInfo : public ValueObject {
public:
- FieldInfo(MemberOffset field_offset,
+ FieldInfo(ArtField* field,
+ MemberOffset field_offset,
Primitive::Type field_type,
bool is_volatile,
uint32_t index,
uint16_t declaring_class_def_index,
- const DexFile& dex_file,
- Handle<mirror::DexCache> dex_cache)
- : field_offset_(field_offset),
+ const DexFile& dex_file)
+ : field_(field),
+ field_offset_(field_offset),
field_type_(field_type),
is_volatile_(is_volatile),
index_(index),
declaring_class_def_index_(declaring_class_def_index),
- dex_file_(dex_file),
- dex_cache_(dex_cache) {}
+ dex_file_(dex_file) {}
+ ArtField* GetField() const { return field_; }
MemberOffset GetFieldOffset() const { return field_offset_; }
Primitive::Type GetFieldType() const { return field_type_; }
uint32_t GetFieldIndex() const { return index_; }
uint16_t GetDeclaringClassDefIndex() const { return declaring_class_def_index_;}
const DexFile& GetDexFile() const { return dex_file_; }
bool IsVolatile() const { return is_volatile_; }
- Handle<mirror::DexCache> GetDexCache() const { return dex_cache_; }
private:
+ ArtField* const field_;
const MemberOffset field_offset_;
const Primitive::Type field_type_;
const bool is_volatile_;
const uint32_t index_;
const uint16_t declaring_class_def_index_;
const DexFile& dex_file_;
- const Handle<mirror::DexCache> dex_cache_;
};
class HInstanceFieldGet FINAL : public HExpression<1> {
public:
HInstanceFieldGet(HInstruction* value,
+ ArtField* field,
Primitive::Type field_type,
MemberOffset field_offset,
bool is_volatile,
uint32_t field_idx,
uint16_t declaring_class_def_index,
const DexFile& dex_file,
- Handle<mirror::DexCache> dex_cache,
uint32_t dex_pc)
: HExpression(field_type, SideEffects::FieldReadOfType(field_type, is_volatile), dex_pc),
- field_info_(field_offset,
+ field_info_(field,
+ field_offset,
field_type,
is_volatile,
field_idx,
declaring_class_def_index,
- dex_file,
- dex_cache) {
+ dex_file) {
SetRawInputAt(0, value);
}
@@ -5145,22 +5173,22 @@
public:
HInstanceFieldSet(HInstruction* object,
HInstruction* value,
+ ArtField* field,
Primitive::Type field_type,
MemberOffset field_offset,
bool is_volatile,
uint32_t field_idx,
uint16_t declaring_class_def_index,
const DexFile& dex_file,
- Handle<mirror::DexCache> dex_cache,
uint32_t dex_pc)
: HTemplateInstruction(SideEffects::FieldWriteOfType(field_type, is_volatile), dex_pc),
- field_info_(field_offset,
+ field_info_(field,
+ field_offset,
field_type,
is_volatile,
field_idx,
declaring_class_def_index,
- dex_file,
- dex_cache) {
+ dex_file) {
SetPackedFlag<kFlagValueCanBeNull>(true);
SetRawInputAt(0, object);
SetRawInputAt(1, value);
@@ -5949,22 +5977,22 @@
class HStaticFieldGet FINAL : public HExpression<1> {
public:
HStaticFieldGet(HInstruction* cls,
+ ArtField* field,
Primitive::Type field_type,
MemberOffset field_offset,
bool is_volatile,
uint32_t field_idx,
uint16_t declaring_class_def_index,
const DexFile& dex_file,
- Handle<mirror::DexCache> dex_cache,
uint32_t dex_pc)
: HExpression(field_type, SideEffects::FieldReadOfType(field_type, is_volatile), dex_pc),
- field_info_(field_offset,
+ field_info_(field,
+ field_offset,
field_type,
is_volatile,
field_idx,
declaring_class_def_index,
- dex_file,
- dex_cache) {
+ dex_file) {
SetRawInputAt(0, cls);
}
@@ -5997,22 +6025,22 @@
public:
HStaticFieldSet(HInstruction* cls,
HInstruction* value,
+ ArtField* field,
Primitive::Type field_type,
MemberOffset field_offset,
bool is_volatile,
uint32_t field_idx,
uint16_t declaring_class_def_index,
const DexFile& dex_file,
- Handle<mirror::DexCache> dex_cache,
uint32_t dex_pc)
: HTemplateInstruction(SideEffects::FieldWriteOfType(field_type, is_volatile), dex_pc),
- field_info_(field_offset,
+ field_info_(field,
+ field_offset,
field_type,
is_volatile,
field_idx,
declaring_class_def_index,
- dex_file,
- dex_cache) {
+ dex_file) {
SetPackedFlag<kFlagValueCanBeNull>(true);
SetRawInputAt(0, cls);
SetRawInputAt(1, value);
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 33b3875..f8a4469 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -76,6 +76,7 @@
worklist_(worklist),
is_first_run_(is_first_run) {}
+ void VisitDeoptimize(HDeoptimize* deopt) OVERRIDE;
void VisitNewInstance(HNewInstance* new_instance) OVERRIDE;
void VisitLoadClass(HLoadClass* load_class) OVERRIDE;
void VisitClinitCheck(HClinitCheck* clinit_check) OVERRIDE;
@@ -151,38 +152,6 @@
instruction->Accept(&visitor);
}
-void ReferenceTypePropagation::Run() {
- worklist_.reserve(kDefaultWorklistSize);
-
- // To properly propagate type info we need to visit in the dominator-based order.
- // Reverse post order guarantees a node's dominators are visited first.
- // We take advantage of this order in `VisitBasicBlock`.
- for (HBasicBlock* block : graph_->GetReversePostOrder()) {
- VisitBasicBlock(block);
- }
-
- ProcessWorklist();
- ValidateTypes();
-}
-
-void ReferenceTypePropagation::VisitBasicBlock(HBasicBlock* block) {
- RTPVisitor visitor(graph_, hint_dex_cache_, &handle_cache_, &worklist_, is_first_run_);
- // Handle Phis first as there might be instructions in the same block who depend on them.
- for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
- VisitPhi(it.Current()->AsPhi());
- }
-
- // Handle instructions.
- for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- HInstruction* instr = it.Current();
- instr->Accept(&visitor);
- }
-
- // Add extra nodes to bound types.
- BoundTypeForIfNotNull(block);
- BoundTypeForIfInstanceOf(block);
-}
-
// Check if we should create a bound type for the given object at the specified
// position. Because of inlining and the fact we run RTP more than once and we
// might have a HBoundType already. If we do, we should not create a new one.
@@ -225,6 +194,153 @@
return false;
}
+// Helper method to bound the type of `receiver` for all instructions dominated
+// by `start_block`, or `start_instruction` if `start_block` is null. The new
+// bound type will have its upper bound be `class_rti`.
+static void BoundTypeIn(HInstruction* receiver,
+ HBasicBlock* start_block,
+ HInstruction* start_instruction,
+ const ReferenceTypeInfo& class_rti) {
+ // We only need to bound the type if we have uses in the relevant block.
+ // So start with null and create the HBoundType lazily, only if it's needed.
+ HBoundType* bound_type = nullptr;
+ DCHECK(!receiver->IsLoadClass()) << "We should not replace HLoadClass instructions";
+ const HUseList<HInstruction*>& uses = receiver->GetUses();
+ for (auto it = uses.begin(), end = uses.end(); it != end; /* ++it below */) {
+ HInstruction* user = it->GetUser();
+ size_t index = it->GetIndex();
+ // Increment `it` now because `*it` may disappear thanks to user->ReplaceInput().
+ ++it;
+ bool dominates = (start_instruction != nullptr)
+ ? start_instruction->StrictlyDominates(user)
+ : start_block->Dominates(user->GetBlock());
+ if (!dominates) {
+ continue;
+ }
+ if (bound_type == nullptr) {
+ ScopedObjectAccess soa(Thread::Current());
+ HInstruction* insert_point = (start_instruction != nullptr)
+ ? start_instruction->GetNext()
+ : start_block->GetFirstInstruction();
+ if (ShouldCreateBoundType(
+ insert_point, receiver, class_rti, start_instruction, start_block)) {
+ bound_type = new (receiver->GetBlock()->GetGraph()->GetArena()) HBoundType(receiver);
+ bound_type->SetUpperBound(class_rti, /* bound_can_be_null */ false);
+ start_block->InsertInstructionBefore(bound_type, insert_point);
+ // To comply with the RTP algorithm, don't type the bound type just yet, it will
+ // be handled in RTPVisitor::VisitBoundType.
+ } else {
+ // We already have a bound type on the position we would need to insert
+ // the new one. The existing bound type should dominate all the users
+ // (dchecked) so there's no need to continue.
+ break;
+ }
+ }
+ user->ReplaceInput(bound_type, index);
+ }
+ // If the receiver is a null check, also bound the type of the actual
+ // receiver.
+ if (receiver->IsNullCheck()) {
+ BoundTypeIn(receiver->InputAt(0), start_block, start_instruction, class_rti);
+ }
+}
+
+// Recognize the patterns:
+//   if (obj.shadow$_klass_ == Foo.class) ...
+//   deoptimize if (obj.shadow$_klass_ == Foo.class)
+static void BoundTypeForClassCheck(HInstruction* check) {
+  if (!check->IsIf() && !check->IsDeoptimize()) {
+    return;
+  }
+  HInstruction* compare = check->InputAt(0);
+  if (!compare->IsEqual() && !compare->IsNotEqual()) {
+    return;
+  }
+  HInstruction* input_one = compare->InputAt(0);
+  HInstruction* input_two = compare->InputAt(1);
+  HLoadClass* load_class = input_one->IsLoadClass()
+      ? input_one->AsLoadClass()
+      : input_two->AsLoadClass();
+  if (load_class == nullptr) {
+    return;
+  }
+
+  ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI();
+  if (!class_rti.IsValid()) {
+    // We have loaded an unresolved class. Don't bother bounding the type.
+    return;
+  }
+
+  HInstanceFieldGet* field_get = (load_class == input_one)
+      ? input_two->AsInstanceFieldGet()
+      : input_one->AsInstanceFieldGet();
+  if (field_get == nullptr) {
+    return;
+  }
+  HInstruction* receiver = field_get->InputAt(0);
+  ReferenceTypeInfo receiver_type = receiver->GetReferenceTypeInfo();
+  if (receiver_type.IsExact()) {
+    // If we already know the receiver type, don't bother updating its users.
+    return;
+  }
+
+  {
+    ScopedObjectAccess soa(Thread::Current());
+    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+    ArtField* field = class_linker->GetClassRoot(ClassLinker::kJavaLangObject)->GetInstanceField(0);
+    DCHECK_EQ(std::string(field->GetName()), "shadow$_klass_");
+    if (field_get->GetFieldInfo().GetField() != field) {
+      return;
+    }
+  }
+
+  if (check->IsIf()) {
+    // The equality polarity lives on the comparison, not on the HIf itself.
+    HBasicBlock* trueBlock = compare->IsEqual()
+        ? check->AsIf()->IfTrueSuccessor()
+        : check->AsIf()->IfFalseSuccessor();
+    BoundTypeIn(receiver, trueBlock, /* start_instruction */ nullptr, class_rti);
+  } else {
+    DCHECK(check->IsDeoptimize());
+    if (compare->IsEqual()) {
+      BoundTypeIn(receiver, check->GetBlock(), check, class_rti);
+    }
+  }
+}
+
+void ReferenceTypePropagation::Run() {
+ worklist_.reserve(kDefaultWorklistSize);
+
+ // To properly propagate type info we need to visit in the dominator-based order.
+ // Reverse post order guarantees a node's dominators are visited first.
+ // We take advantage of this order in `VisitBasicBlock`.
+ for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+ VisitBasicBlock(block);
+ }
+
+ ProcessWorklist();
+ ValidateTypes();
+}
+
+void ReferenceTypePropagation::VisitBasicBlock(HBasicBlock* block) {
+ RTPVisitor visitor(graph_, hint_dex_cache_, &handle_cache_, &worklist_, is_first_run_);
+ // Handle Phis first as there might be instructions in the same block who depend on them.
+ for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
+ VisitPhi(it.Current()->AsPhi());
+ }
+
+ // Handle instructions. Since RTP may add HBoundType instructions just after the
+ // last visited instruction, use `HInstructionIteratorHandleChanges` iterator.
+ for (HInstructionIteratorHandleChanges it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
+ instr->Accept(&visitor);
+ }
+
+ // Add extra nodes to bound types.
+ BoundTypeForIfNotNull(block);
+ BoundTypeForIfInstanceOf(block);
+ BoundTypeForClassCheck(block->GetLastInstruction());
+}
+
void ReferenceTypePropagation::BoundTypeForIfNotNull(HBasicBlock* block) {
HIf* ifInstruction = block->GetLastInstruction()->AsIf();
if (ifInstruction == nullptr) {
@@ -254,40 +370,14 @@
// We only need to bound the type if we have uses in the relevant block.
// So start with null and create the HBoundType lazily, only if it's needed.
- HBoundType* bound_type = nullptr;
HBasicBlock* notNullBlock = ifInput->IsNotEqual()
? ifInstruction->IfTrueSuccessor()
: ifInstruction->IfFalseSuccessor();
- const HUseList<HInstruction*>& uses = obj->GetUses();
- for (auto it = uses.begin(), end = uses.end(); it != end; /* ++it below */) {
- HInstruction* user = it->GetUser();
- size_t index = it->GetIndex();
- // Increment `it` now because `*it` may disappear thanks to user->ReplaceInput().
- ++it;
- if (notNullBlock->Dominates(user->GetBlock())) {
- if (bound_type == nullptr) {
- ScopedObjectAccess soa(Thread::Current());
- HInstruction* insert_point = notNullBlock->GetFirstInstruction();
- ReferenceTypeInfo object_rti = ReferenceTypeInfo::Create(
- handle_cache_.GetObjectClassHandle(), /* is_exact */ false);
- if (ShouldCreateBoundType(insert_point, obj, object_rti, nullptr, notNullBlock)) {
- bound_type = new (graph_->GetArena()) HBoundType(obj);
- bound_type->SetUpperBound(object_rti, /* bound_can_be_null */ false);
- if (obj->GetReferenceTypeInfo().IsValid()) {
- bound_type->SetReferenceTypeInfo(obj->GetReferenceTypeInfo());
- }
- notNullBlock->InsertInstructionBefore(bound_type, insert_point);
- } else {
- // We already have a bound type on the position we would need to insert
- // the new one. The existing bound type should dominate all the users
- // (dchecked) so there's no need to continue.
- break;
- }
- }
- user->ReplaceInput(bound_type, index);
- }
- }
+ ReferenceTypeInfo object_rti = ReferenceTypeInfo::Create(
+ handle_cache_.GetObjectClassHandle(), /* is_exact */ false);
+
+ BoundTypeIn(obj, notNullBlock, /* start_instruction */ nullptr, object_rti);
}
// Returns true if one of the patterns below has been recognized. If so, the
@@ -378,15 +468,10 @@
HLoadClass* load_class = instanceOf->InputAt(1)->AsLoadClass();
ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI();
- {
- if (!class_rti.IsValid()) {
- // He have loaded an unresolved class. Don't bother bounding the type.
- return;
- }
+ if (!class_rti.IsValid()) {
+    // We have loaded an unresolved class. Don't bother bounding the type.
+ return;
}
- // We only need to bound the type if we have uses in the relevant block.
- // So start with null and create the HBoundType lazily, only if it's needed.
- HBoundType* bound_type = nullptr;
HInstruction* obj = instanceOf->InputAt(0);
if (obj->GetReferenceTypeInfo().IsExact() && !obj->IsPhi()) {
@@ -398,33 +483,14 @@
// input.
return;
}
- DCHECK(!obj->IsLoadClass()) << "We should not replace HLoadClass instructions";
- const HUseList<HInstruction*>& uses = obj->GetUses();
- for (auto it = uses.begin(), end = uses.end(); it != end; /* ++it below */) {
- HInstruction* user = it->GetUser();
- size_t index = it->GetIndex();
- // Increment `it` now because `*it` may disappear thanks to user->ReplaceInput().
- ++it;
- if (instanceOfTrueBlock->Dominates(user->GetBlock())) {
- if (bound_type == nullptr) {
- ScopedObjectAccess soa(Thread::Current());
- HInstruction* insert_point = instanceOfTrueBlock->GetFirstInstruction();
- if (ShouldCreateBoundType(insert_point, obj, class_rti, nullptr, instanceOfTrueBlock)) {
- bound_type = new (graph_->GetArena()) HBoundType(obj);
- bool is_exact = class_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes();
- bound_type->SetUpperBound(ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), is_exact),
- /* InstanceOf fails for null. */ false);
- instanceOfTrueBlock->InsertInstructionBefore(bound_type, insert_point);
- } else {
- // We already have a bound type on the position we would need to insert
- // the new one. The existing bound type should dominate all the users
- // (dchecked) so there's no need to continue.
- break;
- }
- }
- user->ReplaceInput(bound_type, index);
+
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ if (!class_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes()) {
+ class_rti = ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ false);
}
}
+ BoundTypeIn(obj, instanceOfTrueBlock, /* start_instruction */ nullptr, class_rti);
}
void ReferenceTypePropagation::RTPVisitor::SetClassAsTypeInfo(HInstruction* instr,
@@ -464,6 +530,10 @@
}
}
+void ReferenceTypePropagation::RTPVisitor::VisitDeoptimize(HDeoptimize* instr) {
+ BoundTypeForClassCheck(instr);
+}
+
void ReferenceTypePropagation::RTPVisitor::UpdateReferenceTypeInfo(HInstruction* instr,
dex::TypeIndex type_idx,
const DexFile& dex_file,
@@ -515,16 +585,9 @@
ScopedObjectAccess soa(Thread::Current());
ObjPtr<mirror::Class> klass;
- // The field index is unknown only during tests.
- if (info.GetFieldIndex() != kUnknownFieldIndex) {
- ClassLinker* cl = Runtime::Current()->GetClassLinker();
- ArtField* field = cl->GetResolvedField(info.GetFieldIndex(),
- MakeObjPtr(info.GetDexCache().Get()));
- // TODO: There are certain cases where we can't resolve the field.
- // b/21914925 is open to keep track of a repro case for this issue.
- if (field != nullptr) {
- klass = field->GetType<false>();
- }
+ // The field is unknown only during tests.
+ if (info.GetField() != nullptr) {
+ klass = info.GetField()->GetType<false>();
}
SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 559f409..2227872 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -492,7 +492,6 @@
HInstruction** input2) {
HGraph* graph = CreateGraph(allocator);
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
- ScopedNullHandle<mirror::DexCache> dex_cache;
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
HInstruction* parameter = new (allocator) HParameterValue(
@@ -504,13 +503,13 @@
entry->AddSuccessor(block);
HInstruction* test = new (allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimBoolean,
MemberOffset(22),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0);
block->AddInstruction(test);
block->AddInstruction(new (allocator) HIf(test));
@@ -531,22 +530,22 @@
*phi = new (allocator) HPhi(allocator, 0, 0, Primitive::kPrimInt);
join->AddPhi(*phi);
*input1 = new (allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimInt,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0);
*input2 = new (allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimInt,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0);
then->AddInstruction(*input1);
else_->AddInstruction(*input2);
@@ -654,7 +653,6 @@
HInstruction** field,
HInstruction** ret) {
HGraph* graph = CreateGraph(allocator);
- ScopedNullHandle<mirror::DexCache> dex_cache;
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
@@ -667,13 +665,13 @@
entry->AddSuccessor(block);
*field = new (allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimInt,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0);
block->AddInstruction(*field);
*ret = new (allocator) HReturn(*field);