-rw-r--r--  compiler/dex/mir_optimization.cc  45
-rw-r--r--  compiler/dex/quick/quick_compiler.cc  12
-rw-r--r--  compiler/dex/quick/quick_compiler.h  2
-rw-r--r--  compiler/dex/verified_method.cc  10
-rw-r--r--  compiler/dex/verified_method.h  12
-rw-r--r--  compiler/optimizing/constant_folding.cc  124
-rw-r--r--  compiler/optimizing/constant_folding.h  9
-rw-r--r--  compiler/optimizing/graph_checker.cc  5
-rw-r--r--  compiler/optimizing/inliner.cc  220
-rw-r--r--  compiler/optimizing/inliner.h  59
-rw-r--r--  compiler/optimizing/instruction_simplifier.h  7
-rw-r--r--  compiler/optimizing/intrinsics.cc  1
-rw-r--r--  compiler/optimizing/intrinsics.h  4
-rw-r--r--  compiler/optimizing/licm.cc  1
-rw-r--r--  compiler/optimizing/licm.h  5
-rw-r--r--  compiler/optimizing/licm_test.cc  2
-rw-r--r--  compiler/optimizing/nodes.cc  65
-rw-r--r--  compiler/optimizing/nodes.h  21
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc  6
-rw-r--r--  compiler/optimizing/optimizing_compiler_stats.h  10
-rw-r--r--  compiler/optimizing/select_generator.cc  2
-rw-r--r--  compiler/optimizing/select_generator.h  4
-rw-r--r--  compiler/optimizing/ssa_builder.cc  30
-rw-r--r--  runtime/Android.mk  1
-rw-r--r--  runtime/base/mutex.cc  4
-rw-r--r--  runtime/base/mutex.h  1
-rw-r--r--  runtime/interpreter/interpreter_common.cc  48
-rw-r--r--  runtime/runtime.h  8
-rw-r--r--  runtime/verifier/method_verifier.cc  20
-rw-r--r--  runtime/verifier/method_verifier.h  12
-rw-r--r--  runtime/verifier/register_line.cc  13
-rw-r--r--  runtime/verifier/register_line.h  5
-rw-r--r--  test/575-checker-string-init-alias/expected.txt  1
-rw-r--r--  test/575-checker-string-init-alias/info.txt  2
-rw-r--r--  test/575-checker-string-init-alias/smali/TestCase.smali  72
-rw-r--r--  test/575-checker-string-init-alias/src/Main.java  68
-rw-r--r--  test/576-polymorphic-inlining/expected.txt  0
-rw-r--r--  test/576-polymorphic-inlining/info.txt  1
-rw-r--r--  test/576-polymorphic-inlining/src/Main.java  103
39 files changed, 763 insertions, 252 deletions
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index eb4915b821..6f9dd6d268 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -1679,9 +1679,7 @@ void MIRGraph::StringChange() {
if (opcode == Instruction::NEW_INSTANCE) {
uint32_t type_idx = mir->dalvikInsn.vB;
if (cu_->compiler_driver->IsStringTypeIndex(type_idx, cu_->dex_file)) {
- // Change NEW_INSTANCE into CONST_4 of 0
- mir->dalvikInsn.opcode = Instruction::CONST_4;
- mir->dalvikInsn.vB = 0;
+ LOG(FATAL) << "Quick cannot compile String allocations";
}
} else if ((opcode == Instruction::INVOKE_DIRECT) ||
(opcode == Instruction::INVOKE_DIRECT_RANGE)) {
@@ -1689,52 +1687,13 @@ void MIRGraph::StringChange() {
DexFileMethodInliner* inliner =
cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file);
if (inliner->IsStringInitMethodIndex(method_idx)) {
- bool is_range = (opcode == Instruction::INVOKE_DIRECT_RANGE);
- uint32_t orig_this_reg = is_range ? mir->dalvikInsn.vC : mir->dalvikInsn.arg[0];
- // Remove this pointer from string init and change to static call.
- mir->dalvikInsn.vA--;
- if (!is_range) {
- mir->dalvikInsn.opcode = Instruction::INVOKE_STATIC;
- for (uint32_t i = 0; i < mir->dalvikInsn.vA; i++) {
- mir->dalvikInsn.arg[i] = mir->dalvikInsn.arg[i + 1];
- }
- } else {
- mir->dalvikInsn.opcode = Instruction::INVOKE_STATIC_RANGE;
- mir->dalvikInsn.vC++;
- }
- // Insert a move-result instruction to the original this pointer reg.
- MIR* move_result_mir = static_cast<MIR *>(arena_->Alloc(sizeof(MIR), kArenaAllocMIR));
- move_result_mir->dalvikInsn.opcode = Instruction::MOVE_RESULT_OBJECT;
- move_result_mir->dalvikInsn.vA = orig_this_reg;
- move_result_mir->offset = mir->offset;
- move_result_mir->m_unit_index = mir->m_unit_index;
- bb->InsertMIRAfter(mir, move_result_mir);
- // Add additional moves if this pointer was copied to other registers.
- const VerifiedMethod* verified_method =
- cu_->compiler_driver->GetVerifiedMethod(cu_->dex_file, cu_->method_idx);
- DCHECK(verified_method != nullptr);
- const SafeMap<uint32_t, std::set<uint32_t>>& string_init_map =
- verified_method->GetStringInitPcRegMap();
- auto map_it = string_init_map.find(mir->offset);
- if (map_it != string_init_map.end()) {
- const std::set<uint32_t>& reg_set = map_it->second;
- for (auto set_it = reg_set.begin(); set_it != reg_set.end(); ++set_it) {
- MIR* move_mir = static_cast<MIR *>(arena_->Alloc(sizeof(MIR), kArenaAllocMIR));
- move_mir->dalvikInsn.opcode = Instruction::MOVE_OBJECT;
- move_mir->dalvikInsn.vA = *set_it;
- move_mir->dalvikInsn.vB = orig_this_reg;
- move_mir->offset = mir->offset;
- move_mir->m_unit_index = mir->m_unit_index;
- bb->InsertMIRAfter(move_result_mir, move_mir);
- }
- }
+ LOG(FATAL) << "Quick cannot compile String allocations";
}
}
}
}
}
-
bool MIRGraph::EliminateSuspendChecksGate() {
if (kLeafOptimization || // Incompatible (could create loops without suspend checks).
(cu_->disable_opt & (1 << kSuspendCheckElimination)) != 0 || // Disabled.
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index 027290f9b1..49768ded46 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -509,7 +509,8 @@ static bool CanCompileShorty(const char* shorty, InstructionSet instruction_set)
}
bool QuickCompiler::CanCompileInstruction(const MIR* mir,
- const DexFile& dex_file) const {
+ const DexFile& dex_file,
+ CompilationUnit* cu) const {
switch (mir->dalvikInsn.opcode) {
// Quick compiler won't support new instruction semantics to invoke-super into an interface
// method
@@ -522,6 +523,13 @@ bool QuickCompiler::CanCompileInstruction(const MIR* mir,
// False if we are an interface i.e. !(java_access_flags & kAccInterface)
return class_def != nullptr && ((class_def->GetJavaAccessFlags() & kAccInterface) == 0);
}
+ case Instruction::NEW_INSTANCE: {
+ uint32_t type_idx = mir->dalvikInsn.vB;
+ if (cu->compiler_driver->IsStringTypeIndex(type_idx, cu->dex_file)) {
+ return false;
+ }
+ return true;
+ }
default:
return true;
}
@@ -567,7 +575,7 @@ bool QuickCompiler::CanCompileMethod(uint32_t method_idx,
<< MIRGraph::extended_mir_op_names_[opcode - kMirOpFirst];
}
return false;
- } else if (!CanCompileInstruction(mir, dex_file)) {
+ } else if (!CanCompileInstruction(mir, dex_file, cu)) {
VLOG(compiler) << "Cannot compile dalvik opcode : " << mir->dalvikInsn.opcode;
return false;
}
diff --git a/compiler/dex/quick/quick_compiler.h b/compiler/dex/quick/quick_compiler.h
index 55f45f1ab0..f32cf866ca 100644
--- a/compiler/dex/quick/quick_compiler.h
+++ b/compiler/dex/quick/quick_compiler.h
@@ -75,7 +75,7 @@ class QuickCompiler : public Compiler {
explicit QuickCompiler(CompilerDriver* driver);
private:
- bool CanCompileInstruction(const MIR* mir, const DexFile& dex_file) const;
+ bool CanCompileInstruction(const MIR* mir, const DexFile& dex_file, CompilationUnit* cu) const;
std::unique_ptr<PassManager> pre_opt_pass_manager_;
std::unique_ptr<PassManager> post_opt_pass_manager_;
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 0355f116f1..9ae21648bf 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -37,20 +37,16 @@
namespace art {
-VerifiedMethod::VerifiedMethod(uint32_t encountered_error_types,
- bool has_runtime_throw,
- const SafeMap<uint32_t, std::set<uint32_t>>& string_init_pc_reg_map)
+VerifiedMethod::VerifiedMethod(uint32_t encountered_error_types, bool has_runtime_throw)
: encountered_error_types_(encountered_error_types),
- has_runtime_throw_(has_runtime_throw),
- string_init_pc_reg_map_(string_init_pc_reg_map) {
+ has_runtime_throw_(has_runtime_throw) {
}
const VerifiedMethod* VerifiedMethod::Create(verifier::MethodVerifier* method_verifier,
bool compile) {
std::unique_ptr<VerifiedMethod> verified_method(
new VerifiedMethod(method_verifier->GetEncounteredFailureTypes(),
- method_verifier->HasInstructionThatWillThrow(),
- method_verifier->GetStringInitPcRegMap()));
+ method_verifier->HasInstructionThatWillThrow()));
if (compile) {
/* Generate a register map. */
diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h
index 74fcb07d27..12d0219058 100644
--- a/compiler/dex/verified_method.h
+++ b/compiler/dex/verified_method.h
@@ -83,14 +83,8 @@ class VerifiedMethod {
return has_runtime_throw_;
}
- const SafeMap<uint32_t, std::set<uint32_t>>& GetStringInitPcRegMap() const {
- return string_init_pc_reg_map_;
- }
-
private:
- VerifiedMethod(uint32_t encountered_error_types,
- bool has_runtime_throw,
- const SafeMap<uint32_t, std::set<uint32_t>>& string_init_pc_reg_map);
+ VerifiedMethod(uint32_t encountered_error_types, bool has_runtime_throw);
/*
* Generate the GC map for a method that has just been verified (i.e. we're doing this as part of
@@ -129,10 +123,6 @@ class VerifiedMethod {
const uint32_t encountered_error_types_;
const bool has_runtime_throw_;
-
- // Copy of mapping generated by verifier of dex PCs of string init invocations
- // to the set of other registers that the receiver has been copied into.
- const SafeMap<uint32_t, std::set<uint32_t>> string_init_pc_reg_map_;
};
} // namespace art
diff --git a/compiler/optimizing/constant_folding.cc b/compiler/optimizing/constant_folding.cc
index 014353d615..7ddabdee78 100644
--- a/compiler/optimizing/constant_folding.cc
+++ b/compiler/optimizing/constant_folding.cc
@@ -18,8 +18,28 @@
namespace art {
-// This visitor tries to simplify operations that yield a constant. For example
-// `input * 0` is replaced by a null constant.
+// This visitor tries to simplify instructions that can be evaluated
+// as constants.
+class HConstantFoldingVisitor : public HGraphDelegateVisitor {
+ public:
+ explicit HConstantFoldingVisitor(HGraph* graph)
+ : HGraphDelegateVisitor(graph) {}
+
+ private:
+ void VisitBasicBlock(HBasicBlock* block) OVERRIDE;
+
+ void VisitUnaryOperation(HUnaryOperation* inst) OVERRIDE;
+ void VisitBinaryOperation(HBinaryOperation* inst) OVERRIDE;
+
+ void VisitTypeConversion(HTypeConversion* inst) OVERRIDE;
+ void VisitDivZeroCheck(HDivZeroCheck* inst) OVERRIDE;
+
+ DISALLOW_COPY_AND_ASSIGN(HConstantFoldingVisitor);
+};
+
+// This visitor tries to simplify operations with an absorbing input,
+// yielding a constant. For example `input * 0` is replaced by a
+// null constant.
class InstructionWithAbsorbingInputSimplifier : public HGraphVisitor {
public:
explicit InstructionWithAbsorbingInputSimplifier(HGraph* graph) : HGraphVisitor(graph) {}
@@ -44,59 +64,69 @@ class InstructionWithAbsorbingInputSimplifier : public HGraphVisitor {
void VisitXor(HXor* instruction) OVERRIDE;
};
+
void HConstantFolding::Run() {
- InstructionWithAbsorbingInputSimplifier simplifier(graph_);
+ HConstantFoldingVisitor visitor(graph_);
// Process basic blocks in reverse post-order in the dominator tree,
// so that an instruction turned into a constant, used as input of
// another instruction, may possibly be used to turn that second
// instruction into a constant as well.
- for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
- // Traverse this block's instructions in (forward) order and
- // replace the ones that can be statically evaluated by a
- // compile-time counterpart.
- for (HInstructionIterator inst_it(block->GetInstructions());
- !inst_it.Done(); inst_it.Advance()) {
- HInstruction* inst = inst_it.Current();
- if (inst->IsBinaryOperation()) {
- // Constant folding: replace `op(a, b)' with a constant at
- // compile time if `a' and `b' are both constants.
- HConstant* constant = inst->AsBinaryOperation()->TryStaticEvaluation();
- if (constant != nullptr) {
- inst->ReplaceWith(constant);
- inst->GetBlock()->RemoveInstruction(inst);
- } else {
- inst->Accept(&simplifier);
- }
- } else if (inst->IsUnaryOperation()) {
- // Constant folding: replace `op(a)' with a constant at compile
- // time if `a' is a constant.
- HConstant* constant = inst->AsUnaryOperation()->TryStaticEvaluation();
- if (constant != nullptr) {
- inst->ReplaceWith(constant);
- inst->GetBlock()->RemoveInstruction(inst);
- }
- } else if (inst->IsTypeConversion()) {
- // Constant folding: replace `TypeConversion(a)' with a constant at
- // compile time if `a' is a constant.
- HConstant* constant = inst->AsTypeConversion()->TryStaticEvaluation();
- if (constant != nullptr) {
- inst->ReplaceWith(constant);
- inst->GetBlock()->RemoveInstruction(inst);
- }
- } else if (inst->IsDivZeroCheck()) {
- // We can safely remove the check if the input is a non-null constant.
- HDivZeroCheck* check = inst->AsDivZeroCheck();
- HInstruction* check_input = check->InputAt(0);
- if (check_input->IsConstant() && !check_input->AsConstant()->IsZero()) {
- check->ReplaceWith(check_input);
- check->GetBlock()->RemoveInstruction(check);
- }
- }
- }
+ visitor.VisitReversePostOrder();
+}
+
+
+void HConstantFoldingVisitor::VisitBasicBlock(HBasicBlock* block) {
+ // Traverse this block's instructions (phis don't need to be
+ // processed) in (forward) order and replace the ones that can be
+ // statically evaluated by a compile-time counterpart.
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ it.Current()->Accept(this);
}
}
+void HConstantFoldingVisitor::VisitUnaryOperation(HUnaryOperation* inst) {
+ // Constant folding: replace `op(a)' with a constant at compile
+ // time if `a' is a constant.
+ HConstant* constant = inst->TryStaticEvaluation();
+ if (constant != nullptr) {
+ inst->ReplaceWith(constant);
+ inst->GetBlock()->RemoveInstruction(inst);
+ }
+}
+
+void HConstantFoldingVisitor::VisitBinaryOperation(HBinaryOperation* inst) {
+ // Constant folding: replace `op(a, b)' with a constant at
+ // compile time if `a' and `b' are both constants.
+ HConstant* constant = inst->TryStaticEvaluation();
+ if (constant != nullptr) {
+ inst->ReplaceWith(constant);
+ inst->GetBlock()->RemoveInstruction(inst);
+ } else {
+ InstructionWithAbsorbingInputSimplifier simplifier(GetGraph());
+ inst->Accept(&simplifier);
+ }
+}
+
+void HConstantFoldingVisitor::VisitTypeConversion(HTypeConversion* inst) {
+ // Constant folding: replace `TypeConversion(a)' with a constant at
+ // compile time if `a' is a constant.
+ HConstant* constant = inst->AsTypeConversion()->TryStaticEvaluation();
+ if (constant != nullptr) {
+ inst->ReplaceWith(constant);
+ inst->GetBlock()->RemoveInstruction(inst);
+ }
+}
+
+void HConstantFoldingVisitor::VisitDivZeroCheck(HDivZeroCheck* inst) {
+ // We can safely remove the check if the input is a non-null constant.
+ HInstruction* check_input = inst->InputAt(0);
+ if (check_input->IsConstant() && !check_input->AsConstant()->IsZero()) {
+ inst->ReplaceWith(check_input);
+ inst->GetBlock()->RemoveInstruction(inst);
+ }
+}
+
+
void InstructionWithAbsorbingInputSimplifier::VisitShift(HBinaryOperation* instruction) {
DCHECK(instruction->IsShl() || instruction->IsShr() || instruction->IsUShr());
HInstruction* left = instruction->GetLeft();
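
The refactoring above turns the single loop in HConstantFolding::Run() into per-instruction Visit* hooks on an HGraphDelegateVisitor; the folding idea itself is unchanged: ask each instruction whether it can be evaluated statically and, if so, replace it with the resulting constant. Below is a minimal standalone sketch of that idea over a toy expression tree (hypothetical types, not the ART API; needs C++17):

#include <iostream>
#include <memory>
#include <optional>

struct Expr {
  virtual ~Expr() = default;
  virtual std::optional<int> TryStaticEvaluation() const = 0;
};

struct Constant : Expr {
  int value;
  explicit Constant(int v) : value(v) {}
  std::optional<int> TryStaticEvaluation() const override { return value; }
};

struct Add : Expr {
  std::unique_ptr<Expr> lhs, rhs;
  Add(std::unique_ptr<Expr> l, std::unique_ptr<Expr> r)
      : lhs(std::move(l)), rhs(std::move(r)) {}
  std::optional<int> TryStaticEvaluation() const override {
    auto l = lhs->TryStaticEvaluation();
    auto r = rhs->TryStaticEvaluation();
    if (l && r) return *l + *r;  // both inputs are constants: fold.
    return std::nullopt;         // otherwise leave the node alone.
  }
};

int main() {
  Add sum(std::make_unique<Constant>(2), std::make_unique<Constant>(40));
  if (auto folded = sum.TryStaticEvaluation()) {
    std::cout << "folded to " << *folded << "\n";  // prints "folded to 42"
  }
  return 0;
}
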
diff --git a/compiler/optimizing/constant_folding.h b/compiler/optimizing/constant_folding.h
index 2698b2d690..e10b1d6b2e 100644
--- a/compiler/optimizing/constant_folding.h
+++ b/compiler/optimizing/constant_folding.h
@@ -26,13 +26,20 @@ namespace art {
* Optimization pass performing a simple constant-expression
* evaluation on the SSA form.
*
+ * Note that graph simplifications producing a constant should be
+ * implemented in art::HConstantFolding, while graph simplifications
+ * not producing constants should be implemented in
+ * art::InstructionSimplifier. (This convention is a choice that was
+ * made during the development of these parts of the compiler and is
+ * not bound by any technical requirement.)
+ *
* This class is named art::HConstantFolding to avoid name
* clashes with the art::ConstantPropagation class defined in
* compiler/dex/post_opt_passes.h.
*/
class HConstantFolding : public HOptimization {
public:
- explicit HConstantFolding(HGraph* graph, const char* name = kConstantFoldingPassName)
+ HConstantFolding(HGraph* graph, const char* name = kConstantFoldingPassName)
: HOptimization(graph, name) {}
void Run() OVERRIDE;
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index e6e9177841..4a49c83611 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -593,8 +593,9 @@ void GraphChecker::HandleLoop(HBasicBlock* loop_header) {
HBasicBlock* predecessor = loop_header->GetPredecessors()[i];
if (!loop_information->IsBackEdge(*predecessor)) {
AddError(StringPrintf(
- "Loop header %d has multiple incoming (non back edge) blocks.",
- id));
+ "Loop header %d has multiple incoming (non back edge) blocks: %d.",
+ id,
+ predecessor->GetBlockId()));
}
}
}
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 74cc24bdaa..e3c2f5b37d 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -360,11 +360,39 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
}
// We successfully inlined, now add a guard.
+ bool is_referrer =
+ (ic.GetMonomorphicType() == outermost_graph_->GetArtMethod()->GetDeclaringClass());
+ AddTypeGuard(receiver,
+ cursor,
+ bb_cursor,
+ class_index,
+ is_referrer,
+ invoke_instruction,
+ /* with_deoptimization */ true);
+
+ // Run type propagation to get the guard typed, and eventually propagate the
+ // type of the receiver.
+ ReferenceTypePropagation rtp_fixup(graph_, handles_, /* is_first_run */ false);
+ rtp_fixup.Run();
+
+ MaybeRecordStat(kInlinedMonomorphicCall);
+ return true;
+}
+
+HInstruction* HInliner::AddTypeGuard(HInstruction* receiver,
+ HInstruction* cursor,
+ HBasicBlock* bb_cursor,
+ uint32_t class_index,
+ bool is_referrer,
+ HInstruction* invoke_instruction,
+ bool with_deoptimization) {
+ ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
HInstanceFieldGet* receiver_class = BuildGetReceiverClass(
class_linker, receiver, invoke_instruction->GetDexPc());
- bool is_referrer =
- (ic.GetMonomorphicType() == outermost_graph_->GetArtMethod()->GetDeclaringClass());
+ const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
+ // Note that we will just compare the classes, so we don't need Java semantics access checks.
+ // Also, the caller of `AddTypeGuard` must have guaranteed that the class is in the dex cache.
HLoadClass* load_class = new (graph_->GetArena()) HLoadClass(graph_->GetCurrentMethod(),
class_index,
caller_dex_file,
@@ -374,8 +402,6 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
/* is_in_dex_cache */ true);
HNotEqual* compare = new (graph_->GetArena()) HNotEqual(load_class, receiver_class);
- HDeoptimize* deoptimize = new (graph_->GetArena()) HDeoptimize(
- compare, invoke_instruction->GetDexPc());
// TODO: Extend reference type propagation to understand the guard.
if (cursor != nullptr) {
bb_cursor->InsertInstructionAfter(receiver_class, cursor);
@@ -384,16 +410,13 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
}
bb_cursor->InsertInstructionAfter(load_class, receiver_class);
bb_cursor->InsertInstructionAfter(compare, load_class);
- bb_cursor->InsertInstructionAfter(deoptimize, compare);
- deoptimize->CopyEnvironmentFrom(invoke_instruction->GetEnvironment());
-
- // Run type propagation to get the guard typed, and eventually propagate the
- // type of the receiver.
- ReferenceTypePropagation rtp_fixup(graph_, handles_, /* is_first_run */ false);
- rtp_fixup.Run();
-
- MaybeRecordStat(kInlinedMonomorphicCall);
- return true;
+ if (with_deoptimization) {
+ HDeoptimize* deoptimize = new (graph_->GetArena()) HDeoptimize(
+ compare, invoke_instruction->GetDexPc());
+ bb_cursor->InsertInstructionAfter(deoptimize, compare);
+ deoptimize->CopyEnvironmentFrom(invoke_instruction->GetEnvironment());
+ }
+ return compare;
}
bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
@@ -401,6 +424,173 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
const InlineCache& ic) {
DCHECK(invoke_instruction->IsInvokeVirtual() || invoke_instruction->IsInvokeInterface())
<< invoke_instruction->DebugName();
+
+ if (TryInlinePolymorphicCallToSameTarget(invoke_instruction, resolved_method, ic)) {
+ return true;
+ }
+
+ ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
+ size_t pointer_size = class_linker->GetImagePointerSize();
+ const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
+
+ bool all_targets_inlined = true;
+ bool one_target_inlined = false;
+ for (size_t i = 0; i < InlineCache::kIndividualCacheSize; ++i) {
+ if (ic.GetTypeAt(i) == nullptr) {
+ break;
+ }
+ ArtMethod* method = nullptr;
+ if (invoke_instruction->IsInvokeInterface()) {
+ method = ic.GetTypeAt(i)->FindVirtualMethodForInterface(
+ resolved_method, pointer_size);
+ } else {
+ DCHECK(invoke_instruction->IsInvokeVirtual());
+ method = ic.GetTypeAt(i)->FindVirtualMethodForVirtual(
+ resolved_method, pointer_size);
+ }
+
+ HInstruction* receiver = invoke_instruction->InputAt(0);
+ HInstruction* cursor = invoke_instruction->GetPrevious();
+ HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
+
+ uint32_t class_index = FindClassIndexIn(ic.GetTypeAt(i), caller_dex_file);
+ HInstruction* return_replacement = nullptr;
+ if (class_index == DexFile::kDexNoIndex ||
+ !TryBuildAndInline(invoke_instruction, method, &return_replacement)) {
+ all_targets_inlined = false;
+ } else {
+ one_target_inlined = true;
+ bool is_referrer = (ic.GetTypeAt(i) == outermost_graph_->GetArtMethod()->GetDeclaringClass());
+
+ // If we have inlined all targets before, and this receiver is the last seen,
+ // we deoptimize instead of keeping the original invoke instruction.
+ bool deoptimize = all_targets_inlined &&
+ (i != InlineCache::kIndividualCacheSize - 1) &&
+ (ic.GetTypeAt(i + 1) == nullptr);
+ HInstruction* compare = AddTypeGuard(
+ receiver, cursor, bb_cursor, class_index, is_referrer, invoke_instruction, deoptimize);
+ if (deoptimize) {
+ if (return_replacement != nullptr) {
+ invoke_instruction->ReplaceWith(return_replacement);
+ }
+ invoke_instruction->GetBlock()->RemoveInstruction(invoke_instruction);
+ // Because the inline cache data can be populated concurrently, we force the end of the
+        // iteration. Otherwise, we could see a new receiver type.
+ break;
+ } else {
+ CreateDiamondPatternForPolymorphicInline(compare, return_replacement, invoke_instruction);
+ }
+ }
+ }
+
+ if (!one_target_inlined) {
+ VLOG(compiler) << "Call to " << PrettyMethod(resolved_method)
+ << " from inline cache is not inlined because none"
+ << " of its targets could be inlined";
+ return false;
+ }
+ MaybeRecordStat(kInlinedPolymorphicCall);
+
+ // Run type propagation to get the guards typed.
+ ReferenceTypePropagation rtp_fixup(graph_, handles_, /* is_first_run */ false);
+ rtp_fixup.Run();
+ return true;
+}
+
+void HInliner::CreateDiamondPatternForPolymorphicInline(HInstruction* compare,
+ HInstruction* return_replacement,
+ HInstruction* invoke_instruction) {
+ uint32_t dex_pc = invoke_instruction->GetDexPc();
+ HBasicBlock* cursor_block = compare->GetBlock();
+ HBasicBlock* original_invoke_block = invoke_instruction->GetBlock();
+ ArenaAllocator* allocator = graph_->GetArena();
+
+  // Split the block after the compare: `cursor_block` will now be the start of the diamond,
+ // and the returned block is the start of the then branch (that could contain multiple blocks).
+ HBasicBlock* then = cursor_block->SplitAfterForInlining(compare);
+
+ // Split the block containing the invoke before and after the invoke. The returned block
+ // of the split before will contain the invoke and will be the otherwise branch of
+ // the diamond. The returned block of the split after will be the merge block
+ // of the diamond.
+ HBasicBlock* end_then = invoke_instruction->GetBlock();
+ HBasicBlock* otherwise = end_then->SplitBeforeForInlining(invoke_instruction);
+ HBasicBlock* merge = otherwise->SplitAfterForInlining(invoke_instruction);
+
+ // If the methods we are inlining return a value, we create a phi in the merge block
+  // that will have the `invoke_instruction` and the `return_replacement` as inputs.
+ if (return_replacement != nullptr) {
+ HPhi* phi = new (allocator) HPhi(
+ allocator, kNoRegNumber, 0, HPhi::ToPhiType(invoke_instruction->GetType()), dex_pc);
+ merge->AddPhi(phi);
+ invoke_instruction->ReplaceWith(phi);
+ phi->AddInput(return_replacement);
+ phi->AddInput(invoke_instruction);
+ }
+
+ // Add the control flow instructions.
+ otherwise->AddInstruction(new (allocator) HGoto(dex_pc));
+ end_then->AddInstruction(new (allocator) HGoto(dex_pc));
+ cursor_block->AddInstruction(new (allocator) HIf(compare, dex_pc));
+
+ // Add the newly created blocks to the graph.
+ graph_->AddBlock(then);
+ graph_->AddBlock(otherwise);
+ graph_->AddBlock(merge);
+
+  // Set up successor (and implicitly predecessor) relations.
+ cursor_block->AddSuccessor(otherwise);
+ cursor_block->AddSuccessor(then);
+ end_then->AddSuccessor(merge);
+ otherwise->AddSuccessor(merge);
+
+ // Set up dominance information.
+ then->SetDominator(cursor_block);
+ cursor_block->AddDominatedBlock(then);
+ otherwise->SetDominator(cursor_block);
+ cursor_block->AddDominatedBlock(otherwise);
+ merge->SetDominator(cursor_block);
+ cursor_block->AddDominatedBlock(merge);
+
+  // Update the reverse post order.
+ size_t index = IndexOfElement(graph_->reverse_post_order_, cursor_block);
+ MakeRoomFor(&graph_->reverse_post_order_, 1, index);
+ graph_->reverse_post_order_[++index] = then;
+ index = IndexOfElement(graph_->reverse_post_order_, end_then);
+ MakeRoomFor(&graph_->reverse_post_order_, 2, index);
+ graph_->reverse_post_order_[++index] = otherwise;
+ graph_->reverse_post_order_[++index] = merge;
+
+ // Set the loop information of the newly created blocks.
+ HLoopInformation* loop_info = cursor_block->GetLoopInformation();
+ if (loop_info != nullptr) {
+ then->SetLoopInformation(cursor_block->GetLoopInformation());
+ merge->SetLoopInformation(cursor_block->GetLoopInformation());
+ otherwise->SetLoopInformation(cursor_block->GetLoopInformation());
+ for (HLoopInformationOutwardIterator loop_it(*cursor_block);
+ !loop_it.Done();
+ loop_it.Advance()) {
+ loop_it.Current()->Add(then);
+ loop_it.Current()->Add(merge);
+ loop_it.Current()->Add(otherwise);
+ }
+ // In case the original invoke location was a back edge, we need to update
+ // the loop to now have the merge block as a back edge.
+ if (loop_info->IsBackEdge(*original_invoke_block)) {
+ loop_info->RemoveBackEdge(original_invoke_block);
+ loop_info->AddBackEdge(merge);
+ }
+ }
+
+ // Set the try/catch information of the newly created blocks.
+ then->SetTryCatchInformation(cursor_block->GetTryCatchInformation());
+ merge->SetTryCatchInformation(cursor_block->GetTryCatchInformation());
+ otherwise->SetTryCatchInformation(cursor_block->GetTryCatchInformation());
+}
+
+bool HInliner::TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction,
+ ArtMethod* resolved_method,
+ const InlineCache& ic) {
// This optimization only works under JIT for now.
DCHECK(Runtime::Current()->UseJit());
if (graph_->GetInstructionSet() == kMips64) {
@@ -893,7 +1083,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
HConstantFolding fold(callee_graph);
HSharpening sharpening(callee_graph, codegen_, dex_compilation_unit, compiler_driver_);
InstructionSimplifier simplify(callee_graph, stats_);
- IntrinsicsRecognizer intrinsics(callee_graph, compiler_driver_);
+ IntrinsicsRecognizer intrinsics(callee_graph, compiler_driver_, stats_);
HOptimization* optimizations[] = {
&intrinsics,
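
AddTypeGuard factors the speculation out of TryInlineMonomorphicCall: load the receiver's class, compare it with the class recorded in the inline cache, and, when `with_deoptimization` is true, deoptimize on mismatch. A hypothetical standalone sketch of that shape, where Deoptimize() merely stands in for HDeoptimize (which would transfer execution to the interpreter in ART) and InlinedBody() for the inlined callee:

#include <iostream>
#include <stdexcept>

struct Object { int klass_id; };

[[noreturn]] void Deoptimize() { throw std::runtime_error("deoptimize"); }

int InlinedBody(const Object&) { return 42; }  // stand-in for the inlined callee

int GuardedCall(const Object& receiver, int cached_klass_id) {
  // HNotEqual(load_class, receiver_class) followed by HDeoptimize(compare).
  if (receiver.klass_id != cached_klass_id) {
    Deoptimize();
  }
  return InlinedBody(receiver);
}

int main() {
  std::cout << GuardedCall(Object{7}, 7) << "\n";  // prints 42
  try {
    GuardedCall(Object{8}, 7);
  } catch (const std::exception& e) {
    std::cout << e.what() << "\n";  // prints "deoptimize"
  }
  return 0;
}
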
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 9dd9bf5ad8..cdb2167082 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -101,12 +101,18 @@ class HInliner : public HOptimization {
const InlineCache& ic)
SHARED_REQUIRES(Locks::mutator_lock_);
- // Try to inline targets of a polymorphic call. Currently unimplemented.
+ // Try to inline targets of a polymorphic call.
bool TryInlinePolymorphicCall(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
const InlineCache& ic)
SHARED_REQUIRES(Locks::mutator_lock_);
+ bool TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction,
+ ArtMethod* resolved_method,
+ const InlineCache& ic)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+
HInstanceFieldGet* BuildGetReceiverClass(ClassLinker* class_linker,
HInstruction* receiver,
uint32_t dex_pc) const
@@ -118,6 +124,57 @@ class HInliner : public HOptimization {
bool do_rtp)
SHARED_REQUIRES(Locks::mutator_lock_);
+ // Add a type guard on the given `receiver`. This will add to the graph:
+ // i0 = HFieldGet(receiver, klass)
+ // i1 = HLoadClass(class_index, is_referrer)
+ // i2 = HNotEqual(i0, i1)
+ //
+ // And if `with_deoptimization` is true:
+ // HDeoptimize(i2)
+ //
+ // The method returns the `HNotEqual`, that will be used for polymorphic inlining.
+ HInstruction* AddTypeGuard(HInstruction* receiver,
+ HInstruction* cursor,
+ HBasicBlock* bb_cursor,
+ uint32_t class_index,
+ bool is_referrer,
+ HInstruction* invoke_instruction,
+ bool with_deoptimization)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ /*
+   * Ad-hoc implementation of a diamond pattern in the graph for
+ * polymorphic inlining:
+ * 1) `compare` becomes the input of the new `HIf`.
+ * 2) Everything up until `invoke_instruction` is in the then branch (could
+ * contain multiple blocks).
+ * 3) `invoke_instruction` is moved to the otherwise block.
+ * 4) If `return_replacement` is not null, the merge block will have
+ * a phi whose inputs are `return_replacement` and `invoke_instruction`.
+ *
+ * Before:
+ * Block1
+ * compare
+ * ...
+ * invoke_instruction
+ *
+ * After:
+ * Block1
+ * compare
+ * if
+ * / \
+ * / \
+ * Then block Otherwise block
+ * ... invoke_instruction
+ * \ /
+ * \ /
+ * Merge block
+ * phi(return_replacement, invoke_instruction)
+ */
+ void CreateDiamondPatternForPolymorphicInline(HInstruction* compare,
+ HInstruction* return_replacement,
+ HInstruction* invoke_instruction);
+
HGraph* const outermost_graph_;
const DexCompilationUnit& outer_compilation_unit_;
const DexCompilationUnit& caller_compilation_unit_;
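
The header comment above describes the diamond built by CreateDiamondPatternForPolymorphicInline; in terms of values, the phi in the merge block simply selects between the inlined body's result and the original invoke's result. A hypothetical standalone sketch of that semantics (the helper functions are stand-ins, not ART code):

#include <iostream>

struct Receiver { int klass_id; };

int InlinedBody(const Receiver&) { return 1; }     // stands in for the inlined graph
int OriginalInvoke(const Receiver&) { return 2; }  // stands in for the HInvoke kept in `otherwise`

int DiamondSemantics(const Receiver& r, int cached_klass_id) {
  int result;                           // plays the role of the HPhi in the merge block
  if (r.klass_id != cached_klass_id) {  // HIf fed by the HNotEqual guard
    result = OriginalInvoke(r);         // otherwise block
  } else {
    result = InlinedBody(r);            // then block
  }
  return result;                        // merge block
}

int main() {
  std::cout << DiamondSemantics(Receiver{5}, 5) << DiamondSemantics(Receiver{6}, 5) << "\n";  // prints 12
  return 0;
}
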
diff --git a/compiler/optimizing/instruction_simplifier.h b/compiler/optimizing/instruction_simplifier.h
index cc4b6f6adc..7905104ed4 100644
--- a/compiler/optimizing/instruction_simplifier.h
+++ b/compiler/optimizing/instruction_simplifier.h
@@ -25,6 +25,13 @@ namespace art {
/**
* Implements optimizations specific to each instruction.
+ *
+ * Note that graph simplifications producing a constant should be
+ * implemented in art::HConstantFolding, while graph simplifications
+ * not producing constants should be implemented in
+ * art::InstructionSimplifier. (This convention is a choice that was
+ * made during the development of these parts of the compiler and is
+ * not bound by any technical requirement.)
*/
class InstructionSimplifier : public HOptimization {
public:
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index db39bc8eec..316e86b4c9 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -570,6 +570,7 @@ void IntrinsicsRecognizer::Run() {
NeedsEnvironmentOrCache(intrinsic),
GetSideEffects(intrinsic),
GetExceptions(intrinsic));
+ MaybeRecordStat(MethodCompilationStat::kIntrinsicRecognized);
}
}
}
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 3bf3f7ffae..2ab50bb436 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -33,8 +33,8 @@ static constexpr bool kRoundIsPlusPointFive = false;
// Recognize intrinsics from HInvoke nodes.
class IntrinsicsRecognizer : public HOptimization {
public:
- IntrinsicsRecognizer(HGraph* graph, CompilerDriver* driver)
- : HOptimization(graph, kIntrinsicsRecognizerPassName),
+ IntrinsicsRecognizer(HGraph* graph, CompilerDriver* driver, OptimizingCompilerStats* stats)
+ : HOptimization(graph, kIntrinsicsRecognizerPassName, stats),
driver_(driver) {}
void Run() OVERRIDE;
diff --git a/compiler/optimizing/licm.cc b/compiler/optimizing/licm.cc
index a6b4078f46..33bb2e8f30 100644
--- a/compiler/optimizing/licm.cc
+++ b/compiler/optimizing/licm.cc
@@ -141,6 +141,7 @@ void LICM::Run() {
DCHECK(!instruction->HasEnvironment());
}
instruction->MoveBefore(pre_header->GetLastInstruction());
+ MaybeRecordStat(MethodCompilationStat::kLoopInvariantMoved);
} else if (instruction->CanThrow()) {
// If `instruction` can throw, we cannot move further instructions
// that can throw as well.
diff --git a/compiler/optimizing/licm.h b/compiler/optimizing/licm.h
index 0b5a0f103b..bf56f53d46 100644
--- a/compiler/optimizing/licm.h
+++ b/compiler/optimizing/licm.h
@@ -26,8 +26,9 @@ class SideEffectsAnalysis;
class LICM : public HOptimization {
public:
- LICM(HGraph* graph, const SideEffectsAnalysis& side_effects)
- : HOptimization(graph, kLoopInvariantCodeMotionPassName), side_effects_(side_effects) {}
+ LICM(HGraph* graph, const SideEffectsAnalysis& side_effects, OptimizingCompilerStats* stats)
+ : HOptimization(graph, kLoopInvariantCodeMotionPassName, stats),
+ side_effects_(side_effects) {}
void Run() OVERRIDE;
diff --git a/compiler/optimizing/licm_test.cc b/compiler/optimizing/licm_test.cc
index 9fb32f4001..d446539700 100644
--- a/compiler/optimizing/licm_test.cc
+++ b/compiler/optimizing/licm_test.cc
@@ -79,7 +79,7 @@ class LICMTest : public CommonCompilerTest {
graph_->BuildDominatorTree();
SideEffectsAnalysis side_effects(graph_);
side_effects.Run();
- LICM(graph_, side_effects).Run();
+ LICM(graph_, side_effects, nullptr).Run();
}
// General building fields.
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index b26ce0aa13..f36dc6e2fd 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -1420,7 +1420,38 @@ HBasicBlock* HBasicBlock::SplitCatchBlockAfterMoveException() {
}
}
-HBasicBlock* HBasicBlock::SplitAfter(HInstruction* cursor) {
+HBasicBlock* HBasicBlock::SplitBeforeForInlining(HInstruction* cursor) {
+ DCHECK_EQ(cursor->GetBlock(), this);
+
+ HBasicBlock* new_block = new (GetGraph()->GetArena()) HBasicBlock(GetGraph(),
+ cursor->GetDexPc());
+ new_block->instructions_.first_instruction_ = cursor;
+ new_block->instructions_.last_instruction_ = instructions_.last_instruction_;
+ instructions_.last_instruction_ = cursor->previous_;
+ if (cursor->previous_ == nullptr) {
+ instructions_.first_instruction_ = nullptr;
+ } else {
+ cursor->previous_->next_ = nullptr;
+ cursor->previous_ = nullptr;
+ }
+
+ new_block->instructions_.SetBlockOfInstructions(new_block);
+
+ for (HBasicBlock* successor : GetSuccessors()) {
+ new_block->successors_.push_back(successor);
+ successor->predecessors_[successor->GetPredecessorIndexOf(this)] = new_block;
+ }
+ successors_.clear();
+
+ for (HBasicBlock* dominated : GetDominatedBlocks()) {
+ dominated->dominator_ = new_block;
+ new_block->dominated_blocks_.push_back(dominated);
+ }
+ dominated_blocks_.clear();
+ return new_block;
+}
+
+HBasicBlock* HBasicBlock::SplitAfterForInlining(HInstruction* cursor) {
DCHECK(!cursor->IsControlFlow());
DCHECK_NE(instructions_.last_instruction_, cursor);
DCHECK_EQ(cursor->GetBlock(), this);
@@ -1573,6 +1604,20 @@ void HInstructionList::AddAfter(HInstruction* cursor, const HInstructionList& in
}
}
+void HInstructionList::AddBefore(HInstruction* cursor, const HInstructionList& instruction_list) {
+ DCHECK(Contains(cursor));
+ if (!instruction_list.IsEmpty()) {
+ if (cursor == first_instruction_) {
+ first_instruction_ = instruction_list.first_instruction_;
+ } else {
+ cursor->previous_->next_ = instruction_list.first_instruction_;
+ }
+ instruction_list.last_instruction_->next_ = cursor;
+ instruction_list.first_instruction_->previous_ = cursor->previous_;
+ cursor->previous_ = instruction_list.last_instruction_;
+ }
+}
+
void HInstructionList::Add(const HInstructionList& instruction_list) {
if (IsEmpty()) {
first_instruction_ = instruction_list.first_instruction_;
@@ -1815,18 +1860,6 @@ void HBasicBlock::ReplaceWith(HBasicBlock* other) {
graph_ = nullptr;
}
-// Create space in `blocks` for adding `number_of_new_blocks` entries
-// starting at location `at`. Blocks after `at` are moved accordingly.
-static void MakeRoomFor(ArenaVector<HBasicBlock*>* blocks,
- size_t number_of_new_blocks,
- size_t after) {
- DCHECK_LT(after, blocks->size());
- size_t old_size = blocks->size();
- size_t new_size = old_size + number_of_new_blocks;
- blocks->resize(new_size);
- std::copy_backward(blocks->begin() + after + 1u, blocks->begin() + old_size, blocks->end());
-}
-
void HGraph::DeleteDeadEmptyBlock(HBasicBlock* block) {
DCHECK_EQ(block->GetGraph(), this);
DCHECK(block->GetSuccessors().empty());
@@ -1880,7 +1913,8 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
DCHECK(!body->IsInLoop());
HInstruction* last = body->GetLastInstruction();
- invoke->GetBlock()->instructions_.AddAfter(invoke, body->GetInstructions());
+ // Note that we add instructions before the invoke only to simplify polymorphic inlining.
+ invoke->GetBlock()->instructions_.AddBefore(invoke, body->GetInstructions());
body->GetInstructions().SetBlockOfInstructions(invoke->GetBlock());
// Replace the invoke with the return value of the inlined graph.
@@ -1898,7 +1932,8 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
// with the second half.
ArenaAllocator* allocator = outer_graph->GetArena();
HBasicBlock* at = invoke->GetBlock();
- HBasicBlock* to = at->SplitAfter(invoke);
+ // Note that we split before the invoke only to simplify polymorphic inlining.
+ HBasicBlock* to = at->SplitBeforeForInlining(invoke);
HBasicBlock* first = entry_block_->GetSuccessors()[0];
DCHECK(!first->IsInLoop());
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 6d52925976..399afabea6 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -131,6 +131,7 @@ class HInstructionList : public ValueObject {
void SetBlockOfInstructions(HBasicBlock* block) const;
void AddAfter(HInstruction* cursor, const HInstructionList& instruction_list);
+ void AddBefore(HInstruction* cursor, const HInstructionList& instruction_list);
void Add(const HInstructionList& instruction_list);
// Return the number of instructions in the list. This is an expensive operation.
@@ -618,6 +619,7 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
friend class SsaBuilder; // For caching constants.
friend class SsaLivenessAnalysis; // For the linear order.
+ friend class HInliner; // For the reverse post order.
ART_FRIEND_TEST(GraphTest, IfSuccessorSimpleJoinBlock1);
DISALLOW_COPY_AND_ASSIGN(HGraph);
};
@@ -972,12 +974,15 @@ class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> {
// loop and try/catch information.
HBasicBlock* SplitBefore(HInstruction* cursor);
- // Split the block into two blocks just after `cursor`. Returns the newly
+ // Split the block into two blocks just before `cursor`. Returns the newly
// created block. Note that this method just updates raw block information,
// like predecessors, successors, dominators, and instruction list. It does not
// update the graph, reverse post order, loop information, nor make sure the
// blocks are consistent (for example ending with a control flow instruction).
- HBasicBlock* SplitAfter(HInstruction* cursor);
+ HBasicBlock* SplitBeforeForInlining(HInstruction* cursor);
+
+ // Similar to `SplitBeforeForInlining` but does it after `cursor`.
+ HBasicBlock* SplitAfterForInlining(HInstruction* cursor);
// Split catch block into two blocks after the original move-exception bytecode
// instruction, or at the beginning if not present. Returns the newly created,
@@ -6370,6 +6375,18 @@ class SwitchTable : public ValueObject {
DISALLOW_COPY_AND_ASSIGN(SwitchTable);
};
+// Create space in `blocks` for adding `number_of_new_blocks` entries
+// starting at location `at`. Blocks after `at` are moved accordingly.
+inline void MakeRoomFor(ArenaVector<HBasicBlock*>* blocks,
+ size_t number_of_new_blocks,
+ size_t after) {
+ DCHECK_LT(after, blocks->size());
+ size_t old_size = blocks->size();
+ size_t new_size = old_size + number_of_new_blocks;
+ blocks->resize(new_size);
+ std::copy_backward(blocks->begin() + after + 1u, blocks->begin() + old_size, blocks->end());
+}
+
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_NODES_H_
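
MakeRoomFor, moved here from nodes.cc so the inliner can reuse it, opens a gap of `number_of_new_blocks` slots right after position `after` by growing the vector and shifting the tail with std::copy_backward; the caller then overwrites the freed slots (the inliner writes the new then/otherwise/merge blocks into the reverse post order this way). A standalone usage sketch, with std::vector<int> standing in for ArenaVector<HBasicBlock*>:

#include <algorithm>
#include <iostream>
#include <vector>

// Same logic as the MakeRoomFor above, over plain ints for illustration.
void MakeRoomFor(std::vector<int>* blocks, size_t number_of_new_blocks, size_t after) {
  size_t old_size = blocks->size();
  size_t new_size = old_size + number_of_new_blocks;
  blocks->resize(new_size);
  std::copy_backward(blocks->begin() + after + 1, blocks->begin() + old_size, blocks->end());
}

int main() {
  std::vector<int> rpo = {10, 20, 30, 40};
  MakeRoomFor(&rpo, 2, 1);  // open two slots right after element 20
  rpo[2] = 21;              // e.g. the `otherwise` block
  rpo[3] = 22;              // e.g. the `merge` block
  for (int b : rpo) std::cout << b << " ";  // prints 10 20 21 22 30 40
  std::cout << "\n";
  return 0;
}
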
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 12b748b7b6..ae638dfd0b 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -505,12 +505,12 @@ static void RunOptimizations(HGraph* graph,
graph, stats, HDeadCodeElimination::kFinalDeadCodeEliminationPassName);
HConstantFolding* fold1 = new (arena) HConstantFolding(graph);
InstructionSimplifier* simplify1 = new (arena) InstructionSimplifier(graph, stats);
- HSelectGenerator* select_generator = new (arena) HSelectGenerator(graph);
+ HSelectGenerator* select_generator = new (arena) HSelectGenerator(graph, stats);
HConstantFolding* fold2 = new (arena) HConstantFolding(graph, "constant_folding_after_inlining");
HConstantFolding* fold3 = new (arena) HConstantFolding(graph, "constant_folding_after_bce");
SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects);
- LICM* licm = new (arena) LICM(graph, *side_effects);
+ LICM* licm = new (arena) LICM(graph, *side_effects, stats);
LoadStoreElimination* lse = new (arena) LoadStoreElimination(graph, *side_effects);
HInductionVarAnalysis* induction = new (arena) HInductionVarAnalysis(graph);
BoundsCheckElimination* bce = new (arena) BoundsCheckElimination(graph, *side_effects, induction);
@@ -519,7 +519,7 @@ static void RunOptimizations(HGraph* graph,
graph, stats, "instruction_simplifier_after_bce");
InstructionSimplifier* simplify3 = new (arena) InstructionSimplifier(
graph, stats, "instruction_simplifier_before_codegen");
- IntrinsicsRecognizer* intrinsics = new (arena) IntrinsicsRecognizer(graph, driver);
+ IntrinsicsRecognizer* intrinsics = new (arena) IntrinsicsRecognizer(graph, driver, stats);
HOptimization* optimizations1[] = {
intrinsics,
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index 52a7b10cad..179004bd40 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -56,6 +56,10 @@ enum MethodCompilationStat {
kMonomorphicCall,
kPolymorphicCall,
kMegamorphicCall,
+ kBooleanSimplified,
+ kIntrinsicRecognized,
+ kLoopInvariantMoved,
+ kSelectGenerated,
kLastStat
};
@@ -124,7 +128,11 @@ class OptimizingCompilerStats {
case kInlinedPolymorphicCall: name = "InlinedPolymorphicCall"; break;
case kMonomorphicCall: name = "MonomorphicCall"; break;
case kPolymorphicCall: name = "PolymorphicCall"; break;
- case kMegamorphicCall: name = "kMegamorphicCall"; break;
+ case kMegamorphicCall: name = "MegamorphicCall"; break;
+ case kBooleanSimplified : name = "BooleanSimplified"; break;
+ case kIntrinsicRecognized : name = "IntrinsicRecognized"; break;
+ case kLoopInvariantMoved : name = "LoopInvariantMoved"; break;
+ case kSelectGenerated : name = "SelectGenerated"; break;
case kLastStat:
LOG(FATAL) << "invalid stat "
diff --git a/compiler/optimizing/select_generator.cc b/compiler/optimizing/select_generator.cc
index 105b30ae5d..e52476ea03 100644
--- a/compiler/optimizing/select_generator.cc
+++ b/compiler/optimizing/select_generator.cc
@@ -141,6 +141,8 @@ void HSelectGenerator::Run() {
block->MergeWith(merge_block);
}
+ MaybeRecordStat(MethodCompilationStat::kSelectGenerated);
+
// No need to update dominance information, as we are simplifying
// a simple diamond shape, where the join block is merged with the
// entry block. Any following blocks would have had the join block
diff --git a/compiler/optimizing/select_generator.h b/compiler/optimizing/select_generator.h
index f9d6d4d8de..c6dca581cc 100644
--- a/compiler/optimizing/select_generator.h
+++ b/compiler/optimizing/select_generator.h
@@ -47,8 +47,8 @@ namespace art {
class HSelectGenerator : public HOptimization {
public:
- explicit HSelectGenerator(HGraph* graph)
- : HOptimization(graph, kSelectGeneratorPassName) {}
+ HSelectGenerator(HGraph* graph, OptimizingCompilerStats* stats)
+ : HOptimization(graph, kSelectGeneratorPassName, stats) {}
void Run() OVERRIDE;
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 43f2499b24..09ca8b7b44 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -422,6 +422,34 @@ bool SsaBuilder::FixAmbiguousArrayOps() {
return true;
}
+static bool HasAliasInEnvironments(HInstruction* instruction) {
+ for (HUseIterator<HEnvironment*> use_it(instruction->GetEnvUses());
+ !use_it.Done();
+ use_it.Advance()) {
+ HEnvironment* use = use_it.Current()->GetUser();
+ HUseListNode<HEnvironment*>* next = use_it.Current()->GetNext();
+ if (next != nullptr && next->GetUser() == use) {
+ return true;
+ }
+ }
+
+ if (kIsDebugBuild) {
+ // Do a quadratic search to ensure same environment uses are next
+ // to each other.
+ for (HUseIterator<HEnvironment*> use_it(instruction->GetEnvUses());
+ !use_it.Done();
+ use_it.Advance()) {
+ HUseListNode<HEnvironment*>* current = use_it.Current();
+ HUseListNode<HEnvironment*>* next = current->GetNext();
+ while (next != nullptr) {
+ DCHECK(next->GetUser() != current->GetUser());
+ next = next->GetNext();
+ }
+ }
+ }
+ return false;
+}
+
void SsaBuilder::RemoveRedundantUninitializedStrings() {
if (GetGraph()->IsDebuggable()) {
// Do not perform the optimization for consistency with the interpreter
@@ -433,7 +461,7 @@ void SsaBuilder::RemoveRedundantUninitializedStrings() {
// Replace NewInstance of String with NullConstant if not used prior to
// calling StringFactory. In case of deoptimization, the interpreter is
// expected to skip null check on the `this` argument of the StringFactory call.
- if (!new_instance->HasNonEnvironmentUses()) {
+ if (!new_instance->HasNonEnvironmentUses() && !HasAliasInEnvironments(new_instance)) {
new_instance->ReplaceWith(GetGraph()->GetNullConstant());
new_instance->GetBlock()->RemoveInstruction(new_instance);
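
HasAliasInEnvironments relies on the invariant (cross-checked under kIsDebugBuild above) that uses of an instruction by the same environment sit next to each other in the use list, so an alias, i.e. the same NewInstance held in two vregs of one environment, shows up as two neighbouring use nodes with the same user. A standalone sketch of that adjacency check over a flat list; EnvUse and its env_id field are hypothetical stand-ins:

#include <iostream>
#include <vector>

// Uses of one value by environments; same-environment entries are assumed adjacent.
struct EnvUse { int env_id; size_t vreg_index; };

bool HasAliasInEnvironments(const std::vector<EnvUse>& env_uses) {
  for (size_t i = 0; i + 1 < env_uses.size(); ++i) {
    if (env_uses[i].env_id == env_uses[i + 1].env_id) {
      return true;  // the same environment references the value twice: an alias.
    }
  }
  return false;
}

int main() {
  std::vector<EnvUse> no_alias = {{0, 1}, {1, 3}, {2, 0}};
  std::vector<EnvUse> alias    = {{0, 1}, {1, 0}, {1, 2}};  // env 1 holds it in v0 and v2
  std::cout << HasAliasInEnvironments(no_alias) << HasAliasInEnvironments(alias) << "\n";  // prints 01
  return 0;
}
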
diff --git a/runtime/Android.mk b/runtime/Android.mk
index e9f7add1af..947de8a79e 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -238,6 +238,7 @@ LIBART_HOST_LDFLAGS :=
# (empty) body is called.
JIT_DEBUG_REGISTER_CODE_LDFLAGS := -Wl,--keep-unique,__jit_debug_register_code
LIBART_TARGET_LDFLAGS_arm := $(JIT_DEBUG_REGISTER_CODE_LDFLAGS)
+LIBART_TARGET_LDFLAGS_arm64 := $(JIT_DEBUG_REGISTER_CODE_LDFLAGS)
LIBART_TARGET_LDFLAGS_x86 := $(JIT_DEBUG_REGISTER_CODE_LDFLAGS)
LIBART_TARGET_LDFLAGS_x86_64 := $(JIT_DEBUG_REGISTER_CODE_LDFLAGS)
JIT_DEBUG_REGISTER_CODE_LDFLAGS :=
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 82a5f9611c..6972b3ef3f 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -1009,10 +1009,6 @@ void Locks::Init() {
DCHECK(alloc_tracker_lock_ == nullptr);
alloc_tracker_lock_ = new Mutex("AllocTracker lock", current_lock_level);
- UPDATE_CURRENT_LOCK_LEVEL(kInterpreterStringInitMapLock);
- DCHECK(interpreter_string_init_map_lock_ == nullptr);
- interpreter_string_init_map_lock_ = new Mutex("Interpreter String initializer reference map lock", current_lock_level);
-
UPDATE_CURRENT_LOCK_LEVEL(kThreadListLock);
DCHECK(thread_list_lock_ == nullptr);
thread_list_lock_ = new Mutex("thread list lock", current_lock_level);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index f674a6f3c8..e72f2a2e7b 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -102,7 +102,6 @@ enum LockLevel {
kMonitorListLock,
kJniLoadLibraryLock,
kThreadListLock,
- kInterpreterStringInitMapLock,
kAllocTrackerLock,
kDeoptimizationLock,
kProfilerLock,
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index cbaa8173d2..3453abcd64 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -733,39 +733,21 @@ static inline bool DoCallCommon(ArtMethod* called_method,
}
if (string_init && !self->IsExceptionPending()) {
- // Set the new string result of the StringFactory.
- shadow_frame.SetVRegReference(string_init_vreg_this, result->GetL());
- // Overwrite all potential copies of the original result of the new-instance of string with the
- // new result of the StringFactory. Use the verifier to find this set of registers.
- ArtMethod* method = shadow_frame.GetMethod();
- MethodReference method_ref = method->ToMethodReference();
- SafeMap<uint32_t, std::set<uint32_t>>* string_init_map_ptr = nullptr;
- MethodRefToStringInitRegMap& method_to_string_init_map = Runtime::Current()->GetStringInitMap();
- {
- MutexLock mu(self, *Locks::interpreter_string_init_map_lock_);
- auto it = method_to_string_init_map.find(method_ref);
- if (it != method_to_string_init_map.end()) {
- string_init_map_ptr = &it->second;
- }
- }
- if (string_init_map_ptr == nullptr) {
- SafeMap<uint32_t, std::set<uint32_t>> string_init_map =
- verifier::MethodVerifier::FindStringInitMap(method);
- MutexLock mu(self, *Locks::interpreter_string_init_map_lock_);
- auto it = method_to_string_init_map.lower_bound(method_ref);
- if (it == method_to_string_init_map.end() ||
- method_to_string_init_map.key_comp()(method_ref, it->first)) {
- it = method_to_string_init_map.PutBefore(it, method_ref, std::move(string_init_map));
- }
- string_init_map_ptr = &it->second;
- }
- if (string_init_map_ptr->size() != 0) {
- uint32_t dex_pc = shadow_frame.GetDexPC();
- auto map_it = string_init_map_ptr->find(dex_pc);
- if (map_it != string_init_map_ptr->end()) {
- const std::set<uint32_t>& reg_set = map_it->second;
- for (auto set_it = reg_set.begin(); set_it != reg_set.end(); ++set_it) {
- shadow_frame.SetVRegReference(*set_it, result->GetL());
+ mirror::Object* existing = shadow_frame.GetVRegReference(string_init_vreg_this);
+ if (existing == nullptr) {
+      // If it's null, we come from compiled code that was deoptimized. There are no
+      // aliases to update, as the compiler verified there were none.
+      // Set the new string result of the StringFactory.
+ shadow_frame.SetVRegReference(string_init_vreg_this, result->GetL());
+ } else {
+ // Replace the fake string that was allocated with the StringFactory result.
+ for (uint32_t i = 0; i < shadow_frame.NumberOfVRegs(); ++i) {
+ if (shadow_frame.GetVRegReference(i) == existing) {
+ DCHECK_EQ(shadow_frame.GetVRegReference(i),
+ reinterpret_cast<mirror::Object*>(shadow_frame.GetVReg(i)));
+ shadow_frame.SetVRegReference(i, result->GetL());
+ DCHECK_EQ(shadow_frame.GetVRegReference(i),
+ reinterpret_cast<mirror::Object*>(shadow_frame.GetVReg(i)));
}
}
}
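
The interpreter change above drops the verifier-computed register map: after the StringFactory call it scans every vreg of the shadow frame and swaps any reference that still points at the placeholder receiver for the real String, which covers aliases created by move-object as well. A hypothetical standalone sketch of that behaviour, using plain pointers in place of shadow-frame references:

#include <iostream>
#include <string>
#include <vector>

// Swap the placeholder receiver (and every alias of it) for the real result.
void ReplaceStringInitReceiver(std::vector<const std::string*>* vregs,
                               size_t this_vreg,
                               const std::string* result) {
  const std::string* existing = (*vregs)[this_vreg];
  if (existing == nullptr) {
    // Came from deoptimized compiled code: the compiler proved there is no
    // alias, so only the `this` register needs the result.
    (*vregs)[this_vreg] = result;
    return;
  }
  for (const std::string*& ref : *vregs) {
    if (ref == existing) {
      ref = result;  // patch the receiver and every register aliasing it.
    }
  }
}

int main() {
  const std::string placeholder = "<uninitialized>";
  const std::string result = "hello";
  // v0 holds the placeholder, v1 aliases it (as after a move-object), v2 is unrelated.
  std::vector<const std::string*> vregs = {&placeholder, &placeholder, nullptr};
  ReplaceStringInitReceiver(&vregs, /* this_vreg */ 0, &result);
  std::cout << *vregs[0] << " " << *vregs[1] << "\n";  // prints "hello hello"
  return 0;
}
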
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 1956bae52a..cbb3e89444 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -94,8 +94,6 @@ struct TraceConfig;
class Transaction;
typedef std::vector<std::pair<std::string, const void*>> RuntimeOptions;
-typedef SafeMap<MethodReference, SafeMap<uint32_t, std::set<uint32_t>>,
- MethodReferenceComparator> MethodRefToStringInitRegMap;
// Not all combinations of flags are valid. You may not visit all roots as well as the new roots
// (no logical reason to do this). You also may not start logging new roots and stop logging new
@@ -574,10 +572,6 @@ class Runtime {
return jit_options_.get();
}
- MethodRefToStringInitRegMap& GetStringInitMap() {
- return method_ref_string_init_reg_map_;
- }
-
bool IsDebuggable() const;
// Returns the build fingerprint, if set. Otherwise an empty string is returned.
@@ -803,8 +797,6 @@ class Runtime {
// Experimental opcodes should not be used by other production code.
ExperimentalFlags experimental_flags_;
- MethodRefToStringInitRegMap method_ref_string_init_reg_map_;
-
// Contains the build fingerprint, if given as a parameter.
std::string fingerprint_;
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index a6cf9eaf86..0c6060e4e8 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -617,23 +617,6 @@ ArtMethod* MethodVerifier::FindInvokedMethodAtDexPc(uint32_t dex_pc) {
return GetQuickInvokedMethod(inst, register_line, is_range, false);
}
-SafeMap<uint32_t, std::set<uint32_t>> MethodVerifier::FindStringInitMap(ArtMethod* m) {
- Thread* self = Thread::Current();
- StackHandleScope<2> hs(self);
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(m->GetDexCache()));
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(m->GetClassLoader()));
- MethodVerifier verifier(self, m->GetDexFile(), dex_cache, class_loader, &m->GetClassDef(),
- m->GetCodeItem(), m->GetDexMethodIndex(), m, m->GetAccessFlags(),
- true, true, false, true);
- // Avoid copying: The map is moved out of the verifier before the verifier is destroyed.
- return std::move(verifier.FindStringInitMap());
-}
-
-SafeMap<uint32_t, std::set<uint32_t>>& MethodVerifier::FindStringInitMap() {
- Verify();
- return GetStringInitPcRegMap();
-}
-
bool MethodVerifier::Verify() {
// Some older code doesn't correctly mark constructors as such. Test for this case by looking at
// the name.
@@ -2865,8 +2848,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
* Replace the uninitialized reference with an initialized one. We need to do this for all
* registers that have the same object instance in them, not just the "this" register.
*/
- const uint32_t this_reg = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
- work_line_->MarkRefsAsInitialized(this, this_type, this_reg, work_insn_idx_);
+ work_line_->MarkRefsAsInitialized(this, this_type);
}
if (return_type == nullptr) {
return_type = &reg_types_.FromDescriptor(GetClassLoader(), return_type_descriptor, false);
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index b53a45cf41..6d8e1ab6ee 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -213,9 +213,6 @@ class MethodVerifier {
static ArtMethod* FindInvokedMethodAtDexPc(ArtMethod* m, uint32_t dex_pc)
SHARED_REQUIRES(Locks::mutator_lock_);
- static SafeMap<uint32_t, std::set<uint32_t>> FindStringInitMap(ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
static void Init() SHARED_REQUIRES(Locks::mutator_lock_);
static void Shutdown();
@@ -294,10 +291,6 @@ class MethodVerifier {
ArtField* GetQuickFieldAccess(const Instruction* inst, RegisterLine* reg_line)
SHARED_REQUIRES(Locks::mutator_lock_);
- SafeMap<uint32_t, std::set<uint32_t>>& GetStringInitPcRegMap() {
- return string_init_pc_reg_map_;
- }
-
uint32_t GetEncounteredFailureTypes() {
return encountered_failure_types_;
}
@@ -875,11 +868,6 @@ class MethodVerifier {
friend class art::Thread;
- // Map of dex pcs of invocations of java.lang.String.<init> to the set of other registers that
- // contain the uninitialized this pointer to that invoke. Will contain no entry if there are
- // no other registers.
- SafeMap<uint32_t, std::set<uint32_t>> string_init_pc_reg_map_;
-
DISALLOW_COPY_AND_ASSIGN(MethodVerifier);
};
std::ostream& operator<<(std::ostream& os, const MethodVerifier::FailureKind& rhs);
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index b7cde995c7..82c371dec5 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -91,25 +91,14 @@ bool RegisterLine::VerifyRegisterTypeWide(MethodVerifier* verifier, uint32_t vsr
return true;
}
-void RegisterLine::MarkRefsAsInitialized(MethodVerifier* verifier, const RegType& uninit_type,
- uint32_t this_reg, uint32_t dex_pc) {
+void RegisterLine::MarkRefsAsInitialized(MethodVerifier* verifier, const RegType& uninit_type) {
DCHECK(uninit_type.IsUninitializedTypes());
- bool is_string = !uninit_type.IsUnresolvedTypes() && uninit_type.GetClass()->IsStringClass();
const RegType& init_type = verifier->GetRegTypeCache()->FromUninitialized(uninit_type);
size_t changed = 0;
for (uint32_t i = 0; i < num_regs_; i++) {
if (GetRegisterType(verifier, i).Equals(uninit_type)) {
line_[i] = init_type.GetId();
changed++;
- if (is_string && i != this_reg) {
- auto it = verifier->GetStringInitPcRegMap().find(dex_pc);
- if (it != verifier->GetStringInitPcRegMap().end()) {
- it->second.insert(i);
- } else {
- std::set<uint32_t> reg_set = { i };
- verifier->GetStringInitPcRegMap().Put(dex_pc, reg_set);
- }
- }
}
}
// Is this initializing "this"?
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index 9ea9cb763b..15ae202301 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -161,10 +161,7 @@ class RegisterLine {
* reference type. This is called when an appropriate constructor is invoked -- all copies of
* the reference must be marked as initialized.
*/
- void MarkRefsAsInitialized(MethodVerifier* verifier,
- const RegType& uninit_type,
- uint32_t this_reg,
- uint32_t dex_pc)
+ void MarkRefsAsInitialized(MethodVerifier* verifier, const RegType& uninit_type)
SHARED_REQUIRES(Locks::mutator_lock_);
/*
diff --git a/test/575-checker-string-init-alias/expected.txt b/test/575-checker-string-init-alias/expected.txt
new file mode 100644
index 0000000000..6a5618ebc6
--- /dev/null
+++ b/test/575-checker-string-init-alias/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/575-checker-string-init-alias/info.txt b/test/575-checker-string-init-alias/info.txt
new file mode 100644
index 0000000000..a91ea645e7
--- /dev/null
+++ b/test/575-checker-string-init-alias/info.txt
@@ -0,0 +1,2 @@
+Test for the String.<init> change and deoptimization: make
+sure the compiler knows how to handle dex register aliases.
diff --git a/test/575-checker-string-init-alias/smali/TestCase.smali b/test/575-checker-string-init-alias/smali/TestCase.smali
new file mode 100644
index 0000000000..ff04b278a4
--- /dev/null
+++ b/test/575-checker-string-init-alias/smali/TestCase.smali
@@ -0,0 +1,72 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTestCase;
+
+.super Ljava/lang/Object;
+
+.field public static staticField:Ljava/lang/String;
+
+## CHECK-START: void TestCase.testNoAlias(int[], java.lang.String) register (after)
+## CHECK: <<Null:l\d+>> NullConstant
+## CHECK: Deoptimize env:[[<<Null>>,{{.*]]}}
+## CHECK: InvokeStaticOrDirect method_name:java.lang.String.<init>
+.method public static testNoAlias([ILjava/lang/String;)V
+ .registers 6
+ const v1, 0
+ const v2, 1
+ new-instance v0, Ljava/lang/String;
+
+ # Will deoptimize.
+ aget v3, p0, v1
+
+ # Check that we're being executed by the interpreter.
+ invoke-static {}, LMain;->assertIsInterpreted()V
+
+ invoke-direct {v0, p1}, Ljava/lang/String;-><init>(Ljava/lang/String;)V
+
+ sput-object v0, LTestCase;->staticField:Ljava/lang/String;
+
+ # Will throw AIOOBE.
+ aget v3, p0, v2
+
+ return-void
+.end method
+
+## CHECK-START: void TestCase.testAlias(int[], java.lang.String) register (after)
+## CHECK: <<New:l\d+>> NewInstance
+## CHECK: Deoptimize env:[[<<New>>,<<New>>,{{.*]]}}
+## CHECK: InvokeStaticOrDirect method_name:java.lang.String.<init>
+.method public static testAlias([ILjava/lang/String;)V
+ .registers 7
+ const v2, 0
+ const v3, 1
+ new-instance v0, Ljava/lang/String;
+ move-object v1, v0
+
+ # Will deoptimize.
+ aget v4, p0, v2
+
+ # Check that we're being executed by the interpreter.
+ invoke-static {}, LMain;->assertIsInterpreted()V
+
+ invoke-direct {v1, p1}, Ljava/lang/String;-><init>(Ljava/lang/String;)V
+
+ sput-object v1, LTestCase;->staticField:Ljava/lang/String;
+
+ # Will throw AIOOBE.
+ aget v4, p0, v3
+
+ return-void
+.end method
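The checker lines above hinge on aliasing: in testAlias, move-object makes v0 and v1 refer to the same not-yet-initialized String, so the Deoptimize environment has to carry that NewInstance twice. A small sketch of the idea, treating a deoptimization environment as nothing more than an array indexed by dex register (illustrative only, not ART's actual representation):

    // Illustrative only: when execution deoptimizes at the first aget, the
    // interpreter must see the same reference in every register that aliased it.
    final class DeoptEnvSketch {
      public static void main(String[] args) {
        Object newInstance = new Object();  // stands in for the uninitialized String
        Object[] vregs = new Object[7];     // testAlias declares ".registers 7"
        vregs[0] = newInstance;             // v0: new-instance target
        vregs[1] = newInstance;             // v1: alias created by move-object v1, v0
        System.out.println(vregs[0] == vregs[1]);  // true: both slots restored identically
      }
    }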
diff --git a/test/575-checker-string-init-alias/src/Main.java b/test/575-checker-string-init-alias/src/Main.java
new file mode 100644
index 0000000000..1ab320748a
--- /dev/null
+++ b/test/575-checker-string-init-alias/src/Main.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.lang.reflect.InvocationTargetException;
+
+public class Main {
+ // Workaround for b/18051191.
+ class Inner {}
+
+ public static native void assertIsInterpreted();
+
+ private static void assertEqual(String expected, String actual) {
+ if (!expected.equals(actual)) {
+ throw new Error("Assertion failed: " + expected + " != " + actual);
+ }
+ }
+
+ public static void main(String[] args) throws Throwable {
+ System.loadLibrary(args[0]);
+ Class<?> c = Class.forName("TestCase");
+ int[] array = new int[1];
+
+ {
+ Method m = c.getMethod("testNoAlias", int[].class, String.class);
+ try {
+ m.invoke(null, new Object[] { array, "foo" });
+ throw new Error("Expected AIOOBE");
+ } catch (InvocationTargetException e) {
+ if (!(e.getCause() instanceof ArrayIndexOutOfBoundsException)) {
+ throw new Error("Expected AIOOBE");
+ }
+ // Ignore
+ }
+ Field field = c.getField("staticField");
+ assertEqual("foo", (String)field.get(null));
+ }
+
+ {
+ Method m = c.getMethod("testAlias", int[].class, String.class);
+ try {
+ m.invoke(null, new Object[] { array, "bar" });
+ throw new Error("Expected AIOOBE");
+ } catch (InvocationTargetException e) {
+ if (!(e.getCause() instanceof ArrayIndexOutOfBoundsException)) {
+ throw new Error("Expected AIOOBE");
+ }
+ // Ignore
+ }
+ Field field = c.getField("staticField");
+ assertEqual("bar", (String)field.get(null));
+ }
+ }
+}
diff --git a/test/576-polymorphic-inlining/expected.txt b/test/576-polymorphic-inlining/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/576-polymorphic-inlining/expected.txt
diff --git a/test/576-polymorphic-inlining/info.txt b/test/576-polymorphic-inlining/info.txt
new file mode 100644
index 0000000000..b3ef0c8fba
--- /dev/null
+++ b/test/576-polymorphic-inlining/info.txt
@@ -0,0 +1 @@
+Test for polymorphic inlining.
diff --git a/test/576-polymorphic-inlining/src/Main.java b/test/576-polymorphic-inlining/src/Main.java
new file mode 100644
index 0000000000..d8d09aff87
--- /dev/null
+++ b/test/576-polymorphic-inlining/src/Main.java
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ for (int i = 0; i < 20000; ++i) {
+ $noinline$testVoid(new Main());
+ $noinline$testVoid(new SubMain());
+ $noinline$testVoid(new SubSubMain());
+
+ $noinline$testWithReturnValue(new Main());
+ $noinline$testWithReturnValue(new SubMain());
+ $noinline$testWithReturnValue(new SubSubMain());
+
+ $noinline$testWithBackEdge(new Main());
+ $noinline$testWithBackEdge(new SubMain());
+ $noinline$testWithBackEdge(new SubSubMain());
+ }
+ }
+
+ public static void assertIdentical(Object expected, Object actual) {
+ if (expected != actual) {
+ throw new Error("Expected " + expected + ", got " + actual);
+ }
+ }
+
+ public static void $noinline$testVoid(Main m) {
+ if (doThrow) throw new Error("");
+ m.willInlineVoid();
+ m.willOnlyInlineForMainVoid();
+ }
+
+ public static void $noinline$testWithReturnValue(Main m) {
+ if (doThrow) throw new Error("");
+ assertIdentical(m.getClass(), m.willInlineWithReturnValue());
+ assertIdentical(m.getClass(), m.willOnlyInlineForMainWithReturnValue());
+ }
+
+ public static void $noinline$testWithBackEdge(Main m) {
+ if (doThrow) throw new Error("");
+ for (int i = 0; i < 10; ++i) {
+ m.willInlineVoid();
+ }
+ for (int i = 0; i < 10; ++i) {
+ m.willOnlyInlineForMainVoid();
+ }
+ }
+
+ public void willInlineVoid() {
+ }
+
+ public void willOnlyInlineForMainVoid() {
+ }
+
+ public Class willInlineWithReturnValue() {
+ return Main.class;
+ }
+
+ public Class willOnlyInlineForMainWithReturnValue() {
+ return Main.class;
+ }
+ public static boolean doThrow;
+}
+
+class SubMain extends Main {
+ public void willOnlyInlineForMainVoid() {
+ if (doThrow) throw new Error("");
+ }
+
+ public void willInlineVoid() {
+ }
+
+ public Class willInlineWithReturnValue() {
+ return SubMain.class;
+ }
+
+ public Class willOnlyInlineForMainWithReturnValue() {
+ return SubMain.class;
+ }
+}
+
+class SubSubMain extends SubMain {
+ public Class willInlineWithReturnValue() {
+ return SubSubMain.class;
+ }
+
+ public Class willOnlyInlineForMainWithReturnValue() {
+ return SubSubMain.class;
+ }
+}
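For context on what this test exercises: polymorphic inlining lets the JIT use its inline caches to guard inlined bodies with class checks, keeping a virtual-call fallback for receivers the cache did not record. A conceptual sketch of that shape for the first call in $noinline$testVoid, assuming the cache observed Main and SubMain (illustrative Java with a hypothetical helper name; the real transformation happens on the optimizing compiler's IR, not on source):

    // Conceptual rewrite of "m.willInlineVoid()" after polymorphic inlining.
    final class PolymorphicInliningSketch {
      static void testVoidAfterInlining(Main m) {
        Class<?> k = m.getClass();
        if (k == Main.class) {
          // inlined Main.willInlineVoid(): empty body
        } else if (k == SubMain.class) {
          // inlined SubMain.willInlineVoid(): empty body
        } else {
          m.willInlineVoid();  // unrecorded receiver (e.g. SubSubMain): keep the virtual call
        }
      }
    }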