Diffstat (limited to 'compiler/optimizing/nodes.cc')
-rw-r--r--  compiler/optimizing/nodes.cc  762
1 files changed, 483 insertions, 279 deletions
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index fc12224783..1a426d5930 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -13,9 +13,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
#include "nodes.h"
+#include <cfloat>
+
#include "code_generator.h"
#include "common_dominator.h"
#include "ssa_builder.h"
@@ -28,6 +29,21 @@
namespace art {
+// Enable floating-point static evaluation during constant folding
+// only if all floating-point operations and constants evaluate in the
+// range and precision of the type used (i.e., 32-bit float, 64-bit
+// double).
+static constexpr bool kEnableFloatingPointStaticEvaluation = (FLT_EVAL_METHOD == 0);
+
+void HGraph::InitializeInexactObjectRTI(StackHandleScopeCollection* handles) {
+ ScopedObjectAccess soa(Thread::Current());
+ // Create the inexact Object reference type and store it in the HGraph.
+ ClassLinker* linker = Runtime::Current()->GetClassLinker();
+ inexact_object_rti_ = ReferenceTypeInfo::Create(
+ handles->NewHandle(linker->GetClassRoot(ClassLinker::kJavaLangObject)),
+ /* is_exact */ false);
+}
+
void HGraph::AddBlock(HBasicBlock* block) {
block->SetBlockId(blocks_.size());
blocks_.push_back(block);
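A note on the FLT_EVAL_METHOD guard introduced above: folding float/double expressions at compile time is only sound when the host evaluates them in the range and precision of their declared type. The snippet below is a minimal standalone sketch (not part of this patch) of the same check; on targets where FLT_EVAL_METHOD is 1 or 2 (for example x87 extended precision), intermediates carry excess precision and a folded constant could differ from what the generated code computes at run time.

#include <cfloat>
#include <cstdio>

// FLT_EVAL_METHOD == 0 means float and double arithmetic is evaluated in the
// range and precision of the declared type, so host-side constant folding
// cannot change observable results. Values of 1, 2 or -1 disable folding here.
static constexpr bool kFoldFloatingPointConstants = (FLT_EVAL_METHOD == 0);

int main() {
  std::printf("FLT_EVAL_METHOD=%d, folding %s\n",
              FLT_EVAL_METHOD,
              kFoldFloatingPointConstants ? "enabled" : "disabled");
  return 0;
}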
@@ -38,7 +54,7 @@ void HGraph::FindBackEdges(ArenaBitVector* visited) {
DCHECK_EQ(visited->GetHighestBitSet(), -1);
// Nodes that we're currently visiting, indexed by block id.
- ArenaBitVector visiting(arena_, blocks_.size(), false);
+ ArenaBitVector visiting(arena_, blocks_.size(), false, kArenaAllocGraphBuilder);
// Number of successors visited from a given node, indexed by block id.
ArenaVector<size_t> successors_visited(blocks_.size(), 0u, arena_->Adapter());
// Stack of nodes that we're currently visiting (same as marked in "visiting" above).
@@ -90,6 +106,7 @@ void HGraph::RemoveInstructionsAsUsersFromDeadBlocks(const ArenaBitVector& visit
for (size_t i = 0; i < blocks_.size(); ++i) {
if (!visited.IsBitSet(i)) {
HBasicBlock* block = blocks_[i];
+ if (block == nullptr) continue;
DCHECK(block->GetPhis().IsEmpty()) << "Phis are not inserted at this stage";
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
RemoveAsUser(it.Current());
@@ -102,6 +119,7 @@ void HGraph::RemoveDeadBlocks(const ArenaBitVector& visited) {
for (size_t i = 0; i < blocks_.size(); ++i) {
if (!visited.IsBitSet(i)) {
HBasicBlock* block = blocks_[i];
+ if (block == nullptr) continue;
// We only need to update the successor, which might be live.
for (HBasicBlock* successor : block->GetSuccessors()) {
successor->RemovePredecessor(block);
@@ -109,17 +127,20 @@ void HGraph::RemoveDeadBlocks(const ArenaBitVector& visited) {
// Remove the block from the list of blocks, so that further analyses
// never see it.
blocks_[i] = nullptr;
+ if (block->IsExitBlock()) {
+ SetExitBlock(nullptr);
+ }
}
}
}
-void HGraph::BuildDominatorTree() {
+GraphAnalysisResult HGraph::BuildDominatorTree() {
// (1) Simplify the CFG so that catch blocks have only exceptional incoming
// edges. This invariant simplifies building SSA form because Phis cannot
// collect both normal- and exceptional-flow values at the same time.
SimplifyCatchBlocks();
- ArenaBitVector visited(arena_, blocks_.size(), false);
+ ArenaBitVector visited(arena_, blocks_.size(), false, kArenaAllocGraphBuilder);
// (2) Find the back edges in the graph doing a DFS traversal.
FindBackEdges(&visited);
@@ -130,7 +151,7 @@ void HGraph::BuildDominatorTree() {
RemoveInstructionsAsUsersFromDeadBlocks(visited);
// (4) Remove blocks not visited during the initial DFS.
- // Step (4) requires dead blocks to be removed from the
+ // Step (5) requires dead blocks to be removed from the
// predecessors list of live blocks.
RemoveDeadBlocks(visited);
@@ -140,6 +161,20 @@ void HGraph::BuildDominatorTree() {
// (6) Compute the dominance information and the reverse post order.
ComputeDominanceInformation();
+
+ // (7) Analyze loops discovered through back edge analysis, and
+ // set the loop information on each block.
+ GraphAnalysisResult result = AnalyzeLoops();
+ if (result != kAnalysisSuccess) {
+ return result;
+ }
+
+ // (8) Precompute per-block try membership before entering the SSA builder,
+ // which needs the information to build catch block phis from values of
+ // locals at throwing instructions inside try blocks.
+ ComputeTryBlockInformation();
+
+ return kAnalysisSuccess;
}
void HGraph::ClearDominanceInformation() {
@@ -149,11 +184,26 @@ void HGraph::ClearDominanceInformation() {
reverse_post_order_.clear();
}
+void HGraph::ClearLoopInformation() {
+ SetHasIrreducibleLoops(false);
+ for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
+ it.Current()->SetLoopInformation(nullptr);
+ }
+}
+
void HBasicBlock::ClearDominanceInformation() {
dominated_blocks_.clear();
dominator_ = nullptr;
}
+HInstruction* HBasicBlock::GetFirstInstructionDisregardMoves() const {
+ HInstruction* instruction = GetFirstInstruction();
+ while (instruction->IsParallelMove()) {
+ instruction = instruction->GetNext();
+ }
+ return instruction;
+}
+
void HGraph::ComputeDominanceInformation() {
DCHECK(reverse_post_order_.empty());
reverse_post_order_.reserve(blocks_.size());
@@ -190,46 +240,20 @@ void HGraph::ComputeDominanceInformation() {
// dominator of the block. We can then start visiting its successors.
if (++visits[successor->GetBlockId()] ==
successor->GetPredecessors().size() - successor->NumberOfBackEdges()) {
- successor->GetDominator()->AddDominatedBlock(successor);
reverse_post_order_.push_back(successor);
worklist.push_back(successor);
}
}
}
-}
-
-BuildSsaResult HGraph::TryBuildingSsa(StackHandleScopeCollection* handles) {
- BuildDominatorTree();
- // The SSA builder requires loops to all be natural. Specifically, the dead phi
- // elimination phase checks the consistency of the graph when doing a post-order
- // visit for eliminating dead phis: a dead phi can only have loop header phi
- // users remaining when being visited.
- BuildSsaResult result = AnalyzeNaturalLoops();
- if (result != kBuildSsaSuccess) {
- return result;
- }
-
- // Precompute per-block try membership before entering the SSA builder,
- // which needs the information to build catch block phis from values of
- // locals at throwing instructions inside try blocks.
- ComputeTryBlockInformation();
-
- // Create the inexact Object reference type and store it in the HGraph.
- ScopedObjectAccess soa(Thread::Current());
- ClassLinker* linker = Runtime::Current()->GetClassLinker();
- inexact_object_rti_ = ReferenceTypeInfo::Create(
- handles->NewHandle(linker->GetClassRoot(ClassLinker::kJavaLangObject)),
- /* is_exact */ false);
-
- // Transforms graph to SSA form.
- result = SsaBuilder(this, handles).BuildSsa();
- if (result != kBuildSsaSuccess) {
- return result;
+ // Populate `dominated_blocks_` information after computing all dominators.
+ // The potential presence of irreducible loops requires doing this afterwards.
+ for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
+ HBasicBlock* block = it.Current();
+ if (!block->IsEntryBlock()) {
+ block->GetDominator()->AddDominatedBlock(block);
+ }
}
-
- in_ssa_form_ = true;
- return kBuildSsaSuccess;
}
HBasicBlock* HGraph::SplitEdge(HBasicBlock* block, HBasicBlock* successor) {
@@ -261,9 +285,10 @@ void HGraph::SimplifyLoop(HBasicBlock* header) {
// Make sure the loop has only one pre header. This simplifies SSA building by having
// to just look at the pre header to know which locals are initialized at entry of the
- // loop.
+ // loop. Also, don't allow the entry block to be a pre header: this simplifies inlining
+ // this graph.
size_t number_of_incomings = header->GetPredecessors().size() - info->NumberOfBackEdges();
- if (number_of_incomings != 1) {
+ if (number_of_incomings != 1 || (GetEntryBlock()->GetSingleSuccessor() == header)) {
HBasicBlock* pre_header = new (arena_) HBasicBlock(this, header->GetDexPc());
AddBlock(pre_header);
pre_header->AddInstruction(new (arena_) HGoto(header->GetDexPc()));
@@ -331,7 +356,7 @@ void HGraph::SimplifyCatchBlocks() {
// can be invalidated. We remember the initial size to avoid iterating over the new blocks.
for (size_t block_id = 0u, end = blocks_.size(); block_id != end; ++block_id) {
HBasicBlock* catch_block = blocks_[block_id];
- if (!catch_block->IsCatchBlock()) {
+ if (catch_block == nullptr || !catch_block->IsCatchBlock()) {
continue;
}
@@ -434,11 +459,15 @@ void HGraph::SimplifyCFG() {
}
if (block->IsLoopHeader()) {
SimplifyLoop(block);
+ } else if (!block->IsEntryBlock() && block->GetFirstInstruction()->IsSuspendCheck()) {
+ // We are being called by the dead code elimination pass, and what used to be
+ // a loop got dismantled. Just remove the suspend check.
+ block->RemoveInstruction(block->GetFirstInstruction());
}
}
}
-BuildSsaResult HGraph::AnalyzeNaturalLoops() const {
+GraphAnalysisResult HGraph::AnalyzeLoops() const {
// Order does not matter.
for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
@@ -446,16 +475,26 @@ BuildSsaResult HGraph::AnalyzeNaturalLoops() const {
if (block->IsCatchBlock()) {
// TODO: Dealing with exceptional back edges could be tricky because
// they only approximate the real control flow. Bail out for now.
- return kBuildSsaFailThrowCatchLoop;
- }
- HLoopInformation* info = block->GetLoopInformation();
- if (!info->Populate()) {
- // Abort if the loop is non natural. We currently bailout in such cases.
- return kBuildSsaFailNonNaturalLoop;
+ return kAnalysisFailThrowCatchLoop;
}
+ block->GetLoopInformation()->Populate();
}
}
- return kBuildSsaSuccess;
+ return kAnalysisSuccess;
+}
+
+void HLoopInformation::Dump(std::ostream& os) {
+ os << "header: " << header_->GetBlockId() << std::endl;
+ os << "pre header: " << GetPreHeader()->GetBlockId() << std::endl;
+ for (HBasicBlock* block : back_edges_) {
+ os << "back edge: " << block->GetBlockId() << std::endl;
+ }
+ for (HBasicBlock* block : header_->GetPredecessors()) {
+ os << "predecessor: " << block->GetBlockId() << std::endl;
+ }
+ for (uint32_t idx : blocks_.Indexes()) {
+ os << " in loop: " << idx << std::endl;
+ }
}
void HGraph::InsertConstant(HConstant* constant) {
@@ -555,61 +594,69 @@ void HLoopInformation::PopulateRecursive(HBasicBlock* block) {
}
}
-bool HLoopInformation::Populate() {
- DCHECK_EQ(blocks_.NumSetBits(), 0u) << "Loop information has already been populated";
- for (HBasicBlock* back_edge : GetBackEdges()) {
- DCHECK(back_edge->GetDominator() != nullptr);
- if (!header_->Dominates(back_edge)) {
- // This loop is not natural. Do not bother going further.
- return false;
- }
-
- // Populate this loop: starting with the back edge, recursively add predecessors
- // that are not already part of that loop. Set the header as part of the loop
- // to end the recursion.
- // This is a recursive implementation of the algorithm described in
- // "Advanced Compiler Design & Implementation" (Muchnick) p192.
- blocks_.SetBit(header_->GetBlockId());
- PopulateRecursive(back_edge);
+void HLoopInformation::PopulateIrreducibleRecursive(HBasicBlock* block) {
+ if (blocks_.IsBitSet(block->GetBlockId())) {
+ return;
}
- return true;
-}
-void HLoopInformation::Update() {
- HGraph* graph = header_->GetGraph();
- for (uint32_t id : blocks_.Indexes()) {
- HBasicBlock* block = graph->GetBlocks()[id];
- // Reset loop information of non-header blocks inside the loop, except
- // members of inner nested loops because those should already have been
- // updated by their own LoopInformation.
- if (block->GetLoopInformation() == this && block != header_) {
- block->SetLoopInformation(nullptr);
+ if (block->IsLoopHeader()) {
+ // If we hit a loop header in an irreducible loop, we first check if the
+ // pre header of that loop belongs to the currently analyzed loop. If it does,
+ // then we visit the back edges.
+ // Note that we cannot use GetPreHeader, as the loop may not have been
+ // populated yet.
+ HBasicBlock* pre_header = block->GetPredecessors()[0];
+ PopulateIrreducibleRecursive(pre_header);
+ if (blocks_.IsBitSet(pre_header->GetBlockId())) {
+ blocks_.SetBit(block->GetBlockId());
+ block->SetInLoop(this);
+ HLoopInformation* info = block->GetLoopInformation();
+ for (HBasicBlock* back_edge : info->GetBackEdges()) {
+ PopulateIrreducibleRecursive(back_edge);
+ }
+ }
+ } else {
+ // Visit all predecessors. If one predecessor is part of the loop, this
+ // block is also part of this loop.
+ for (HBasicBlock* predecessor : block->GetPredecessors()) {
+ PopulateIrreducibleRecursive(predecessor);
+ if (blocks_.IsBitSet(predecessor->GetBlockId())) {
+ blocks_.SetBit(block->GetBlockId());
+ block->SetInLoop(this);
+ }
}
}
- blocks_.ClearAllBits();
+}
- if (back_edges_.empty()) {
- // The loop has been dismantled, delete its suspend check and remove info
- // from the header.
- DCHECK(HasSuspendCheck());
- header_->RemoveInstruction(suspend_check_);
- header_->SetLoopInformation(nullptr);
- header_ = nullptr;
- suspend_check_ = nullptr;
- } else {
- if (kIsDebugBuild) {
- for (HBasicBlock* back_edge : back_edges_) {
- DCHECK(header_->Dominates(back_edge));
+void HLoopInformation::Populate() {
+ DCHECK_EQ(blocks_.NumSetBits(), 0u) << "Loop information has already been populated";
+ // Populate this loop: starting with the back edge, recursively add predecessors
+ // that are not already part of that loop. Set the header as part of the loop
+ // to end the recursion.
+ // This is a recursive implementation of the algorithm described in
+ // "Advanced Compiler Design & Implementation" (Muchnick) p192.
+ blocks_.SetBit(header_->GetBlockId());
+ header_->SetInLoop(this);
+ for (HBasicBlock* back_edge : GetBackEdges()) {
+ DCHECK(back_edge->GetDominator() != nullptr);
+ if (!header_->Dominates(back_edge)) {
+ irreducible_ = true;
+ header_->GetGraph()->SetHasIrreducibleLoops(true);
+ PopulateIrreducibleRecursive(back_edge);
+ } else {
+ if (header_->GetGraph()->IsCompilingOsr()) {
+ irreducible_ = true;
+ header_->GetGraph()->SetHasIrreducibleLoops(true);
}
+ PopulateRecursive(back_edge);
}
- // This loop still has reachable back edges. Repopulate the list of blocks.
- bool populate_successful = Populate();
- DCHECK(populate_successful);
}
}
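To make the natural/irreducible distinction used by Populate above concrete: a back edge belongs to a natural loop when the loop header dominates its source; if any back-edge source escapes the header's dominance, control can reach the loop body without going through the header and the loop is irreducible. The sketch below is illustrative only (Block, Dominates and IsIrreducibleLoop are hypothetical stand-ins, not ART's HBasicBlock API).

#include <vector>

// A block that only knows its immediate dominator (null for the entry block).
struct Block {
  const Block* idom = nullptr;
};

// True if `a` dominates `b`: walk b's immediate-dominator chain upwards.
bool Dominates(const Block* a, const Block* b) {
  for (const Block* it = b; it != nullptr; it = it->idom) {
    if (it == a) {
      return true;
    }
  }
  return false;
}

// A loop is irreducible if some back-edge source is not dominated by the
// header, i.e. there is an entry into the loop body that bypasses the header.
bool IsIrreducibleLoop(const Block* header,
                       const std::vector<const Block*>& back_edge_sources) {
  for (const Block* source : back_edge_sources) {
    if (!Dominates(header, source)) {
      return true;
    }
  }
  return false;
}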
HBasicBlock* HLoopInformation::GetPreHeader() const {
- return header_->GetDominator();
+ HBasicBlock* block = header_->GetPredecessors()[0];
+ DCHECK(irreducible_ || (block == header_->GetDominator()));
+ return block;
}
bool HLoopInformation::Contains(const HBasicBlock& block) const {
@@ -676,6 +723,22 @@ void HBasicBlock::ReplaceAndRemoveInstructionWith(HInstruction* initial,
RemoveInstruction(initial);
}
+void HBasicBlock::MoveInstructionBefore(HInstruction* insn, HInstruction* cursor) {
+ DCHECK(!cursor->IsPhi());
+ DCHECK(!insn->IsPhi());
+ DCHECK(!insn->IsControlFlow());
+ DCHECK(insn->CanBeMoved());
+ DCHECK(!insn->HasSideEffects());
+
+ HBasicBlock* from_block = insn->GetBlock();
+ HBasicBlock* to_block = cursor->GetBlock();
+ DCHECK(from_block != to_block);
+
+ from_block->RemoveInstruction(insn, /* ensure_safety */ false);
+ insn->SetBlock(to_block);
+ to_block->instructions_.InsertInstructionBefore(insn, cursor);
+}
+
static void Add(HInstructionList* instruction_list,
HBasicBlock* block,
HInstruction* instruction) {
@@ -796,7 +859,6 @@ void HEnvironment::CopyFromWithLoopPhiAdjustment(HEnvironment* env,
// At the end of the loop pre-header, the corresponding value for instruction
// is the first input of the phi.
HInstruction* initial = instruction->AsPhi()->InputAt(0);
- DCHECK(initial->GetBlock()->Dominates(loop_header));
SetRawEnvAt(i, initial);
initial->AddEnvUseAt(this, i);
} else {
@@ -1108,25 +1170,37 @@ HConstant* HUnaryOperation::TryStaticEvaluation() const {
return Evaluate(GetInput()->AsIntConstant());
} else if (GetInput()->IsLongConstant()) {
return Evaluate(GetInput()->AsLongConstant());
+ } else if (kEnableFloatingPointStaticEvaluation) {
+ if (GetInput()->IsFloatConstant()) {
+ return Evaluate(GetInput()->AsFloatConstant());
+ } else if (GetInput()->IsDoubleConstant()) {
+ return Evaluate(GetInput()->AsDoubleConstant());
+ }
}
return nullptr;
}
HConstant* HBinaryOperation::TryStaticEvaluation() const {
- if (GetLeft()->IsIntConstant()) {
- if (GetRight()->IsIntConstant()) {
- return Evaluate(GetLeft()->AsIntConstant(), GetRight()->AsIntConstant());
- } else if (GetRight()->IsLongConstant()) {
- return Evaluate(GetLeft()->AsIntConstant(), GetRight()->AsLongConstant());
- }
+ if (GetLeft()->IsIntConstant() && GetRight()->IsIntConstant()) {
+ return Evaluate(GetLeft()->AsIntConstant(), GetRight()->AsIntConstant());
} else if (GetLeft()->IsLongConstant()) {
if (GetRight()->IsIntConstant()) {
+ // The binop(long, int) case is only valid for shifts and rotations.
+ DCHECK(IsShl() || IsShr() || IsUShr() || IsRor()) << DebugName();
return Evaluate(GetLeft()->AsLongConstant(), GetRight()->AsIntConstant());
} else if (GetRight()->IsLongConstant()) {
return Evaluate(GetLeft()->AsLongConstant(), GetRight()->AsLongConstant());
}
} else if (GetLeft()->IsNullConstant() && GetRight()->IsNullConstant()) {
+ // The binop(null, null) case is only valid for equal and not-equal conditions.
+ DCHECK(IsEqual() || IsNotEqual()) << DebugName();
return Evaluate(GetLeft()->AsNullConstant(), GetRight()->AsNullConstant());
+ } else if (kEnableFloatingPointStaticEvaluation) {
+ if (GetLeft()->IsFloatConstant() && GetRight()->IsFloatConstant()) {
+ return Evaluate(GetLeft()->AsFloatConstant(), GetRight()->AsFloatConstant());
+ } else if (GetLeft()->IsDoubleConstant() && GetRight()->IsDoubleConstant()) {
+ return Evaluate(GetLeft()->AsDoubleConstant(), GetRight()->AsDoubleConstant());
+ }
}
return nullptr;
}
@@ -1154,6 +1228,20 @@ HInstruction* HBinaryOperation::GetLeastConstantLeft() const {
}
}
+std::ostream& operator<<(std::ostream& os, const ComparisonBias& rhs) {
+ switch (rhs) {
+ case ComparisonBias::kNoBias:
+ return os << "no_bias";
+ case ComparisonBias::kGtBias:
+ return os << "gt_bias";
+ case ComparisonBias::kLtBias:
+ return os << "lt_bias";
+ default:
+ LOG(FATAL) << "Unknown ComparisonBias: " << static_cast<int>(rhs);
+ UNREACHABLE();
+ }
+}
+
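For context on the bias values printed above: the bias records which way an unordered floating-point comparison collapses when an operand is NaN, mirroring the Dex cmpg (NaN compares as greater) and cmpl (NaN compares as less) semantics. The helper below is a hedged illustration, not an ART function.

#include <cmath>

enum class ComparisonBias { kNoBias, kGtBias, kLtBias };

// Illustrative three-way float comparison: with kGtBias a NaN operand yields
// +1 (like Dex cmpg-float), with kLtBias it yields -1 (like cmpl-float).
// kNoBias is used when the inputs cannot be NaN; the -1 here is arbitrary.
int CompareFloats(float a, float b, ComparisonBias bias) {
  if (std::isnan(a) || std::isnan(b)) {
    return (bias == ComparisonBias::kGtBias) ? 1 : -1;
  }
  if (a == b) {
    return 0;
  }
  return (a < b) ? -1 : 1;
}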
bool HCondition::IsBeforeWhenDisregardMoves(HInstruction* instruction) const {
return this == instruction->GetPreviousDisregardingMoves();
}
@@ -1335,7 +1423,38 @@ HBasicBlock* HBasicBlock::SplitCatchBlockAfterMoveException() {
}
}
-HBasicBlock* HBasicBlock::SplitAfter(HInstruction* cursor) {
+HBasicBlock* HBasicBlock::SplitBeforeForInlining(HInstruction* cursor) {
+ DCHECK_EQ(cursor->GetBlock(), this);
+
+ HBasicBlock* new_block = new (GetGraph()->GetArena()) HBasicBlock(GetGraph(),
+ cursor->GetDexPc());
+ new_block->instructions_.first_instruction_ = cursor;
+ new_block->instructions_.last_instruction_ = instructions_.last_instruction_;
+ instructions_.last_instruction_ = cursor->previous_;
+ if (cursor->previous_ == nullptr) {
+ instructions_.first_instruction_ = nullptr;
+ } else {
+ cursor->previous_->next_ = nullptr;
+ cursor->previous_ = nullptr;
+ }
+
+ new_block->instructions_.SetBlockOfInstructions(new_block);
+
+ for (HBasicBlock* successor : GetSuccessors()) {
+ new_block->successors_.push_back(successor);
+ successor->predecessors_[successor->GetPredecessorIndexOf(this)] = new_block;
+ }
+ successors_.clear();
+
+ for (HBasicBlock* dominated : GetDominatedBlocks()) {
+ dominated->dominator_ = new_block;
+ new_block->dominated_blocks_.push_back(dominated);
+ }
+ dominated_blocks_.clear();
+ return new_block;
+}
+
+HBasicBlock* HBasicBlock::SplitAfterForInlining(HInstruction* cursor) {
DCHECK(!cursor->IsControlFlow());
DCHECK_NE(instructions_.last_instruction_, cursor);
DCHECK_EQ(cursor->GetBlock(), this);
@@ -1488,6 +1607,20 @@ void HInstructionList::AddAfter(HInstruction* cursor, const HInstructionList& in
}
}
+void HInstructionList::AddBefore(HInstruction* cursor, const HInstructionList& instruction_list) {
+ DCHECK(Contains(cursor));
+ if (!instruction_list.IsEmpty()) {
+ if (cursor == first_instruction_) {
+ first_instruction_ = instruction_list.first_instruction_;
+ } else {
+ cursor->previous_->next_ = instruction_list.first_instruction_;
+ }
+ instruction_list.last_instruction_->next_ = cursor;
+ instruction_list.first_instruction_->previous_ = cursor->previous_;
+ cursor->previous_ = instruction_list.last_instruction_;
+ }
+}
+
void HInstructionList::Add(const HInstructionList& instruction_list) {
if (IsEmpty()) {
first_instruction_ = instruction_list.first_instruction_;
@@ -1521,21 +1654,77 @@ void HBasicBlock::DisconnectAndDelete() {
// iteration.
DCHECK(dominated_blocks_.empty());
- // (1) Remove the block from all loops it is included in.
- for (HLoopInformationOutwardIterator it(*this); !it.Done(); it.Advance()) {
- HLoopInformation* loop_info = it.Current();
- loop_info->Remove(this);
- if (loop_info->IsBackEdge(*this)) {
- // If this was the last back edge of the loop, we deliberately leave the
- // loop in an inconsistent state and will fail SSAChecker unless the
- // entire loop is removed during the pass.
- loop_info->RemoveBackEdge(this);
+ // The following steps gradually remove the block from all its dependants in
+ // post order (b/27683071).
+
+ // (1) Store a basic block that we'll use in step (5) to find loops to be updated.
+ // We need to do this before step (4) which destroys the predecessor list.
+ HBasicBlock* loop_update_start = this;
+ if (IsLoopHeader()) {
+ HLoopInformation* loop_info = GetLoopInformation();
+ // All other blocks in this loop should have been removed because the header
+ // was their dominator.
+ // Note that we do not remove `this` from `loop_info` as it is unreachable.
+ DCHECK(!loop_info->IsIrreducible());
+ DCHECK_EQ(loop_info->GetBlocks().NumSetBits(), 1u);
+ DCHECK_EQ(static_cast<uint32_t>(loop_info->GetBlocks().GetHighestBitSet()), GetBlockId());
+ loop_update_start = loop_info->GetPreHeader();
+ }
+
+ // (2) Disconnect the block from its successors and update their phis.
+ for (HBasicBlock* successor : successors_) {
+ // Delete this block from the list of predecessors.
+ size_t this_index = successor->GetPredecessorIndexOf(this);
+ successor->predecessors_.erase(successor->predecessors_.begin() + this_index);
+
+ // Check that `successor` has other predecessors, otherwise `this` is the
+ // dominator of `successor` which violates the order DCHECKed at the top.
+ DCHECK(!successor->predecessors_.empty());
+
+ // Remove this block's entries in the successor's phis. Skip exceptional
+ // successors because catch phi inputs do not correspond to predecessor
+ // blocks but throwing instructions. The inputs of the catch phis will be
+ // updated in step (3).
+ if (!successor->IsCatchBlock()) {
+ if (successor->predecessors_.size() == 1u) {
+ // The successor has just one predecessor left. Replace phis with the only
+ // remaining input.
+ for (HInstructionIterator phi_it(successor->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
+ HPhi* phi = phi_it.Current()->AsPhi();
+ phi->ReplaceWith(phi->InputAt(1 - this_index));
+ successor->RemovePhi(phi);
+ }
+ } else {
+ for (HInstructionIterator phi_it(successor->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
+ phi_it.Current()->AsPhi()->RemoveInputAt(this_index);
+ }
+ }
}
}
+ successors_.clear();
- // (2) Disconnect the block from its predecessors and update their
+ // (3) Remove instructions and phis. Instructions should have no remaining uses
+ // except in catch phis. If an instruction is used by a catch phi at `index`,
+ // remove `index`-th input of all phis in the catch block since they are
+ // guaranteed dead. Note that we may miss dead inputs this way but the
+ // graph will always remain consistent.
+ for (HBackwardInstructionIterator it(GetInstructions()); !it.Done(); it.Advance()) {
+ HInstruction* insn = it.Current();
+ RemoveUsesOfDeadInstruction(insn);
+ RemoveInstruction(insn);
+ }
+ for (HInstructionIterator it(GetPhis()); !it.Done(); it.Advance()) {
+ HPhi* insn = it.Current()->AsPhi();
+ RemoveUsesOfDeadInstruction(insn);
+ RemovePhi(insn);
+ }
+
+ // (4) Disconnect the block from its predecessors and update their
// control-flow instructions.
for (HBasicBlock* predecessor : predecessors_) {
+ // We should not see any back edges as they would have been removed by step (3).
+ DCHECK(!IsInLoop() || !GetLoopInformation()->IsBackEdge(*predecessor));
+
HInstruction* last_instruction = predecessor->GetLastInstruction();
if (last_instruction->IsTryBoundary() && !IsCatchBlock()) {
// This block is the only normal-flow successor of the TryBoundary which
@@ -1566,7 +1755,7 @@ void HBasicBlock::DisconnectAndDelete() {
} else if (num_pred_successors == 0u) {
// The predecessor has no remaining successors and therefore must be dead.
// We deliberately leave it without a control-flow instruction so that the
- // SSAChecker fails unless it is not removed during the pass too.
+ // GraphChecker fails unless it is also removed during the pass.
predecessor->RemoveInstruction(last_instruction);
} else {
// There are multiple successors left. The removed block might be a successor
@@ -1579,58 +1768,25 @@ void HBasicBlock::DisconnectAndDelete() {
}
predecessors_.clear();
- // (3) Disconnect the block from its successors and update their phis.
- for (HBasicBlock* successor : successors_) {
- // Delete this block from the list of predecessors.
- size_t this_index = successor->GetPredecessorIndexOf(this);
- successor->predecessors_.erase(successor->predecessors_.begin() + this_index);
-
- // Check that `successor` has other predecessors, otherwise `this` is the
- // dominator of `successor` which violates the order DCHECKed at the top.
- DCHECK(!successor->predecessors_.empty());
-
- // Remove this block's entries in the successor's phis. Skip exceptional
- // successors because catch phi inputs do not correspond to predecessor
- // blocks but throwing instructions. Their inputs will be updated in step (4).
- if (!successor->IsCatchBlock()) {
- if (successor->predecessors_.size() == 1u) {
- // The successor has just one predecessor left. Replace phis with the only
- // remaining input.
- for (HInstructionIterator phi_it(successor->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
- HPhi* phi = phi_it.Current()->AsPhi();
- phi->ReplaceWith(phi->InputAt(1 - this_index));
- successor->RemovePhi(phi);
- }
- } else {
- for (HInstructionIterator phi_it(successor->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
- phi_it.Current()->AsPhi()->RemoveInputAt(this_index);
- }
- }
+ // (5) Remove the block from all loops it is included in. Skip the inner-most
+ // loop if this is the loop header (see definition of `loop_update_start`)
+ // because the loop header's predecessor list has been destroyed in step (4).
+ for (HLoopInformationOutwardIterator it(*loop_update_start); !it.Done(); it.Advance()) {
+ HLoopInformation* loop_info = it.Current();
+ loop_info->Remove(this);
+ if (loop_info->IsBackEdge(*this)) {
+ // If this was the last back edge of the loop, we deliberately leave the
+ // loop in an inconsistent state and will fail GraphChecker unless the
+ // entire loop is removed during the pass.
+ loop_info->RemoveBackEdge(this);
}
}
- successors_.clear();
- // (4) Remove instructions and phis. Instructions should have no remaining uses
- // except in catch phis. If an instruction is used by a catch phi at `index`,
- // remove `index`-th input of all phis in the catch block since they are
- // guaranteed dead. Note that we may miss dead inputs this way but the
- // graph will always remain consistent.
- for (HBackwardInstructionIterator it(GetInstructions()); !it.Done(); it.Advance()) {
- HInstruction* insn = it.Current();
- RemoveUsesOfDeadInstruction(insn);
- RemoveInstruction(insn);
- }
- for (HInstructionIterator it(GetPhis()); !it.Done(); it.Advance()) {
- HPhi* insn = it.Current()->AsPhi();
- RemoveUsesOfDeadInstruction(insn);
- RemovePhi(insn);
- }
-
- // Disconnect from the dominator.
+ // (6) Disconnect from the dominator.
dominator_->RemoveDominatedBlock(this);
SetDominator(nullptr);
- // Delete from the graph, update reverse post order.
+ // (7) Delete from the graph, update reverse post order.
graph_->DeleteDeadEmptyBlock(this);
SetGraph(nullptr);
}
@@ -1730,18 +1886,6 @@ void HBasicBlock::ReplaceWith(HBasicBlock* other) {
graph_ = nullptr;
}
-// Create space in `blocks` for adding `number_of_new_blocks` entries
-// starting at location `at`. Blocks after `at` are moved accordingly.
-static void MakeRoomFor(ArenaVector<HBasicBlock*>* blocks,
- size_t number_of_new_blocks,
- size_t after) {
- DCHECK_LT(after, blocks->size());
- size_t old_size = blocks->size();
- size_t new_size = old_size + number_of_new_blocks;
- blocks->resize(new_size);
- std::copy_backward(blocks->begin() + after + 1u, blocks->begin() + old_size, blocks->end());
-}
-
void HGraph::DeleteDeadEmptyBlock(HBasicBlock* block) {
DCHECK_EQ(block->GetGraph(), this);
DCHECK(block->GetSuccessors().empty());
@@ -1752,13 +1896,49 @@ void HGraph::DeleteDeadEmptyBlock(HBasicBlock* block) {
DCHECK(block->GetPhis().IsEmpty());
if (block->IsExitBlock()) {
- exit_block_ = nullptr;
+ SetExitBlock(nullptr);
}
RemoveElement(reverse_post_order_, block);
blocks_[block->GetBlockId()] = nullptr;
}
+void HGraph::UpdateLoopAndTryInformationOfNewBlock(HBasicBlock* block,
+ HBasicBlock* reference,
+ bool replace_if_back_edge) {
+ if (block->IsLoopHeader()) {
+ // Clear the information of which blocks are contained in that loop. Since the
+ // information is stored as a bit vector based on block ids, we have to update
+ // it, as those block ids were specific to the callee graph and we are now adding
+ // these blocks to the caller graph.
+ block->GetLoopInformation()->ClearAllBlocks();
+ }
+
+ // If not already in a loop, update the loop information.
+ if (!block->IsInLoop()) {
+ block->SetLoopInformation(reference->GetLoopInformation());
+ }
+
+ // If the block is in a loop, update all its outward loops.
+ HLoopInformation* loop_info = block->GetLoopInformation();
+ if (loop_info != nullptr) {
+ for (HLoopInformationOutwardIterator loop_it(*block);
+ !loop_it.Done();
+ loop_it.Advance()) {
+ loop_it.Current()->Add(block);
+ }
+ if (replace_if_back_edge && loop_info->IsBackEdge(*reference)) {
+ loop_info->ReplaceBackEdge(reference, block);
+ }
+ }
+
+ // Copy TryCatchInformation if `reference` is a try block, not if it is a catch block.
+ TryCatchInformation* try_catch_info = reference->IsTryBlock()
+ ? reference->GetTryCatchInformation()
+ : nullptr;
+ block->SetTryCatchInformation(try_catch_info);
+}
+
HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
DCHECK(HasExitBlock()) << "Unimplemented scenario";
// Update the environments in this graph to have the invoke's environment
@@ -1792,9 +1972,11 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
DCHECK(GetBlocks()[0]->IsEntryBlock());
DCHECK(GetBlocks()[2]->IsExitBlock());
DCHECK(!body->IsExitBlock());
+ DCHECK(!body->IsInLoop());
HInstruction* last = body->GetLastInstruction();
- invoke->GetBlock()->instructions_.AddAfter(invoke, body->GetInstructions());
+ // Note that we add instructions before the invoke only to simplify polymorphic inlining.
+ invoke->GetBlock()->instructions_.AddBefore(invoke, body->GetInstructions());
body->GetInstructions().SetBlockOfInstructions(invoke->GetBlock());
// Replace the invoke with the return value of the inlined graph.
@@ -1812,45 +1994,18 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
// with the second half.
ArenaAllocator* allocator = outer_graph->GetArena();
HBasicBlock* at = invoke->GetBlock();
- HBasicBlock* to = at->SplitAfter(invoke);
+ // Note that we split before the invoke only to simplify polymorphic inlining.
+ HBasicBlock* to = at->SplitBeforeForInlining(invoke);
HBasicBlock* first = entry_block_->GetSuccessors()[0];
DCHECK(!first->IsInLoop());
at->MergeWithInlined(first);
exit_block_->ReplaceWith(to);
- // Update all predecessors of the exit block (now the `to` block)
- // to not `HReturn` but `HGoto` instead.
- bool returns_void = to->GetPredecessors()[0]->GetLastInstruction()->IsReturnVoid();
- if (to->GetPredecessors().size() == 1) {
- HBasicBlock* predecessor = to->GetPredecessors()[0];
- HInstruction* last = predecessor->GetLastInstruction();
- if (!returns_void) {
- return_value = last->InputAt(0);
- }
- predecessor->AddInstruction(new (allocator) HGoto(last->GetDexPc()));
- predecessor->RemoveInstruction(last);
- } else {
- if (!returns_void) {
- // There will be multiple returns.
- return_value = new (allocator) HPhi(
- allocator, kNoRegNumber, 0, HPhi::ToPhiType(invoke->GetType()), to->GetDexPc());
- to->AddPhi(return_value->AsPhi());
- }
- for (HBasicBlock* predecessor : to->GetPredecessors()) {
- HInstruction* last = predecessor->GetLastInstruction();
- if (!returns_void) {
- return_value->AsPhi()->AddInput(last->InputAt(0));
- }
- predecessor->AddInstruction(new (allocator) HGoto(last->GetDexPc()));
- predecessor->RemoveInstruction(last);
- }
- }
-
// Update the meta information surrounding blocks:
// (1) the graph they are now in,
// (2) the reverse post order of that graph,
- // (3) the potential loop information they are now in,
+ // (3) their potential loop information, inner and outer,
// (4) try block membership.
// Note that we do not need to update catch phi inputs because they
// correspond to the register file of the outer method which the inlinee
@@ -1870,28 +2025,17 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
size_t index_of_at = IndexOfElement(outer_graph->reverse_post_order_, at);
MakeRoomFor(&outer_graph->reverse_post_order_, blocks_added, index_of_at);
- HLoopInformation* loop_info = at->GetLoopInformation();
- // Copy TryCatchInformation if `at` is a try block, not if it is a catch block.
- TryCatchInformation* try_catch_info = at->IsTryBlock() ? at->GetTryCatchInformation() : nullptr;
-
// Do a reverse post order of the blocks in the callee and do (1), (2), (3)
// and (4) to the blocks that apply.
for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
HBasicBlock* current = it.Current();
if (current != exit_block_ && current != entry_block_ && current != first) {
- DCHECK(!current->IsInLoop());
DCHECK(current->GetTryCatchInformation() == nullptr);
DCHECK(current->GetGraph() == this);
current->SetGraph(outer_graph);
outer_graph->AddBlock(current);
outer_graph->reverse_post_order_[++index_of_at] = current;
- if (loop_info != nullptr) {
- current->SetLoopInformation(loop_info);
- for (HLoopInformationOutwardIterator loop_it(*at); !loop_it.Done(); loop_it.Advance()) {
- loop_it.Current()->Add(current);
- }
- }
- current->SetTryCatchInformation(try_catch_info);
+ UpdateLoopAndTryInformationOfNewBlock(current, at, /* replace_if_back_edge */ false);
}
}
@@ -1899,24 +2043,40 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
to->SetGraph(outer_graph);
outer_graph->AddBlock(to);
outer_graph->reverse_post_order_[++index_of_at] = to;
- if (loop_info != nullptr) {
- to->SetLoopInformation(loop_info);
- for (HLoopInformationOutwardIterator loop_it(*at); !loop_it.Done(); loop_it.Advance()) {
- loop_it.Current()->Add(to);
+ // Only `to` can become a back edge, as the inlined blocks
+ // are predecessors of `to`.
+ UpdateLoopAndTryInformationOfNewBlock(to, at, /* replace_if_back_edge */ true);
+
+ // Update all predecessors of the exit block (now the `to` block)
+ // to not `HReturn` but `HGoto` instead.
+ bool returns_void = to->GetPredecessors()[0]->GetLastInstruction()->IsReturnVoid();
+ if (to->GetPredecessors().size() == 1) {
+ HBasicBlock* predecessor = to->GetPredecessors()[0];
+ HInstruction* last = predecessor->GetLastInstruction();
+ if (!returns_void) {
+ return_value = last->InputAt(0);
}
- if (loop_info->IsBackEdge(*at)) {
- // Only `to` can become a back edge, as the inlined blocks
- // are predecessors of `to`.
- loop_info->ReplaceBackEdge(at, to);
+ predecessor->AddInstruction(new (allocator) HGoto(last->GetDexPc()));
+ predecessor->RemoveInstruction(last);
+ } else {
+ if (!returns_void) {
+ // There will be multiple returns.
+ return_value = new (allocator) HPhi(
+ allocator, kNoRegNumber, 0, HPhi::ToPhiType(invoke->GetType()), to->GetDexPc());
+ to->AddPhi(return_value->AsPhi());
+ }
+ for (HBasicBlock* predecessor : to->GetPredecessors()) {
+ HInstruction* last = predecessor->GetLastInstruction();
+ if (!returns_void) {
+ DCHECK(last->IsReturn());
+ return_value->AsPhi()->AddInput(last->InputAt(0));
+ }
+ predecessor->AddInstruction(new (allocator) HGoto(last->GetDexPc()));
+ predecessor->RemoveInstruction(last);
}
}
- to->SetTryCatchInformation(try_catch_info);
}
- // Update the next instruction id of the outer graph, so that instructions
- // added later get bigger ids than those in the inner graph.
- outer_graph->SetCurrentInstructionId(GetNextInstructionId());
-
// Walk over the entry block and:
// - Move constants from the entry block to the outer_graph's entry block,
// - Replace HParameterValue instructions with their real value.
@@ -1967,13 +2127,6 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
}
}
- if (return_value != nullptr) {
- invoke->ReplaceWith(return_value);
- }
-
- // Finally remove the invoke from the caller.
- invoke->GetBlock()->RemoveInstruction(invoke);
-
return return_value;
}
@@ -2032,32 +2185,29 @@ void HGraph::TransformLoopHeaderForBCE(HBasicBlock* header) {
reverse_post_order_[index_of_header++] = false_block;
reverse_post_order_[index_of_header++] = new_pre_header;
- // Fix loop information.
- HLoopInformation* loop_info = old_pre_header->GetLoopInformation();
- if (loop_info != nullptr) {
- if_block->SetLoopInformation(loop_info);
- true_block->SetLoopInformation(loop_info);
- false_block->SetLoopInformation(loop_info);
- new_pre_header->SetLoopInformation(loop_info);
- // Add blocks to all enveloping loops.
- for (HLoopInformationOutwardIterator loop_it(*old_pre_header);
- !loop_it.Done();
- loop_it.Advance()) {
- loop_it.Current()->Add(if_block);
- loop_it.Current()->Add(true_block);
- loop_it.Current()->Add(false_block);
- loop_it.Current()->Add(new_pre_header);
- }
- }
+ // The pre_header can never be a back edge of a loop.
+ DCHECK((old_pre_header->GetLoopInformation() == nullptr) ||
+ !old_pre_header->GetLoopInformation()->IsBackEdge(*old_pre_header));
+ UpdateLoopAndTryInformationOfNewBlock(
+ if_block, old_pre_header, /* replace_if_back_edge */ false);
+ UpdateLoopAndTryInformationOfNewBlock(
+ true_block, old_pre_header, /* replace_if_back_edge */ false);
+ UpdateLoopAndTryInformationOfNewBlock(
+ false_block, old_pre_header, /* replace_if_back_edge */ false);
+ UpdateLoopAndTryInformationOfNewBlock(
+ new_pre_header, old_pre_header, /* replace_if_back_edge */ false);
+}
- // Fix try/catch information.
- TryCatchInformation* try_catch_info = old_pre_header->IsTryBlock()
- ? old_pre_header->GetTryCatchInformation()
- : nullptr;
- if_block->SetTryCatchInformation(try_catch_info);
- true_block->SetTryCatchInformation(try_catch_info);
- false_block->SetTryCatchInformation(try_catch_info);
- new_pre_header->SetTryCatchInformation(try_catch_info);
+static void CheckAgainstUpperBound(ReferenceTypeInfo rti, ReferenceTypeInfo upper_bound_rti)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (rti.IsValid()) {
+ DCHECK(upper_bound_rti.IsSupertypeOf(rti))
+ << " upper_bound_rti: " << upper_bound_rti
+ << " rti: " << rti;
+ DCHECK(!upper_bound_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes() || rti.IsExact())
+ << " upper_bound_rti: " << upper_bound_rti
+ << " rti: " << rti;
+ }
}
void HInstruction::SetReferenceTypeInfo(ReferenceTypeInfo rti) {
@@ -2068,24 +2218,34 @@ void HInstruction::SetReferenceTypeInfo(ReferenceTypeInfo rti) {
if (IsBoundType()) {
// Having the test here spares us from making the method virtual just for
// the sake of a DCHECK.
- ReferenceTypeInfo upper_bound_rti = AsBoundType()->GetUpperBound();
- DCHECK(upper_bound_rti.IsSupertypeOf(rti))
- << " upper_bound_rti: " << upper_bound_rti
- << " rti: " << rti;
- DCHECK(!upper_bound_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes() || rti.IsExact());
+ CheckAgainstUpperBound(rti, AsBoundType()->GetUpperBound());
}
}
- reference_type_info_ = rti;
+ reference_type_handle_ = rti.GetTypeHandle();
+ SetPackedFlag<kFlagReferenceTypeIsExact>(rti.IsExact());
}
-ReferenceTypeInfo::ReferenceTypeInfo() : type_handle_(TypeHandle()), is_exact_(false) {}
+void HBoundType::SetUpperBound(const ReferenceTypeInfo& upper_bound, bool can_be_null) {
+ if (kIsDebugBuild) {
+ ScopedObjectAccess soa(Thread::Current());
+ DCHECK(upper_bound.IsValid());
+ DCHECK(!upper_bound_.IsValid()) << "Upper bound should only be set once.";
+ CheckAgainstUpperBound(GetReferenceTypeInfo(), upper_bound);
+ }
+ upper_bound_ = upper_bound;
+ SetPackedFlag<kFlagUpperCanBeNull>(can_be_null);
+}
-ReferenceTypeInfo::ReferenceTypeInfo(TypeHandle type_handle, bool is_exact)
- : type_handle_(type_handle), is_exact_(is_exact) {
+ReferenceTypeInfo ReferenceTypeInfo::Create(TypeHandle type_handle, bool is_exact) {
if (kIsDebugBuild) {
ScopedObjectAccess soa(Thread::Current());
DCHECK(IsValidHandle(type_handle));
+ if (!is_exact) {
+ DCHECK(!type_handle->CannotBeAssignedFromOtherTypes())
+ << "Callers of ReferenceTypeInfo::Create should ensure is_exact is properly computed";
+ }
}
+ return ReferenceTypeInfo(type_handle, is_exact);
}
std::ostream& operator<<(std::ostream& os, const ReferenceTypeInfo& rhs) {
@@ -2129,10 +2289,7 @@ void HInvoke::SetIntrinsic(Intrinsics intrinsic,
IntrinsicExceptions exceptions) {
intrinsic_ = intrinsic;
IntrinsicOptimizations opt(this);
- if (needs_env_or_cache == kNoEnvironmentOrCache) {
- opt.SetDoesNotNeedDexCache();
- opt.SetDoesNotNeedEnvironment();
- }
+
// Adjust method's side effects from intrinsic table.
switch (side_effects) {
case kNoSideEffects: SetSideEffects(SideEffects::None()); break;
@@ -2140,11 +2297,21 @@ void HInvoke::SetIntrinsic(Intrinsics intrinsic,
case kWriteSideEffects: SetSideEffects(SideEffects::AllWrites()); break;
case kAllSideEffects: SetSideEffects(SideEffects::AllExceptGCDependency()); break;
}
- // Adjust method's exception status from intrinsic table.
- switch (exceptions) {
- case kNoThrow: SetCanThrow(false); break;
- case kCanThrow: SetCanThrow(true); break;
+
+ if (needs_env_or_cache == kNoEnvironmentOrCache) {
+ opt.SetDoesNotNeedDexCache();
+ opt.SetDoesNotNeedEnvironment();
+ } else {
+ // If we need an environment, that means there will be a call, which can trigger GC.
+ SetSideEffects(GetSideEffects().Union(SideEffects::CanTriggerGC()));
}
+ // Adjust method's exception status from intrinsic table.
+ SetCanThrow(exceptions == kCanThrow);
+}
+
+bool HNewInstance::IsStringAlloc() const {
+ ScopedObjectAccess soa(Thread::Current());
+ return GetReferenceTypeInfo().IsStringClass();
}
bool HInvoke::NeedsEnvironment() const {
@@ -2229,7 +2396,7 @@ void HInstruction::RemoveEnvironmentUsers() {
env_uses_.Clear();
}
-// Returns an instruction with the opposite boolean value from 'cond'.
+// Returns an instruction with the opposite Boolean value from 'cond'.
HInstruction* HGraph::InsertOppositeCondition(HInstruction* cond, HInstruction* cursor) {
ArenaAllocator* allocator = GetArena();
@@ -2258,10 +2425,10 @@ HInstruction* HGraph::InsertOppositeCondition(HInstruction* cond, HInstruction*
return replacement;
} else if (cond->IsIntConstant()) {
HIntConstant* int_const = cond->AsIntConstant();
- if (int_const->IsZero()) {
+ if (int_const->IsFalse()) {
return GetIntConstant(1);
} else {
- DCHECK(int_const->IsOne());
+ DCHECK(int_const->IsTrue()) << int_const->GetValue();
return GetIntConstant(0);
}
} else {
@@ -2271,4 +2438,41 @@ HInstruction* HGraph::InsertOppositeCondition(HInstruction* cond, HInstruction*
}
}
+std::ostream& operator<<(std::ostream& os, const MoveOperands& rhs) {
+ os << "["
+ << " source=" << rhs.GetSource()
+ << " destination=" << rhs.GetDestination()
+ << " type=" << rhs.GetType()
+ << " instruction=";
+ if (rhs.GetInstruction() != nullptr) {
+ os << rhs.GetInstruction()->DebugName() << ' ' << rhs.GetInstruction()->GetId();
+ } else {
+ os << "null";
+ }
+ os << " ]";
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, TypeCheckKind rhs) {
+ switch (rhs) {
+ case TypeCheckKind::kUnresolvedCheck:
+ return os << "unresolved_check";
+ case TypeCheckKind::kExactCheck:
+ return os << "exact_check";
+ case TypeCheckKind::kClassHierarchyCheck:
+ return os << "class_hierarchy_check";
+ case TypeCheckKind::kAbstractClassCheck:
+ return os << "abstract_class_check";
+ case TypeCheckKind::kInterfaceCheck:
+ return os << "interface_check";
+ case TypeCheckKind::kArrayObjectCheck:
+ return os << "array_object_check";
+ case TypeCheckKind::kArrayCheck:
+ return os << "array_check";
+ default:
+ LOG(FATAL) << "Unknown TypeCheckKind: " << static_cast<int>(rhs);
+ UNREACHABLE();
+ }
+}
+
} // namespace art