diff options
author | 2017-02-16 22:08:29 +0000 | |
---|---|---|
committer | 2017-02-27 10:27:42 +0000 | |
commit | b813ca14be33f7db8b7049c3b08a1eb776f25d1b (patch) | |
tree | 4757b96eb5efd3a0e992f7f399ea479e7b5426c8 | |
parent | 30e015c442c8033390c30d2f293604723c29bc75 (diff) |
Implement code sinking.
Small example of what the optimization does:
Object o = new Object();
if (test) {
throw new Error(o.toString());
}
will be turned into (note that the first user of 'o'
is the 'new Error' allocation which has 'o' in its
environment):
if (test) {
Object o = new Object();
throw new Error(o.toString());
}
There are other examples in 639-checker-code-sinking.
Ritz individual benchmarks improve on art-jit-cc from
5% (EvaluateComplexFormulas) to 23% (MoveFunctionColumn)
on all platforms.
Test: 639-checker-code-sinking
Test: test-art-host
Test: borg job run
Test: libcore + jdwp
bug:35634932
bug:30933338
Change-Id: Ib99c00c93fe76ffffb17afffb5a0e30a14310652
-rw-r--r-- | compiler/Android.bp | 1 | ||||
-rw-r--r-- | compiler/optimizing/code_sinking.cc | 403 | ||||
-rw-r--r-- | compiler/optimizing/code_sinking.h | 48 | ||||
-rw-r--r-- | compiler/optimizing/common_dominator.h | 6 | ||||
-rw-r--r-- | compiler/optimizing/optimizing_compiler.cc | 5 | ||||
-rw-r--r-- | compiler/optimizing/optimizing_compiler_stats.h | 2 | ||||
-rw-r--r-- | test/639-checker-code-sinking/expected.txt | 3 | ||||
-rw-r--r-- | test/639-checker-code-sinking/info.txt | 1 | ||||
-rw-r--r-- | test/639-checker-code-sinking/src/Main.java | 355 |
9 files changed, 823 insertions, 1 deletions
diff --git a/compiler/Android.bp b/compiler/Android.bp index f5589cd7a3..1ee2a21b18 100644 --- a/compiler/Android.bp +++ b/compiler/Android.bp @@ -52,6 +52,7 @@ art_cc_defaults { "optimizing/cha_guard_optimization.cc", "optimizing/code_generator.cc", "optimizing/code_generator_utils.cc", + "optimizing/code_sinking.cc", "optimizing/constant_folding.cc", "optimizing/dead_code_elimination.cc", "optimizing/escape.cc", diff --git a/compiler/optimizing/code_sinking.cc b/compiler/optimizing/code_sinking.cc new file mode 100644 index 0000000000..dc3d378e75 --- /dev/null +++ b/compiler/optimizing/code_sinking.cc @@ -0,0 +1,403 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "code_sinking.h" + +#include "common_dominator.h" +#include "nodes.h" + +namespace art { + +void CodeSinking::Run() { + HBasicBlock* exit = graph_->GetExitBlock(); + if (exit == nullptr) { + // Infinite loop, just bail. + return; + } + // TODO(ngeoffray): we do not profile branches yet, so use throw instructions + // as an indicator of an uncommon branch. + for (HBasicBlock* exit_predecessor : exit->GetPredecessors()) { + if (exit_predecessor->GetLastInstruction()->IsThrow()) { + SinkCodeToUncommonBranch(exit_predecessor); + } + } +} + +static bool IsInterestingInstruction(HInstruction* instruction) { + // Instructions from the entry graph (for example constants) are never interesting to move. 
+ if (instruction->GetBlock() == instruction->GetBlock()->GetGraph()->GetEntryBlock()) { + return false; + } + // We want to move moveable instructions that cannot throw, as well as + // heap stores and allocations. + + // Volatile stores cannot be moved. + if (instruction->IsInstanceFieldSet()) { + if (instruction->AsInstanceFieldSet()->IsVolatile()) { + return false; + } + } + + // Check allocations first, as they can throw, but it is safe to move them. + if (instruction->IsNewInstance() || instruction->IsNewArray()) { + return true; + } + + // All other instructions that can throw cannot be moved. + if (instruction->CanThrow()) { + return false; + } + + // We can only store on local allocations. Other heap references can + // be escaping. Note that allocations can escape too, but we only move + // allocations if their users can move to, or are in the list of + // post dominated blocks. + if (instruction->IsInstanceFieldSet()) { + if (!instruction->InputAt(0)->IsNewInstance()) { + return false; + } + } + + if (instruction->IsArraySet()) { + if (!instruction->InputAt(0)->IsNewArray()) { + return false; + } + } + + // Heap accesses cannot go pass instructions that have memory side effects, which + // we are not tracking here. Note that the load/store elimination optimization + // runs before this optimization, and should have removed interesting ones. + // In theory, we could handle loads of local allocations, but this is currently + // hard to test, as LSE removes them. 
+ if (instruction->IsStaticFieldGet() || + instruction->IsInstanceFieldGet() || + instruction->IsArrayGet()) { + return false; + } + + if (instruction->IsInstanceFieldSet() || + instruction->IsArraySet() || + instruction->CanBeMoved()) { + return true; + } + return false; +} + +static void AddInstruction(HInstruction* instruction, + const ArenaBitVector& processed_instructions, + const ArenaBitVector& discard_blocks, + ArenaVector<HInstruction*>* worklist) { + // Add to the work list if the instruction is not in the list of blocks + // to discard, hasn't been already processed and is of interest. + if (!discard_blocks.IsBitSet(instruction->GetBlock()->GetBlockId()) && + !processed_instructions.IsBitSet(instruction->GetId()) && + IsInterestingInstruction(instruction)) { + worklist->push_back(instruction); + } +} + +static void AddInputs(HInstruction* instruction, + const ArenaBitVector& processed_instructions, + const ArenaBitVector& discard_blocks, + ArenaVector<HInstruction*>* worklist) { + for (HInstruction* input : instruction->GetInputs()) { + AddInstruction(input, processed_instructions, discard_blocks, worklist); + } +} + +static void AddInputs(HBasicBlock* block, + const ArenaBitVector& processed_instructions, + const ArenaBitVector& discard_blocks, + ArenaVector<HInstruction*>* worklist) { + for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { + AddInputs(it.Current(), processed_instructions, discard_blocks, worklist); + } + for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) { + AddInputs(it.Current(), processed_instructions, discard_blocks, worklist); + } +} + +static bool ShouldFilterUse(HInstruction* instruction, + HInstruction* user, + const ArenaBitVector& post_dominated) { + if (instruction->IsNewInstance()) { + return user->IsInstanceFieldSet() && + (user->InputAt(0) == instruction) && + !post_dominated.IsBitSet(user->GetBlock()->GetBlockId()); + } else if (instruction->IsNewArray()) { + return 
user->IsArraySet() && + (user->InputAt(0) == instruction) && + !post_dominated.IsBitSet(user->GetBlock()->GetBlockId()); + } + return false; +} + + +// Find the ideal position for moving `instruction`. If `filter` is true, +// we filter out store instructions to that instruction, which are processed +// first in the step (3) of the sinking algorithm. +// This method is tailored to the sinking algorithm, unlike +// the generic HInstruction::MoveBeforeFirstUserAndOutOfLoops. +static HInstruction* FindIdealPosition(HInstruction* instruction, + const ArenaBitVector& post_dominated, + bool filter = false) { + DCHECK(!instruction->IsPhi()); // Makes no sense for Phi. + + // Find the target block. + CommonDominator finder(/* start_block */ nullptr); + for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) { + HInstruction* user = use.GetUser(); + if (!(filter && ShouldFilterUse(instruction, user, post_dominated))) { + finder.Update(user->IsPhi() + ? user->GetBlock()->GetPredecessors()[use.GetIndex()] + : user->GetBlock()); + } + } + for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) { + DCHECK(!use.GetUser()->GetHolder()->IsPhi()); + DCHECK(!filter || !ShouldFilterUse(instruction, use.GetUser()->GetHolder(), post_dominated)); + finder.Update(use.GetUser()->GetHolder()->GetBlock()); + } + HBasicBlock* target_block = finder.Get(); + if (target_block == nullptr) { + // No user we can go next to? Likely a LSE or DCE limitation. + return nullptr; + } + + // Move to the first dominator not in a loop, if we can. + while (target_block->IsInLoop()) { + if (!post_dominated.IsBitSet(target_block->GetDominator()->GetBlockId())) { + break; + } + target_block = target_block->GetDominator(); + DCHECK(target_block != nullptr); + } + + // Find insertion position. No need to filter anymore, as we have found a + // target block. 
+ HInstruction* insert_pos = nullptr; + for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) { + if (use.GetUser()->GetBlock() == target_block && + (insert_pos == nullptr || use.GetUser()->StrictlyDominates(insert_pos))) { + insert_pos = use.GetUser(); + } + } + for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) { + HInstruction* user = use.GetUser()->GetHolder(); + if (user->GetBlock() == target_block && + (insert_pos == nullptr || user->StrictlyDominates(insert_pos))) { + insert_pos = user; + } + } + if (insert_pos == nullptr) { + // No user in `target_block`, insert before the control flow instruction. + insert_pos = target_block->GetLastInstruction(); + DCHECK(insert_pos->IsControlFlow()); + // Avoid splitting HCondition from HIf to prevent unnecessary materialization. + if (insert_pos->IsIf()) { + HInstruction* if_input = insert_pos->AsIf()->InputAt(0); + if (if_input == insert_pos->GetPrevious()) { + insert_pos = if_input; + } + } + } + DCHECK(!insert_pos->IsPhi()); + return insert_pos; +} + + +void CodeSinking::SinkCodeToUncommonBranch(HBasicBlock* end_block) { + // Local allocator to discard data structures created below at the end of + // this optimization. + ArenaAllocator allocator(graph_->GetArena()->GetArenaPool()); + + size_t number_of_instructions = graph_->GetCurrentInstructionId(); + ArenaVector<HInstruction*> worklist(allocator.Adapter(kArenaAllocMisc)); + ArenaBitVector processed_instructions(&allocator, number_of_instructions, /* expandable */ false); + ArenaBitVector post_dominated(&allocator, graph_->GetBlocks().size(), /* expandable */ false); + ArenaBitVector instructions_that_can_move( + &allocator, number_of_instructions, /* expandable */ false); + ArenaVector<HInstruction*> move_in_order(allocator.Adapter(kArenaAllocMisc)); + + // Step (1): Visit post order to get a subset of blocks post dominated by `end_block`. 
+ // TODO(ngeoffray): Getting the full set of post-dominated shoud be done by + // computint the post dominator tree, but that could be too time consuming. Also, + // we should start the analysis from blocks dominated by an uncommon branch, but we + // don't profile branches yet. + bool found_block = false; + for (HBasicBlock* block : graph_->GetPostOrder()) { + if (block == end_block) { + found_block = true; + post_dominated.SetBit(block->GetBlockId()); + } else if (found_block) { + bool is_post_dominated = true; + if (block->GetSuccessors().empty()) { + // We currently bail for loops. + is_post_dominated = false; + } else { + for (HBasicBlock* successor : block->GetSuccessors()) { + if (!post_dominated.IsBitSet(successor->GetBlockId())) { + is_post_dominated = false; + break; + } + } + } + if (is_post_dominated) { + post_dominated.SetBit(block->GetBlockId()); + } + } + } + + // Now that we have found a subset of post-dominated blocks, add to the worklist all inputs + // of instructions in these blocks that are not themselves in these blocks. + // Also find the common dominator of the found post dominated blocks, to help filtering + // out un-movable uses in step (2). + CommonDominator finder(end_block); + for (size_t i = 0, e = graph_->GetBlocks().size(); i < e; ++i) { + if (post_dominated.IsBitSet(i)) { + finder.Update(graph_->GetBlocks()[i]); + AddInputs(graph_->GetBlocks()[i], processed_instructions, post_dominated, &worklist); + } + } + HBasicBlock* common_dominator = finder.Get(); + + // Step (2): iterate over the worklist to find sinking candidates. + while (!worklist.empty()) { + HInstruction* instruction = worklist.back(); + if (processed_instructions.IsBitSet(instruction->GetId())) { + // The instruction has already been processed, continue. This happens + // when the instruction is the input/user of multiple instructions. 
+ worklist.pop_back(); + continue; + } + bool all_users_in_post_dominated_blocks = true; + bool can_move = true; + // Check users of the instruction. + for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) { + HInstruction* user = use.GetUser(); + if (!post_dominated.IsBitSet(user->GetBlock()->GetBlockId()) && + !instructions_that_can_move.IsBitSet(user->GetId())) { + all_users_in_post_dominated_blocks = false; + // If we've already processed this user, or the user cannot be moved, or + // is not dominating the post dominated blocks, bail. + // TODO(ngeoffray): The domination check is an approximation. We should + // instead check if the dominated blocks post dominate the user's block, + // but we do not have post dominance information here. + if (processed_instructions.IsBitSet(user->GetId()) || + !IsInterestingInstruction(user) || + !user->GetBlock()->Dominates(common_dominator)) { + can_move = false; + break; + } + } + } + + // Check environment users of the instruction. Some of these users require + // the instruction not to move. + if (all_users_in_post_dominated_blocks) { + for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) { + HEnvironment* environment = use.GetUser(); + HInstruction* user = environment->GetHolder(); + if (!post_dominated.IsBitSet(user->GetBlock()->GetBlockId())) { + if (graph_->IsDebuggable() || + user->IsDeoptimize() || + user->CanThrowIntoCatchBlock() || + (user->IsSuspendCheck() && graph_->IsCompilingOsr())) { + can_move = false; + break; + } + } + } + } + if (!can_move) { + // Instruction cannot be moved, mark it as processed and remove it from the work + // list. + processed_instructions.SetBit(instruction->GetId()); + worklist.pop_back(); + } else if (all_users_in_post_dominated_blocks) { + // Instruction is a candidate for being sunk. Mark it as such, remove it from the + // work list, and add its inputs to the work list. 
+ instructions_that_can_move.SetBit(instruction->GetId()); + move_in_order.push_back(instruction); + processed_instructions.SetBit(instruction->GetId()); + worklist.pop_back(); + AddInputs(instruction, processed_instructions, post_dominated, &worklist); + // Drop the environment use not in the list of post-dominated block. This is + // to help step (3) of this optimization, when we start moving instructions + // closer to their use. + for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) { + HEnvironment* environment = use.GetUser(); + HInstruction* user = environment->GetHolder(); + if (!post_dominated.IsBitSet(user->GetBlock()->GetBlockId())) { + environment->RemoveAsUserOfInput(use.GetIndex()); + environment->SetRawEnvAt(use.GetIndex(), nullptr); + } + } + } else { + // The information we have on the users was not enough to decide whether the + // instruction could be moved. + // Add the users to the work list, and keep the instruction in the work list + // to process it again once all users have been processed. + for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) { + AddInstruction(use.GetUser(), processed_instructions, post_dominated, &worklist); + } + } + } + + // Make sure we process instructions in dominated order. This is required for heap + // stores. + std::sort(move_in_order.begin(), move_in_order.end(), [](HInstruction* a, HInstruction* b) { + return b->StrictlyDominates(a); + }); + + // Step (3): Try to move sinking candidates. + for (HInstruction* instruction : move_in_order) { + HInstruction* position = nullptr; + if (instruction->IsArraySet() || instruction->IsInstanceFieldSet()) { + if (!instructions_that_can_move.IsBitSet(instruction->InputAt(0)->GetId())) { + // A store can trivially move, but it can safely do so only if the heap + // location it stores to can also move. + // TODO(ngeoffray): Handle allocation/store cycles by pruning these instructions + // from the set and all their inputs. 
+ continue; + } + // Find the position of the instruction we're storing into, filtering out this + // store and all other stores to that instruction. + position = FindIdealPosition(instruction->InputAt(0), post_dominated, /* filter */ true); + + // The position needs to be dominated by the store, in order for the store to move there. + if (position == nullptr || !instruction->GetBlock()->Dominates(position->GetBlock())) { + continue; + } + } else { + // Find the ideal position within the post dominated blocks. + position = FindIdealPosition(instruction, post_dominated); + if (position == nullptr) { + continue; + } + } + // Bail if we could not find a position in the post dominated blocks (for example, + // if there are multiple users whose common dominator is not in the list of + // post dominated blocks). + if (!post_dominated.IsBitSet(position->GetBlock()->GetBlockId())) { + continue; + } + MaybeRecordStat(MethodCompilationStat::kInstructionSunk); + instruction->MoveBefore(position, /* ensure_safety */ false); + } +} + +} // namespace art diff --git a/compiler/optimizing/code_sinking.h b/compiler/optimizing/code_sinking.h new file mode 100644 index 0000000000..59cda52a8c --- /dev/null +++ b/compiler/optimizing/code_sinking.h @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_OPTIMIZING_CODE_SINKING_H_ +#define ART_COMPILER_OPTIMIZING_CODE_SINKING_H_ + +#include "nodes.h" +#include "optimization.h" + +namespace art { + +/** + * Optimization pass to move instructions into uncommon branches, + * when it is safe to do so. + */ +class CodeSinking : public HOptimization { + public: + CodeSinking(HGraph* graph, OptimizingCompilerStats* stats) + : HOptimization(graph, kCodeSinkingPassName, stats) {} + + void Run() OVERRIDE; + + static constexpr const char* kCodeSinkingPassName = "code_sinking"; + + private: + // Try to move code only used by `end_block` and all its post-dominated / dominated + // blocks, to these blocks. + void SinkCodeToUncommonBranch(HBasicBlock* end_block); + + DISALLOW_COPY_AND_ASSIGN(CodeSinking); +}; + +} // namespace art + +#endif // ART_COMPILER_OPTIMIZING_CODE_SINKING_H_ diff --git a/compiler/optimizing/common_dominator.h b/compiler/optimizing/common_dominator.h index b459d24d7c..9f012cfbb2 100644 --- a/compiler/optimizing/common_dominator.h +++ b/compiler/optimizing/common_dominator.h @@ -36,12 +36,16 @@ class CommonDominator { // Create a finder starting with a given block. explicit CommonDominator(HBasicBlock* block) : dominator_(block), chain_length_(ChainLength(block)) { - DCHECK(block != nullptr); } // Update the common dominator with another block. 
void Update(HBasicBlock* block) { DCHECK(block != nullptr); + if (dominator_ == nullptr) { + dominator_ = block; + chain_length_ = ChainLength(block); + return; + } HBasicBlock* block2 = dominator_; DCHECK(block2 != nullptr); if (block == block2) { diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc index f72bd6a5a3..3842ef98da 100644 --- a/compiler/optimizing/optimizing_compiler.cc +++ b/compiler/optimizing/optimizing_compiler.cc @@ -56,6 +56,7 @@ #include "builder.h" #include "cha_guard_optimization.h" #include "code_generator.h" +#include "code_sinking.h" #include "compiled_method.h" #include "compiler.h" #include "constant_folding.h" @@ -521,6 +522,8 @@ static HOptimization* BuildOptimization( return new (arena) HLoopOptimization(graph, most_recent_induction); } else if (opt_name == CHAGuardOptimization::kCHAGuardOptimizationPassName) { return new (arena) CHAGuardOptimization(graph); + } else if (opt_name == CodeSinking::kCodeSinkingPassName) { + return new (arena) CodeSinking(graph, stats); #ifdef ART_ENABLE_CODEGEN_arm } else if (opt_name == arm::DexCacheArrayFixups::kDexCacheArrayFixupsArmPassName) { return new (arena) arm::DexCacheArrayFixups(graph, codegen, stats); @@ -787,6 +790,7 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph, graph, stats, "instruction_simplifier$before_codegen"); IntrinsicsRecognizer* intrinsics = new (arena) IntrinsicsRecognizer(graph, stats); CHAGuardOptimization* cha_guard = new (arena) CHAGuardOptimization(graph); + CodeSinking* code_sinking = new (arena) CodeSinking(graph, stats); HOptimization* optimizations1[] = { intrinsics, @@ -817,6 +821,7 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph, lse, cha_guard, dce3, + code_sinking, // The codegen has a few assumptions that only the instruction simplifier // can satisfy. For example, the code generator does not expect to see a // HTypeConversion from a type to the same type. 
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h index 203b1ec7ec..172c8f745d 100644 --- a/compiler/optimizing/optimizing_compiler_stats.h +++ b/compiler/optimizing/optimizing_compiler_stats.h @@ -67,6 +67,7 @@ enum MethodCompilationStat { kImplicitNullCheckGenerated, kExplicitNullCheckGenerated, kSimplifyIf, + kInstructionSunk, kLastStat }; @@ -147,6 +148,7 @@ class OptimizingCompilerStats { case kImplicitNullCheckGenerated: name = "ImplicitNullCheckGenerated"; break; case kExplicitNullCheckGenerated: name = "ExplicitNullCheckGenerated"; break; case kSimplifyIf: name = "SimplifyIf"; break; + case kInstructionSunk: name = "InstructionSunk"; break; case kLastStat: LOG(FATAL) << "invalid stat " diff --git a/test/639-checker-code-sinking/expected.txt b/test/639-checker-code-sinking/expected.txt new file mode 100644 index 0000000000..52e756c231 --- /dev/null +++ b/test/639-checker-code-sinking/expected.txt @@ -0,0 +1,3 @@ +0 +class java.lang.Object +43 diff --git a/test/639-checker-code-sinking/info.txt b/test/639-checker-code-sinking/info.txt new file mode 100644 index 0000000000..9722bdff2e --- /dev/null +++ b/test/639-checker-code-sinking/info.txt @@ -0,0 +1 @@ +Checker tests for the code sinking optimization pass. diff --git a/test/639-checker-code-sinking/src/Main.java b/test/639-checker-code-sinking/src/Main.java new file mode 100644 index 0000000000..1da19b687c --- /dev/null +++ b/test/639-checker-code-sinking/src/Main.java @@ -0,0 +1,355 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +public class Main { + + public static void main(String[] args) { + testSimpleUse(); + testTwoUses(); + testFieldStores(doThrow); + testFieldStoreCycle(); + testArrayStores(); + testOnlyStoreUses(); + testNoUse(); + testPhiInput(); + testVolatileStore(); + doThrow = true; + try { + testInstanceSideEffects(); + } catch (Error e) { + // expected + System.out.println(e.getMessage()); + } + try { + testStaticSideEffects(); + } catch (Error e) { + // expected + System.out.println(e.getMessage()); + } + + try { + testStoreStore(doThrow); + } catch (Error e) { + // expected + System.out.println(e.getMessage()); + } + } + + /// CHECK-START: void Main.testSimpleUse() code_sinking (before) + /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:java.lang.Object + /// CHECK: NewInstance [<<LoadClass>>] + /// CHECK: If + /// CHECK: begin_block + /// CHECK: Throw + + /// CHECK-START: void Main.testSimpleUse() code_sinking (after) + /// CHECK-NOT: NewInstance + /// CHECK: If + /// CHECK: begin_block + /// CHECK: <<Error:l\d+>> LoadClass class_name:java.lang.Error + /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:java.lang.Object + /// CHECK-NOT: begin_block + /// CHECK: NewInstance [<<LoadClass>>] + /// CHECK-NOT: begin_block + /// CHECK: NewInstance [<<Error>>] + /// CHECK: Throw + public static void testSimpleUse() { + Object o = new Object(); + if (doThrow) { + throw new Error(o.toString()); + } + } + + /// CHECK-START: void Main.testTwoUses() code_sinking (before) + /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:java.lang.Object + /// CHECK: 
NewInstance [<<LoadClass>>] + /// CHECK: If + /// CHECK: begin_block + /// CHECK: Throw + + /// CHECK-START: void Main.testTwoUses() code_sinking (after) + /// CHECK-NOT: NewInstance + /// CHECK: If + /// CHECK: begin_block + /// CHECK: <<Error:l\d+>> LoadClass class_name:java.lang.Error + /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:java.lang.Object + /// CHECK-NOT: begin_block + /// CHECK: NewInstance [<<LoadClass>>] + /// CHECK-NOT: begin_block + /// CHECK: NewInstance [<<Error>>] + /// CHECK: Throw + public static void testTwoUses() { + Object o = new Object(); + if (doThrow) { + throw new Error(o.toString() + o.toString()); + } + } + + /// CHECK-START: void Main.testFieldStores(boolean) code_sinking (before) + /// CHECK: <<Int42:i\d+>> IntConstant 42 + /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:Main + /// CHECK: <<NewInstance:l\d+>> NewInstance [<<LoadClass>>] + /// CHECK: InstanceFieldSet [<<NewInstance>>,<<Int42>>] + /// CHECK: If + /// CHECK: begin_block + /// CHECK: Throw + + /// CHECK-START: void Main.testFieldStores(boolean) code_sinking (after) + /// CHECK: <<Int42:i\d+>> IntConstant 42 + /// CHECK-NOT: NewInstance + /// CHECK: If + /// CHECK: begin_block + /// CHECK: <<Error:l\d+>> LoadClass class_name:java.lang.Error + /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:Main + /// CHECK-NOT: begin_block + /// CHECK: <<NewInstance:l\d+>> NewInstance [<<LoadClass>>] + /// CHECK-NOT: begin_block + /// CHECK: InstanceFieldSet [<<NewInstance>>,<<Int42>>] + /// CHECK-NOT: begin_block + /// CHECK: NewInstance [<<Error>>] + /// CHECK: Throw + public static void testFieldStores(boolean doThrow) { + Main m = new Main(); + m.intField = 42; + if (doThrow) { + throw new Error(m.toString()); + } + } + + /// CHECK-START: void Main.testFieldStoreCycle() code_sinking (before) + /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:Main + /// CHECK: <<NewInstance1:l\d+>> NewInstance [<<LoadClass>>] + /// CHECK: <<NewInstance2:l\d+>> NewInstance 
[<<LoadClass>>] + /// CHECK: InstanceFieldSet [<<NewInstance1>>,<<NewInstance2>>] + /// CHECK: InstanceFieldSet [<<NewInstance2>>,<<NewInstance1>>] + /// CHECK: If + /// CHECK: begin_block + /// CHECK: Throw + + // TODO(ngeoffray): Handle allocation/store cycles. + /// CHECK-START: void Main.testFieldStoreCycle() code_sinking (after) + /// CHECK: begin_block + /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:Main + /// CHECK: <<NewInstance1:l\d+>> NewInstance [<<LoadClass>>] + /// CHECK: <<NewInstance2:l\d+>> NewInstance [<<LoadClass>>] + /// CHECK: InstanceFieldSet [<<NewInstance1>>,<<NewInstance2>>] + /// CHECK: InstanceFieldSet [<<NewInstance2>>,<<NewInstance1>>] + /// CHECK: If + /// CHECK: begin_block + /// CHECK: Throw + public static void testFieldStoreCycle() { + Main m1 = new Main(); + Main m2 = new Main(); + m1.objectField = m2; + m2.objectField = m1; + if (doThrow) { + throw new Error(m1.toString() + m2.toString()); + } + } + + /// CHECK-START: void Main.testArrayStores() code_sinking (before) + /// CHECK: <<Int1:i\d+>> IntConstant 1 + /// CHECK: <<Int0:i\d+>> IntConstant 0 + /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:java.lang.Object[] + /// CHECK: <<NewArray:l\d+>> NewArray [<<LoadClass>>,<<Int1>>] + /// CHECK: ArraySet [<<NewArray>>,<<Int0>>,<<NewArray>>] + /// CHECK: If + /// CHECK: begin_block + /// CHECK: Throw + + /// CHECK-START: void Main.testArrayStores() code_sinking (after) + /// CHECK: <<Int1:i\d+>> IntConstant 1 + /// CHECK: <<Int0:i\d+>> IntConstant 0 + /// CHECK-NOT: NewArray + /// CHECK: If + /// CHECK: begin_block + /// CHECK: <<Error:l\d+>> LoadClass class_name:java.lang.Error + /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:java.lang.Object[] + /// CHECK-NOT: begin_block + /// CHECK: <<NewArray:l\d+>> NewArray [<<LoadClass>>,<<Int1>>] + /// CHECK-NOT: begin_block + /// CHECK: ArraySet [<<NewArray>>,<<Int0>>,<<NewArray>>] + /// CHECK-NOT: begin_block + /// CHECK: NewInstance [<<Error>>] + /// CHECK: Throw + public static 
void testArrayStores() { + Object[] o = new Object[1]; + o[0] = o; + if (doThrow) { + throw new Error(o.toString()); + } + } + + // Make sure code sinking does not crash on dead allocations. + public static void testOnlyStoreUses() { + Main m = new Main(); + Object[] o = new Object[1]; // dead allocation, should eventually be removed b/35634932. + o[0] = m; + o = null; // Avoid environment uses for the array allocation. + if (doThrow) { + throw new Error(m.toString()); + } + } + + // Make sure code sinking does not crash on dead code. + public static void testNoUse() { + Main m = new Main(); + boolean load = Main.doLoop; // dead code, not removed because of environment use. + // Ensure one environment use for the static field + $opt$noinline$foo(); + load = false; + if (doThrow) { + throw new Error(m.toString()); + } + } + + // Make sure we can move code only used by a phi. + /// CHECK-START: void Main.testPhiInput() code_sinking (before) + /// CHECK: <<Null:l\d+>> NullConstant + /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:java.lang.Object + /// CHECK: <<NewInstance:l\d+>> NewInstance [<<LoadClass>>] + /// CHECK: If + /// CHECK: begin_block + /// CHECK: Phi [<<Null>>,<<NewInstance>>] + /// CHECK: Throw + + /// CHECK-START: void Main.testPhiInput() code_sinking (after) + /// CHECK: <<Null:l\d+>> NullConstant + /// CHECK-NOT: NewInstance + /// CHECK: If + /// CHECK: begin_block + /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:java.lang.Object + /// CHECK: <<NewInstance:l\d+>> NewInstance [<<LoadClass>>] + /// CHECK: begin_block + /// CHECK: Phi [<<Null>>,<<NewInstance>>] + /// CHECK: <<Error:l\d+>> LoadClass class_name:java.lang.Error + /// CHECK: NewInstance [<<Error>>] + /// CHECK: Throw + public static void testPhiInput() { + Object f = new Object(); + if (doThrow) { + Object o = null; + int i = 2; + if (doLoop) { + o = f; + i = 42; + } + throw new Error(o.toString() + i); + } + } + + static void $opt$noinline$foo() {} + + // Check that we do not move 
volatile stores. + /// CHECK-START: void Main.testVolatileStore() code_sinking (before) + /// CHECK: <<Int42:i\d+>> IntConstant 42 + /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:Main + /// CHECK: <<NewInstance:l\d+>> NewInstance [<<LoadClass>>] + /// CHECK: InstanceFieldSet [<<NewInstance>>,<<Int42>>] + /// CHECK: If + /// CHECK: begin_block + /// CHECK: Throw + + /// CHECK-START: void Main.testVolatileStore() code_sinking (after) + /// CHECK: <<Int42:i\d+>> IntConstant 42 + /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:Main + /// CHECK: <<NewInstance:l\d+>> NewInstance [<<LoadClass>>] + /// CHECK: InstanceFieldSet [<<NewInstance>>,<<Int42>>] + /// CHECK: If + /// CHECK: begin_block + /// CHECK: Throw + public static void testVolatileStore() { + Main m = new Main(); + m.volatileField = 42; + if (doThrow) { + throw new Error(m.toString()); + } + } + + public static void testInstanceSideEffects() { + int a = mainField.intField; + $noinline$changeIntField(); + if (doThrow) { + throw new Error("" + a); + } + } + + static void $noinline$changeIntField() { + mainField.intField = 42; + } + + public static void testStaticSideEffects() { + Object o = obj; + $noinline$changeStaticObjectField(); + if (doThrow) { + throw new Error(o.getClass().toString()); + } + } + + static void $noinline$changeStaticObjectField() { + obj = new Main(); + } + + // Test that we preserve the order of stores. 
+ /// CHECK-START: void Main.testStoreStore(boolean) code_sinking (before) + /// CHECK: <<Int42:i\d+>> IntConstant 42 + /// CHECK: <<Int43:i\d+>> IntConstant 43 + /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:Main + /// CHECK: <<NewInstance:l\d+>> NewInstance [<<LoadClass>>] + /// CHECK: InstanceFieldSet [<<NewInstance>>,<<Int42>>] + /// CHECK: InstanceFieldSet [<<NewInstance>>,<<Int43>>] + /// CHECK: If + /// CHECK: begin_block + /// CHECK: Throw + + /// CHECK-START: void Main.testStoreStore(boolean) code_sinking (after) + /// CHECK: <<Int42:i\d+>> IntConstant 42 + /// CHECK: <<Int43:i\d+>> IntConstant 43 + /// CHECK-NOT: NewInstance + /// CHECK: If + /// CHECK: begin_block + /// CHECK: <<Error:l\d+>> LoadClass class_name:java.lang.Error + /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:Main + /// CHECK-NOT: begin_block + /// CHECK: <<NewInstance:l\d+>> NewInstance [<<LoadClass>>] + /// CHECK-NOT: begin_block + /// CHECK: InstanceFieldSet [<<NewInstance>>,<<Int42>>] + /// CHECK-NOT: begin_block + /// CHECK: InstanceFieldSet [<<NewInstance>>,<<Int43>>] + /// CHECK-NOT: begin_block + /// CHECK: NewInstance [<<Error>>] + /// CHECK: Throw + public static void testStoreStore(boolean doThrow) { + Main m = new Main(); + m.intField = 42; + m.intField = 43; + if (doThrow) { + throw new Error(m.$opt$noinline$toString()); + } + } + + public String $opt$noinline$toString() { + return "" + intField; + } + + volatile int volatileField; + int intField; + Object objectField; + static boolean doThrow; + static boolean doLoop; + static Main mainField = new Main(); + static Object obj = new Object(); +} |