Dead code elimination based on GVN results.
Change-Id: I5b77411a8f088f0b561da14b123cf6b0501c9db5
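
In brief, the new pass (registered as "DCE" in bb_optimizations.h below) runs per basic
block on the LVNs computed by GVN: a forward record pass builds per-vreg chains of the
MIRs that change each Dalvik register, killing overwritten moves and redundant
re-definitions along the way, and a backward pass then removes instructions whose
results are never used. A toy standalone model of the core observation (illustrative
only, not ART code; the real pass must also rename SSA defs/uses or insert Phis, handle
wide registers and respect must-keep instructions):

    // Toy model of GVN-based DCE: a redefinition that produces the value name
    // the vreg already holds is redundant.
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Change { int vreg; uint16_t value_name; bool dead; };

    int main() {
      constexpr uint16_t kNoValue = 0xffffu;
      std::vector<uint16_t> current(2, kNoValue);  // current value name per vreg
      // v0 := name 7; v1 := name 9; v0 := name 7 again (recomputed expression).
      std::vector<Change> code = {{0, 7u, false}, {1, 9u, false}, {0, 7u, false}};
      for (Change& c : code) {
        if (current[c.vreg] == c.value_name) {
          c.dead = true;  // the vreg already holds this value
        }
        current[c.vreg] = c.value_name;
      }
      for (const Change& c : code) {
        std::printf("v%d <- name %u%s\n", c.vreg,
                    static_cast<unsigned>(c.value_name), c.dead ? " (dead)" : "");
      }
      return 0;
    }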
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 24d96ba..5966951 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -154,6 +154,7 @@
runtime/jni_internal_test.cc \
runtime/proxy_test.cc \
runtime/reflection_test.cc \
compiler/dex/global_value_numbering_test.cc \
+ compiler/dex/gvn_dead_code_elimination_test.cc \
compiler/dex/local_value_numbering_test.cc \
compiler/dex/mir_graph_test.cc \
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 61379fb..69b4295 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -21,6 +21,7 @@
LIBART_COMPILER_SRC_FILES := \
compiled_method.cc \
dex/global_value_numbering.cc \
+ dex/gvn_dead_code_elimination.cc \
dex/local_value_numbering.cc \
dex/quick/arm/assemble_arm.cc \
dex/quick/arm/call_arm.cc \
diff --git a/compiler/dex/bb_optimizations.h b/compiler/dex/bb_optimizations.h
index 7685200..93d83c6 100644
--- a/compiler/dex/bb_optimizations.h
+++ b/compiler/dex/bb_optimizations.h
@@ -240,6 +240,41 @@
};
/**
+ * @class DeadCodeEliminationPass
+ * @brief Performs the GVN-based dead code elimination pass.
+ */
+class DeadCodeEliminationPass : public PassME {
+ public:
+ DeadCodeEliminationPass() : PassME("DCE", kPreOrderDFSTraversal, "4_post_dce_cfg") {
+ }
+
+ bool Gate(const PassDataHolder* data) const OVERRIDE {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ return c_unit->mir_graph->EliminateDeadCodeGate();
+ }
+
+ bool Worker(PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
+ CompilationUnit* c_unit = pass_me_data_holder->c_unit;
+ DCHECK(c_unit != nullptr);
+ BasicBlock* bb = pass_me_data_holder->bb;
+ DCHECK(bb != nullptr);
+ return c_unit->mir_graph->EliminateDeadCode(bb);
+ }
+
+ void End(PassDataHolder* data) const OVERRIDE {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ c_unit->mir_graph->EliminateDeadCodeEnd();
+ down_cast<PassMEDataHolder*>(data)->dirty = !c_unit->mir_graph->MirSsaRepUpToDate();
+ }
+};
+
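For orientation, the three hooks above are driven by the ME pass framework roughly as
sketched below. This driver loop is an assumption for illustration only (the patch
defines just Gate(), Worker() and End()); it is not code from ART:

    // Hypothetical sketch of a PassME driver (assumed, for illustration).
    PassMEDataHolder data;
    data.c_unit = c_unit;
    if (pass->Gate(&data)) {                  // EliminateDeadCodeGate()
      for (BasicBlock* bb : pre_order_dfs) {  // kPreOrderDFSTraversal above
        data.bb = bb;
        pass->Worker(&data);                  // EliminateDeadCode(bb) per block
      }
      pass->End(&data);  // EliminateDeadCodeEnd(); dirty => recompute SSA data
    }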
+/**
* @class BBCombine
* @brief Perform the basic block combination pass.
*/
diff --git a/compiler/dex/dex_flags.h b/compiler/dex/dex_flags.h
index eaf272b..e8eb40c 100644
--- a/compiler/dex/dex_flags.h
+++ b/compiler/dex/dex_flags.h
@@ -27,6 +27,7 @@
kNullCheckElimination,
kClassInitCheckElimination,
kGlobalValueNumbering,
+ kGvnDeadCodeElimination,
kLocalValueNumbering,
kPromoteRegs,
kTrackLiveTemps,
diff --git a/compiler/dex/global_value_numbering.cc b/compiler/dex/global_value_numbering.cc
index a8fd812..ab3c946 100644
--- a/compiler/dex/global_value_numbering.cc
+++ b/compiler/dex/global_value_numbering.cc
@@ -28,7 +28,7 @@
allocator_(allocator),
bbs_processed_(0u),
max_bbs_to_process_(kMaxBbsToProcessMultiplyFactor * mir_graph_->GetNumReachableBlocks()),
- last_value_(0u),
+ last_value_(kNullValue),
modifications_allowed_(true),
mode_(mode),
global_value_map_(std::less<uint64_t>(), allocator->Adapter()),
@@ -128,7 +128,11 @@
merge_lvns_.clear();
bool change = (lvns_[bb->id] == nullptr) || !lvns_[bb->id]->Equals(*work_lvn_);
- if (change) {
+ if (mode_ == kModeGvn) {
+ // In GVN mode, keep the latest LVN even if Equals() indicates no change. This is
+ // to keep the correct values of fields that do not contribute to Equals() as long
+ // as they depend only on predecessor LVNs' fields that do contribute to Equals().
+ // Currently, that's LVN::merge_map_ used by LVN::GetStartingVregValueNumberImpl().
std::unique_ptr<const LocalValueNumbering> old_lvn(lvns_[bb->id]);
lvns_[bb->id] = work_lvn_.release();
} else {
@@ -178,7 +182,7 @@
}
// IF_EQZ/IF_NEZ checks some sreg, see if that sreg contains the value_name.
int s_reg = pred_bb->last_mir_insn->ssa_rep->uses[0];
- if (!pred_lvn->IsSregValue(s_reg, value_name)) {
+ if (pred_lvn->GetSregValue(s_reg) != value_name) {
return false;
}
}
diff --git a/compiler/dex/global_value_numbering.h b/compiler/dex/global_value_numbering.h
index 023dbdb..c7bca85 100644
--- a/compiler/dex/global_value_numbering.h
+++ b/compiler/dex/global_value_numbering.h
@@ -31,6 +31,9 @@
class GlobalValueNumbering : public DeletableArenaObject<kArenaAllocMisc> {
public:
+ static constexpr uint16_t kNoValue = 0xffffu;
+ static constexpr uint16_t kNullValue = 1u;
+
enum Mode {
kModeGvn,
kModeGvnPostProcessing,
@@ -51,6 +54,14 @@
GlobalValueNumbering(CompilationUnit* cu, ScopedArenaAllocator* allocator, Mode mode);
~GlobalValueNumbering();
+ CompilationUnit* GetCompilationUnit() const {
+ return cu_;
+ }
+
+ MIRGraph* GetMirGraph() const {
+ return mir_graph_;
+ }
+
// Prepare LVN for the basic block.
LocalValueNumbering* PrepareBasicBlock(BasicBlock* bb,
ScopedArenaAllocator* allocator = nullptr);
@@ -70,9 +81,10 @@
return modifications_allowed_ && Good();
}
- private:
- static constexpr uint16_t kNoValue = 0xffffu;
+ // Retrieve the LVN with GVN results for a given BasicBlock.
+ const LocalValueNumbering* GetLvn(BasicBlockId bb_id) const;
+ private:
// Allocate a new value name.
uint16_t NewValueName();
@@ -88,7 +100,7 @@
uint16_t LookupValue(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier) {
uint16_t res;
uint64_t key = BuildKey(op, operand1, operand2, modifier);
- ValueMap::iterator lb = global_value_map_.lower_bound(key);
+ auto lb = global_value_map_.lower_bound(key);
if (lb != global_value_map_.end() && lb->first == key) {
res = lb->second;
} else {
@@ -99,10 +111,10 @@
}
// Look up a value in the global value map, don't add a new entry if there was none before.
- uint16_t FindValue(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier) {
+ uint16_t FindValue(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier) const {
uint16_t res;
uint64_t key = BuildKey(op, operand1, operand2, modifier);
- ValueMap::iterator lb = global_value_map_.lower_bound(key);
+ auto lb = global_value_map_.lower_bound(key);
if (lb != global_value_map_.end() && lb->first == key) {
res = lb->second;
} else {
@@ -111,18 +123,6 @@
return res;
}
- // Check if the exact value is stored in the global value map.
- bool HasValue(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier,
- uint16_t value) const {
- DCHECK(value != 0u || !Good());
- DCHECK_LE(value, last_value_);
- // This is equivalent to value == LookupValue(op, operand1, operand2, modifier)
- // except that it doesn't add an entry to the global value map if it's not there.
- uint64_t key = BuildKey(op, operand1, operand2, modifier);
- ValueMap::const_iterator it = global_value_map_.find(key);
- return (it != global_value_map_.end() && it->second == value);
- }
-
// Get an instance field id.
uint16_t GetIFieldId(MIR* mir) {
return GetMirGraph()->GetGvnIFieldId(mir);
@@ -200,14 +200,6 @@
bool DivZeroCheckedInAllPredecessors(const ScopedArenaVector<uint16_t>& merge_names) const;
- CompilationUnit* GetCompilationUnit() const {
- return cu_;
- }
-
- MIRGraph* GetMirGraph() const {
- return mir_graph_;
- }
-
ScopedArenaAllocator* Allocator() const {
return allocator_;
}
@@ -255,6 +247,13 @@
};
std::ostream& operator<<(std::ostream& os, const GlobalValueNumbering::Mode& rhs);
+inline const LocalValueNumbering* GlobalValueNumbering::GetLvn(BasicBlockId bb_id) const {
+ DCHECK_EQ(mode_, kModeGvnPostProcessing);
+ DCHECK_LT(bb_id, lvns_.size());
+ DCHECK(lvns_[bb_id] != nullptr);
+ return lvns_[bb_id];
+}
+
inline void GlobalValueNumbering::StartPostProcessing() {
DCHECK(Good());
DCHECK_EQ(mode_, kModeGvn);
diff --git a/compiler/dex/global_value_numbering_test.cc b/compiler/dex/global_value_numbering_test.cc
index cfa6388..54e34ea 100644
--- a/compiler/dex/global_value_numbering_test.cc
+++ b/compiler/dex/global_value_numbering_test.cc
@@ -134,8 +134,8 @@
{ bb, opcode, 0u, 0u, 2, { src, src + 1 }, 2, { reg, reg + 1 } }
#define DEF_PHI2(bb, reg, src1, src2) \
{ bb, static_cast<Instruction::Code>(kMirOpPhi), 0, 0u, 2u, { src1, src2 }, 1, { reg } }
-#define DEF_DIV_REM(bb, opcode, result, dividend, divisor) \
- { bb, opcode, 0u, 0u, 2, { dividend, divisor }, 1, { result } }
+#define DEF_BINOP(bb, opcode, result, src1, src2) \
+ { bb, opcode, 0u, 0u, 2, { src1, src2 }, 1, { result } }
void DoPrepareIFields(const IFieldDef* defs, size_t count) {
cu_.mir_graph->ifield_lowering_infos_.clear();
@@ -267,7 +267,6 @@
mir->offset = i; // LVN uses offset only for debug output
mir->optimization_flags = 0u;
}
- mirs_[count - 1u].next = nullptr;
DexFile::CodeItem* code_item = static_cast<DexFile::CodeItem*>(
cu_.arena.Alloc(sizeof(DexFile::CodeItem), kArenaAllocMisc));
code_item->insns_size_in_code_units_ = 2u * count;
@@ -279,6 +278,20 @@
DoPrepareMIRs(defs, count);
}
+ void DoPrepareVregToSsaMapExit(BasicBlockId bb_id, const int32_t* map, size_t count) {
+ BasicBlock* bb = cu_.mir_graph->GetBasicBlock(bb_id);
+ ASSERT_TRUE(bb != nullptr);
+ ASSERT_TRUE(bb->data_flow_info != nullptr);
+ bb->data_flow_info->vreg_to_ssa_map_exit =
+ cu_.arena.AllocArray<int32_t>(count, kArenaAllocDFInfo);
+ std::copy_n(map, count, bb->data_flow_info->vreg_to_ssa_map_exit);
+ }
+
+ template <size_t count>
+ void PrepareVregToSsaMapExit(BasicBlockId bb_id, const int32_t (&map)[count]) {
+ DoPrepareVregToSsaMapExit(bb_id, map, count);
+ }
+
void PerformGVN() {
DoPerformGVN<LoopRepeatingTopologicalSortIterator>();
}
@@ -294,9 +307,9 @@
cu_.mir_graph->ComputeDominators();
cu_.mir_graph->ComputeTopologicalSortOrder();
cu_.mir_graph->SSATransformationEnd();
- cu_.mir_graph->temp_.gvn.ifield_ids_ = GlobalValueNumbering::PrepareGvnFieldIds(
+ cu_.mir_graph->temp_.gvn.ifield_ids = GlobalValueNumbering::PrepareGvnFieldIds(
allocator_.get(), cu_.mir_graph->ifield_lowering_infos_);
- cu_.mir_graph->temp_.gvn.sfield_ids_ = GlobalValueNumbering::PrepareGvnFieldIds(
+ cu_.mir_graph->temp_.gvn.sfield_ids = GlobalValueNumbering::PrepareGvnFieldIds(
allocator_.get(), cu_.mir_graph->sfield_lowering_infos_);
ASSERT_TRUE(gvn_ == nullptr);
gvn_.reset(new (allocator_.get()) GlobalValueNumbering(&cu_, allocator_.get(),
@@ -348,6 +361,10 @@
cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
cu_.access_flags = kAccStatic; // Don't let "this" interfere with this test.
allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
+ // By default, the zero-initialized reg_location_[.] with ref == false tells LVN that
+ // 0 constants are integral, not references. Nothing else is used by LVN/GVN.
+ cu_.mir_graph->reg_location_ =
+ cu_.arena.AllocArray<RegLocation>(kMaxSsaRegs, kArenaAllocRegAlloc);
// Bind all possible sregs to live vregs for test purposes.
live_in_v_->SetInitialBits(kMaxSsaRegs);
cu_.mir_graph->ssa_base_vregs_.reserve(kMaxSsaRegs);
@@ -1570,6 +1587,40 @@
EXPECT_NE(value_names_[4], value_names_[3]);
}
+TEST_F(GlobalValueNumberingTestLoop, IFieldLoopVariable) {
+ static const IFieldDef ifields[] = {
+ { 0u, 1u, 0u, false, kDexMemAccessWord },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3, Instruction::CONST, 0u, 0),
+ DEF_IPUT(3, Instruction::IPUT, 0u, 100u, 0u),
+ DEF_IGET(4, Instruction::IGET, 2u, 100u, 0u),
+ DEF_BINOP(4, Instruction::ADD_INT, 3u, 2u, 101u),
+ DEF_IPUT(4, Instruction::IPUT, 3u, 100u, 0u),
+ };
+
+ PrepareIFields(ifields);
+ PrepareMIRs(mirs);
+ PerformGVN();
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ EXPECT_NE(value_names_[2], value_names_[0]);
+ EXPECT_NE(value_names_[3], value_names_[0]);
+ EXPECT_NE(value_names_[3], value_names_[2]);
+
+ // Set up vreg_to_ssa_map_exit for prologue and loop and set post-processing mode
+ // as needed for GetStartingVregValueNumber().
+ const int32_t prologue_vreg_to_ssa_map_exit[] = { 0 };
+ const int32_t loop_vreg_to_ssa_map_exit[] = { 3 };
+ PrepareVregToSsaMapExit(3, prologue_vreg_to_ssa_map_exit);
+ PrepareVregToSsaMapExit(4, loop_vreg_to_ssa_map_exit);
+ gvn_->StartPostProcessing();
+
+ // Check that vreg 0 has the same value number as the result of IGET 2u.
+ const LocalValueNumbering* loop = gvn_->GetLvn(4);
+ EXPECT_EQ(value_names_[2], loop->GetStartingVregValueNumber(0));
+}
+
TEST_F(GlobalValueNumberingTestCatch, IFields) {
static const IFieldDef ifields[] = {
{ 0u, 1u, 0u, false, kDexMemAccessWord },
@@ -2225,18 +2276,18 @@
TEST_F(GlobalValueNumberingTestDiamond, DivZeroCheckDiamond) {
static const MIRDef mirs[] = {
- DEF_DIV_REM(3u, Instruction::DIV_INT, 1u, 20u, 21u),
- DEF_DIV_REM(3u, Instruction::DIV_INT, 2u, 24u, 21u),
- DEF_DIV_REM(3u, Instruction::DIV_INT, 3u, 20u, 23u),
- DEF_DIV_REM(4u, Instruction::DIV_INT, 4u, 24u, 22u),
- DEF_DIV_REM(4u, Instruction::DIV_INT, 9u, 24u, 25u),
- DEF_DIV_REM(5u, Instruction::DIV_INT, 5u, 24u, 21u),
- DEF_DIV_REM(5u, Instruction::DIV_INT, 10u, 24u, 26u),
+ DEF_BINOP(3u, Instruction::DIV_INT, 1u, 20u, 21u),
+ DEF_BINOP(3u, Instruction::DIV_INT, 2u, 24u, 21u),
+ DEF_BINOP(3u, Instruction::DIV_INT, 3u, 20u, 23u),
+ DEF_BINOP(4u, Instruction::DIV_INT, 4u, 24u, 22u),
+ DEF_BINOP(4u, Instruction::DIV_INT, 9u, 24u, 25u),
+ DEF_BINOP(5u, Instruction::DIV_INT, 5u, 24u, 21u),
+ DEF_BINOP(5u, Instruction::DIV_INT, 10u, 24u, 26u),
DEF_PHI2(6u, 27u, 25u, 26u),
- DEF_DIV_REM(6u, Instruction::DIV_INT, 12u, 20u, 27u),
- DEF_DIV_REM(6u, Instruction::DIV_INT, 6u, 24u, 21u),
- DEF_DIV_REM(6u, Instruction::DIV_INT, 7u, 20u, 23u),
- DEF_DIV_REM(6u, Instruction::DIV_INT, 8u, 20u, 22u),
+ DEF_BINOP(6u, Instruction::DIV_INT, 12u, 20u, 27u),
+ DEF_BINOP(6u, Instruction::DIV_INT, 6u, 24u, 21u),
+ DEF_BINOP(6u, Instruction::DIV_INT, 7u, 20u, 23u),
+ DEF_BINOP(6u, Instruction::DIV_INT, 8u, 20u, 22u),
};
static const bool expected_ignore_div_zero_check[] = {
diff --git a/compiler/dex/gvn_dead_code_elimination.cc b/compiler/dex/gvn_dead_code_elimination.cc
new file mode 100644
index 0000000..2e7f032
--- /dev/null
+++ b/compiler/dex/gvn_dead_code_elimination.cc
@@ -0,0 +1,1391 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gvn_dead_code_elimination.h"
+
+#include <sstream>
+
+#include "base/bit_vector-inl.h"
+#include "base/macros.h"
+#include "compiler_enums.h"
+#include "dataflow_iterator-inl.h"
+#include "dex_instruction.h"
+#include "dex/mir_graph.h"
+#include "local_value_numbering.h"
+#include "utils/arena_bit_vector.h"
+
+namespace art {
+
+constexpr uint16_t GvnDeadCodeElimination::kNoValue;
+constexpr uint16_t GvnDeadCodeElimination::kNPos;
+
+inline uint16_t GvnDeadCodeElimination::MIRData::PrevChange(int v_reg) const {
+ DCHECK(has_def);
+ DCHECK(v_reg == vreg_def || v_reg == vreg_def + 1);
+ return (v_reg == vreg_def) ? prev_value.change : prev_value_high.change;
+}
+
+inline void GvnDeadCodeElimination::MIRData::SetPrevChange(int v_reg, uint16_t change) {
+ DCHECK(has_def);
+ DCHECK(v_reg == vreg_def || v_reg == vreg_def + 1);
+ if (v_reg == vreg_def) {
+ prev_value.change = change;
+ } else {
+ prev_value_high.change = change;
+ }
+}
+
+inline void GvnDeadCodeElimination::MIRData::RemovePrevChange(int v_reg, MIRData* prev_data) {
+ DCHECK_NE(PrevChange(v_reg), kNPos);
+ DCHECK(v_reg == prev_data->vreg_def || v_reg == prev_data->vreg_def + 1);
+ if (vreg_def == v_reg) {
+ if (prev_data->vreg_def == v_reg) {
+ prev_value = prev_data->prev_value;
+ low_def_over_high_word = prev_data->low_def_over_high_word;
+ } else {
+ prev_value = prev_data->prev_value_high;
+ low_def_over_high_word =
+ prev_data->prev_value_high.value != kNPos && !prev_data->high_def_over_low_word;
+ }
+ } else {
+ if (prev_data->vreg_def == v_reg) {
+ prev_value_high = prev_data->prev_value;
+ high_def_over_low_word =
+ prev_data->prev_value.value != kNPos && !prev_data->low_def_over_high_word;
+ } else {
+ prev_value_high = prev_data->prev_value_high;
+ high_def_over_low_word = prev_data->high_def_over_low_word;
+ }
+ }
+}
+
+GvnDeadCodeElimination::VRegChains::VRegChains(uint32_t num_vregs, ScopedArenaAllocator* alloc)
+ : num_vregs_(num_vregs),
+ vreg_data_(alloc->AllocArray<VRegValue>(num_vregs, kArenaAllocMisc)),
+ mir_data_(alloc->Adapter()) {
+ mir_data_.reserve(100);
+}
+
+inline void GvnDeadCodeElimination::VRegChains::Reset() {
+ DCHECK(mir_data_.empty());
+ std::fill_n(vreg_data_, num_vregs_, VRegValue());
+}
+
+void GvnDeadCodeElimination::VRegChains::AddMIRWithDef(MIR* mir, int v_reg, bool wide,
+ uint16_t new_value) {
+ uint16_t pos = mir_data_.size();
+ mir_data_.emplace_back(mir);
+ MIRData* data = &mir_data_.back();
+ data->has_def = true;
+ data->wide_def = wide;
+ data->vreg_def = v_reg;
+
+ if (vreg_data_[v_reg].change != kNPos &&
+ mir_data_[vreg_data_[v_reg].change].vreg_def + 1 == v_reg) {
+ data->low_def_over_high_word = true;
+ }
+ data->prev_value = vreg_data_[v_reg];
+ DCHECK_LT(static_cast<size_t>(v_reg), num_vregs_);
+ vreg_data_[v_reg].value = new_value;
+ vreg_data_[v_reg].change = pos;
+
+ if (wide) {
+ if (vreg_data_[v_reg + 1].change != kNPos &&
+ mir_data_[vreg_data_[v_reg + 1].change].vreg_def == v_reg + 1) {
+ data->high_def_over_low_word = true;
+ }
+ data->prev_value_high = vreg_data_[v_reg + 1];
+ DCHECK_LT(static_cast<size_t>(v_reg + 1), num_vregs_);
+ vreg_data_[v_reg + 1].value = new_value;
+ vreg_data_[v_reg + 1].change = pos;
+ }
+}
+
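AddMIRWithDef() threads a per-vreg linked list through the MIR data: each new definition
saves the register's previous (value, change) pair in its own prev_value, and the
register's head slot then points at the new change. A standalone model of just that
threading (illustrative, not ART code; wide defs add the same links for v_reg + 1):

    #include <cstdint>
    #include <vector>

    constexpr uint16_t kNPos = 0xffffu;

    struct VRegValue { uint16_t value = 0xffffu; uint16_t change = kNPos; };
    struct ChangeData { int vreg; VRegValue prev_value; };

    int main() {
      std::vector<VRegValue> vreg_data(4);   // chain heads, one per vreg
      std::vector<ChangeData> mir_data;      // one entry per recorded change
      auto add_def = [&](int v_reg, uint16_t new_value) {
        mir_data.push_back({v_reg, vreg_data[v_reg]});  // link to previous head
        vreg_data[v_reg].value = new_value;
        vreg_data[v_reg].change = static_cast<uint16_t>(mir_data.size() - 1u);
      };
      add_def(0, 10u);  // change 0
      add_def(1, 11u);  // change 1
      add_def(0, 12u);  // change 2; its prev_value.change is 0
      // Walking v0's chain visits change 2, then 0, then stops at kNPos --
      // exactly how FindKillHead() and FindFirstChangeAfter() iterate.
      return (vreg_data[0].change == 2u && mir_data[2].prev_value.change == 0u) ? 0 : 1;
    }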
+inline void GvnDeadCodeElimination::VRegChains::AddMIRWithoutDef(MIR* mir) {
+ mir_data_.emplace_back(mir);
+}
+
+void GvnDeadCodeElimination::VRegChains::RemoveLastMIRData() {
+ MIRData* data = LastMIRData();
+ if (data->has_def) {
+ DCHECK_EQ(vreg_data_[data->vreg_def].change, NumMIRs() - 1u);
+ vreg_data_[data->vreg_def] = data->prev_value;
+ if (data->wide_def) {
+ DCHECK_EQ(vreg_data_[data->vreg_def + 1].change, NumMIRs() - 1u);
+ vreg_data_[data->vreg_def + 1] = data->prev_value_high;
+ }
+ }
+ mir_data_.pop_back();
+}
+
+void GvnDeadCodeElimination::VRegChains::RemoveTrailingNops() {
+ // There's at least one NOP to drop. There may be more.
+ MIRData* last_data = LastMIRData();
+ DCHECK(!last_data->must_keep && !last_data->has_def);
+ do {
+ DCHECK_EQ(static_cast<int>(last_data->mir->dalvikInsn.opcode), static_cast<int>(kMirOpNop));
+ mir_data_.pop_back();
+ if (mir_data_.empty()) {
+ break;
+ }
+ last_data = LastMIRData();
+ } while (!last_data->must_keep && !last_data->has_def);
+}
+
+inline size_t GvnDeadCodeElimination::VRegChains::NumMIRs() const {
+ return mir_data_.size();
+}
+
+inline GvnDeadCodeElimination::MIRData* GvnDeadCodeElimination::VRegChains::GetMIRData(size_t pos) {
+ DCHECK_LT(pos, mir_data_.size());
+ return &mir_data_[pos];
+}
+
+inline GvnDeadCodeElimination::MIRData* GvnDeadCodeElimination::VRegChains::LastMIRData() {
+ DCHECK(!mir_data_.empty());
+ return &mir_data_.back();
+}
+
+uint32_t GvnDeadCodeElimination::VRegChains::NumVRegs() const {
+ return num_vregs_;
+}
+
+void GvnDeadCodeElimination::VRegChains::InsertInitialValueHigh(int v_reg, uint16_t value) {
+ DCHECK_NE(value, kNoValue);
+ DCHECK_LT(static_cast<size_t>(v_reg), num_vregs_);
+ uint16_t change = vreg_data_[v_reg].change;
+ if (change == kNPos) {
+ vreg_data_[v_reg].value = value;
+ } else {
+ while (true) {
+ MIRData* data = &mir_data_[change];
+ DCHECK(data->vreg_def == v_reg || data->vreg_def + 1 == v_reg);
+ if (data->vreg_def == v_reg) { // Low word, use prev_value.
+ if (data->prev_value.change == kNPos) {
+ DCHECK_EQ(data->prev_value.value, kNoValue);
+ data->prev_value.value = value;
+ data->low_def_over_high_word = true;
+ break;
+ }
+ change = data->prev_value.change;
+ } else { // High word, use prev_value_high.
+ if (data->prev_value_high.change == kNPos) {
+ DCHECK_EQ(data->prev_value_high.value, kNoValue);
+ data->prev_value_high.value = value;
+ break;
+ }
+ change = data->prev_value_high.change;
+ }
+ }
+ }
+}
+
+void GvnDeadCodeElimination::VRegChains::UpdateInitialVRegValue(int v_reg, bool wide,
+ const LocalValueNumbering* lvn) {
+ DCHECK_LT(static_cast<size_t>(v_reg), num_vregs_);
+ if (!wide) {
+ if (vreg_data_[v_reg].value == kNoValue) {
+ uint16_t old_value = lvn->GetStartingVregValueNumber(v_reg);
+ if (old_value == kNoValue) {
+        // Maybe there was a wide value in v_reg before. Do not check for a wide value in
+        // v_reg-1; that will be done only if we see a definition of v_reg-1, as it's
+        // otherwise unnecessary.
+ old_value = lvn->GetStartingVregValueNumberWide(v_reg);
+ if (old_value != kNoValue) {
+ InsertInitialValueHigh(v_reg + 1, old_value);
+ }
+ }
+ vreg_data_[v_reg].value = old_value;
+ }
+ } else {
+ DCHECK_LT(static_cast<size_t>(v_reg + 1), num_vregs_);
+ bool check_high = true;
+ if (vreg_data_[v_reg].value == kNoValue) {
+ uint16_t old_value = lvn->GetStartingVregValueNumberWide(v_reg);
+ if (old_value != kNoValue) {
+ InsertInitialValueHigh(v_reg + 1, old_value);
+ check_high = false; // High word has been processed.
+ } else {
+        // Maybe there was a narrow value before. Do not check for a wide value in v_reg-1;
+        // that will be done only if we see a definition of v_reg-1, as it's otherwise
+        // unnecessary.
+ old_value = lvn->GetStartingVregValueNumber(v_reg);
+ }
+ vreg_data_[v_reg].value = old_value;
+ }
+ if (check_high && vreg_data_[v_reg + 1].value == kNoValue) {
+ uint16_t old_value = lvn->GetStartingVregValueNumber(v_reg + 1);
+ if (old_value == kNoValue && static_cast<size_t>(v_reg + 2) < num_vregs_) {
+ // Maybe there was a wide value before.
+ old_value = lvn->GetStartingVregValueNumberWide(v_reg + 1);
+ if (old_value != kNoValue) {
+ InsertInitialValueHigh(v_reg + 2, old_value);
+ }
+ }
+ vreg_data_[v_reg + 1].value = old_value;
+ }
+ }
+}
+
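The narrow/wide fallbacks above follow the Dalvik convention that a wide value written
to v_reg also occupies v_reg + 1. An illustrative scenario (not from the patch):

    // Entry state of the block, as the LVN sees it:
    //   const-wide v2, #L in a predecessor => wide value starting at v2.
    //
    // UpdateInitialVRegValue(2, /* wide */ false, lvn):
    //   GetStartingVregValueNumber(2) misses (no narrow value in v2), then
    //   GetStartingVregValueNumberWide(2) hits; the value is recorded for v2
    //   and InsertInitialValueHigh(3, value) threads the high word into v3's
    //   chain so that later checks see the pair as still intact.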
+inline uint16_t GvnDeadCodeElimination::VRegChains::LastChange(int v_reg) {
+ DCHECK_LT(static_cast<size_t>(v_reg), num_vregs_);
+ return vreg_data_[v_reg].change;
+}
+
+inline uint16_t GvnDeadCodeElimination::VRegChains::CurrentValue(int v_reg) {
+ DCHECK_LT(static_cast<size_t>(v_reg), num_vregs_);
+ return vreg_data_[v_reg].value;
+}
+
+uint16_t GvnDeadCodeElimination::VRegChains::FindKillHead(int v_reg, uint16_t cutoff) {
+ uint16_t current_value = this->CurrentValue(v_reg);
+ DCHECK_NE(current_value, kNoValue);
+ uint16_t change = LastChange(v_reg);
+ DCHECK_LT(change, mir_data_.size());
+ DCHECK_GE(change, cutoff);
+ bool match_high_word = (mir_data_[change].vreg_def != v_reg);
+ do {
+ MIRData* data = &mir_data_[change];
+ DCHECK(data->vreg_def == v_reg || data->vreg_def + 1 == v_reg);
+ if (data->vreg_def == v_reg) { // Low word, use prev_value.
+ if (data->prev_value.value == current_value &&
+ match_high_word == data->low_def_over_high_word) {
+ break;
+ }
+ change = data->prev_value.change;
+ } else { // High word, use prev_value_high.
+ if (data->prev_value_high.value == current_value &&
+ match_high_word != data->high_def_over_low_word) {
+ break;
+ }
+ change = data->prev_value_high.change;
+ }
+ if (change < cutoff) {
+ change = kNPos;
+ }
+ } while (change != kNPos);
+ return change;
+}
+
+uint16_t GvnDeadCodeElimination::VRegChains::FindFirstChangeAfter(int v_reg,
+ uint16_t change) const {
+ DCHECK_LT(static_cast<size_t>(v_reg), num_vregs_);
+ DCHECK_LT(change, mir_data_.size());
+ uint16_t result = kNPos;
+ uint16_t search_change = vreg_data_[v_reg].change;
+ while (search_change != kNPos && search_change > change) {
+ result = search_change;
+ search_change = mir_data_[search_change].PrevChange(v_reg);
+ }
+ return result;
+}
+
+void GvnDeadCodeElimination::VRegChains::ReplaceChange(uint16_t old_change, uint16_t new_change) {
+ const MIRData* old_data = GetMIRData(old_change);
+ DCHECK(old_data->has_def);
+ int count = old_data->wide_def ? 2 : 1;
+ for (int v_reg = old_data->vreg_def, end = old_data->vreg_def + count; v_reg != end; ++v_reg) {
+ uint16_t next_change = FindFirstChangeAfter(v_reg, old_change);
+ if (next_change == kNPos) {
+ DCHECK_EQ(vreg_data_[v_reg].change, old_change);
+ vreg_data_[v_reg].change = new_change;
+ } else {
+ DCHECK_EQ(mir_data_[next_change].PrevChange(v_reg), old_change);
+ mir_data_[next_change].SetPrevChange(v_reg, new_change);
+ }
+ }
+}
+
+void GvnDeadCodeElimination::VRegChains::RemoveChange(uint16_t change) {
+ MIRData* data = &mir_data_[change];
+ DCHECK(data->has_def);
+ int count = data->wide_def ? 2 : 1;
+ for (int v_reg = data->vreg_def, end = data->vreg_def + count; v_reg != end; ++v_reg) {
+ uint16_t next_change = FindFirstChangeAfter(v_reg, change);
+ if (next_change == kNPos) {
+ DCHECK_EQ(vreg_data_[v_reg].change, change);
+ vreg_data_[v_reg] = (data->vreg_def == v_reg) ? data->prev_value : data->prev_value_high;
+ } else {
+ DCHECK_EQ(mir_data_[next_change].PrevChange(v_reg), change);
+ mir_data_[next_change].RemovePrevChange(v_reg, data);
+ }
+ }
+}
+
+inline bool GvnDeadCodeElimination::VRegChains::IsTopChange(uint16_t change) const {
+ DCHECK_LT(change, mir_data_.size());
+ const MIRData* data = &mir_data_[change];
+ DCHECK(data->has_def);
+ DCHECK_LT(data->wide_def ? data->vreg_def + 1u : data->vreg_def, num_vregs_);
+ return vreg_data_[data->vreg_def].change == change &&
+ (!data->wide_def || vreg_data_[data->vreg_def + 1u].change == change);
+}
+
+bool GvnDeadCodeElimination::VRegChains::IsSRegUsed(uint16_t first_change, uint16_t last_change,
+ int s_reg) const {
+ DCHECK_LE(first_change, last_change);
+ DCHECK_LE(last_change, mir_data_.size());
+ for (size_t c = first_change; c != last_change; ++c) {
+ SSARepresentation* ssa_rep = mir_data_[c].mir->ssa_rep;
+ for (int i = 0; i != ssa_rep->num_uses; ++i) {
+ if (ssa_rep->uses[i] == s_reg) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+void GvnDeadCodeElimination::VRegChains::RenameSRegUses(uint16_t first_change, uint16_t last_change,
+ int old_s_reg, int new_s_reg, bool wide) {
+ for (size_t c = first_change; c != last_change; ++c) {
+ SSARepresentation* ssa_rep = mir_data_[c].mir->ssa_rep;
+ for (int i = 0; i != ssa_rep->num_uses; ++i) {
+ if (ssa_rep->uses[i] == old_s_reg) {
+ ssa_rep->uses[i] = new_s_reg;
+ if (wide) {
+ ++i;
+ DCHECK_LT(i, ssa_rep->num_uses);
+ ssa_rep->uses[i] = new_s_reg + 1;
+ }
+ }
+ }
+ }
+}
+
+void GvnDeadCodeElimination::VRegChains::RenameVRegUses(uint16_t first_change, uint16_t last_change,
+ int old_s_reg, int old_v_reg,
+ int new_s_reg, int new_v_reg) {
+ for (size_t c = first_change; c != last_change; ++c) {
+ MIR* mir = mir_data_[c].mir;
+ if (IsInstructionBinOp2Addr(mir->dalvikInsn.opcode) &&
+ mir->ssa_rep->uses[0] == old_s_reg && old_v_reg != new_v_reg) {
+ // Rewrite binop_2ADDR with plain binop before doing the register rename.
+ ChangeBinOp2AddrToPlainBinOp(mir);
+ }
+ uint64_t df_attr = MIRGraph::GetDataFlowAttributes(mir);
+ size_t use = 0u;
+#define REPLACE_VREG(REG) \
+ if ((df_attr & DF_U##REG) != 0) { \
+ if (mir->ssa_rep->uses[use] == old_s_reg) { \
+ DCHECK_EQ(mir->dalvikInsn.v##REG, static_cast<uint32_t>(old_v_reg)); \
+ mir->dalvikInsn.v##REG = new_v_reg; \
+ mir->ssa_rep->uses[use] = new_s_reg; \
+ if ((df_attr & DF_##REG##_WIDE) != 0) { \
+ DCHECK_EQ(mir->ssa_rep->uses[use + 1], old_s_reg + 1); \
+ mir->ssa_rep->uses[use + 1] = new_s_reg + 1; \
+ } \
+ } \
+ use += ((df_attr & DF_##REG##_WIDE) != 0) ? 2 : 1; \
+ }
+ REPLACE_VREG(A)
+ REPLACE_VREG(B)
+ REPLACE_VREG(C)
+#undef REPLACE_VREG
+ // We may encounter an out-of-order Phi which we need to ignore, otherwise we should
+ // only be asked to rename registers specified by DF_UA, DF_UB and DF_UC.
+ DCHECK_EQ(use,
+ static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi
+ ? 0u
+ : static_cast<size_t>(mir->ssa_rep->num_uses));
+ }
+}
+
+GvnDeadCodeElimination::GvnDeadCodeElimination(const GlobalValueNumbering* gvn,
+ ScopedArenaAllocator* alloc)
+ : gvn_(gvn),
+ mir_graph_(gvn_->GetMirGraph()),
+ vreg_chains_(mir_graph_->GetNumOfCodeAndTempVRs(), alloc),
+ bb_(nullptr),
+ lvn_(nullptr),
+ no_uses_all_since_(0u),
+ unused_vregs_(new (alloc) ArenaBitVector(alloc, vreg_chains_.NumVRegs(), false)),
+ vregs_to_kill_(new (alloc) ArenaBitVector(alloc, vreg_chains_.NumVRegs(), false)),
+ kill_heads_(alloc->AllocArray<uint16_t>(vreg_chains_.NumVRegs(), kArenaAllocMisc)),
+ changes_to_kill_(alloc->Adapter()),
+ dependent_vregs_(new (alloc) ArenaBitVector(alloc, vreg_chains_.NumVRegs(), false)) {
+ changes_to_kill_.reserve(16u);
+}
+
+void GvnDeadCodeElimination::Apply(BasicBlock* bb) {
+ bb_ = bb;
+ lvn_ = gvn_->GetLvn(bb->id);
+
+ RecordPass();
+ BackwardPass();
+
+ DCHECK_EQ(no_uses_all_since_, 0u);
+ lvn_ = nullptr;
+ bb_ = nullptr;
+}
+
+void GvnDeadCodeElimination::RecordPass() {
+ // Record MIRs with vreg definition data, eliminate single instructions.
+ vreg_chains_.Reset();
+ DCHECK_EQ(no_uses_all_since_, 0u);
+ for (MIR* mir = bb_->first_mir_insn; mir != nullptr; mir = mir->next) {
+ if (RecordMIR(mir)) {
+ RecordPassTryToKillOverwrittenMoveOrMoveSrc();
+ RecordPassTryToKillLastMIR();
+ }
+ }
+}
+
+void GvnDeadCodeElimination::BackwardPass() {
+ // Now process MIRs in reverse order, trying to eliminate them.
+ unused_vregs_->ClearAllBits(); // Implicitly depend on all vregs at the end of BB.
+ while (vreg_chains_.NumMIRs() != 0u) {
+ if (BackwardPassTryToKillLastMIR()) {
+ continue;
+ }
+ BackwardPassProcessLastMIR();
+ }
+}
+
+void GvnDeadCodeElimination::KillMIR(MIRData* data) {
+ DCHECK(!data->must_keep);
+ DCHECK(!data->uses_all_vregs);
+ DCHECK(data->has_def);
+ DCHECK(data->mir->ssa_rep->num_defs == 1 || data->mir->ssa_rep->num_defs == 2);
+
+ KillMIR(data->mir);
+ data->has_def = false;
+ data->is_move = false;
+ data->is_move_src = false;
+}
+
+void GvnDeadCodeElimination::KillMIR(MIR* mir) {
+ mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
+ mir->ssa_rep->num_uses = 0;
+ mir->ssa_rep->num_defs = 0;
+}
+
+void GvnDeadCodeElimination::ChangeBinOp2AddrToPlainBinOp(MIR* mir) {
+ mir->dalvikInsn.vC = mir->dalvikInsn.vB;
+ mir->dalvikInsn.vB = mir->dalvikInsn.vA;
+ mir->dalvikInsn.opcode = static_cast<Instruction::Code>(
+ mir->dalvikInsn.opcode - Instruction::ADD_INT_2ADDR + Instruction::ADD_INT);
+}
+
+MIR* GvnDeadCodeElimination::CreatePhi(int s_reg, bool fp) {
+ int v_reg = mir_graph_->SRegToVReg(s_reg);
+ MIR* phi = mir_graph_->NewMIR();
+ phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpPhi);
+ phi->dalvikInsn.vA = v_reg;
+ phi->offset = bb_->start_offset;
+ phi->m_unit_index = 0; // Arbitrarily assign all Phi nodes to outermost method.
+
+ phi->ssa_rep = static_cast<struct SSARepresentation *>(mir_graph_->GetArena()->Alloc(
+ sizeof(SSARepresentation), kArenaAllocDFInfo));
+
+ mir_graph_->AllocateSSADefData(phi, 1);
+ phi->ssa_rep->defs[0] = s_reg;
+ phi->ssa_rep->fp_def[0] = fp;
+
+ size_t num_uses = bb_->predecessors.size();
+ mir_graph_->AllocateSSAUseData(phi, num_uses);
+ std::fill_n(phi->ssa_rep->fp_use, num_uses, fp);
+ size_t idx = 0u;
+ for (BasicBlockId pred_id : bb_->predecessors) {
+ BasicBlock* pred_bb = mir_graph_->GetBasicBlock(pred_id);
+ DCHECK(pred_bb != nullptr);
+ phi->ssa_rep->uses[idx] = pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg];
+ DCHECK_NE(phi->ssa_rep->uses[idx], INVALID_SREG);
+ idx++;
+ }
+
+ phi->meta.phi_incoming = static_cast<BasicBlockId*>(mir_graph_->GetArena()->Alloc(
+ sizeof(BasicBlockId) * num_uses, kArenaAllocDFInfo));
+ std::copy(bb_->predecessors.begin(), bb_->predecessors.end(), phi->meta.phi_incoming);
+ bb_->PrependMIR(phi);
+ return phi;
+}
+
+MIR* GvnDeadCodeElimination::RenameSRegDefOrCreatePhi(uint16_t def_change, uint16_t last_change,
+ MIR* mir_to_kill) {
+ DCHECK(mir_to_kill->ssa_rep->num_defs == 1 || mir_to_kill->ssa_rep->num_defs == 2);
+ bool wide = (mir_to_kill->ssa_rep->num_defs != 1);
+ int new_s_reg = mir_to_kill->ssa_rep->defs[0];
+
+ // Just before we kill mir_to_kill, we need to replace the previous SSA reg assigned to the
+ // same dalvik reg to keep consistency with subsequent instructions. However, if there's no
+  // defining MIR for that dalvik reg, the preserved value must come from the predecessors
+ // and we need to create a new Phi (a degenerate Phi if there's only a single predecessor).
+ if (def_change == kNPos) {
+ bool fp = mir_to_kill->ssa_rep->fp_def[0];
+ if (wide) {
+ DCHECK_EQ(new_s_reg + 1, mir_to_kill->ssa_rep->defs[1]);
+ DCHECK_EQ(fp, mir_to_kill->ssa_rep->fp_def[1]);
+ DCHECK_EQ(mir_graph_->SRegToVReg(new_s_reg) + 1, mir_graph_->SRegToVReg(new_s_reg + 1));
+ CreatePhi(new_s_reg + 1, fp); // High word Phi.
+ }
+ return CreatePhi(new_s_reg, fp);
+ } else {
+ DCHECK_LT(def_change, last_change);
+ DCHECK_LE(last_change, vreg_chains_.NumMIRs());
+ MIRData* def_data = vreg_chains_.GetMIRData(def_change);
+ DCHECK(def_data->has_def);
+ int old_s_reg = def_data->mir->ssa_rep->defs[0];
+ DCHECK_NE(old_s_reg, new_s_reg);
+ DCHECK_EQ(mir_graph_->SRegToVReg(old_s_reg), mir_graph_->SRegToVReg(new_s_reg));
+ def_data->mir->ssa_rep->defs[0] = new_s_reg;
+ if (wide) {
+ if (static_cast<int>(def_data->mir->dalvikInsn.opcode) == kMirOpPhi) {
+ // Currently the high word Phi is always located after the low word Phi.
+ MIR* phi_high = def_data->mir->next;
+ DCHECK(phi_high != nullptr && static_cast<int>(phi_high->dalvikInsn.opcode) == kMirOpPhi);
+ DCHECK_EQ(phi_high->ssa_rep->defs[0], old_s_reg + 1);
+ phi_high->ssa_rep->defs[0] = new_s_reg + 1;
+ } else {
+ DCHECK_EQ(def_data->mir->ssa_rep->defs[1], old_s_reg + 1);
+ def_data->mir->ssa_rep->defs[1] = new_s_reg + 1;
+ }
+ }
+ vreg_chains_.RenameSRegUses(def_change + 1u, last_change, old_s_reg, new_s_reg, wide);
+ return nullptr;
+ }
+}
+
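Concretely, in illustrative MIR (not from the patch), for the def_change != kNPos case:

    //   add-int v0, v2, v3   // def_change: defines old_s_reg
    //   mul-int v5, v0, v0   // use of old_s_reg; renamed to new_s_reg
    //   add-int v0, v2, v3   // mir_to_kill: defines new_s_reg, same value name
    //
    // The first add-int takes over new_s_reg and RenameSRegUses() fixes the
    // uses in between, so the second add-int can be killed without breaking
    // later references to new_s_reg. With def_change == kNPos the value comes
    // from the predecessors instead, and CreatePhi() materializes it (a
    // degenerate Phi if there is a single predecessor).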
+void GvnDeadCodeElimination::BackwardPassProcessLastMIR() {
+ MIRData* data = vreg_chains_.LastMIRData();
+ if (data->uses_all_vregs) {
+ DCHECK(data->must_keep);
+ unused_vregs_->ClearAllBits();
+ DCHECK_EQ(no_uses_all_since_, vreg_chains_.NumMIRs());
+ --no_uses_all_since_;
+ while (no_uses_all_since_ != 0u &&
+ !vreg_chains_.GetMIRData(no_uses_all_since_ - 1u)->uses_all_vregs) {
+ --no_uses_all_since_;
+ }
+ } else {
+ if (data->has_def) {
+ unused_vregs_->SetBit(data->vreg_def);
+ if (data->wide_def) {
+ unused_vregs_->SetBit(data->vreg_def + 1);
+ }
+ }
+ for (int i = 0, num_uses = data->mir->ssa_rep->num_uses; i != num_uses; ++i) {
+ int v_reg = mir_graph_->SRegToVReg(data->mir->ssa_rep->uses[i]);
+ unused_vregs_->ClearBit(v_reg);
+ }
+ }
+ vreg_chains_.RemoveLastMIRData();
+}
+
+void GvnDeadCodeElimination::RecordPassKillMoveByRenamingSrcDef(uint16_t src_change,
+ uint16_t move_change) {
+ DCHECK_LT(src_change, move_change);
+ MIRData* src_data = vreg_chains_.GetMIRData(src_change);
+ MIRData* move_data = vreg_chains_.GetMIRData(move_change);
+ DCHECK(src_data->is_move_src);
+ DCHECK_EQ(src_data->wide_def, move_data->wide_def);
+ DCHECK(move_data->prev_value.change == kNPos || move_data->prev_value.change <= src_change);
+ DCHECK(!move_data->wide_def || move_data->prev_value_high.change == kNPos ||
+ move_data->prev_value_high.change <= src_change);
+
+ int old_s_reg = src_data->mir->ssa_rep->defs[0];
+ // NOTE: old_s_reg may differ from move_data->mir->ssa_rep->uses[0]; value names must match.
+ int new_s_reg = move_data->mir->ssa_rep->defs[0];
+ DCHECK_NE(old_s_reg, new_s_reg);
+
+ if (IsInstructionBinOp2Addr(src_data->mir->dalvikInsn.opcode) &&
+ src_data->vreg_def != move_data->vreg_def) {
+ // Rewrite binop_2ADDR with plain binop before doing the register rename.
+ ChangeBinOp2AddrToPlainBinOp(src_data->mir);
+ }
+ // Remove src_change from the vreg chain(s).
+ vreg_chains_.RemoveChange(src_change);
+ // Replace the move_change with the src_change, copying all necessary data.
+ src_data->is_move_src = move_data->is_move_src;
+ src_data->low_def_over_high_word = move_data->low_def_over_high_word;
+ src_data->high_def_over_low_word = move_data->high_def_over_low_word;
+ src_data->vreg_def = move_data->vreg_def;
+ src_data->prev_value = move_data->prev_value;
+ src_data->prev_value_high = move_data->prev_value_high;
+ src_data->mir->dalvikInsn.vA = move_data->vreg_def;
+ src_data->mir->ssa_rep->defs[0] = new_s_reg;
+ if (move_data->wide_def) {
+ DCHECK_EQ(src_data->mir->ssa_rep->defs[1], old_s_reg + 1);
+ src_data->mir->ssa_rep->defs[1] = new_s_reg + 1;
+ }
+ vreg_chains_.ReplaceChange(move_change, src_change);
+
+ // Rename uses and kill the move.
+ vreg_chains_.RenameVRegUses(src_change + 1u, vreg_chains_.NumMIRs(),
+ old_s_reg, mir_graph_->SRegToVReg(old_s_reg),
+ new_s_reg, mir_graph_->SRegToVReg(new_s_reg));
+ KillMIR(move_data);
+}
+
+void GvnDeadCodeElimination::RecordPassTryToKillOverwrittenMoveOrMoveSrc(uint16_t check_change) {
+ MIRData* data = vreg_chains_.GetMIRData(check_change);
+ DCHECK(data->is_move || data->is_move_src);
+ int32_t dest_s_reg = data->mir->ssa_rep->defs[0];
+
+ if (data->is_move) {
+ // Check if source vreg has changed since the MOVE.
+ int32_t src_s_reg = data->mir->ssa_rep->uses[0];
+ uint32_t src_v_reg = mir_graph_->SRegToVReg(src_s_reg);
+ uint16_t src_change = vreg_chains_.FindFirstChangeAfter(src_v_reg, check_change);
+ bool wide = data->wide_def;
+ if (wide) {
+ uint16_t src_change_high = vreg_chains_.FindFirstChangeAfter(src_v_reg + 1, check_change);
+ if (src_change_high != kNPos && (src_change == kNPos || src_change_high < src_change)) {
+ src_change = src_change_high;
+ }
+ }
+ if (src_change == kNPos ||
+ !vreg_chains_.IsSRegUsed(src_change + 1u, vreg_chains_.NumMIRs(), dest_s_reg)) {
+ // We can simply change all uses of dest to src.
+ size_t rename_end = (src_change != kNPos) ? src_change + 1u : vreg_chains_.NumMIRs();
+ vreg_chains_.RenameVRegUses(check_change + 1u, rename_end,
+ dest_s_reg, mir_graph_->SRegToVReg(dest_s_reg),
+ src_s_reg, mir_graph_->SRegToVReg(src_s_reg));
+
+ // Now, remove the MOVE from the vreg chain(s) and kill it.
+ vreg_chains_.RemoveChange(check_change);
+ KillMIR(data);
+ return;
+ }
+ }
+
+ if (data->is_move_src) {
+ // Try to find a MOVE to a vreg that wasn't changed since check_change.
+ uint16_t value_name =
+ data->wide_def ? lvn_->GetSregValueWide(dest_s_reg) : lvn_->GetSregValue(dest_s_reg);
+ for (size_t c = check_change + 1u, size = vreg_chains_.NumMIRs(); c != size; ++c) {
+ MIRData* d = vreg_chains_.GetMIRData(c);
+ if (d->is_move && d->wide_def == data->wide_def &&
+ (d->prev_value.change == kNPos || d->prev_value.change <= check_change) &&
+ (!d->wide_def ||
+ d->prev_value_high.change == kNPos || d->prev_value_high.change <= check_change)) {
+ // Compare value names to find move to move.
+ int32_t src_s_reg = d->mir->ssa_rep->uses[0];
+ uint16_t src_name =
+ (d->wide_def ? lvn_->GetSregValueWide(src_s_reg) : lvn_->GetSregValue(src_s_reg));
+ if (value_name == src_name) {
+ RecordPassKillMoveByRenamingSrcDef(check_change, c);
+ return;
+ }
+ }
+ }
+ }
+}
+
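The two cases handled above, in illustrative MIR (not from the patch):

    // is_move -- the overwritten def was the destination of a move:
    //   move v0, v1          // check_change; v1 is not redefined below
    //   add-int v2, v0, v0   // rewritten to: add-int v2, v1, v1
    //   const v0, #0         // the overwriting def that triggered the check
    // After the uses of v0 are renamed to v1, the move is dead and is killed.
    //
    // is_move_src -- the overwritten def fed a move with an equal value name:
    //   add-int v0, v2, v3   // check_change; becomes: add-int v1, v2, v3
    //   move v1, v0          // killed by RecordPassKillMoveByRenamingSrcDef()
    //   const v0, #0         // the overwriting def that triggered the check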
+void GvnDeadCodeElimination::RecordPassTryToKillOverwrittenMoveOrMoveSrc() {
+  // Check if we're overwriting the result of a move or the definition of a move's source.
+ // For MOVE_WIDE, we may be overwriting partially; if that's the case, check that the other
+ // word wasn't previously overwritten - we would have tried to rename back then.
+ MIRData* data = vreg_chains_.LastMIRData();
+ if (!data->has_def) {
+ return;
+ }
+ // NOTE: Instructions such as new-array implicitly use all vregs (if they throw) but they can
+ // define a move source which can be renamed. Therefore we allow the checked change to be the
+ // change before no_uses_all_since_. This has no effect on moves as they never use all vregs.
+ if (data->prev_value.change != kNPos && data->prev_value.change + 1u >= no_uses_all_since_) {
+ MIRData* check_data = vreg_chains_.GetMIRData(data->prev_value.change);
+ bool try_to_kill = false;
+ if (!check_data->is_move && !check_data->is_move_src) {
+ DCHECK(!try_to_kill);
+ } else if (!check_data->wide_def) {
+ // Narrow move; always fully overwritten by the last MIR.
+ try_to_kill = true;
+ } else if (data->low_def_over_high_word) {
+ // Overwriting only the high word; is the low word still valid?
+ DCHECK_EQ(check_data->vreg_def + 1u, data->vreg_def);
+ if (vreg_chains_.LastChange(check_data->vreg_def) == data->prev_value.change) {
+ try_to_kill = true;
+ }
+ } else if (!data->wide_def) {
+ // Overwriting only the low word, is the high word still valid?
+ if (vreg_chains_.LastChange(data->vreg_def + 1) == data->prev_value.change) {
+ try_to_kill = true;
+ }
+ } else {
+ // Overwriting both words; was the high word still from the same move?
+ if (data->prev_value_high.change == data->prev_value.change) {
+ try_to_kill = true;
+ }
+ }
+ if (try_to_kill) {
+ RecordPassTryToKillOverwrittenMoveOrMoveSrc(data->prev_value.change);
+ }
+ }
+ if (data->wide_def && data->high_def_over_low_word &&
+ data->prev_value_high.change != kNPos &&
+ data->prev_value_high.change + 1u >= no_uses_all_since_) {
+ MIRData* check_data = vreg_chains_.GetMIRData(data->prev_value_high.change);
+ bool try_to_kill = false;
+ if (!check_data->is_move && !check_data->is_move_src) {
+ DCHECK(!try_to_kill);
+ } else if (!check_data->wide_def) {
+ // Narrow move; always fully overwritten by the last MIR.
+ try_to_kill = true;
+ } else if (vreg_chains_.LastChange(check_data->vreg_def + 1) ==
+ data->prev_value_high.change) {
+ // High word is still valid.
+ try_to_kill = true;
+ }
+ if (try_to_kill) {
+ RecordPassTryToKillOverwrittenMoveOrMoveSrc(data->prev_value_high.change);
+ }
+ }
+}
+
+void GvnDeadCodeElimination::RecordPassTryToKillLastMIR() {
+ MIRData* last_data = vreg_chains_.LastMIRData();
+ if (last_data->must_keep) {
+ return;
+ }
+ if (UNLIKELY(!last_data->has_def)) {
+ // Must be an eliminated MOVE. Drop its data and data of all eliminated MIRs before it.
+ vreg_chains_.RemoveTrailingNops();
+ return;
+ }
+
+ // Try to kill a sequence of consecutive definitions of the same vreg. Allow mixing
+ // wide and non-wide defs; consider high word dead if low word has been overwritten.
+ uint16_t current_value = vreg_chains_.CurrentValue(last_data->vreg_def);
+ uint16_t change = vreg_chains_.NumMIRs() - 1u;
+ MIRData* data = last_data;
+ while (data->prev_value.value != current_value) {
+ --change;
+ if (data->prev_value.change == kNPos || data->prev_value.change != change) {
+ return;
+ }
+ data = vreg_chains_.GetMIRData(data->prev_value.change);
+ if (data->must_keep || !data->has_def || data->vreg_def != last_data->vreg_def) {
+ return;
+ }
+ }
+
+ bool wide = last_data->wide_def;
+ if (wide) {
+ // Check that the low word is valid.
+ if (data->low_def_over_high_word) {
+ return;
+ }
+ // Check that the high word is valid.
+ MIRData* high_data = data;
+ if (!high_data->wide_def) {
+ uint16_t high_change = vreg_chains_.FindFirstChangeAfter(data->vreg_def + 1, change);
+ DCHECK_NE(high_change, kNPos);
+ high_data = vreg_chains_.GetMIRData(high_change);
+ DCHECK_EQ(high_data->vreg_def, data->vreg_def);
+ }
+ if (high_data->prev_value_high.value != current_value || high_data->high_def_over_low_word) {
+ return;
+ }
+ }
+
+ MIR* phi = RenameSRegDefOrCreatePhi(data->prev_value.change, change, last_data->mir);
+ for (size_t i = 0, count = vreg_chains_.NumMIRs() - change; i != count; ++i) {
+ KillMIR(vreg_chains_.LastMIRData()->mir);
+ vreg_chains_.RemoveLastMIRData();
+ }
+ if (phi != nullptr) {
+ // Though the Phi has been added to the beginning, we can put the MIRData at the end.
+ vreg_chains_.AddMIRWithDef(phi, phi->dalvikInsn.vA, wide, current_value);
+ // Reset the previous value to avoid eventually eliminating the Phi itself (unless unused).
+ last_data = vreg_chains_.LastMIRData();
+ last_data->prev_value.value = kNoValue;
+ last_data->prev_value_high.value = kNoValue;
+ }
+}
+
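An illustrative sequence this catches (not from the patch):

    //   ...                  // v0 currently holds value name N
    //   const v0, #0         // start of consecutive defs of v0
    //   add-int v0, v2, v3   // last MIR; GVN proves the result is N again
    //
    // The tail of redefinitions is a no-op: v0 ends up holding exactly the
    // value it already had, so both defs are killed once the earlier def is
    // renamed (or a Phi created) by RenameSRegDefOrCreatePhi().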
+uint16_t GvnDeadCodeElimination::FindChangesToKill(uint16_t first_change, uint16_t last_change) {
+ // Process dependencies for changes in range [first_change, last_change) and record all
+ // changes that we need to kill. Return kNPos if there's a dependent change that must be
+ // kept unconditionally; otherwise the end of the range processed before encountering
+ // a change that defines a dalvik reg that we need to keep (last_change on full success).
+ changes_to_kill_.clear();
+ dependent_vregs_->ClearAllBits();
+ for (size_t change = first_change; change != last_change; ++change) {
+ MIRData* data = vreg_chains_.GetMIRData(change);
+ DCHECK(!data->uses_all_vregs);
+ bool must_not_depend = data->must_keep;
+ bool depends = false;
+ // Check if the MIR defines a vreg we're trying to eliminate.
+ if (data->has_def && vregs_to_kill_->IsBitSet(data->vreg_def)) {
+ if (change < kill_heads_[data->vreg_def]) {
+ must_not_depend = true;
+ } else {
+ depends = true;
+ }
+ }
+ if (data->has_def && data->wide_def && vregs_to_kill_->IsBitSet(data->vreg_def + 1)) {
+ if (change < kill_heads_[data->vreg_def + 1]) {
+ must_not_depend = true;
+ } else {
+ depends = true;
+ }
+ }
+ if (!depends) {
+ // Check for dependency through SSA reg uses.
+ SSARepresentation* ssa_rep = data->mir->ssa_rep;
+ for (int i = 0; i != ssa_rep->num_uses; ++i) {
+ if (dependent_vregs_->IsBitSet(mir_graph_->SRegToVReg(ssa_rep->uses[i]))) {
+ depends = true;
+ break;
+ }
+ }
+ }
+ // Now check if we can eliminate the insn if we need to.
+ if (depends && must_not_depend) {
+ return kNPos;
+ }
+ if (depends && data->has_def &&
+ vreg_chains_.IsTopChange(change) && !vregs_to_kill_->IsBitSet(data->vreg_def) &&
+ !unused_vregs_->IsBitSet(data->vreg_def) &&
+ (!data->wide_def || !unused_vregs_->IsBitSet(data->vreg_def + 1))) {
+ // This is a top change but neither unnecessary nor one of the top kill changes.
+ return change;
+ }
+ // Finally, update the data.
+ if (depends) {
+ changes_to_kill_.push_back(change);
+ if (data->has_def) {
+ dependent_vregs_->SetBit(data->vreg_def);
+ if (data->wide_def) {
+ dependent_vregs_->SetBit(data->vreg_def + 1);
+ }
+ }
+ } else {
+ if (data->has_def) {
+ dependent_vregs_->ClearBit(data->vreg_def);
+ if (data->wide_def) {
+ dependent_vregs_->ClearBit(data->vreg_def + 1);
+ }
+ }
+ }
+ }
+ return last_change;
+}
+
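A standalone model of this dependency scan (illustrative, not ART code; kill heads, wide
registers and the top-change checks are omitted): starting from the vregs whose defs we
want to kill, any later change reading a tainted vreg becomes dependent and must be
killed as well, unless it is must_keep, which aborts the whole attempt:

    #include <bitset>
    #include <cstddef>
    #include <vector>

    struct Chg { int def_vreg; std::vector<int> use_vregs; bool must_keep; };

    // Returns false if a change that must be kept depends on a killed value;
    // otherwise fills to_kill with the dependent changes, in order.
    bool FindChangesToKill(const std::vector<Chg>& changes, std::bitset<16> kill_defs,
                           std::vector<std::size_t>* to_kill) {
      std::bitset<16> dependent;  // vregs currently holding to-be-killed values
      for (std::size_t c = 0; c != changes.size(); ++c) {
        bool depends = kill_defs.test(changes[c].def_vreg);
        for (int v : changes[c].use_vregs) {
          depends = depends || dependent.test(v);
        }
        if (depends && changes[c].must_keep) {
          return false;  // a must-keep change depends on a killed value; abort
        }
        if (depends) {
          to_kill->push_back(c);
          dependent.set(changes[c].def_vreg);
        } else {
          dependent.reset(changes[c].def_vreg);  // overwritten by a kept change
        }
      }
      return true;
    }

    int main() {
      // Kill the def of v0; change 1 reads v0 and becomes dependent; change 2
      // (must_keep) reads only v3 and is unaffected.
      std::vector<Chg> changes = {{0, {}, false}, {1, {0}, false}, {2, {3}, true}};
      std::bitset<16> kill_defs;
      kill_defs.set(0);
      std::vector<std::size_t> to_kill;
      bool ok = FindChangesToKill(changes, kill_defs, &to_kill);
      return (ok && to_kill.size() == 2u) ? 0 : 1;
    }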
+void GvnDeadCodeElimination::BackwardPassTryToKillRevertVRegs() {
+}
+
+bool GvnDeadCodeElimination::BackwardPassTryToKillLastMIR() {
+ MIRData* last_data = vreg_chains_.LastMIRData();
+ if (last_data->must_keep) {
+ return false;
+ }
+ DCHECK(!last_data->uses_all_vregs);
+ if (!last_data->has_def) {
+ // Previously eliminated.
+ DCHECK_EQ(static_cast<int>(last_data->mir->dalvikInsn.opcode), static_cast<int>(kMirOpNop));
+ vreg_chains_.RemoveTrailingNops();
+ return true;
+ }
+ if (unused_vregs_->IsBitSet(last_data->vreg_def) ||
+ (last_data->wide_def && unused_vregs_->IsBitSet(last_data->vreg_def + 1))) {
+ if (last_data->wide_def) {
+ // For wide defs, one of the vregs may still be considered needed, fix that.
+ unused_vregs_->SetBit(last_data->vreg_def);
+ unused_vregs_->SetBit(last_data->vreg_def + 1);
+ }
+ KillMIR(last_data->mir);
+ vreg_chains_.RemoveLastMIRData();
+ return true;
+ }
+
+ vregs_to_kill_->ClearAllBits();
+ size_t num_mirs = vreg_chains_.NumMIRs();
+ DCHECK_NE(num_mirs, 0u);
+ uint16_t kill_change = num_mirs - 1u;
+ uint16_t start = num_mirs;
+ size_t num_killed_top_changes = 0u;
+ while (num_killed_top_changes != kMaxNumTopChangesToKill &&
+ kill_change != kNPos && kill_change != num_mirs) {
+ ++num_killed_top_changes;
+
+ DCHECK(vreg_chains_.IsTopChange(kill_change));
+ MIRData* data = vreg_chains_.GetMIRData(kill_change);
+ int count = data->wide_def ? 2 : 1;
+ for (int v_reg = data->vreg_def, end = data->vreg_def + count; v_reg != end; ++v_reg) {
+ uint16_t kill_head = vreg_chains_.FindKillHead(v_reg, no_uses_all_since_);
+ if (kill_head == kNPos) {
+ return false;
+ }
+ kill_heads_[v_reg] = kill_head;
+ vregs_to_kill_->SetBit(v_reg);
+ start = std::min(start, kill_head);
+ }
+ DCHECK_LT(start, vreg_chains_.NumMIRs());
+
+ kill_change = FindChangesToKill(start, num_mirs);
+ }
+
+ if (kill_change != num_mirs) {
+ return false;
+ }
+
+ // Kill all MIRs marked as dependent.
+ for (uint32_t v_reg : vregs_to_kill_->Indexes()) {
+ // Rename s_regs or create Phi only once for each MIR (only for low word).
+ MIRData* data = vreg_chains_.GetMIRData(vreg_chains_.LastChange(v_reg));
+ DCHECK(data->has_def);
+ if (data->vreg_def == v_reg) {
+ MIRData* kill_head_data = vreg_chains_.GetMIRData(kill_heads_[v_reg]);
+ RenameSRegDefOrCreatePhi(kill_head_data->PrevChange(v_reg), num_mirs, data->mir);
+ } else {
+ DCHECK_EQ(data->vreg_def + 1u, v_reg);
+ DCHECK_EQ(vreg_chains_.GetMIRData(kill_heads_[v_reg - 1u])->PrevChange(v_reg - 1u),
+ vreg_chains_.GetMIRData(kill_heads_[v_reg])->PrevChange(v_reg));
+ }
+ }
+ unused_vregs_->Union(vregs_to_kill_);
+ for (auto it = changes_to_kill_.rbegin(), end = changes_to_kill_.rend(); it != end; ++it) {
+ MIRData* data = vreg_chains_.GetMIRData(*it);
+ DCHECK(!data->must_keep);
+ DCHECK(data->has_def);
+ vreg_chains_.RemoveChange(*it);
+ KillMIR(data);
+ }
+
+ vreg_chains_.RemoveTrailingNops();
+ return true;
+}
+
+bool GvnDeadCodeElimination::RecordMIR(MIR* mir) {
+ bool must_keep = false;
+ bool uses_all_vregs = false;
+ bool is_move = false;
+ uint16_t opcode = mir->dalvikInsn.opcode;
+ switch (opcode) {
+ case kMirOpPhi: {
+      // We can't recognize a wide variable in a Phi from num_defs == 2, because a wide
+      // value gets two separate Phis instead.
+ DCHECK_EQ(mir->ssa_rep->num_defs, 1);
+ int s_reg = mir->ssa_rep->defs[0];
+ bool wide = false;
+ uint16_t new_value = lvn_->GetSregValue(s_reg);
+ if (new_value == kNoValue) {
+ wide = true;
+ new_value = lvn_->GetSregValueWide(s_reg);
+ if (new_value == kNoValue) {
+ return false; // Ignore the high word Phi.
+ }
+ }
+
+ int v_reg = mir_graph_->SRegToVReg(s_reg);
+ DCHECK_EQ(vreg_chains_.CurrentValue(v_reg), kNoValue); // No previous def for v_reg.
+ if (wide) {
+ DCHECK_EQ(vreg_chains_.CurrentValue(v_reg + 1), kNoValue);
+ }
+ vreg_chains_.AddMIRWithDef(mir, v_reg, wide, new_value);
+ return true; // Avoid the common processing.
+ }
+
+ case kMirOpNop:
+ case Instruction::NOP:
+ // Don't record NOPs.
+ return false;
+
+ case kMirOpCheck:
+ must_keep = true;
+ uses_all_vregs = true;
+ break;
+
+ case Instruction::RETURN_VOID:
+ case Instruction::RETURN:
+ case Instruction::RETURN_OBJECT:
+ case Instruction::RETURN_WIDE:
+ case Instruction::GOTO:
+ case Instruction::GOTO_16:
+ case Instruction::GOTO_32:
+ case Instruction::PACKED_SWITCH:
+ case Instruction::SPARSE_SWITCH:
+ case Instruction::IF_EQ:
+ case Instruction::IF_NE:
+ case Instruction::IF_LT:
+ case Instruction::IF_GE:
+ case Instruction::IF_GT:
+ case Instruction::IF_LE:
+ case Instruction::IF_EQZ:
+ case Instruction::IF_NEZ:
+ case Instruction::IF_LTZ:
+ case Instruction::IF_GEZ:
+ case Instruction::IF_GTZ:
+ case Instruction::IF_LEZ:
+ case kMirOpFusedCmplFloat:
+ case kMirOpFusedCmpgFloat:
+ case kMirOpFusedCmplDouble:
+ case kMirOpFusedCmpgDouble:
+ case kMirOpFusedCmpLong:
+ must_keep = true;
+ uses_all_vregs = true; // Keep the implicit dependencies on all vregs.
+ break;
+
+ case Instruction::CONST_CLASS:
+ case Instruction::CONST_STRING:
+ case Instruction::CONST_STRING_JUMBO:
+      // NOTE: We're currently treating CONST_CLASS, CONST_STRING and CONST_STRING_JUMBO
+      // as throwing, but we could conceivably try to eliminate those exceptions if we're
+      // retrieving the class/string repeatedly.
+ must_keep = true;
+ uses_all_vregs = true;
+ break;
+
+ case Instruction::MONITOR_ENTER:
+ case Instruction::MONITOR_EXIT:
+      // We can actually try to optimize across the acquire operation of MONITOR_ENTER;
+      // the value names provided by GVN reflect the possible changes to memory visibility.
+ // NOTE: In ART, MONITOR_ENTER and MONITOR_EXIT can throw only NPE.
+ must_keep = true;
+ uses_all_vregs = (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) == 0;
+ break;
+
+ case Instruction::INVOKE_DIRECT:
+ case Instruction::INVOKE_DIRECT_RANGE:
+ case Instruction::INVOKE_VIRTUAL:
+ case Instruction::INVOKE_VIRTUAL_RANGE:
+ case Instruction::INVOKE_SUPER:
+ case Instruction::INVOKE_SUPER_RANGE:
+ case Instruction::INVOKE_INTERFACE:
+ case Instruction::INVOKE_INTERFACE_RANGE:
+ case Instruction::INVOKE_STATIC:
+ case Instruction::INVOKE_STATIC_RANGE:
+ case Instruction::CHECK_CAST:
+ case Instruction::THROW:
+ case Instruction::FILLED_NEW_ARRAY:
+ case Instruction::FILLED_NEW_ARRAY_RANGE:
+ case Instruction::FILL_ARRAY_DATA:
+ must_keep = true;
+ uses_all_vregs = true;
+ break;
+
+ case Instruction::NEW_INSTANCE:
+ case Instruction::NEW_ARRAY:
+ must_keep = true;
+ uses_all_vregs = true;
+ break;
+
+ case kMirOpNullCheck:
+ DCHECK_EQ(mir->ssa_rep->num_uses, 1);
+ if ((mir->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0) {
+ mir->ssa_rep->num_uses = 0;
+ mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
+ return false;
+ }
+ must_keep = true;
+ uses_all_vregs = true;
+ break;
+
+ case Instruction::MOVE_RESULT:
+ case Instruction::MOVE_RESULT_OBJECT:
+ case Instruction::MOVE_RESULT_WIDE:
+ break;
+
+ case Instruction::INSTANCE_OF:
+ break;
+
+ case Instruction::MOVE_EXCEPTION:
+ must_keep = true;
+ break;
+
+ case kMirOpCopy:
+ case Instruction::MOVE:
+ case Instruction::MOVE_FROM16:
+ case Instruction::MOVE_16:
+ case Instruction::MOVE_WIDE:
+ case Instruction::MOVE_WIDE_FROM16:
+ case Instruction::MOVE_WIDE_16:
+ case Instruction::MOVE_OBJECT:
+ case Instruction::MOVE_OBJECT_FROM16:
+ case Instruction::MOVE_OBJECT_16: {
+ is_move = true;
+ // If the MIR defining src vreg is known, allow renaming all uses of src vreg to dest vreg
+ // while updating the defining MIR to directly define dest vreg. However, changing Phi's
+ // def this way doesn't work without changing MIRs in other BBs.
+ int src_v_reg = mir_graph_->SRegToVReg(mir->ssa_rep->uses[0]);
+ int src_change = vreg_chains_.LastChange(src_v_reg);
+ if (src_change != kNPos) {
+ MIRData* src_data = vreg_chains_.GetMIRData(src_change);
+ if (static_cast<int>(src_data->mir->dalvikInsn.opcode) != kMirOpPhi) {
+ src_data->is_move_src = true;
+ }
+ }
+ break;
+ }
+
+ case Instruction::CONST_4:
+ case Instruction::CONST_16:
+ case Instruction::CONST:
+ case Instruction::CONST_HIGH16:
+ case Instruction::CONST_WIDE_16:
+ case Instruction::CONST_WIDE_32:
+ case Instruction::CONST_WIDE:
+ case Instruction::CONST_WIDE_HIGH16:
+ case Instruction::ARRAY_LENGTH:
+ case Instruction::CMPL_FLOAT:
+ case Instruction::CMPG_FLOAT:
+ case Instruction::CMPL_DOUBLE:
+ case Instruction::CMPG_DOUBLE:
+ case Instruction::CMP_LONG:
+ case Instruction::NEG_INT:
+ case Instruction::NOT_INT:
+ case Instruction::NEG_LONG:
+ case Instruction::NOT_LONG:
+ case Instruction::NEG_FLOAT:
+ case Instruction::NEG_DOUBLE:
+ case Instruction::INT_TO_LONG:
+ case Instruction::INT_TO_FLOAT:
+ case Instruction::INT_TO_DOUBLE:
+ case Instruction::LONG_TO_INT:
+ case Instruction::LONG_TO_FLOAT:
+ case Instruction::LONG_TO_DOUBLE:
+ case Instruction::FLOAT_TO_INT:
+ case Instruction::FLOAT_TO_LONG:
+ case Instruction::FLOAT_TO_DOUBLE:
+ case Instruction::DOUBLE_TO_INT:
+ case Instruction::DOUBLE_TO_LONG:
+ case Instruction::DOUBLE_TO_FLOAT:
+ case Instruction::INT_TO_BYTE:
+ case Instruction::INT_TO_CHAR:
+ case Instruction::INT_TO_SHORT:
+ case Instruction::ADD_INT:
+ case Instruction::SUB_INT:
+ case Instruction::MUL_INT:
+ case Instruction::AND_INT:
+ case Instruction::OR_INT:
+ case Instruction::XOR_INT:
+ case Instruction::SHL_INT:
+ case Instruction::SHR_INT:
+ case Instruction::USHR_INT:
+ case Instruction::ADD_LONG:
+ case Instruction::SUB_LONG:
+ case Instruction::MUL_LONG:
+ case Instruction::AND_LONG:
+ case Instruction::OR_LONG:
+ case Instruction::XOR_LONG:
+ case Instruction::SHL_LONG:
+ case Instruction::SHR_LONG:
+ case Instruction::USHR_LONG:
+ case Instruction::ADD_FLOAT:
+ case Instruction::SUB_FLOAT:
+ case Instruction::MUL_FLOAT:
+ case Instruction::DIV_FLOAT:
+ case Instruction::REM_FLOAT:
+ case Instruction::ADD_DOUBLE:
+ case Instruction::SUB_DOUBLE:
+ case Instruction::MUL_DOUBLE:
+ case Instruction::DIV_DOUBLE:
+ case Instruction::REM_DOUBLE:
+ case Instruction::ADD_INT_2ADDR:
+ case Instruction::SUB_INT_2ADDR:
+ case Instruction::MUL_INT_2ADDR:
+ case Instruction::AND_INT_2ADDR:
+ case Instruction::OR_INT_2ADDR:
+ case Instruction::XOR_INT_2ADDR:
+ case Instruction::SHL_INT_2ADDR:
+ case Instruction::SHR_INT_2ADDR:
+ case Instruction::USHR_INT_2ADDR:
+ case Instruction::ADD_LONG_2ADDR:
+ case Instruction::SUB_LONG_2ADDR:
+ case Instruction::MUL_LONG_2ADDR:
+ case Instruction::AND_LONG_2ADDR:
+ case Instruction::OR_LONG_2ADDR:
+ case Instruction::XOR_LONG_2ADDR:
+ case Instruction::SHL_LONG_2ADDR:
+ case Instruction::SHR_LONG_2ADDR:
+ case Instruction::USHR_LONG_2ADDR:
+ case Instruction::ADD_FLOAT_2ADDR:
+ case Instruction::SUB_FLOAT_2ADDR:
+ case Instruction::MUL_FLOAT_2ADDR:
+ case Instruction::DIV_FLOAT_2ADDR:
+ case Instruction::REM_FLOAT_2ADDR:
+ case Instruction::ADD_DOUBLE_2ADDR:
+ case Instruction::SUB_DOUBLE_2ADDR:
+ case Instruction::MUL_DOUBLE_2ADDR:
+ case Instruction::DIV_DOUBLE_2ADDR:
+ case Instruction::REM_DOUBLE_2ADDR:
+ case Instruction::ADD_INT_LIT16:
+ case Instruction::RSUB_INT:
+ case Instruction::MUL_INT_LIT16:
+ case Instruction::AND_INT_LIT16:
+ case Instruction::OR_INT_LIT16:
+ case Instruction::XOR_INT_LIT16:
+ case Instruction::ADD_INT_LIT8:
+ case Instruction::RSUB_INT_LIT8:
+ case Instruction::MUL_INT_LIT8:
+ case Instruction::AND_INT_LIT8:
+ case Instruction::OR_INT_LIT8:
+ case Instruction::XOR_INT_LIT8:
+ case Instruction::SHL_INT_LIT8:
+ case Instruction::SHR_INT_LIT8:
+ case Instruction::USHR_INT_LIT8:
+ break;
+
+ case Instruction::DIV_INT:
+ case Instruction::REM_INT:
+ case Instruction::DIV_LONG:
+ case Instruction::REM_LONG:
+ case Instruction::DIV_INT_2ADDR:
+ case Instruction::REM_INT_2ADDR:
+ case Instruction::DIV_LONG_2ADDR:
+ case Instruction::REM_LONG_2ADDR:
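+      // A div/rem with a non-constant divisor can throw ArithmeticException; unless the
+      // div-zero check has been proven redundant, keep the insn and treat all vregs as used.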
+ if ((mir->optimization_flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
+ must_keep = true;
+ uses_all_vregs = true;
+ }
+ break;
+
+ case Instruction::DIV_INT_LIT16:
+ case Instruction::REM_INT_LIT16:
+ case Instruction::DIV_INT_LIT8:
+ case Instruction::REM_INT_LIT8:
+ if (mir->dalvikInsn.vC == 0) { // Explicit division by 0?
+ must_keep = true;
+ uses_all_vregs = true;
+ }
+ break;
+
+ case Instruction::AGET_OBJECT:
+ case Instruction::AGET:
+ case Instruction::AGET_WIDE:
+ case Instruction::AGET_BOOLEAN:
+ case Instruction::AGET_BYTE:
+ case Instruction::AGET_CHAR:
+ case Instruction::AGET_SHORT:
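+      // An AGET can throw NullPointerException or ArrayIndexOutOfBoundsException; unless
+      // both checks have been proven redundant, keep the insn and treat all vregs as used.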
+ if ((mir->optimization_flags & MIR_IGNORE_NULL_CHECK) == 0 ||
+ (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) == 0) {
+ must_keep = true;
+ uses_all_vregs = true;
+ }
+ break;
+
+ case Instruction::APUT_OBJECT:
+ case Instruction::APUT:
+ case Instruction::APUT_WIDE:
+ case Instruction::APUT_BYTE:
+ case Instruction::APUT_BOOLEAN:
+ case Instruction::APUT_SHORT:
+ case Instruction::APUT_CHAR:
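+      // An APUT stores to memory, so it must always be kept. It can also throw unless
+      // both the null check and the range check have been proven redundant.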
+ must_keep = true;
+ if ((mir->optimization_flags & MIR_IGNORE_NULL_CHECK) == 0 ||
+ (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) == 0) {
+ uses_all_vregs = true;
+ }
+ break;
+
+ case Instruction::IGET_OBJECT:
+ case Instruction::IGET:
+ case Instruction::IGET_WIDE:
+ case Instruction::IGET_BOOLEAN:
+ case Instruction::IGET_BYTE:
+ case Instruction::IGET_CHAR:
+ case Instruction::IGET_SHORT: {
+ const MirIFieldLoweringInfo& info = mir_graph_->GetIFieldLoweringInfo(mir);
+ if ((mir->optimization_flags & MIR_IGNORE_NULL_CHECK) == 0 ||
+ !info.IsResolved() || !info.FastGet()) {
+ must_keep = true;
+ uses_all_vregs = true;
+ } else if (info.IsVolatile()) {
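+        // A volatile load must be kept for its memory ordering effects even though it
+        // cannot throw here.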
+ must_keep = true;
+ }
+ break;
+ }
+
+ case Instruction::IPUT_OBJECT:
+ case Instruction::IPUT:
+ case Instruction::IPUT_WIDE:
+ case Instruction::IPUT_BOOLEAN:
+ case Instruction::IPUT_BYTE:
+ case Instruction::IPUT_CHAR:
+ case Instruction::IPUT_SHORT: {
+ must_keep = true;
+ const MirIFieldLoweringInfo& info = mir_graph_->GetIFieldLoweringInfo(mir);
+ if ((mir->optimization_flags & MIR_IGNORE_NULL_CHECK) == 0 ||
+ !info.IsResolved() || !info.FastPut()) {
+ uses_all_vregs = true;
+ }
+ break;
+ }
+
+ case Instruction::SGET_OBJECT:
+ case Instruction::SGET:
+ case Instruction::SGET_WIDE:
+ case Instruction::SGET_BOOLEAN:
+ case Instruction::SGET_BYTE:
+ case Instruction::SGET_CHAR:
+ case Instruction::SGET_SHORT: {
+ const MirSFieldLoweringInfo& info = mir_graph_->GetSFieldLoweringInfo(mir);
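+      // An SGET can trigger class initialization or throw unless the class is known to
+      // be initialized and the field is resolved with fast access.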
+ if ((mir->optimization_flags & MIR_CLASS_IS_INITIALIZED) == 0 ||
+ !info.IsResolved() || !info.FastGet()) {
+ must_keep = true;
+ uses_all_vregs = true;
+ } else if (info.IsVolatile()) {
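+        // As for IGETs, a volatile load must be kept for its memory ordering effects.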
+ must_keep = true;
+ }
+ break;
+ }
+
+ case Instruction::SPUT_OBJECT:
+ case Instruction::SPUT:
+ case Instruction::SPUT_WIDE:
+ case Instruction::SPUT_BOOLEAN:
+ case Instruction::SPUT_BYTE:
+ case Instruction::SPUT_CHAR:
+ case Instruction::SPUT_SHORT: {
+ must_keep = true;
+ const MirSFieldLoweringInfo& info = mir_graph_->GetSFieldLoweringInfo(mir);
+ if ((mir->optimization_flags & MIR_CLASS_IS_INITIALIZED) == 0 ||
+ !info.IsResolved() || !info.FastPut()) {
+ uses_all_vregs = true;
+ }
+ break;
+ }
+
+    default:
+      LOG(FATAL) << "Unexpected opcode: " << opcode;
+      UNREACHABLE();
+ }
+
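+  // Record the insn in the vreg chains, together with the value name of its def, if any.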
+ if (mir->ssa_rep->num_defs != 0) {
+ DCHECK(mir->ssa_rep->num_defs == 1 || mir->ssa_rep->num_defs == 2);
+ bool wide = (mir->ssa_rep->num_defs == 2);
+ int s_reg = mir->ssa_rep->defs[0];
+ int v_reg = mir_graph_->SRegToVReg(s_reg);
+ uint16_t new_value = wide ? lvn_->GetSregValueWide(s_reg) : lvn_->GetSregValue(s_reg);
+ DCHECK_NE(new_value, kNoValue);
+
+ vreg_chains_.UpdateInitialVRegValue(v_reg, wide, lvn_);
+ vreg_chains_.AddMIRWithDef(mir, v_reg, wide, new_value);
+ if (is_move) {
+ // Allow renaming all uses of dest vreg to src vreg.
+ vreg_chains_.LastMIRData()->is_move = true;
+ }
+ } else {
+ vreg_chains_.AddMIRWithoutDef(mir);
+ DCHECK(!is_move) << opcode;
+ }
+
+ if (must_keep) {
+ MIRData* last_data = vreg_chains_.LastMIRData();
+ last_data->must_keep = true;
+ if (uses_all_vregs) {
+ last_data->uses_all_vregs = true;
+ no_uses_all_since_ = vreg_chains_.NumMIRs();
+ }
+ } else {
+ DCHECK_NE(mir->ssa_rep->num_defs, 0) << opcode;
+ DCHECK(!uses_all_vregs) << opcode;
+ }
+ return true;
+}
+
+} // namespace art
diff --git a/compiler/dex/gvn_dead_code_elimination.h b/compiler/dex/gvn_dead_code_elimination.h
new file mode 100644
index 0000000..ea28039
--- /dev/null
+++ b/compiler/dex/gvn_dead_code_elimination.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_GVN_DEAD_CODE_ELIMINATION_H_
+#define ART_COMPILER_DEX_GVN_DEAD_CODE_ELIMINATION_H_
+
+#include "global_value_numbering.h"
+#include "utils/arena_object.h"
+#include "utils/scoped_arena_containers.h"
+
+namespace art {
+
+class ArenaBitVector;
+class BasicBlock;
+class LocalValueNumbering;
+class MIR;
+class MIRGraph;
+
+/**
+ * @class GvnDeadCodeElimination
+ * @details Eliminates dead code based on the results of global value numbering.
+ * Also gets rid of MOVE insns when we can use the source instead of the destination
+ * without affecting the vreg values at safepoints; this is useful in methods
+ * with a large number of vregs that frequently move values to and from low vregs
+ * to accommodate insns that can work only with the low 16 or 256 vregs.
+ */
+class GvnDeadCodeElimination : public DeletableArenaObject<kArenaAllocMisc> {
+ public:
+ GvnDeadCodeElimination(const GlobalValueNumbering* gvn, ScopedArenaAllocator* alloc);
+
+ // Apply the DCE to a basic block.
+ void Apply(BasicBlock* bb);
+
+ private:
+ static constexpr uint16_t kNoValue = GlobalValueNumbering::kNoValue;
+ static constexpr uint16_t kNPos = 0xffffu;
+ static constexpr size_t kMaxNumTopChangesToKill = 2;
+
+ struct VRegValue {
+ VRegValue() : value(kNoValue), change(kNPos) { }
+
+ // Value name as reported by GVN, kNoValue if not available.
+ uint16_t value;
+ // Index of the change in mir_data_ that defined the value, kNPos if initial value for the BB.
+ uint16_t change;
+ };
+
+ struct MIRData {
+ explicit MIRData(MIR* m)
+ : mir(m), uses_all_vregs(false), must_keep(false), is_move(false), is_move_src(false),
+ has_def(false), wide_def(false),
+ low_def_over_high_word(false), high_def_over_low_word(false), vreg_def(0u),
+ prev_value(), prev_value_high() {
+ }
+
+ uint16_t PrevChange(int v_reg) const;
+ void SetPrevChange(int v_reg, uint16_t change);
+ void RemovePrevChange(int v_reg, MIRData* prev_data);
+
+ MIR* mir;
+ bool uses_all_vregs : 1; // If mir uses all vregs, uses in mir->ssa_rep are irrelevant.
+    bool must_keep : 1;  // The mir has side effects and must not be eliminated.
+    bool is_move : 1;  // The mir is a MOVE; uses of its def may be renamed to the src.
+    bool is_move_src : 1;  // The mir's def is the src of a later MOVE.
+    bool has_def : 1;  // Whether the mir has a def; vreg_def and prev_value are valid only if set.
+    bool wide_def : 1;  // Whether the def is wide, i.e. occupies a vreg pair.
+    bool low_def_over_high_word : 1;  // The def's low word overwrites an old value's high word.
+    bool high_def_over_low_word : 1;  // The def's high word overwrites an old value's low word.
+ uint16_t vreg_def;
+ VRegValue prev_value;
+ VRegValue prev_value_high; // For wide defs.
+ };
+
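+  // Tracks the chain of changes (defs) made to each vreg in the currently processed BB,
+  // so that the DCE can find, rename or remove individual changes.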
+ class VRegChains {
+ public:
+ VRegChains(uint32_t num_vregs, ScopedArenaAllocator* alloc);
+
+ void Reset();
+
+ void AddMIRWithDef(MIR* mir, int v_reg, bool wide, uint16_t new_value);
+ void AddMIRWithoutDef(MIR* mir);
+ void RemoveLastMIRData();
+ void RemoveTrailingNops();
+
+ size_t NumMIRs() const;
+ MIRData* GetMIRData(size_t pos);
+ MIRData* LastMIRData();
+
+ uint32_t NumVRegs() const;
+ void InsertInitialValueHigh(int v_reg, uint16_t value);
+ void UpdateInitialVRegValue(int v_reg, bool wide, const LocalValueNumbering* lvn);
+ uint16_t LastChange(int v_reg);
+ uint16_t CurrentValue(int v_reg);
+
+ uint16_t FindKillHead(int v_reg, uint16_t cutoff);
+ uint16_t FindFirstChangeAfter(int v_reg, uint16_t change) const;
+ void ReplaceChange(uint16_t old_change, uint16_t new_change);
+ void RemoveChange(uint16_t change);
+ bool IsTopChange(uint16_t change) const;
+ bool IsSRegUsed(uint16_t first_change, uint16_t last_change, int s_reg) const;
+ void RenameSRegUses(uint16_t first_change, uint16_t last_change,
+ int old_s_reg, int new_s_reg, bool wide);
+ void RenameVRegUses(uint16_t first_change, uint16_t last_change,
+ int old_s_reg, int old_v_reg, int new_s_reg, int new_v_reg);
+
+ private:
+ const uint32_t num_vregs_;
+ VRegValue* const vreg_data_;
+ ScopedArenaVector<MIRData> mir_data_;
+ };
+
+ void RecordPass();
+ void BackwardPass();
+
+ void KillMIR(MIRData* data);
+ static void KillMIR(MIR* mir);
+ static void ChangeBinOp2AddrToPlainBinOp(MIR* mir);
+ MIR* CreatePhi(int s_reg, bool fp);
+ MIR* RenameSRegDefOrCreatePhi(uint16_t def_change, uint16_t last_change, MIR* mir_to_kill);
+
+ // Update state variables going backwards through a MIR.
+ void BackwardPassProcessLastMIR();
+
+ uint16_t FindChangesToKill(uint16_t first_change, uint16_t last_change);
+ void BackwardPassTryToKillRevertVRegs();
+ bool BackwardPassTryToKillLastMIR();
+
+ void RecordPassKillMoveByRenamingSrcDef(uint16_t src_change, uint16_t move_change);
+ void RecordPassTryToKillOverwrittenMoveOrMoveSrc(uint16_t check_change);
+ void RecordPassTryToKillOverwrittenMoveOrMoveSrc();
+ void RecordPassTryToKillLastMIR();
+
+ bool RecordMIR(MIR* mir);
+
+ const GlobalValueNumbering* const gvn_;
+ MIRGraph* const mir_graph_;
+
+ VRegChains vreg_chains_;
+ BasicBlock* bb_;
+ const LocalValueNumbering* lvn_;
+ size_t no_uses_all_since_; // The change index after the last change with uses_all_vregs set.
+
+ // Data used when processing MIRs in reverse order.
+ ArenaBitVector* unused_vregs_; // vregs that are not needed later.
+ ArenaBitVector* vregs_to_kill_; // vregs that revert to a previous value.
+ uint16_t* kill_heads_; // For each vreg in vregs_to_kill_, the first change to kill.
+ ScopedArenaVector<uint16_t> changes_to_kill_;
+ ArenaBitVector* dependent_vregs_;
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_DEX_GVN_DEAD_CODE_ELIMINATION_H_
diff --git a/compiler/dex/gvn_dead_code_elimination_test.cc b/compiler/dex/gvn_dead_code_elimination_test.cc
new file mode 100644
index 0000000..954e9f1
--- /dev/null
+++ b/compiler/dex/gvn_dead_code_elimination_test.cc
@@ -0,0 +1,1800 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dataflow_iterator-inl.h"
+#include "dex/mir_field_info.h"
+#include "global_value_numbering.h"
+#include "gvn_dead_code_elimination.h"
+#include "local_value_numbering.h"
+#include "gtest/gtest.h"
+
+namespace art {
+
+class GvnDeadCodeEliminationTest : public testing::Test {
+ protected:
+ static constexpr uint16_t kNoValue = GlobalValueNumbering::kNoValue;
+
+ struct IFieldDef {
+ uint16_t field_idx;
+ uintptr_t declaring_dex_file;
+ uint16_t declaring_field_idx;
+ bool is_volatile;
+ DexMemAccessType type;
+ };
+
+ struct SFieldDef {
+ uint16_t field_idx;
+ uintptr_t declaring_dex_file;
+ uint16_t declaring_field_idx;
+ bool is_volatile;
+ DexMemAccessType type;
+ };
+
+ struct BBDef {
+ static constexpr size_t kMaxSuccessors = 4;
+ static constexpr size_t kMaxPredecessors = 4;
+
+ BBType type;
+ size_t num_successors;
+ BasicBlockId successors[kMaxPredecessors];
+ size_t num_predecessors;
+ BasicBlockId predecessors[kMaxPredecessors];
+ };
+
+ struct MIRDef {
+ static constexpr size_t kMaxSsaDefs = 2;
+ static constexpr size_t kMaxSsaUses = 4;
+
+ BasicBlockId bbid;
+ Instruction::Code opcode;
+ int64_t value;
+ uint32_t field_info;
+ size_t num_uses;
+ int32_t uses[kMaxSsaUses];
+ size_t num_defs;
+ int32_t defs[kMaxSsaDefs];
+ };
+
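+// Helper macros for defining the basic blocks and MIRs used in the tests below.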
+#define DEF_SUCC0() \
+ 0u, { }
+#define DEF_SUCC1(s1) \
+ 1u, { s1 }
+#define DEF_SUCC2(s1, s2) \
+ 2u, { s1, s2 }
+#define DEF_SUCC3(s1, s2, s3) \
+ 3u, { s1, s2, s3 }
+#define DEF_SUCC4(s1, s2, s3, s4) \
+ 4u, { s1, s2, s3, s4 }
+#define DEF_PRED0() \
+ 0u, { }
+#define DEF_PRED1(p1) \
+ 1u, { p1 }
+#define DEF_PRED2(p1, p2) \
+ 2u, { p1, p2 }
+#define DEF_PRED3(p1, p2, p3) \
+ 3u, { p1, p2, p3 }
+#define DEF_PRED4(p1, p2, p3, p4) \
+ 4u, { p1, p2, p3, p4 }
+#define DEF_BB(type, succ, pred) \
+ { type, succ, pred }
+
+#define DEF_CONST(bb, opcode, reg, value) \
+ { bb, opcode, value, 0u, 0, { }, 1, { reg } }
+#define DEF_CONST_WIDE(bb, opcode, reg, value) \
+ { bb, opcode, value, 0u, 0, { }, 2, { reg, reg + 1 } }
+#define DEF_CONST_STRING(bb, opcode, reg, index) \
+ { bb, opcode, index, 0u, 0, { }, 1, { reg } }
+#define DEF_IGET(bb, opcode, reg, obj, field_info) \
+ { bb, opcode, 0u, field_info, 1, { obj }, 1, { reg } }
+#define DEF_IGET_WIDE(bb, opcode, reg, obj, field_info) \
+ { bb, opcode, 0u, field_info, 1, { obj }, 2, { reg, reg + 1 } }
+#define DEF_IPUT(bb, opcode, reg, obj, field_info) \
+ { bb, opcode, 0u, field_info, 2, { reg, obj }, 0, { } }
+#define DEF_IPUT_WIDE(bb, opcode, reg, obj, field_info) \
+ { bb, opcode, 0u, field_info, 3, { reg, reg + 1, obj }, 0, { } }
+#define DEF_SGET(bb, opcode, reg, field_info) \
+ { bb, opcode, 0u, field_info, 0, { }, 1, { reg } }
+#define DEF_SGET_WIDE(bb, opcode, reg, field_info) \
+ { bb, opcode, 0u, field_info, 0, { }, 2, { reg, reg + 1 } }
+#define DEF_SPUT(bb, opcode, reg, field_info) \
+ { bb, opcode, 0u, field_info, 1, { reg }, 0, { } }
+#define DEF_SPUT_WIDE(bb, opcode, reg, field_info) \
+ { bb, opcode, 0u, field_info, 2, { reg, reg + 1 }, 0, { } }
+#define DEF_AGET(bb, opcode, reg, obj, idx) \
+ { bb, opcode, 0u, 0u, 2, { obj, idx }, 1, { reg } }
+#define DEF_AGET_WIDE(bb, opcode, reg, obj, idx) \
+ { bb, opcode, 0u, 0u, 2, { obj, idx }, 2, { reg, reg + 1 } }
+#define DEF_APUT(bb, opcode, reg, obj, idx) \
+ { bb, opcode, 0u, 0u, 3, { reg, obj, idx }, 0, { } }
+#define DEF_APUT_WIDE(bb, opcode, reg, obj, idx) \
+ { bb, opcode, 0u, 0u, 4, { reg, reg + 1, obj, idx }, 0, { } }
+#define DEF_INVOKE1(bb, opcode, reg) \
+ { bb, opcode, 0u, 0u, 1, { reg }, 0, { } }
+#define DEF_UNIQUE_REF(bb, opcode, reg) \
+ { bb, opcode, 0u, 0u, 0, { }, 1, { reg } } // CONST_CLASS, CONST_STRING, NEW_ARRAY, ...
+#define DEF_IFZ(bb, opcode, reg) \
+ { bb, opcode, 0u, 0u, 1, { reg }, 0, { } }
+#define DEF_MOVE(bb, opcode, reg, src) \
+ { bb, opcode, 0u, 0u, 1, { src }, 1, { reg } }
+#define DEF_MOVE_WIDE(bb, opcode, reg, src) \
+ { bb, opcode, 0u, 0u, 2, { src, src + 1 }, 2, { reg, reg + 1 } }
+#define DEF_PHI2(bb, reg, src1, src2) \
+ { bb, static_cast<Instruction::Code>(kMirOpPhi), 0, 0u, 2u, { src1, src2 }, 1, { reg } }
+#define DEF_UNOP(bb, opcode, result, src1) \
+ { bb, opcode, 0u, 0u, 1, { src1 }, 1, { result } }
+#define DEF_BINOP(bb, opcode, result, src1, src2) \
+ { bb, opcode, 0u, 0u, 2, { src1, src2 }, 1, { result } }
+
+ void DoPrepareIFields(const IFieldDef* defs, size_t count) {
+ cu_.mir_graph->ifield_lowering_infos_.clear();
+ cu_.mir_graph->ifield_lowering_infos_.reserve(count);
+ for (size_t i = 0u; i != count; ++i) {
+ const IFieldDef* def = &defs[i];
+ MirIFieldLoweringInfo field_info(def->field_idx, def->type);
+ if (def->declaring_dex_file != 0u) {
+ field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
+ field_info.declaring_field_idx_ = def->declaring_field_idx;
+ field_info.flags_ =
+ MirIFieldLoweringInfo::kFlagFastGet | MirIFieldLoweringInfo::kFlagFastPut |
+ (field_info.flags_ & ~(def->is_volatile ? 0u : MirIFieldLoweringInfo::kFlagIsVolatile));
+ }
+ cu_.mir_graph->ifield_lowering_infos_.push_back(field_info);
+ }
+ }
+
+ template <size_t count>
+ void PrepareIFields(const IFieldDef (&defs)[count]) {
+ DoPrepareIFields(defs, count);
+ }
+
+ void DoPrepareSFields(const SFieldDef* defs, size_t count) {
+ cu_.mir_graph->sfield_lowering_infos_.clear();
+ cu_.mir_graph->sfield_lowering_infos_.reserve(count);
+ for (size_t i = 0u; i != count; ++i) {
+ const SFieldDef* def = &defs[i];
+ MirSFieldLoweringInfo field_info(def->field_idx, def->type);
+ // Mark even unresolved fields as initialized.
+ field_info.flags_ |= MirSFieldLoweringInfo::kFlagClassIsInitialized;
+ // NOTE: MirSFieldLoweringInfo::kFlagClassIsInDexCache isn't used by GVN.
+ if (def->declaring_dex_file != 0u) {
+ field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
+ field_info.declaring_field_idx_ = def->declaring_field_idx;
+ field_info.flags_ =
+ MirSFieldLoweringInfo::kFlagFastGet | MirSFieldLoweringInfo::kFlagFastPut |
+ (field_info.flags_ & ~(def->is_volatile ? 0u : MirSFieldLoweringInfo::kFlagIsVolatile));
+ }
+ cu_.mir_graph->sfield_lowering_infos_.push_back(field_info);
+ }
+ }
+
+ template <size_t count>
+ void PrepareSFields(const SFieldDef (&defs)[count]) {
+ DoPrepareSFields(defs, count);
+ }
+
+ void DoPrepareBasicBlocks(const BBDef* defs, size_t count) {
+ cu_.mir_graph->block_id_map_.clear();
+ cu_.mir_graph->block_list_.clear();
+ ASSERT_LT(3u, count); // null, entry, exit and at least one bytecode block.
+ ASSERT_EQ(kNullBlock, defs[0].type);
+ ASSERT_EQ(kEntryBlock, defs[1].type);
+ ASSERT_EQ(kExitBlock, defs[2].type);
+ for (size_t i = 0u; i != count; ++i) {
+ const BBDef* def = &defs[i];
+ BasicBlock* bb = cu_.mir_graph->CreateNewBB(def->type);
+ if (def->num_successors <= 2) {
+ bb->successor_block_list_type = kNotUsed;
+ bb->fall_through = (def->num_successors >= 1) ? def->successors[0] : 0u;
+ bb->taken = (def->num_successors >= 2) ? def->successors[1] : 0u;
+ } else {
+ bb->successor_block_list_type = kPackedSwitch;
+ bb->fall_through = 0u;
+ bb->taken = 0u;
+ bb->successor_blocks.reserve(def->num_successors);
+ for (size_t j = 0u; j != def->num_successors; ++j) {
+ SuccessorBlockInfo* successor_block_info =
+ static_cast<SuccessorBlockInfo*>(cu_.arena.Alloc(sizeof(SuccessorBlockInfo),
+ kArenaAllocSuccessor));
+ successor_block_info->block = j;
+          successor_block_info->key = 0u;  // Not used by GVN/DCE.
+ bb->successor_blocks.push_back(successor_block_info);
+ }
+ }
+ bb->predecessors.assign(def->predecessors, def->predecessors + def->num_predecessors);
+ if (def->type == kDalvikByteCode || def->type == kEntryBlock || def->type == kExitBlock) {
+ bb->data_flow_info = static_cast<BasicBlockDataFlow*>(
+ cu_.arena.Alloc(sizeof(BasicBlockDataFlow), kArenaAllocDFInfo));
+ bb->data_flow_info->live_in_v = live_in_v_;
+ bb->data_flow_info->vreg_to_ssa_map_exit = nullptr;
+ }
+ }
+ ASSERT_EQ(count, cu_.mir_graph->block_list_.size());
+ cu_.mir_graph->entry_block_ = cu_.mir_graph->block_list_[1];
+ ASSERT_EQ(kEntryBlock, cu_.mir_graph->entry_block_->block_type);
+ cu_.mir_graph->exit_block_ = cu_.mir_graph->block_list_[2];
+ ASSERT_EQ(kExitBlock, cu_.mir_graph->exit_block_->block_type);
+ }
+
+ template <size_t count>
+ void PrepareBasicBlocks(const BBDef (&defs)[count]) {
+ DoPrepareBasicBlocks(defs, count);
+ }
+
+ int SRegToVReg(int32_t s_reg, bool wide) {
+ int v_reg = cu_.mir_graph->SRegToVReg(s_reg);
+ CHECK_LT(static_cast<size_t>(v_reg), num_vregs_);
+ if (wide) {
+ CHECK_LT(static_cast<size_t>(v_reg + 1), num_vregs_);
+ }
+ return v_reg;
+ }
+
+ int SRegToVReg(int32_t* uses, size_t* use, bool wide) {
+ int v_reg = SRegToVReg(uses[*use], wide);
+ if (wide) {
+ CHECK_EQ(uses[*use] + 1, uses[*use + 1]);
+ *use += 2u;
+ } else {
+ *use += 1u;
+ }
+ return v_reg;
+ }
+
+ void DoPrepareMIRs(const MIRDef* defs, size_t count) {
+ mir_count_ = count;
+ mirs_ = reinterpret_cast<MIR*>(cu_.arena.Alloc(sizeof(MIR) * count, kArenaAllocMIR));
+ ssa_reps_.resize(count);
+ for (size_t i = 0u; i != count; ++i) {
+ const MIRDef* def = &defs[i];
+ MIR* mir = &mirs_[i];
+ ASSERT_LT(def->bbid, cu_.mir_graph->block_list_.size());
+ BasicBlock* bb = cu_.mir_graph->block_list_[def->bbid];
+ bb->AppendMIR(mir);
+ mir->dalvikInsn.opcode = def->opcode;
+ mir->dalvikInsn.vB = static_cast<int32_t>(def->value);
+ mir->dalvikInsn.vB_wide = def->value;
+ if (IsInstructionIGetOrIPut(def->opcode)) {
+ ASSERT_LT(def->field_info, cu_.mir_graph->ifield_lowering_infos_.size());
+ mir->meta.ifield_lowering_info = def->field_info;
+ ASSERT_EQ(cu_.mir_graph->ifield_lowering_infos_[def->field_info].MemAccessType(),
+ IGetOrIPutMemAccessType(def->opcode));
+ } else if (IsInstructionSGetOrSPut(def->opcode)) {
+ ASSERT_LT(def->field_info, cu_.mir_graph->sfield_lowering_infos_.size());
+ mir->meta.sfield_lowering_info = def->field_info;
+ ASSERT_EQ(cu_.mir_graph->sfield_lowering_infos_[def->field_info].MemAccessType(),
+ SGetOrSPutMemAccessType(def->opcode));
+ } else if (def->opcode == static_cast<Instruction::Code>(kMirOpPhi)) {
+ mir->meta.phi_incoming =
+ allocator_->AllocArray<BasicBlockId>(def->num_uses, kArenaAllocDFInfo);
+ ASSERT_EQ(def->num_uses, bb->predecessors.size());
+ std::copy(bb->predecessors.begin(), bb->predecessors.end(), mir->meta.phi_incoming);
+ }
+ mir->ssa_rep = &ssa_reps_[i];
+ cu_.mir_graph->AllocateSSAUseData(mir, def->num_uses);
+ std::copy_n(def->uses, def->num_uses, mir->ssa_rep->uses);
+ // Keep mir->ssa_rep->fp_use[.] zero-initialized (false). Not used by DCE, only copied.
+ cu_.mir_graph->AllocateSSADefData(mir, def->num_defs);
+ std::copy_n(def->defs, def->num_defs, mir->ssa_rep->defs);
+ // Keep mir->ssa_rep->fp_def[.] zero-initialized (false). Not used by DCE, only copied.
+ mir->dalvikInsn.opcode = def->opcode;
+      mir->offset = i;  // LVN uses offset only for debug output.
+ mir->optimization_flags = 0u;
+ uint64_t df_attrs = MIRGraph::GetDataFlowAttributes(mir);
+ if ((df_attrs & DF_DA) != 0) {
+ CHECK_NE(def->num_defs, 0u);
+ mir->dalvikInsn.vA = SRegToVReg(def->defs[0], (df_attrs & DF_A_WIDE) != 0);
+ bb->data_flow_info->vreg_to_ssa_map_exit[mir->dalvikInsn.vA] = def->defs[0];
+ if ((df_attrs & DF_A_WIDE) != 0) {
+ CHECK_EQ(def->defs[0] + 1, def->defs[1]);
+ bb->data_flow_info->vreg_to_ssa_map_exit[mir->dalvikInsn.vA + 1u] = def->defs[0] + 1;
+ }
+ }
+ if ((df_attrs & (DF_UA | DF_UB | DF_UC)) != 0) {
+ size_t use = 0;
+ if ((df_attrs & DF_UA) != 0) {
+ mir->dalvikInsn.vA = SRegToVReg(mir->ssa_rep->uses, &use, (df_attrs & DF_A_WIDE) != 0);
+ }
+ if ((df_attrs & DF_UB) != 0) {
+ mir->dalvikInsn.vB = SRegToVReg(mir->ssa_rep->uses, &use, (df_attrs & DF_B_WIDE) != 0);
+ }
+ if ((df_attrs & DF_UC) != 0) {
+ mir->dalvikInsn.vC = SRegToVReg(mir->ssa_rep->uses, &use, (df_attrs & DF_C_WIDE) != 0);
+ }
+ DCHECK_EQ(def->num_uses, use);
+ }
+ }
+ DexFile::CodeItem* code_item = static_cast<DexFile::CodeItem*>(
+ cu_.arena.Alloc(sizeof(DexFile::CodeItem), kArenaAllocMisc));
+ code_item->insns_size_in_code_units_ = 2u * count;
+ code_item->registers_size_ = kMaxVRegs;
+ cu_.mir_graph->current_code_item_ = code_item;
+ }
+
+ template <size_t count>
+ void PrepareMIRs(const MIRDef (&defs)[count]) {
+ DoPrepareMIRs(defs, count);
+ }
+
+ template <size_t count>
+ void PrepareSRegToVRegMap(const int (&map)[count]) {
+ cu_.mir_graph->ssa_base_vregs_.assign(map, map + count);
+ num_vregs_ = *std::max_element(map, map + count) + 1u;
+ AllNodesIterator iterator(cu_.mir_graph.get());
+ for (BasicBlock* bb = iterator.Next(); bb != nullptr; bb = iterator.Next()) {
+ if (bb->data_flow_info != nullptr) {
+ bb->data_flow_info->vreg_to_ssa_map_exit = static_cast<int32_t*>(
+ cu_.arena.Alloc(sizeof(int32_t) * num_vregs_, kArenaAllocDFInfo));
+ std::fill_n(bb->data_flow_info->vreg_to_ssa_map_exit, num_vregs_, INVALID_SREG);
+ }
+ }
+ }
+
+ void PerformGVN() {
+ cu_.mir_graph->SSATransformationStart();
+ cu_.mir_graph->ComputeDFSOrders();
+ cu_.mir_graph->ComputeDominators();
+ cu_.mir_graph->ComputeTopologicalSortOrder();
+ cu_.mir_graph->SSATransformationEnd();
+ cu_.mir_graph->temp_.gvn.ifield_ids = GlobalValueNumbering::PrepareGvnFieldIds(
+ allocator_.get(), cu_.mir_graph->ifield_lowering_infos_);
+ cu_.mir_graph->temp_.gvn.sfield_ids = GlobalValueNumbering::PrepareGvnFieldIds(
+ allocator_.get(), cu_.mir_graph->sfield_lowering_infos_);
+ ASSERT_TRUE(gvn_ == nullptr);
+ gvn_.reset(new (allocator_.get()) GlobalValueNumbering(&cu_, allocator_.get(),
+ GlobalValueNumbering::kModeGvn));
+ value_names_.resize(mir_count_, 0xffffu);
+ LoopRepeatingTopologicalSortIterator iterator(cu_.mir_graph.get());
+ bool change = false;
+ for (BasicBlock* bb = iterator.Next(change); bb != nullptr; bb = iterator.Next(change)) {
+ LocalValueNumbering* lvn = gvn_->PrepareBasicBlock(bb);
+ if (lvn != nullptr) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+ value_names_[mir - mirs_] = lvn->GetValueNumber(mir);
+ }
+ }
+ change = (lvn != nullptr) && gvn_->FinishBasicBlock(bb);
+ ASSERT_TRUE(gvn_->Good());
+ }
+ }
+
+ void PerformGVNCodeModifications() {
+ ASSERT_TRUE(gvn_ != nullptr);
+ ASSERT_TRUE(gvn_->Good());
+ gvn_->StartPostProcessing();
+ TopologicalSortIterator iterator(cu_.mir_graph.get());
+ for (BasicBlock* bb = iterator.Next(); bb != nullptr; bb = iterator.Next()) {
+ LocalValueNumbering* lvn = gvn_->PrepareBasicBlock(bb);
+ if (lvn != nullptr) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+ uint16_t value_name = lvn->GetValueNumber(mir);
+ ASSERT_EQ(value_name, value_names_[mir - mirs_]);
+ }
+ }
+ bool change = (lvn != nullptr) && gvn_->FinishBasicBlock(bb);
+ ASSERT_FALSE(change);
+ ASSERT_TRUE(gvn_->Good());
+ }
+ }
+
+ void FillVregToSsaRegExitMaps() {
+ // Fill in vreg_to_ssa_map_exit for each BB.
+ PreOrderDfsIterator iterator(cu_.mir_graph.get());
+ for (BasicBlock* bb = iterator.Next(); bb != nullptr; bb = iterator.Next()) {
+ if (bb->block_type == kDalvikByteCode) {
+ CHECK(!bb->predecessors.empty());
+ BasicBlock* pred_bb = cu_.mir_graph->GetBasicBlock(bb->predecessors[0]);
+ for (size_t v_reg = 0; v_reg != num_vregs_; ++v_reg) {
+ if (bb->data_flow_info->vreg_to_ssa_map_exit[v_reg] == INVALID_SREG) {
+ bb->data_flow_info->vreg_to_ssa_map_exit[v_reg] =
+ pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg];
+ }
+ }
+ }
+ }
+ }
+
+ void PerformDCE() {
+ FillVregToSsaRegExitMaps();
+ dce_.reset(new (allocator_.get()) GvnDeadCodeElimination(gvn_.get(), allocator_.get()));
+ PreOrderDfsIterator iterator(cu_.mir_graph.get());
+ for (BasicBlock* bb = iterator.Next(); bb != nullptr; bb = iterator.Next()) {
+ if (bb->block_type == kDalvikByteCode) {
+ dce_->Apply(bb);
+ }
+ }
+ }
+
+ void PerformGVN_DCE() {
+ PerformGVN();
+ PerformGVNCodeModifications(); // Eliminate null/range checks.
+ PerformDCE();
+ }
+
+ template <size_t count>
+ void ExpectValueNamesNE(const size_t (&indexes)[count]) {
+ for (size_t i1 = 0; i1 != count; ++i1) {
+ size_t idx1 = indexes[i1];
+ for (size_t i2 = i1 + 1; i2 != count; ++i2) {
+ size_t idx2 = indexes[i2];
+ EXPECT_NE(value_names_[idx1], value_names_[idx2]) << idx1 << " " << idx2;
+ }
+ }
+ }
+
+ template <size_t count>
+ void ExpectNoNullCheck(const size_t (&indexes)[count]) {
+ for (size_t i = 0; i != count; ++i) {
+ size_t idx = indexes[i];
+ EXPECT_EQ(MIR_IGNORE_NULL_CHECK, mirs_[idx].optimization_flags & MIR_IGNORE_NULL_CHECK)
+ << idx;
+ }
+ size_t num_no_null_ck = 0u;
+ for (size_t i = 0; i != mir_count_; ++i) {
+ if ((mirs_[i].optimization_flags & MIR_IGNORE_NULL_CHECK) != 0) {
+ ++num_no_null_ck;
+ }
+ }
+ EXPECT_EQ(count, num_no_null_ck);
+ }
+
+ GvnDeadCodeEliminationTest()
+ : pool_(),
+ cu_(&pool_, kRuntimeISA, nullptr, nullptr),
+ num_vregs_(0u),
+ mir_count_(0u),
+ mirs_(nullptr),
+ ssa_reps_(),
+ allocator_(),
+ gvn_(),
+ dce_(),
+ value_names_(),
+ live_in_v_(new (&cu_.arena) ArenaBitVector(&cu_.arena, kMaxSsaRegs, false, kBitMapMisc)) {
+ cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
+ cu_.access_flags = kAccStatic; // Don't let "this" interfere with this test.
+ allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
+ // By default, the zero-initialized reg_location_[.] with ref == false tells LVN that
+ // 0 constants are integral, not references. Nothing else is used by LVN/GVN.
+ cu_.mir_graph->reg_location_ = static_cast<RegLocation*>(cu_.arena.Alloc(
+ kMaxSsaRegs * sizeof(cu_.mir_graph->reg_location_[0]), kArenaAllocRegAlloc));
+ // Bind all possible sregs to live vregs for test purposes.
+ live_in_v_->SetInitialBits(kMaxSsaRegs);
+ cu_.mir_graph->ssa_base_vregs_.reserve(kMaxSsaRegs);
+ cu_.mir_graph->ssa_subscripts_.reserve(kMaxSsaRegs);
+ for (unsigned int i = 0; i < kMaxSsaRegs; i++) {
+ cu_.mir_graph->ssa_base_vregs_.push_back(i);
+ cu_.mir_graph->ssa_subscripts_.push_back(0);
+ }
+ // Set shorty for a void-returning method without arguments.
+ cu_.shorty = "V";
+ }
+
+ static constexpr size_t kMaxSsaRegs = 16384u;
+ static constexpr size_t kMaxVRegs = 256u;
+
+ ArenaPool pool_;
+ CompilationUnit cu_;
+ size_t num_vregs_;
+ size_t mir_count_;
+ MIR* mirs_;
+ std::vector<SSARepresentation> ssa_reps_;
+ std::unique_ptr<ScopedArenaAllocator> allocator_;
+ std::unique_ptr<GlobalValueNumbering> gvn_;
+ std::unique_ptr<GvnDeadCodeElimination> dce_;
+ std::vector<uint16_t> value_names_;
+ ArenaBitVector* live_in_v_;
+};
+
+constexpr uint16_t GvnDeadCodeEliminationTest::kNoValue;
+
+class GvnDeadCodeEliminationTestSimple : public GvnDeadCodeEliminationTest {
+ public:
+ GvnDeadCodeEliminationTestSimple();
+
+ private:
+ static const BBDef kSimpleBbs[];
+};
+
+const GvnDeadCodeEliminationTest::BBDef GvnDeadCodeEliminationTestSimple::kSimpleBbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(3)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(1)),
+};
+
+GvnDeadCodeEliminationTestSimple::GvnDeadCodeEliminationTestSimple()
+ : GvnDeadCodeEliminationTest() {
+ PrepareBasicBlocks(kSimpleBbs);
+}
+
+class GvnDeadCodeEliminationTestDiamond : public GvnDeadCodeEliminationTest {
+ public:
+ GvnDeadCodeEliminationTestDiamond();
+
+ private:
+ static const BBDef kDiamondBbs[];
+};
+
+const GvnDeadCodeEliminationTest::BBDef GvnDeadCodeEliminationTestDiamond::kDiamondBbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(6)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 5), DEF_PRED1(1)), // Block #3, top of the diamond.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)), // Block #4, left side.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)), // Block #5, right side.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(4, 5)), // Block #6, bottom.
+};
+
+GvnDeadCodeEliminationTestDiamond::GvnDeadCodeEliminationTestDiamond()
+ : GvnDeadCodeEliminationTest() {
+ PrepareBasicBlocks(kDiamondBbs);
+}
+
+class GvnDeadCodeEliminationTestLoop : public GvnDeadCodeEliminationTest {
+ public:
+ GvnDeadCodeEliminationTestLoop();
+
+ private:
+ static const BBDef kLoopBbs[];
+};
+
+const GvnDeadCodeEliminationTest::BBDef GvnDeadCodeEliminationTestLoop::kLoopBbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(5)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 4), DEF_PRED2(3, 4)), // "taken" loops to self.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(4)),
+};
+
+GvnDeadCodeEliminationTestLoop::GvnDeadCodeEliminationTestLoop()
+ : GvnDeadCodeEliminationTest() {
+ PrepareBasicBlocks(kLoopBbs);
+}
+
+TEST_F(GvnDeadCodeEliminationTestSimple, Rename1) {
+ static const IFieldDef ifields[] = {
+ { 0u, 1u, 0u, false, kDexMemAccessWord },
+ { 1u, 1u, 1u, false, kDexMemAccessWord },
+ };
+ static const MIRDef mirs[] = {
+ DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+ DEF_IGET(3, Instruction::IGET, 1u, 0u, 0u),
+ DEF_MOVE(3, Instruction::MOVE_OBJECT, 2u, 0u),
+ DEF_IGET(3, Instruction::IGET, 3u, 2u, 1u),
+ };
+
+ static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 2 };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareIFields(ifields);
+ PrepareMIRs(mirs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ static const size_t diff_indexes[] = { 0, 1, 3 };
+ ExpectValueNamesNE(diff_indexes);
+ EXPECT_EQ(value_names_[0], value_names_[2]);
+
+ const size_t no_null_ck_indexes[] = { 1, 3 };
+ ExpectNoNullCheck(no_null_ck_indexes);
+
+ static const bool eliminated[] = {
+ false, false, true, false
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+ // Check that the IGET uses the s_reg 0, v_reg 0, defined by mirs_[0].
+ ASSERT_EQ(1, mirs_[3].ssa_rep->num_uses);
+ EXPECT_EQ(0, mirs_[3].ssa_rep->uses[0]);
+ EXPECT_EQ(0u, mirs_[3].dalvikInsn.vB);
+}
+
+TEST_F(GvnDeadCodeEliminationTestSimple, Rename2) {
+ static const IFieldDef ifields[] = {
+ { 0u, 1u, 0u, false, kDexMemAccessWord },
+ { 1u, 1u, 1u, false, kDexMemAccessWord },
+ };
+ static const MIRDef mirs[] = {
+ DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+ DEF_IGET(3, Instruction::IGET, 1u, 0u, 0u),
+ DEF_MOVE(3, Instruction::MOVE_OBJECT, 2u, 0u),
+ DEF_IGET(3, Instruction::IGET, 3u, 2u, 1u),
+ DEF_CONST(3, Instruction::CONST, 4u, 1000),
+ };
+
+ static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 2 };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareIFields(ifields);
+ PrepareMIRs(mirs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ static const size_t diff_indexes[] = { 0, 1, 3, 4 };
+ ExpectValueNamesNE(diff_indexes);
+ EXPECT_EQ(value_names_[0], value_names_[2]);
+
+ const size_t no_null_ck_indexes[] = { 1, 3 };
+ ExpectNoNullCheck(no_null_ck_indexes);
+
+ static const bool eliminated[] = {
+ false, false, true, false, false
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+ // Check that the IGET uses the s_reg 0, v_reg 0, defined by mirs_[0].
+ ASSERT_EQ(1, mirs_[3].ssa_rep->num_uses);
+ EXPECT_EQ(0, mirs_[3].ssa_rep->uses[0]);
+ EXPECT_EQ(0u, mirs_[3].dalvikInsn.vB);
+}
+
+TEST_F(GvnDeadCodeEliminationTestSimple, Rename3) {
+ static const IFieldDef ifields[] = {
+ { 0u, 1u, 0u, false, kDexMemAccessWord },
+ { 1u, 1u, 1u, false, kDexMemAccessWord },
+ };
+ static const MIRDef mirs[] = {
+ DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+ DEF_IGET(3, Instruction::IGET, 1u, 0u, 0u),
+ DEF_MOVE(3, Instruction::MOVE_OBJECT, 2u, 0u),
+ DEF_IGET(3, Instruction::IGET, 3u, 2u, 1u),
+ };
+
+ static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 0 };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareIFields(ifields);
+ PrepareMIRs(mirs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ static const size_t diff_indexes[] = { 0, 1, 3 };
+ ExpectValueNamesNE(diff_indexes);
+ EXPECT_EQ(value_names_[0], value_names_[2]);
+
+ const size_t no_null_ck_indexes[] = { 1, 3 };
+ ExpectNoNullCheck(no_null_ck_indexes);
+
+ static const bool eliminated[] = {
+ false, false, true, false
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+ // Check that the NEW_INSTANCE defines the s_reg 2, v_reg 2, originally defined by the move.
+ ASSERT_EQ(1, mirs_[0].ssa_rep->num_defs);
+ EXPECT_EQ(2, mirs_[0].ssa_rep->defs[0]);
+ EXPECT_EQ(2u, mirs_[0].dalvikInsn.vA);
+ // Check that the first IGET is using the s_reg 2, v_reg 2.
+ ASSERT_EQ(1, mirs_[1].ssa_rep->num_uses);
+ EXPECT_EQ(2, mirs_[1].ssa_rep->uses[0]);
+ EXPECT_EQ(2u, mirs_[1].dalvikInsn.vB);
+}
+
+TEST_F(GvnDeadCodeEliminationTestSimple, Rename4) {
+ static const MIRDef mirs[] = {
+ DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+ DEF_MOVE(3, Instruction::MOVE_OBJECT, 1u, 0u),
+ DEF_MOVE(3, Instruction::MOVE_OBJECT, 2u, 1u),
+ DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 3u, 1000u),
+ };
+
+ static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 0, 1 /* high word */ };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareMIRs(mirs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ static const size_t diff_indexes[] = { 0, 3 };
+ ExpectValueNamesNE(diff_indexes);
+ EXPECT_EQ(value_names_[0], value_names_[1]);
+ EXPECT_EQ(value_names_[0], value_names_[2]);
+
+ static const bool eliminated[] = {
+ false, true, true, false
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+ // Check that the NEW_INSTANCE defines the s_reg 2, v_reg 2, originally defined by the move 2u.
+ ASSERT_EQ(1, mirs_[0].ssa_rep->num_defs);
+ EXPECT_EQ(2, mirs_[0].ssa_rep->defs[0]);
+ EXPECT_EQ(2u, mirs_[0].dalvikInsn.vA);
+}
+
+TEST_F(GvnDeadCodeEliminationTestSimple, Rename5) {
+ static const IFieldDef ifields[] = {
+ { 0u, 1u, 0u, false, kDexMemAccessWord },
+ };
+ static const MIRDef mirs[] = {
+ DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+ DEF_IGET(3, Instruction::IGET, 1u, 0u, 0u),
+ DEF_UNOP(3, Instruction::INT_TO_FLOAT, 2u, 1u),
+ DEF_MOVE(3, Instruction::MOVE_OBJECT, 3u, 0u),
+ DEF_MOVE(3, Instruction::MOVE_OBJECT, 4u, 3u),
+ DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 5u, 1000u),
+ };
+
+ static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 1, 3, 0, 1 /* high word */ };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareIFields(ifields);
+ PrepareMIRs(mirs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ static const size_t diff_indexes[] = { 0, 1, 2, 5 };
+ ExpectValueNamesNE(diff_indexes);
+ EXPECT_EQ(value_names_[0], value_names_[3]);
+ EXPECT_EQ(value_names_[0], value_names_[4]);
+
+ static const bool eliminated[] = {
+ false, false, false, true, true, false
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+ // Check that the NEW_INSTANCE defines the s_reg 4, v_reg 3, originally defined by the move 4u.
+ ASSERT_EQ(1, mirs_[0].ssa_rep->num_defs);
+ EXPECT_EQ(4, mirs_[0].ssa_rep->defs[0]);
+ EXPECT_EQ(3u, mirs_[0].dalvikInsn.vA);
+}
+
+TEST_F(GvnDeadCodeEliminationTestSimple, Rename6) {
+ static const MIRDef mirs[] = {
+ DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 0u, 1000u),
+ DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 2u, 0u),
+ };
+
+ static const int32_t sreg_to_vreg_map[] = { 0, 1 /* high word */, 1, 2 /* high word */ };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareMIRs(mirs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ EXPECT_EQ(value_names_[0], value_names_[1]);
+
+ static const bool eliminated[] = {
+ false, true
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+ // Check that the CONST_WIDE defines the s_reg 2, v_reg 1, originally defined by the move 2u.
+ ASSERT_EQ(2, mirs_[0].ssa_rep->num_defs);
+ EXPECT_EQ(2, mirs_[0].ssa_rep->defs[0]);
+ EXPECT_EQ(3, mirs_[0].ssa_rep->defs[1]);
+ EXPECT_EQ(1u, mirs_[0].dalvikInsn.vA);
+}
+
+TEST_F(GvnDeadCodeEliminationTestSimple, Rename7) {
+ static const MIRDef mirs[] = {
+ DEF_CONST(3, Instruction::CONST, 0u, 1000u),
+ DEF_MOVE(3, Instruction::MOVE, 1u, 0u),
+ DEF_BINOP(3, Instruction::ADD_INT, 2u, 0u, 1u),
+ };
+
+ static const int32_t sreg_to_vreg_map[] = { 0, 1, 0 };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareMIRs(mirs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ EXPECT_NE(value_names_[0], value_names_[2]);
+ EXPECT_EQ(value_names_[0], value_names_[1]);
+
+ static const bool eliminated[] = {
+ false, true, false
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+ // Check that the CONST defines the s_reg 1, v_reg 1, originally defined by the move 1u.
+ ASSERT_EQ(1, mirs_[0].ssa_rep->num_defs);
+ EXPECT_EQ(1, mirs_[0].ssa_rep->defs[0]);
+ EXPECT_EQ(1u, mirs_[0].dalvikInsn.vA);
+  // Check that the ADD_INT inputs are both s_reg 1, v_reg 1.
+ ASSERT_EQ(2, mirs_[2].ssa_rep->num_uses);
+ EXPECT_EQ(1, mirs_[2].ssa_rep->uses[0]);
+ EXPECT_EQ(1, mirs_[2].ssa_rep->uses[1]);
+ EXPECT_EQ(1u, mirs_[2].dalvikInsn.vB);
+ EXPECT_EQ(1u, mirs_[2].dalvikInsn.vC);
+}
+
+TEST_F(GvnDeadCodeEliminationTestSimple, Rename8) {
+ static const MIRDef mirs[] = {
+ DEF_CONST(3, Instruction::CONST, 0u, 1000u),
+ DEF_MOVE(3, Instruction::MOVE, 1u, 0u),
+ DEF_BINOP(3, Instruction::ADD_INT_2ADDR, 2u, 0u, 1u),
+ };
+
+ static const int32_t sreg_to_vreg_map[] = { 0, 1, 0 };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareMIRs(mirs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ EXPECT_NE(value_names_[0], value_names_[2]);
+ EXPECT_EQ(value_names_[0], value_names_[1]);
+
+ static const bool eliminated[] = {
+ false, true, false
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+ // Check that the CONST defines the s_reg 1, v_reg 1, originally defined by the move 1u.
+ ASSERT_EQ(1, mirs_[0].ssa_rep->num_defs);
+ EXPECT_EQ(1, mirs_[0].ssa_rep->defs[0]);
+ EXPECT_EQ(1u, mirs_[0].dalvikInsn.vA);
+  // Check that the ADD_INT_2ADDR was replaced by ADD_INT and inputs are both s_reg 1, v_reg 1.
+ EXPECT_EQ(Instruction::ADD_INT, mirs_[2].dalvikInsn.opcode);
+ ASSERT_EQ(2, mirs_[2].ssa_rep->num_uses);
+ EXPECT_EQ(1, mirs_[2].ssa_rep->uses[0]);
+ EXPECT_EQ(1, mirs_[2].ssa_rep->uses[1]);
+ EXPECT_EQ(1u, mirs_[2].dalvikInsn.vB);
+ EXPECT_EQ(1u, mirs_[2].dalvikInsn.vC);
+}
+
+TEST_F(GvnDeadCodeEliminationTestSimple, Rename9) {
+ static const MIRDef mirs[] = {
+ DEF_CONST(3, Instruction::CONST, 0u, 1000u),
+ DEF_BINOP(3, Instruction::ADD_INT_2ADDR, 1u, 0u, 0u),
+ DEF_MOVE(3, Instruction::MOVE, 2u, 1u),
+ DEF_CONST(3, Instruction::CONST, 3u, 3000u),
+ };
+
+ static const int32_t sreg_to_vreg_map[] = { 0, 0, 1, 0 };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareMIRs(mirs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ static const size_t diff_indexes[] = { 0, 1, 3 };
+ ExpectValueNamesNE(diff_indexes);
+ EXPECT_EQ(value_names_[1], value_names_[2]);
+
+ static const bool eliminated[] = {
+ false, false, true, false
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+  // Check that the ADD_INT_2ADDR was replaced by ADD_INT and output is in s_reg 2, v_reg 1.
+ EXPECT_EQ(Instruction::ADD_INT, mirs_[1].dalvikInsn.opcode);
+ ASSERT_EQ(2, mirs_[1].ssa_rep->num_uses);
+ EXPECT_EQ(0, mirs_[1].ssa_rep->uses[0]);
+ EXPECT_EQ(0, mirs_[1].ssa_rep->uses[1]);
+ EXPECT_EQ(0u, mirs_[1].dalvikInsn.vB);
+ EXPECT_EQ(0u, mirs_[1].dalvikInsn.vC);
+ ASSERT_EQ(1, mirs_[1].ssa_rep->num_defs);
+ EXPECT_EQ(2, mirs_[1].ssa_rep->defs[0]);
+ EXPECT_EQ(1u, mirs_[1].dalvikInsn.vA);
+}
+
+TEST_F(GvnDeadCodeEliminationTestSimple, NoRename1) {
+ static const IFieldDef ifields[] = {
+ { 0u, 1u, 0u, false, kDexMemAccessWord },
+ { 1u, 1u, 1u, false, kDexMemAccessWord },
+ };
+ static const MIRDef mirs[] = {
+ DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+ DEF_IGET(3, Instruction::IGET, 1u, 0u, 0u),
+ DEF_UNOP(3, Instruction::INT_TO_FLOAT, 2u, 1u),
+ DEF_MOVE(3, Instruction::MOVE_OBJECT, 3u, 0u),
+ DEF_CONST(3, Instruction::CONST, 4u, 1000),
+ DEF_IGET(3, Instruction::IGET, 5u, 3u, 1u),
+ };
+
+ static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 1, 0, 1 };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareIFields(ifields);
+ PrepareMIRs(mirs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ static const size_t diff_indexes[] = { 0, 1, 2, 4, 5 };
+ ExpectValueNamesNE(diff_indexes);
+ EXPECT_EQ(value_names_[0], value_names_[3]);
+
+ const size_t no_null_ck_indexes[] = { 1, 5 };
+ ExpectNoNullCheck(no_null_ck_indexes);
+
+ static const bool eliminated[] = {
+ false, false, false, false, false, false
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+}
+
+TEST_F(GvnDeadCodeEliminationTestSimple, NoRename2) {
+ static const IFieldDef ifields[] = {
+ { 0u, 1u, 0u, false, kDexMemAccessWord },
+ { 1u, 1u, 1u, false, kDexMemAccessWord },
+ };
+ static const MIRDef mirs[] = {
+ DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+ DEF_IGET(3, Instruction::IGET, 1u, 0u, 0u),
+ DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 2u),
+ DEF_MOVE(3, Instruction::MOVE_OBJECT, 3u, 0u),
+ DEF_CONST(3, Instruction::CONST, 4u, 1000),
+ DEF_IGET(3, Instruction::IGET, 5u, 3u, 1u),
+ DEF_CONST(3, Instruction::CONST, 6u, 2000),
+ };
+
+ static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 2, 0, 3, 2 };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareIFields(ifields);
+ PrepareMIRs(mirs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ static const size_t diff_indexes[] = { 0, 1, 2, 4, 5, 6 };
+ ExpectValueNamesNE(diff_indexes);
+ EXPECT_EQ(value_names_[0], value_names_[3]);
+
+ const size_t no_null_ck_indexes[] = { 1, 5 };
+ ExpectNoNullCheck(no_null_ck_indexes);
+
+ static const bool eliminated[] = {
+ false, false, false, false, false, false, false
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+}
+
+TEST_F(GvnDeadCodeEliminationTestSimple, NoRename3) {
+ static const IFieldDef ifields[] = {
+ { 0u, 1u, 0u, false, kDexMemAccessWord },
+ { 1u, 1u, 1u, false, kDexMemAccessWord },
+ { 2u, 1u, 2u, false, kDexMemAccessWord },
+ };
+ static const MIRDef mirs[] = {
+ DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+ DEF_IGET(3, Instruction::IGET, 1u, 0u, 0u),
+ DEF_IGET(3, Instruction::IGET, 2u, 0u, 2u),
+ DEF_BINOP(3, Instruction::ADD_INT, 3u, 1u, 2u),
+ DEF_MOVE(3, Instruction::MOVE_OBJECT, 4u, 0u),
+ DEF_IGET(3, Instruction::IGET, 5u, 4u, 1u),
+ };
+
+ static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 2, 0 };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareIFields(ifields);
+ PrepareMIRs(mirs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ static const size_t diff_indexes[] = { 0, 1, 2, 3, 5 };
+ ExpectValueNamesNE(diff_indexes);
+ EXPECT_EQ(value_names_[0], value_names_[4]);
+
+ const size_t no_null_ck_indexes[] = { 1, 2, 5 };
+ ExpectNoNullCheck(no_null_ck_indexes);
+
+ static const bool eliminated[] = {
+ false, false, false, false, false, false
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+}
+
+TEST_F(GvnDeadCodeEliminationTestSimple, Simple1) {
+ static const IFieldDef ifields[] = {
+ { 0u, 1u, 0u, false, kDexMemAccessObject },
+ { 1u, 1u, 1u, false, kDexMemAccessObject },
+ { 2u, 1u, 2u, false, kDexMemAccessWord },
+ };
+ static const MIRDef mirs[] = {
+ DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+ DEF_IGET(3, Instruction::IGET_OBJECT, 1u, 0u, 0u),
+ DEF_IGET(3, Instruction::IGET_OBJECT, 2u, 1u, 1u),
+ DEF_IGET(3, Instruction::IGET, 3u, 2u, 2u),
+ DEF_IGET(3, Instruction::IGET_OBJECT, 4u, 0u, 0u),
+ DEF_IGET(3, Instruction::IGET_OBJECT, 5u, 4u, 1u),
+ };
+
+ static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 1, 2 };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareIFields(ifields);
+ PrepareMIRs(mirs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ EXPECT_NE(value_names_[0], value_names_[1]);
+ EXPECT_NE(value_names_[0], value_names_[2]);
+ EXPECT_NE(value_names_[0], value_names_[3]);
+ EXPECT_NE(value_names_[1], value_names_[2]);
+ EXPECT_NE(value_names_[1], value_names_[3]);
+ EXPECT_NE(value_names_[2], value_names_[3]);
+ EXPECT_EQ(value_names_[1], value_names_[4]);
+ EXPECT_EQ(value_names_[2], value_names_[5]);
+
+ EXPECT_EQ(MIR_IGNORE_NULL_CHECK, mirs_[4].optimization_flags & MIR_IGNORE_NULL_CHECK);
+ EXPECT_EQ(MIR_IGNORE_NULL_CHECK, mirs_[5].optimization_flags & MIR_IGNORE_NULL_CHECK);
+
+ static const bool eliminated[] = {
+ false, false, false, false, true, true
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+ // Check that the sregs have been renamed correctly.
+ ASSERT_EQ(1, mirs_[1].ssa_rep->num_defs);
+ EXPECT_EQ(4, mirs_[1].ssa_rep->defs[0]);
+ ASSERT_EQ(1, mirs_[1].ssa_rep->num_uses);
+ EXPECT_EQ(0, mirs_[1].ssa_rep->uses[0]);
+ ASSERT_EQ(1, mirs_[2].ssa_rep->num_defs);
+ EXPECT_EQ(5, mirs_[2].ssa_rep->defs[0]);
+ ASSERT_EQ(1, mirs_[2].ssa_rep->num_uses);
+ EXPECT_EQ(4, mirs_[2].ssa_rep->uses[0]);
+ ASSERT_EQ(1, mirs_[3].ssa_rep->num_defs);
+ EXPECT_EQ(3, mirs_[3].ssa_rep->defs[0]);
+ ASSERT_EQ(1, mirs_[3].ssa_rep->num_uses);
+ EXPECT_EQ(5, mirs_[3].ssa_rep->uses[0]);
+}
+
+TEST_F(GvnDeadCodeEliminationTestSimple, Simple2) {
+ static const IFieldDef ifields[] = {
+ { 0u, 1u, 0u, false, kDexMemAccessWord },
+ };
+ static const MIRDef mirs[] = {
+ DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+ DEF_CONST(3, Instruction::CONST, 1u, 1000),
+ DEF_IGET(3, Instruction::IGET, 2u, 0u, 0u),
+ DEF_BINOP(3, Instruction::ADD_INT_2ADDR, 3u, 2u, 1u),
+ DEF_UNOP(3, Instruction::INT_TO_FLOAT, 4u, 3u),
+ DEF_IGET(3, Instruction::IGET, 5u, 0u, 0u),
+ DEF_BINOP(3, Instruction::ADD_INT_2ADDR, 6u, 5u, 1u),
+ };
+
+ static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 2, 3, 2, 2 };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareIFields(ifields);
+ PrepareMIRs(mirs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ static const size_t diff_indexes[] = { 0, 1, 2, 3 };
+ ExpectValueNamesNE(diff_indexes);
+ EXPECT_EQ(value_names_[2], value_names_[5]);
+ EXPECT_EQ(value_names_[3], value_names_[6]);
+
+ const size_t no_null_ck_indexes[] = { 2, 5 };
+ ExpectNoNullCheck(no_null_ck_indexes);
+
+ static const bool eliminated[] = {
+ false, false, false, false, false, true, true
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+ // Check that the sregs have been renamed correctly.
+ ASSERT_EQ(1, mirs_[3].ssa_rep->num_defs);
+ EXPECT_EQ(6, mirs_[3].ssa_rep->defs[0]);
+ ASSERT_EQ(2, mirs_[3].ssa_rep->num_uses);
+ EXPECT_EQ(2, mirs_[3].ssa_rep->uses[0]);
+ EXPECT_EQ(1, mirs_[3].ssa_rep->uses[1]);
+ ASSERT_EQ(1, mirs_[4].ssa_rep->num_defs);
+ EXPECT_EQ(4, mirs_[4].ssa_rep->defs[0]);
+ ASSERT_EQ(1, mirs_[4].ssa_rep->num_uses);
+ EXPECT_EQ(6, mirs_[4].ssa_rep->uses[0]);
+}
+
+TEST_F(GvnDeadCodeEliminationTestSimple, Simple3) {
+ static const IFieldDef ifields[] = {
+ { 0u, 1u, 0u, false, kDexMemAccessWord },
+ };
+ static const MIRDef mirs[] = {
+ DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+ DEF_CONST(3, Instruction::CONST, 1u, 1000),
+ DEF_CONST(3, Instruction::CONST, 2u, 2000),
+ DEF_CONST(3, Instruction::CONST, 3u, 3000),
+ DEF_IGET(3, Instruction::IGET, 4u, 0u, 0u),
+ DEF_BINOP(3, Instruction::ADD_INT, 5u, 4u, 1u),
+ DEF_BINOP(3, Instruction::MUL_INT, 6u, 5u, 2u),
+ DEF_BINOP(3, Instruction::SUB_INT, 7u, 6u, 3u),
+ DEF_UNOP(3, Instruction::INT_TO_FLOAT, 8u, 7u),
+ DEF_IGET(3, Instruction::IGET, 9u, 0u, 0u),
+ DEF_BINOP(3, Instruction::ADD_INT, 10u, 9u, 1u),
+ DEF_BINOP(3, Instruction::MUL_INT, 11u, 10u, 2u), // Simple elimination of ADD+MUL
+ DEF_BINOP(3, Instruction::SUB_INT, 12u, 11u, 3u), // allows simple elimination of IGET+SUB.
+ };
+
+ static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 4, 5, 5, 4, 6, 4, 5, 5, 4 };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareIFields(ifields);
+ PrepareMIRs(mirs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ static const size_t diff_indexes[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };
+ ExpectValueNamesNE(diff_indexes);
+ EXPECT_EQ(value_names_[4], value_names_[9]);
+ EXPECT_EQ(value_names_[5], value_names_[10]);
+ EXPECT_EQ(value_names_[6], value_names_[11]);
+ EXPECT_EQ(value_names_[7], value_names_[12]);
+
+ const size_t no_null_ck_indexes[] = { 4, 9 };
+ ExpectNoNullCheck(no_null_ck_indexes);
+
+ static const bool eliminated[] = {
+ false, false, false, false, false, false, false, false, false, true, true, true, true
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+ // Check that the sregs have been renamed correctly.
+ ASSERT_EQ(1, mirs_[6].ssa_rep->num_defs);
+ EXPECT_EQ(11, mirs_[6].ssa_rep->defs[0]); // 6 -> 11
+ ASSERT_EQ(2, mirs_[6].ssa_rep->num_uses);
+ EXPECT_EQ(5, mirs_[6].ssa_rep->uses[0]);
+ EXPECT_EQ(2, mirs_[6].ssa_rep->uses[1]);
+ ASSERT_EQ(1, mirs_[7].ssa_rep->num_defs);
+ EXPECT_EQ(12, mirs_[7].ssa_rep->defs[0]); // 7 -> 12
+ ASSERT_EQ(2, mirs_[7].ssa_rep->num_uses);
+ EXPECT_EQ(11, mirs_[7].ssa_rep->uses[0]); // 6 -> 11
+ EXPECT_EQ(3, mirs_[7].ssa_rep->uses[1]);
+ ASSERT_EQ(1, mirs_[8].ssa_rep->num_defs);
+ EXPECT_EQ(8, mirs_[8].ssa_rep->defs[0]);
+ ASSERT_EQ(1, mirs_[8].ssa_rep->num_uses);
+ EXPECT_EQ(12, mirs_[8].ssa_rep->uses[0]); // 7 -> 12
+}
+
+TEST_F(GvnDeadCodeEliminationTestSimple, Simple4) {
+ static const IFieldDef ifields[] = {
+ { 0u, 1u, 0u, false, kDexMemAccessWord },
+ };
+ static const MIRDef mirs[] = {
+ DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+ DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 1u, INT64_C(1)),
+ DEF_BINOP(3, Instruction::LONG_TO_FLOAT, 3u, 1u, 2u),
+ DEF_IGET(3, Instruction::IGET, 4u, 0u, 0u),
+ DEF_UNOP(3, Instruction::INT_TO_FLOAT, 5u, 4u),
+ DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 6u, INT64_C(1)),
+ DEF_BINOP(3, Instruction::LONG_TO_FLOAT, 8u, 6u, 7u),
+ DEF_IGET(3, Instruction::IGET, 9u, 0u, 0u),
+ };
+
+ static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 1, 2, 3, 1, 2, 1, 2 };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareIFields(ifields);
+ PrepareMIRs(mirs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ static const size_t diff_indexes[] = { 0, 1, 2, 3, 4 };
+ ExpectValueNamesNE(diff_indexes);
+ EXPECT_EQ(value_names_[1], value_names_[5]);
+ EXPECT_EQ(value_names_[2], value_names_[6]);
+ EXPECT_EQ(value_names_[3], value_names_[7]);
+
+ const size_t no_null_ck_indexes[] = { 3, 7 };
+ ExpectNoNullCheck(no_null_ck_indexes);
+
+ static const bool eliminated[] = {
+ // Simple elimination of CONST_WIDE+LONG_TO_FLOAT allows simple elimination of IGET.
+ false, false, false, false, false, true, true, true
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+ // Check that the sregs have been renamed correctly.
+ ASSERT_EQ(1, mirs_[2].ssa_rep->num_defs);
+ EXPECT_EQ(8, mirs_[2].ssa_rep->defs[0]); // 3 -> 8
+ ASSERT_EQ(2, mirs_[2].ssa_rep->num_uses);
+ EXPECT_EQ(1, mirs_[2].ssa_rep->uses[0]);
+ EXPECT_EQ(2, mirs_[2].ssa_rep->uses[1]);
+ ASSERT_EQ(1, mirs_[3].ssa_rep->num_defs);
+ EXPECT_EQ(9, mirs_[3].ssa_rep->defs[0]); // 4 -> 9
+ ASSERT_EQ(1, mirs_[3].ssa_rep->num_uses);
+ EXPECT_EQ(0, mirs_[3].ssa_rep->uses[0]);
+ ASSERT_EQ(1, mirs_[4].ssa_rep->num_defs);
+ EXPECT_EQ(5, mirs_[4].ssa_rep->defs[0]);
+ ASSERT_EQ(1, mirs_[4].ssa_rep->num_uses);
+ EXPECT_EQ(9, mirs_[4].ssa_rep->uses[0]); // 4 -> 9
+}
+
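+// The KillChain tests exercise chains where eliminating the recomputation requires
+// renaming several surviving defs consistently; the KeepChain tests further below cover
+// the conservative cases where no such renaming exists and everything must be kept.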
+TEST_F(GvnDeadCodeEliminationTestSimple, KillChain1) {
+ static const IFieldDef ifields[] = {
+ { 0u, 1u, 0u, false, kDexMemAccessWord },
+ };
+ static const MIRDef mirs[] = {
+ DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+ DEF_CONST(3, Instruction::CONST, 1u, 1000),
+ DEF_CONST(3, Instruction::CONST, 2u, 2000),
+ DEF_CONST(3, Instruction::CONST, 3u, 3000),
+ DEF_IGET(3, Instruction::IGET, 4u, 0u, 0u),
+ DEF_BINOP(3, Instruction::ADD_INT, 5u, 4u, 1u),
+ DEF_BINOP(3, Instruction::MUL_INT, 6u, 5u, 2u),
+ DEF_BINOP(3, Instruction::SUB_INT, 7u, 6u, 3u),
+ DEF_UNOP(3, Instruction::INT_TO_FLOAT, 8u, 7u),
+ DEF_IGET(3, Instruction::IGET, 9u, 0u, 0u),
+ DEF_BINOP(3, Instruction::ADD_INT, 10u, 9u, 1u),
+ DEF_BINOP(3, Instruction::MUL_INT, 11u, 10u, 2u),
+ DEF_BINOP(3, Instruction::SUB_INT, 12u, 11u, 3u),
+ };
+
+ static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 4, 5, 4, 5, 6, 4, 5, 4, 5 };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareIFields(ifields);
+ PrepareMIRs(mirs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ static const size_t diff_indexes[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };
+ ExpectValueNamesNE(diff_indexes);
+ EXPECT_EQ(value_names_[4], value_names_[9]);
+ EXPECT_EQ(value_names_[5], value_names_[10]);
+ EXPECT_EQ(value_names_[6], value_names_[11]);
+ EXPECT_EQ(value_names_[7], value_names_[12]);
+
+ const size_t no_null_ck_indexes[] = { 4, 9 };
+ ExpectNoNullCheck(no_null_ck_indexes);
+
+ static const bool eliminated[] = {
+ false, false, false, false, false, false, false, false, false, true, true, true, true
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+ // Check that the sregs have been renamed correctly.
+ ASSERT_EQ(1, mirs_[6].ssa_rep->num_defs);
+ EXPECT_EQ(11, mirs_[6].ssa_rep->defs[0]); // 6 -> 11
+ ASSERT_EQ(2, mirs_[6].ssa_rep->num_uses);
+ EXPECT_EQ(5, mirs_[6].ssa_rep->uses[0]);
+ EXPECT_EQ(2, mirs_[6].ssa_rep->uses[1]);
+ ASSERT_EQ(1, mirs_[7].ssa_rep->num_defs);
+ EXPECT_EQ(12, mirs_[7].ssa_rep->defs[0]); // 7 -> 12
+ ASSERT_EQ(2, mirs_[7].ssa_rep->num_uses);
+ EXPECT_EQ(11, mirs_[7].ssa_rep->uses[0]); // 6 -> 11
+ EXPECT_EQ(3, mirs_[7].ssa_rep->uses[1]);
+ ASSERT_EQ(1, mirs_[8].ssa_rep->num_defs);
+ EXPECT_EQ(8, mirs_[8].ssa_rep->defs[0]);
+ ASSERT_EQ(1, mirs_[8].ssa_rep->num_uses);
+ EXPECT_EQ(12, mirs_[8].ssa_rep->uses[0]); // 7 -> 12
+}
+
+TEST_F(GvnDeadCodeEliminationTestSimple, KillChain2) {
+ static const IFieldDef ifields[] = {
+ { 0u, 1u, 0u, false, kDexMemAccessWord },
+ };
+ static const MIRDef mirs[] = {
+ DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+ DEF_CONST(3, Instruction::CONST, 1u, 1000),
+ DEF_CONST(3, Instruction::CONST, 2u, 2000),
+ DEF_CONST(3, Instruction::CONST, 3u, 3000),
+ DEF_IGET(3, Instruction::IGET, 4u, 0u, 0u),
+ DEF_BINOP(3, Instruction::ADD_INT, 5u, 4u, 1u),
+ DEF_BINOP(3, Instruction::MUL_INT, 6u, 5u, 2u),
+ DEF_BINOP(3, Instruction::SUB_INT, 7u, 6u, 3u),
+ DEF_UNOP(3, Instruction::INT_TO_FLOAT, 8u, 7u),
+ DEF_IGET(3, Instruction::IGET, 9u, 0u, 0u),
+ DEF_BINOP(3, Instruction::ADD_INT, 10u, 9u, 1u),
+ DEF_BINOP(3, Instruction::MUL_INT, 11u, 10u, 2u),
+ DEF_BINOP(3, Instruction::SUB_INT, 12u, 11u, 3u),
+ DEF_CONST(3, Instruction::CONST, 13u, 4000),
+ };
+
+ static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 4, 5, 5, 4, 6, 4, 7, 7, 4, 7 };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareIFields(ifields);
+ PrepareMIRs(mirs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ static const size_t diff_indexes[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 13 };
+ ExpectValueNamesNE(diff_indexes);
+ EXPECT_EQ(value_names_[4], value_names_[9]);
+ EXPECT_EQ(value_names_[5], value_names_[10]);
+ EXPECT_EQ(value_names_[6], value_names_[11]);
+ EXPECT_EQ(value_names_[7], value_names_[12]);
+
+ const size_t no_null_ck_indexes[] = { 4, 9 };
+ ExpectNoNullCheck(no_null_ck_indexes);
+
+ static const bool eliminated[] = {
+ false, false, false, false, false, false, false, false, false, true, true, true, true, false
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+ // Check that the sregs have been renamed correctly.
+ ASSERT_EQ(1, mirs_[7].ssa_rep->num_defs);
+ EXPECT_EQ(12, mirs_[7].ssa_rep->defs[0]); // 7 -> 12
+ ASSERT_EQ(2, mirs_[7].ssa_rep->num_uses);
+ EXPECT_EQ(6, mirs_[7].ssa_rep->uses[0]);
+ EXPECT_EQ(3, mirs_[7].ssa_rep->uses[1]);
+ ASSERT_EQ(1, mirs_[8].ssa_rep->num_defs);
+ EXPECT_EQ(8, mirs_[8].ssa_rep->defs[0]);
+ ASSERT_EQ(1, mirs_[8].ssa_rep->num_uses);
+ EXPECT_EQ(12, mirs_[8].ssa_rep->uses[0]); // 7 -> 12
+}
+
+TEST_F(GvnDeadCodeEliminationTestSimple, KillChain3) {
+ static const IFieldDef ifields[] = {
+ { 0u, 1u, 0u, false, kDexMemAccessWord },
+ };
+ static const MIRDef mirs[] = {
+ DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+ DEF_CONST(3, Instruction::CONST, 1u, 1000),
+ DEF_CONST(3, Instruction::CONST, 2u, 2000),
+ DEF_CONST(3, Instruction::CONST, 3u, 3000),
+ DEF_IGET(3, Instruction::IGET, 4u, 0u, 0u),
+ DEF_BINOP(3, Instruction::ADD_INT, 5u, 4u, 1u),
+ DEF_BINOP(3, Instruction::MUL_INT, 6u, 5u, 2u),
+ DEF_BINOP(3, Instruction::SUB_INT, 7u, 6u, 3u),
+ DEF_UNOP(3, Instruction::INT_TO_FLOAT, 8u, 7u),
+ DEF_IGET(3, Instruction::IGET, 9u, 0u, 0u),
+ DEF_BINOP(3, Instruction::ADD_INT, 10u, 9u, 1u),
+ DEF_BINOP(3, Instruction::MUL_INT, 11u, 10u, 2u),
+ DEF_CONST(3, Instruction::CONST, 12u, 4000),
+ DEF_BINOP(3, Instruction::SUB_INT, 13u, 11u, 3u),
+ };
+
+ static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 4, 5, 5, 4, 6, 4, 7, 4, 7, 4 };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareIFields(ifields);
+ PrepareMIRs(mirs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ static const size_t diff_indexes[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 12 };
+ ExpectValueNamesNE(diff_indexes);
+ EXPECT_EQ(value_names_[4], value_names_[9]);
+ EXPECT_EQ(value_names_[5], value_names_[10]);
+ EXPECT_EQ(value_names_[6], value_names_[11]);
+ EXPECT_EQ(value_names_[7], value_names_[13]);
+
+ const size_t no_null_ck_indexes[] = { 4, 9 };
+ ExpectNoNullCheck(no_null_ck_indexes);
+
+ static const bool eliminated[] = {
+ false, false, false, false, false, false, false, false, false, true, true, true, false, true
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+ // Check that the sregs have been renamed correctly.
+ ASSERT_EQ(1, mirs_[7].ssa_rep->num_defs);
+ EXPECT_EQ(13, mirs_[7].ssa_rep->defs[0]); // 7 -> 13
+ ASSERT_EQ(2, mirs_[7].ssa_rep->num_uses);
+ EXPECT_EQ(6, mirs_[7].ssa_rep->uses[0]);
+ EXPECT_EQ(3, mirs_[7].ssa_rep->uses[1]);
+ ASSERT_EQ(1, mirs_[8].ssa_rep->num_defs);
+ EXPECT_EQ(8, mirs_[8].ssa_rep->defs[0]);
+ ASSERT_EQ(1, mirs_[8].ssa_rep->num_uses);
+ EXPECT_EQ(13, mirs_[8].ssa_rep->uses[0]); // 7 -> 13
+}
+
+TEST_F(GvnDeadCodeEliminationTestSimple, KeepChain1) {
+ // KillChain2 without the final CONST.
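+ // Without that CONST overwriting v7, the recomputed MUL_INT remains the last
+ // definition of v7 while its value is otherwise available only in v5, so no valid
+ // renaming exists and the whole chain must be kept.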
+ static const IFieldDef ifields[] = {
+ { 0u, 1u, 0u, false, kDexMemAccessWord },
+ };
+ static const MIRDef mirs[] = {
+ DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+ DEF_CONST(3, Instruction::CONST, 1u, 1000),
+ DEF_CONST(3, Instruction::CONST, 2u, 2000),
+ DEF_CONST(3, Instruction::CONST, 3u, 3000),
+ DEF_IGET(3, Instruction::IGET, 4u, 0u, 0u),
+ DEF_BINOP(3, Instruction::ADD_INT, 5u, 4u, 1u),
+ DEF_BINOP(3, Instruction::MUL_INT, 6u, 5u, 2u),
+ DEF_BINOP(3, Instruction::SUB_INT, 7u, 6u, 3u),
+ DEF_UNOP(3, Instruction::INT_TO_FLOAT, 8u, 7u),
+ DEF_IGET(3, Instruction::IGET, 9u, 0u, 0u),
+ DEF_BINOP(3, Instruction::ADD_INT, 10u, 9u, 1u),
+ DEF_BINOP(3, Instruction::MUL_INT, 11u, 10u, 2u),
+ DEF_BINOP(3, Instruction::SUB_INT, 12u, 11u, 3u),
+ };
+
+ static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 4, 5, 5, 4, 6, 4, 7, 7, 4 };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareIFields(ifields);
+ PrepareMIRs(mirs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ static const size_t diff_indexes[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };
+ ExpectValueNamesNE(diff_indexes);
+ EXPECT_EQ(value_names_[4], value_names_[9]);
+ EXPECT_EQ(value_names_[5], value_names_[10]);
+ EXPECT_EQ(value_names_[6], value_names_[11]);
+ EXPECT_EQ(value_names_[7], value_names_[12]);
+
+ const size_t no_null_ck_indexes[] = { 4, 9 };
+ ExpectNoNullCheck(no_null_ck_indexes);
+
+ static const bool eliminated[] = {
+ false, false, false, false, false, false, false, false, false, false, false, false, false
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+}
+
+TEST_F(GvnDeadCodeEliminationTestSimple, KeepChain2) {
+ // KillChain1 with MIRs in the middle of the chain.
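+ // The unrelated CONST and INT_TO_FLOAT in the middle redefine v4 and v7, so the first
+ // chain's values are no longer available in the right vregs across them and nothing
+ // can be eliminated.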
+ static const IFieldDef ifields[] = {
+ { 0u, 1u, 0u, false, kDexMemAccessWord },
+ };
+ static const MIRDef mirs[] = {
+ DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+ DEF_CONST(3, Instruction::CONST, 1u, 1000),
+ DEF_CONST(3, Instruction::CONST, 2u, 2000),
+ DEF_CONST(3, Instruction::CONST, 3u, 3000),
+ DEF_IGET(3, Instruction::IGET, 4u, 0u, 0u),
+ DEF_BINOP(3, Instruction::ADD_INT, 5u, 4u, 1u),
+ DEF_BINOP(3, Instruction::MUL_INT, 6u, 5u, 2u),
+ DEF_BINOP(3, Instruction::SUB_INT, 7u, 6u, 3u),
+ DEF_UNOP(3, Instruction::INT_TO_FLOAT, 8u, 7u),
+ DEF_IGET(3, Instruction::IGET, 9u, 0u, 0u),
+ DEF_BINOP(3, Instruction::ADD_INT, 10u, 9u, 1u),
+ DEF_CONST(3, Instruction::CONST, 11u, 4000),
+ DEF_UNOP(3, Instruction::INT_TO_FLOAT, 12u, 11u),
+ DEF_BINOP(3, Instruction::MUL_INT, 13u, 10u, 2u),
+ DEF_BINOP(3, Instruction::SUB_INT, 14u, 13u, 3u),
+ };
+
+ static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 4, 5, 4, 5, 6, 4, 5, 4, 7, 4, 5 };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareIFields(ifields);
+ PrepareMIRs(mirs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ static const size_t diff_indexes[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };
+ ExpectValueNamesNE(diff_indexes);
+ EXPECT_EQ(value_names_[4], value_names_[9]);
+ EXPECT_EQ(value_names_[5], value_names_[10]);
+ EXPECT_EQ(value_names_[6], value_names_[13]);
+ EXPECT_EQ(value_names_[7], value_names_[14]);
+
+ const size_t no_null_ck_indexes[] = { 4, 9 };
+ ExpectNoNullCheck(no_null_ck_indexes);
+
+ static const bool eliminated[] = {
+ false, false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+}
+
+TEST_F(GvnDeadCodeEliminationTestDiamond, CreatePhi1) {
+ static const MIRDef mirs[] = {
+ DEF_CONST(3, Instruction::CONST, 0u, 1000),
+ DEF_CONST(4, Instruction::CONST, 1u, 1000),
+ };
+
+ static const int32_t sreg_to_vreg_map[] = { 0, 0 };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareMIRs(mirs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ EXPECT_EQ(value_names_[0], value_names_[1]);
+
+ static const bool eliminated[] = {
+ false, true,
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+ // Check that we've created a single-input Phi to replace the CONST 1u.
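+ // A single-input Phi acts as a rename across the block boundary: it redefines the
+ // killed CONST's ssa name (sreg 1 in v0) from the value already available from the
+ // predecessor, keeping the SSA representation consistent.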
+ BasicBlock* bb4 = cu_.mir_graph->GetBasicBlock(4);
+ MIR* phi = bb4->first_mir_insn;
+ ASSERT_TRUE(phi != nullptr);
+ ASSERT_EQ(kMirOpPhi, static_cast<int>(phi->dalvikInsn.opcode));
+ ASSERT_EQ(1, phi->ssa_rep->num_uses);
+ EXPECT_EQ(0, phi->ssa_rep->uses[0]);
+ ASSERT_EQ(1, phi->ssa_rep->num_defs);
+ EXPECT_EQ(1, phi->ssa_rep->defs[0]);
+ EXPECT_EQ(0u, phi->dalvikInsn.vA);
+}
+
+TEST_F(GvnDeadCodeEliminationTestDiamond, CreatePhi2) {
+ static const IFieldDef ifields[] = {
+ { 0u, 1u, 0u, false, kDexMemAccessWord },
+ };
+ static const MIRDef mirs[] = {
+ DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+ DEF_CONST(4, Instruction::CONST, 1u, 1000),
+ DEF_IPUT(4, Instruction::IPUT, 1u, 0u, 0u),
+ DEF_CONST(5, Instruction::CONST, 3u, 2000),
+ DEF_IPUT(5, Instruction::IPUT, 3u, 0u, 0u),
+ DEF_IGET(6, Instruction::IGET, 5u, 0u, 0u),
+ };
+
+ static const int32_t sreg_to_vreg_map[] = { 0, 1, 2 /* dummy */, 1, 2 /* dummy */, 1 };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareIFields(ifields);
+ PrepareMIRs(mirs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ static const size_t diff_indexes[] = { 0, 1, 3, 5 };
+ ExpectValueNamesNE(diff_indexes);
+
+ const size_t no_null_ck_indexes[] = { 2, 4, 5 };
+ ExpectNoNullCheck(no_null_ck_indexes);
+
+ static const bool eliminated[] = {
+ false, false, false, false, false, true,
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+ // Check that we've created a two-input Phi to replace the IGET 5u.
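+ // The IPUTs make the field's value known on both incoming edges (sregs 1 and 3, both
+ // in v1), so the load can be replaced with a Phi merging the previously stored values.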
+ BasicBlock* bb6 = cu_.mir_graph->GetBasicBlock(6);
+ MIR* phi = bb6->first_mir_insn;
+ ASSERT_TRUE(phi != nullptr);
+ ASSERT_EQ(kMirOpPhi, static_cast<int>(phi->dalvikInsn.opcode));
+ ASSERT_EQ(2, phi->ssa_rep->num_uses);
+ EXPECT_EQ(1, phi->ssa_rep->uses[0]);
+ EXPECT_EQ(3, phi->ssa_rep->uses[1]);
+ ASSERT_EQ(1, phi->ssa_rep->num_defs);
+ EXPECT_EQ(5, phi->ssa_rep->defs[0]);
+ EXPECT_EQ(1u, phi->dalvikInsn.vA);
+}
+
+TEST_F(GvnDeadCodeEliminationTestDiamond, KillChainInAnotherBlock1) {
+ static const IFieldDef ifields[] = {
+ { 0u, 1u, 0u, false, kDexMemAccessObject }, // linked list
+ };
+ static const MIRDef mirs[] = {
+ DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+ DEF_IGET(3, Instruction::IGET_OBJECT, 1u, 0u, 0u),
+ DEF_IGET(3, Instruction::IGET_OBJECT, 2u, 1u, 0u),
+ DEF_IGET(3, Instruction::IGET_OBJECT, 3u, 2u, 0u),
+ DEF_IGET(3, Instruction::IGET_OBJECT, 4u, 3u, 0u),
+ DEF_IFZ(3, Instruction::IF_NEZ, 4u),
+ DEF_IGET(4, Instruction::IGET_OBJECT, 6u, 0u, 0u),
+ DEF_IGET(4, Instruction::IGET_OBJECT, 7u, 6u, 0u),
+ DEF_IGET(4, Instruction::IGET_OBJECT, 8u, 7u, 0u),
+ DEF_IGET(4, Instruction::IGET_OBJECT, 9u, 8u, 0u),
+ };
+
+ static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 1, 2, 3 /* dummy */, 1, 2, 1, 2 };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareIFields(ifields);
+ PrepareMIRs(mirs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ static const size_t diff_indexes[] = { 0, 1, 2, 3, 4 };
+ ExpectValueNamesNE(diff_indexes);
+ EXPECT_EQ(value_names_[1], value_names_[6]);
+ EXPECT_EQ(value_names_[2], value_names_[7]);
+ EXPECT_EQ(value_names_[3], value_names_[8]);
+ EXPECT_EQ(value_names_[4], value_names_[9]);
+
+ const size_t no_null_ck_indexes[] = { 1, 6, 7, 8, 9 };
+ ExpectNoNullCheck(no_null_ck_indexes);
+
+ static const bool eliminated[] = {
+ false, false, false, false, false, false, true, true, true, true,
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+ // Check that we've created two single-input Phis to replace the IGET 8u and IGET 9u;
+ // the IGET 6u and IGET 7u were killed without a replacement.
+ BasicBlock* bb4 = cu_.mir_graph->GetBasicBlock(4);
+ MIR* phi1 = bb4->first_mir_insn;
+ ASSERT_TRUE(phi1 != nullptr);
+ ASSERT_EQ(kMirOpPhi, static_cast<int>(phi1->dalvikInsn.opcode));
+ MIR* phi2 = phi1->next;
+ ASSERT_TRUE(phi2 != nullptr);
+ ASSERT_EQ(kMirOpPhi, static_cast<int>(phi2->dalvikInsn.opcode));
+ ASSERT_TRUE(phi2->next == &mirs_[6]);
+ if (phi1->dalvikInsn.vA == 2u) {
+ std::swap(phi1, phi2);
+ }
+ ASSERT_EQ(1, phi1->ssa_rep->num_uses);
+ EXPECT_EQ(3, phi1->ssa_rep->uses[0]);
+ ASSERT_EQ(1, phi1->ssa_rep->num_defs);
+ EXPECT_EQ(8, phi1->ssa_rep->defs[0]);
+ EXPECT_EQ(1u, phi1->dalvikInsn.vA);
+ ASSERT_EQ(1, phi2->ssa_rep->num_uses);
+ EXPECT_EQ(4, phi2->ssa_rep->uses[0]);
+ ASSERT_EQ(1, phi2->ssa_rep->num_defs);
+ EXPECT_EQ(9, phi2->ssa_rep->defs[0]);
+ EXPECT_EQ(2u, phi2->dalvikInsn.vA);
+}
+
+TEST_F(GvnDeadCodeEliminationTestDiamond, KillChainInAnotherBlock2) {
+ static const IFieldDef ifields[] = {
+ { 0u, 1u, 0u, false, kDexMemAccessObject }, // linked list
+ };
+ static const MIRDef mirs[] = {
+ DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+ DEF_IGET(3, Instruction::IGET_OBJECT, 1u, 0u, 0u),
+ DEF_IGET(3, Instruction::IGET_OBJECT, 2u, 1u, 0u),
+ DEF_IGET(3, Instruction::IGET_OBJECT, 3u, 2u, 0u),
+ DEF_IGET(3, Instruction::IGET_OBJECT, 4u, 3u, 0u),
+ DEF_IFZ(3, Instruction::IF_NEZ, 4u),
+ DEF_IGET(4, Instruction::IGET_OBJECT, 6u, 0u, 0u),
+ DEF_IGET(4, Instruction::IGET_OBJECT, 7u, 6u, 0u),
+ DEF_IGET(4, Instruction::IGET_OBJECT, 8u, 7u, 0u),
+ DEF_CONST(4, Instruction::CONST, 9u, 1000),
+ };
+
+ static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 1, 2, 3 /* dummy */, 1, 2, 1, 2 };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareIFields(ifields);
+ PrepareMIRs(mirs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ static const size_t diff_indexes[] = { 0, 1, 2, 3, 4, 9 };
+ ExpectValueNamesNE(diff_indexes);
+ EXPECT_EQ(value_names_[1], value_names_[6]);
+ EXPECT_EQ(value_names_[2], value_names_[7]);
+ EXPECT_EQ(value_names_[3], value_names_[8]);
+
+ const size_t no_null_ck_indexes[] = { 1, 6, 7, 8 };
+ ExpectNoNullCheck(no_null_ck_indexes);
+
+ static const bool eliminated[] = {
+ false, false, false, false, false, false, true, true, true, false,
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+ // Check that we've created a single-input Phi to replace the IGET 8u;
+ // the IGET 6u and IGET 7u were killed without a replacement.
+ BasicBlock* bb4 = cu_.mir_graph->GetBasicBlock(4);
+ MIR* phi = bb4->first_mir_insn;
+ ASSERT_TRUE(phi != nullptr);
+ ASSERT_EQ(kMirOpPhi, static_cast<int>(phi->dalvikInsn.opcode));
+ ASSERT_TRUE(phi->next == &mirs_[6]);
+ ASSERT_EQ(1, phi->ssa_rep->num_uses);
+ EXPECT_EQ(3, phi->ssa_rep->uses[0]);
+ ASSERT_EQ(1, phi->ssa_rep->num_defs);
+ EXPECT_EQ(8, phi->ssa_rep->defs[0]);
+ EXPECT_EQ(1u, phi->dalvikInsn.vA);
+}
+
+TEST_F(GvnDeadCodeEliminationTestLoop, IFieldLoopVariable) {
+ static const IFieldDef ifields[] = {
+ { 0u, 1u, 0u, false, kDexMemAccessWord },
+ };
+ static const MIRDef mirs[] = {
+ DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+ DEF_CONST(3, Instruction::CONST, 1u, 1),
+ DEF_CONST(3, Instruction::CONST, 2u, 0),
+ DEF_IPUT(3, Instruction::IPUT, 2u, 0u, 0u),
+ DEF_IGET(4, Instruction::IGET, 4u, 0u, 0u),
+ DEF_BINOP(4, Instruction::ADD_INT, 5u, 4u, 1u),
+ DEF_IPUT(4, Instruction::IPUT, 5u, 0u, 0u),
+ };
+
+ static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3 /* dummy */, 2, 2 };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareIFields(ifields);
+ PrepareMIRs(mirs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ static const size_t diff_indexes[] = { 0, 1, 2, 4, 5 };
+ ExpectValueNamesNE(diff_indexes);
+
+ const size_t no_null_ck_indexes[] = { 3, 4, 6 };
+ ExpectNoNullCheck(no_null_ck_indexes);
+
+ static const bool eliminated[] = {
+ false, false, false, false, true, false, false,
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+ // Check that we've created a two-input Phi to replace the IGET 4u.
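+ // The loop-head load turns into a Phi of the value stored before the loop (sreg 2)
+ // and the loop-carried ADD_INT result (sreg 5), i.e. the field now behaves like an
+ // ordinary loop variable in v2.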
+ BasicBlock* bb4 = cu_.mir_graph->GetBasicBlock(4);
+ MIR* phi = bb4->first_mir_insn;
+ ASSERT_TRUE(phi != nullptr);
+ ASSERT_EQ(kMirOpPhi, static_cast<int>(phi->dalvikInsn.opcode));
+ ASSERT_TRUE(phi->next == &mirs_[4]);
+ ASSERT_EQ(2, phi->ssa_rep->num_uses);
+ EXPECT_EQ(2, phi->ssa_rep->uses[0]);
+ EXPECT_EQ(5, phi->ssa_rep->uses[1]);
+ ASSERT_EQ(1, phi->ssa_rep->num_defs);
+ EXPECT_EQ(4, phi->ssa_rep->defs[0]);
+ EXPECT_EQ(2u, phi->dalvikInsn.vA);
+}
+
+} // namespace art
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index aecde51..d677680 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -901,9 +901,9 @@
// Calculate merged values for the intersection.
for (auto& load_value_entry : my_values->load_value_map) {
uint16_t location = load_value_entry.first;
- bool same_values = true;
- uint16_t value_name = kNoValue;
merge_names_.clear();
+ uint16_t value_name = kNoValue;
+ bool same_values = true;
for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
value_name = Versions::LookupMergeValue(gvn_, lvn, key, location);
same_values = same_values && (merge_names_.empty() || value_name == merge_names_.back());
@@ -937,6 +937,10 @@
void LocalValueNumbering::Merge(MergeType merge_type) {
DCHECK_GE(gvn_->merge_lvns_.size(), 2u);
+ // Always reserve space in merge_names_. Even if we don't use it in Merge(), we may
+ // need it in GetStartingVregValueNumberImpl() when merge_names_'s allocator is no
+ // longer the top of the allocator stack.
+ merge_names_.reserve(gvn_->merge_lvns_.size());
+
IntersectSregValueMaps<&LocalValueNumbering::sreg_value_map_>();
IntersectSregValueMaps<&LocalValueNumbering::sreg_wide_value_map_>();
if (merge_type == kReturnMerge) {
@@ -1169,8 +1173,8 @@
int first_s_reg = uses[pos];
bool wide = (first_lvn->sreg_wide_value_map_.count(first_s_reg) != 0u);
// Iterate over *merge_lvns_ and skip incoming sregs for BBs without associated LVN.
- uint16_t value_name = kNoValue;
merge_names_.clear();
+ uint16_t value_name = kNoValue;
bool same_values = true;
for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
DCHECK_LT(pos, mir->ssa_rep->num_uses);
@@ -1592,12 +1596,18 @@
break;
case Instruction::MOVE_EXCEPTION:
case Instruction::NEW_INSTANCE:
- case Instruction::CONST_CLASS:
case Instruction::NEW_ARRAY:
// 1 result, treat as unique each time, use result s_reg - will be unique.
res = MarkNonAliasingNonNull(mir);
SetOperandValue(mir->ssa_rep->defs[0], res);
break;
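+ // A class reference is the same every time it is loaded, so value-number CONST_CLASS
+ // by its type index rather than treating it as a unique reference. The result is
+ // also known to be non-null and non-aliasing.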
+ case Instruction::CONST_CLASS:
+ DCHECK_EQ(Low16Bits(mir->dalvikInsn.vB), mir->dalvikInsn.vB);
+ res = gvn_->LookupValue(Instruction::CONST_CLASS, mir->dalvikInsn.vB, 0, 0);
+ SetOperandValue(mir->ssa_rep->defs[0], res);
+ null_checked_.insert(res);
+ non_aliasing_refs_.insert(res);
+ break;
case Instruction::CONST_STRING:
case Instruction::CONST_STRING_JUMBO:
// These strings are internalized, so assign value based on the string pool index.
@@ -1641,16 +1651,22 @@
SetOperandValueWide(mir->ssa_rep->defs[0], res);
break;
+ case Instruction::CONST_HIGH16:
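+ // CONST_HIGH16 loads vB << 16, so a non-zero vB gets the same value name as the
+ // equivalent CONST. A zero falls through to the CONST path, where a zero in a
+ // reference register maps to the null value.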
+ if (mir->dalvikInsn.vB != 0) {
+ res = gvn_->LookupValue(Instruction::CONST, 0, mir->dalvikInsn.vB, 0);
+ SetOperandValue(mir->ssa_rep->defs[0], res);
+ break;
+ }
+ FALLTHROUGH_INTENDED;
case Instruction::CONST:
case Instruction::CONST_4:
case Instruction::CONST_16:
- res = gvn_->LookupValue(Instruction::CONST, Low16Bits(mir->dalvikInsn.vB),
- High16Bits(mir->dalvikInsn.vB), 0);
- SetOperandValue(mir->ssa_rep->defs[0], res);
- break;
-
- case Instruction::CONST_HIGH16:
- res = gvn_->LookupValue(Instruction::CONST, 0, mir->dalvikInsn.vB, 0);
+ if (mir->dalvikInsn.vB == 0 && gvn_->GetMirGraph()->GetRawDest(mir).ref) {
+ res = GlobalValueNumbering::kNullValue;
+ } else {
+ res = gvn_->LookupValue(Instruction::CONST, Low16Bits(mir->dalvikInsn.vB),
+ High16Bits(mir->dalvikInsn.vB), 0);
+ }
SetOperandValue(mir->ssa_rep->defs[0], res);
break;
@@ -1956,4 +1972,55 @@
return res;
}
+uint16_t LocalValueNumbering::GetEndingVregValueNumberImpl(int v_reg, bool wide) const {
+ const BasicBlock* bb = gvn_->GetBasicBlock(Id());
+ DCHECK(bb != nullptr);
+ int s_reg = bb->data_flow_info->vreg_to_ssa_map_exit[v_reg];
+ if (s_reg == INVALID_SREG) {
+ return kNoValue;
+ }
+ if (wide) {
+ int high_s_reg = bb->data_flow_info->vreg_to_ssa_map_exit[v_reg + 1];
+ if (high_s_reg != s_reg + 1) {
+ return kNoValue; // High word has been overwritten.
+ }
+ return GetSregValueWide(s_reg);
+ } else {
+ return GetSregValue(s_reg);
+ }
+}
+
+uint16_t LocalValueNumbering::GetStartingVregValueNumberImpl(int v_reg, bool wide) const {
+ DCHECK_EQ(gvn_->mode_, GlobalValueNumbering::kModeGvnPostProcessing);
+ DCHECK(gvn_->CanModify());
+ const BasicBlock* bb = gvn_->GetBasicBlock(Id());
+ DCHECK(bb != nullptr);
+ DCHECK_NE(bb->predecessors.size(), 0u);
+ if (bb->predecessors.size() == 1u) {
+ return gvn_->GetLvn(bb->predecessors[0])->GetEndingVregValueNumberImpl(v_reg, wide);
+ }
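+ // Multiple predecessors: gather the ending values the same way Merge() does and, if
+ // they differ, look the set up in merge_map_ to find the value name that the merge
+ // assigned to this combination, if any.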
+ merge_names_.clear();
+ uint16_t value_name = kNoValue;
+ bool same_values = true;
+ for (BasicBlockId pred_id : bb->predecessors) {
+ value_name = gvn_->GetLvn(pred_id)->GetEndingVregValueNumberImpl(v_reg, wide);
+ if (value_name == kNoValue) {
+ return kNoValue;
+ }
+ same_values = same_values && (merge_names_.empty() || value_name == merge_names_.back());
+ merge_names_.push_back(value_name);
+ }
+ if (same_values) {
+ // value_name already contains the result.
+ } else {
+ auto lb = merge_map_.lower_bound(merge_names_);
+ if (lb != merge_map_.end() && !merge_map_.key_comp()(merge_names_, lb->first)) {
+ value_name = lb->second;
+ } else {
+ value_name = kNoValue; // We never assigned a value name to this set of merged names.
+ }
+ }
+ return value_name;
+}
+
} // namespace art
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
index aef8c6d..f51b886 100644
--- a/compiler/dex/local_value_numbering.h
+++ b/compiler/dex/local_value_numbering.h
@@ -52,13 +52,22 @@
return div_zero_checked_.find(value_name) != div_zero_checked_.end();
}
- bool IsSregValue(uint16_t s_reg, uint16_t value_name) const {
- auto it = sreg_value_map_.find(s_reg);
- if (it != sreg_value_map_.end()) {
- return it->second == value_name;
- } else {
- return gvn_->HasValue(kNoValue, s_reg, kNoValue, kNoValue, value_name);
- }
+ uint16_t GetSregValue(uint16_t s_reg) const {
+ return GetSregValueImpl(s_reg, &sreg_value_map_);
+ }
+
+ uint16_t GetSregValueWide(uint16_t s_reg) const {
+ return GetSregValueImpl(s_reg, &sreg_wide_value_map_);
+ }
+
+ // Get the starting value number for a given dalvik register.
+ uint16_t GetStartingVregValueNumber(int v_reg) const {
+ return GetStartingVregValueNumberImpl(v_reg, false);
+ }
+
+ // Get the starting value number for a given wide dalvik register.
+ uint16_t GetStartingVregValueNumberWide(int v_reg) const {
+ return GetStartingVregValueNumberImpl(v_reg, true);
}
enum MergeType {
@@ -80,6 +89,20 @@
// Key is s_reg, value is value name.
typedef ScopedArenaSafeMap<uint16_t, uint16_t> SregValueMap;
+ uint16_t GetEndingVregValueNumberImpl(int v_reg, bool wide) const;
+ uint16_t GetStartingVregValueNumberImpl(int v_reg, bool wide) const;
+
+ uint16_t GetSregValueImpl(int s_reg, const SregValueMap* map) const {
+ uint16_t res = kNoValue;
+ auto lb = map->find(s_reg);
+ if (lb != map->end()) {
+ res = lb->second;
+ } else {
+ res = gvn_->FindValue(kNoValue, s_reg, kNoValue, kNoValue);
+ }
+ return res;
+ }
+
void SetOperandValueImpl(uint16_t s_reg, uint16_t value, SregValueMap* map) {
DCHECK_EQ(map->count(s_reg), 0u) << PrettyMethod(gvn_->cu_->method_idx, *gvn_->cu_->dex_file)
<< " LVN id: " << id_ << ", s_reg: " << s_reg;
@@ -370,9 +393,9 @@
ValueNameSet div_zero_checked_;
// Reuse one vector for all merges to avoid leaking too much memory on the ArenaStack.
- ScopedArenaVector<BasicBlockId> merge_names_;
+ mutable ScopedArenaVector<uint16_t> merge_names_;
// Map to identify when different locations merge the same values.
- ScopedArenaSafeMap<ScopedArenaVector<BasicBlockId>, uint16_t> merge_map_;
+ ScopedArenaSafeMap<ScopedArenaVector<uint16_t>, uint16_t> merge_map_;
// New memory version for merge, kNoValue if all memory versions matched.
uint16_t merge_new_memory_version_;
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index 9f18a3e..ec1c39e 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -185,9 +185,9 @@
}
void PerformLVN() {
- cu_.mir_graph->temp_.gvn.ifield_ids_ = GlobalValueNumbering::PrepareGvnFieldIds(
+ cu_.mir_graph->temp_.gvn.ifield_ids = GlobalValueNumbering::PrepareGvnFieldIds(
allocator_.get(), cu_.mir_graph->ifield_lowering_infos_);
- cu_.mir_graph->temp_.gvn.sfield_ids_ = GlobalValueNumbering::PrepareGvnFieldIds(
+ cu_.mir_graph->temp_.gvn.sfield_ids = GlobalValueNumbering::PrepareGvnFieldIds(
allocator_.get(), cu_.mir_graph->sfield_lowering_infos_);
gvn_.reset(new (allocator_.get()) GlobalValueNumbering(&cu_, allocator_.get(),
GlobalValueNumbering::kModeLvn));
@@ -211,8 +211,14 @@
value_names_() {
cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
+ // By default, the zero-initialized reg_location_ entries with ref == false tell LVN
+ // that 0 constants are integral, not references. Nothing else is used by LVN/GVN.
+ cu_.mir_graph->reg_location_ = static_cast<RegLocation*>(cu_.arena.Alloc(
+ kMaxSsaRegs * sizeof(cu_.mir_graph->reg_location_[0]), kArenaAllocRegAlloc));
}
+ static constexpr size_t kMaxSsaRegs = 16384u;
+
ArenaPool pool_;
CompilationUnit cu_;
size_t mir_count_;
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index 1f56276..f9f7e22 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -910,11 +910,6 @@
DF_FORMAT_EXTENDED,
};
-/* Return the base virtual register for a SSA name */
-int MIRGraph::SRegToVReg(int ssa_reg) const {
- return ssa_base_vregs_[ssa_reg];
-}
-
/* Any register that is used before being defined is considered live-in */
void MIRGraph::HandleLiveInUse(ArenaBitVector* use_v, ArenaBitVector* def_v,
ArenaBitVector* live_in_v, int dalvik_reg_id) {
diff --git a/compiler/dex/mir_field_info.h b/compiler/dex/mir_field_info.h
index ff427f8..98b2da8 100644
--- a/compiler/dex/mir_field_info.h
+++ b/compiler/dex/mir_field_info.h
@@ -149,6 +149,7 @@
friend class NullCheckEliminationTest;
friend class GlobalValueNumberingTest;
+ friend class GvnDeadCodeEliminationTest;
friend class LocalValueNumberingTest;
};
@@ -223,6 +224,7 @@
friend class ClassInitCheckEliminationTest;
friend class GlobalValueNumberingTest;
+ friend class GvnDeadCodeEliminationTest;
friend class LocalValueNumberingTest;
};
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 9f98589..08ca1b2 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -2559,4 +2559,13 @@
return m_units_[m_unit_index]->GetCodeItem()->insns_;
}
+void MIRGraph::SetPuntToInterpreter(bool val) {
+ punt_to_interpreter_ = val;
+ if (val) {
+ // Disable all subsequent optimizations. They may not be safe to run. (For example,
+ // LVN/GVN assumes there are no conflicts found by the type inference pass.)
+ cu_->disable_opt = ~static_cast<decltype(cu_->disable_opt)>(0);
+ }
+}
+
} // namespace art
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index c33825b..020136c 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -37,6 +37,7 @@
class DexCompilationUnit;
class DexFileMethodInliner;
class GlobalValueNumbering;
+class GvnDeadCodeElimination;
// Forward declaration.
class MIRGraph;
@@ -1052,7 +1053,12 @@
void DumpCheckStats();
MIR* FindMoveResult(BasicBlock* bb, MIR* mir);
- int SRegToVReg(int ssa_reg) const;
+
+ /* Return the base virtual register for an SSA name */
+ int SRegToVReg(int ssa_reg) const {
+ return ssa_base_vregs_[ssa_reg];
+ }
+
void VerifyDataflow();
void CheckForDominanceFrontier(BasicBlock* dom_bb, const BasicBlock* succ_bb);
bool EliminateNullChecksGate();
@@ -1065,6 +1071,9 @@
bool ApplyGlobalValueNumberingGate();
bool ApplyGlobalValueNumbering(BasicBlock* bb);
void ApplyGlobalValueNumberingEnd();
+ bool EliminateDeadCodeGate();
+ bool EliminateDeadCode(BasicBlock* bb);
+ void EliminateDeadCodeEnd();
bool EliminateSuspendChecksGate();
bool EliminateSuspendChecks(BasicBlock* bb);
void EliminateSuspendChecksEnd();
@@ -1072,15 +1081,15 @@
uint16_t GetGvnIFieldId(MIR* mir) const {
DCHECK(IsInstructionIGetOrIPut(mir->dalvikInsn.opcode));
DCHECK_LT(mir->meta.ifield_lowering_info, ifield_lowering_infos_.size());
- DCHECK(temp_.gvn.ifield_ids_ != nullptr);
- return temp_.gvn.ifield_ids_[mir->meta.ifield_lowering_info];
+ DCHECK(temp_.gvn.ifield_ids != nullptr);
+ return temp_.gvn.ifield_ids[mir->meta.ifield_lowering_info];
}
uint16_t GetGvnSFieldId(MIR* mir) const {
DCHECK(IsInstructionSGetOrSPut(mir->dalvikInsn.opcode));
DCHECK_LT(mir->meta.sfield_lowering_info, sfield_lowering_infos_.size());
- DCHECK(temp_.gvn.sfield_ids_ != nullptr);
- return temp_.gvn.sfield_ids_[mir->meta.sfield_lowering_info];
+ DCHECK(temp_.gvn.sfield_ids != nullptr);
+ return temp_.gvn.sfield_ids[mir->meta.sfield_lowering_info];
}
/*
@@ -1115,9 +1124,7 @@
return punt_to_interpreter_;
}
- void SetPuntToInterpreter(bool val) {
- punt_to_interpreter_ = val;
- }
+ void SetPuntToInterpreter(bool val);
void DisassembleExtendedInstr(const MIR* mir, std::string* decoded_mir);
char* GetDalvikDisassembly(const MIR* mir);
@@ -1284,7 +1291,8 @@
* @param mir The mir to check.
* @return Returns 'true' if the given MIR might throw an exception.
*/
- bool CanThrow(MIR* mir);
+ bool CanThrow(MIR* mir) const;
+
/**
* @brief Combine multiply and add/sub MIRs into corresponding extended MAC MIR.
* @param mul_mir The multiply MIR to be combined.
@@ -1382,8 +1390,9 @@
// Global value numbering.
struct {
GlobalValueNumbering* gvn;
- uint16_t* ifield_ids_; // Part of GVN/LVN but cached here for LVN to avoid recalculation.
- uint16_t* sfield_ids_; // Ditto.
+ uint16_t* ifield_ids; // Part of GVN/LVN but cached here for LVN to avoid recalculation.
+ uint16_t* sfield_ids; // Ditto.
+ GvnDeadCodeElimination* dce;
} gvn;
// Suspend check elimination.
struct {
@@ -1437,6 +1446,7 @@
friend class SuspendCheckEliminationTest;
friend class NullCheckEliminationTest;
friend class GlobalValueNumberingTest;
+ friend class GvnDeadCodeEliminationTest;
friend class LocalValueNumberingTest;
friend class TopologicalSortOrderTest;
};
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index dac0210..2f547ea 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -21,6 +21,7 @@
#include "driver/compiler_driver.h"
#include "driver/dex_compilation_unit.h"
#include "global_value_numbering.h"
+#include "gvn_dead_code_elimination.h"
#include "local_value_numbering.h"
#include "mir_field_info.h"
#include "quick/dex_file_method_inliner.h"
@@ -1333,9 +1334,9 @@
DCHECK(temp_scoped_alloc_ == nullptr);
temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
- temp_.gvn.ifield_ids_ =
+ temp_.gvn.ifield_ids =
GlobalValueNumbering::PrepareGvnFieldIds(temp_scoped_alloc_.get(), ifield_lowering_infos_);
- temp_.gvn.sfield_ids_ =
+ temp_.gvn.sfield_ids =
GlobalValueNumbering::PrepareGvnFieldIds(temp_scoped_alloc_.get(), sfield_lowering_infos_);
DCHECK(temp_.gvn.gvn == nullptr);
temp_.gvn.gvn = new (temp_scoped_alloc_.get()) GlobalValueNumbering(
@@ -1359,8 +1360,8 @@
// Perform modifications.
DCHECK(temp_.gvn.gvn != nullptr);
if (temp_.gvn.gvn->Good()) {
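+ // Start post-processing even when there are no loops to process below: the DCE pass
+ // runs in post-processing mode on the preserved GVN data.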
+ temp_.gvn.gvn->StartPostProcessing();
if (max_nested_loops_ != 0u) {
- temp_.gvn.gvn->StartPostProcessing();
TopologicalSortIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
ScopedArenaAllocator allocator(&cu_->arena_stack); // Reclaim memory after each LVN.
@@ -1378,12 +1379,45 @@
cu_->disable_opt |= (1u << kLocalValueNumbering);
} else {
LOG(WARNING) << "GVN failed for " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
+ cu_->disable_opt |= (1u << kGvnDeadCodeElimination);
}
+ if ((cu_->disable_opt & (1 << kGvnDeadCodeElimination)) != 0) {
+ EliminateDeadCodeEnd();
+ } // else preserve GVN data for the GVN-based DCE pass.
+}
+
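+// The GVN instance, the DCE instance and the cached field ids all live in
+// temp_scoped_alloc_; EliminateDeadCodeEnd() is the single cleanup point whether the
+// DCE pass ran, was disabled, or GVN failed.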
+bool MIRGraph::EliminateDeadCodeGate() {
+ if ((cu_->disable_opt & (1 << kGvnDeadCodeElimination)) != 0) {
+ return false;
+ }
+ DCHECK(temp_scoped_alloc_ != nullptr);
+ temp_.gvn.dce = new (temp_scoped_alloc_.get()) GvnDeadCodeElimination(temp_.gvn.gvn,
+ temp_scoped_alloc_.get());
+ return true;
+}
+
+bool MIRGraph::EliminateDeadCode(BasicBlock* bb) {
+ DCHECK(temp_scoped_alloc_ != nullptr);
+ DCHECK(temp_.gvn.gvn != nullptr);
+ if (bb->block_type != kDalvikByteCode) {
+ return false;
+ }
+ DCHECK(temp_.gvn.dce != nullptr);
+ temp_.gvn.dce->Apply(bb);
+ return false; // No need to repeat.
+}
+
+void MIRGraph::EliminateDeadCodeEnd() {
+ DCHECK_EQ(temp_.gvn.dce != nullptr, (cu_->disable_opt & (1 << kGvnDeadCodeElimination)) == 0);
+ if (temp_.gvn.dce != nullptr) {
+ delete temp_.gvn.dce;
+ temp_.gvn.dce = nullptr;
+ }
delete temp_.gvn.gvn;
temp_.gvn.gvn = nullptr;
- temp_.gvn.ifield_ids_ = nullptr;
- temp_.gvn.sfield_ids_ = nullptr;
+ temp_.gvn.ifield_ids = nullptr;
+ temp_.gvn.sfield_ids = nullptr;
DCHECK(temp_scoped_alloc_ != nullptr);
temp_scoped_alloc_.reset();
}
@@ -1553,9 +1587,9 @@
void MIRGraph::BasicBlockOptimizationStart() {
if ((cu_->disable_opt & (1 << kLocalValueNumbering)) == 0) {
temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
- temp_.gvn.ifield_ids_ =
+ temp_.gvn.ifield_ids =
GlobalValueNumbering::PrepareGvnFieldIds(temp_scoped_alloc_.get(), ifield_lowering_infos_);
- temp_.gvn.sfield_ids_ =
+ temp_.gvn.sfield_ids =
GlobalValueNumbering::PrepareGvnFieldIds(temp_scoped_alloc_.get(), sfield_lowering_infos_);
}
}
@@ -1581,8 +1615,8 @@
void MIRGraph::BasicBlockOptimizationEnd() {
// Clean up after LVN.
- temp_.gvn.ifield_ids_ = nullptr;
- temp_.gvn.sfield_ids_ = nullptr;
+ temp_.gvn.ifield_ids = nullptr;
+ temp_.gvn.sfield_ids = nullptr;
temp_scoped_alloc_.reset();
}
@@ -1684,7 +1718,7 @@
temp_.sce.inliner = nullptr;
}
-bool MIRGraph::CanThrow(MIR* mir) {
+bool MIRGraph::CanThrow(MIR* mir) const {
if ((mir->dalvikInsn.FlagsOf() & Instruction::kThrow) == 0) {
return false;
}
@@ -1718,7 +1752,6 @@
// Non-throwing only if range check has been eliminated.
return ((opt_flags & MIR_IGNORE_RANGE_CHECK) == 0);
} else if (mir->dalvikInsn.opcode == Instruction::ARRAY_LENGTH ||
- mir->dalvikInsn.opcode == Instruction::FILL_ARRAY_DATA ||
static_cast<int>(mir->dalvikInsn.opcode) == kMirOpNullCheck) {
// No more checks for these (null check was processed above).
return false;
diff --git a/compiler/dex/pass_driver_me_opts.cc b/compiler/dex/pass_driver_me_opts.cc
index 8c8bde6..320d06a 100644
--- a/compiler/dex/pass_driver_me_opts.cc
+++ b/compiler/dex/pass_driver_me_opts.cc
@@ -45,6 +45,7 @@
pass_manager->AddPass(new BBCombine);
pass_manager->AddPass(new CodeLayout);
pass_manager->AddPass(new GlobalValueNumberingPass);
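+ // The DCE pass must directly follow GVN: it consumes the GVN data that
+ // ApplyGlobalValueNumberingEnd() keeps alive when the pass is enabled.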
+ pass_manager->AddPass(new DeadCodeEliminationPass);
pass_manager->AddPass(new ConstantPropagation);
pass_manager->AddPass(new MethodUseCount);
pass_manager->AddPass(new BBOptimizations);
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index 909077e..f399429 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -560,6 +560,7 @@
// (1 << kNullCheckElimination) |
// (1 << kClassInitCheckElimination) |
// (1 << kGlobalValueNumbering) |
+ // (1 << kGvnDeadCodeElimination) |
// (1 << kLocalValueNumbering) |
// (1 << kPromoteRegs) |
// (1 << kTrackLiveTemps) |
diff --git a/compiler/utils/dex_instruction_utils.h b/compiler/utils/dex_instruction_utils.h
index 2c6e525..bb2c592 100644
--- a/compiler/utils/dex_instruction_utils.h
+++ b/compiler/utils/dex_instruction_utils.h
@@ -110,6 +110,10 @@
return Instruction::AGET <= code && code <= Instruction::APUT_SHORT;
}
+constexpr bool IsInstructionBinOp2Addr(Instruction::Code code) {
+ return Instruction::ADD_INT_2ADDR <= code && code <= Instruction::REM_DOUBLE_2ADDR;
+}
+
// TODO: Remove the #if guards below when we fully migrate to C++14.
constexpr bool IsInvokeInstructionRange(Instruction::Code opcode) {