Diffstat (limited to 'compiler')
-rw-r--r--  compiler/dex/mir_graph.cc                          33
-rw-r--r--  compiler/dex/quick/x86/assemble_x86.cc              2
-rw-r--r--  compiler/dex/quick/x86/quick_assemble_x86_test.cc   7
-rw-r--r--  compiler/optimizing/nodes.cc                       14
-rw-r--r--  compiler/optimizing/nodes.h                         2
5 files changed, 52 insertions(+), 6 deletions(-)
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index b5c42f11ac..9e3fbbc967 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -291,8 +291,12 @@ BasicBlock* MIRGraph::SplitBlock(DexOffset code_offset,
BasicBlock* MIRGraph::FindBlock(DexOffset code_offset, bool create,
BasicBlock** immed_pred_block_p,
ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
- if (code_offset >= current_code_item_->insns_size_in_code_units_) {
- return nullptr;
+ if (UNLIKELY(code_offset >= current_code_item_->insns_size_in_code_units_)) {
+ // There can be a fall-through out of the method code. We record such a block
+ // here (assuming create == true) and check that it is dead at the end of
+ // InlineMethod(). The only cases we know of have code_offset exactly equal to
+ // insns_size_in_code_units_, but treat a greater code_offset the same just in case.
+ code_offset = current_code_item_->insns_size_in_code_units_;
}
int block_id = (*dex_pc_to_block_map)[code_offset];
@@ -483,6 +487,7 @@ BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffs
BasicBlock* taken_block = FindBlock(target, /* create */ true,
/* immed_pred_block_p */ &cur_block,
dex_pc_to_block_map);
+ DCHECK(taken_block != nullptr);
cur_block->taken = taken_block->id;
taken_block->predecessors.push_back(cur_block->id);
@@ -494,6 +499,7 @@ BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffs
/* immed_pred_block_p */
&cur_block,
dex_pc_to_block_map);
+ DCHECK(fallthrough_block != nullptr);
cur_block->fall_through = fallthrough_block->id;
fallthrough_block->predecessors.push_back(cur_block->id);
} else if (code_ptr < code_end) {
@@ -508,7 +514,8 @@ BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffs
ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
UNUSED(flags);
const uint16_t* switch_data =
- reinterpret_cast<const uint16_t*>(GetCurrentInsns() + cur_offset + insn->dalvikInsn.vB);
+ reinterpret_cast<const uint16_t*>(GetCurrentInsns() + cur_offset +
+ static_cast<int32_t>(insn->dalvikInsn.vB));
int size;
const int* keyTable;
const int* target_table;
@@ -561,6 +568,7 @@ BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffs
BasicBlock* case_block = FindBlock(cur_offset + target_table[i], /* create */ true,
/* immed_pred_block_p */ &cur_block,
dex_pc_to_block_map);
+ DCHECK(case_block != nullptr);
SuccessorBlockInfo* successor_block_info =
static_cast<SuccessorBlockInfo*>(arena_->Alloc(sizeof(SuccessorBlockInfo),
kArenaAllocSuccessor));
@@ -576,6 +584,7 @@ BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffs
BasicBlock* fallthrough_block = FindBlock(cur_offset + width, /* create */ true,
/* immed_pred_block_p */ nullptr,
dex_pc_to_block_map);
+ DCHECK(fallthrough_block != nullptr);
cur_block->fall_through = fallthrough_block->id;
fallthrough_block->predecessors.push_back(cur_block->id);
return cur_block;
@@ -709,8 +718,8 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
// FindBlock lookup cache.
ScopedArenaAllocator allocator(&cu_->arena_stack);
ScopedArenaVector<uint16_t> dex_pc_to_block_map(allocator.Adapter());
- dex_pc_to_block_map.resize(dex_pc_to_block_map.size() +
- current_code_item_->insns_size_in_code_units_);
+ dex_pc_to_block_map.resize(current_code_item_->insns_size_in_code_units_ +
+ 1 /* Fall-through on last insn; dead or punt to interpreter. */);
// TODO: replace with explicit resize routine. Using automatic extension side effect for now.
try_block_addr_->SetBit(current_code_item_->insns_size_in_code_units_);
@@ -876,6 +885,20 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
if (cu_->verbose) {
DumpMIRGraph();
}
+
+ // Check if there's been a fall-through out of the method code.
+ BasicBlockId out_bb_id = dex_pc_to_block_map[current_code_item_->insns_size_in_code_units_];
+ if (UNLIKELY(out_bb_id != NullBasicBlockId)) {
+ // Eagerly calculate DFS order to determine if the block is dead.
+ DCHECK(!DfsOrdersUpToDate());
+ ComputeDFSOrders();
+ BasicBlock* out_bb = GetBasicBlock(out_bb_id);
+ DCHECK(out_bb != nullptr);
+ if (out_bb->block_type != kDead) {
+ LOG(WARNING) << "Live fall-through out of method in " << PrettyMethod(method_idx, dex_file);
+ SetPuntToInterpreter(true);
+ }
+ }
}
void MIRGraph::ShowOpcodeStats() {
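The mir_graph.cc change is the substance of the fix: a branch whose fall-through lands exactly at insns_size_in_code_units_ used to make FindBlock() return nullptr, which callers such as ProcessCanBranch() then dereferenced without a null check (hence the new DCHECKs). Now the offset is clamped, the out-of-method block is recorded in one extra map slot, and InlineMethod() punts to the interpreter if that block turns out to be live. A minimal standalone model of the pattern (ToyCfg and all of its members are invented for this sketch; this is not ART code):

#include <cstdint>
#include <vector>

// Standalone model, not ART code: block id 0 means "no block"; branch
// targets at or past the end of the method all map to one extra slot.
struct ToyCfg {
  explicit ToyCfg(uint16_t insns_size)
      : dex_pc_to_block(insns_size + 1u, 0), next_id(1) {}

  // Mirrors the clamping added to FindBlock(): an out-of-range offset is
  // recorded in the sentinel slot instead of being silently dropped.
  uint16_t FindOrCreateBlock(uint32_t offset) {
    uint32_t size = static_cast<uint32_t>(dex_pc_to_block.size() - 1);
    if (offset >= size) {
      offset = size;
    }
    if (dex_pc_to_block[offset] == 0) {
      dex_pc_to_block[offset] = next_id++;
    }
    return dex_pc_to_block[offset];
  }

  // Mirrors the end-of-InlineMethod() check: a reachable block in the
  // sentinel slot means control flow really runs off the end of the
  // method, so the compiler must bail out rather than miscompile.
  bool FallsThroughOutOfMethod(const std::vector<bool>& reachable) const {
    uint16_t out_id = dex_pc_to_block.back();
    return out_id != 0 && out_id < reachable.size() && reachable[out_id];
  }

  std::vector<uint16_t> dex_pc_to_block;
  uint16_t next_id;
};

The two essentials are sizing the map to insns_size + 1 so the sentinel slot exists at all, and deferring the live-or-dead decision until the whole CFG (and its DFS order) is available.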
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index 934fa3509c..8467b718a1 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -428,7 +428,7 @@ ENCODING_MAP(Cmp, IS_LOAD, 0, 0,
{ kX86PextrwRRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0x66, 0, 0x0F, 0xC5, 0x00, 0, 0, 1, false }, "PextwRRI", "!0r,!1r,!2d" },
{ kX86PextrdRRI, kRegRegImmStore, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0x66, 0, 0x0F, 0x3A, 0x16, 0, 0, 1, false }, "PextdRRI", "!0r,!1r,!2d" },
{ kX86PextrbMRI, kMemRegImm, IS_QUAD_OP | REG_USE02 | IS_STORE, { 0x66, 0, 0x0F, 0x3A, 0x16, 0, 0, 1, false }, "PextrbMRI", "[!0r+!1d],!2r,!3d" },
- { kX86PextrwMRI, kMemRegImm, IS_QUAD_OP | REG_USE02 | IS_STORE, { 0x66, 0, 0x0F, 0x3A, 0x16, 0, 0, 1, false }, "PextrwMRI", "[!0r+!1d],!2r,!3d" },
+ { kX86PextrwMRI, kMemRegImm, IS_QUAD_OP | REG_USE02 | IS_STORE, { 0x66, 0, 0x0F, 0x3A, 0x15, 0, 0, 1, false }, "PextrwMRI", "[!0r+!1d],!2r,!3d" },
{ kX86PextrdMRI, kMemRegImm, IS_QUAD_OP | REG_USE02 | IS_STORE, { 0x66, 0, 0x0F, 0x3A, 0x16, 0, 0, 1, false }, "PextrdMRI", "[!0r+!1d],!2r,!3d" },
{ kX86PshuflwRRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0xF2, 0, 0x0F, 0x70, 0, 0, 0, 1, false }, "PshuflwRRI", "!0r,!1r,!2d" },
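The one-byte assemble_x86.cc change is an opcode fix: per the Intel SDM, the memory-destination form of PEXTRW is encoded 66 0F 3A 15 /r ib, while 66 0F 3A 16 is PEXTRD, so the old kX86PextrwMRI row emitted a doubleword extract instead of a word extract. Hand-assembling the 32-bit case exercised by the new test below gives the following bytes (written out purely for illustration; this array is not part of the change):

#include <cstdint>

// Expected encoding of "pextrw $7, %xmm3, 8(%eax)" (SSE4.1 memory form).
const uint8_t kPextrwMem32[] = {
    0x66,              // mandatory operand-size prefix for this opcode
    0x0F, 0x3A, 0x15,  // three-byte opcode: PEXTRW r/m16, xmm, imm8
    0x58,              // ModRM: mod=01 (disp8), reg=xmm3, rm=eax
    0x08,              // disp8 = 8
    0x07,              // imm8: word index 7
};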
diff --git a/compiler/dex/quick/x86/quick_assemble_x86_test.cc b/compiler/dex/quick/x86/quick_assemble_x86_test.cc
index 36339f72e4..f58f206af5 100644
--- a/compiler/dex/quick/x86/quick_assemble_x86_test.cc
+++ b/compiler/dex/quick/x86/quick_assemble_x86_test.cc
@@ -180,6 +180,13 @@ TEST_F(QuickAssembleX86LowLevelTest, Mulpd) {
RegStorage::Solo128(0).GetReg(), RegStorage::Solo128(1).GetReg());
}
+TEST_F(QuickAssembleX86LowLevelTest, Pextrw) {
+ Test(kX86, "Pextrw", "pextrw $7, %xmm3, 8(%eax)\n", kX86PextrwMRI,
+ RegStorage::Solo32(r0).GetReg(), 8, RegStorage::Solo128(3).GetReg(), 7);
+ Test(kX86_64, "Pextrw", "pextrw $7, %xmm8, 8(%r10)\n", kX86PextrwMRI,
+ RegStorage::Solo64(r10q).GetReg(), 8, RegStorage::Solo128(8).GetReg(), 7);
+}
+
class QuickAssembleX86MacroTest : public QuickAssembleX86TestBase {
protected:
typedef void (X86Mir2Lir::*AsmFn)(MIR*);
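In the 64-bit variant of the new test, xmm8 and r10 are high registers, so the encoding additionally needs a REX prefix (REX.R for the register operand, REX.B for the base) placed between the 66 prefix and the opcode bytes. Hand-assembled, again only as an illustration:

#include <cstdint>

// Expected encoding of "pextrw $7, %xmm8, 8(%r10)".
const uint8_t kPextrwMem64[] = {
    0x66,              // operand-size prefix
    0x45,              // REX.R (xmm8) | REX.B (r10)
    0x0F, 0x3A, 0x15,  // PEXTRW r/m16, xmm, imm8
    0x42,              // ModRM: mod=01 (disp8), reg=xmm8 & 7, rm=r10 & 7
    0x08,              // disp8 = 8
    0x07,              // imm8: word index 7
};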
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index e2eb46aabb..699987c05e 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -449,6 +449,20 @@ void HBasicBlock::InsertInstructionBefore(HInstruction* instruction, HInstructio
instructions_.InsertInstructionBefore(instruction, cursor);
}
+void HBasicBlock::InsertInstructionAfter(HInstruction* instruction, HInstruction* cursor) {
+ DCHECK(!cursor->IsPhi());
+ DCHECK(!instruction->IsPhi());
+ DCHECK_EQ(instruction->GetId(), -1);
+ DCHECK_NE(cursor->GetId(), -1);
+ DCHECK_EQ(cursor->GetBlock(), this);
+ DCHECK(!instruction->IsControlFlow());
+ DCHECK(!cursor->IsControlFlow());
+ instruction->SetBlock(this);
+ instruction->SetId(GetGraph()->GetNextInstructionId());
+ UpdateInputsUsers(instruction);
+ instructions_.InsertInstructionAfter(instruction, cursor);
+}
+
void HBasicBlock::InsertPhiAfter(HPhi* phi, HPhi* cursor) {
DCHECK_EQ(phi->GetId(), -1);
DCHECK_NE(cursor->GetId(), -1);
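The new HBasicBlock::InsertInstructionAfter() mirrors the existing InsertInstructionBefore(): the DCHECKs require a fresh instruction (id still -1), a cursor that is already numbered and belongs to this block, and that neither one is a phi or control flow, since nothing may be inserted after a block's terminator. A standalone doubly-linked-list model of the insertion itself, using invented Node/List names rather than the ART types:

#include <cassert>

// Standalone model, not the ART classes.
struct Node {
  Node* prev = nullptr;
  Node* next = nullptr;
  int id = -1;  // -1 means "not yet in a block", as with HInstruction.
};

struct List {
  Node* first = nullptr;
  Node* last = nullptr;
  int next_id = 0;

  // Mirrors InsertInstructionAfter(): node must be fresh, cursor must
  // already be linked into this list.
  void InsertAfter(Node* node, Node* cursor) {
    assert(node->id == -1);    // DCHECK_EQ(instruction->GetId(), -1)
    assert(cursor->id != -1);  // DCHECK_NE(cursor->GetId(), -1)
    node->id = next_id++;
    node->prev = cursor;
    node->next = cursor->next;
    cursor->next = node;
    if (node->next != nullptr) {
      node->next->prev = node;
    } else {
      last = node;  // cursor was the tail; keep the tail pointer valid.
    }
  }
};

The only subtle case is inserting after the last instruction, where the tail pointer must be updated, the mirror image of the head update that the Before variant has to handle.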
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index f64086e607..3fe23e1816 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -620,7 +620,9 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
void DisconnectAndDelete();
void AddInstruction(HInstruction* instruction);
+ // Insert `instruction` before/after an existing instruction `cursor`.
void InsertInstructionBefore(HInstruction* instruction, HInstruction* cursor);
+ void InsertInstructionAfter(HInstruction* instruction, HInstruction* cursor);
// Replace instruction `initial` with `replacement` within this block.
void ReplaceAndRemoveInstructionWith(HInstruction* initial,
HInstruction* replacement);