Diffstat (limited to 'compiler')
-rw-r--r--  compiler/dex/mir_graph.cc                  80
-rw-r--r--  compiler/dex/quick/arm64/codegen_arm64.h    1
-rw-r--r--  compiler/dex/quick/arm64/fp_arm64.cc        6
-rw-r--r--  compiler/dex/quick/arm64/int_arm64.cc      26
-rw-r--r--  compiler/dex/quick/mir_to_lir.h             2
-rw-r--r--  compiler/utils/arm64/assembler_arm64.cc    24
-rw-r--r--  compiler/utils/arm64/assembler_arm64.h     28
7 files changed, 83 insertions, 84 deletions
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index edfd57fd8a..574b6ea66f 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -274,7 +274,7 @@ BasicBlock* MIRGraph::SplitBlock(DexOffset code_offset,
*/
BasicBlock* MIRGraph::FindBlock(DexOffset code_offset, bool split, bool create,
BasicBlock** immed_pred_block_p) {
- if (code_offset >= cu_->code_item->insns_size_in_code_units_) {
+ if (code_offset >= current_code_item_->insns_size_in_code_units_) {
return NULL;
}
@@ -348,10 +348,10 @@ bool MIRGraph::IsBadMonitorExitCatch(NarrowDexOffset monitor_exit_offset,
// (We don't want to ignore all monitor-exit catches since one could enclose a synchronized
// block in a try-block and catch the NPE, Error or Throwable and we should let it through;
// even though a throwing monitor-exit certainly indicates a bytecode error.)
- const Instruction* monitor_exit = Instruction::At(cu_->code_item->insns_ + monitor_exit_offset);
+ const Instruction* monitor_exit = Instruction::At(current_code_item_->insns_ + monitor_exit_offset);
DCHECK(monitor_exit->Opcode() == Instruction::MONITOR_EXIT);
int monitor_reg = monitor_exit->VRegA_11x();
- const Instruction* check_insn = Instruction::At(cu_->code_item->insns_ + catch_offset);
+ const Instruction* check_insn = Instruction::At(current_code_item_->insns_ + catch_offset);
DCHECK(check_insn->Opcode() == Instruction::MOVE_EXCEPTION);
if (check_insn->VRegA_11x() == monitor_reg) {
// Unexpected move-exception to the same register. Probably not the pattern we're looking for.
@@ -1228,8 +1228,6 @@ char* MIRGraph::GetDalvikDisassembly(const MIR* mir) {
bool nop = false;
SSARepresentation* ssa_rep = mir->ssa_rep;
Instruction::Format dalvik_format = Instruction::k10x; // Default to no-operand format.
- int defs = (ssa_rep != NULL) ? ssa_rep->num_defs : 0;
- int uses = (ssa_rep != NULL) ? ssa_rep->num_uses : 0;
// Handle special cases.
if ((opcode == kMirOpCheck) || (opcode == kMirOpCheckPart2)) {
@@ -1238,8 +1236,6 @@ char* MIRGraph::GetDalvikDisassembly(const MIR* mir) {
// Recover the original Dex instruction.
insn = mir->meta.throw_insn->dalvikInsn;
ssa_rep = mir->meta.throw_insn->ssa_rep;
- defs = ssa_rep->num_defs;
- uses = ssa_rep->num_uses;
opcode = insn.opcode;
} else if (opcode == kMirOpNop) {
str.append("[");
@@ -1248,6 +1244,8 @@ char* MIRGraph::GetDalvikDisassembly(const MIR* mir) {
opcode = insn.opcode;
nop = true;
}
+ int defs = (ssa_rep != NULL) ? ssa_rep->num_defs : 0;
+ int uses = (ssa_rep != NULL) ? ssa_rep->num_uses : 0;
if (MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
str.append(extended_mir_op_names_[opcode - kMirOpFirst]);
@@ -1259,40 +1257,21 @@ char* MIRGraph::GetDalvikDisassembly(const MIR* mir) {
if (opcode == kMirOpPhi) {
BasicBlockId* incoming = mir->meta.phi_incoming;
- str.append(StringPrintf(" %s = (%s",
- GetSSANameWithConst(ssa_rep->defs[0], true).c_str(),
- GetSSANameWithConst(ssa_rep->uses[0], true).c_str()));
- str.append(StringPrintf(":%d", incoming[0]));
- int i;
- for (i = 1; i < uses; i++) {
- str.append(StringPrintf(", %s:%d",
- GetSSANameWithConst(ssa_rep->uses[i], true).c_str(),
- incoming[i]));
- }
- str.append(")");
- } else if ((flags & Instruction::kBranch) != 0) {
- // For branches, decode the instructions to print out the branch targets.
- int offset = 0;
- switch (dalvik_format) {
- case Instruction::k21t:
- str.append(StringPrintf(" %s,", GetSSANameWithConst(ssa_rep->uses[0], false).c_str()));
- offset = insn.vB;
- break;
- case Instruction::k22t:
- str.append(StringPrintf(" %s, %s,", GetSSANameWithConst(ssa_rep->uses[0], false).c_str(),
- GetSSANameWithConst(ssa_rep->uses[1], false).c_str()));
- offset = insn.vC;
- break;
- case Instruction::k10t:
- case Instruction::k20t:
- case Instruction::k30t:
- offset = insn.vA;
- break;
- default:
- LOG(FATAL) << "Unexpected branch format " << dalvik_format << " from " << insn.opcode;
+ if (defs > 0 && uses > 0) {
+ str.append(StringPrintf(" %s = (%s",
+ GetSSANameWithConst(ssa_rep->defs[0], true).c_str(),
+ GetSSANameWithConst(ssa_rep->uses[0], true).c_str()));
+ str.append(StringPrintf(":%d", incoming[0]));
+ int i;
+ for (i = 1; i < uses; i++) {
+ str.append(StringPrintf(", %s:%d",
+ GetSSANameWithConst(ssa_rep->uses[i], true).c_str(),
+ incoming[i]));
+ }
+ str.append(")");
+ } else {
+ str.append(StringPrintf(" v%d", mir->dalvikInsn.vA));
}
- str.append(StringPrintf(" 0x%x (%c%x)", mir->offset + offset,
- offset > 0 ? '+' : '-', offset > 0 ? offset : -offset));
} else {
// For invokes-style formats, treat wide regs as a pair of singles.
bool show_singles = ((dalvik_format == Instruction::k35c) ||
@@ -1339,6 +1318,27 @@ char* MIRGraph::GetDalvikDisassembly(const MIR* mir) {
// Nothing left to print.
}
}
+ if ((flags & Instruction::kBranch) != 0) {
+ // For branches, decode the instructions to print out the branch targets.
+ int offset = 0;
+ switch (dalvik_format) {
+ case Instruction::k21t:
+ offset = insn.vB;
+ break;
+ case Instruction::k22t:
+ offset = insn.vC;
+ break;
+ case Instruction::k10t:
+ case Instruction::k20t:
+ case Instruction::k30t:
+ offset = insn.vA;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected branch format " << dalvik_format << " from " << insn.opcode;
+ }
+ str.append(StringPrintf(" 0x%x (%c%x)", mir->offset + offset,
+ offset > 0 ? '+' : '-', offset > 0 ? offset : -offset));
+ }
}
if (nop) {
str.append("]--optimized away");
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index 3e1c18baf4..be10dd702e 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -167,6 +167,7 @@ class Arm64Mir2Lir FINAL : public Mir2Lir {
bool GenInlinedRound(CallInfo* info, bool is_double) OVERRIDE;
bool GenInlinedPeek(CallInfo* info, OpSize size) OVERRIDE;
bool GenInlinedPoke(CallInfo* info, OpSize size) OVERRIDE;
+ bool GenInlinedAbsInt(CallInfo* info) OVERRIDE;
bool GenInlinedAbsLong(CallInfo* info) OVERRIDE;
bool GenInlinedArrayCopyCharArray(CallInfo* info) OVERRIDE;
void GenIntToLong(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
diff --git a/compiler/dex/quick/arm64/fp_arm64.cc b/compiler/dex/quick/arm64/fp_arm64.cc
index d0b2636453..5d63dd0ee2 100644
--- a/compiler/dex/quick/arm64/fp_arm64.cc
+++ b/compiler/dex/quick/arm64/fp_arm64.cc
@@ -353,7 +353,8 @@ bool Arm64Mir2Lir::GenInlinedAbsFloat(CallInfo* info) {
if (reg_class == kFPReg) {
NewLIR2(kA64Fabs2ff, rl_result.reg.GetReg(), rl_src.reg.GetReg());
} else {
- NewLIR4(kA64Ubfm4rrdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), 0, 30);
+ // Clear the sign bit in an integer register.
+ OpRegRegImm(kOpAnd, rl_result.reg, rl_src.reg, 0x7fffffff);
}
StoreValue(rl_dest, rl_result);
return true;
@@ -371,7 +372,8 @@ bool Arm64Mir2Lir::GenInlinedAbsDouble(CallInfo* info) {
if (reg_class == kFPReg) {
NewLIR2(FWIDE(kA64Fabs2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
} else {
- NewLIR4(WIDE(kA64Ubfm4rrdd), rl_result.reg.GetReg(), rl_src.reg.GetReg(), 0, 62);
+ // Clear the sign bit in an integer register.
+ OpRegRegImm64(kOpAnd, rl_result.reg, rl_src.reg, 0x7fffffffffffffff);
}
StoreValueWide(rl_dest, rl_result);
return true;
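Note: the two hunks above replace the UBFM encodings with a plain AND against the sign-bit mask when abs() of a float or double is computed in a core register. A minimal sketch of why masking works, assuming IEEE-754 bit layout (the helper names are illustrative, not ART code):

    #include <cstdint>
    #include <cstring>

    // Clearing bit 31 (float) or bit 63 (double) of the raw representation
    // drops the IEEE-754 sign bit, which is all that abs() needs.
    uint32_t AbsBitsFloat(uint32_t bits)  { return bits & 0x7fffffffu; }
    uint64_t AbsBitsDouble(uint64_t bits) { return bits & 0x7fffffffffffffffull; }

    float AbsFloat(float x) {
      uint32_t bits;
      std::memcpy(&bits, &x, sizeof(bits));  // reinterpret the value without UB
      bits = AbsBitsFloat(bits);
      std::memcpy(&x, &bits, sizeof(bits));
      return x;                              // AbsFloat(-1.5f) == 1.5f
    }
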
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index d1b9c81d09..e4a895e043 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -646,16 +646,32 @@ RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage r_src1, RegS
return rl_result;
}
+bool Arm64Mir2Lir::GenInlinedAbsInt(CallInfo* info) {
+ RegLocation rl_src = info->args[0];
+ rl_src = LoadValue(rl_src, kCoreReg);
+ RegLocation rl_dest = InlineTarget(info);
+ RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+
+ // Compare the source value with zero. Write the negated value to the result if
+ // negative, otherwise write the original value.
+ OpRegImm(kOpCmp, rl_src.reg, 0);
+ NewLIR4(kA64Csneg4rrrc, rl_result.reg.GetReg(), rl_src.reg.GetReg(), rl_src.reg.GetReg(),
+ kArmCondPl);
+ StoreValue(rl_dest, rl_result);
+ return true;
+}
+
bool Arm64Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
RegLocation rl_src = info->args[0];
rl_src = LoadValueWide(rl_src, kCoreReg);
RegLocation rl_dest = InlineTargetWide(info);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- RegStorage sign_reg = AllocTempWide();
- // abs(x) = y<=x>>63, (x+y)^y.
- OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 63);
- OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
- OpRegReg(kOpXor, rl_result.reg, sign_reg);
+
+ // Compare the source value with zero. Write the negated value to the result if
+ // negative, otherwise write the original value.
+ OpRegImm(kOpCmp, rl_src.reg, 0);
+ NewLIR4(WIDE(kA64Csneg4rrrc), rl_result.reg.GetReg(), rl_src.reg.GetReg(),
+ rl_src.reg.GetReg(), kArmCondPl);
StoreValueWide(rl_dest, rl_result);
return true;
}
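Note: GenInlinedAbsInt and GenInlinedAbsLong now both emit a compare against zero followed by CSNEG, replacing the shift/add/xor idiom the long variant used before. A rough C++ rendering of the two idioms, only to show they compute the same value under two's-complement wrap-around (function names are made up for illustration, and an arithmetic right shift for signed values is assumed):

    #include <cstdint>

    // Old idiom (removed above): y = x >> 63, abs(x) = (x + y) ^ y.
    // The add is done on uint64_t so the sketch keeps the hardware's
    // wrap-around behaviour without signed overflow.
    uint64_t AbsViaShiftXor(int64_t x) {
      uint64_t y = static_cast<uint64_t>(x >> 63);  // 0 or an all-ones mask
      return (static_cast<uint64_t>(x) + y) ^ y;
    }

    // New idiom: CMP sets the flags against zero, then CSNEG writes the
    // source when PL (non-negative) holds and its negation otherwise.
    uint64_t AbsViaSelect(int64_t x) {
      return x >= 0 ? static_cast<uint64_t>(x)
                    : 0u - static_cast<uint64_t>(x);
    }

    // Both agree for every input, including INT64_MIN, which wraps to itself.
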
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 573bd9143d..8e737280e0 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -964,7 +964,7 @@ class Mir2Lir : public Backend {
bool GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty);
virtual bool GenInlinedReverseBits(CallInfo* info, OpSize size);
bool GenInlinedReverseBytes(CallInfo* info, OpSize size);
- bool GenInlinedAbsInt(CallInfo* info);
+ virtual bool GenInlinedAbsInt(CallInfo* info);
virtual bool GenInlinedAbsLong(CallInfo* info);
virtual bool GenInlinedAbsFloat(CallInfo* info) = 0;
virtual bool GenInlinedAbsDouble(CallInfo* info) = 0;
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 3f90f21b66..3edf59be2a 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -21,6 +21,8 @@
#include "thread.h"
#include "utils.h"
+using namespace vixl; // NOLINT(build/namespaces)
+
namespace art {
namespace arm64 {
@@ -75,7 +77,7 @@ void Arm64Assembler::AddConstant(Register rd, int32_t value, Condition cond) {
void Arm64Assembler::AddConstant(Register rd, Register rn, int32_t value,
Condition cond) {
- if ((cond == AL) || (cond == NV)) {
+ if ((cond == al) || (cond == nv)) {
// VIXL macro-assembler handles all variants.
___ Add(reg_x(rd), reg_x(rn), value);
} else {
@@ -85,7 +87,7 @@ void Arm64Assembler::AddConstant(Register rd, Register rn, int32_t value,
temps.Exclude(reg_x(rd), reg_x(rn));
vixl::Register temp = temps.AcquireX();
___ Add(temp, reg_x(rn), value);
- ___ Csel(reg_x(rd), temp, reg_x(rd), COND_OP(cond));
+ ___ Csel(reg_x(rd), temp, reg_x(rd), cond);
}
}
@@ -195,7 +197,7 @@ void Arm64Assembler::StoreSpanning(FrameOffset dest_off, ManagedRegister m_sourc
// Load routines.
void Arm64Assembler::LoadImmediate(Register dest, int32_t value,
Condition cond) {
- if ((cond == AL) || (cond == NV)) {
+ if ((cond == al) || (cond == nv)) {
___ Mov(reg_x(dest), value);
} else {
// temp = value
@@ -205,9 +207,9 @@ void Arm64Assembler::LoadImmediate(Register dest, int32_t value,
temps.Exclude(reg_x(dest));
vixl::Register temp = temps.AcquireX();
___ Mov(temp, value);
- ___ Csel(reg_x(dest), temp, reg_x(dest), COND_OP(cond));
+ ___ Csel(reg_x(dest), temp, reg_x(dest), cond);
} else {
- ___ Csel(reg_x(dest), reg_x(XZR), reg_x(dest), COND_OP(cond));
+ ___ Csel(reg_x(dest), reg_x(XZR), reg_x(dest), cond);
}
}
}
@@ -557,11 +559,11 @@ void Arm64Assembler::CreateHandleScopeEntry(ManagedRegister m_out_reg, FrameOffs
}
___ Cmp(reg_w(in_reg.AsOverlappingCoreRegisterLow()), 0);
if (!out_reg.Equals(in_reg)) {
- LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
+ LoadImmediate(out_reg.AsCoreRegister(), 0, eq);
}
- AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offs.Int32Value(), NE);
+ AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offs.Int32Value(), ne);
} else {
- AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offs.Int32Value(), AL);
+ AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offs.Int32Value(), al);
}
}
@@ -577,9 +579,9 @@ void Arm64Assembler::CreateHandleScopeEntry(FrameOffset out_off, FrameOffset han
// e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
___ Cmp(reg_w(scratch.AsOverlappingCoreRegisterLow()), 0);
// Move this logic in add constants with flags.
- AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
+ AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), ne);
} else {
- AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
+ AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), al);
}
StoreToOffset(scratch.AsCoreRegister(), SP, out_off.Int32Value());
}
@@ -593,7 +595,7 @@ void Arm64Assembler::LoadReferenceFromHandleScope(ManagedRegister m_out_reg,
vixl::Label exit;
if (!out_reg.Equals(in_reg)) {
// FIXME: Who sets the flags here?
- LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
+ LoadImmediate(out_reg.AsCoreRegister(), 0, eq);
}
___ Cbz(reg_x(in_reg.AsCoreRegister()), &exit);
LoadFromOffset(out_reg.AsCoreRegister(), in_reg.AsCoreRegister(), 0);
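Note: with the local Condition enum gone (see the header hunk below), AddConstant and LoadImmediate take vixl::Condition directly: al/nv keep the unconditional Add/Mov path, anything else goes through a scratch register and Csel. A schematic of what the conditional AddConstant computes, written as plain C++ rather than assembler (the names are illustrative):

    #include <cstdint>

    // rd keeps its old contents when the condition fails; otherwise it
    // receives rn + value.  That is exactly Csel(rd, temp, rd, cond) after
    // an Add into the scratch register.
    uint64_t ConditionalAdd(uint64_t rd, uint64_t rn, int32_t value,
                            bool cond_holds) {
      uint64_t temp = rn + static_cast<uint64_t>(static_cast<int64_t>(value));
      return cond_holds ? temp : rd;
    }
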
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index ab4999a2bc..788950b0b4 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -34,28 +34,6 @@ namespace art {
namespace arm64 {
#define MEM_OP(x...) vixl::MemOperand(x)
-#define COND_OP(x) static_cast<vixl::Condition>(x)
-
-enum Condition {
- kNoCondition = -1,
- EQ = 0,
- NE = 1,
- HS = 2,
- LO = 3,
- MI = 4,
- PL = 5,
- VS = 6,
- VC = 7,
- HI = 8,
- LS = 9,
- GE = 10,
- LT = 11,
- GT = 12,
- LE = 13,
- AL = 14, // Always.
- NV = 15, // Behaves as always/al.
- kMaxCondition = 16,
-};
enum LoadOperandType {
kLoadSignedByte,
@@ -225,15 +203,15 @@ class Arm64Assembler FINAL : public Assembler {
void StoreSToOffset(SRegister source, Register base, int32_t offset);
void StoreDToOffset(DRegister source, Register base, int32_t offset);
- void LoadImmediate(Register dest, int32_t value, Condition cond = AL);
+ void LoadImmediate(Register dest, int32_t value, vixl::Condition cond = vixl::al);
void Load(Arm64ManagedRegister dst, Register src, int32_t src_offset, size_t size);
void LoadWFromOffset(LoadOperandType type, WRegister dest,
Register base, int32_t offset);
void LoadFromOffset(Register dest, Register base, int32_t offset);
void LoadSFromOffset(SRegister dest, Register base, int32_t offset);
void LoadDFromOffset(DRegister dest, Register base, int32_t offset);
- void AddConstant(Register rd, int32_t value, Condition cond = AL);
- void AddConstant(Register rd, Register rn, int32_t value, Condition cond = AL);
+ void AddConstant(Register rd, int32_t value, vixl::Condition cond = vixl::al);
+ void AddConstant(Register rd, Register rn, int32_t value, vixl::Condition cond = vixl::al);
// Vixl buffer.
byte* vixl_buf_;