Redo x86 int arithmetic

Make Mir2Lir::GenArithOpInt virtual, and implement an x86-specific
version of it that can use memory operands and exploit the fact that
x86 has (mostly) two-operand instructions. Remove the x86-specific
code from the generic version.
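
For context, a minimal sketch of the dispatch pattern, with
hypothetical simplified signatures (the real GenArithOpInt operates on
RegLocations and emits LIR rather than printing):

    #include <cstdio>

    // Hypothetical stand-in for the real MIR opcode type.
    enum class OpKind { kAdd, kSub };

    class Mir2Lir {
     public:
      virtual ~Mir2Lir() = default;
      // Generic path: assumes three-operand, register-only arithmetic.
      virtual void GenArithOpInt(OpKind op, int dest, int src1, int src2) {
        std::printf("generic: r%d = r%d op r%d\n", dest, src1, src2);
      }
    };

    class X86Mir2Lir : public Mir2Lir {
     public:
      // x86 path: copy src1 into dest, then apply the two-operand form
      // (e.g. add r32, r/m32), letting the second operand stay in memory.
      void GenArithOpInt(OpKind op, int dest, int src1, int src2) override {
        std::printf("x86: r%d = r%d; then r%d op= r/m%d\n",
                    dest, src1, dest, src2);
      }
    };
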
Add StoreFinalValue (the non-wide counterpart of StoreFinalValueWide)
to handle the non-wide cases. Add some x86 helper routines to simplify
code generation.
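
One of the new helpers, EmitShiftMemCl (in the diff below), encodes a
shift of a 32-bit memory operand by the CL register. As a standalone
sketch of the byte layout such an instruction uses (plain x86 encoding,
not ART's actual encoder; the base register and displacement are made
up for illustration):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<uint8_t> code;
      const uint8_t kShiftByClOpcode = 0xD3;  // shift r/m32 by CL
      const uint8_t kShlExtension = 4;        // ModRM /4 selects SHL
      const uint8_t kBaseEbx = 3;             // rm encoding of EBX
      const int8_t kDisp = 0x10;              // made-up displacement
      code.push_back(kShiftByClOpcode);
      // mod=01 (8-bit displacement), reg=/4 extension, rm=base register.
      code.push_back((1 << 6) | (kShlExtension << 3) | kBaseEbx);
      code.push_back(static_cast<uint8_t>(kDisp));
      // Prints "d3 63 10", i.e. shl dword ptr [ebx + 0x10], cl.
      for (uint8_t b : code) std::printf("%02x ", b);
      std::printf("\n");
      return 0;
    }
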
Change-Id: I6c13689c6da981f2570ab5af7a97f9816108b7ae
Signed-off-by: Mark Mendell <mark.p.mendell@intel.com>
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index c29d6c4..916bc7a 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -863,6 +863,20 @@
DCHECK_EQ(0, entry->skeleton.immediate_bytes);
}
+void X86Mir2Lir::EmitShiftMemCl(const X86EncodingMap* entry, uint8_t base,
+ int displacement, uint8_t cl) {
+ DCHECK_EQ(cl, static_cast<uint8_t>(rCX));
+ EmitPrefix(entry);
+ code_buffer_.push_back(entry->skeleton.opcode);
+ DCHECK_NE(0x0F, entry->skeleton.opcode);
+ DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+ DCHECK_LT(base, 8);
+ EmitModrmDisp(entry->skeleton.modrm_opcode, base, displacement);
+ DCHECK_EQ(0, entry->skeleton.ax_opcode);
+ DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
void X86Mir2Lir::EmitRegCond(const X86EncodingMap* entry, uint8_t reg, uint8_t condition) {
if (entry->skeleton.prefix1 != 0) {
code_buffer_.push_back(entry->skeleton.prefix1);
@@ -1230,6 +1244,9 @@
case kShiftRegCl: // lir operands - 0: reg, 1: cl
EmitShiftRegCl(entry, lir->operands[0], lir->operands[1]);
break;
+ case kShiftMemCl: // lir operands - 0: base, 1: displacement, 2: cl
+ EmitShiftMemCl(entry, lir->operands[0], lir->operands[1], lir->operands[2]);
+ break;
case kRegCond: // lir operands - 0: reg, 1: condition
EmitRegCond(entry, lir->operands[0], lir->operands[1]);
break;