Add support for vex coding scheme in x86 assembler
This patch adds support to emit VEX prefix which is needed
to emit instructions, namely andn, blsmsk, blsr and blsi,
on a cpu that supports the BMI1 extension (these are VEX-encoded BMI1 instructions).
Test: ./test.py --host --64, test-art-host-gtest
Change-Id: I6b4902caf8560e4406c5053b142686ed28ba5404
Signed-off-by: Shalini Salomi Bodapati <shalini.salomi.bodapati@intel.com>
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index e696635..ff13ea3 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -543,6 +543,7 @@
void andps(XmmRegister dst, XmmRegister src); // no addr variant (for now)
void pand(XmmRegister dst, XmmRegister src);
+ void andn(CpuRegister dst, CpuRegister src1, CpuRegister src2);
void andnpd(XmmRegister dst, XmmRegister src); // no addr variant (for now)
void andnps(XmmRegister dst, XmmRegister src);
void pandn(XmmRegister dst, XmmRegister src);
@@ -796,6 +797,10 @@
void bsfq(CpuRegister dst, CpuRegister src);
void bsfq(CpuRegister dst, const Address& src);
+ void blsi(CpuRegister dst, CpuRegister src); // no addr variant (for now)
+ void blsmsk(CpuRegister dst, CpuRegister src); // no addr variant (for now)
+ void blsr(CpuRegister dst, CpuRegister src); // no addr variant (for now)
+
void bsrl(CpuRegister dst, CpuRegister src);
void bsrl(CpuRegister dst, const Address& src);
void bsrq(CpuRegister dst, CpuRegister src);
@@ -951,6 +956,11 @@
void EmitOptionalByteRegNormalizingRex32(CpuRegister dst, CpuRegister src);
void EmitOptionalByteRegNormalizingRex32(CpuRegister dst, const Operand& operand);
+ // Helpers to emit a 2 or 3 byte VEX prefix.
+ uint8_t EmitVexByteZero(bool is_two_byte);
+ uint8_t EmitVexByte1(bool r, bool x, bool b, int mmmmm);
+ uint8_t EmitVexByte2(bool w, int l, X86_64ManagedRegister operand, int pp);
+
ConstantArea constant_area_;
DISALLOW_COPY_AND_ASSIGN(X86_64Assembler);