 #define Src2GS      (OpGS << Src2Shift)
 #define Src2Mask    (OpMask << Src2Shift)
 #define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
+#define AlignMask   ((u64)7 << 41)
 #define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
-#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
-#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
+#define Unaligned   ((u64)2 << 41)  /* Explicitly unaligned (e.g. MOVDQU) */
+#define Avx         ((u64)3 << 41)  /* Advanced Vector Extensions */
+#define Aligned16   ((u64)4 << 41)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
 #define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
 #define NoWrite     ((u64)1 << 45)  /* No writeback */
 #define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
 #define NearBranch  ((u64)1 << 52)  /* Near branches */
 #define No16        ((u64)1 << 53)  /* No 16 bit operand */
 #define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
-#define Aligned16   ((u64)1 << 55)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
 
 #define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
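The re-encoding above packs the four mutually exclusive alignment types into a single three-bit field at bits 41-43, masked by AlignMask, instead of spending one flag bit per type (the old bits 41-43 plus bit 55). Since at most one alignment type applies to an instruction, ctxt->d & AlignMask yields exactly one of the four values (or 0 when none is set), which is what allows the switch in insn_alignment below. A minimal standalone sketch of the extraction, with the flag values copied from the defines above (the Fastop bit stands in for any unrelated flag sharing the same u64):

/* Sketch: extracting the alignment class from the packed flags word. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

#define AlignMask ((u64)7 << 41)
#define Aligned   ((u64)1 << 41)
#define Unaligned ((u64)2 << 41)
#define Avx       ((u64)3 << 41)
#define Aligned16 ((u64)4 << 41)
#define Fastop    ((u64)1 << 44)	/* unrelated flag; must not leak into the field */

int main(void)
{
	u64 d = Avx | Fastop;

	switch (d & AlignMask) {	/* the mask isolates bits 41-43 */
	case Avx:
		printf("Avx\n");	/* printed: Fastop lies outside the field */
		break;
	case Aligned16:
		printf("Aligned16\n");
		break;
	default:
		printf("other\n");
	}
	return 0;
}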
 static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
 {
+	u64 alignment = ctxt->d & AlignMask;
+
 	if (likely(size < 16))
 		return 1;
 
-	if (ctxt->d & Aligned)
-		return size;
-	else if (ctxt->d & Unaligned)
-		return 1;
-	else if (ctxt->d & Avx)
+	switch (alignment) {
+	case Unaligned:
+	case Avx:
 		return 1;
-	else if (ctxt->d & Aligned16)
+	case Aligned16:
 		return 16;
-	else
+	case Aligned:
+	default:
 		return size;
+	}
 }
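For the pre-existing flags the switch is behaviourally equivalent to the old if/else chain; the addition is Aligned16, which requests a fixed 16-byte alignment independent of operand size (FXSAVE and FXRSTOR access a 512-byte area, but only 16-byte alignment of its base is architecturally required). A reduced, compilable sketch of the new function, with the context struct cut down to the one field read here and likely() dropped (it is only a branch hint); the assertions spell out the intended contract:

/* Sketch: the rewritten alignment lookup as a standalone test. */
#include <assert.h>
#include <stdint.h>

typedef uint64_t u64;

#define AlignMask ((u64)7 << 41)
#define Aligned   ((u64)1 << 41)
#define Unaligned ((u64)2 << 41)
#define Avx       ((u64)3 << 41)
#define Aligned16 ((u64)4 << 41)

struct x86_emulate_ctxt { u64 d; };	/* reduced to the field used here */

static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	u64 alignment = ctxt->d & AlignMask;

	if (size < 16)			/* likely() omitted in this sketch */
		return 1;

	switch (alignment) {
	case Unaligned:
	case Avx:
		return 1;
	case Aligned16:
		return 16;
	case Aligned:
	default:
		return size;
	}
}

int main(void)
{
	struct x86_emulate_ctxt ctxt;

	ctxt.d = Aligned16;
	assert(insn_alignment(&ctxt, 512) == 16);	/* FXSAVE area: 16, not 512 */

	ctxt.d = Avx;
	assert(insn_alignment(&ctxt, 32) == 1);		/* VEX forms: no alignment check */

	ctxt.d = 0;
	assert(insn_alignment(&ctxt, 16) == 16);	/* default: operand-size alignment */

	ctxt.d = Aligned;
	assert(insn_alignment(&ctxt, 8) == 1);		/* sub-16-byte accesses: unchecked */
	return 0;
}

The caller, __linearize below, turns the returned power of two into a #GP check, along the lines of "if (la & (insn_alignment(ctxt, size) - 1)) return emulate_gp(ctxt, 0);", so a return value of 1 means no check at all.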
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,