1 /* ----------------------------------------------------------------------- *
3 * Copyright 1996-2012 The NASM Authors - All Rights Reserved
4 * See the file AUTHORS included with the NASM distribution for
5 * the specific copyright holders.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
19 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
20 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
29 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
30 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * ----------------------------------------------------------------------- */
35 * disasm.c where all the _work_ gets done in the Netwide Disassembler
54 * Flags that go into the `segment' field of `insn' structures
/* NOTE(review): the intermediate SEG_* flag definitions (values between 1
 * and 128) appear to be elided from this listing; only the first and last
 * of the group survive here. */
57 #define SEG_RELATIVE 1
64 #define SEG_SIGNED 128
/* Decoded prefix state collected by the prefix-scanning loop in disasm()
 * and consumed by matches().  These are the fields of struct prefix_info;
 * the struct's opening/closing lines (and at least one field, vex_v, which
 * is referenced later in this file) are not visible in this excerpt. */
71 uint8_t osize; /* Operand size */
72 uint8_t asize; /* Address size */
73 uint8_t osp; /* Operand size prefix present */
74 uint8_t asp; /* Address size prefix present */
75 uint8_t rep; /* Rep prefix present */
76 uint8_t seg; /* Segment override prefix present */
77 uint8_t wait; /* WAIT "prefix" present */
78 uint8_t lock; /* Lock prefix present */
79 uint8_t vex[3]; /* VEX prefix present */
80 uint8_t vex_c; /* VEX "class" (VEX, XOP, ...) */
81 uint8_t vex_m; /* VEX.M field */
83 uint8_t vex_lp; /* VEX.LP fields */
84 uint32_t rex; /* REX prefix present */
85 uint8_t evex[3]; /* EVEX prefix present */
/* Unaligned little-endian fetches from the instruction byte stream.
 * Two implementations are selected by an (elided) preprocessor conditional:
 * direct pointer casts for CPUs that tolerate unaligned access, and
 * byte-by-byte composition otherwise.  The gets*() macros reinterpret the
 * same bits as signed values. */
88 #define getu8(x) (*(uint8_t *)(x))
90 /* Littleendian CPU which can handle unaligned references */
91 #define getu16(x) (*(uint16_t *)(x))
92 #define getu32(x) (*(uint32_t *)(x))
93 #define getu64(x) (*(uint64_t *)(x))
/* Fallback arm: portable byte-wise composition (function bodies' braces are
 * elided in this listing). */
95 static uint16_t getu16(uint8_t *data)
97 return (uint16_t)data[0] + ((uint16_t)data[1] << 8);
99 static uint32_t getu32(uint8_t *data)
101 return (uint32_t)getu16(data) + ((uint32_t)getu16(data+2) << 16);
103 static uint64_t getu64(uint8_t *data)
105 return (uint64_t)getu32(data) + ((uint64_t)getu32(data+4) << 32);
/* Signed views of the same fetches. */
109 #define gets8(x) ((int8_t)getu8(x))
110 #define gets16(x) ((int16_t)getu16(x))
111 #define gets32(x) ((int32_t)getu32(x))
112 #define gets64(x) ((int64_t)getu64(x))
/* Map a register-class flag set plus a numeric register value (already
 * adjusted for REX/EVEX extension bits by the caller) to a reg_enum.
 * Returns 0 when the flags do not describe a register class, or when the
 * value is out of range for the class.  Several interior lines (the
 * specific_registers table contents, some early-out branches, and the
 * closing of the function) are elided from this listing. */
114 /* Important: regval must already have been adjusted for rex extensions */
115 static enum reg_enum whichreg(opflags_t regflags, int regval, int rex)
119 static const struct {
122 } specific_registers[] = {
148 if (!(regflags & (REGISTER|REGMEM)))
149 return 0; /* Registers not permissible?! */
151 regflags |= REGISTER;
/* First try single-register matches (AL, AX, ES, ...) from the table. */
153 for (i = 0; i < ARRAY_SIZE(specific_registers); i++)
154 if (!(specific_registers[i].flags & ~regflags))
155 return specific_registers[i].reg;
157 /* All the entries below look up regval in a 16-entry array (32 entries when EVEX extends the register file) */
158 if (regval < 0 || regval > (rex & REX_EV ? 31 : 15))
161 if (!(REG8 & ~regflags)) {
/* With any REX present (or high-byte-forbidding flag), the alternate
 * 8-bit register set (SPL/BPL/SIL/DIL) applies. */
162 if (rex & (REX_P|REX_NH))
163 return nasm_rd_reg8_rex[regval];
165 return nasm_rd_reg8[regval];
167 if (!(REG16 & ~regflags))
168 return nasm_rd_reg16[regval];
169 if (!(REG32 & ~regflags))
170 return nasm_rd_reg32[regval];
171 if (!(REG64 & ~regflags))
172 return nasm_rd_reg64[regval];
173 if (!(REG_SREG & ~regflags))
174 return nasm_rd_sreg[regval & 7]; /* Ignore REX */
175 if (!(REG_CREG & ~regflags))
176 return nasm_rd_creg[regval];
177 if (!(REG_DREG & ~regflags))
178 return nasm_rd_dreg[regval];
179 if (!(REG_TREG & ~regflags)) {
181 return 0; /* TR registers are ill-defined with rex */
182 return nasm_rd_treg[regval];
184 if (!(FPUREG & ~regflags))
185 return nasm_rd_fpureg[regval & 7]; /* Ignore REX */
186 if (!(MMXREG & ~regflags))
187 return nasm_rd_mmxreg[regval & 7]; /* Ignore REX */
188 if (!(XMMREG & ~regflags))
189 return nasm_rd_xmmreg[regval];
190 if (!(YMMREG & ~regflags))
191 return nasm_rd_ymmreg[regval];
192 if (!(ZMMREG & ~regflags))
193 return nasm_rd_zmmreg[regval];
194 if (!(OPMASKREG & ~regflags))
195 return nasm_rd_opmaskreg[regval];
196 if (!(BNDREG & ~regflags))
197 return nasm_rd_bndreg[regval];
/* Append EVEX register-operand decorations ({k1}, {z}, rounding/SAE) to
 * buf, which already holds `num_chars' characters of a `num'-byte buffer.
 * Returns the number of characters written.  `evex' is the 3-byte EVEX
 * prefix payload.  Several lines (format strings of the first two
 * snprintf calls, closing braces) are elided from this listing. */
202 static uint32_t append_evex_reg_deco(char *buf, uint32_t num,
203 decoflags_t deco, uint8_t *evex)
205 const char * const er_names[] = {"rn-sae", "rd-sae", "ru-sae", "rz-sae"};
206 uint32_t num_chars = 0;
/* Opmask decoration: EVEX.aaa selects k1..k7. */
208 if ((deco & MASK) && (evex[2] & EVEX_P2AAA)) {
209 enum reg_enum opmasknum = nasm_rd_opmaskreg[evex[2] & EVEX_P2AAA];
210 const char * regname = nasm_reg_names[opmasknum - EXPR_REG_START];
212 num_chars += snprintf(buf + num_chars, num - num_chars,
/* Zeroing-masking decoration. */
215 if ((deco & Z) && (evex[2] & EVEX_P2Z)) {
216 num_chars += snprintf(buf + num_chars, num - num_chars,
/* EVEX.b on a register operand means embedded rounding control,
 * encoded in the L'L bits; otherwise SAE alone may apply. */
221 if (evex[2] & EVEX_P2B) {
223 uint8_t er_type = (evex[2] & EVEX_P2LL) >> 5;
224 num_chars += snprintf(buf + num_chars, num - num_chars,
225 ",{%s}", er_names[er_type]);
226 } else if (deco & SAE) {
227 num_chars += snprintf(buf + num_chars, num - num_chars,
/* Append EVEX memory-operand decorations ({1toN} broadcast, {k1}, {z})
 * to buf.  `type' carries the operand's size flags, used to compute the
 * broadcast element count.  Returns characters written.  Format strings
 * and closing braces are elided from this listing. */
235 static uint32_t append_evex_mem_deco(char *buf, uint32_t num, opflags_t type,
236 decoflags_t deco, uint8_t *evex)
238 uint32_t num_chars = 0;
/* Broadcast decoration: element count = vector size / element size. */
240 if ((evex[2] & EVEX_P2B) && (deco & BRDCAST_MASK)) {
241 decoflags_t deco_brsize = deco & BRSIZE_MASK;
242 opflags_t template_opsize = (deco_brsize == BR_BITS32 ? BITS32 : BITS64);
243 uint8_t br_num = (type & SIZE_MASK) / BITS128 *
244 BITS64 / template_opsize * 2;
246 num_chars += snprintf(buf + num_chars, num - num_chars,
/* Opmask decoration, as in append_evex_reg_deco(). */
250 if ((deco & MASK) && (evex[2] & EVEX_P2AAA)) {
251 enum reg_enum opmasknum = nasm_rd_opmaskreg[evex[2] & EVEX_P2AAA];
252 const char * regname = nasm_reg_names[opmasknum - EXPR_REG_START];
254 num_chars += snprintf(buf + num_chars, num - num_chars,
/* Zeroing-masking decoration. */
257 if ((deco & Z) && (evex[2] & EVEX_P2Z)) {
258 num_chars += snprintf(buf + num_chars, num - num_chars,
268 * Process an effective address (ModRM) specification.
/* Decode a ModRM (and optional SIB/displacement) effective address into
 * *op, advancing and returning the data pointer.  Handles 16-bit, 32-bit
 * and 64-bit addressing, RIP-relative forms, VSIB (gather/scatter index
 * registers), and EVEX compressed disp8.  Many interior lines (variable
 * declarations such as rex/evex/sib, case labels, closing braces, and the
 * error-return paths) are elided from this listing. */
270 static uint8_t *do_ea(uint8_t *data, int modrm, int asize,
271 int segsize, enum ea_type type,
272 operand *op, insn *ins)
274 int mod, rm, scale, index, base;
278 bool is_evex = !!(ins->rex & REX_EV);
280 mod = (modrm >> 6) & 03;
/* mod!=3 with rm==4 in 32/64-bit addressing implies a SIB byte follows. */
283 if (mod != 3 && asize != 16 && rm == 4)
289 if (mod == 3) { /* pure register version */
290 op->basereg = rm+(rex & REX_B ? 8 : 0);
291 op->segment |= SEG_RMREG;
/* EVEX.X extends the register number to 16..31 in 64-bit mode. */
292 if (is_evex && segsize == 64) {
293 op->basereg += (evex[0] & EVEX_P0X ? 0 : 16);
/* --- 16-bit addressing --- */
303 * <mod> specifies the displacement size (none, byte or
304 * word), and <rm> specifies the register combination.
305 * Exception: mod=0,rm=6 does not specify [BP] as one might
306 * expect, but instead specifies [disp16].
/* VSIB is impossible in 16-bit addressing. */
309 if (type != EA_SCALAR)
312 op->indexreg = op->basereg = -1;
313 op->scale = 1; /* always, in 16 bits */
344 if (rm == 6 && mod == 0) { /* special case */
348 mod = 2; /* fake disp16 */
352 op->segment |= SEG_NODISP;
355 op->segment |= SEG_DISP8;
/* EVEX tuple in effect: disp8 is scaled by the compressed-disp factor. */
356 if (ins->evex_tuple != 0) {
357 op->offset = gets8(data) * get_disp8N(ins);
359 op->offset = gets8(data);
364 op->segment |= SEG_DISP16;
365 op->offset = *data++;
366 op->offset |= ((unsigned)*data++) << 8;
/* --- 32/64-bit addressing --- */
372 * Once again, <mod> specifies displacement size (this time
373 * none, byte or *dword*), while <rm> specifies the base
374 * register. Again, [EBP] is missing, replaced by a pure
375 * disp32 (this time that's mod=0,rm=*5*) in 32-bit mode,
376 * and RIP-relative addressing in 64-bit mode.
379 * indicates not a single base register, but instead the
380 * presence of a SIB byte...
382 int a64 = asize == 64;
387 op->basereg = nasm_rd_reg64[rm | ((rex & REX_B) ? 8 : 0)];
389 op->basereg = nasm_rd_reg32[rm | ((rex & REX_B) ? 8 : 0)];
391 if (rm == 5 && mod == 0) {
/* 64-bit mode: mod=0,rm=5 is RIP-relative disp32. */
393 op->eaflags |= EAF_REL;
394 op->segment |= SEG_RELATIVE;
395 mod = 2; /* fake disp32 */
399 op->disp_size = asize;
402 mod = 2; /* fake disp32 */
406 if (rm == 4) { /* process SIB */
408 scale = (sib >> 6) & 03;
409 index = (sib >> 3) & 07;
412 op->scale = 1 << scale;
/* VSIB index registers can reach 16..31 via REX.X + EVEX.V'. */
415 vsib_hi = (rex & REX_X ? 8 : 0) |
416 (evex[2] & EVEX_P2VP ? 0 : 16);
419 if (type == EA_XMMVSIB)
420 op->indexreg = nasm_rd_xmmreg[index | vsib_hi];
421 else if (type == EA_YMMVSIB)
422 op->indexreg = nasm_rd_ymmreg[index | vsib_hi];
423 else if (type == EA_ZMMVSIB)
424 op->indexreg = nasm_rd_zmmreg[index | vsib_hi];
425 else if (index == 4 && !(rex & REX_X))
426 op->indexreg = -1; /* ESP/RSP cannot be an index */
428 op->indexreg = nasm_rd_reg64[index | ((rex & REX_X) ? 8 : 0)];
430 op->indexreg = nasm_rd_reg32[index | ((rex & REX_X) ? 8 : 0)];
/* SIB base=5 with mod=0: no base register, disp32 follows. */
432 if (base == 5 && mod == 0) {
434 mod = 2; /* Fake disp32 */
436 op->basereg = nasm_rd_reg64[base | ((rex & REX_B) ? 8 : 0)];
438 op->basereg = nasm_rd_reg32[base | ((rex & REX_B) ? 8 : 0)];
442 } else if (type != EA_SCALAR) {
443 /* Can't have VSIB without SIB */
449 op->segment |= SEG_NODISP;
452 op->segment |= SEG_DISP8;
453 if (ins->evex_tuple != 0) {
454 op->offset = gets8(data) * get_disp8N(ins);
456 op->offset = gets8(data);
461 op->segment |= SEG_DISP32;
462 op->offset = gets32(data);
471 * Determine whether the instruction template in t corresponds to the data
472 * stream in data. Return the number of bytes matched if so.
/* Expands to four consecutive case labels; used because template byte
 * codes encode an operand index in their low two bits. */
474 #define case4(x) case (x): case (x)+1: case (x)+2: case (x)+3
/* Interpret the template's compiled byte-code program (t->code) against
 * the instruction bytes, filling in *ins on success and returning the
 * number of bytes consumed, or 0/false on mismatch.  This listing elides
 * the switch(c) skeleton, most case labels, and many branches; what
 * remains is the body statements of the individual byte-code handlers. */
476 static int matches(const struct itemplate *t, uint8_t *data,
477 const struct prefix_info *prefix, int segsize, insn *ins)
479 uint8_t *r = (uint8_t *)(t->code);
480 uint8_t *origdata = data;
481 bool a_used = false, o_used = false;
482 enum prefixes drep = 0;
483 enum prefixes dwait = 0;
484 uint8_t lock = prefix->lock;
485 int osize = prefix->osize;
486 int asize = prefix->asize;
489 struct operand *opx, *opy;
492 int regmask = (segsize == 64) ? 15 : 7;
493 enum ea_type eat = EA_SCALAR;
/* Seed each operand's segment/disp_size with the default address size. */
495 for (i = 0; i < MAX_OPERANDS; i++) {
496 ins->oprs[i].segment = ins->oprs[i].disp_size =
497 (segsize == 64 ? SEG_64BIT : segsize == 32 ? SEG_32BIT : 0);
501 ins->rex = prefix->rex;
502 memset(ins->prefixes, 0, sizeof ins->prefixes);
504 if (itemp_has(t, (segsize == 64 ? IF_NOLONG : IF_LONG)))
/* Default interpretation of REP prefixes if the template doesn't use them. */
507 if (prefix->rep == 0xF2)
508 drep = (itemp_has(t, IF_BND) ? P_BND : P_REPNE);
509 else if (prefix->rep == 0xF3)
512 dwait = prefix->wait ? P_WAIT : 0;
/* Main byte-code interpreter loop. */
514 while ((c = *r++) != 0) {
515 op1 = (c & 3) + ((opex & 1) << 2);
516 op2 = ((c >> 3) & 3) + ((opex & 2) << 1);
517 opx = &ins->oprs[op1];
518 opy = &ins->oprs[op2];
/* Opcode byte with register number embedded in the low 3 bits. */
539 int t = *r++, d = *data++;
540 if (d < t || d > t + 7)
543 opx->basereg = (d-t)+
544 (ins->rex & REX_B ? 8 : 0);
545 opx->segment |= SEG_RMREG;
551 /* this is a separate index reg position of MIB operand (ICC) */
552 /* Disassembler uses NASM's split EA form only */
/* Immediate operand handlers of various widths/signedness follow. */
556 opx->offset = (int8_t)*data++;
557 opx->segment |= SEG_SIGNED;
561 opx->offset = *data++;
565 opx->offset = *data++;
569 opx->offset = getu16(data);
575 opx->offset = getu32(data);
578 opx->offset = getu16(data);
581 if (segsize != asize)
582 opx->disp_size = asize;
586 opx->offset = getu32(data);
591 opx->offset = gets32(data);
598 opx->offset = getu16(data);
604 opx->offset = getu32(data);
610 opx->offset = getu64(data);
/* Relative jump targets. */
618 opx->offset = gets8(data++);
619 opx->segment |= SEG_RELATIVE;
623 opx->offset = getu64(data);
628 opx->offset = gets16(data);
630 opx->segment |= SEG_RELATIVE;
631 opx->segment &= ~SEG_32BIT;
634 case4(064): /* rel */
635 opx->segment |= SEG_RELATIVE;
636 /* In long mode rel is always 32 bits, sign extended. */
637 if (segsize == 64 || osize == 32) {
638 opx->offset = gets32(data);
641 opx->segment |= SEG_32BIT;
642 opx->type = (opx->type & ~SIZE_MASK)
643 | (segsize == 64 ? BITS64 : BITS32);
645 opx->offset = gets16(data);
647 opx->segment &= ~SEG_32BIT;
648 opx->type = (opx->type & ~SIZE_MASK) | BITS16;
653 opx->offset = gets32(data);
655 opx->segment |= SEG_32BIT | SEG_RELATIVE;
/* ModRM: reg field in opx, r/m effective address in opy. */
664 opx->segment |= SEG_RMREG;
665 data = do_ea(data, modrm, asize, segsize, eat, opy, ins);
668 opx->basereg = ((modrm >> 3) & 7) + (ins->rex & REX_R ? 8 : 0);
669 if ((ins->rex & REX_EV) && (segsize == 64))
670 opx->basereg += (ins->evex_p[0] & EVEX_P0RP ? 0 : 16);
/* "is4" style immediates: register in high nibble, selector in low. */
676 uint8_t ximm = *data++;
678 ins->oprs[c >> 3].basereg = (ximm >> 4) & regmask;
679 ins->oprs[c >> 3].segment |= SEG_RMREG;
680 ins->oprs[c & 7].offset = ximm & 15;
686 uint8_t ximm = *data++;
692 ins->oprs[c >> 4].basereg = (ximm >> 4) & regmask;
693 ins->oprs[c >> 4].segment |= SEG_RMREG;
699 uint8_t ximm = *data++;
701 opx->basereg = (ximm >> 4) & regmask;
702 opx->segment |= SEG_RMREG;
/* ModRM with a fixed "spare" (reg) field required by the template. */
716 if (((modrm >> 3) & 07) != (c & 07))
717 return false; /* spare field doesn't match up */
718 data = do_ea(data, modrm, asize, segsize, eat, opy, ins);
/* EVEX template descriptor: check the decoded EVEX prefix fields. */
727 uint8_t evexm = *r++;
728 uint8_t evexwlp = *r++;
729 ins->evex_tuple = *r++ - 0300;
732 if ((prefix->rex & (REX_EV|REX_V|REX_P)) != REX_EV)
735 if ((evexm & 0x1f) != prefix->vex_m)
738 switch (evexwlp & 060) {
740 if (prefix->rex & REX_W)
744 if (!(prefix->rex & REX_W))
748 case 040: /* VEX.W is a don't care */
755 /* If EVEX.b is set, EVEX.L'L can be rounding control bits */
756 if ((evexwlp ^ prefix->vex_lp) &
757 ((prefix->evex[2] & EVEX_P2B) ? 0x03 : 0x0f))
761 if ((prefix->vex_v != 0) ||
762 (!(prefix->evex[2] & EVEX_P2VP) &&
763 ((eat < EA_XMMVSIB) || (eat > EA_ZMMVSIB))))
765 opx->segment |= SEG_RMREG;
766 opx->basereg = ((~prefix->evex[2] & EVEX_P2VP) << (4 - 3) ) |
771 memcpy(ins->evex_p, prefix->evex, 3);
/* VEX template descriptor: analogous checks against the VEX prefix. */
782 if ((prefix->rex & (REX_V|REX_P)) != REX_V)
785 if ((vexm & 0x1f) != prefix->vex_m)
788 switch (vexwlp & 060) {
790 if (prefix->rex & REX_W)
794 if (!(prefix->rex & REX_W))
798 case 040: /* VEX.W is a don't care */
805 /* The 010 bit of vexwlp is set if VEX.L is ignored */
806 if ((vexwlp ^ prefix->vex_lp) & ((vexwlp & 010) ? 03 : 07))
810 if (prefix->vex_v != 0)
813 opx->segment |= SEG_RMREG;
814 opx->basereg = prefix->vex_v;
/* Prefix-consumption checks (REP/REPNE/LOCK as mandatory prefixes). */
821 if (prefix->rep == 0xF3)
826 if (prefix->rep == 0xF2)
828 else if (prefix->rep == 0xF3)
833 if (prefix->lock == 0xF0) {
834 if (prefix->rep == 0xF2)
836 else if (prefix->rep == 0xF3)
856 if (asize != segsize)
/* REX bit requirements. */
870 if (prefix->rex & REX_B)
875 if (prefix->rex & REX_X)
880 if (prefix->rex & REX_R)
885 if (prefix->rex & REX_W)
/* NOTE(review): apparent precedence bug — this parses as
 * ((osize != (segsize == 16)) ? 16 : 32), which is always nonzero/true.
 * Compare the correctly parenthesized form at source line 1064 below;
 * the intent is almost certainly (osize != ((segsize == 16) ? 16 : 32)). */
904 if (osize != (segsize == 16) ? 16 : 32)
911 ins->rex |= REX_W; /* 64-bit only instruction */
/* Conditional-opcode byte: low 4 bits select the condition code. */
928 int t = *r++, d = *data++;
929 if (d < t || d > t + 15)
932 ins->condition = d - t;
937 if (prefix->rep == 0xF3)
947 if (prefix->rep != 0xF2)
953 if (prefix->rep != 0xF3)
978 if (prefix->wait != 0x9B)
984 if (prefix->osp || prefix->rep)
989 if (!prefix->osp || prefix->rep)
1033 return false; /* Unknown code */
/* Post-loop validation. */
1037 if (!vex_ok && (ins->rex & (REX_V | REX_EV)))
1040 /* REX cannot be combined with VEX */
1041 if ((ins->rex & REX_V) && (prefix->rex & REX_P))
1045 * Check for unused rep or a/o prefixes.
1047 for (i = 0; i < t->operands; i++) {
1048 if (ins->oprs[i].segment != SEG_RMREG)
1053 if (ins->prefixes[PPS_LOCK])
1055 ins->prefixes[PPS_LOCK] = P_LOCK;
1058 if (ins->prefixes[PPS_REP])
1060 ins->prefixes[PPS_REP] = drep;
1062 ins->prefixes[PPS_WAIT] = dwait;
1064 if (osize != ((segsize == 16) ? 16 : 32)) {
1065 enum prefixes pfx = 0;
1079 if (ins->prefixes[PPS_OSIZE])
1081 ins->prefixes[PPS_OSIZE] = pfx;
1084 if (!a_used && asize != segsize) {
1085 if (ins->prefixes[PPS_ASIZE])
1087 ins->prefixes[PPS_ASIZE] = asize == 16 ? P_A16 : P_A32;
1090 /* Fix: check for redundant REX prefixes */
1092 return data - origdata;
1095 /* Condition names for disassembly, sorted by x86 code */
/* NOTE(review): the table's closing "};" line is elided from this listing. */
1096 static const char * const condition_name[16] = {
1097 "o", "no", "c", "nc", "z", "nz", "na", "a",
1098 "s", "ns", "pe", "po", "l", "nl", "ng", "g"
/* Disassemble one instruction starting at `data' into the text buffer
 * `output' (capacity outbufsize).  segsize is the code segment bit size
 * (16/32/64), offset the instruction's address for relative-target math.
 * Returns the instruction length in bytes (0 when nothing matched).
 * Large parts of this function — the prefix-scan switch skeleton, several
 * declarations, the case labels around the VEX/EVEX/XOP decoders, and the
 * final segment-override fixup loop — are elided from this listing. */
1101 int32_t disasm(uint8_t *data, char *output, int outbufsize, int segsize,
1102 int32_t offset, int autosync, iflag_t *prefer)
1104 const struct itemplate * const *p, * const *best_p;
1105 const struct disasm_index *ix;
1107 int length, best_length = 0;
1109 int i, slen, colon, n;
1113 iflag_t goodness, best;
1115 struct prefix_info prefix;
1119 memset(&ins, 0, sizeof ins);
1122 * Scan for prefixes.
1124 memset(&prefix, 0, sizeof prefix);
1125 prefix.asize = segsize;
1126 prefix.osize = (segsize == 64) ? 32 : segsize;
1133 while (!end_prefix) {
/* Legacy prefixes: record each and keep scanning. */
1137 prefix.rep = *data++;
1141 prefix.wait = *data++;
1145 prefix.lock = *data++;
1149 segover = "cs", prefix.seg = *data++;
1152 segover = "ss", prefix.seg = *data++;
1155 segover = "ds", prefix.seg = *data++;
1158 segover = "es", prefix.seg = *data++;
1161 segover = "fs", prefix.seg = *data++;
1164 segover = "gs", prefix.seg = *data++;
1168 prefix.osize = (segsize == 16) ? 32 : 16;
1169 prefix.osp = *data++;
1172 prefix.asize = (segsize == 32) ? 16 : 32;
1173 prefix.asp = *data++;
/* C4/C5: VEX prefix (only when unambiguous outside 64-bit mode). */
1178 if (segsize == 64 || (data[1] & 0xc0) == 0xc0) {
1179 prefix.vex[0] = *data++;
1180 prefix.vex[1] = *data++;
1183 prefix.vex_c = RV_VEX;
1185 if (prefix.vex[0] == 0xc4) {
/* 3-byte VEX: full R/X/B, W, map and vvvv fields. */
1186 prefix.vex[2] = *data++;
1187 prefix.rex |= (~prefix.vex[1] >> 5) & 7; /* REX_RXB */
1188 prefix.rex |= (prefix.vex[2] >> (7-3)) & REX_W;
1189 prefix.vex_m = prefix.vex[1] & 0x1f;
1190 prefix.vex_v = (~prefix.vex[2] >> 3) & 15;
1191 prefix.vex_lp = prefix.vex[2] & 7;
/* 2-byte VEX (C5): only R is carried; map is implicitly 1. */
1193 prefix.rex |= (~prefix.vex[1] >> (7-2)) & REX_R;
1195 prefix.vex_v = (~prefix.vex[1] >> 3) & 15;
1196 prefix.vex_lp = prefix.vex[1] & 7;
1199 ix = itable_vex[RV_VEX][prefix.vex_m][prefix.vex_lp & 3];
/* 62h: EVEX prefix. */
1206 uint8_t evex_p0 = data[1] & 0x0f;
1207 if (segsize == 64 ||
1208 ((evex_p0 >= 0x01) && (evex_p0 <= 0x03))) {
1209 data++; /* 62h EVEX prefix */
1210 prefix.evex[0] = *data++;
1211 prefix.evex[1] = *data++;
1212 prefix.evex[2] = *data++;
1214 prefix.rex = REX_EV;
1215 prefix.vex_c = RV_EVEX;
1216 prefix.rex |= (~prefix.evex[0] >> 5) & 7; /* REX_RXB */
1217 prefix.rex |= (prefix.evex[1] >> (7-3)) & REX_W;
1218 prefix.vex_m = prefix.evex[0] & EVEX_P0MM;
1219 prefix.vex_v = (~prefix.evex[1] & EVEX_P1VVVV) >> 3;
1220 prefix.vex_lp = ((prefix.evex[2] & EVEX_P2LL) >> (5-2)) |
1221 (prefix.evex[1] & EVEX_P1PP);
1223 ix = itable_vex[prefix.vex_c][prefix.vex_m][prefix.vex_lp & 3];
/* 8F: XOP prefix (distinguished from the POP Ev opcode by the map bits). */
1230 if ((data[1] & 030) != 0 &&
1231 (segsize == 64 || (data[1] & 0xc0) == 0xc0)) {
1232 prefix.vex[0] = *data++;
1233 prefix.vex[1] = *data++;
1234 prefix.vex[2] = *data++;
1237 prefix.vex_c = RV_XOP;
1239 prefix.rex |= (~prefix.vex[1] >> 5) & 7; /* REX_RXB */
1240 prefix.rex |= (prefix.vex[2] >> (7-3)) & REX_W;
1241 prefix.vex_m = prefix.vex[1] & 0x1f;
1242 prefix.vex_v = (~prefix.vex[2] >> 3) & 15;
1243 prefix.vex_lp = prefix.vex[2] & 7;
1245 ix = itable_vex[RV_XOP][prefix.vex_m][prefix.vex_lp & 3];
/* 40h-4Fh: REX, valid only in 64-bit mode and ends the prefix scan. */
1266 if (segsize == 64) {
1267 prefix.rex = *data++;
1268 if (prefix.rex & REX_W)
/* Walk the opcode index tree and try every candidate template. */
1280 iflag_set_all(&best); /* Worst possible */
1282 best_pref = INT_MAX;
1285 return 0; /* No instruction table at all... */
1289 while (ix->n == -1) {
1290 ix = (const struct disasm_index *)ix->p + *dp++;
1293 p = (const struct itemplate * const *)ix->p;
1294 for (n = ix->n; n; n--, p++) {
1295 if ((length = matches(*p, data, &prefix, segsize, &tmp_ins))) {
1298 * Final check to make sure the types of r/m match up.
1299 * XXX: Need to make sure this is actually correct.
1301 for (i = 0; i < (*p)->operands; i++) {
1303 /* If it's a mem-only EA but we have a
1305 ((tmp_ins.oprs[i].segment & SEG_RMREG) &&
1306 is_class(MEMORY, (*p)->opd[i])) ||
1307 /* If it's a reg-only EA but we have a memory
1309 (!(tmp_ins.oprs[i].segment & SEG_RMREG) &&
1310 !(REG_EA & ~(*p)->opd[i]) &&
1311 !((*p)->opd[i] & REG_SMASK)) ||
1312 /* Register type mismatch (eg FS vs REG_DESS):
1314 ((((*p)->opd[i] & (REGISTER | FPUREG)) ||
1315 (tmp_ins.oprs[i].segment & SEG_RMREG)) &&
1316 !whichreg((*p)->opd[i],
1317 tmp_ins.oprs[i].basereg, tmp_ins.rex))
1325 * Note: we always prefer instructions which incorporate
1326 * prefixes in the instructions themselves. This is to allow
1327 * e.g. PAUSE to be preferred to REP NOP, and deal with
1328 * MMX/SSE instructions where prefixes are used to select
1329 * between MMX and SSE register sets or outright opcode
1334 goodness = iflag_pfmask(*p);
1335 goodness = iflag_xor(&goodness, prefer);
1337 for (i = 0; i < MAXPREFIX; i++)
1338 if (tmp_ins.prefixes[i])
1340 if (nprefix < best_pref ||
1341 (nprefix == best_pref &&
1342 iflag_cmp(&goodness, &best) < 0)) {
1343 /* This is the best one found so far */
1346 best_pref = nprefix;
1347 best_length = length;
1355 return 0; /* no instruction was matched */
1357 /* Pick the best match */
1359 length = best_length;
/* --- Text generation: prefixes, mnemonic, then operands --- */
1363 /* TODO: snprintf returns the value that the string would have if
1364 * the buffer were long enough, and not the actual length of
1365 * the returned string, so each instance of using the return
1366 * value of snprintf should actually be checked to assure that
1367 * the return value is "sane." Maybe a macro wrapper could
1368 * be used for that purpose.
1370 for (i = 0; i < MAXPREFIX; i++) {
1371 const char *prefix = prefix_name(ins.prefixes[i]);
1373 slen += snprintf(output+slen, outbufsize-slen, "%s ", prefix);
1377 if (i >= FIRST_COND_OPCODE)
1378 slen += snprintf(output + slen, outbufsize - slen, "%s%s",
1379 nasm_insn_names[i], condition_name[ins.condition]);
1381 slen += snprintf(output + slen, outbufsize - slen, "%s",
1382 nasm_insn_names[i]);
1385 is_evex = !!(ins.rex & REX_EV);
1386 length += data - origdata; /* fix up for prefixes */
1387 for (i = 0; i < (*p)->operands; i++) {
1388 opflags_t t = (*p)->opd[i];
1389 decoflags_t deco = (*p)->deco[i];
1390 const operand *o = &ins.oprs[i];
1393 output[slen++] = (colon ? ':' : i == 0 ? ' ' : ',');
1396 if (o->segment & SEG_RELATIVE) {
1397 offs += offset + length;
1399 * sort out wraparound
1401 if (!(o->segment & (SEG_32BIT|SEG_64BIT)))
1403 else if (segsize != 64)
1407 * add sync marker, if autosync is on
/* Register operand. */
1418 if ((t & (REGISTER | FPUREG)) ||
1419 (o->segment & SEG_RMREG)) {
1421 reg = whichreg(t, o->basereg, ins.rex);
1423 slen += snprintf(output + slen, outbufsize - slen, "to ");
1424 slen += snprintf(output + slen, outbufsize - slen, "%s",
1425 nasm_reg_names[reg-EXPR_REG_START]);
1426 if (is_evex && deco)
1427 slen += append_evex_reg_deco(output + slen, outbufsize - slen,
/* Immediate operand (with size keywords and sign handling). */
1429 } else if (!(UNITY & ~t)) {
1430 output[slen++] = '1';
1431 } else if (t & IMMEDIATE) {
1434 snprintf(output + slen, outbufsize - slen, "byte ");
1435 if (o->segment & SEG_SIGNED) {
1438 output[slen++] = '-';
1440 output[slen++] = '+';
1442 } else if (t & BITS16) {
1444 snprintf(output + slen, outbufsize - slen, "word ");
1445 } else if (t & BITS32) {
1447 snprintf(output + slen, outbufsize - slen, "dword ");
1448 } else if (t & BITS64) {
1450 snprintf(output + slen, outbufsize - slen, "qword ");
1451 } else if (t & NEAR) {
1453 snprintf(output + slen, outbufsize - slen, "near ");
1454 } else if (t & SHORT) {
1456 snprintf(output + slen, outbufsize - slen, "short ");
1459 snprintf(output + slen, outbufsize - slen, "0x%"PRIx64"",
/* Plain memory-offset operand ([seg:0x...]). */
1461 } else if (!(MEM_OFFS & ~t)) {
1463 snprintf(output + slen, outbufsize - slen,
1464 "[%s%s%s0x%"PRIx64"]",
1465 (segover ? segover : ""),
1466 (segover ? ":" : ""),
1467 (o->disp_size == 64 ? "qword " :
1468 o->disp_size == 32 ? "dword " :
1469 o->disp_size == 16 ? "word " : ""), offs);
/* General effective-address operand. */
1471 } else if (is_class(REGMEM, t)) {
1472 int started = false;
1475 snprintf(output + slen, outbufsize - slen, "byte ");
1478 snprintf(output + slen, outbufsize - slen, "word ");
1481 snprintf(output + slen, outbufsize - slen, "dword ");
1484 snprintf(output + slen, outbufsize - slen, "qword ");
1487 snprintf(output + slen, outbufsize - slen, "tword ");
1488 if ((ins.evex_p[2] & EVEX_P2B) && (deco & BRDCAST_MASK)) {
1489 /* when broadcasting, each element size should be used */
1490 if (deco & BR_BITS32)
1492 snprintf(output + slen, outbufsize - slen, "dword ");
1493 else if (deco & BR_BITS64)
1495 snprintf(output + slen, outbufsize - slen, "qword ");
1499 snprintf(output + slen, outbufsize - slen, "oword ");
1502 snprintf(output + slen, outbufsize - slen, "yword ");
1505 snprintf(output + slen, outbufsize - slen, "zword ");
1508 slen += snprintf(output + slen, outbufsize - slen, "far ");
1511 snprintf(output + slen, outbufsize - slen, "near ");
1512 output[slen++] = '[';
1514 slen += snprintf(output + slen, outbufsize - slen, "%s",
1515 (o->disp_size == 64 ? "qword " :
1516 o->disp_size == 32 ? "dword " :
1517 o->disp_size == 16 ? "word " :
1519 if (o->eaflags & EAF_REL)
1520 slen += snprintf(output + slen, outbufsize - slen, "rel ");
1523 snprintf(output + slen, outbufsize - slen, "%s:",
1527 if (o->basereg != -1) {
1528 slen += snprintf(output + slen, outbufsize - slen, "%s",
1529 nasm_reg_names[(o->basereg-EXPR_REG_START)]);
1532 if (o->indexreg != -1 && !itemp_has(*best_p, IF_MIB)) {
1534 output[slen++] = '+';
1535 slen += snprintf(output + slen, outbufsize - slen, "%s",
1536 nasm_reg_names[(o->indexreg-EXPR_REG_START)]);
1539 snprintf(output + slen, outbufsize - slen, "*%d",
/* Displacement printing: width depends on the SEG_DISP* flag. */
1545 if (o->segment & SEG_DISP8) {
1548 uint32_t offset = offs;
1549 if ((int32_t)offset < 0) {
1556 snprintf(output + slen, outbufsize - slen, "%s0x%"PRIx32"",
1560 uint8_t offset = offs;
1561 if ((int8_t)offset < 0) {
1568 snprintf(output + slen, outbufsize - slen, "%s0x%"PRIx8"",
1571 } else if (o->segment & SEG_DISP16) {
1573 uint16_t offset = offs;
1574 if ((int16_t)offset < 0 && started) {
1578 prefix = started ? "+" : "";
1581 snprintf(output + slen, outbufsize - slen,
1582 "%s0x%"PRIx16"", prefix, offset);
1583 } else if (o->segment & SEG_DISP32) {
1584 if (prefix.asize == 64) {
1586 uint64_t offset = (int64_t)(int32_t)offs;
1587 if ((int32_t)offs < 0 && started) {
1591 prefix = started ? "+" : "";
1594 snprintf(output + slen, outbufsize - slen,
1595 "%s0x%"PRIx64"", prefix, offset);
1598 uint32_t offset = offs;
1599 if ((int32_t) offset < 0 && started) {
1603 prefix = started ? "+" : "";
1606 snprintf(output + slen, outbufsize - slen,
1607 "%s0x%"PRIx32"", prefix, offset);
/* MIB operands print the index register after the displacement. */
1611 if (o->indexreg != -1 && itemp_has(*best_p, IF_MIB)) {
1612 output[slen++] = ',';
1613 slen += snprintf(output + slen, outbufsize - slen, "%s",
1614 nasm_reg_names[(o->indexreg-EXPR_REG_START)]);
1617 snprintf(output + slen, outbufsize - slen, "*%d",
1622 output[slen++] = ']';
1624 if (is_evex && deco)
1625 slen += append_evex_mem_deco(output + slen, outbufsize - slen,
1626 t, deco, ins.evex_p);
1629 snprintf(output + slen, outbufsize - slen, "<operand%d>",
1633 output[slen] = '\0';
/* A segment override that no operand consumed is prepended to the text.
 * NOTE(review): the shifting loop here is partially elided; `p' appears to
 * be reused as a character pointer in the elided lines — confirm against
 * the full source before touching this. */
1634 if (segover) { /* unused segment override */
1636 int count = slen + 1;
1638 p[count + 3] = p[count];
1639 strncpy(output, segover, 2);
1646 * This is called when we don't have a complete instruction. If it
1647 * is a standalone *single-byte* prefix show it as such, otherwise
1648 * print it as a literal.
/* Emit one byte as a prefix mnemonic (o16/a32/rex.* ...) or as `db 0xNN'.
 * The switch skeleton over `byte', most prefix cases, and the function's
 * tail (including its return) are elided from this listing; the function
 * also continues past the end of this excerpt. */
1650 int32_t eatbyte(uint8_t *data, char *output, int outbufsize, int segsize)
1652 uint8_t byte = *data;
1653 const char *str = NULL;
/* Operand/address-size prefixes name the *non-default* size for the mode. */
1687 str = (segsize == 16) ? "o32" : "o16";
1690 str = (segsize == 32) ? "a16" : "a32";
/* 40h-4Fh: spell out a standalone REX prefix in 64-bit mode only. */
1708 if (segsize == 64) {
1709 snprintf(output, outbufsize, "rex%s%s%s%s%s",
1710 (byte == REX_P) ? "" : ".",
1711 (byte & REX_W) ? "w" : "",
1712 (byte & REX_R) ? "r" : "",
1713 (byte & REX_X) ? "x" : "",
1714 (byte & REX_B) ? "b" : "");
1717 /* else fall through */
1719 snprintf(output, outbufsize, "db 0x%02x", byte);
1724 snprintf(output, outbufsize, "%s", str);