1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * x86 instruction analysis
5 * Copyright (C) IBM Corporation, 2002, 2004, 2009
8 #include <linux/kernel.h>
10 #include <linux/string.h>
14 #include <asm/inat.h> /* __ignore_sync_check__ */
15 #include <asm/insn.h> /* __ignore_sync_check__ */
17 #include <linux/errno.h>
18 #include <linux/kconfig.h>
20 #include <asm/emulate_prefix.h> /* __ignore_sync_check__ */
/*
 * leXX_to_cpu(t, r): convert a little-endian value @r of C type @t to CPU
 * byte order; 1-byte values pass through unchanged.
 * NOTE(review): the macro's wrapper lines (statement-expression open/close
 * and default case) are elided from this view.
 */
22 #define leXX_to_cpu(t, r) \
25 switch (sizeof(t)) { \
26 case 4: v = le32_to_cpu(r); break; \
27 case 2: v = le16_to_cpu(r); break; \
28 case 1: v = r; break; \
35 /* Verify next sizeof(t) bytes can be on the same instruction */
36 #define validate_next(t, insn, n) \
37 ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
/* Unchecked fetch: read a t at the cursor, advance the cursor, fix endianness */
39 #define __get_next(t, insn) \
40 ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); leXX_to_cpu(t, r); })
/* Unchecked lookahead: read a t at cursor + n without moving the cursor */
42 #define __peek_nbyte_next(t, insn, n) \
43 ({ t r = *(t*)((insn)->next_byte + n); leXX_to_cpu(t, r); })
/*
 * Bounds-checked fetch/peek.  On a read past end_kaddr these jump to the
 * enclosing function's err_out label, so every user must provide one.
 */
45 #define get_next(t, insn) \
46 ({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })
48 #define peek_nbyte_next(t, insn, n) \
49 ({ if (unlikely(!validate_next(t, insn, n))) goto err_out; __peek_nbyte_next(t, insn, n); })
/* Peek at the byte directly under the cursor */
51 #define peek_next(t, insn) peek_nbyte_next(t, insn, 0)
54 * insn_init() - initialize struct insn
55 * @insn: &struct insn to be initialized
56 * @kaddr: address (in kernel memory) of instruction (or copy thereof)
57 * @buf_len: length of the insn buffer at @kaddr
58 * @x86_64: !0 for 64-bit kernel or 64-bit app
60 void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
63 * Instructions longer than MAX_INSN_SIZE (15 bytes) are invalid
64 * even if the input buffer is long enough to hold them.
66 if (buf_len > MAX_INSN_SIZE)
67 buf_len = MAX_INSN_SIZE;
/* Start from a clean slate: all .got flags and fields zeroed */
69 memset(insn, 0, sizeof(*insn));
/* Decode window is [next_byte, end_kaddr); the get_next/peek macros enforce it */
71 insn->end_kaddr = kaddr + buf_len;
72 insn->next_byte = kaddr;
/* Normalize to 0/1 */
73 insn->x86_64 = x86_64 ? 1 : 0;
/*
 * NOTE(review): lines setting insn->kaddr and the default opnd/addr byte
 * widths are elided from this view -- confirm against the full file.
 */
/* Magic byte sequences that Xen/KVM prepend to mark emulated instructions */
81 static const insn_byte_t xen_prefix[] = { __XEN_EMULATE_PREFIX };
82 static const insn_byte_t kvm_prefix[] = { __KVM_EMULATE_PREFIX };
/*
 * Match @prefix (of @len bytes) at the decode cursor.  On a match, record
 * its size and skip past it.  A failed peek past end_kaddr lands on the
 * (elided) err_out label.  Return value lines are elided from this view;
 * presumably non-zero on match -- see the caller's use as a condition.
 */
84 static int __insn_get_emulate_prefix(struct insn *insn,
85 const insn_byte_t *prefix, size_t len)
/* Compare byte-by-byte without consuming, so a mismatch leaves the cursor intact */
89 for (i = 0; i < len; i++) {
90 if (peek_nbyte_next(insn_byte_t, insn, i) != prefix[i])
/* Whole prefix matched: remember it and advance past it */
94 insn->emulate_prefix_size = len;
95 insn->next_byte += len;
/*
 * Strip a hypervisor emulate-prefix, if present.  Try the Xen sequence
 * first; only if it does not match, try the KVM sequence.
 */
103 static void insn_get_emulate_prefix(struct insn *insn)
105 if (__insn_get_emulate_prefix(insn, xen_prefix, sizeof(xen_prefix)))
108 __insn_get_emulate_prefix(insn, kvm_prefix, sizeof(kvm_prefix));
112 * insn_get_prefixes - scan x86 instruction prefix bytes
113 * @insn: &struct insn containing instruction
115 * Populates the @insn->prefixes bitmap, and updates @insn->next_byte
116 * to point to the (first) opcode. No effect if @insn->prefixes.got
123 int insn_get_prefixes(struct insn *insn)
125 struct insn_field *prefixes = &insn->prefixes;
/* A hypervisor emulate-prefix, if any, comes before everything else */
133 insn_get_emulate_prefix(insn);
/* Legacy prefix loop: peek, classify via the inat attribute tables, consume */
137 b = peek_next(insn_byte_t, insn);
138 attr = inat_get_opcode_attribute(b);
139 while (inat_is_legacy_prefix(attr)) {
140 /* Skip if same prefix */
141 for (i = 0; i < nb; i++)
142 if (prefixes->bytes[i] == b)
145 /* Invalid instruction */
147 prefixes->bytes[nb++] = b;
148 if (inat_is_address_size_prefix(attr)) {
149 /* address size switches 2/4 or 4/8 */
/* XOR toggles between the two widths: 4^12=8 (64-bit), 2^6=4 (32-bit) */
151 insn->addr_bytes ^= 12;
153 insn->addr_bytes ^= 6;
154 } else if (inat_is_operand_size_prefix(attr)) {
155 /* operand size switches 2/4 */
156 insn->opnd_bytes ^= 6;
162 b = peek_next(insn_byte_t, insn);
163 attr = inat_get_opcode_attribute(b);
/*
 * Set the last prefix: bytes[3] is reserved for the last-seen prefix
 * (used later by insn_last_prefix_id() for escape/group table lookups).
 */
165 /* Set the last prefix */
166 if (lb && lb != insn->prefixes.bytes[3]) {
167 if (unlikely(insn->prefixes.bytes[3])) {
168 /* Swap the last prefix */
169 b = insn->prefixes.bytes[3];
170 for (i = 0; i < nb; i++)
171 if (prefixes->bytes[i] == lb)
172 insn_set_byte(prefixes, i, b);
174 insn_set_byte(&insn->prefixes, 3, lb);
177 /* Decode REX prefix */
/* NOTE(review): the x86_64-only guard around REX decode is elided here */
179 b = peek_next(insn_byte_t, insn);
180 attr = inat_get_opcode_attribute(b);
181 if (inat_is_rex_prefix(attr)) {
182 insn_field_set(&insn->rex_prefix, b, 1);
185 /* REX.W overrides opnd_size */
186 insn->opnd_bytes = 8;
/* Mark done even when no REX byte was present */
189 insn->rex_prefix.got = 1;
191 /* Decode VEX prefix */
192 b = peek_next(insn_byte_t, insn);
193 attr = inat_get_opcode_attribute(b);
194 if (inat_is_vex_prefix(attr)) {
195 insn_byte_t b2 = peek_nbyte_next(insn_byte_t, insn, 1);
/*
 * In 32-bit mode C4/C5/62 are only VEX/EVEX when the would-be ModRM
 * mod bits are 11b; otherwise the byte is the legacy LDS/LES/BOUND.
 */
198 * In 32-bits mode, if the [7:6] bits (mod bits of
199 * ModRM) on the second byte are not 11b, it is
200 * LDS or LES or BOUND.
202 if (X86_MODRM_MOD(b2) != 3)
205 insn_set_byte(&insn->vex_prefix, 0, b);
206 insn_set_byte(&insn->vex_prefix, 1, b2);
207 if (inat_is_evex_prefix(attr)) {
/* EVEX: 4-byte prefix (62 P0 P1 P2) */
208 b2 = peek_nbyte_next(insn_byte_t, insn, 2);
209 insn_set_byte(&insn->vex_prefix, 2, b2);
210 b2 = peek_nbyte_next(insn_byte_t, insn, 3);
211 insn_set_byte(&insn->vex_prefix, 3, b2);
212 insn->vex_prefix.nbytes = 4;
213 insn->next_byte += 4;
214 if (insn->x86_64 && X86_VEX_W(b2))
215 /* VEX.W overrides opnd_size */
216 insn->opnd_bytes = 8;
217 } else if (inat_is_vex3_prefix(attr)) {
/* VEX3: 3-byte prefix (C4 + two payload bytes) */
218 b2 = peek_nbyte_next(insn_byte_t, insn, 2);
219 insn_set_byte(&insn->vex_prefix, 2, b2);
220 insn->vex_prefix.nbytes = 3;
221 insn->next_byte += 3;
222 if (insn->x86_64 && X86_VEX_W(b2))
223 /* VEX.W overrides opnd_size */
224 insn->opnd_bytes = 8;
227 * For VEX2, fake VEX3-like byte#2.
228 * Makes it easier to decode vex.W, vex.vvvv,
229 * vex.L and vex.pp. Masking with 0x7f sets vex.W == 0.
231 insn_set_byte(&insn->vex_prefix, 2, b2 & 0x7f);
232 insn->vex_prefix.nbytes = 2;
233 insn->next_byte += 2;
/* Mark done even when no VEX prefix was present */
237 insn->vex_prefix.got = 1;
248 * insn_get_opcode - collect opcode(s)
249 * @insn: &struct insn containing instruction
251 * Populates @insn->opcode, updates @insn->next_byte to point past the
252 * opcode byte(s), and sets @insn->attr (except for groups).
253 * If necessary, first collects any preceding (prefix) bytes.
254 * Sets @insn->opcode.value = opcode1. No effect if @insn->opcode.got
261 int insn_get_opcode(struct insn *insn)
263 struct insn_field *opcode = &insn->opcode;
/* Prefixes must be decoded first so the cursor sits on the opcode */
270 if (!insn->prefixes.got) {
271 ret = insn_get_prefixes(insn);
276 /* Get first opcode */
277 op = get_next(insn_byte_t, insn);
278 insn_set_byte(opcode, 0, op);
281 /* Check if there is VEX prefix or not */
282 if (insn_is_avx(insn)) {
/* AVX path: attribute lookup is keyed by opcode + vex.mmmmm + vex.pp */
284 m = insn_vex_m_bits(insn);
285 p = insn_vex_p_bits(insn);
286 insn->attr = inat_get_avx_attribute(op, m, p);
/* Reject EVEX-only forms without EVEX, and non-VEX forms with a VEX prefix */
287 if ((inat_must_evex(insn->attr) && !insn_is_evex(insn)) ||
288 (!inat_accept_vex(insn->attr) &&
289 !inat_is_group(insn->attr))) {
290 /* This instruction is bad */
294 /* VEX has only 1 byte for opcode */
/* Legacy path: follow 0x0F / 0x0F38 / 0x0F3A escape chains */
298 insn->attr = inat_get_opcode_attribute(op);
299 while (inat_is_escape(insn->attr)) {
300 /* Get escaped opcode */
301 op = get_next(insn_byte_t, insn);
302 opcode->bytes[opcode->nbytes++] = op;
/* Escape tables are refined by the last legacy prefix (66/F2/F3) */
303 pfx_id = insn_last_prefix_id(insn);
304 insn->attr = inat_get_escape_attribute(op, pfx_id, insn->attr);
/* A VEX-mandatory opcode reached without a VEX prefix is invalid */
307 if (inat_must_vex(insn->attr)) {
308 /* This instruction is bad */
321 * insn_get_modrm - collect ModRM byte, if any
322 * @insn: &struct insn containing instruction
324 * Populates @insn->modrm and updates @insn->next_byte to point past the
325 * ModRM byte, if any. If necessary, first collects the preceding bytes
326 * (prefixes and opcode(s)). No effect if @insn->modrm.got is already 1.
332 int insn_get_modrm(struct insn *insn)
334 struct insn_field *modrm = &insn->modrm;
335 insn_byte_t pfx_id, mod;
/* Opcode decode (which pulls in prefixes) must happen first */
341 if (!insn->opcode.got) {
342 ret = insn_get_opcode(insn);
/* Only fetch a ModRM byte when the attribute table says the opcode has one */
347 if (inat_has_modrm(insn->attr)) {
348 mod = get_next(insn_byte_t, insn);
349 insn_field_set(modrm, mod, 1);
/* For group opcodes, ModRM.reg (plus last prefix) selects the real attribute */
350 if (inat_is_group(insn->attr)) {
351 pfx_id = insn_last_prefix_id(insn);
352 insn->attr = inat_get_group_attribute(mod, pfx_id,
/* Group member resolved: re-check that a VEX prefix is actually accepted */
354 if (insn_is_avx(insn) && !inat_accept_vex(insn->attr)) {
/* Some instructions force 64-bit operand size in long mode regardless of REX.W */
362 if (insn->x86_64 && inat_is_force64(insn->attr))
363 insn->opnd_bytes = 8;
374 * insn_rip_relative() - Does instruction use RIP-relative addressing mode?
375 * @insn: &struct insn containing instruction
377 * If necessary, first collects the instruction up to and including the
378 * ModRM byte. No effect if @insn->x86_64 is 0.
380 int insn_rip_relative(struct insn *insn)
382 struct insn_field *modrm = &insn->modrm;
389 ret = insn_get_modrm(insn);
394 * For rip-relative instructions, the mod field (top 2 bits)
395 * is zero and the r/m field (bottom 3 bits) is 0x5.
/* 0xc7 masks mod (bits 7:6) and r/m (bits 2:0); reg bits are don't-care */
397 return (modrm->nbytes && (modrm->bytes[0] & 0xc7) == 0x5);
401 * insn_get_sib() - Get the SIB byte of instruction
402 * @insn: &struct insn containing instruction
404 * If necessary, first collects the instruction up to and including the
408 * 0: if decoding succeeded
411 int insn_get_sib(struct insn *insn)
/* ModRM must be decoded first; it determines whether a SIB byte follows */
419 if (!insn->modrm.got) {
420 ret = insn_get_modrm(insn);
425 if (insn->modrm.nbytes) {
426 modrm = insn->modrm.bytes[0];
/*
 * SIB exists only with 32/64-bit addressing, a memory operand
 * (mod != 11b), and r/m == 100b.
 */
427 if (insn->addr_bytes != 2 &&
428 X86_MODRM_MOD(modrm) != 3 && X86_MODRM_RM(modrm) == 4) {
429 insn_field_set(&insn->sib,
430 get_next(insn_byte_t, insn), 1);
443 * insn_get_displacement() - Get the displacement of instruction
444 * @insn: &struct insn containing instruction
446 * If necessary, first collects the instruction up to and including the
448 * Displacement value is sign-expanded.
451 * 0: if decoding succeeded
454 int insn_get_displacement(struct insn *insn)
456 insn_byte_t mod, rm, base;
/* Idempotent: bail out if the displacement was already collected */
459 if (insn->displacement.got)
/* SIB must be decoded first; SIB.base participates in the rules below */
462 if (!insn->sib.got) {
463 ret = insn_get_sib(insn);
/* Displacement can only exist when a ModRM byte is present */
468 if (insn->modrm.nbytes) {
470 * Interpreting the modrm byte:
471 * mod = 00 - no displacement fields (exceptions below)
472 * mod = 01 - 1-byte displacement field
473 * mod = 10 - displacement field is 4 bytes, or 2 bytes if
474 * address size = 2 (0x67 prefix in 32-bit mode)
475 * mod = 11 - no memory operand
477 * If address size = 2...
478 * mod = 00, r/m = 110 - displacement field is 2 bytes
480 * If address size != 2...
481 * mod != 11, r/m = 100 - SIB byte exists
482 * mod = 00, SIB base = 101 - displacement field is 4 bytes
483 * mod = 00, r/m = 101 - rip-relative addressing, displacement
486 mod = X86_MODRM_MOD(insn->modrm.value);
487 rm = X86_MODRM_RM(insn->modrm.value);
488 base = X86_SIB_BASE(insn->sib.value);
/* disp8: sign-extended via the signed char fetch (mod == 01 case, guard elided) */
492 insn_field_set(&insn->displacement,
493 get_next(signed char, insn), 1);
494 } else if (insn->addr_bytes == 2) {
/* 16-bit addressing: disp16 for mod==00/rm==110 or mod==10 */
495 if ((mod == 0 && rm == 6) || mod == 2) {
496 insn_field_set(&insn->displacement,
497 get_next(short, insn), 2);
/* 32/64-bit addressing: disp32 for rip-relative, mod==10, or SIB base==101 */
500 if ((mod == 0 && rm == 5) || mod == 2 ||
501 (mod == 0 && base == 5)) {
502 insn_field_set(&insn->displacement,
503 get_next(int, insn), 4);
/* Mark done even when there is no displacement */
508 insn->displacement.got = 1;
515 /* Decode moffset16/32/64. Return 0 if failed */
/* Direct memory-offset operand (e.g. MOV AL, moffs); width follows addr_bytes */
516 static int __get_moffset(struct insn *insn)
518 switch (insn->addr_bytes) {
520 insn_field_set(&insn->moffset1, get_next(short, insn), 2);
523 insn_field_set(&insn->moffset1, get_next(int, insn), 4);
/* 64-bit moffset is stored as two 32-bit halves: moffset1 = low, moffset2 = high */
526 insn_field_set(&insn->moffset1, get_next(int, insn), 4);
527 insn_field_set(&insn->moffset2, get_next(int, insn), 4);
529 default: /* opnd_bytes must be modified manually */
/* Both halves marked collected together, whatever the width actually read */
532 insn->moffset1.got = insn->moffset2.got = 1;
540 /* Decode imm v32(Iz). Return 0 if failed */
/* Iz immediate: 16 bits with a 16-bit operand size, otherwise capped at 32 bits */
541 static int __get_immv32(struct insn *insn)
543 switch (insn->opnd_bytes) {
545 insn_field_set(&insn->immediate, get_next(short, insn), 2);
/* NOTE(review): the 4- and 8-byte cases both read 4 bytes (Iz never widens to 64) */
549 insn_field_set(&insn->immediate, get_next(int, insn), 4);
551 default: /* opnd_bytes must be modified manually */
561 /* Decode imm v64(Iv/Ov), Return 0 if failed */
/* Iv/Ov immediate: full operand width (16, 32, or 64 bits) */
562 static int __get_immv(struct insn *insn)
564 switch (insn->opnd_bytes) {
566 insn_field_set(&insn->immediate1, get_next(short, insn), 2);
569 insn_field_set(&insn->immediate1, get_next(int, insn), 4);
570 insn->immediate1.nbytes = 4;
/* 64-bit immediate split into two 32-bit halves: immediate1 = low, immediate2 = high */
573 insn_field_set(&insn->immediate1, get_next(int, insn), 4);
574 insn_field_set(&insn->immediate2, get_next(int, insn), 4);
576 default: /* opnd_bytes must be modified manually */
579 insn->immediate1.got = insn->immediate2.got = 1;
586 /* Decode ptr16:16/32(Ap) */
/* Far-pointer operand: offset (16 or 32 bits) followed by a 16-bit segment selector */
587 static int __get_immptr(struct insn *insn)
589 switch (insn->opnd_bytes) {
591 insn_field_set(&insn->immediate1, get_next(short, insn), 2);
594 insn_field_set(&insn->immediate1, get_next(int, insn), 4);
597 /* ptr16:64 does not exist (no segment) */
599 default: /* opnd_bytes must be modified manually */
/* Segment selector always follows as 2 bytes */
602 insn_field_set(&insn->immediate2, get_next(unsigned short, insn), 2);
603 insn->immediate1.got = insn->immediate2.got = 1;
611 * insn_get_immediate() - Get the immediate in an instruction
612 * @insn: &struct insn containing instruction
614 * If necessary, first collects the instruction up to and including the
615 * displacement bytes.
616 * Basically, most of immediates are sign-expanded. Unsigned-value can be
617 * computed by bit masking with ((1 << (nbytes * 8)) - 1)
623 int insn_get_immediate(struct insn *insn)
/* Idempotent: bail out if the immediate was already collected */
627 if (insn->immediate.got)
/* Displacement (and everything before it) must be decoded first */
630 if (!insn->displacement.got) {
631 ret = insn_get_displacement(insn);
/* moffset-style operands replace a regular immediate entirely */
636 if (inat_has_moffset(insn->attr)) {
637 if (!__get_moffset(insn))
/* No immediate for this opcode: nothing more to fetch */
642 if (!inat_has_immediate(insn->attr))
/* Fetch by the size class recorded in the inat attribute table */
646 switch (inat_immediate_size(insn->attr)) {
/* Ib: sign-extended byte */
648 insn_field_set(&insn->immediate, get_next(signed char, insn), 1);
651 insn_field_set(&insn->immediate, get_next(short, insn), 2);
654 insn_field_set(&insn->immediate, get_next(int, insn), 4);
/* 64-bit immediate as two 32-bit halves */
657 insn_field_set(&insn->immediate1, get_next(int, insn), 4);
658 insn_field_set(&insn->immediate2, get_next(int, insn), 4);
661 if (!__get_immptr(insn))
664 case INAT_IMM_VWORD32:
665 if (!__get_immv32(insn))
669 if (!__get_immv(insn))
673 /* Here, insn must have an immediate, but failed */
/* Second immediate (e.g. ENTER imm16, imm8) is always a single byte */
676 if (inat_has_second_immediate(insn->attr)) {
677 insn_field_set(&insn->immediate2, get_next(signed char, insn), 1);
680 insn->immediate.got = 1;
688 * insn_get_length() - Get the length of instruction
689 * @insn: &struct insn containing instruction
691 * If necessary, first collects the instruction up to and including the
698 int insn_get_length(struct insn *insn)
/* Immediate decode transitively pulls in every earlier stage */
705 if (!insn->immediate.got) {
706 ret = insn_get_immediate(insn);
/* Length = bytes consumed between the start (kaddr) and the final cursor */
711 insn->length = (unsigned char)((unsigned long)insn->next_byte
712 - (unsigned long)insn->kaddr);
717 /* Ensure this instruction is decoded completely */
/* True only when every decode stage set its .got flag (prefixes are implied by opcode) */
718 static inline int insn_complete(struct insn *insn)
720 return insn->opcode.got && insn->modrm.got && insn->sib.got &&
721 insn->displacement.got && insn->immediate.got;
725 * insn_decode() - Decode an x86 instruction
726 * @insn: &struct insn to be initialized
727 * @kaddr: address (in kernel memory) of instruction (or copy thereof)
728 * @buf_len: length of the insn buffer at @kaddr
729 * @m: insn mode, see enum insn_mode
732 * 0: if decoding succeeded
735 int insn_decode(struct insn *insn, const void *kaddr, int buf_len, enum insn_mode m)
739 /* #define INSN_MODE_KERN -1 __ignore_sync_check__ mode is only valid in the kernel */
/* Kernel mode: bitness follows the kernel build config rather than the caller */
741 if (m == INSN_MODE_KERN)
742 insn_init(insn, kaddr, buf_len, IS_ENABLED(CONFIG_X86_64));
744 insn_init(insn, kaddr, buf_len, m == INSN_MODE_64);
746 ret = insn_get_length(insn);
/* NOTE(review): the function continues past this view; tail (error returns) not visible */
750 if (insn_complete(insn))