;*****************************************************************************
;* x86inc.asm: x264asm abstraction layer
;*****************************************************************************
;* Copyright (C) 2005-2012 x264 project
;*
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;*          Anton Mitrofanov <BugMaster@narod.ru>
;*          Jason Garrett-Glaser <darkshikari@gmail.com>
;*          Henrik Gramner <hengar-6@student.ltu.se>
;*
;* Permission to use, copy, modify, and/or distribute this software for any
;* purpose with or without fee is hereby granted, provided that the above
;* copyright notice and this permission notice appear in all copies.
;*
;* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
;* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
;* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
;* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
;* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
;* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
;* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
;*****************************************************************************
; This is a header file for the x264ASM assembly language, which uses
; NASM/YASM syntax combined with a large number of macros to provide easy
; abstraction between different calling conventions (x86_32, win64, linux64).
; It also has various other useful features to simplify writing the kind of
; DSP functions that are most often used in x264.

; Unlike the rest of x264, this file is available under an ISC license, as it
; has significant usefulness outside of x264 and we want it to be available
; to the largest audience possible. Of course, if you modify it for your own
; purposes to add a new feature, we strongly encourage contributing a patch
; as this feature might be useful for others as well. Send patches or ideas
; to x264-devel@videolan.org .
%include "vpx_config.asm"

%define program_name vp9
%ifidn __OUTPUT_FORMAT__,win32
%elifidn __OUTPUT_FORMAT__,win64
%elifidn __OUTPUT_FORMAT__,x64

%ifidn __OUTPUT_FORMAT__,elf32
%elifidn __OUTPUT_FORMAT__,elf64
%elifidn __OUTPUT_FORMAT__,elf
%elifidn __OUTPUT_FORMAT__,x64
%elifidn __OUTPUT_FORMAT__,win64
%define mangle(x) _ %+ x
; FIXME: All of the 64bit asm functions that take a stride as an argument
; via register assume that the high dword of that register is filled with 0.
; This is true in practice (since we never do any 64bit arithmetic on strides,
; and x264's strides are all positive), but is not guaranteed by the ABI.
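;
; A hedged sketch (the function and names are hypothetical, not part of this
; file): a caller can avoid relying on that assumption by widening the stride
; explicitly with movsxdifnidn (defined below), which is a no-op on x86_32
; and a movsxd on x86_64:
;     cglobal copy_px, 3, 4, 0, dst, src, stride, tmp
;         movsxdifnidn strideq, strided ; make the high dword well-defined
;         mov   tmpd, [srcq+strideq]
;         mov   [dstq+strideq], tmpd
;         RET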
; Name of the .rodata section.
; Kludge: Something on OS X fails to align .rodata even given an align
; attribute, so use a different read-only section.
%macro SECTION_RODATA 0-1 16
    %ifidn __OUTPUT_FORMAT__,macho64
        SECTION .text align=%1
    %elifidn __OUTPUT_FORMAT__,macho
        SECTION .text align=%1
    %elifidn __OUTPUT_FORMAT__,aout
        section .text
    %else
        SECTION .rodata align=%1
    %endif
%endmacro
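
; Example usage (the constant shown is hypothetical), starting a
; 32-byte-aligned read-only section:
;     SECTION_RODATA 32
;     pw_64: times 8 dw 64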
; aout does not support align=
%macro SECTION_TEXT 0-1 16
    %ifidn __OUTPUT_FORMAT__,aout
        SECTION .text
    %else
        SECTION .text align=%1
    %endif
%endmacro
; PIC macros are copied from vpx_ports/x86_abi_support.asm. The "%define PIC"
; from the original code is added in for 64bit.
%ifidn __OUTPUT_FORMAT__,elf32
%define ABI_IS_32BIT 1
%elifidn __OUTPUT_FORMAT__,macho32
%define ABI_IS_32BIT 1
%elifidn __OUTPUT_FORMAT__,win32
%define ABI_IS_32BIT 1
%elifidn __OUTPUT_FORMAT__,aout
%define ABI_IS_32BIT 1
%else
%define ABI_IS_32BIT 0
%endif
%ifidn __OUTPUT_FORMAT__,elf32
%define GET_GOT_SAVE_ARG 1
%define WRT_PLT wrt ..plt
    extern _GLOBAL_OFFSET_TABLE_
    add %1, _GLOBAL_OFFSET_TABLE_ + $$ - %%sub_offset wrt ..gotpc
%define GLOBAL(x) x + %1 wrt ..gotoff
%define RESTORE_GOT pop %1
%elifidn __OUTPUT_FORMAT__,macho32
%define GET_GOT_SAVE_ARG 1
%define GLOBAL(x) x + %1 - %%get_got
%define RESTORE_GOT pop %1

%define GLOBAL(x) rel x
%define WRT_PLT wrt ..plt
%elifidn __OUTPUT_FORMAT__,macho64
; Done with PIC macros
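
; Hedged usage sketch for the macros above (the register choice and the pw_4
; constant are hypothetical): on 32-bit PIC targets a GOT pointer has to be
; materialized before constants can be addressed, while on 64-bit targets
; GLOBAL() is simply RIP-relative:
;     GET_GOT     r5
;     movq        m0, [GLOBAL(pw_4)]
;     RESTORE_GOT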
; Always use long nops (reduces 0x90 spam in disassembly on x86_32)

; Macros to eliminate most code duplication between x86_32 and x86_64:
; Currently this works only for leaf functions which load all their arguments
; into registers at the start, and make no other use of the stack. Luckily that
; covers most of x264's asm.
; PROLOGUE:
; %1 = number of arguments. loads them from stack if needed.
; %2 = number of registers used. pushes callee-saved regs if needed.
; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed.
; %4 = list of names to define to registers
; PROLOGUE can also be invoked by adding the same options to cglobal
; e.g.
; cglobal foo, 2,3,0, dst, src, tmp
; declares a function (foo), taking two args (dst and src) and one local variable (tmp)
; TODO Some functions can use some args directly from the stack. If they're the
; last args then you can just not declare them, but if they're in the middle
; we need a more flexible macro.
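;
; A slightly fuller sketch of the example above (the body is hypothetical):
;     cglobal foo, 2,3,0, dst, src, tmp
;         mov    tmpd, [srcq]   ; named args alias r0..r2 at the right width
;         add    tmpd, 32
;         mov    [dstq], tmpd
;         RET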
; RET:
; Pops anything that was pushed by PROLOGUE, and returns.

; REP_RET:
; Same, but if it doesn't pop anything it becomes a 2-byte ret, for Athlons
; which are slow when a normal ret follows a branch.
; registers:
; rN and rNq are the native-size register holding function argument N
; rNd, rNw, rNb are dword, word, and byte size
; rNm is the original location of arg N (a register or on the stack), dword
; rNmp is native size
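;
; e.g. for argument 1 that means r1/r1q (native), r1d (dword), r1w (word) and
; r1b (byte); "mov r1d, r1m" (a hedged illustration, not code from this file)
; reloads the dword from wherever the ABI originally passed it.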
%macro DECLARE_REG 5-6
    %elif ARCH_X86_64 ; memory
        %define r%1m [rsp + stack_offset + %6]
        %define r%1mp qword r %+ %1m
    %else ; x86_32
        %define r%1m [esp + stack_offset + %6]
        %define r%1mp dword r %+ %1m
%macro DECLARE_REG_SIZE 2

DECLARE_REG_SIZE ax, al
DECLARE_REG_SIZE bx, bl
DECLARE_REG_SIZE cx, cl
DECLARE_REG_SIZE dx, dl
DECLARE_REG_SIZE si, sil
DECLARE_REG_SIZE di, dil
DECLARE_REG_SIZE bp, bpl
; t# defines for when per-arch register allocation is more complex than just function arguments

%macro DECLARE_REG_TMP 1-*
    CAT_XDEFINE t, %%i, r%1

%macro DECLARE_REG_TMP_SIZE 0-*
    %define t%1q t%1 %+ q
    %define t%1d t%1 %+ d
    %define t%1w t%1 %+ w
    %define t%1b t%1 %+ b

DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
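
; Example usage (the allocation shown is hypothetical): pick different scratch
; registers per arch, then write the body once in terms of t0/t1:
;     %if ARCH_X86_64
;         DECLARE_REG_TMP 4, 5
;     %else
;         DECLARE_REG_TMP 2, 3
;     %endif
;         mov t0d, 16
;         mov t1d, 255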
%assign stack_offset stack_offset+gprsize
%assign stack_offset stack_offset-gprsize

%macro PUSH_IF_USED 1-*

%macro POP_IF_USED 1-*

%macro LOAD_IF_USED 1-*
    mov r%1, r %+ %1 %+ mp

%assign stack_offset stack_offset+(%2)
%assign stack_offset stack_offset-(%2)

%macro movsxdifnidn 2
%macro DEFINE_ARGS 0-*
    CAT_UNDEF arg_name %+ %%i, q
    CAT_UNDEF arg_name %+ %%i, d
    CAT_UNDEF arg_name %+ %%i, w
    CAT_UNDEF arg_name %+ %%i, b
    CAT_UNDEF arg_name %+ %%i, m
    CAT_UNDEF arg_name %+ %%i, mp
    CAT_UNDEF arg_name, %%i
    %xdefine %%stack_offset stack_offset
    %undef stack_offset ; so that the current value of stack_offset doesn't get baked in by xdefine
    %xdefine %1q r %+ %%i %+ q
    %xdefine %1d r %+ %%i %+ d
    %xdefine %1w r %+ %%i %+ w
    %xdefine %1b r %+ %%i %+ b
    %xdefine %1m r %+ %%i %+ m
    %xdefine %1mp r %+ %%i %+ mp
    CAT_XDEFINE arg_name, %%i, %1
    %xdefine stack_offset %%stack_offset
    %assign n_arg_names %0
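
; Example usage (the names are hypothetical): re-declare argument names
; mid-function once their meaning changes; this only redefines aliases and
; emits no code:
;     DEFINE_ARGS dst, stride, height
;     ; dstq, strideq and heightd now refer to r0, r1 and r2d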
%if WIN64 ; Windows x64 ;=================================================

DECLARE_REG 0, rcx, ecx, cx, cl
DECLARE_REG 1, rdx, edx, dx, dl
DECLARE_REG 2, R8, R8D, R8W, R8B
DECLARE_REG 3, R9, R9D, R9W, R9B
DECLARE_REG 4, R10, R10D, R10W, R10B, 40
DECLARE_REG 5, R11, R11D, R11W, R11B, 48
DECLARE_REG 6, rax, eax, ax, al, 56
DECLARE_REG 7, rdi, edi, di, dil, 64
DECLARE_REG 8, rsi, esi, si, sil, 72
DECLARE_REG 9, rbx, ebx, bx, bl, 80
DECLARE_REG 10, rbp, ebp, bp, bpl, 88
DECLARE_REG 11, R12, R12D, R12W, R12B, 96
DECLARE_REG 12, R13, R13D, R13W, R13B, 104
DECLARE_REG 13, R14, R14D, R14W, R14B, 112
DECLARE_REG 14, R15, R15D, R15W, R15B, 120
%macro PROLOGUE 2-4+ 0 ; #args, #regs, #xmm_regs, arg_names...
    ASSERT regs_used >= num_args
    ASSERT regs_used <= 15
    PUSH_IF_USED 7, 8, 9, 10, 11, 12, 13, 14
    %assign xmm_regs_used 0
    LOAD_IF_USED 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
%macro WIN64_SPILL_XMM 1
    %assign xmm_regs_used %1
    ASSERT xmm_regs_used <= 16
    %if xmm_regs_used > 6
        SUB rsp, (xmm_regs_used-6)*16+16
        %assign %%i xmm_regs_used
        %rep (xmm_regs_used-6)
            movdqa [rsp + (%%i-6)*16+(~stack_offset&8)], xmm %+ %%i
%macro WIN64_RESTORE_XMM_INTERNAL 1
    %if xmm_regs_used > 6
        %assign %%i xmm_regs_used
        %rep (xmm_regs_used-6)
            movdqa xmm %+ %%i, [%1 + (%%i-6)*16+(~stack_offset&8)]
        add %1, (xmm_regs_used-6)*16+16
%macro WIN64_RESTORE_XMM 1
    WIN64_RESTORE_XMM_INTERNAL %1
    %assign stack_offset stack_offset-((xmm_regs_used-6)*16+16) ; undo the full adjustment made by WIN64_SPILL_XMM
    %assign xmm_regs_used 0
    WIN64_RESTORE_XMM_INTERNAL rsp
    POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7

    %if regs_used > 7 || xmm_regs_used > 6
%elif ARCH_X86_64 ; *nix x64 ;=============================================

DECLARE_REG 0, rdi, edi, di, dil
DECLARE_REG 1, rsi, esi, si, sil
DECLARE_REG 2, rdx, edx, dx, dl
DECLARE_REG 3, rcx, ecx, cx, cl
DECLARE_REG 4, R8, R8D, R8W, R8B
DECLARE_REG 5, R9, R9D, R9W, R9B
DECLARE_REG 6, rax, eax, ax, al, 8
DECLARE_REG 7, R10, R10D, R10W, R10B, 16
DECLARE_REG 8, R11, R11D, R11W, R11B, 24
DECLARE_REG 9, rbx, ebx, bx, bl, 32
DECLARE_REG 10, rbp, ebp, bp, bpl, 40
DECLARE_REG 11, R12, R12D, R12W, R12B, 48
DECLARE_REG 12, R13, R13D, R13W, R13B, 56
DECLARE_REG 13, R14, R14D, R14W, R14B, 64
DECLARE_REG 14, R15, R15D, R15W, R15B, 72
%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names...
    ASSERT regs_used >= num_args
    ASSERT regs_used <= 15
    PUSH_IF_USED 9, 10, 11, 12, 13, 14
    LOAD_IF_USED 6, 7, 8, 9, 10, 11, 12, 13, 14

    POP_IF_USED 14, 13, 12, 11, 10, 9
%else ; X86_32 ;==============================================================

DECLARE_REG 0, eax, eax, ax, al, 4
DECLARE_REG 1, ecx, ecx, cx, cl, 8
DECLARE_REG 2, edx, edx, dx, dl, 12
DECLARE_REG 3, ebx, ebx, bx, bl, 16
DECLARE_REG 4, esi, esi, si, null, 20
DECLARE_REG 5, edi, edi, di, null, 24
DECLARE_REG 6, ebp, ebp, bp, null, 28

%macro DECLARE_ARG 1-*
    %define r%1m [esp + stack_offset + 4*%1 + 4]
    %define r%1mp dword r%1m

DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14
%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names...
    ASSERT regs_used >= num_args
    PUSH_IF_USED 3, 4, 5, 6
    LOAD_IF_USED 0, 1, 2, 3, 4, 5, 6

    POP_IF_USED 6, 5, 4, 3

%endif ;======================================================================
%macro WIN64_SPILL_XMM 1
%macro WIN64_RESTORE_XMM 1

;=============================================================================
; arch-independent part
;=============================================================================

%assign function_align 16
; Begin a function.
; Applies any symbol mangling needed for C linkage, and sets up a define such that
; subsequent uses of the function name automatically refer to the mangled version.
; Appends cpuflags to the function name if cpuflags has been specified.
%macro cglobal 1-2+ ; name, [PROLOGUE args]
    %if %0 == 1
        cglobal_internal %1 %+ SUFFIX
    %else
        cglobal_internal %1 %+ SUFFIX, %2
    %endif
%endmacro
%macro cglobal_internal 1-2+
    %xdefine %1 mangle(program_name %+ _ %+ %1)
    %xdefine %1.skip_prologue %1 %+ .skip_prologue
    CAT_XDEFINE cglobaled_, %1, 1
    %xdefine current_function %1
    %ifidn __OUTPUT_FORMAT__,elf
        global %1:function hidden
    %elifidn __OUTPUT_FORMAT__,elf32
        global %1:function hidden
    %elifidn __OUTPUT_FORMAT__,elf64
        global %1:function hidden
    %elifidn __OUTPUT_FORMAT__,macho32
        global %1:private_extern
    %elifidn __OUTPUT_FORMAT__,macho64
        global %1:private_extern
    RESET_MM_PERMUTATION ; not really needed, but makes disassembly somewhat nicer
    %assign stack_offset 0
%macro cextern 1
    %xdefine %1 mangle(program_name %+ _ %+ %1)
    CAT_XDEFINE cglobaled_, %1, 1

; like cextern, but without the prefix
%macro cextern_naked 1
    %xdefine %1 mangle(%1)
    CAT_XDEFINE cglobaled_, %1, 1

%macro const 1-2+
    %xdefine %1 mangle(program_name %+ _ %+ %1)
; This is needed for ELF, otherwise the GNU linker assumes the stack is
; executable by default.
%ifidn __OUTPUT_FORMAT__,elf
SECTION .note.GNU-stack noalloc noexec nowrite progbits
%elifidn __OUTPUT_FORMAT__,elf32
SECTION .note.GNU-stack noalloc noexec nowrite progbits
%elifidn __OUTPUT_FORMAT__,elf64
SECTION .note.GNU-stack noalloc noexec nowrite progbits
%endif
%assign cpuflags_mmx      (1<<0)
%assign cpuflags_mmx2     (1<<1) | cpuflags_mmx
%assign cpuflags_3dnow    (1<<2) | cpuflags_mmx
%assign cpuflags_3dnow2   (1<<3) | cpuflags_3dnow
%assign cpuflags_sse      (1<<4) | cpuflags_mmx2
%assign cpuflags_sse2     (1<<5) | cpuflags_sse
%assign cpuflags_sse2slow (1<<6) | cpuflags_sse2
%assign cpuflags_sse3     (1<<7) | cpuflags_sse2
%assign cpuflags_ssse3    (1<<8) | cpuflags_sse3
%assign cpuflags_sse4     (1<<9) | cpuflags_ssse3
%assign cpuflags_sse42    (1<<10)| cpuflags_sse4
%assign cpuflags_avx      (1<<11)| cpuflags_sse42
%assign cpuflags_xop      (1<<12)| cpuflags_avx
%assign cpuflags_fma4     (1<<13)| cpuflags_avx

%assign cpuflags_cache32  (1<<16)
%assign cpuflags_cache64  (1<<17)
%assign cpuflags_slowctz  (1<<18)
%assign cpuflags_lzcnt    (1<<19)
%assign cpuflags_misalign (1<<20)
%assign cpuflags_aligned  (1<<21) ; not a cpu feature, but a function variant
%assign cpuflags_atom     (1<<22)
%define    cpuflag(x) ((cpuflags & (cpuflags_ %+ x)) == (cpuflags_ %+ x))
%define notcpuflag(x) ((cpuflags & (cpuflags_ %+ x)) != (cpuflags_ %+ x))
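
; Example usage (the fallback branch is a hypothetical sketch):
;     %if cpuflag(ssse3)
;         pshufb m0, m1
;     %else
;         ; ... SSE2 fallback shuffle sequence ...
;     %endif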
; Takes up to 2 cpuflags from the above list.
; All subsequent functions (up to the next INIT_CPUFLAGS) are built for the specified cpu.
; You shouldn't need to invoke this macro directly, it's a subroutine for INIT_MMX & co.
%macro INIT_CPUFLAGS 0-2
    %assign cpuflags cpuflags_%1
    %xdefine cpuname %1_%2
    %assign cpuflags cpuflags | cpuflags_%2
    %xdefine SUFFIX _ %+ cpuname
    %assign avx_enabled 1
    %if mmsize == 16 && notcpuflag(sse2)
    %define movnta movntps
%macro INIT_MMX 0-1+
    %assign avx_enabled 0
    %define RESET_MM_PERMUTATION INIT_MMX %1
    %define movnta movntq
    CAT_XDEFINE m, %%i, mm %+ %%i
    CAT_XDEFINE nmm, %%i, %%i
%macro INIT_XMM 0-1+
    %assign avx_enabled 0
    %define RESET_MM_PERMUTATION INIT_XMM %1
    %define num_mmregs 16
    %define movnta movntdq
    CAT_XDEFINE m, %%i, xmm %+ %%i
    CAT_XDEFINE nxmm, %%i, %%i
; FIXME: INIT_AVX can be replaced by INIT_XMM avx
%macro INIT_AVX 0
    %assign avx_enabled 1
    %define PALIGNR PALIGNR_SSSE3
    %define RESET_MM_PERMUTATION INIT_AVX
%macro INIT_YMM 0-1+
    %assign avx_enabled 1
    %define RESET_MM_PERMUTATION INIT_YMM %1
    %define num_mmregs 16
    %define movnta vmovntps
    CAT_XDEFINE m, %%i, ymm %+ %%i
    CAT_XDEFINE nymm, %%i, %%i
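
; Hedged usage sketch (the function name is hypothetical): build several
; variants of one routine; SUFFIX and cpuflags make the emitted names and any
; %if cpuflag() checks differ per variant:
;     INIT_XMM sse2
;     cglobal my_func, ... ; assembles as my_func_sse2
;     INIT_XMM avx
;     cglobal my_func, ... ; assembles as my_func_avx, using 3-arg AVX forms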
; I often want to use macros that permute their arguments. e.g. there's no
; efficient way to implement butterfly or transpose or dct without swapping some
; arguments.
;
; I would like to not have to manually keep track of the permutations:
; If I insert a permutation in the middle of a function, it should automatically
; change everything that follows. For more complex macros I may also have multiple
; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations.
;
; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that
; permutes its arguments. It's equivalent to exchanging the contents of the
; registers, except that this way you exchange the register names instead, so it
; doesn't cost any cycles.
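;
; Example (the loads are hypothetical): after the sequence below, the name m0
; refers to the register previously called m1 and vice versa; no instructions
; are emitted, only the name->register mapping changes:
;     mova m0, [r0]
;     mova m1, [r1]
;     SWAP 0, 1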
%macro PERMUTE 2-* ; takes a list of pairs to swap

%macro SWAP 2-* ; swaps a single chain (sometimes more concise than pairs)
    CAT_XDEFINE n, m%1, %1
    CAT_XDEFINE n, m%2, %2
    ; If we were called as "SWAP m0,m1" rather than "SWAP 0,1", infer the original numbers here.
    ; Be careful using this mode in nested macros though, as in some cases there may be
    ; other copies of m# that have already been dereferenced and don't get updated correctly.
    %xdefine %%n1 n %+ %1
    %xdefine %%n2 n %+ %2
    %xdefine tmp m %+ %%n1
    CAT_XDEFINE m, %%n1, m %+ %%n2
    CAT_XDEFINE m, %%n2, tmp
    CAT_XDEFINE n, m %+ %%n1, %%n1
    CAT_XDEFINE n, m %+ %%n2, %%n2
; If SAVE_MM_PERMUTATION is placed at the end of a function, then any later
; calls to that function will automatically load the permutation, so values can
; be returned in mmregs.
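;
; Hedged sketch (function and permutation hypothetical):
;     cglobal do_transpose
;         ...
;         SWAP 2, 3
;         SAVE_MM_PERMUTATION
;         ret
; A later "call do_transpose" (via the call macro below) reloads the saved
; permutation, so the caller sees the results under the permuted names.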
%macro SAVE_MM_PERMUTATION 0-1
    %xdefine %%f current_function %+ _m
    CAT_XDEFINE %%f, %%i, m %+ %%i

%macro LOAD_MM_PERMUTATION 1 ; name to load from
    CAT_XDEFINE m, %%i, %1_m %+ %%i
    CAT_XDEFINE n, m %+ %%i, %%i
; Append cpuflags to the callee's name iff the appended name is known and the plain name isn't
%macro call 1
    call_internal %1, %1 %+ SUFFIX

%macro call_internal 2
    LOAD_MM_PERMUTATION %%i
; Substitutions that reduce instruction size but are functionally equivalent

;=============================================================================
; AVX abstraction layer
;=============================================================================

    CAT_XDEFINE sizeofmm, i, 8
    CAT_XDEFINE sizeofxmm, i, 16
    CAT_XDEFINE sizeofymm, i, 32
;%2 == 1 if float, 0 if int
;%3 == 1 if 4-operand (xmm, xmm, xmm, imm), 0 if 2- or 3-operand (xmm, xmm, xmm)
;%4 == number of operands given
%macro RUN_AVX_INSTR 6-7+
    %define %%size sizeof%5
    %define %%size mmsize
    %define %%regmov movq
    %define %%regmov movaps
    %define %%regmov movdqa
    %if avx_enabled && sizeof%5==16
; 3arg AVX ops with a memory arg can only have it in src2,
; whereas SSE emulation of 3arg prefers to have it in src1 (i.e. the mov).
; So, if the op is symmetric and the wrong one is memory, swap them.
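; (For instance, "addps m0, m1, [r0]" could, without AVX, be emulated as
; "movaps m0, [r0]" followed by "addps m0, m1", which is legal because addps
; is symmetric; a hedged illustration of the idea, not literal macro output.)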
%macro RUN_AVX_INSTR1 8
    %if %%swap && %3 == 0 && %8 == 1
        RUN_AVX_INSTR %1, %2, %3, %4, %5, %7, %6
    %else
        RUN_AVX_INSTR %1, %2, %3, %4, %5, %6, %7
    %endif
%endmacro
;%1 == instruction
;%2 == 1 if float, 0 if int
;%3 == 1 if 4-operand (xmm, xmm, xmm, imm), 0 if 3-operand (xmm, xmm, xmm)
;%4 == 1 if symmetric (i.e. doesn't matter which src arg is which), 0 if not
%macro AVX_INSTR 4
    %macro %1 2-9 fnord, fnord, fnord, %1, %2, %3, %4
        %ifidn %3, fnord
            RUN_AVX_INSTR %6, %7, %8, 2, %1, %2
        %elifidn %4, fnord
            RUN_AVX_INSTR1 %6, %7, %8, 3, %1, %2, %3, %9
        %elifidn %5, fnord
            RUN_AVX_INSTR %6, %7, %8, 4, %1, %2, %3, %4
        %else
            RUN_AVX_INSTR %6, %7, %8, 5, %1, %2, %3, %4, %5
        %endif
    %endmacro
%endmacro
AVX_INSTR addpd, 1, 0, 1
AVX_INSTR addps, 1, 0, 1
AVX_INSTR addsd, 1, 0, 1
AVX_INSTR addss, 1, 0, 1
AVX_INSTR addsubpd, 1, 0, 0
AVX_INSTR addsubps, 1, 0, 0
AVX_INSTR andpd, 1, 0, 1
AVX_INSTR andps, 1, 0, 1
AVX_INSTR andnpd, 1, 0, 0
AVX_INSTR andnps, 1, 0, 0
AVX_INSTR blendpd, 1, 0, 0
AVX_INSTR blendps, 1, 0, 0
AVX_INSTR blendvpd, 1, 0, 0
AVX_INSTR blendvps, 1, 0, 0
AVX_INSTR cmppd, 1, 0, 0
AVX_INSTR cmpps, 1, 0, 0
AVX_INSTR cmpsd, 1, 0, 0
AVX_INSTR cmpss, 1, 0, 0
AVX_INSTR cvtdq2ps, 1, 0, 0
AVX_INSTR cvtps2dq, 1, 0, 0
AVX_INSTR divpd, 1, 0, 0
AVX_INSTR divps, 1, 0, 0
AVX_INSTR divsd, 1, 0, 0
AVX_INSTR divss, 1, 0, 0
AVX_INSTR dppd, 1, 1, 0
AVX_INSTR dpps, 1, 1, 0
AVX_INSTR haddpd, 1, 0, 0
AVX_INSTR haddps, 1, 0, 0
AVX_INSTR hsubpd, 1, 0, 0
AVX_INSTR hsubps, 1, 0, 0
AVX_INSTR maxpd, 1, 0, 1
AVX_INSTR maxps, 1, 0, 1
AVX_INSTR maxsd, 1, 0, 1
AVX_INSTR maxss, 1, 0, 1
AVX_INSTR minpd, 1, 0, 1
AVX_INSTR minps, 1, 0, 1
AVX_INSTR minsd, 1, 0, 1
AVX_INSTR minss, 1, 0, 1
AVX_INSTR movhlps, 1, 0, 0
AVX_INSTR movlhps, 1, 0, 0
AVX_INSTR movsd, 1, 0, 0
AVX_INSTR movss, 1, 0, 0
AVX_INSTR mpsadbw, 0, 1, 0
AVX_INSTR mulpd, 1, 0, 1
AVX_INSTR mulps, 1, 0, 1
AVX_INSTR mulsd, 1, 0, 1
AVX_INSTR mulss, 1, 0, 1
AVX_INSTR orpd, 1, 0, 1
AVX_INSTR orps, 1, 0, 1
AVX_INSTR packsswb, 0, 0, 0
AVX_INSTR packssdw, 0, 0, 0
AVX_INSTR packuswb, 0, 0, 0
AVX_INSTR packusdw, 0, 0, 0
AVX_INSTR paddb, 0, 0, 1
AVX_INSTR paddw, 0, 0, 1
AVX_INSTR paddd, 0, 0, 1
AVX_INSTR paddq, 0, 0, 1
AVX_INSTR paddsb, 0, 0, 1
AVX_INSTR paddsw, 0, 0, 1
AVX_INSTR paddusb, 0, 0, 1
AVX_INSTR paddusw, 0, 0, 1
AVX_INSTR palignr, 0, 1, 0
AVX_INSTR pand, 0, 0, 1
AVX_INSTR pandn, 0, 0, 0
AVX_INSTR pavgb, 0, 0, 1
AVX_INSTR pavgw, 0, 0, 1
AVX_INSTR pblendvb, 0, 0, 0
AVX_INSTR pblendw, 0, 1, 0
AVX_INSTR pcmpestri, 0, 0, 0
AVX_INSTR pcmpestrm, 0, 0, 0
AVX_INSTR pcmpistri, 0, 0, 0
AVX_INSTR pcmpistrm, 0, 0, 0
AVX_INSTR pcmpeqb, 0, 0, 1
AVX_INSTR pcmpeqw, 0, 0, 1
AVX_INSTR pcmpeqd, 0, 0, 1
AVX_INSTR pcmpeqq, 0, 0, 1
AVX_INSTR pcmpgtb, 0, 0, 0
AVX_INSTR pcmpgtw, 0, 0, 0
AVX_INSTR pcmpgtd, 0, 0, 0
AVX_INSTR pcmpgtq, 0, 0, 0
AVX_INSTR phaddw, 0, 0, 0
AVX_INSTR phaddd, 0, 0, 0
AVX_INSTR phaddsw, 0, 0, 0
AVX_INSTR phsubw, 0, 0, 0
AVX_INSTR phsubd, 0, 0, 0
AVX_INSTR phsubsw, 0, 0, 0
AVX_INSTR pmaddwd, 0, 0, 1
AVX_INSTR pmaddubsw, 0, 0, 0
AVX_INSTR pmaxsb, 0, 0, 1
AVX_INSTR pmaxsw, 0, 0, 1
AVX_INSTR pmaxsd, 0, 0, 1
AVX_INSTR pmaxub, 0, 0, 1
AVX_INSTR pmaxuw, 0, 0, 1
AVX_INSTR pmaxud, 0, 0, 1
AVX_INSTR pminsb, 0, 0, 1
AVX_INSTR pminsw, 0, 0, 1
AVX_INSTR pminsd, 0, 0, 1
AVX_INSTR pminub, 0, 0, 1
AVX_INSTR pminuw, 0, 0, 1
AVX_INSTR pminud, 0, 0, 1
AVX_INSTR pmulhuw, 0, 0, 1
AVX_INSTR pmulhrsw, 0, 0, 1
AVX_INSTR pmulhw, 0, 0, 1
AVX_INSTR pmullw, 0, 0, 1
AVX_INSTR pmulld, 0, 0, 1
AVX_INSTR pmuludq, 0, 0, 1
AVX_INSTR pmuldq, 0, 0, 1
AVX_INSTR por, 0, 0, 1
AVX_INSTR psadbw, 0, 0, 1
AVX_INSTR pshufb, 0, 0, 0
AVX_INSTR psignb, 0, 0, 0
AVX_INSTR psignw, 0, 0, 0
AVX_INSTR psignd, 0, 0, 0
AVX_INSTR psllw, 0, 0, 0
AVX_INSTR pslld, 0, 0, 0
AVX_INSTR psllq, 0, 0, 0
AVX_INSTR pslldq, 0, 0, 0
AVX_INSTR psraw, 0, 0, 0
AVX_INSTR psrad, 0, 0, 0
AVX_INSTR psrlw, 0, 0, 0
AVX_INSTR psrld, 0, 0, 0
AVX_INSTR psrlq, 0, 0, 0
AVX_INSTR psrldq, 0, 0, 0
AVX_INSTR psubb, 0, 0, 0
AVX_INSTR psubw, 0, 0, 0
AVX_INSTR psubd, 0, 0, 0
AVX_INSTR psubq, 0, 0, 0
AVX_INSTR psubsb, 0, 0, 0
AVX_INSTR psubsw, 0, 0, 0
AVX_INSTR psubusb, 0, 0, 0
AVX_INSTR psubusw, 0, 0, 0
AVX_INSTR punpckhbw, 0, 0, 0
AVX_INSTR punpckhwd, 0, 0, 0
AVX_INSTR punpckhdq, 0, 0, 0
AVX_INSTR punpckhqdq, 0, 0, 0
AVX_INSTR punpcklbw, 0, 0, 0
AVX_INSTR punpcklwd, 0, 0, 0
AVX_INSTR punpckldq, 0, 0, 0
AVX_INSTR punpcklqdq, 0, 0, 0
AVX_INSTR pxor, 0, 0, 1
AVX_INSTR shufps, 1, 1, 0
AVX_INSTR subpd, 1, 0, 0
AVX_INSTR subps, 1, 0, 0
AVX_INSTR subsd, 1, 0, 0
AVX_INSTR subss, 1, 0, 0
AVX_INSTR unpckhpd, 1, 0, 0
AVX_INSTR unpckhps, 1, 0, 0
AVX_INSTR unpcklpd, 1, 0, 0
AVX_INSTR unpcklps, 1, 0, 0
AVX_INSTR xorpd, 1, 0, 1
AVX_INSTR xorps, 1, 0, 1
; 3DNow instructions, for sharing code between AVX, SSE and 3DNow
AVX_INSTR pfadd, 1, 0, 1
AVX_INSTR pfsub, 1, 0, 0
AVX_INSTR pfmul, 1, 0, 1
; base-4 constants for shuffles
    %assign j ((i>>6)&3)*1000 + ((i>>4)&3)*100 + ((i>>2)&3)*10 + (i&3)
    CAT_XDEFINE q000, j, i
    CAT_XDEFINE q00, j, i
    CAT_XDEFINE q0, j, i
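
; Example: q1032 is the shuffle immediate selecting source elements 1,0,3,2
; (digits read from the highest destination element down), so
;     pshufd m0, m0, q1032
; swaps the two 64-bit halves of an xmm register.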
%macro FMA_INSTR 3
    %macro %1 4-7 %1, %2, %3

FMA_INSTR pmacsdd, pmulld, paddd
FMA_INSTR pmacsww, pmullw, paddw
FMA_INSTR pmadcswd, pmaddwd, paddd
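
; Hedged usage sketch: with XOP, "pmacsdd m0, m1, m2, m3" emits the fused
; multiply-accumulate (m0 = m1*m2 + m3); without XOP, the macro falls back to
; the two-instruction sequence named above, roughly pmulld followed by paddd.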