%define mangle(x) x
%elifidn __OUTPUT_FORMAT__,x64
%define mangle(x) x
+%elifidn __OUTPUT_FORMAT__,win64
+ %define mangle(x) x
%else
%define mangle(x) _ %+ x
%endif
global sym(vp8_loop_filter_bh_y_sse2) PRIVATE
sym(vp8_loop_filter_bh_y_sse2):
-%ifidn __OUTPUT_FORMAT__,x64
+%if LIBVPX_YASM_WIN64
%define src rcx ; src_ptr
%define stride rdx ; src_pixel_step
%define blimit r8
movdqa i12, xmm3
movdqa i13, xmm8
-%ifidn __OUTPUT_FORMAT__,x64
+%if LIBVPX_YASM_WIN64
pop r13
pop r12
RESTORE_XMM
global sym(vp8_loop_filter_bv_y_sse2) PRIVATE
sym(vp8_loop_filter_bv_y_sse2):
-%ifidn __OUTPUT_FORMAT__,x64
+%if LIBVPX_YASM_WIN64
%define src rcx ; src_ptr
%define stride rdx ; src_pixel_step
%define blimit r8
; un-ALIGN_STACK
pop rsp
-%ifidn __OUTPUT_FORMAT__,x64
+%if LIBVPX_YASM_WIN64
pop r13
pop r12
RESTORE_XMM
movsxd rax, dword ptr arg(1) ; src_stride
movsxd rdx, dword ptr arg(3) ; ref_stride
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
SAVE_XMM 7, u
%define src_ptr rcx
%define src_stride rdx
pop rsi
pop rbp
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
RESTORE_XMM
%endif
%endif
xchg rbx, rax
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
SAVE_XMM 7, u
%define src_ptr rcx
%define src_stride rdx
pop rsi
pop rbp
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
pop rsi
RESTORE_XMM
%endif
movsxd rax, dword ptr arg(2)
lea rcx, [rsi + rax*2]
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
%define input rcx
%define output rdx
%define pitch r8
RESTORE_GOT
pop rbp
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
RESTORE_XMM
%endif
%endif
push rdi
push rsi
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
push rdi
push rsi
%endif
mov rdi, arg(0) ; BLOCK *b
mov rsi, arg(1) ; BLOCKD *d
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
mov rdi, rcx ; BLOCK *b
mov rsi, rdx ; BLOCKD *d
%else
pop rsi
pop rdi
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
pop rsi
pop rdi
%endif
push rdi
push rsi
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
push rdi
push rsi
%else
mov rdi, arg(0) ; BLOCK *b
mov rsi, arg(1) ; BLOCKD *d
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
mov rdi, rcx ; BLOCK *b
mov rsi, rdx ; BLOCKD *d
%else
pop rsi
pop rdi
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
pop rsi
pop rdi
%endif
%define stack_size 32
sub rsp, stack_size
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
SAVE_XMM 8, u
push rdi
push rsi
mov rdi, arg(0) ; BLOCK *b
mov rsi, arg(1) ; BLOCKD *d
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
mov rdi, rcx ; BLOCK *b
mov rsi, rdx ; BLOCKD *d
%else
pop rbp
%else
%undef xmm5
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
pop rsi
pop rdi
RESTORE_XMM
push rdi
push rsi
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
push rdi
push rsi
%endif
mov rdi, arg(0) ; BLOCK *b
mov rsi, arg(1) ; BLOCKD *d
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
mov rdi, rcx ; BLOCK *b
mov rsi, rdx ; BLOCKD *d
%else
pop rsi
pop rdi
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
pop rsi
pop rdi
%endif
movsxd rax, dword ptr arg(2)
lea rcx, [rsi + rax*2]
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
%define input rcx
%define output rdx
%define pitch r8
RESTORE_GOT
pop rbp
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
RESTORE_XMM
%endif
%endif
push rdi
push rsi
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
push rdi
push rsi
%endif
mov rdi, arg(0) ; BLOCK *b
mov rsi, arg(1) ; BLOCKD *d
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
mov rdi, rcx ; BLOCK *b
mov rsi, rdx ; BLOCKD *d
%else
pop rsi
pop rdi
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
pop rsi
pop rdi
%endif
push rdi
push rsi
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
push rdi
push rsi
%else
mov rdi, arg(0) ; BLOCK *b
mov rsi, arg(1) ; BLOCKD *d
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
mov rdi, rcx ; BLOCK *b
mov rsi, rdx ; BLOCKD *d
%else
pop rsi
pop rdi
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
pop rsi
pop rdi
%endif
%define stack_size 32
sub rsp, stack_size
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
SAVE_XMM 8, u
push rdi
push rsi
mov rdi, arg(0) ; BLOCK *b
mov rsi, arg(1) ; BLOCKD *d
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
mov rdi, rcx ; BLOCK *b
mov rsi, rdx ; BLOCKD *d
%else
pop rbp
%else
%undef xmm5
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
pop rsi
pop rdi
RESTORE_XMM
push rdi
push rsi
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
push rdi
push rsi
%endif
mov rdi, arg(0) ; BLOCK *b
mov rsi, arg(1) ; BLOCKD *d
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
mov rdi, rcx ; BLOCK *b
mov rsi, rdx ; BLOCKD *d
%else
pop rsi
pop rdi
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
pop rsi
pop rdi
%endif
movsxd rax, dword ptr arg(1) ; src_stride
movsxd rdx, dword ptr arg(3) ; ref_stride
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
SAVE_XMM 7, u
%define src_ptr rcx
%define src_stride rdx
pop rsi
pop rbp
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
RESTORE_XMM
%endif
%endif
xchg rbx, rax
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
SAVE_XMM 7, u
%define src_ptr rcx
%define src_stride rdx
pop rsi
pop rbp
%else
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
pop rsi
RESTORE_XMM
%endif
ret
-%ifidn __OUTPUT_FORMAT__,x64
+%if LIBVPX_YASM_WIN64
global sym(vpx_winx64_fldcw) PRIVATE
sym(vpx_winx64_fldcw):
sub rsp, 8
%endif
+; LIBVPX_YASM_WIN64
+; Set LIBVPX_YASM_WIN64 when the output format is Windows 64-bit so the code
+; works whether x64 or win64 is given on the Yasm command line.
+%ifidn __OUTPUT_FORMAT__,win64
+%define LIBVPX_YASM_WIN64 1
+%elifidn __OUTPUT_FORMAT__,x64
+%define LIBVPX_YASM_WIN64 1
+%else
+%define LIBVPX_YASM_WIN64 0
+%endif
+
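The rest of the change simply keys existing Win64 guards off this new flag. As a minimal sketch (not part of this patch; register picks mirror the hunks above), a guard written this way assembles identically whether Yasm is invoked with -f win64 or -f x64:

%if LIBVPX_YASM_WIN64
  ; Microsoft x64 convention: first two arguments arrive in rcx/rdx.
  %define src    rcx
  %define stride rdx
%else
  ; System V AMD64 convention: first two arguments arrive in rdi/rsi.
  %define src    rdi
  %define stride rsi
%endif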
; sym()
; Return the proper symbol name for the target ABI.
;
%define sym(x) x
%elifidn __OUTPUT_FORMAT__,elfx32
%define sym(x) x
-%elifidn __OUTPUT_FORMAT__,x64
+%elif LIBVPX_YASM_WIN64
%define sym(x) x
%else
%define sym(x) _ %+ x
%define PRIVATE :hidden
%elifidn __OUTPUT_FORMAT__,elfx32
%define PRIVATE :hidden
- %elifidn __OUTPUT_FORMAT__,x64
+ %elif LIBVPX_YASM_WIN64
%define PRIVATE
%else
%define PRIVATE :private_extern
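The two macros are used together when exporting a function; a sketch with a hypothetical symbol name:

global sym(vp8_example_fn) PRIVATE
sym(vp8_example_fn):
    ; On win64/x64 (LIBVPX_YASM_WIN64) sym() expands to the bare name and
    ; PRIVATE is empty; ELF targets get :hidden, Mach-O :private_extern, and
    ; the remaining formats a leading underscore on the symbol.
    ret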
%else
; 64 bit ABI passes arguments in registers. This is a workaround to get up
; and running quickly. Relies on SHADOW_ARGS_TO_STACK
- %ifidn __OUTPUT_FORMAT__,x64
+ %if LIBVPX_YASM_WIN64
%define arg(x) [rbp+16+8*x]
%else
%define arg(x) [rbp-8-8*x]
%endm
%define UNSHADOW_ARGS
%else
-%ifidn __OUTPUT_FORMAT__,x64
+%if LIBVPX_YASM_WIN64
%macro SHADOW_ARGS_TO_STACK 1 ; argc
%if %1 > 0
mov arg(0),rcx
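A rough sketch of how SHADOW_ARGS_TO_STACK and arg() combine in a typical prolog/epilog (function name and argument count are illustrative only):

sym(vp8_example_fn):
    push        rbp
    mov         rbp, rsp
    SHADOW_ARGS_TO_STACK 2            ; copy register arguments to the stack
    ; end prolog

    mov         rdi, arg(0)           ; first (pointer) argument
    movsxd      rax, dword ptr arg(1) ; second (int) argument

    ; begin epilog
    UNSHADOW_ARGS
    pop         rbp
    ret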
; Win64 ABI requires 16 byte stack alignment, but then pushes an 8 byte return
; value. Typically we follow this up with 'push rbp' - re-aligning the stack -
; but in some cases this is not done and unaligned movs must be used.
-%ifidn __OUTPUT_FORMAT__,x64
+%if LIBVPX_YASM_WIN64
%macro SAVE_XMM 1-2 a
%if %1 < 6
%error Only xmm registers 6-15 must be preserved
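For context, a sketch of how the macro pairs with RESTORE_XMM in a routine that has not re-aligned the stack with 'push rbp' (hypothetical function; the 'u' argument selects the unaligned stores described above):

sym(vp8_example_fn):
    SAVE_XMM 7, u        ; preserve xmm6-xmm7 without assuming 16-byte alignment
    push        rsi
    push        rdi
    ; ... function body ...

    pop         rdi
    pop         rsi
    RESTORE_XMM
    ret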