; jquanti.asm - sample data conversion and quantization (64-bit AVX2)
;
; Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
; Copyright (C) 2009, 2016, 2018, D. R. Commander.
; Copyright (C) 2016, Matthieu Darbois.
; Copyright (C) 2018, Matthias Räncker.
;
; Based on the x86 SIMD extension for IJG JPEG library
; Copyright (C) 1999-2006, MIYASAKA Masaru.
; For conditions of distribution and use, see copyright notice in jsimdext.inc
;
; This file should be assembled with NASM (Netwide Assembler) and
; can *not* be assembled with Microsoft's MASM or any compatible
; assembler (including Borland's Turbo Assembler).
; NASM is available from http://nasm.sourceforge.net/ or
; http://sourceforge.net/project/showfiles.php?group_id=6208
%include "jsimdext.inc"
%include "jdct.inc"
; --------------------------------------------------------------------------
    SECTION     SEG_TEXT
    BITS        64
;
; Load data into workspace, applying unsigned->signed conversion
;
; GLOBAL(void)
; jsimd_convsamp_avx2(JSAMPARRAY sample_data, JDIMENSION start_col,
;                     DCTELEM *workspace);
;

; r10 = JSAMPARRAY sample_data
; r11d = JDIMENSION start_col
; r12 = DCTELEM *workspace
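;
; For reference, a rough scalar C equivalent of this routine (a sketch only:
; it assumes 8-bit JSAMPLEs with CENTERJSAMPLE == 128, and the hypothetical
; helper name and loop structure are illustrative, not from libjpeg-turbo):
;
;   void convsamp_ref(JSAMPARRAY sample_data, JDIMENSION start_col,
;                     DCTELEM *workspace)
;   {
;     for (int row = 0; row < 8; row++) {
;       JSAMPROW p = sample_data[row] + start_col;
;       for (int col = 0; col < 8; col++)
;         *workspace++ = (DCTELEM)p[col] - 128;  /* unsigned -> signed */
;     }
;   }
;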
    align       32
    GLOBAL_FUNCTION(jsimd_convsamp_avx2)
EXTN(jsimd_convsamp_avx2):
    push        rbp
    mov         rbp, rsp
    collect_args 3                 ; r10=sample_data, r11d=start_col, r12=workspace

    mov         eax, r11d          ; rax = start_col, used as the column index below
    mov         rsip, JSAMPROW [r10+0*SIZEOF_JSAMPROW]  ; (JSAMPLE *) row 0
    mov         rdip, JSAMPROW [r10+1*SIZEOF_JSAMPROW]  ; (JSAMPLE *) row 1
    movq        xmm0, XMM_MMWORD [rsi+rax*SIZEOF_JSAMPLE]
    pinsrq      xmm0, XMM_MMWORD [rdi+rax*SIZEOF_JSAMPLE], 1

    mov         rsip, JSAMPROW [r10+2*SIZEOF_JSAMPROW]  ; (JSAMPLE *) row 2
    mov         rdip, JSAMPROW [r10+3*SIZEOF_JSAMPROW]  ; (JSAMPLE *) row 3
    movq        xmm1, XMM_MMWORD [rsi+rax*SIZEOF_JSAMPLE]
    pinsrq      xmm1, XMM_MMWORD [rdi+rax*SIZEOF_JSAMPLE], 1

    mov         rsip, JSAMPROW [r10+4*SIZEOF_JSAMPROW]  ; (JSAMPLE *) row 4
    mov         rdip, JSAMPROW [r10+5*SIZEOF_JSAMPROW]  ; (JSAMPLE *) row 5
    movq        xmm2, XMM_MMWORD [rsi+rax*SIZEOF_JSAMPLE]
    pinsrq      xmm2, XMM_MMWORD [rdi+rax*SIZEOF_JSAMPLE], 1

    mov         rsip, JSAMPROW [r10+6*SIZEOF_JSAMPROW]  ; (JSAMPLE *) row 6
    mov         rdip, JSAMPROW [r10+7*SIZEOF_JSAMPROW]  ; (JSAMPLE *) row 7
    movq        xmm3, XMM_MMWORD [rsi+rax*SIZEOF_JSAMPLE]
    pinsrq      xmm3, XMM_MMWORD [rdi+rax*SIZEOF_JSAMPLE], 1
    vpmovzxbw   ymm0, xmm0  ; ymm0=(00 01 02 03 04 05 06 07 10 11 12 13 14 15 16 17)
    vpmovzxbw   ymm1, xmm1  ; ymm1=(20 21 22 23 24 25 26 27 30 31 32 33 34 35 36 37)
    vpmovzxbw   ymm2, xmm2  ; ymm2=(40 41 42 43 44 45 46 47 50 51 52 53 54 55 56 57)
    vpmovzxbw   ymm3, xmm3  ; ymm3=(60 61 62 63 64 65 66 67 70 71 72 73 74 75 76 77)
    vpcmpeqw    ymm7, ymm7, ymm7   ; ymm7=(all 1's)
    vpsllw      ymm7, ymm7, 7      ; ymm7={0xFF80 0xFF80 0xFF80 0xFF80 ..}
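;
; 0xFFFF << 7 = 0xFF80, which is -128 when read as a signed word, so the
; vpaddw instructions below subtract CENTERJSAMPLE (128) from every
; zero-extended sample without loading a constant from memory.
;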
    vpaddw      ymm0, ymm0, ymm7
    vpaddw      ymm1, ymm1, ymm7
    vpaddw      ymm2, ymm2, ymm7
    vpaddw      ymm3, ymm3, ymm7
    vmovdqu     YMMWORD [YMMBLOCK(0,0,r12,SIZEOF_DCTELEM)], ymm0
    vmovdqu     YMMWORD [YMMBLOCK(2,0,r12,SIZEOF_DCTELEM)], ymm1
    vmovdqu     YMMWORD [YMMBLOCK(4,0,r12,SIZEOF_DCTELEM)], ymm2
    vmovdqu     YMMWORD [YMMBLOCK(6,0,r12,SIZEOF_DCTELEM)], ymm3

    vzeroupper
    uncollect_args 3
    pop         rbp
    ret
; --------------------------------------------------------------------------
;
; Quantize/descale the coefficients, and store into coef_block
;
; This implementation is based on an algorithm described in
;   "How to optimize for the Pentium family of microprocessors"
;   (http://www.agner.org/assem/).
;
; GLOBAL(void)
; jsimd_quantize_avx2(JCOEFPTR coef_block, DCTELEM *divisors,
;                     DCTELEM *workspace);
;
%define RECIPROCAL(m, n, b) \
  YMMBLOCK(DCTSIZE * 0 + (m), (n), (b), SIZEOF_DCTELEM)
%define CORRECTION(m, n, b) \
  YMMBLOCK(DCTSIZE * 1 + (m), (n), (b), SIZEOF_DCTELEM)
%define SCALE(m, n, b) \
  YMMBLOCK(DCTSIZE * 2 + (m), (n), (b), SIZEOF_DCTELEM)
; r10 = JCOEFPTR coef_block
; r11 = DCTELEM *divisors
; r12 = DCTELEM *workspace
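;
; In rough scalar form, each coefficient is quantized as sketched below.
; This is a hypothetical reference helper, not libjpeg-turbo's code: it
; assumes the divisors table holds a 16-bit reciprocal, correction, and
; scale per coefficient (as the RECIPROCAL/CORRECTION/SCALE macros above
; suggest) and glosses over edge cases such as an input of INT16_MIN.
;
;   short quantize_one(short x, unsigned short recip, unsigned short corr,
;                      unsigned short scale)
;   {
;     unsigned short t = (unsigned short)(x < 0 ? -x : x);  /* vpabsw   */
;     t = (unsigned short)(t + corr);                       /* vpaddw   */
;     t = (unsigned short)(((unsigned)t * recip) >> 16);    /* vpmulhuw */
;     t = (unsigned short)(((unsigned)t * scale) >> 16);    /* vpmulhuw */
;     if (x == 0) return 0;                                 /* vpsignw  */
;     return (x < 0) ? (short)-t : (short)t;
;   }
;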
    align       32
    GLOBAL_FUNCTION(jsimd_quantize_avx2)
EXTN(jsimd_quantize_avx2):
    push        rbp
    mov         rbp, rsp
    collect_args 3                 ; r10=coef_block, r11=divisors, r12=workspace
    vmovdqu     ymm4, [YMMBLOCK(0,0,r12,SIZEOF_DCTELEM)]
    vmovdqu     ymm5, [YMMBLOCK(2,0,r12,SIZEOF_DCTELEM)]
    vmovdqu     ymm6, [YMMBLOCK(4,0,r12,SIZEOF_DCTELEM)]
    vmovdqu     ymm7, [YMMBLOCK(6,0,r12,SIZEOF_DCTELEM)]
    vpabsw      ymm0, ymm4         ; ymm0-ymm3 = |workspace|; ymm4-ymm7 keep
    vpabsw      ymm1, ymm5         ; the signed originals for vpsignw below
    vpabsw      ymm2, ymm6
    vpabsw      ymm3, ymm7
    vpaddw      ymm0, YMMWORD [CORRECTION(0,0,r11)]  ; correction + roundfactor
    vpaddw      ymm1, YMMWORD [CORRECTION(2,0,r11)]
    vpaddw      ymm2, YMMWORD [CORRECTION(4,0,r11)]
    vpaddw      ymm3, YMMWORD [CORRECTION(6,0,r11)]
    vpmulhuw    ymm0, YMMWORD [RECIPROCAL(0,0,r11)]  ; reciprocal
    vpmulhuw    ymm1, YMMWORD [RECIPROCAL(2,0,r11)]
    vpmulhuw    ymm2, YMMWORD [RECIPROCAL(4,0,r11)]
    vpmulhuw    ymm3, YMMWORD [RECIPROCAL(6,0,r11)]
    vpmulhuw    ymm0, YMMWORD [SCALE(0,0,r11)]       ; scale
    vpmulhuw    ymm1, YMMWORD [SCALE(2,0,r11)]
    vpmulhuw    ymm2, YMMWORD [SCALE(4,0,r11)]
    vpmulhuw    ymm3, YMMWORD [SCALE(6,0,r11)]
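;
; vpmulhuw keeps the high 16 bits of the unsigned 16x16 -> 32-bit product,
; i.e. (a * b) >> 16, so the two multiplication passes above implement the
; reciprocal-multiplication division from the algorithm cited in the header.
;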
    vpsignw     ymm0, ymm0, ymm4   ; restore the sign of the original coefficients
    vpsignw     ymm1, ymm1, ymm5
    vpsignw     ymm2, ymm2, ymm6
    vpsignw     ymm3, ymm3, ymm7
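;
; vpsignw negates each word of the first source whose corresponding word in
; the second source (the original coefficient) is negative, and zeroes any
; word whose counterpart is zero, so zero coefficients stay exactly zero.
;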
    vmovdqu     [YMMBLOCK(0,0,r10,SIZEOF_DCTELEM)], ymm0
    vmovdqu     [YMMBLOCK(2,0,r10,SIZEOF_DCTELEM)], ymm1
    vmovdqu     [YMMBLOCK(4,0,r10,SIZEOF_DCTELEM)], ymm2
    vmovdqu     [YMMBLOCK(6,0,r10,SIZEOF_DCTELEM)], ymm3

    vzeroupper
    uncollect_args 3
    pop         rbp
    ret
; For some reason, the OS X linker does not honor the request to align the
; segment unless we do this.
    align       32