2 * Loongson MMI optimizations for libjpeg-turbo
4 * Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
5 * Copyright (C) 2014-2015, D. R. Commander. All Rights Reserved.
6 * Copyright (C) 2016-2017, Loongson Technology Corporation Limited, Beijing.
8 * Authors: ZhuChen <zhuchen@loongson.cn>
9 * SunZhangzhi <sunzhangzhi-cq@loongson.cn>
10 * CaiWanwei <caiwanwei@loongson.cn>
12 * Based on the x86 SIMD extension for IJG JPEG library
13 * Copyright (C) 1999-2006, MIYASAKA Masaru.
15 * This software is provided 'as-is', without any express or implied
16 * warranty. In no event will the authors be held liable for any damages
17 * arising from the use of this software.
19 * Permission is granted to anyone to use this software for any purpose,
20 * including commercial applications, and to alter it and redistribute it
21 * freely, subject to the following restrictions:
23 * 1. The origin of this software must not be misrepresented; you must not
24 * claim that you wrote the original software. If you use this software
25 * in a product, an acknowledgment in the product documentation would be
26 * appreciated but is not required.
27 * 2. Altered source versions must be plainly marked as such, and must not be
28 * misrepresented as being the original software.
29 * 3. This notice may not be removed or altered from any source distribution.
32 /* This file is included by jccolor-mmi.c */
92 void jsimd_rgb_ycc_convert_mmi(JDIMENSION image_width, JSAMPARRAY input_buf,
93 JSAMPIMAGE output_buf, JDIMENSION output_row,
96 JSAMPROW inptr, outptr0, outptr1, outptr2;
98 __m64 mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7;
100 __m64 Y_BG, Cb_RG, Cr_BG;
102 while (--num_rows >= 0) {
103 inptr = *input_buf++;
104 outptr0 = output_buf[0][output_row];
105 outptr1 = output_buf[1][output_row];
106 outptr2 = output_buf[2][output_row];
109 for (num_cols = image_width; num_cols > 0; num_cols -= 8,
110 outptr0 += 8, outptr1 += 8, outptr2 += 8) {
112 #if RGB_PIXELSIZE == 3
116 asm(".set noreorder\r\n"
120 "and $10, $9, $8\r\n"
124 "xor $12, $12, $12\r\n"
126 "dadd $13, $13, $9\r\n"
127 "lbu $12, 0($13)\r\n"
131 "and $10, $9, $8\r\n"
135 "xor $11, $11, $11\r\n"
137 "dadd $13, $13, $9\r\n"
138 "lhu $11, 0($13)\r\n"
139 "sll $12, $12, 16\r\n"
140 "or $12, $12, $11\r\n"
145 "and $10, $9, $8\r\n"
150 "dadd $13, $13, $9\r\n"
151 "lwu $14, 0($13)\r\n"
153 "dsll32 $12, $12, 0\r\n"
154 "or $12, $12, $14\r\n"
159 "and $10, $9, $8\r\n"
170 "and $10, $9, $8\r\n"
181 : "=f" (mmA), "=f" (mmG), "=f" (mmF)
182 : "r" (col), "r" (num_rows), "r" (inptr)
183 : "$f0", "$f2", "$f4", "$8", "$9", "$10", "$11", "$12", "$13",
187 mmA = _mm_load_si64((__m64 *)&inptr[0]);
188 mmG = _mm_load_si64((__m64 *)&inptr[8]);
189 mmF = _mm_load_si64((__m64 *)&inptr[16]);
190 inptr += RGB_PIXELSIZE * 8;
193 mmA = _mm_slli_si64(mmA, 4 * BYTE_BIT);
194 mmD = _mm_srli_si64(mmD, 4 * BYTE_BIT);
196 mmA = _mm_unpackhi_pi8(mmA, mmG);
197 mmG = _mm_slli_si64(mmG, 4 * BYTE_BIT);
199 mmD = _mm_unpacklo_pi8(mmD, mmF);
200 mmG = _mm_unpackhi_pi8(mmG, mmF);
203 mmA = _mm_slli_si64(mmA, 4 * BYTE_BIT);
204 mmE = _mm_srli_si64(mmE, 4 * BYTE_BIT);
206 mmA = _mm_unpackhi_pi8(mmA, mmD);
207 mmD = _mm_slli_si64(mmD, 4 * BYTE_BIT);
209 mmE = _mm_unpacklo_pi8(mmE, mmG);
210 mmD = _mm_unpackhi_pi8(mmD, mmG);
212 mmA = _mm_loadlo_pi8_f(mmA);
213 mmC = _mm_loadhi_pi8_f(mmC);
216 mmE = _mm_loadlo_pi8_f(mmE);
217 mmB = _mm_loadhi_pi8_f(mmB);
220 mmD = _mm_loadlo_pi8_f(mmD);
221 mmF = _mm_loadhi_pi8_f(mmF);
223 #else /* RGB_PIXELSIZE == 4 */
227 asm(".set noreorder\r\n"
231 "and $10, $9, $8\r\n"
235 "dsll $11, $9, 2\r\n"
237 "daddu $13, $13, $11\r\n"
238 "lwc1 %0, 0($13)\r\n"
242 "and $10, $9, $8\r\n"
246 "dsll $11, $9, 2\r\n"
248 "daddu $13, $13, $11\r\n"
250 "ldc1 %0, 0($13)\r\n"
254 "and $10, $9, $8\r\n"
266 : "=f" (mmA), "=f" (mmF), "=f" (mmD), "=f" (mmC)
267 : "r" (col), "r" (inptr)
268 : "$f0", "$f2", "$8", "$9", "$10", "$11", "$13", "memory"
271 mmA = _mm_load_si64((__m64 *)&inptr[0]);
272 mmF = _mm_load_si64((__m64 *)&inptr[8]);
273 mmD = _mm_load_si64((__m64 *)&inptr[16]);
274 mmC = _mm_load_si64((__m64 *)&inptr[24]);
275 inptr += RGB_PIXELSIZE * 8;
278 mmA = _mm_unpacklo_pi8(mmA, mmF);
279 mmB = _mm_unpackhi_pi8(mmB, mmF);
282 mmD = _mm_unpacklo_pi8(mmD, mmC);
283 mmG = _mm_unpackhi_pi8(mmG, mmC);
286 mmA = _mm_unpacklo_pi16(mmA, mmD);
287 mmE = _mm_unpackhi_pi16(mmE, mmD);
290 mmB = _mm_unpacklo_pi16(mmB, mmG);
291 mmH = _mm_unpackhi_pi16(mmH, mmG);
294 mmA = _mm_loadlo_pi8_f(mmA);
295 mmC = _mm_loadhi_pi8_f(mmC);
298 mmB = _mm_loadlo_pi8_f(mmB);
299 mmD = _mm_loadhi_pi8_f(mmD);
302 mmE = _mm_loadlo_pi8_f(mmE);
303 mmG = _mm_loadhi_pi8_f(mmG);
306 mmF = _mm_unpacklo_pi8(mmF, mmH);
307 mmH = _mm_unpackhi_pi8(mmH, mmH);
308 mmF = _mm_srli_pi16(mmF, BYTE_BIT);
309 mmH = _mm_srli_pi16(mmH, BYTE_BIT);
319 mm1 = _mm_unpacklo_pi16(mm1, mm3);
320 mm6 = _mm_unpackhi_pi16(mm6, mm3);
323 mm1 = _mm_madd_pi16(mm1, PW_F0299_F0337);
324 mm6 = _mm_madd_pi16(mm6, PW_F0299_F0337);
325 mm7 = _mm_madd_pi16(mm7, PW_MF016_MF033);
326 mm4 = _mm_madd_pi16(mm4, PW_MF016_MF033);
331 mm1 = _mm_loadlo_pi16_f(mm5);
332 mm6 = _mm_loadhi_pi16_f(mm5);
333 mm1 = _mm_srli_pi32(mm1, 1);
334 mm6 = _mm_srli_pi32(mm6, 1);
336 mm5 = PD_ONEHALFM1_CJ;
337 mm7 = _mm_add_pi32(mm7, mm1);
338 mm4 = _mm_add_pi32(mm4, mm6);
339 mm7 = _mm_add_pi32(mm7, mm5);
340 mm4 = _mm_add_pi32(mm4, mm5);
341 mm7 = _mm_srli_pi32(mm7, SCALEBITS);
342 mm4 = _mm_srli_pi32(mm4, SCALEBITS);
343 mm7 = _mm_packs_pi32(mm7, mm4);
347 mm0 = _mm_unpacklo_pi16(mm0, mm2);
348 mm6 = _mm_unpackhi_pi16(mm6, mm2);
351 mm0 = _mm_madd_pi16(mm0, PW_F0299_F0337);
352 mm6 = _mm_madd_pi16(mm6, PW_F0299_F0337);
353 mm5 = _mm_madd_pi16(mm5, PW_MF016_MF033);
354 mm4 = _mm_madd_pi16(mm4, PW_MF016_MF033);
358 mm0 = _mm_loadlo_pi16_f(mm1);
359 mm6 = _mm_loadhi_pi16_f(mm1);
360 mm0 = _mm_srli_pi32(mm0, 1);
361 mm6 = _mm_srli_pi32(mm6, 1);
363 mm1 = PD_ONEHALFM1_CJ;
364 mm5 = _mm_add_pi32(mm5, mm0);
365 mm4 = _mm_add_pi32(mm4, mm6);
366 mm5 = _mm_add_pi32(mm5, mm1);
367 mm4 = _mm_add_pi32(mm4, mm1);
368 mm5 = _mm_srli_pi32(mm5, SCALEBITS);
369 mm4 = _mm_srli_pi32(mm4, SCALEBITS);
370 mm5 = _mm_packs_pi32(mm5, mm4);
372 mm7 = _mm_slli_pi16(mm7, BYTE_BIT);
373 mm5 = _mm_or_si64(mm5, mm7);
381 mm0 = _mm_unpacklo_pi16(mm0, mm3);
382 mm4 = _mm_unpackhi_pi16(mm4, mm3);
385 mm0 = _mm_madd_pi16(mm0, PW_F0114_F0250);
386 mm4 = _mm_madd_pi16(mm4, PW_F0114_F0250);
387 mm7 = _mm_madd_pi16(mm7, PW_MF008_MF041);
388 mm5 = _mm_madd_pi16(mm5, PW_MF008_MF041);
391 mm0 = _mm_add_pi32(mm0, wk[4]);
392 mm4 = _mm_add_pi32(mm4, wk[5]);
393 mm0 = _mm_add_pi32(mm0, mm3);
394 mm4 = _mm_add_pi32(mm4, mm3);
395 mm0 = _mm_srli_pi32(mm0, SCALEBITS);
396 mm4 = _mm_srli_pi32(mm4, SCALEBITS);
397 mm0 = _mm_packs_pi32(mm0, mm4);
399 mm3 = _mm_loadlo_pi16_f(mm1);
400 mm4 = _mm_loadhi_pi16_f(mm1);
401 mm3 = _mm_srli_pi32(mm3, 1);
402 mm4 = _mm_srli_pi32(mm4, 1);
404 mm1 = PD_ONEHALFM1_CJ;
405 mm7 = _mm_add_pi32(mm7, mm3);
406 mm5 = _mm_add_pi32(mm5, mm4);
407 mm7 = _mm_add_pi32(mm7, mm1);
408 mm5 = _mm_add_pi32(mm5, mm1);
409 mm7 = _mm_srli_pi32(mm7, SCALEBITS);
410 mm5 = _mm_srli_pi32(mm5, SCALEBITS);
411 mm7 = _mm_packs_pi32(mm7, mm5);
415 mm6 = _mm_unpacklo_pi16(mm6, mm2);
416 mm4 = _mm_unpackhi_pi16(mm4, mm2);
419 mm6 = _mm_madd_pi16(mm6, PW_F0114_F0250);
420 mm4 = _mm_madd_pi16(mm4, PW_F0114_F0250);
421 mm1 = _mm_madd_pi16(mm1, PW_MF008_MF041);
422 mm5 = _mm_madd_pi16(mm5, PW_MF008_MF041);
425 mm6 = _mm_add_pi32(mm6, wk[6]);
426 mm4 = _mm_add_pi32(mm4, wk[7]);
427 mm6 = _mm_add_pi32(mm6, mm2);
428 mm4 = _mm_add_pi32(mm4, mm2);
429 mm6 = _mm_srli_pi32(mm6, SCALEBITS);
430 mm4 = _mm_srli_pi32(mm4, SCALEBITS);
431 mm6 = _mm_packs_pi32(mm6, mm4);
433 mm0 = _mm_slli_pi16(mm0, BYTE_BIT);
434 mm6 = _mm_or_si64(mm6, mm0);
437 mm2 = _mm_loadlo_pi16_f(mm3);
438 mm4 = _mm_loadhi_pi16_f(mm3);
439 mm2 = _mm_srli_pi32(mm2, 1);
440 mm4 = _mm_srli_pi32(mm4, 1);
442 mm0 = PD_ONEHALFM1_CJ;
443 mm1 = _mm_add_pi32(mm1, mm2);
444 mm5 = _mm_add_pi32(mm5, mm4);
445 mm1 = _mm_add_pi32(mm1, mm0);
446 mm5 = _mm_add_pi32(mm5, mm0);
447 mm1 = _mm_srli_pi32(mm1, SCALEBITS);
448 mm5 = _mm_srli_pi32(mm5, SCALEBITS);
449 mm1 = _mm_packs_pi32(mm1, mm5);
451 mm7 = _mm_slli_pi16(mm7, BYTE_BIT);
452 mm1 = _mm_or_si64(mm1, mm7);
455 _mm_store_si64((__m64 *)&outptr0[0], Y_BG);
456 _mm_store_si64((__m64 *)&outptr1[0], Cb_RG);
457 _mm_store_si64((__m64 *)&outptr2[0], Cr_BG);