/*
 *  Copyright 2012 The LibYuv Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
11 #include "libyuv/convert_from_argb.h"
13 #include "libyuv/basic_types.h"
14 #include "libyuv/cpu_id.h"
15 #include "libyuv/format_conversion.h"
16 #include "libyuv/planar_functions.h"
17 #include "libyuv/row.h"
24 // ARGB little endian (bgra in memory) to I444
26 int ARGBToI444(const uint8* src_argb, int src_stride_argb,
27 uint8* dst_y, int dst_stride_y,
28 uint8* dst_u, int dst_stride_u,
29 uint8* dst_v, int dst_stride_v,
30 int width, int height) {
32 void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) =
34 void (*ARGBToUV444Row)(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
35 int pix) = ARGBToUV444Row_C;
36 if (!src_argb || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) {
41 src_argb = src_argb + (height - 1) * src_stride_argb;
42 src_stride_argb = -src_stride_argb;
45 if (src_stride_argb == width * 4 &&
46 dst_stride_y == width &&
47 dst_stride_u == width &&
48 dst_stride_v == width) {
51 src_stride_argb = dst_stride_y = dst_stride_u = dst_stride_v = 0;
53 #if defined(HAS_ARGBTOUV444ROW_SSSE3)
54 if (TestCpuFlag(kCpuHasSSSE3) && width >= 16) {
55 ARGBToUV444Row = ARGBToUV444Row_Any_SSSE3;
56 if (IS_ALIGNED(width, 16)) {
57 ARGBToUV444Row = ARGBToUV444Row_Unaligned_SSSE3;
58 if (IS_ALIGNED(src_argb, 16) && IS_ALIGNED(src_stride_argb, 16)) {
59 ARGBToUV444Row = ARGBToUV444Row_SSSE3;
64 #if defined(HAS_ARGBTOYROW_SSSE3)
65 if (TestCpuFlag(kCpuHasSSSE3) && width >= 16) {
66 ARGBToYRow = ARGBToYRow_Any_SSSE3;
67 if (IS_ALIGNED(width, 16)) {
68 ARGBToYRow = ARGBToYRow_Unaligned_SSSE3;
69 if (IS_ALIGNED(src_argb, 16) && IS_ALIGNED(src_stride_argb, 16) &&
70 IS_ALIGNED(dst_y, 16) && IS_ALIGNED(dst_stride_y, 16)) {
71 ARGBToYRow = ARGBToYRow_SSSE3;
76 #elif defined(HAS_ARGBTOYROW_NEON)
77 if (TestCpuFlag(kCpuHasNEON) && width >= 8) {
78 ARGBToYRow = ARGBToYRow_Any_NEON;
79 ARGBToUV444Row = ARGBToUV444Row_Any_NEON;
80 if (IS_ALIGNED(width, 8)) {
81 ARGBToYRow = ARGBToYRow_NEON;
82 ARGBToUV444Row = ARGBToUV444Row_NEON;
87 for (y = 0; y < height; ++y) {
88 ARGBToUV444Row(src_argb, dst_u, dst_v, width);
89 ARGBToYRow(src_argb, dst_y, width);
90 src_argb += src_stride_argb;
91 dst_y += dst_stride_y;
92 dst_u += dst_stride_u;
93 dst_v += dst_stride_v;
98 // ARGB little endian (bgra in memory) to I422
100 int ARGBToI422(const uint8* src_argb, int src_stride_argb,
101 uint8* dst_y, int dst_stride_y,
102 uint8* dst_u, int dst_stride_u,
103 uint8* dst_v, int dst_stride_v,
104 int width, int height) {
106 void (*ARGBToUV422Row)(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
107 int pix) = ARGBToUV422Row_C;
108 void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) =
110 if (!src_argb || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) {
115 src_argb = src_argb + (height - 1) * src_stride_argb;
116 src_stride_argb = -src_stride_argb;
119 if (src_stride_argb == width * 4 &&
120 dst_stride_y == width &&
121 dst_stride_u * 2 == width &&
122 dst_stride_v * 2 == width) {
125 src_stride_argb = dst_stride_y = dst_stride_u = dst_stride_v = 0;
127 #if defined(HAS_ARGBTOUV422ROW_SSSE3)
128 if (TestCpuFlag(kCpuHasSSSE3) && width >= 16) {
129 ARGBToUV422Row = ARGBToUV422Row_Any_SSSE3;
130 if (IS_ALIGNED(width, 16)) {
131 ARGBToUV422Row = ARGBToUV422Row_Unaligned_SSSE3;
132 if (IS_ALIGNED(src_argb, 16) && IS_ALIGNED(src_stride_argb, 16)) {
133 ARGBToUV422Row = ARGBToUV422Row_SSSE3;
139 #if defined(HAS_ARGBTOYROW_SSSE3)
140 if (TestCpuFlag(kCpuHasSSSE3) && width >= 16) {
141 ARGBToYRow = ARGBToYRow_Any_SSSE3;
142 if (IS_ALIGNED(width, 16)) {
143 ARGBToYRow = ARGBToYRow_Unaligned_SSSE3;
144 if (IS_ALIGNED(src_argb, 16) && IS_ALIGNED(src_stride_argb, 16) &&
145 IS_ALIGNED(dst_y, 16) && IS_ALIGNED(dst_stride_y, 16)) {
146 ARGBToYRow = ARGBToYRow_SSSE3;
150 #elif defined(HAS_ARGBTOYROW_NEON)
151 if (TestCpuFlag(kCpuHasNEON) && width >= 8) {
152 ARGBToYRow = ARGBToYRow_Any_NEON;
153 if (IS_ALIGNED(width, 8)) {
154 ARGBToYRow = ARGBToYRow_NEON;
157 ARGBToUV422Row = ARGBToUV422Row_Any_NEON;
158 if (IS_ALIGNED(width, 16)) {
159 ARGBToUV422Row = ARGBToUV422Row_NEON;
165 for (y = 0; y < height; ++y) {
166 ARGBToUV422Row(src_argb, dst_u, dst_v, width);
167 ARGBToYRow(src_argb, dst_y, width);
168 src_argb += src_stride_argb;
169 dst_y += dst_stride_y;
170 dst_u += dst_stride_u;
171 dst_v += dst_stride_v;
176 // ARGB little endian (bgra in memory) to I411
178 int ARGBToI411(const uint8* src_argb, int src_stride_argb,
179 uint8* dst_y, int dst_stride_y,
180 uint8* dst_u, int dst_stride_u,
181 uint8* dst_v, int dst_stride_v,
182 int width, int height) {
184 void (*ARGBToUV411Row)(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
185 int pix) = ARGBToUV411Row_C;
186 void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) =
188 if (!src_argb || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) {
193 src_argb = src_argb + (height - 1) * src_stride_argb;
194 src_stride_argb = -src_stride_argb;
197 if (src_stride_argb == width * 4 &&
198 dst_stride_y == width &&
199 dst_stride_u * 4 == width &&
200 dst_stride_v * 4 == width) {
203 src_stride_argb = dst_stride_y = dst_stride_u = dst_stride_v = 0;
205 #if defined(HAS_ARGBTOYROW_SSSE3)
206 if (TestCpuFlag(kCpuHasSSSE3) && width >= 16) {
207 ARGBToYRow = ARGBToYRow_Any_SSSE3;
208 if (IS_ALIGNED(width, 16)) {
209 ARGBToYRow = ARGBToYRow_Unaligned_SSSE3;
210 if (IS_ALIGNED(src_argb, 16) && IS_ALIGNED(src_stride_argb, 16) &&
211 IS_ALIGNED(dst_y, 16) && IS_ALIGNED(dst_stride_y, 16)) {
212 ARGBToYRow = ARGBToYRow_SSSE3;
217 #if defined(HAS_ARGBTOYROW_AVX2)
218 if (TestCpuFlag(kCpuHasAVX2) && width >= 32) {
219 ARGBToYRow = ARGBToYRow_Any_AVX2;
220 if (IS_ALIGNED(width, 32)) {
221 ARGBToYRow = ARGBToYRow_AVX2;
225 #if defined(HAS_ARGBTOYROW_NEON)
226 if (TestCpuFlag(kCpuHasNEON) && width >= 8) {
227 ARGBToYRow = ARGBToYRow_Any_NEON;
228 if (IS_ALIGNED(width, 8)) {
229 ARGBToYRow = ARGBToYRow_NEON;
232 ARGBToUV411Row = ARGBToUV411Row_Any_NEON;
233 if (IS_ALIGNED(width, 32)) {
234 ARGBToUV411Row = ARGBToUV411Row_NEON;
240 for (y = 0; y < height; ++y) {
241 ARGBToUV411Row(src_argb, dst_u, dst_v, width);
242 ARGBToYRow(src_argb, dst_y, width);
243 src_argb += src_stride_argb;
244 dst_y += dst_stride_y;
245 dst_u += dst_stride_u;
246 dst_v += dst_stride_v;
252 int ARGBToNV12(const uint8* src_argb, int src_stride_argb,
253 uint8* dst_y, int dst_stride_y,
254 uint8* dst_uv, int dst_stride_uv,
255 int width, int height) {
257 int halfwidth = (width + 1) >> 1;
258 void (*ARGBToUVRow)(const uint8* src_argb0, int src_stride_argb,
259 uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C;
260 void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) =
262 void (*MergeUVRow_)(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
263 int width) = MergeUVRow_C;
264 // Allocate a rows of uv.
265 align_buffer_64(row_u, ((halfwidth + 15) & ~15) * 2);
266 uint8* row_v = row_u + ((halfwidth + 15) & ~15);
269 width <= 0 || height == 0) {
272 // Negative height means invert the image.
275 src_argb = src_argb + (height - 1) * src_stride_argb;
276 src_stride_argb = -src_stride_argb;
278 #if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
279 if (TestCpuFlag(kCpuHasSSSE3) && width >= 16) {
280 ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
281 ARGBToYRow = ARGBToYRow_Any_SSSE3;
282 if (IS_ALIGNED(width, 16)) {
283 ARGBToUVRow = ARGBToUVRow_Unaligned_SSSE3;
284 ARGBToYRow = ARGBToYRow_Unaligned_SSSE3;
285 if (IS_ALIGNED(src_argb, 16) && IS_ALIGNED(src_stride_argb, 16)) {
286 ARGBToUVRow = ARGBToUVRow_SSSE3;
287 if (IS_ALIGNED(dst_y, 16) && IS_ALIGNED(dst_stride_y, 16)) {
288 ARGBToYRow = ARGBToYRow_SSSE3;
293 #elif defined(HAS_ARGBTOYROW_NEON)
294 if (TestCpuFlag(kCpuHasNEON) && width >= 8) {
295 ARGBToYRow = ARGBToYRow_Any_NEON;
296 if (IS_ALIGNED(width, 8)) {
297 ARGBToYRow = ARGBToYRow_NEON;
300 ARGBToUVRow = ARGBToUVRow_Any_NEON;
301 if (IS_ALIGNED(width, 16)) {
302 ARGBToUVRow = ARGBToUVRow_NEON;
307 #if defined(HAS_MERGEUVROW_SSE2)
308 if (TestCpuFlag(kCpuHasSSE2) && halfwidth >= 16) {
309 MergeUVRow_ = MergeUVRow_Any_SSE2;
310 if (IS_ALIGNED(halfwidth, 16)) {
311 MergeUVRow_ = MergeUVRow_Unaligned_SSE2;
312 if (IS_ALIGNED(dst_uv, 16) && IS_ALIGNED(dst_stride_uv, 16)) {
313 MergeUVRow_ = MergeUVRow_SSE2;
318 #if defined(HAS_MERGEUVROW_AVX2)
319 if (TestCpuFlag(kCpuHasAVX2) && halfwidth >= 32) {
320 MergeUVRow_ = MergeUVRow_Any_AVX2;
321 if (IS_ALIGNED(halfwidth, 32)) {
322 MergeUVRow_ = MergeUVRow_AVX2;
326 #if defined(HAS_MERGEUVROW_NEON)
327 if (TestCpuFlag(kCpuHasNEON) && halfwidth >= 16) {
328 MergeUVRow_ = MergeUVRow_Any_NEON;
329 if (IS_ALIGNED(halfwidth, 16)) {
330 MergeUVRow_ = MergeUVRow_NEON;
335 for (y = 0; y < height - 1; y += 2) {
336 ARGBToUVRow(src_argb, src_stride_argb, row_u, row_v, width);
337 MergeUVRow_(row_u, row_v, dst_uv, halfwidth);
338 ARGBToYRow(src_argb, dst_y, width);
339 ARGBToYRow(src_argb + src_stride_argb, dst_y + dst_stride_y, width);
340 src_argb += src_stride_argb * 2;
341 dst_y += dst_stride_y * 2;
342 dst_uv += dst_stride_uv;
345 ARGBToUVRow(src_argb, 0, row_u, row_v, width);
346 MergeUVRow_(row_u, row_v, dst_uv, halfwidth);
347 ARGBToYRow(src_argb, dst_y, width);
349 free_aligned_buffer_64(row_u);
353 // Same as NV12 but U and V swapped.
355 int ARGBToNV21(const uint8* src_argb, int src_stride_argb,
356 uint8* dst_y, int dst_stride_y,
357 uint8* dst_uv, int dst_stride_uv,
358 int width, int height) {
360 int halfwidth = (width + 1) >> 1;
361 void (*ARGBToUVRow)(const uint8* src_argb0, int src_stride_argb,
362 uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C;
363 void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) =
365 void (*MergeUVRow_)(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
366 int width) = MergeUVRow_C;
367 // Allocate a rows of uv.
368 align_buffer_64(row_u, ((halfwidth + 15) & ~15) * 2);
369 uint8* row_v = row_u + ((halfwidth + 15) & ~15);
372 width <= 0 || height == 0) {
375 // Negative height means invert the image.
378 src_argb = src_argb + (height - 1) * src_stride_argb;
379 src_stride_argb = -src_stride_argb;
381 #if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
382 if (TestCpuFlag(kCpuHasSSSE3) && width >= 16) {
383 ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
384 ARGBToYRow = ARGBToYRow_Any_SSSE3;
385 if (IS_ALIGNED(width, 16)) {
386 ARGBToUVRow = ARGBToUVRow_Unaligned_SSSE3;
387 ARGBToYRow = ARGBToYRow_Unaligned_SSSE3;
388 if (IS_ALIGNED(src_argb, 16) && IS_ALIGNED(src_stride_argb, 16)) {
389 ARGBToUVRow = ARGBToUVRow_SSSE3;
390 if (IS_ALIGNED(dst_y, 16) && IS_ALIGNED(dst_stride_y, 16)) {
391 ARGBToYRow = ARGBToYRow_SSSE3;
396 #elif defined(HAS_ARGBTOYROW_NEON)
397 if (TestCpuFlag(kCpuHasNEON) && width >= 8) {
398 ARGBToYRow = ARGBToYRow_Any_NEON;
399 if (IS_ALIGNED(width, 8)) {
400 ARGBToYRow = ARGBToYRow_NEON;
403 ARGBToUVRow = ARGBToUVRow_Any_NEON;
404 if (IS_ALIGNED(width, 16)) {
405 ARGBToUVRow = ARGBToUVRow_NEON;
410 #if defined(HAS_MERGEUVROW_SSE2)
411 if (TestCpuFlag(kCpuHasSSE2) && halfwidth >= 16) {
412 MergeUVRow_ = MergeUVRow_Any_SSE2;
413 if (IS_ALIGNED(halfwidth, 16)) {
414 MergeUVRow_ = MergeUVRow_Unaligned_SSE2;
415 if (IS_ALIGNED(dst_uv, 16) && IS_ALIGNED(dst_stride_uv, 16)) {
416 MergeUVRow_ = MergeUVRow_SSE2;
421 #if defined(HAS_MERGEUVROW_AVX2)
422 if (TestCpuFlag(kCpuHasAVX2) && halfwidth >= 32) {
423 MergeUVRow_ = MergeUVRow_Any_AVX2;
424 if (IS_ALIGNED(halfwidth, 32)) {
425 MergeUVRow_ = MergeUVRow_AVX2;
429 #if defined(HAS_MERGEUVROW_NEON)
430 if (TestCpuFlag(kCpuHasNEON) && halfwidth >= 16) {
431 MergeUVRow_ = MergeUVRow_Any_NEON;
432 if (IS_ALIGNED(halfwidth, 16)) {
433 MergeUVRow_ = MergeUVRow_NEON;
438 for (y = 0; y < height - 1; y += 2) {
439 ARGBToUVRow(src_argb, src_stride_argb, row_u, row_v, width);
440 MergeUVRow_(row_v, row_u, dst_uv, halfwidth);
441 ARGBToYRow(src_argb, dst_y, width);
442 ARGBToYRow(src_argb + src_stride_argb, dst_y + dst_stride_y, width);
443 src_argb += src_stride_argb * 2;
444 dst_y += dst_stride_y * 2;
445 dst_uv += dst_stride_uv;
448 ARGBToUVRow(src_argb, 0, row_u, row_v, width);
449 MergeUVRow_(row_v, row_u, dst_uv, halfwidth);
450 ARGBToYRow(src_argb, dst_y, width);
452 free_aligned_buffer_64(row_u);
456 // Convert ARGB to YUY2.
458 int ARGBToYUY2(const uint8* src_argb, int src_stride_argb,
459 uint8* dst_yuy2, int dst_stride_yuy2,
460 int width, int height) {
462 void (*ARGBToUV422Row)(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
463 int pix) = ARGBToUV422Row_C;
464 void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) =
466 void (*I422ToYUY2Row)(const uint8* src_y, const uint8* src_u,
467 const uint8* src_v, uint8* dst_yuy2, int width) = I422ToYUY2Row_C;
469 if (!src_argb || !dst_yuy2 ||
470 width <= 0 || height == 0) {
473 // Negative height means invert the image.
476 dst_yuy2 = dst_yuy2 + (height - 1) * dst_stride_yuy2;
477 dst_stride_yuy2 = -dst_stride_yuy2;
480 if (src_stride_argb == width * 4 &&
481 dst_stride_yuy2 == width * 2) {
484 src_stride_argb = dst_stride_yuy2 = 0;
486 #if defined(HAS_ARGBTOUV422ROW_SSSE3)
487 if (TestCpuFlag(kCpuHasSSSE3) && width >= 16) {
488 ARGBToUV422Row = ARGBToUV422Row_Any_SSSE3;
489 if (IS_ALIGNED(width, 16)) {
490 ARGBToUV422Row = ARGBToUV422Row_Unaligned_SSSE3;
491 if (IS_ALIGNED(src_argb, 16) && IS_ALIGNED(src_stride_argb, 16)) {
492 ARGBToUV422Row = ARGBToUV422Row_SSSE3;
497 #if defined(HAS_ARGBTOYROW_SSSE3)
498 if (TestCpuFlag(kCpuHasSSSE3) && width >= 16) {
499 ARGBToYRow = ARGBToYRow_Any_SSSE3;
500 if (IS_ALIGNED(width, 16)) {
501 ARGBToYRow = ARGBToYRow_Unaligned_SSSE3;
502 if (IS_ALIGNED(src_argb, 16) && IS_ALIGNED(src_stride_argb, 16)) {
503 ARGBToYRow = ARGBToYRow_SSSE3;
507 #elif defined(HAS_ARGBTOYROW_NEON)
508 if (TestCpuFlag(kCpuHasNEON) && width >= 8) {
509 ARGBToYRow = ARGBToYRow_Any_NEON;
510 if (IS_ALIGNED(width, 8)) {
511 ARGBToYRow = ARGBToYRow_NEON;
514 ARGBToUV422Row = ARGBToUV422Row_Any_NEON;
515 if (IS_ALIGNED(width, 16)) {
516 ARGBToUV422Row = ARGBToUV422Row_NEON;
522 #if defined(HAS_I422TOYUY2ROW_SSE2)
523 if (TestCpuFlag(kCpuHasSSE2) && width >= 16) {
524 I422ToYUY2Row = I422ToYUY2Row_Any_SSE2;
525 if (IS_ALIGNED(width, 16)) {
526 I422ToYUY2Row = I422ToYUY2Row_SSE2;
529 #elif defined(HAS_I422TOYUY2ROW_NEON)
530 if (TestCpuFlag(kCpuHasNEON) && width >= 16) {
531 I422ToYUY2Row = I422ToYUY2Row_Any_NEON;
532 if (IS_ALIGNED(width, 16)) {
533 I422ToYUY2Row = I422ToYUY2Row_NEON;
539 // Allocate a rows of yuv.
540 align_buffer_64(row_y, ((width + 63) & ~63) * 2);
541 uint8* row_u = row_y + ((width + 63) & ~63);
542 uint8* row_v = row_u + ((width + 63) & ~63) / 2;
544 for (y = 0; y < height; ++y) {
545 ARGBToUV422Row(src_argb, row_u, row_v, width);
546 ARGBToYRow(src_argb, row_y, width);
547 I422ToYUY2Row(row_y, row_u, row_v, dst_yuy2, width);
548 src_argb += src_stride_argb;
549 dst_yuy2 += dst_stride_yuy2;
552 free_aligned_buffer_64(row_y);
557 // Convert ARGB to UYVY.
559 int ARGBToUYVY(const uint8* src_argb, int src_stride_argb,
560 uint8* dst_uyvy, int dst_stride_uyvy,
561 int width, int height) {
563 void (*ARGBToUV422Row)(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
564 int pix) = ARGBToUV422Row_C;
565 void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) =
567 void (*I422ToUYVYRow)(const uint8* src_y, const uint8* src_u,
568 const uint8* src_v, uint8* dst_uyvy, int width) = I422ToUYVYRow_C;
570 if (!src_argb || !dst_uyvy ||
571 width <= 0 || height == 0) {
574 // Negative height means invert the image.
577 dst_uyvy = dst_uyvy + (height - 1) * dst_stride_uyvy;
578 dst_stride_uyvy = -dst_stride_uyvy;
581 if (src_stride_argb == width * 4 &&
582 dst_stride_uyvy == width * 2) {
585 src_stride_argb = dst_stride_uyvy = 0;
587 #if defined(HAS_ARGBTOUV422ROW_SSSE3)
588 if (TestCpuFlag(kCpuHasSSSE3) && width >= 16) {
589 ARGBToUV422Row = ARGBToUV422Row_Any_SSSE3;
590 if (IS_ALIGNED(width, 16)) {
591 ARGBToUV422Row = ARGBToUV422Row_Unaligned_SSSE3;
592 if (IS_ALIGNED(src_argb, 16) && IS_ALIGNED(src_stride_argb, 16)) {
593 ARGBToUV422Row = ARGBToUV422Row_SSSE3;
598 #if defined(HAS_ARGBTOYROW_SSSE3)
599 if (TestCpuFlag(kCpuHasSSSE3) && width >= 16) {
600 ARGBToYRow = ARGBToYRow_Any_SSSE3;
601 if (IS_ALIGNED(width, 16)) {
602 ARGBToYRow = ARGBToYRow_Unaligned_SSSE3;
603 if (IS_ALIGNED(src_argb, 16) && IS_ALIGNED(src_stride_argb, 16)) {
604 ARGBToYRow = ARGBToYRow_SSSE3;
608 #elif defined(HAS_ARGBTOYROW_NEON)
609 if (TestCpuFlag(kCpuHasNEON) && width >= 8) {
610 ARGBToYRow = ARGBToYRow_Any_NEON;
611 if (IS_ALIGNED(width, 8)) {
612 ARGBToYRow = ARGBToYRow_NEON;
615 ARGBToUV422Row = ARGBToUV422Row_Any_NEON;
616 if (IS_ALIGNED(width, 16)) {
617 ARGBToUV422Row = ARGBToUV422Row_NEON;
623 #if defined(HAS_I422TOUYVYROW_SSE2)
624 if (TestCpuFlag(kCpuHasSSE2) && width >= 16) {
625 I422ToUYVYRow = I422ToUYVYRow_Any_SSE2;
626 if (IS_ALIGNED(width, 16)) {
627 I422ToUYVYRow = I422ToUYVYRow_SSE2;
630 #elif defined(HAS_I422TOUYVYROW_NEON)
631 if (TestCpuFlag(kCpuHasNEON) && width >= 16) {
632 I422ToUYVYRow = I422ToUYVYRow_Any_NEON;
633 if (IS_ALIGNED(width, 16)) {
634 I422ToUYVYRow = I422ToUYVYRow_NEON;
640 // Allocate a rows of yuv.
641 align_buffer_64(row_y, ((width + 63) & ~63) * 2);
642 uint8* row_u = row_y + ((width + 63) & ~63);
643 uint8* row_v = row_u + ((width + 63) & ~63) / 2;
645 for (y = 0; y < height; ++y) {
646 ARGBToUV422Row(src_argb, row_u, row_v, width);
647 ARGBToYRow(src_argb, row_y, width);
648 I422ToUYVYRow(row_y, row_u, row_v, dst_uyvy, width);
649 src_argb += src_stride_argb;
650 dst_uyvy += dst_stride_uyvy;
653 free_aligned_buffer_64(row_y);
658 // Convert ARGB to I400.
660 int ARGBToI400(const uint8* src_argb, int src_stride_argb,
661 uint8* dst_y, int dst_stride_y,
662 int width, int height) {
664 void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) =
666 if (!src_argb || !dst_y || width <= 0 || height == 0) {
671 src_argb = src_argb + (height - 1) * src_stride_argb;
672 src_stride_argb = -src_stride_argb;
675 if (src_stride_argb == width * 4 &&
676 dst_stride_y == width) {
679 src_stride_argb = dst_stride_y = 0;
681 #if defined(HAS_ARGBTOYROW_SSSE3)
682 if (TestCpuFlag(kCpuHasSSSE3) && width >= 16) {
683 ARGBToYRow = ARGBToYRow_Any_SSSE3;
684 if (IS_ALIGNED(width, 16)) {
685 ARGBToYRow = ARGBToYRow_Unaligned_SSSE3;
686 if (IS_ALIGNED(src_argb, 16) && IS_ALIGNED(src_stride_argb, 16) &&
687 IS_ALIGNED(dst_y, 16) && IS_ALIGNED(dst_stride_y, 16)) {
688 ARGBToYRow = ARGBToYRow_SSSE3;
693 #if defined(HAS_ARGBTOYROW_AVX2)
694 if (TestCpuFlag(kCpuHasAVX2) && width >= 32) {
695 ARGBToYRow = ARGBToYRow_Any_AVX2;
696 if (IS_ALIGNED(width, 32)) {
697 ARGBToYRow = ARGBToYRow_AVX2;
701 #if defined(HAS_ARGBTOYROW_NEON)
702 if (TestCpuFlag(kCpuHasNEON) && width >= 8) {
703 ARGBToYRow = ARGBToYRow_Any_NEON;
704 if (IS_ALIGNED(width, 8)) {
705 ARGBToYRow = ARGBToYRow_NEON;
710 for (y = 0; y < height; ++y) {
711 ARGBToYRow(src_argb, dst_y, width);
712 src_argb += src_stride_argb;
713 dst_y += dst_stride_y;
718 // Shuffle table for converting ARGB to RGBA.
719 static uvec8 kShuffleMaskARGBToRGBA = {
720 3u, 0u, 1u, 2u, 7u, 4u, 5u, 6u, 11u, 8u, 9u, 10u, 15u, 12u, 13u, 14u
723 // Convert ARGB to RGBA.
725 int ARGBToRGBA(const uint8* src_argb, int src_stride_argb,
726 uint8* dst_rgba, int dst_stride_rgba,
727 int width, int height) {
728 return ARGBShuffle(src_argb, src_stride_argb,
729 dst_rgba, dst_stride_rgba,
730 (const uint8*)(&kShuffleMaskARGBToRGBA),
734 // Convert ARGB To RGB24.
736 int ARGBToRGB24(const uint8* src_argb, int src_stride_argb,
737 uint8* dst_rgb24, int dst_stride_rgb24,
738 int width, int height) {
740 void (*ARGBToRGB24Row)(const uint8* src_argb, uint8* dst_rgb, int pix) =
742 if (!src_argb || !dst_rgb24 || width <= 0 || height == 0) {
747 src_argb = src_argb + (height - 1) * src_stride_argb;
748 src_stride_argb = -src_stride_argb;
751 if (src_stride_argb == width * 4 &&
752 dst_stride_rgb24 == width * 3) {
755 src_stride_argb = dst_stride_rgb24 = 0;
757 #if defined(HAS_ARGBTORGB24ROW_SSSE3)
758 if (TestCpuFlag(kCpuHasSSSE3) && width >= 16) {
759 ARGBToRGB24Row = ARGBToRGB24Row_Any_SSSE3;
760 if (IS_ALIGNED(width, 16)) {
761 ARGBToRGB24Row = ARGBToRGB24Row_SSSE3;
764 #elif defined(HAS_ARGBTORGB24ROW_NEON)
765 if (TestCpuFlag(kCpuHasNEON) && width >= 8) {
766 ARGBToRGB24Row = ARGBToRGB24Row_Any_NEON;
767 if (IS_ALIGNED(width, 8)) {
768 ARGBToRGB24Row = ARGBToRGB24Row_NEON;
773 for (y = 0; y < height; ++y) {
774 ARGBToRGB24Row(src_argb, dst_rgb24, width);
775 src_argb += src_stride_argb;
776 dst_rgb24 += dst_stride_rgb24;
781 // Convert ARGB To RAW.
783 int ARGBToRAW(const uint8* src_argb, int src_stride_argb,
784 uint8* dst_raw, int dst_stride_raw,
785 int width, int height) {
787 void (*ARGBToRAWRow)(const uint8* src_argb, uint8* dst_rgb, int pix) =
789 if (!src_argb || !dst_raw || width <= 0 || height == 0) {
794 src_argb = src_argb + (height - 1) * src_stride_argb;
795 src_stride_argb = -src_stride_argb;
798 if (src_stride_argb == width * 4 &&
799 dst_stride_raw == width * 3) {
802 src_stride_argb = dst_stride_raw = 0;
804 #if defined(HAS_ARGBTORAWROW_SSSE3)
805 if (TestCpuFlag(kCpuHasSSSE3) && width >= 16) {
806 ARGBToRAWRow = ARGBToRAWRow_Any_SSSE3;
807 if (IS_ALIGNED(width, 16)) {
808 ARGBToRAWRow = ARGBToRAWRow_SSSE3;
811 #elif defined(HAS_ARGBTORAWROW_NEON)
812 if (TestCpuFlag(kCpuHasNEON) && width >= 8) {
813 ARGBToRAWRow = ARGBToRAWRow_Any_NEON;
814 if (IS_ALIGNED(width, 8)) {
815 ARGBToRAWRow = ARGBToRAWRow_NEON;
820 for (y = 0; y < height; ++y) {
821 ARGBToRAWRow(src_argb, dst_raw, width);
822 src_argb += src_stride_argb;
823 dst_raw += dst_stride_raw;
828 // Convert ARGB To RGB565.
830 int ARGBToRGB565(const uint8* src_argb, int src_stride_argb,
831 uint8* dst_rgb565, int dst_stride_rgb565,
832 int width, int height) {
834 void (*ARGBToRGB565Row)(const uint8* src_argb, uint8* dst_rgb, int pix) =
836 if (!src_argb || !dst_rgb565 || width <= 0 || height == 0) {
841 src_argb = src_argb + (height - 1) * src_stride_argb;
842 src_stride_argb = -src_stride_argb;
845 if (src_stride_argb == width * 4 &&
846 dst_stride_rgb565 == width * 2) {
849 src_stride_argb = dst_stride_rgb565 = 0;
851 #if defined(HAS_ARGBTORGB565ROW_SSE2)
852 if (TestCpuFlag(kCpuHasSSE2) && width >= 4 &&
853 IS_ALIGNED(src_argb, 16) && IS_ALIGNED(src_stride_argb, 16)) {
854 ARGBToRGB565Row = ARGBToRGB565Row_Any_SSE2;
855 if (IS_ALIGNED(width, 4)) {
856 ARGBToRGB565Row = ARGBToRGB565Row_SSE2;
859 #elif defined(HAS_ARGBTORGB565ROW_NEON)
860 if (TestCpuFlag(kCpuHasNEON) && width >= 8) {
861 ARGBToRGB565Row = ARGBToRGB565Row_Any_NEON;
862 if (IS_ALIGNED(width, 8)) {
863 ARGBToRGB565Row = ARGBToRGB565Row_NEON;
868 for (y = 0; y < height; ++y) {
869 ARGBToRGB565Row(src_argb, dst_rgb565, width);
870 src_argb += src_stride_argb;
871 dst_rgb565 += dst_stride_rgb565;
876 // Convert ARGB To ARGB1555.
878 int ARGBToARGB1555(const uint8* src_argb, int src_stride_argb,
879 uint8* dst_argb1555, int dst_stride_argb1555,
880 int width, int height) {
882 void (*ARGBToARGB1555Row)(const uint8* src_argb, uint8* dst_rgb, int pix) =
884 if (!src_argb || !dst_argb1555 || width <= 0 || height == 0) {
889 src_argb = src_argb + (height - 1) * src_stride_argb;
890 src_stride_argb = -src_stride_argb;
893 if (src_stride_argb == width * 4 &&
894 dst_stride_argb1555 == width * 2) {
897 src_stride_argb = dst_stride_argb1555 = 0;
899 #if defined(HAS_ARGBTOARGB1555ROW_SSE2)
900 if (TestCpuFlag(kCpuHasSSE2) && width >= 4 &&
901 IS_ALIGNED(src_argb, 16) && IS_ALIGNED(src_stride_argb, 16)) {
902 ARGBToARGB1555Row = ARGBToARGB1555Row_Any_SSE2;
903 if (IS_ALIGNED(width, 4)) {
904 ARGBToARGB1555Row = ARGBToARGB1555Row_SSE2;
907 #elif defined(HAS_ARGBTOARGB1555ROW_NEON)
908 if (TestCpuFlag(kCpuHasNEON) && width >= 8) {
909 ARGBToARGB1555Row = ARGBToARGB1555Row_Any_NEON;
910 if (IS_ALIGNED(width, 8)) {
911 ARGBToARGB1555Row = ARGBToARGB1555Row_NEON;
916 for (y = 0; y < height; ++y) {
917 ARGBToARGB1555Row(src_argb, dst_argb1555, width);
918 src_argb += src_stride_argb;
919 dst_argb1555 += dst_stride_argb1555;
924 // Convert ARGB To ARGB4444.
926 int ARGBToARGB4444(const uint8* src_argb, int src_stride_argb,
927 uint8* dst_argb4444, int dst_stride_argb4444,
928 int width, int height) {
930 void (*ARGBToARGB4444Row)(const uint8* src_argb, uint8* dst_rgb, int pix) =
932 if (!src_argb || !dst_argb4444 || width <= 0 || height == 0) {
937 src_argb = src_argb + (height - 1) * src_stride_argb;
938 src_stride_argb = -src_stride_argb;
941 if (src_stride_argb == width * 4 &&
942 dst_stride_argb4444 == width * 2) {
945 src_stride_argb = dst_stride_argb4444 = 0;
947 #if defined(HAS_ARGBTOARGB4444ROW_SSE2)
948 if (TestCpuFlag(kCpuHasSSE2) && width >= 4 &&
949 IS_ALIGNED(src_argb, 16) && IS_ALIGNED(src_stride_argb, 16)) {
950 ARGBToARGB4444Row = ARGBToARGB4444Row_Any_SSE2;
951 if (IS_ALIGNED(width, 4)) {
952 ARGBToARGB4444Row = ARGBToARGB4444Row_SSE2;
955 #elif defined(HAS_ARGBTOARGB4444ROW_NEON)
956 if (TestCpuFlag(kCpuHasNEON) && width >= 8) {
957 ARGBToARGB4444Row = ARGBToARGB4444Row_Any_NEON;
958 if (IS_ALIGNED(width, 8)) {
959 ARGBToARGB4444Row = ARGBToARGB4444Row_NEON;
964 for (y = 0; y < height; ++y) {
965 ARGBToARGB4444Row(src_argb, dst_argb4444, width);
966 src_argb += src_stride_argb;
967 dst_argb4444 += dst_stride_argb4444;
972 // Convert ARGB to J420. (JPeg full range I420).
974 int ARGBToJ420(const uint8* src_argb, int src_stride_argb,
975 uint8* dst_yj, int dst_stride_yj,
976 uint8* dst_u, int dst_stride_u,
977 uint8* dst_v, int dst_stride_v,
978 int width, int height) {
980 void (*ARGBToUVJRow)(const uint8* src_argb0, int src_stride_argb,
981 uint8* dst_u, uint8* dst_v, int width) = ARGBToUVJRow_C;
982 void (*ARGBToYJRow)(const uint8* src_argb, uint8* dst_yj, int pix) =
985 !dst_yj || !dst_u || !dst_v ||
986 width <= 0 || height == 0) {
989 // Negative height means invert the image.
992 src_argb = src_argb + (height - 1) * src_stride_argb;
993 src_stride_argb = -src_stride_argb;
995 #if defined(HAS_ARGBTOYJROW_SSSE3) && defined(HAS_ARGBTOUVJROW_SSSE3)
996 if (TestCpuFlag(kCpuHasSSSE3) && width >= 16) {
997 ARGBToUVJRow = ARGBToUVJRow_Any_SSSE3;
998 ARGBToYJRow = ARGBToYJRow_Any_SSSE3;
999 if (IS_ALIGNED(width, 16)) {
1000 ARGBToUVJRow = ARGBToUVJRow_Unaligned_SSSE3;
1001 ARGBToYJRow = ARGBToYJRow_Unaligned_SSSE3;
1002 if (IS_ALIGNED(src_argb, 16) && IS_ALIGNED(src_stride_argb, 16)) {
1003 ARGBToUVJRow = ARGBToUVJRow_SSSE3;
1004 if (IS_ALIGNED(dst_yj, 16) && IS_ALIGNED(dst_stride_yj, 16)) {
1005 ARGBToYJRow = ARGBToYJRow_SSSE3;
1011 #if defined(HAS_ARGBTOYJROW_AVX2) && defined(HAS_ARGBTOUVJROW_AVX2)
1012 if (TestCpuFlag(kCpuHasAVX2) && width >= 32) {
1013 ARGBToYJRow = ARGBToYJRow_Any_AVX2;
1014 if (IS_ALIGNED(width, 32)) {
1015 ARGBToYJRow = ARGBToYJRow_AVX2;
1019 #if defined(HAS_ARGBTOYJROW_NEON)
1020 if (TestCpuFlag(kCpuHasNEON) && width >= 8) {
1021 ARGBToYJRow = ARGBToYJRow_Any_NEON;
1022 if (IS_ALIGNED(width, 8)) {
1023 ARGBToYJRow = ARGBToYJRow_NEON;
1026 ARGBToUVJRow = ARGBToUVJRow_Any_NEON;
1027 if (IS_ALIGNED(width, 16)) {
1028 ARGBToUVJRow = ARGBToUVJRow_NEON;
1034 for (y = 0; y < height - 1; y += 2) {
1035 ARGBToUVJRow(src_argb, src_stride_argb, dst_u, dst_v, width);
1036 ARGBToYJRow(src_argb, dst_yj, width);
1037 ARGBToYJRow(src_argb + src_stride_argb, dst_yj + dst_stride_yj, width);
1038 src_argb += src_stride_argb * 2;
1039 dst_yj += dst_stride_yj * 2;
1040 dst_u += dst_stride_u;
1041 dst_v += dst_stride_v;
1044 ARGBToUVJRow(src_argb, 0, dst_u, dst_v, width);
1045 ARGBToYJRow(src_argb, dst_yj, width);
1050 // Convert ARGB to J400.
1052 int ARGBToJ400(const uint8* src_argb, int src_stride_argb,
1053 uint8* dst_yj, int dst_stride_yj,
1054 int width, int height) {
1056 void (*ARGBToYJRow)(const uint8* src_argb, uint8* dst_yj, int pix) =
1058 if (!src_argb || !dst_yj || width <= 0 || height == 0) {
1063 src_argb = src_argb + (height - 1) * src_stride_argb;
1064 src_stride_argb = -src_stride_argb;
1067 if (src_stride_argb == width * 4 &&
1068 dst_stride_yj == width) {
1071 src_stride_argb = dst_stride_yj = 0;
1073 #if defined(HAS_ARGBTOYJROW_SSSE3)
1074 if (TestCpuFlag(kCpuHasSSSE3) && width >= 16) {
1075 ARGBToYJRow = ARGBToYJRow_Any_SSSE3;
1076 if (IS_ALIGNED(width, 16)) {
1077 ARGBToYJRow = ARGBToYJRow_Unaligned_SSSE3;
1078 if (IS_ALIGNED(src_argb, 16) && IS_ALIGNED(src_stride_argb, 16) &&
1079 IS_ALIGNED(dst_yj, 16) && IS_ALIGNED(dst_stride_yj, 16)) {
1080 ARGBToYJRow = ARGBToYJRow_SSSE3;
1085 #if defined(HAS_ARGBTOYJROW_AVX2)
1086 if (TestCpuFlag(kCpuHasAVX2) && width >= 32) {
1087 ARGBToYJRow = ARGBToYJRow_Any_AVX2;
1088 if (IS_ALIGNED(width, 32)) {
1089 ARGBToYJRow = ARGBToYJRow_AVX2;
1093 #if defined(HAS_ARGBTOYJROW_NEON)
1094 if (TestCpuFlag(kCpuHasNEON) && width >= 8) {
1095 ARGBToYJRow = ARGBToYJRow_Any_NEON;
1096 if (IS_ALIGNED(width, 8)) {
1097 ARGBToYJRow = ARGBToYJRow_NEON;
1102 for (y = 0; y < height; ++y) {
1103 ARGBToYJRow(src_argb, dst_yj, width);
1104 src_argb += src_stride_argb;
1105 dst_yj += dst_stride_yj;
1112 } // namespace libyuv