2 * Copyright (C) <1999> Erik Walthinsen <omega@cse.ogi.edu>
4 * Copyright (c) 2002-2004 Ronald Bultje <rbultje@ronald.bitfreak.net>
5 * Copyright (C) 2012, 2013 Samsung Electronics Co., Ltd.
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Library General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Library General Public License for more details.
17 * You should have received a copy of the GNU Library General Public
18 * License along with this library; if not, write to the
19 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
20 * Boston, MA 02111-1307, USA.
22 * * Modifications by Samsung Electronics Co., Ltd.
23 * 1. Support samsung extension format
35 #include "gstffmpegcodecmap.h"
37 GST_DEBUG_CATEGORY_EXTERN (ffmpegcolorspace_debug);
38 #define GST_CAT_DEFAULT ffmpegcolorspace_debug
/* Forward declarations for the caps-construction helpers defined below.
 * Both take an optional AVCodecContext plus a NULL-terminated list of
 * extra GstStructure field name/value pairs to set on the new caps. */
41 gst_ff_vid_caps_new (AVCodecContext * context,
42 const char *mimetype, const char *fieldname, ...)
43 G_GNUC_NULL_TERMINATED;
44 static GstCaps *gst_ff_aud_caps_new (AVCodecContext * context,
45 const char *mimetype, const char *fieldname, ...) G_GNUC_NULL_TERMINATED;
48 * Read a palette from a caps.
/* gst_ffmpeg_get_palette:
 * Copies the "palette_data" buffer from @caps (when present) into a freshly
 * allocated AVPaletteControl on @context.  The buffer must hold at least
 * 256 32-bit entries (256 * 4 bytes) to be accepted.
 * NOTE(review): this excerpt is missing lines (the memcpy size argument and
 * closing braces); the av_malloc() result appears to be used unchecked --
 * confirm against the full file before changing. */
52 gst_ffmpeg_get_palette (const GstCaps * caps, AVCodecContext * context)
54 GstStructure *str = gst_caps_get_structure (caps, 0);
55 const GValue *palette_v;
57 /* do we have a palette? */
58 if ((palette_v = gst_structure_get_value (str, "palette_data")) && context) {
59 const GstBuffer *palette;
61 palette = gst_value_get_buffer (palette_v);
62 if (palette && GST_BUFFER_SIZE (palette) >= 256 * 4) {
/* free any previously installed palette before installing the new one */
64 av_free (context->palctrl);
65 context->palctrl = av_malloc (sizeof (AVPaletteControl));
66 context->palctrl->palette_changed = 1;
67 memcpy (context->palctrl->palette, GST_BUFFER_DATA (palette),
/* gst_ffmpeg_set_palette:
 * Inverse of gst_ffmpeg_get_palette(): serializes context->palctrl->palette
 * into a new 256*4-byte GstBuffer and stores it on @caps as "palette_data".
 * No-op when the context carries no palette. */
74 gst_ffmpeg_set_palette (GstCaps * caps, AVCodecContext * context)
76 if (context->palctrl) {
77 GstBuffer *palette = gst_buffer_new_and_alloc (256 * 4);
79 memcpy (GST_BUFFER_DATA (palette), context->palctrl->palette,
81 gst_caps_set_simple (caps, "palette_data", GST_TYPE_BUFFER, palette, NULL);
/* gst_caps_set_simple took its own reference; drop ours */
82 gst_buffer_unref (palette);
86 /* this function creates caps with fixed or unfixed width/height
87 * properties depending on whether we've got a context.
89 * See below for why we use this.
91 * We should actually do this stuff at the end, like in riff-media.c,
92 * but I'm too lazy today. Maybe later.
/* gst_ff_vid_caps_new:
 * Builds video caps for @mimetype.  With a context, width/height/framerate
 * are fixed from it; without one they are left as open ranges.  The
 * NULL-terminated varargs are then applied as extra structure fields.
 * NOTE(review): this excerpt is missing lines (va_list declaration, va_end,
 * return statement) -- consult the full file before editing. */
96 gst_ff_vid_caps_new (AVCodecContext * context, const char *mimetype,
97 const char *fieldname, ...)
99 GstStructure *structure = NULL;
100 GstCaps *caps = NULL;
103 if (context != NULL) {
/* fixed caps: pin dimensions and framerate to the codec context */
104 caps = gst_caps_new_simple (mimetype,
105 "width", G_TYPE_INT, context->width,
106 "height", G_TYPE_INT, context->height,
107 "framerate", GST_TYPE_FRACTION,
108 (gint) context->frame_rate, (gint) context->frame_rate_base, NULL);
/* no context: leave width/height/framerate as full ranges */
110 caps = gst_caps_new_simple (mimetype,
111 "width", GST_TYPE_INT_RANGE, 1, G_MAXINT,
112 "height", GST_TYPE_INT_RANGE, 1, G_MAXINT,
113 "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, G_MAXINT, 1, NULL);
116 structure = gst_caps_get_structure (caps, 0);
/* apply caller-supplied extra fields, if any */
119 va_start (var_args, fieldname);
120 gst_structure_set_valist (structure, fieldname, var_args);
127 /* same for audio - now with channels/sample rate
/* gst_ff_aud_caps_new:
 * Audio counterpart of gst_ff_vid_caps_new(): with a context, rate and
 * channels are fixed from it; otherwise the caps carry only @mimetype.
 * NOTE(review): excerpt is missing the va_list declaration, va_end and
 * return -- consult the full file before editing. */
131 gst_ff_aud_caps_new (AVCodecContext * context, const char *mimetype,
132 const char *fieldname, ...)
134 GstCaps *caps = NULL;
135 GstStructure *structure = NULL;
138 if (context != NULL) {
/* fixed caps from the codec context */
139 caps = gst_caps_new_simple (mimetype,
140 "rate", G_TYPE_INT, context->sample_rate,
141 "channels", G_TYPE_INT, context->channels, NULL);
143 caps = gst_caps_new_simple (mimetype, NULL);
146 structure = gst_caps_get_structure (caps, 0);
/* apply caller-supplied extra fields, if any */
149 va_start (var_args, fieldname);
150 gst_structure_set_valist (structure, fieldname, var_args);
157 /* Convert a FFMPEG Pixel Format and optional AVCodecContext
158 * to a GstCaps. If the context is omitted, no fixed values
159 * for video/audio size will be included in the GstCaps
161 * See below for usefulness
/* Maps one PIX_FMT_* value onto the matching GstCaps:
 * - packed/planar YUV formats become video/x-raw-yuv with a FOURCC,
 * - RGB variants become video/x-raw-rgb with bpp/depth/mask fields,
 * - gray formats become video/x-raw-gray.
 * Returns NULL (with a log message) for unknown formats.
 * NOTE(review): many lines (break statements, several case labels, the
 * #else halves of the endianness #if blocks) are missing from this
 * excerpt; do not infer fall-through behavior from what is visible. */
165 gst_ffmpeg_pixfmt_to_caps (enum PixelFormat pix_fmt, AVCodecContext * context)
167 GstCaps *caps = NULL;
169 int bpp = 0, depth = 0, endianness = 0;
170 gulong g_mask = 0, r_mask = 0, b_mask = 0, a_mask = 0;
/* YUV formats: select the GStreamer FOURCC for the ffmpeg pixel format */
174 case PIX_FMT_YUV420P:
175 fmt = GST_MAKE_FOURCC ('I', '4', '2', '0');
177 case PIX_FMT_YUVA420P:
178 fmt = GST_MAKE_FOURCC ('A', '4', '2', '0');
181 fmt = GST_MAKE_FOURCC ('N', 'V', '1', '2');
184 fmt = GST_MAKE_FOURCC ('N', 'V', '2', '1');
186 case PIX_FMT_YVU420P:
187 fmt = GST_MAKE_FOURCC ('Y', 'V', '1', '2');
190 fmt = GST_MAKE_FOURCC ('Y', 'U', 'Y', '2');
192 case PIX_FMT_UYVY422:
193 fmt = GST_MAKE_FOURCC ('U', 'Y', 'V', 'Y');
195 case PIX_FMT_YVYU422:
196 fmt = GST_MAKE_FOURCC ('Y', 'V', 'Y', 'U');
198 case PIX_FMT_UYVY411:
199 fmt = GST_MAKE_FOURCC ('I', 'Y', 'U', '1');
203 endianness = G_BIG_ENDIAN;
210 endianness = G_BIG_ENDIAN;
215 case PIX_FMT_YUV422P:
216 fmt = GST_MAKE_FOURCC ('Y', '4', '2', 'B');
218 case PIX_FMT_YUV444P:
219 fmt = GST_MAKE_FOURCC ('Y', '4', '4', '4');
/* RGB variants: set bpp/depth and per-channel masks; the masks differ
 * by host byte order, hence the compile-time #if blocks (their #else
 * halves are not visible in this excerpt) */
224 endianness = G_BIG_ENDIAN;
225 #if (G_BYTE_ORDER == G_BIG_ENDIAN)
238 endianness = G_BIG_ENDIAN;
239 #if (G_BYTE_ORDER == G_BIG_ENDIAN)
252 endianness = G_BIG_ENDIAN;
253 #if (G_BYTE_ORDER == G_BIG_ENDIAN)
266 endianness = G_BIG_ENDIAN;
267 #if (G_BYTE_ORDER == G_BIG_ENDIAN)
280 endianness = G_BIG_ENDIAN;
281 #if (G_BYTE_ORDER == G_BIG_ENDIAN)
296 endianness = G_BIG_ENDIAN;
297 #if (G_BYTE_ORDER == G_BIG_ENDIAN)
312 endianness = G_BIG_ENDIAN;
313 #if (G_BYTE_ORDER == G_BIG_ENDIAN)
328 endianness = G_BIG_ENDIAN;
329 #if (G_BYTE_ORDER == G_BIG_ENDIAN)
341 case PIX_FMT_YUV410P:
342 fmt = GST_MAKE_FOURCC ('Y', 'U', 'V', '9');
344 case PIX_FMT_YVU410P:
345 fmt = GST_MAKE_FOURCC ('Y', 'V', 'U', '9');
347 case PIX_FMT_YUV411P:
348 fmt = GST_MAKE_FOURCC ('Y', '4', '1', 'B');
/* 8-bit gray is exposed under three equivalent FOURCC spellings, so
 * build a caps list (Y800 + Y8 + GREY) rather than a single entry */
353 caps = gst_ff_vid_caps_new (context, "video/x-raw-yuv",
354 "format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('Y', '8', '0', '0'),
357 gst_ff_vid_caps_new (context, "video/x-raw-yuv", "format",
358 GST_TYPE_FOURCC, GST_MAKE_FOURCC ('Y', '8', ' ', ' '), NULL);
359 gst_caps_append (caps, tmp);
360 tmp = gst_ff_vid_caps_new (context, "video/x-raw-yuv",
361 "format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('G', 'R', 'E', 'Y'),
363 gst_caps_append (caps, tmp);
367 fmt = GST_MAKE_FOURCC ('Y', '1', '6', ' ');
371 endianness = G_BYTE_ORDER;
379 endianness = G_BYTE_ORDER;
386 endianness = G_BYTE_ORDER;
389 fmt = GST_MAKE_FOURCC ('v', '3', '0', '8');
391 case PIX_FMT_AYUV4444:
392 fmt = GST_MAKE_FOURCC ('A', 'Y', 'U', 'V');
/* gray formats: bpp/depth, plus explicit endianness for the 16-bit ones */
396 caps = gst_ff_vid_caps_new (context, "video/x-raw-gray",
397 "bpp", G_TYPE_INT, bpp, "depth", G_TYPE_INT, depth, NULL);
399 case PIX_FMT_GRAY16_L:
401 caps = gst_ff_vid_caps_new (context, "video/x-raw-gray",
402 "bpp", G_TYPE_INT, bpp, "depth", G_TYPE_INT, depth,
403 "endianness", G_TYPE_INT, G_LITTLE_ENDIAN, NULL);
405 case PIX_FMT_GRAY16_B:
407 caps = gst_ff_vid_caps_new (context, "video/x-raw-gray",
408 "bpp", G_TYPE_INT, bpp, "depth", G_TYPE_INT, depth,
409 "endianness", G_TYPE_INT, G_BIG_ENDIAN, NULL);
/* after the switch: build the actual caps from the accumulated fields.
 * Three RGB shapes: with alpha mask, with color masks only, or bare
 * bpp/depth (e.g. paletted) */
419 caps = gst_ff_vid_caps_new (context, "video/x-raw-rgb",
420 "bpp", G_TYPE_INT, bpp,
421 "depth", G_TYPE_INT, depth,
422 "red_mask", G_TYPE_INT, r_mask,
423 "green_mask", G_TYPE_INT, g_mask,
424 "blue_mask", G_TYPE_INT, b_mask,
425 "alpha_mask", G_TYPE_INT, a_mask,
426 "endianness", G_TYPE_INT, endianness, NULL);
427 } else if (r_mask != 0) {
428 caps = gst_ff_vid_caps_new (context, "video/x-raw-rgb",
429 "bpp", G_TYPE_INT, bpp,
430 "depth", G_TYPE_INT, depth,
431 "red_mask", G_TYPE_INT, r_mask,
432 "green_mask", G_TYPE_INT, g_mask,
433 "blue_mask", G_TYPE_INT, b_mask,
434 "endianness", G_TYPE_INT, endianness, NULL);
436 caps = gst_ff_vid_caps_new (context, "video/x-raw-rgb",
437 "bpp", G_TYPE_INT, bpp,
438 "depth", G_TYPE_INT, depth,
439 "endianness", G_TYPE_INT, endianness, NULL);
/* paletted RGB additionally carries the palette in the caps */
440 if (context && context->pix_fmt == PIX_FMT_PAL8) {
441 gst_ffmpeg_set_palette (caps, context);
445 caps = gst_ff_vid_caps_new (context, "video/x-raw-yuv",
446 "format", GST_TYPE_FOURCC, fmt, NULL);
451 GST_DEBUG ("caps for pix_fmt=%d: %" GST_PTR_FORMAT, pix_fmt, caps);
453 GST_LOG ("No caps found for pix_fmt=%d", pix_fmt);
459 /* Convert a FFMPEG Sample Format and optional AVCodecContext
460 * to a GstCaps. If the context is omitted, no fixed values
461 * for video/audio size will be included in the GstCaps
463 * See below for usefulness
/* Maps a SAMPLE_FMT_* value onto audio/x-raw-int caps in host byte order.
 * Returns NULL (with a log message) for unsupported sample formats.
 * NOTE(review): the switch cases that set bpp/signedness are missing from
 * this excerpt -- confirm supported formats against the full file. */
467 gst_ffmpeg_smpfmt_to_caps (enum SampleFormat sample_fmt,
468 AVCodecContext * context)
470 GstCaps *caps = NULL;
473 gboolean signedness = FALSE;
475 switch (sample_fmt) {
487 caps = gst_ff_aud_caps_new (context, "audio/x-raw-int",
488 "signed", G_TYPE_BOOLEAN, signedness,
489 "endianness", G_TYPE_INT, G_BYTE_ORDER,
490 "width", G_TYPE_INT, bpp, "depth", G_TYPE_INT, bpp, NULL);
494 GST_DEBUG ("caps for sample_fmt=%d: %" GST_PTR_FORMAT, sample_fmt, caps);
496 GST_LOG ("No caps found for sample_fmt=%d", sample_fmt);
502 /* Convert a FFMPEG codec Type and optional AVCodecContext
503 * to a GstCaps. If the context is omitted, no fixed values
504 * for video/audio size will be included in the GstCaps
506 * CodecType is primarily meant for uncompressed data GstCaps!
/* Dispatches on CODEC_TYPE_VIDEO/AUDIO.  With a usable context, converts
 * just that context's pix_fmt/sample_fmt; without one, enumerates every
 * known format and appends each non-NULL result into one caps list. */
510 gst_ffmpegcsp_codectype_to_caps (enum CodecType codec_type,
511 AVCodecContext * context)
515 switch (codec_type) {
516 case CODEC_TYPE_VIDEO:
/* width == -1 marks an unconfigured context; treat it as absent */
518 caps = gst_ffmpeg_pixfmt_to_caps (context->pix_fmt,
519 context->width == -1 ? NULL : context);
524 caps = gst_caps_new_empty ();
525 for (i = 0; i < PIX_FMT_NB; i++) {
526 temp = gst_ffmpeg_pixfmt_to_caps (i, NULL);
528 gst_caps_append (caps, temp);
534 case CODEC_TYPE_AUDIO:
536 caps = gst_ffmpeg_smpfmt_to_caps (context->sample_fmt, context);
541 caps = gst_caps_new_empty ();
542 for (i = 0; i <= SAMPLE_FMT_S16; i++) {
543 temp = gst_ffmpeg_smpfmt_to_caps (i, NULL);
545 gst_caps_append (caps, temp);
560 /* Convert a GstCaps (audio/raw) to a FFMPEG SampleFmt
561 * and other audio properties in a AVCodecContext.
563 * For usefulness, see below
/* Fills @context (channels, sample_rate, sample_fmt) from fixed audio caps.
 * Only native-endian signed 16/16 is recognized as SAMPLE_FMT_S16 in the
 * visible part of this function.  Requires exactly one caps structure. */
567 gst_ffmpeg_caps_to_smpfmt (const GstCaps * caps,
568 AVCodecContext * context, gboolean raw)
570 GstStructure *structure;
571 gint depth = 0, width = 0, endianness = 0;
572 gboolean signedness = FALSE;
574 g_return_if_fail (gst_caps_get_size (caps) == 1);
575 structure = gst_caps_get_structure (caps, 0);
577 gst_structure_get_int (structure, "channels", &context->channels);
578 gst_structure_get_int (structure, "rate", &context->sample_rate);
/* map the integer-audio description onto an ffmpeg sample format */
583 if (gst_structure_get_int (structure, "width", &width) &&
584 gst_structure_get_int (structure, "depth", &depth) &&
585 gst_structure_get_boolean (structure, "signed", &signedness) &&
586 gst_structure_get_int (structure, "endianness", &endianness)) {
587 if (width == 16 && depth == 16 &&
588 endianness == G_BYTE_ORDER && signedness == TRUE) {
589 context->sample_fmt = SAMPLE_FMT_S16;
595 /* Convert a GstCaps (video/raw) to a FFMPEG PixFmt
596 * and other video properties in a AVCodecContext.
598 * For usefulness, see below
/* Fills @context (width, height, frame_rate, pix_fmt) from fixed video
 * caps.  Dispatches on the caps name: x-raw-yuv via FOURCC, x-raw-rgb via
 * bpp/masks/endianness, x-raw-gray via bpp/endianness.  Width, height and
 * framerate are mandatory (g_return_if_fail).
 * NOTE(review): several lines (break statements, some bpp branches, #else/
 * #endif markers) are missing from this excerpt. */
602 gst_ffmpeg_caps_to_pixfmt (const GstCaps * caps,
603 AVCodecContext * context, gboolean raw)
605 GstStructure *structure;
609 g_return_if_fail (gst_caps_get_size (caps) == 1);
610 structure = gst_caps_get_structure (caps, 0);
612 ret = gst_structure_get_int (structure, "width", &context->width);
613 ret &= gst_structure_get_int (structure, "height", &context->height);
614 g_return_if_fail (ret == TRUE);
616 fps = gst_structure_get_value (structure, "framerate");
617 g_return_if_fail (GST_VALUE_HOLDS_FRACTION (fps));
619 /* framerate does not really matter */
620 context->frame_rate = gst_value_get_fraction_numerator (fps);
621 context->frame_rate_base = gst_value_get_fraction_denominator (fps);
626 if (gst_structure_has_name (structure, "video/x-raw-yuv")) {
/* YUV caps: the FOURCC alone determines the ffmpeg pixel format.
 * The S-prefixed FOURCCs are Samsung extension aliases (see header). */
629 if (gst_structure_get_fourcc (structure, "format", &fourcc)) {
631 case GST_MAKE_FOURCC ('Y', 'U', 'Y', '2'):
632 case GST_MAKE_FOURCC ('S', 'U', 'Y', 'V'):
633 context->pix_fmt = PIX_FMT_YUV422;
635 case GST_MAKE_FOURCC ('U', 'Y', 'V', 'Y'):
636 case GST_MAKE_FOURCC ('S', 'Y', 'V', 'Y'):
637 case GST_MAKE_FOURCC ('I', 'T', 'L', 'V'):
638 context->pix_fmt = PIX_FMT_UYVY422;
640 case GST_MAKE_FOURCC ('Y', 'V', 'Y', 'U'):
641 context->pix_fmt = PIX_FMT_YVYU422;
643 case GST_MAKE_FOURCC ('I', 'Y', 'U', '1'):
644 context->pix_fmt = PIX_FMT_UYVY411;
646 case GST_MAKE_FOURCC ('I', '4', '2', '0'):
647 case GST_MAKE_FOURCC ('S', '4', '2', '0'):
648 context->pix_fmt = PIX_FMT_YUV420P;
650 case GST_MAKE_FOURCC ('A', '4', '2', '0'):
651 context->pix_fmt = PIX_FMT_YUVA420P;
653 case GST_MAKE_FOURCC ('N', 'V', '1', '2'):
654 context->pix_fmt = PIX_FMT_NV12;
656 case GST_MAKE_FOURCC ('N', 'V', '2', '1'):
657 context->pix_fmt = PIX_FMT_NV21;
659 case GST_MAKE_FOURCC ('Y', 'V', '1', '2'):
660 context->pix_fmt = PIX_FMT_YVU420P;
662 case GST_MAKE_FOURCC ('Y', '4', '1', 'B'):
663 context->pix_fmt = PIX_FMT_YUV411P;
665 case GST_MAKE_FOURCC ('Y', '4', '2', 'B'):
666 context->pix_fmt = PIX_FMT_YUV422P;
668 case GST_MAKE_FOURCC ('Y', 'U', 'V', '9'):
669 context->pix_fmt = PIX_FMT_YUV410P;
671 case GST_MAKE_FOURCC ('Y', 'V', 'U', '9'):
672 context->pix_fmt = PIX_FMT_YVU410P;
674 case GST_MAKE_FOURCC ('v', '3', '0', '8'):
675 context->pix_fmt = PIX_FMT_V308;
677 case GST_MAKE_FOURCC ('A', 'Y', 'U', 'V'):
678 context->pix_fmt = PIX_FMT_AYUV4444;
680 case GST_MAKE_FOURCC ('Y', '4', '4', '4'):
681 context->pix_fmt = PIX_FMT_YUV444P;
683 case GST_MAKE_FOURCC ('Y', '8', '0', '0'):
684 case GST_MAKE_FOURCC ('Y', '8', ' ', ' '):
685 case GST_MAKE_FOURCC ('G', 'R', 'E', 'Y'):
686 context->pix_fmt = PIX_FMT_Y800;
688 case GST_MAKE_FOURCC ('Y', '1', '6', ' '):
689 context->pix_fmt = PIX_FMT_Y16;
693 } else if (gst_structure_has_name (structure, "video/x-raw-rgb")) {
694 gint bpp = 0, rmask = 0, endianness = 0, amask = 0, depth = 0;
/* RGB caps: decode the channel layout from the red/alpha masks; the
 * mask-to-format mapping flips with host byte order, hence the #if
 * blocks below */
696 if (gst_structure_get_int (structure, "bpp", &bpp) &&
697 gst_structure_get_int (structure, "endianness", &endianness)) {
698 if (gst_structure_get_int (structure, "red_mask", &rmask)) {
/* an alpha_mask field selects the 32-bit formats with alpha */
701 if (gst_structure_get_int (structure, "alpha_mask", &amask)) {
702 #if (G_BYTE_ORDER == G_BIG_ENDIAN)
703 if (rmask == 0x0000ff00)
704 context->pix_fmt = PIX_FMT_BGRA32;
705 else if (rmask == 0x00ff0000)
706 context->pix_fmt = PIX_FMT_RGBA32;
707 else if (rmask == 0xff000000)
708 context->pix_fmt = PIX_FMT_ARGB32;
709 else // if (rmask == 0x000000ff)
710 context->pix_fmt = PIX_FMT_ABGR32;
712 if (rmask == 0x00ff0000)
713 context->pix_fmt = PIX_FMT_BGRA32;
714 else if (rmask == 0x0000ff00)
715 context->pix_fmt = PIX_FMT_RGBA32;
716 else if (rmask == 0x000000ff)
717 context->pix_fmt = PIX_FMT_ARGB32;
718 else // if (rmask == 0xff000000)
719 context->pix_fmt = PIX_FMT_ABGR32;
/* no alpha: 32-bit formats with a padding byte */
722 #if (G_BYTE_ORDER == G_BIG_ENDIAN)
723 if (rmask == 0x00ff0000)
724 context->pix_fmt = PIX_FMT_RGB32;
725 else if (rmask == 0x0000ff00)
726 context->pix_fmt = PIX_FMT_BGR32;
727 else if (rmask == 0xff000000)
728 context->pix_fmt = PIX_FMT_xRGB32;
729 else // if (rmask == 0x000000ff)
730 context->pix_fmt = PIX_FMT_BGRx32;
732 if (rmask == 0x0000ff00)
733 context->pix_fmt = PIX_FMT_RGB32;
734 else if (rmask == 0x00ff0000)
735 context->pix_fmt = PIX_FMT_BGR32;
736 else if (rmask == 0x000000ff)
737 context->pix_fmt = PIX_FMT_xRGB32;
738 else // if (rmask == 0xff000000)
739 context->pix_fmt = PIX_FMT_BGRx32;
/* 24-bit: red mask in the low byte means BGR ordering */
744 if (rmask == 0x0000FF)
745 context->pix_fmt = PIX_FMT_BGR24;
747 context->pix_fmt = PIX_FMT_RGB24;
/* 16-bit: RGB565 unless depth says 15 bits (RGB555) */
750 if (endianness == G_BYTE_ORDER) {
751 context->pix_fmt = PIX_FMT_RGB565;
752 if (gst_structure_get_int (structure, "depth", &depth)) {
754 context->pix_fmt = PIX_FMT_RGB555;
759 if (endianness == G_BYTE_ORDER)
760 context->pix_fmt = PIX_FMT_RGB555;
/* no red_mask: paletted 8-bit RGB; pull the palette from the caps */
768 context->pix_fmt = PIX_FMT_PAL8;
769 gst_ffmpeg_get_palette (caps, context);
773 } else if (gst_structure_has_name (structure, "video/x-raw-gray")) {
776 if (gst_structure_get_int (structure, "bpp", &bpp)) {
779 context->pix_fmt = PIX_FMT_GRAY8;
784 if (gst_structure_get_int (structure, "endianness", &endianness)) {
785 if (endianness == G_LITTLE_ENDIAN)
786 context->pix_fmt = PIX_FMT_GRAY16_L;
787 else if (endianness == G_BIG_ENDIAN)
788 context->pix_fmt = PIX_FMT_GRAY16_B;
797 /* Convert a GstCaps and a FFMPEG codec Type to a
798 * AVCodecContext. If the context is omitted, no fixed values
799 * for video/audio size will be included in the context
801 * CodecType is primarily meant for uncompressed data GstCaps!
/* Thin dispatcher: routes @caps to the pixfmt or smpfmt converter above
 * according to @type (always with raw == TRUE). */
805 gst_ffmpegcsp_caps_with_codectype (enum CodecType type,
806 const GstCaps * caps, AVCodecContext * context)
812 case CODEC_TYPE_VIDEO:
813 gst_ffmpeg_caps_to_pixfmt (caps, context, TRUE);
816 case CODEC_TYPE_AUDIO:
817 gst_ffmpeg_caps_to_smpfmt (caps, context, TRUE);
/* Bit-arithmetic helpers for gst_ffmpegcsp_avpicture_fill() below:
 *   GEN_MASK(x)         -- the lowest x bits set
 *   ROUND_UP_X(v,x)     -- round v up to a multiple of 2^x
 *   DIV_ROUND_UP_X(v,x) -- ceil(v / 2^x) */
826 #define GEN_MASK(x) ((1<<(x))-1)
827 #define ROUND_UP_X(v,x) (((v) + GEN_MASK(x)) & ~GEN_MASK(x))
828 #define DIV_ROUND_UP_X(v,x) (((v) + GEN_MASK(x)) >> (x))
831 * Fill in pointers to memory in a AVPicture, where
832 * everything is aligned by 4 (as required by X).
833 * This is mostly a copy from imgconvert.c with some
/* gst_ffmpegcsp_avpicture_fill:
 * Lays out plane pointers and line strides for @pix_fmt inside the single
 * buffer @ptr, with every stride rounded up to a multiple of 4, and
 * returns the total byte size consumed.  Chroma plane dimensions come
 * from the pixel-format info's x/y chroma shifts.
 * NOTE(review): this excerpt is incomplete -- several case labels, `size`
 * assignments, break/return lines and the end of the function are missing;
 * the function continues past the visible chunk. */
837 gst_ffmpegcsp_avpicture_fill (AVPicture * picture,
838 uint8_t * ptr, enum PixelFormat pix_fmt, int width, int height,
841 int size, w2, h2, size2;
845 pinfo = get_pix_fmt_info (pix_fmt);
847 picture->interlaced = interlaced;
/* three-plane YUV: luma plane followed by two chroma planes */
850 case PIX_FMT_YUV420P:
851 case PIX_FMT_YUV422P:
852 case PIX_FMT_YUV444P:
853 case PIX_FMT_YUV410P:
854 case PIX_FMT_YUV411P:
855 case PIX_FMT_YUVJ420P:
856 case PIX_FMT_YUVJ422P:
857 case PIX_FMT_YUVJ444P:
858 stride = GST_ROUND_UP_4 (width);
859 h2 = ROUND_UP_X (height, pinfo->y_chroma_shift);
861 w2 = DIV_ROUND_UP_X (width, pinfo->x_chroma_shift);
862 stride2 = GST_ROUND_UP_4 (w2);
863 h2 = DIV_ROUND_UP_X (height, pinfo->y_chroma_shift);
864 size2 = stride2 * h2;
865 picture->data[0] = ptr;
866 picture->data[1] = picture->data[0] + size;
867 picture->data[2] = picture->data[1] + size2;
868 picture->linesize[0] = stride;
869 picture->linesize[1] = stride2;
870 picture->linesize[2] = stride2;
871 return size + 2 * size2;
872 /* PIX_FMT_YVU420P = YV12: same as PIX_FMT_YUV420P, but
873 * with U and V plane swapped. Strides as in videotestsrc */
/* YUVA: like YUV420P plus a full-size alpha plane in data[3] */
874 case PIX_FMT_YUVA420P:
875 stride = GST_ROUND_UP_4 (width);
876 h2 = ROUND_UP_X (height, pinfo->y_chroma_shift);
878 w2 = DIV_ROUND_UP_X (width, pinfo->x_chroma_shift);
879 stride2 = GST_ROUND_UP_4 (w2);
880 h2 = DIV_ROUND_UP_X (height, pinfo->y_chroma_shift);
881 size2 = stride2 * h2;
882 picture->data[0] = ptr;
883 picture->data[1] = picture->data[0] + size;
884 picture->data[2] = picture->data[1] + size2;
885 picture->data[3] = picture->data[2] + size2;
886 picture->linesize[0] = stride;
887 picture->linesize[1] = stride2;
888 picture->linesize[2] = stride2;
889 picture->linesize[3] = stride;
890 return 2 * size + 2 * size2;
/* YV12-style: V plane (data[2]) precedes U plane (data[1]) in memory */
891 case PIX_FMT_YVU410P:
892 case PIX_FMT_YVU420P:
893 stride = GST_ROUND_UP_4 (width);
894 h2 = ROUND_UP_X (height, pinfo->y_chroma_shift);
896 w2 = DIV_ROUND_UP_X (width, pinfo->x_chroma_shift);
897 stride2 = GST_ROUND_UP_4 (w2);
898 h2 = DIV_ROUND_UP_X (height, pinfo->y_chroma_shift);
899 size2 = stride2 * h2;
900 picture->data[0] = ptr;
901 picture->data[2] = picture->data[0] + size;
902 picture->data[1] = picture->data[2] + size2;
903 picture->linesize[0] = stride;
904 picture->linesize[1] = stride2;
905 picture->linesize[2] = stride2;
906 return size + 2 * size2;
/* semi-planar (NV12/NV21 style): luma plane plus one interleaved
 * chroma plane, hence the doubled chroma width */
909 stride = GST_ROUND_UP_4 (width);
910 h2 = ROUND_UP_X (height, pinfo->y_chroma_shift);
912 w2 = 2 * DIV_ROUND_UP_X (width, pinfo->x_chroma_shift);
913 stride2 = GST_ROUND_UP_4 (w2);
914 h2 = DIV_ROUND_UP_X (height, pinfo->y_chroma_shift);
915 size2 = stride2 * h2;
916 picture->data[0] = ptr;
917 picture->data[1] = picture->data[0] + size;
918 picture->data[2] = NULL;
919 picture->linesize[0] = stride;
920 picture->linesize[1] = stride2;
921 picture->linesize[2] = 0;
/* the remaining cases are single-plane packed formats: only the
 * stride (bytes per pixel times width, rounded to 4) differs */
925 stride = GST_ROUND_UP_4 (width * 3);
926 size = stride * height;
927 picture->data[0] = ptr;
928 picture->data[1] = NULL;
929 picture->data[2] = NULL;
930 picture->linesize[0] = stride;
932 case PIX_FMT_AYUV4444:
942 size = stride * height;
943 picture->data[0] = ptr;
944 picture->data[1] = NULL;
945 picture->data[2] = NULL;
946 picture->linesize[0] = stride;
951 case PIX_FMT_UYVY422:
952 case PIX_FMT_YVYU422:
953 stride = GST_ROUND_UP_4 (width * 2);
954 size = stride * height;
955 picture->data[0] = ptr;
956 picture->data[1] = NULL;
957 picture->data[2] = NULL;
958 picture->linesize[0] = stride;
961 stride = GST_ROUND_UP_4 (width * 3);
962 size = stride * height;
963 picture->data[0] = ptr;
964 picture->data[1] = NULL;
965 picture->data[2] = NULL;
966 picture->linesize[0] = stride;
968 case PIX_FMT_UYVY411:
970 GST_ROUND_UP_4 (GST_ROUND_UP_4 (width) + GST_ROUND_UP_4 (width) / 2);
971 size = stride * height;
972 picture->data[0] = ptr;
973 picture->data[1] = NULL;
974 picture->data[2] = NULL;
975 picture->linesize[0] = stride;
979 stride = GST_ROUND_UP_4 (width);
980 size = stride * height;
981 picture->data[0] = ptr;
982 picture->data[1] = NULL;
983 picture->data[2] = NULL;
984 picture->linesize[0] = stride;
987 case PIX_FMT_GRAY16_L:
988 case PIX_FMT_GRAY16_B:
989 stride = GST_ROUND_UP_4 (width * 2);
990 size = stride * height;
991 picture->data[0] = ptr;
992 picture->data[1] = NULL;
993 picture->data[2] = NULL;
994 picture->linesize[0] = stride;
/* 1 bit per pixel, packed 8 pixels per byte */
996 case PIX_FMT_MONOWHITE:
997 case PIX_FMT_MONOBLACK:
998 stride = GST_ROUND_UP_4 ((width + 7) >> 3);
999 size = stride * height;
1000 picture->data[0] = ptr;
1001 picture->data[1] = NULL;
1002 picture->data[2] = NULL;
1003 picture->linesize[0] = stride;
1006 /* already forced to be with stride, so same result as other function */
1007 stride = GST_ROUND_UP_4 (width);
1008 size = stride * height;
1009 picture->data[0] = ptr;
1010 picture->data[1] = ptr + size; /* palette is stored here as 256 32 bit words */
1011 picture->data[2] = NULL;
1012 picture->linesize[0] = stride;
1013 picture->linesize[1] = 4;
1014 return size + 256 * 4;
/* unknown format: clear all plane pointers */
1016 picture->data[0] = NULL;
1017 picture->data[1] = NULL;
1018 picture->data[2] = NULL;
1019 picture->data[3] = NULL;