2 * Copyright (C) <1999> Erik Walthinsen <omega@cse.ogi.edu>
4 * Copyright (c) 2002-2004 Ronald Bultje <rbultje@ronald.bitfreak.net>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Library General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Library General Public License for more details.
16 * You should have received a copy of the GNU Library General Public
17 * License along with this library; if not, write to the
18 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19 * Boston, MA 02111-1307, USA.
31 #include "gstffmpegcodecmap.h"
33 GST_DEBUG_CATEGORY_EXTERN (ffmpegcolorspace_debug);
34 #define GST_CAT_DEFAULT ffmpegcolorspace_debug
37 gst_ff_vid_caps_new (AVCodecContext * context,
38 const char *mimetype, const char *fieldname, ...)
39 G_GNUC_NULL_TERMINATED;
40 static GstCaps *gst_ff_aud_caps_new (AVCodecContext * context,
41 const char *mimetype, const char *fieldname, ...) G_GNUC_NULL_TERMINATED;
44 * Read a palette from a caps.
/* Copies the "palette_data" buffer from @caps (if present) into @context,
 * replacing any AVPaletteControl the context already owned.
 * NOTE(review): the visible lines are non-contiguous (sampled source);
 * the tail of the memcpy call and closing braces are not shown here. */
48 gst_ffmpeg_get_palette (const GstCaps * caps, AVCodecContext * context)
50 GstStructure *str = gst_caps_get_structure (caps, 0);
51 const GValue *palette_v;
53 /* do we have a palette? */
54 if ((palette_v = gst_structure_get_value (str, "palette_data")) && context) {
55 const GstBuffer *palette;
57 palette = gst_value_get_buffer (palette_v);
/* only accept a buffer large enough for 256 32-bit palette entries */
58 if (palette && GST_BUFFER_SIZE (palette) >= 256 * 4) {
/* drop the previous palette (av_free(NULL) is a no-op) and allocate anew */
60 av_free (context->palctrl);
61 context->palctrl = av_malloc (sizeof (AVPaletteControl));
/* flag the fresh palette as changed so the decoder re-reads it */
62 context->palctrl->palette_changed = 1;
63 memcpy (context->palctrl->palette, GST_BUFFER_DATA (palette),
/* Writes the palette of @context (if any) into @caps as a "palette_data"
 * GstBuffer holding 256 32-bit entries.
 * NOTE(review): sampled source — some lines of this function are missing. */
70 gst_ffmpeg_set_palette (GstCaps * caps, AVCodecContext * context)
72 if (context->palctrl) {
73 GstBuffer *palette = gst_buffer_new_and_alloc (256 * 4);
75 memcpy (GST_BUFFER_DATA (palette), context->palctrl->palette,
77 gst_caps_set_simple (caps, "palette_data", GST_TYPE_BUFFER, palette, NULL);
/* drop our reference; the caps keep their own copy of the buffer value */
78 gst_buffer_unref (palette);
82 /* this function creates caps with fixed or unfixed width/height
83 * properties depending on whether we've got a context.
85 * See below for why we use this.
87 * We should actually do this stuff at the end, like in riff-media.c,
88 * but I'm too lazy today. Maybe later.
/* With a non-NULL @context: fixed width/height/framerate taken from it.
 * Without: full int ranges and a full fraction range for framerate.
 * Extra caps fields come from the NULL-terminated varargs list.
 * NOTE(review): sampled source — the return statement, va_end and some
 * declarations (caps, var_args) are not visible here. */
92 gst_ff_vid_caps_new (AVCodecContext * context, const char *mimetype,
93 const char *fieldname, ...)
95 GstStructure *structure = NULL;
99 if (context != NULL) {
100 caps = gst_caps_new_simple (mimetype,
101 "width", G_TYPE_INT, context->width,
102 "height", G_TYPE_INT, context->height,
103 "framerate", GST_TYPE_FRACTION,
104 (gint) context->frame_rate, (gint) context->frame_rate_base, NULL);
106 caps = gst_caps_new_simple (mimetype,
107 "width", GST_TYPE_INT_RANGE, 1, G_MAXINT,
108 "height", GST_TYPE_INT_RANGE, 1, G_MAXINT,
109 "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, G_MAXINT, 1, NULL);
112 structure = gst_caps_get_structure (caps, 0);
/* append caller-supplied fields to the (single) structure */
115 va_start (var_args, fieldname);
116 gst_structure_set_valist (structure, fieldname, var_args);
123 /* same for audio - now with channels/sample rate
/* With a non-NULL @context: fixed rate/channels from it; otherwise a bare
 * structure with only the mimetype.  Extra fields from varargs, as above.
 * NOTE(review): sampled source — return/va_end lines are not visible. */
127 gst_ff_aud_caps_new (AVCodecContext * context, const char *mimetype,
128 const char *fieldname, ...)
130 GstCaps *caps = NULL;
131 GstStructure *structure = NULL;
134 if (context != NULL) {
135 caps = gst_caps_new_simple (mimetype,
136 "rate", G_TYPE_INT, context->sample_rate,
137 "channels", G_TYPE_INT, context->channels, NULL);
139 caps = gst_caps_new_simple (mimetype, NULL);
142 structure = gst_caps_get_structure (caps, 0);
145 va_start (var_args, fieldname);
146 gst_structure_set_valist (structure, fieldname, var_args);
153 /* Convert a FFMPEG Pixel Format and optional AVCodecContext
154 * to a GstCaps. If the context is omitted, no fixed values
155 * for video/audio size will be included in the GstCaps
157 * See below for usefulness
/* Maps a libav PixelFormat to GStreamer caps: YUV formats to a fourcc on
 * video/x-raw-yuv, RGB formats to bpp/depth/masks on video/x-raw-rgb,
 * gray formats to video/x-raw-gray.  NOTE(review): sampled source — many
 * case labels, breaks, `fmt` declaration and mask assignments between the
 * visible lines are missing from this view. */
161 gst_ffmpeg_pixfmt_to_caps (enum PixelFormat pix_fmt, AVCodecContext * context)
163 GstCaps *caps = NULL;
165 int bpp = 0, depth = 0, endianness = 0;
166 gulong g_mask = 0, r_mask = 0, b_mask = 0, a_mask = 0;
/* planar and packed YUV formats: pick the matching GStreamer fourcc */
170 case PIX_FMT_YUV420P:
171 fmt = GST_MAKE_FOURCC ('I', '4', '2', '0');
173 case PIX_FMT_YUVA420P:
174 fmt = GST_MAKE_FOURCC ('A', '4', '2', '0');
177 fmt = GST_MAKE_FOURCC ('N', 'V', '1', '2');
180 fmt = GST_MAKE_FOURCC ('N', 'V', '2', '1');
182 case PIX_FMT_YVU420P:
183 fmt = GST_MAKE_FOURCC ('Y', 'V', '1', '2');
186 fmt = GST_MAKE_FOURCC ('Y', 'U', 'Y', '2');
188 case PIX_FMT_UYVY422:
189 fmt = GST_MAKE_FOURCC ('U', 'Y', 'V', 'Y');
191 case PIX_FMT_YVYU422:
192 fmt = GST_MAKE_FOURCC ('Y', 'V', 'Y', 'U');
194 case PIX_FMT_UYVY411:
195 fmt = GST_MAKE_FOURCC ('I', 'Y', 'U', '1');
199 endianness = G_BIG_ENDIAN;
206 endianness = G_BIG_ENDIAN;
211 case PIX_FMT_YUV422P:
212 fmt = GST_MAKE_FOURCC ('Y', '4', '2', 'B');
214 case PIX_FMT_YUV444P:
215 fmt = GST_MAKE_FOURCC ('Y', '4', '4', '4');
/* RGB variants: masks differ with host byte order; the #if blocks below
 * selected the appropriate r/g/b/a masks (bodies not visible here) */
220 endianness = G_BIG_ENDIAN;
221 #if (G_BYTE_ORDER == G_BIG_ENDIAN)
234 endianness = G_BIG_ENDIAN;
235 #if (G_BYTE_ORDER == G_BIG_ENDIAN)
248 endianness = G_BIG_ENDIAN;
249 #if (G_BYTE_ORDER == G_BIG_ENDIAN)
262 endianness = G_BIG_ENDIAN;
263 #if (G_BYTE_ORDER == G_BIG_ENDIAN)
276 endianness = G_BIG_ENDIAN;
277 #if (G_BYTE_ORDER == G_BIG_ENDIAN)
292 endianness = G_BIG_ENDIAN;
293 #if (G_BYTE_ORDER == G_BIG_ENDIAN)
308 endianness = G_BIG_ENDIAN;
309 #if (G_BYTE_ORDER == G_BIG_ENDIAN)
324 endianness = G_BIG_ENDIAN;
325 #if (G_BYTE_ORDER == G_BIG_ENDIAN)
337 case PIX_FMT_YUV410P:
338 fmt = GST_MAKE_FOURCC ('Y', 'U', 'V', '9');
340 case PIX_FMT_YVU410P:
341 fmt = GST_MAKE_FOURCC ('Y', 'V', 'U', '9');
343 case PIX_FMT_YUV411P:
344 fmt = GST_MAKE_FOURCC ('Y', '4', '1', 'B');
/* 8-bit gray is advertised under three equivalent fourccs: Y800, "Y8  "
 * and GREY; the latter two are appended as extra structures */
349 caps = gst_ff_vid_caps_new (context, "video/x-raw-yuv",
350 "format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('Y', '8', '0', '0'),
353 gst_ff_vid_caps_new (context, "video/x-raw-yuv", "format",
354 GST_TYPE_FOURCC, GST_MAKE_FOURCC ('Y', '8', ' ', ' '), NULL);
355 gst_caps_append (caps, tmp);
356 tmp = gst_ff_vid_caps_new (context, "video/x-raw-yuv",
357 "format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('G', 'R', 'E', 'Y'),
359 gst_caps_append (caps, tmp);
363 fmt = GST_MAKE_FOURCC ('Y', '1', '6', ' ');
367 endianness = G_BYTE_ORDER;
375 endianness = G_BYTE_ORDER;
382 endianness = G_BYTE_ORDER;
385 fmt = GST_MAKE_FOURCC ('v', '3', '0', '8');
387 case PIX_FMT_AYUV4444:
388 fmt = GST_MAKE_FOURCC ('A', 'Y', 'U', 'V');
392 caps = gst_ff_vid_caps_new (context, "video/x-raw-gray",
393 "bpp", G_TYPE_INT, bpp, "depth", G_TYPE_INT, depth, NULL);
/* 16-bit gray needs an explicit endianness field */
395 case PIX_FMT_GRAY16_L:
397 caps = gst_ff_vid_caps_new (context, "video/x-raw-gray",
398 "bpp", G_TYPE_INT, bpp, "depth", G_TYPE_INT, depth,
399 "endianness", G_TYPE_INT, G_LITTLE_ENDIAN, NULL);
401 case PIX_FMT_GRAY16_B:
403 caps = gst_ff_vid_caps_new (context, "video/x-raw-gray",
404 "bpp", G_TYPE_INT, bpp, "depth", G_TYPE_INT, depth,
405 "endianness", G_TYPE_INT, G_BIG_ENDIAN, NULL);
/* after the switch: emit RGB caps with as many mask fields as were set */
415 caps = gst_ff_vid_caps_new (context, "video/x-raw-rgb",
416 "bpp", G_TYPE_INT, bpp,
417 "depth", G_TYPE_INT, depth,
418 "red_mask", G_TYPE_INT, r_mask,
419 "green_mask", G_TYPE_INT, g_mask,
420 "blue_mask", G_TYPE_INT, b_mask,
421 "alpha_mask", G_TYPE_INT, a_mask,
422 "endianness", G_TYPE_INT, endianness, NULL);
423 } else if (r_mask != 0) {
424 caps = gst_ff_vid_caps_new (context, "video/x-raw-rgb",
425 "bpp", G_TYPE_INT, bpp,
426 "depth", G_TYPE_INT, depth,
427 "red_mask", G_TYPE_INT, r_mask,
428 "green_mask", G_TYPE_INT, g_mask,
429 "blue_mask", G_TYPE_INT, b_mask,
430 "endianness", G_TYPE_INT, endianness, NULL);
/* mask-less RGB (e.g. paletted): bpp/depth/endianness only */
432 caps = gst_ff_vid_caps_new (context, "video/x-raw-rgb",
433 "bpp", G_TYPE_INT, bpp,
434 "depth", G_TYPE_INT, depth,
435 "endianness", G_TYPE_INT, endianness, NULL);
436 if (context && context->pix_fmt == PIX_FMT_PAL8) {
437 gst_ffmpeg_set_palette (caps, context);
441 caps = gst_ff_vid_caps_new (context, "video/x-raw-yuv",
442 "format", GST_TYPE_FOURCC, fmt, NULL);
447 GST_DEBUG ("caps for pix_fmt=%d: %" GST_PTR_FORMAT, pix_fmt, caps);
449 GST_LOG ("No caps found for pix_fmt=%d", pix_fmt);
455 /* Convert a FFMPEG Sample Format and optional AVCodecContext
456 * to a GstCaps. If the context is omitted, no fixed values
457 * for video/audio size will be included in the GstCaps
459 * See below for usefulness
/* Only integer sample formats are mapped, to audio/x-raw-int with native
 * byte order.  NOTE(review): sampled source — the switch cases that set
 * `bpp` and `signedness`, and the `bpp` declaration, are not visible. */
463 gst_ffmpeg_smpfmt_to_caps (enum SampleFormat sample_fmt,
464 AVCodecContext * context)
466 GstCaps *caps = NULL;
469 gboolean signedness = FALSE;
471 switch (sample_fmt) {
483 caps = gst_ff_aud_caps_new (context, "audio/x-raw-int",
484 "signed", G_TYPE_BOOLEAN, signedness,
485 "endianness", G_TYPE_INT, G_BYTE_ORDER,
486 "width", G_TYPE_INT, bpp, "depth", G_TYPE_INT, bpp, NULL);
490 GST_DEBUG ("caps for sample_fmt=%d: %" GST_PTR_FORMAT, sample_fmt, caps);
492 GST_LOG ("No caps found for sample_fmt=%d", sample_fmt);
498 /* Convert a FFMPEG codec Type and optional AVCodecContext
499 * to a GstCaps. If the context is omitted, no fixed values
500 * for video/audio size will be included in the GstCaps
502 * CodecType is primarily meant for uncompressed data GstCaps!
/* Without a usable context, builds the union of caps over every pixel
 * format (video) or every integer sample format up to S16 (audio).
 * NOTE(review): sampled source — the context!=NULL tests, `temp` NULL
 * checks, breaks and returns are not visible here. */
506 gst_ffmpegcsp_codectype_to_caps (enum CodecType codec_type,
507 AVCodecContext * context)
511 switch (codec_type) {
512 case CODEC_TYPE_VIDEO:
/* width == -1 marks an unconfigured context: fall back to template caps */
514 caps = gst_ffmpeg_pixfmt_to_caps (context->pix_fmt,
515 context->width == -1 ? NULL : context);
520 caps = gst_caps_new_empty ();
521 for (i = 0; i < PIX_FMT_NB; i++) {
522 temp = gst_ffmpeg_pixfmt_to_caps (i, NULL);
524 gst_caps_append (caps, temp);
530 case CODEC_TYPE_AUDIO:
532 caps = gst_ffmpeg_smpfmt_to_caps (context->sample_fmt, context);
537 caps = gst_caps_new_empty ();
538 for (i = 0; i <= SAMPLE_FMT_S16; i++) {
539 temp = gst_ffmpeg_smpfmt_to_caps (i, NULL);
541 gst_caps_append (caps, temp);
556 /* Convert a GstCaps (audio/raw) to a FFMPEG SampleFmt
557 * and other audio properties in a AVCodecContext.
559 * For usefulness, see below
/* Fills channels, sample_rate and (for native-endian signed 16-bit)
 * sample_fmt from fixed caps.  Expects exactly one caps structure.
 * NOTE(review): sampled source — handling of other width/depth combos,
 * if any, is not visible here. */
563 gst_ffmpeg_caps_to_smpfmt (const GstCaps * caps,
564 AVCodecContext * context, gboolean raw)
566 GstStructure *structure;
567 gint depth = 0, width = 0, endianness = 0;
568 gboolean signedness = FALSE;
570 g_return_if_fail (gst_caps_get_size (caps) == 1);
571 structure = gst_caps_get_structure (caps, 0);
573 gst_structure_get_int (structure, "channels", &context->channels);
574 gst_structure_get_int (structure, "rate", &context->sample_rate);
/* all four int-audio fields must be present and fixed to pick a format */
579 if (gst_structure_get_int (structure, "width", &width) &&
580 gst_structure_get_int (structure, "depth", &depth) &&
581 gst_structure_get_boolean (structure, "signed", &signedness) &&
582 gst_structure_get_int (structure, "endianness", &endianness)) {
583 if (width == 16 && depth == 16 &&
584 endianness == G_BYTE_ORDER && signedness == TRUE) {
585 context->sample_fmt = SAMPLE_FMT_S16;
591 /* Convert a GstCaps (video/raw) to a FFMPEG PixFmt
592 * and other video properties in a AVCodecContext.
594 * For usefulness, see below
/* Fills width, height, frame rate and pix_fmt of @context from fixed raw
 * video caps.  The fourcc (YUV), masks (RGB) or bpp/endianness (gray)
 * select the PixelFormat.  NOTE(review): sampled source — declarations of
 * `ret`/`fps`/`fourcc`, breaks and several closing braces are missing. */
598 gst_ffmpeg_caps_to_pixfmt (const GstCaps * caps,
599 AVCodecContext * context, gboolean raw)
601 GstStructure *structure;
605 g_return_if_fail (gst_caps_get_size (caps) == 1);
606 structure = gst_caps_get_structure (caps, 0);
/* width and height are mandatory; bail out (void return) if absent */
608 ret = gst_structure_get_int (structure, "width", &context->width);
609 ret &= gst_structure_get_int (structure, "height", &context->height);
610 g_return_if_fail (ret == TRUE);
612 fps = gst_structure_get_value (structure, "framerate");
613 g_return_if_fail (GST_VALUE_HOLDS_FRACTION (fps));
615 /* framerate does not really matter */
616 context->frame_rate = gst_value_get_fraction_numerator (fps);
617 context->frame_rate_base = gst_value_get_fraction_denominator (fps);
/* YUV: map the caps fourcc straight to a PixelFormat */
622 if (gst_structure_has_name (structure, "video/x-raw-yuv")) {
625 if (gst_structure_get_fourcc (structure, "format", &fourcc)) {
627 case GST_MAKE_FOURCC ('Y', 'U', 'Y', '2'):
628 case GST_MAKE_FOURCC ('S', 'U', 'Y', 'V'):
629 context->pix_fmt = PIX_FMT_YUV422;
631 case GST_MAKE_FOURCC ('U', 'Y', 'V', 'Y'):
632 case GST_MAKE_FOURCC ('S', 'Y', 'V', 'Y'):
633 context->pix_fmt = PIX_FMT_UYVY422;
635 case GST_MAKE_FOURCC ('Y', 'V', 'Y', 'U'):
636 context->pix_fmt = PIX_FMT_YVYU422;
638 case GST_MAKE_FOURCC ('I', 'Y', 'U', '1'):
639 context->pix_fmt = PIX_FMT_UYVY411;
641 case GST_MAKE_FOURCC ('I', '4', '2', '0'):
642 case GST_MAKE_FOURCC ('S', '4', '2', '0'):
643 context->pix_fmt = PIX_FMT_YUV420P;
645 case GST_MAKE_FOURCC ('A', '4', '2', '0'):
646 context->pix_fmt = PIX_FMT_YUVA420P;
648 case GST_MAKE_FOURCC ('N', 'V', '1', '2'):
649 context->pix_fmt = PIX_FMT_NV12;
651 case GST_MAKE_FOURCC ('N', 'V', '2', '1'):
652 context->pix_fmt = PIX_FMT_NV21;
654 case GST_MAKE_FOURCC ('Y', 'V', '1', '2'):
655 context->pix_fmt = PIX_FMT_YVU420P;
657 case GST_MAKE_FOURCC ('Y', '4', '1', 'B'):
658 context->pix_fmt = PIX_FMT_YUV411P;
660 case GST_MAKE_FOURCC ('Y', '4', '2', 'B'):
661 context->pix_fmt = PIX_FMT_YUV422P;
663 case GST_MAKE_FOURCC ('Y', 'U', 'V', '9'):
664 context->pix_fmt = PIX_FMT_YUV410P;
666 case GST_MAKE_FOURCC ('Y', 'V', 'U', '9'):
667 context->pix_fmt = PIX_FMT_YVU410P;
669 case GST_MAKE_FOURCC ('v', '3', '0', '8'):
670 context->pix_fmt = PIX_FMT_V308;
672 case GST_MAKE_FOURCC ('A', 'Y', 'U', 'V'):
673 context->pix_fmt = PIX_FMT_AYUV4444;
675 case GST_MAKE_FOURCC ('Y', '4', '4', '4'):
676 context->pix_fmt = PIX_FMT_YUV444P;
678 case GST_MAKE_FOURCC ('Y', '8', '0', '0'):
679 case GST_MAKE_FOURCC ('Y', '8', ' ', ' '):
680 case GST_MAKE_FOURCC ('G', 'R', 'E', 'Y'):
681 context->pix_fmt = PIX_FMT_Y800;
683 case GST_MAKE_FOURCC ('Y', '1', '6', ' '):
684 context->pix_fmt = PIX_FMT_Y16;
/* RGB: distinguish by red mask, with/without alpha, per host byte order */
688 } else if (gst_structure_has_name (structure, "video/x-raw-rgb")) {
689 gint bpp = 0, rmask = 0, endianness = 0, amask = 0, depth = 0;
691 if (gst_structure_get_int (structure, "bpp", &bpp) &&
692 gst_structure_get_int (structure, "endianness", &endianness)) {
693 if (gst_structure_get_int (structure, "red_mask", &rmask)) {
/* 32-bit with alpha channel */
696 if (gst_structure_get_int (structure, "alpha_mask", &amask)) {
697 #if (G_BYTE_ORDER == G_BIG_ENDIAN)
698 if (rmask == 0x0000ff00)
699 context->pix_fmt = PIX_FMT_BGRA32;
700 else if (rmask == 0x00ff0000)
701 context->pix_fmt = PIX_FMT_RGBA32;
702 else if (rmask == 0xff000000)
703 context->pix_fmt = PIX_FMT_ARGB32;
704 else // if (rmask == 0x000000ff)
705 context->pix_fmt = PIX_FMT_ABGR32;
707 if (rmask == 0x00ff0000)
708 context->pix_fmt = PIX_FMT_BGRA32;
709 else if (rmask == 0x0000ff00)
710 context->pix_fmt = PIX_FMT_RGBA32;
711 else if (rmask == 0x000000ff)
712 context->pix_fmt = PIX_FMT_ARGB32;
713 else // if (rmask == 0xff000000)
714 context->pix_fmt = PIX_FMT_ABGR32;
/* 32-bit without alpha */
717 #if (G_BYTE_ORDER == G_BIG_ENDIAN)
718 if (rmask == 0x00ff0000)
719 context->pix_fmt = PIX_FMT_RGB32;
720 else if (rmask == 0x0000ff00)
721 context->pix_fmt = PIX_FMT_BGR32;
722 else if (rmask == 0xff000000)
723 context->pix_fmt = PIX_FMT_xRGB32;
724 else // if (rmask == 0x000000ff)
725 context->pix_fmt = PIX_FMT_BGRx32;
727 if (rmask == 0x0000ff00)
728 context->pix_fmt = PIX_FMT_RGB32;
729 else if (rmask == 0x00ff0000)
730 context->pix_fmt = PIX_FMT_BGR32;
731 else if (rmask == 0x000000ff)
732 context->pix_fmt = PIX_FMT_xRGB32;
733 else // if (rmask == 0xff000000)
734 context->pix_fmt = PIX_FMT_BGRx32;
/* 24-bit: red mask alone distinguishes BGR from RGB */
739 if (rmask == 0x0000FF)
740 context->pix_fmt = PIX_FMT_BGR24;
742 context->pix_fmt = PIX_FMT_RGB24;
/* 16/15-bit packed RGB: only meaningful in host byte order */
745 if (endianness == G_BYTE_ORDER) {
746 context->pix_fmt = PIX_FMT_RGB565;
747 if (gst_structure_get_int (structure, "depth", &depth)) {
749 context->pix_fmt = PIX_FMT_RGB555;
754 if (endianness == G_BYTE_ORDER)
755 context->pix_fmt = PIX_FMT_RGB555;
/* no red_mask: 8-bit paletted RGB; pull the palette from the caps */
763 context->pix_fmt = PIX_FMT_PAL8;
764 gst_ffmpeg_get_palette (caps, context);
/* gray: 8-bit directly, 16-bit split on the endianness field */
768 } else if (gst_structure_has_name (structure, "video/x-raw-gray")) {
771 if (gst_structure_get_int (structure, "bpp", &bpp)) {
774 context->pix_fmt = PIX_FMT_GRAY8;
779 if (gst_structure_get_int (structure, "endianness", &endianness)) {
780 if (endianness == G_LITTLE_ENDIAN)
781 context->pix_fmt = PIX_FMT_GRAY16_L;
782 else if (endianness == G_BIG_ENDIAN)
783 context->pix_fmt = PIX_FMT_GRAY16_B;
792 /* Convert a GstCaps and a FFMPEG codec Type to a
793 * AVCodecContext. If the context is omitted, no fixed values
794 * for video/audio size will be included in the context
796 * CodecType is primarily meant for uncompressed data GstCaps!
/* Dispatches to the pixfmt (video) or smpfmt (audio) caps parser above.
 * NOTE(review): sampled source — the switch statement, default case and
 * any guard on @context are not visible here. */
800 gst_ffmpegcsp_caps_with_codectype (enum CodecType type,
801 const GstCaps * caps, AVCodecContext * context)
807 case CODEC_TYPE_VIDEO:
808 gst_ffmpeg_caps_to_pixfmt (caps, context, TRUE);
811 case CODEC_TYPE_AUDIO:
812 gst_ffmpeg_caps_to_smpfmt (caps, context, TRUE);
/* GEN_MASK(x): low-bit mask of x ones, e.g. GEN_MASK(2) == 0x3.
 * ROUND_UP_X(v,x): round v up to the next multiple of 2^x.
 * DIV_ROUND_UP_X(v,x): divide v by 2^x, rounding up. */
821 #define GEN_MASK(x) ((1<<(x))-1)
822 #define ROUND_UP_X(v,x) (((v) + GEN_MASK(x)) & ~GEN_MASK(x))
823 #define DIV_ROUND_UP_X(v,x) (((v) + GEN_MASK(x)) >> (x))
826 * Fill in pointers to memory in a AVPicture, where
827 * everything is aligned by 4 (as required by X).
828 * This is mostly a copy from imgconvert.c with some
832 gst_ffmpegcsp_avpicture_fill (AVPicture * picture,
833 uint8_t * ptr, enum PixelFormat pix_fmt, int width, int height,
836 int size, w2, h2, size2;
840 pinfo = get_pix_fmt_info (pix_fmt);
842 picture->interlaced = interlaced;
845 case PIX_FMT_YUV420P:
846 case PIX_FMT_YUV422P:
847 case PIX_FMT_YUV444P:
848 case PIX_FMT_YUV410P:
849 case PIX_FMT_YUV411P:
850 case PIX_FMT_YUVJ420P:
851 case PIX_FMT_YUVJ422P:
852 case PIX_FMT_YUVJ444P:
853 stride = GST_ROUND_UP_4 (width);
854 h2 = ROUND_UP_X (height, pinfo->y_chroma_shift);
856 w2 = DIV_ROUND_UP_X (width, pinfo->x_chroma_shift);
857 stride2 = GST_ROUND_UP_4 (w2);
858 h2 = DIV_ROUND_UP_X (height, pinfo->y_chroma_shift);
859 size2 = stride2 * h2;
860 picture->data[0] = ptr;
861 picture->data[1] = picture->data[0] + size;
862 picture->data[2] = picture->data[1] + size2;
863 picture->linesize[0] = stride;
864 picture->linesize[1] = stride2;
865 picture->linesize[2] = stride2;
866 return size + 2 * size2;
867 /* PIX_FMT_YVU420P = YV12: same as PIX_FMT_YUV420P, but
868 * with U and V plane swapped. Strides as in videotestsrc */
869 case PIX_FMT_YUVA420P:
870 stride = GST_ROUND_UP_4 (width);
871 h2 = ROUND_UP_X (height, pinfo->y_chroma_shift);
873 w2 = DIV_ROUND_UP_X (width, pinfo->x_chroma_shift);
874 stride2 = GST_ROUND_UP_4 (w2);
875 h2 = DIV_ROUND_UP_X (height, pinfo->y_chroma_shift);
876 size2 = stride2 * h2;
877 picture->data[0] = ptr;
878 picture->data[1] = picture->data[0] + size;
879 picture->data[2] = picture->data[1] + size2;
880 picture->data[3] = picture->data[2] + size2;
881 picture->linesize[0] = stride;
882 picture->linesize[1] = stride2;
883 picture->linesize[2] = stride2;
884 picture->linesize[3] = stride;
885 return 2 * size + 2 * size2;
886 case PIX_FMT_YVU410P:
887 case PIX_FMT_YVU420P:
888 stride = GST_ROUND_UP_4 (width);
889 h2 = ROUND_UP_X (height, pinfo->y_chroma_shift);
891 w2 = DIV_ROUND_UP_X (width, pinfo->x_chroma_shift);
892 stride2 = GST_ROUND_UP_4 (w2);
893 h2 = DIV_ROUND_UP_X (height, pinfo->y_chroma_shift);
894 size2 = stride2 * h2;
895 picture->data[0] = ptr;
896 picture->data[2] = picture->data[0] + size;
897 picture->data[1] = picture->data[2] + size2;
898 picture->linesize[0] = stride;
899 picture->linesize[1] = stride2;
900 picture->linesize[2] = stride2;
901 return size + 2 * size2;
904 stride = GST_ROUND_UP_4 (width);
905 h2 = ROUND_UP_X (height, pinfo->y_chroma_shift);
907 w2 = 2 * DIV_ROUND_UP_X (width, pinfo->x_chroma_shift);
908 stride2 = GST_ROUND_UP_4 (w2);
909 h2 = DIV_ROUND_UP_X (height, pinfo->y_chroma_shift);
910 size2 = stride2 * h2;
911 picture->data[0] = ptr;
912 picture->data[1] = picture->data[0] + size;
913 picture->data[2] = NULL;
914 picture->linesize[0] = stride;
915 picture->linesize[1] = stride2;
916 picture->linesize[2] = 0;
920 stride = GST_ROUND_UP_4 (width * 3);
921 size = stride * height;
922 picture->data[0] = ptr;
923 picture->data[1] = NULL;
924 picture->data[2] = NULL;
925 picture->linesize[0] = stride;
927 case PIX_FMT_AYUV4444:
937 size = stride * height;
938 picture->data[0] = ptr;
939 picture->data[1] = NULL;
940 picture->data[2] = NULL;
941 picture->linesize[0] = stride;
946 case PIX_FMT_UYVY422:
947 case PIX_FMT_YVYU422:
948 stride = GST_ROUND_UP_4 (width * 2);
949 size = stride * height;
950 picture->data[0] = ptr;
951 picture->data[1] = NULL;
952 picture->data[2] = NULL;
953 picture->linesize[0] = stride;
956 stride = GST_ROUND_UP_4 (width * 3);
957 size = stride * height;
958 picture->data[0] = ptr;
959 picture->data[1] = NULL;
960 picture->data[2] = NULL;
961 picture->linesize[0] = stride;
963 case PIX_FMT_UYVY411:
965 GST_ROUND_UP_4 (GST_ROUND_UP_4 (width) + GST_ROUND_UP_4 (width) / 2);
966 size = stride * height;
967 picture->data[0] = ptr;
968 picture->data[1] = NULL;
969 picture->data[2] = NULL;
970 picture->linesize[0] = stride;
974 stride = GST_ROUND_UP_4 (width);
975 size = stride * height;
976 picture->data[0] = ptr;
977 picture->data[1] = NULL;
978 picture->data[2] = NULL;
979 picture->linesize[0] = stride;
982 case PIX_FMT_GRAY16_L:
983 case PIX_FMT_GRAY16_B:
984 stride = GST_ROUND_UP_4 (width * 2);
985 size = stride * height;
986 picture->data[0] = ptr;
987 picture->data[1] = NULL;
988 picture->data[2] = NULL;
989 picture->linesize[0] = stride;
991 case PIX_FMT_MONOWHITE:
992 case PIX_FMT_MONOBLACK:
993 stride = GST_ROUND_UP_4 ((width + 7) >> 3);
994 size = stride * height;
995 picture->data[0] = ptr;
996 picture->data[1] = NULL;
997 picture->data[2] = NULL;
998 picture->linesize[0] = stride;
1001 /* already forced to be with stride, so same result as other function */
1002 stride = GST_ROUND_UP_4 (width);
1003 size = stride * height;
1004 picture->data[0] = ptr;
1005 picture->data[1] = ptr + size; /* palette is stored here as 256 32 bit words */
1006 picture->data[2] = NULL;
1007 picture->linesize[0] = stride;
1008 picture->linesize[1] = 4;
1009 return size + 256 * 4;
1011 picture->data[0] = NULL;
1012 picture->data[1] = NULL;
1013 picture->data[2] = NULL;
1014 picture->data[3] = NULL;