2 * Copyright (C) <1999> Erik Walthinsen <omega@cse.ogi.edu>
4 * Copyright (c) 2002-2004 Ronald Bultje <rbultje@ronald.bitfreak.net>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Library General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Library General Public License for more details.
16 * You should have received a copy of the GNU Library General Public
17 * License along with this library; if not, write to the
18 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19 * Boston, MA 02111-1307, USA.
31 #include "gstffmpegcodecmap.h"
34 * Read a palette from a caps.
38 gst_ffmpeg_get_palette (const GstCaps * caps, AVCodecContext * context)
40 GstStructure *str = gst_caps_get_structure (caps, 0);
41 const GValue *palette_v;
43 /* do we have a palette? */
44 if ((palette_v = gst_structure_get_value (str, "palette_data")) && context) {
45 const GstBuffer *palette;
47 palette = gst_value_get_buffer (palette_v);
48 if (palette && GST_BUFFER_SIZE (palette) >= 256 * 4) {
50 av_free (context->palctrl);
51 context->palctrl = av_malloc (sizeof (AVPaletteControl));
52 context->palctrl->palette_changed = 1;
53 memcpy (context->palctrl->palette, GST_BUFFER_DATA (palette),
60 gst_ffmpeg_set_palette (GstCaps * caps, AVCodecContext * context)
62 if (context->palctrl) {
63 GstBuffer *palette = gst_buffer_new_and_alloc (256 * 4);
65 memcpy (GST_BUFFER_DATA (palette), context->palctrl->palette,
67 gst_caps_set_simple (caps, "palette_data", GST_TYPE_BUFFER, palette, NULL);
68 gst_buffer_unref (palette);
/* Build video caps for @mimetype.  When a variable named `context` is in
 * scope and non-NULL, width/height/framerate are pinned to the context's
 * values; otherwise they are left as full ranges so the caps stay
 * unfixed.
 *
 * We should actually do this stuff at the end, like in riff-media.c,
 * but I'm too lazy today. Maybe later.
 */
#define GST_FF_VID_CAPS_NEW(mimetype, ...)                              \
    (context != NULL) ?                                                 \
    gst_caps_new_simple (mimetype,                                      \
        "width", G_TYPE_INT, context->width,                            \
        "height", G_TYPE_INT, context->height,                          \
        "framerate", GST_TYPE_FRACTION,                                 \
        (gint) context->frame_rate, (gint) context->frame_rate_base,    \
        __VA_ARGS__, NULL)                                              \
    :                                                                   \
    gst_caps_new_simple (mimetype,                                      \
        "width", GST_TYPE_INT_RANGE, 1, G_MAXINT,                       \
        "height", GST_TYPE_INT_RANGE, 1, G_MAXINT,                      \
        "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, G_MAXINT, 1,        \
        __VA_ARGS__, NULL)
/* Audio counterpart of GST_FF_VID_CAPS_NEW: pin rate/channels when a
 * non-NULL `context` is in scope, otherwise emit only the caller's
 * extra fields.
 */
#define GST_FF_AUD_CAPS_NEW(mimetype, ...)                              \
    (context != NULL) ?                                                 \
    gst_caps_new_simple (mimetype,                                      \
        "rate", G_TYPE_INT, context->sample_rate,                       \
        "channels", G_TYPE_INT, context->channels,                      \
        __VA_ARGS__, NULL)                                              \
    :                                                                   \
    gst_caps_new_simple (mimetype, __VA_ARGS__, NULL)
109 /* Convert a FFMPEG Pixel Format and optional AVCodecContext
110 * to a GstCaps. If the context is omitted, no fixed values
111 * for video/audio size will be included in the GstCaps
113 * See below for usefulness
/* gst_ffmpeg_pixfmt_to_caps:
 * Map an FFmpeg PixelFormat (plus optional AVCodecContext for fixed
 * width/height/framerate) to a GstCaps describing raw video.
 * NOTE(review): this excerpt is line-sampled — many original lines
 * (switch header, mask assignments, #else/#endif branches, closing
 * braces) are missing; comments below describe only what is visible.
 */
117 gst_ffmpeg_pixfmt_to_caps (enum PixelFormat pix_fmt, AVCodecContext * context)
119 GstCaps *caps = NULL;
121 int bpp = 0, depth = 0, endianness = 0;
122 gulong g_mask = 0, r_mask = 0, b_mask = 0, a_mask = 0;
/* planar/packed YUV formats: record the GStreamer fourcc for each */
126 case PIX_FMT_YUV420P:
127 fmt = GST_MAKE_FOURCC ('I', '4', '2', '0');
129 case PIX_FMT_YVU420P:
130 fmt = GST_MAKE_FOURCC ('Y', 'V', '1', '2');
133 fmt = GST_MAKE_FOURCC ('Y', 'U', 'Y', '2');
135 case PIX_FMT_UYVY422:
136 fmt = GST_MAKE_FOURCC ('U', 'Y', 'V', 'Y');
140 endianness = G_BIG_ENDIAN;
147 endianness = G_BIG_ENDIAN;
152 case PIX_FMT_YUV422P:
153 fmt = GST_MAKE_FOURCC ('Y', '4', '2', 'B');
155 case PIX_FMT_YUV444P:
156 fmt = GST_MAKE_FOURCC ('Y', '4', '4', '4');
/* RGB(A) formats: mask assignments differ by host byte order; the
 * #else/#endif halves and mask values were sampled out of this view */
161 endianness = G_BIG_ENDIAN;
162 #if (G_BYTE_ORDER == G_BIG_ENDIAN)
175 endianness = G_BIG_ENDIAN;
176 #if (G_BYTE_ORDER == G_BIG_ENDIAN)
189 endianness = G_BIG_ENDIAN;
190 #if (G_BYTE_ORDER == G_BIG_ENDIAN)
205 endianness = G_BIG_ENDIAN;
206 #if (G_BYTE_ORDER == G_BIG_ENDIAN)
219 case PIX_FMT_YUV410P:
220 fmt = GST_MAKE_FOURCC ('Y', 'U', 'V', '9');
222 case PIX_FMT_YUV411P:
223 fmt = GST_MAKE_FOURCC ('Y', '4', '1', 'B');
227 endianness = G_BYTE_ORDER;
235 endianness = G_BYTE_ORDER;
242 endianness = G_BYTE_ORDER;
244 case PIX_FMT_AYUV4444:
245 fmt = GST_MAKE_FOURCC ('A', 'Y', 'U', 'V');
/* grayscale output: bpp/depth only */
249 caps = GST_FF_VID_CAPS_NEW ("video/x-raw-gray",
250 "bpp", G_TYPE_INT, bpp, "depth", G_TYPE_INT, depth, NULL);
/* RGB output: with alpha mask, without alpha mask, or paletted
 * (no masks) — presumably chosen by a_mask/r_mask; the enclosing
 * if is not visible here */
260 caps = GST_FF_VID_CAPS_NEW ("video/x-raw-rgb",
261 "bpp", G_TYPE_INT, bpp,
262 "depth", G_TYPE_INT, depth,
263 "red_mask", G_TYPE_INT, r_mask,
264 "green_mask", G_TYPE_INT, g_mask,
265 "blue_mask", G_TYPE_INT, b_mask,
266 "alpha_mask", G_TYPE_INT, a_mask,
267 "endianness", G_TYPE_INT, endianness, NULL);
268 } else if (r_mask != 0) {
269 caps = GST_FF_VID_CAPS_NEW ("video/x-raw-rgb",
270 "bpp", G_TYPE_INT, bpp,
271 "depth", G_TYPE_INT, depth,
272 "red_mask", G_TYPE_INT, r_mask,
273 "green_mask", G_TYPE_INT, g_mask,
274 "blue_mask", G_TYPE_INT, b_mask,
275 "endianness", G_TYPE_INT, endianness, NULL);
277 caps = GST_FF_VID_CAPS_NEW ("video/x-raw-rgb",
278 "bpp", G_TYPE_INT, bpp,
279 "depth", G_TYPE_INT, depth,
280 "endianness", G_TYPE_INT, endianness, NULL);
/* paletted RGB also exports the context's palette into the caps */
282 gst_ffmpeg_set_palette (caps, context);
/* YUV output path: fourcc chosen above */
286 caps = GST_FF_VID_CAPS_NEW ("video/x-raw-yuv",
287 "format", GST_TYPE_FOURCC, fmt, NULL);
/* debug trace of the produced caps; g_free of str not visible here */
292 char *str = gst_caps_to_string (caps);
294 GST_DEBUG ("caps for pix_fmt=%d: %s", pix_fmt, str);
297 GST_WARNING ("No caps found for pix_fmt=%d", pix_fmt);
303 /* Convert a FFMPEG Sample Format and optional AVCodecContext
304 * to a GstCaps. If the context is omitted, no fixed values
305 * for video/audio size will be included in the GstCaps
307 * See below for usefulness
311 gst_ffmpeg_smpfmt_to_caps (enum SampleFormat sample_fmt,
312 AVCodecContext * context)
314 GstCaps *caps = NULL;
317 gboolean signedness = FALSE;
319 switch (sample_fmt) {
331 caps = GST_FF_AUD_CAPS_NEW ("audio/x-raw-int",
332 "signed", G_TYPE_BOOLEAN, signedness,
333 "endianness", G_TYPE_INT, G_BYTE_ORDER,
334 "width", G_TYPE_INT, bpp, "depth", G_TYPE_INT, bpp, NULL);
338 char *str = gst_caps_to_string (caps);
340 GST_DEBUG ("caps for sample_fmt=%d: %s", sample_fmt, str);
343 GST_WARNING ("No caps found for sample_fmt=%d", sample_fmt);
349 /* Convert a FFMPEG codec Type and optional AVCodecContext
350 * to a GstCaps. If the context is omitted, no fixed values
351 * for video/audio size will be included in the GstCaps
353 * CodecType is primarily meant for uncompressed data GstCaps!
357 gst_ffmpegcsp_codectype_to_caps (enum CodecType codec_type,
358 AVCodecContext * context)
362 switch (codec_type) {
363 case CODEC_TYPE_VIDEO:
365 caps = gst_ffmpeg_pixfmt_to_caps (context->pix_fmt,
366 context->width == -1 ? NULL : context);
371 caps = gst_caps_new_empty ();
372 for (i = 0; i < PIX_FMT_NB; i++) {
373 temp = gst_ffmpeg_pixfmt_to_caps (i, NULL);
375 gst_caps_append (caps, temp);
381 case CODEC_TYPE_AUDIO:
383 caps = gst_ffmpeg_smpfmt_to_caps (context->sample_fmt, context);
388 caps = gst_caps_new_empty ();
389 for (i = 0; i <= SAMPLE_FMT_S16; i++) {
390 temp = gst_ffmpeg_smpfmt_to_caps (i, NULL);
392 gst_caps_append (caps, temp);
407 /* Convert a GstCaps (audio/raw) to a FFMPEG SampleFmt
408 * and other audio properties in a AVCodecContext.
410 * For usefulness, see below
414 gst_ffmpeg_caps_to_smpfmt (const GstCaps * caps,
415 AVCodecContext * context, gboolean raw)
417 GstStructure *structure;
418 gint depth = 0, width = 0, endianness = 0;
419 gboolean signedness = FALSE;
421 g_return_if_fail (gst_caps_get_size (caps) == 1);
422 structure = gst_caps_get_structure (caps, 0);
424 gst_structure_get_int (structure, "channels", &context->channels);
425 gst_structure_get_int (structure, "rate", &context->sample_rate);
430 if (gst_structure_get_int (structure, "width", &width) &&
431 gst_structure_get_int (structure, "depth", &depth) &&
432 gst_structure_get_int (structure, "signed", &signedness) &&
433 gst_structure_get_int (structure, "endianness", &endianness)) {
434 if (width == 16 && depth == 16 &&
435 endianness == G_BYTE_ORDER && signedness == TRUE) {
436 context->sample_fmt = SAMPLE_FMT_S16;
442 /* Convert a GstCaps (video/raw) to a FFMPEG PixFmt
443 * and other video properties in a AVCodecContext.
445 * For usefulness, see below
/* gst_ffmpeg_caps_to_pixfmt:
 * Transfer video properties (size, framerate, pixel format) from fixed
 * GstCaps into an AVCodecContext.
 * NOTE(review): this excerpt is line-sampled — switch headers, break
 * statements, #else/#endif halves and closing braces are missing;
 * comments describe only the visible lines.
 */
449 gst_ffmpeg_caps_to_pixfmt (const GstCaps * caps,
450 AVCodecContext * context, gboolean raw)
452 GstStructure *structure;
456 g_return_if_fail (gst_caps_get_size (caps) == 1);
457 structure = gst_caps_get_structure (caps, 0);
/* width and height are mandatory in video caps */
459 ret = gst_structure_get_int (structure, "width", &context->width);
460 ret &= gst_structure_get_int (structure, "height", &context->height);
461 g_return_if_fail (ret == TRUE);
463 fps = gst_structure_get_value (structure, "framerate");
464 g_return_if_fail (GST_VALUE_HOLDS_FRACTION (fps));
466 /* framerate does not really matter */
467 context->frame_rate = gst_value_get_fraction_numerator (fps);
468 context->frame_rate_base = gst_value_get_fraction_denominator (fps);
/* raw YUV: map the fourcc onto the matching FFmpeg planar/packed format */
473 if (gst_structure_has_name (structure, "video/x-raw-yuv")) {
476 if (gst_structure_get_fourcc (structure, "format", &fourcc)) {
478 case GST_MAKE_FOURCC ('Y', 'U', 'Y', '2'):
479 context->pix_fmt = PIX_FMT_YUV422;
481 case GST_MAKE_FOURCC ('U', 'Y', 'V', 'Y'):
482 context->pix_fmt = PIX_FMT_UYVY422;
484 case GST_MAKE_FOURCC ('I', '4', '2', '0'):
485 context->pix_fmt = PIX_FMT_YUV420P;
487 case GST_MAKE_FOURCC ('Y', 'V', '1', '2'):
488 context->pix_fmt = PIX_FMT_YVU420P;
490 case GST_MAKE_FOURCC ('Y', '4', '1', 'B'):
491 context->pix_fmt = PIX_FMT_YUV411P;
493 case GST_MAKE_FOURCC ('Y', '4', '2', 'B'):
494 context->pix_fmt = PIX_FMT_YUV422P;
496 case GST_MAKE_FOURCC ('Y', 'U', 'V', '9'):
497 context->pix_fmt = PIX_FMT_YUV410P;
499 case GST_MAKE_FOURCC ('A', 'Y', 'U', 'V'):
500 context->pix_fmt = PIX_FMT_AYUV4444;
502 case GST_MAKE_FOURCC ('Y', '4', '4', '4'):
503 context->pix_fmt = PIX_FMT_YUV444P;
/* raw RGB: decide the format from bpp/endianness and the channel masks;
 * the mask comparisons are byte-order dependent (#else sides missing) */
507 } else if (gst_structure_has_name (structure, "video/x-raw-rgb")) {
508 gint bpp = 0, rmask = 0, endianness = 0, amask = 0, depth = 0;
510 if (gst_structure_get_int (structure, "bpp", &bpp) &&
511 gst_structure_get_int (structure, "endianness", &endianness)) {
512 if (gst_structure_get_int (structure, "red_mask", &rmask)) {
/* 32 bpp with alpha: BGRA vs RGBA by red-mask position */
515 if (gst_structure_get_int (structure, "alpha_mask", &amask)) {
516 #if (G_BYTE_ORDER == G_BIG_ENDIAN)
517 if (rmask == 0x0000ff00)
519 if (rmask == 0x00ff0000)
521 context->pix_fmt = PIX_FMT_BGRA32;
523 context->pix_fmt = PIX_FMT_RGBA32;
525 #if (G_BYTE_ORDER == G_BIG_ENDIAN)
526 if (rmask == 0x00ff0000)
528 if (rmask == 0x0000ff00)
530 context->pix_fmt = PIX_FMT_RGB32;
532 #if (G_BYTE_ORDER == G_BIG_ENDIAN)
533 if (rmask == 0x0000ff00)
535 if (rmask == 0x00ff0000)
537 context->pix_fmt = PIX_FMT_BGR32;
/* 24 bpp: BGR when red occupies the low byte, RGB otherwise */
541 if (rmask == 0x0000FF)
542 context->pix_fmt = PIX_FMT_BGR24;
544 context->pix_fmt = PIX_FMT_RGB24;
/* 16 bpp native endian: 565, downgraded to 555 when depth says 15 */
547 if (endianness == G_BYTE_ORDER) {
548 context->pix_fmt = PIX_FMT_RGB565;
549 if (gst_structure_get_int (structure, "depth", &depth)) {
551 context->pix_fmt = PIX_FMT_RGB555;
556 if (endianness == G_BYTE_ORDER)
557 context->pix_fmt = PIX_FMT_RGB555;
/* no masks at all: paletted RGB, import the palette from the caps */
565 context->pix_fmt = PIX_FMT_PAL8;
566 gst_ffmpeg_get_palette (caps, context);
/* raw grayscale: only 8 bpp is handled here */
570 } else if (gst_structure_has_name (structure, "video/x-raw-gray")) {
573 if (gst_structure_get_int (structure, "bpp", &bpp)) {
576 context->pix_fmt = PIX_FMT_GRAY8;
583 /* Convert a GstCaps and a FFMPEG codec Type to a
584 * AVCodecContext. If the context is omitted, no fixed values
585 * for video/audio size will be included in the context
587 * CodecType is primarily meant for uncompressed data GstCaps!
591 gst_ffmpegcsp_caps_with_codectype (enum CodecType type,
592 const GstCaps * caps, AVCodecContext * context)
598 case CODEC_TYPE_VIDEO:
599 gst_ffmpeg_caps_to_pixfmt (caps, context, TRUE);
602 case CODEC_TYPE_AUDIO:
603 gst_ffmpeg_caps_to_smpfmt (caps, context, TRUE);
/* Bit mask with the low x bits set, e.g. GEN_MASK (3) == 0x7. */
#define GEN_MASK(x) ((1 << (x)) - 1)
/* Round v up to the next multiple of (1 << x). */
#define ROUND_UP_X(v,x) (DIV_ROUND_UP_X (v, x) << (x))
/* Divide v by (1 << x), rounding any remainder up. */
#define DIV_ROUND_UP_X(v,x) (((v) + GEN_MASK (x)) >> (x))
617 * Fill in pointers to memory in a AVPicture, where
618 * everything is aligned by 4 (as required by X).
619 * This is mostly a copy from imgconvert.c with some
/* gst_ffmpegcsp_avpicture_fill:
 * Point the planes of @picture into the single buffer @ptr, using
 * 4-byte-aligned strides (as X requires), and return the total buffer
 * size needed.  Mostly copied from FFmpeg's imgconvert.c.
 * NOTE(review): this excerpt is line-sampled and the function continues
 * past the visible end; switch header, several case labels, size
 * computations and break/return lines are missing from this view.
 */
623 gst_ffmpegcsp_avpicture_fill (AVPicture * picture,
624 uint8_t * ptr, enum PixelFormat pix_fmt, int width, int height)
626 int size, w2, h2, size2;
630 pinfo = get_pix_fmt_info (pix_fmt);
/* planar YUV: luma plane plus two chroma planes scaled by the
 * per-format chroma shifts */
633 case PIX_FMT_YUV420P:
634 case PIX_FMT_YUV422P:
635 case PIX_FMT_YUV444P:
636 case PIX_FMT_YUV410P:
637 case PIX_FMT_YUV411P:
638 case PIX_FMT_YUVJ420P:
639 case PIX_FMT_YUVJ422P:
640 case PIX_FMT_YUVJ444P:
641 stride = GST_ROUND_UP_4 (width);
642 h2 = ROUND_UP_X (height, pinfo->y_chroma_shift);
644 w2 = DIV_ROUND_UP_X (width, pinfo->x_chroma_shift);
645 stride2 = GST_ROUND_UP_4 (w2);
646 h2 = DIV_ROUND_UP_X (height, pinfo->y_chroma_shift);
647 size2 = stride2 * h2;
648 picture->data[0] = ptr;
649 picture->data[1] = picture->data[0] + size;
650 picture->data[2] = picture->data[1] + size2;
651 picture->linesize[0] = stride;
652 picture->linesize[1] = stride2;
653 picture->linesize[2] = stride2;
654 return size + 2 * size2;
655 /* PIX_FMT_YVU420P = YV12: same as PIX_FMT_YUV420P, but
656 * with U and V plane swapped. Strides as in videotestsrc */
657 case PIX_FMT_YVU420P:
658 stride = GST_ROUND_UP_4 (width);
659 h2 = GST_ROUND_UP_2 (height);
661 stride2 = GST_ROUND_UP_8 (stride) / 2;
662 h2 = GST_ROUND_UP_2 (height) / 2;
663 size2 = stride2 * h2;
664 picture->data[0] = ptr;
/* note the swapped plane order: data[2] (V) before data[1] (U) */
665 picture->data[2] = picture->data[0] + size;
666 picture->data[1] = picture->data[2] + size2;
667 picture->linesize[0] = stride;
668 picture->linesize[1] = GST_ROUND_UP_8 (stride) / 2;
669 picture->linesize[2] = GST_ROUND_UP_8 (stride) / 2;
670 return size + 2 * size2;
/* packed 24-bit formats: single plane, 3 bytes per pixel */
673 stride = GST_ROUND_UP_4 (width * 3);
674 size = stride * height;
675 picture->data[0] = ptr;
676 picture->data[1] = NULL;
677 picture->data[2] = NULL;
678 picture->linesize[0] = stride;
/* packed 32-bit formats (stride computation not visible here) */
680 case PIX_FMT_AYUV4444:
686 size = stride * height;
687 picture->data[0] = ptr;
688 picture->data[1] = NULL;
689 picture->data[2] = NULL;
690 picture->linesize[0] = stride;
/* packed 16-bit-per-pixel formats, 2 bytes per pixel */
695 case PIX_FMT_UYVY422:
696 stride = GST_ROUND_UP_4 (width * 2);
697 size = stride * height;
698 picture->data[0] = ptr;
699 picture->data[1] = NULL;
700 picture->data[2] = NULL;
701 picture->linesize[0] = stride;
702 
703 case PIX_FMT_UYVY411:
704 /* FIXME, probably not the right stride */
705 stride = GST_ROUND_UP_4 (width);
706 size = stride * height;
707 picture->data[0] = ptr;
708 picture->data[1] = NULL;
709 picture->data[2] = NULL;
710 picture->linesize[0] = width + width / 2;
711 return size + size / 2;
/* single-plane 8-bit formats */
713 stride = GST_ROUND_UP_4 (width);
714 size = stride * height;
715 picture->data[0] = ptr;
716 picture->data[1] = NULL;
717 picture->data[2] = NULL;
718 picture->linesize[0] = stride;
/* 1-bit-per-pixel monochrome: 8 pixels per byte */
720 case PIX_FMT_MONOWHITE:
721 case PIX_FMT_MONOBLACK:
722 stride = GST_ROUND_UP_4 ((width + 7) >> 3);
723 size = stride * height;
724 picture->data[0] = ptr;
725 picture->data[1] = NULL;
726 picture->data[2] = NULL;
727 picture->linesize[0] = stride;
/* paletted 8-bit: pixel plane followed by the 1 KiB palette */
730 /* already forced to be with stride, so same result as other function */
731 stride = GST_ROUND_UP_4 (width);
732 size = stride * height;
733 picture->data[0] = ptr;
734 picture->data[1] = ptr + size; /* palette is stored here as 256 32 bit words */
735 picture->data[2] = NULL;
736 picture->linesize[0] = stride;
737 picture->linesize[1] = 4;
738 return size + 256 * 4;
/* unknown format: clear all plane pointers */
740 picture->data[0] = NULL;
741 picture->data[1] = NULL;
742 picture->data[2] = NULL;
743 picture->data[3] = NULL;