2 * Copyright (C) 1999 Erik Walthinsen <omega@cse.ogi.edu>
3 * Copyright (C) 2006 Tim-Philipp Müller <tim centricular net>
4 * Copyright (C) 2010 Sebastian Dröge <sebastian.droege@collabora.co.uk>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Library General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Library General Public License for more details.
16 * You should have received a copy of the GNU Library General Public
17 * License along with this library; if not, write to the
18 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19 * Boston, MA 02111-1307, USA.
22 * SECTION:element-videobox
23 * @see_also: #GstVideoCrop
25 * This plugin crops or enlarges the image. It takes 4 values as input, a
26 * top, bottom, left and right offset. Positive values will crop that many
27 * pixels from the respective border of the image, negative values will add
28 * that many pixels. When pixels are added, you can specify their color.
29 * Some predefined colors are usable with an enum property.
31 * The plugin is alpha channel aware and will try to negotiate with a format
32 * that supports alpha channels first. When alpha channel is active two
33 * other properties, alpha and border_alpha can be used to set the alpha
34 * values of the inner picture and the border respectively. An alpha value of
35 * 0.0 means total transparency, 1.0 is opaque.
37 * The videobox plugin has many uses such as doing a mosaic of pictures,
38 * letterboxing video, cutting out pieces of video, picture in picture, etc..
40 * Setting autocrop to true changes the behavior of the plugin so that
41 * caps determine crop properties rather than the other way around: given
42 * input and output dimensions, the crop values are selected so that the
43 * smaller frame is effectively centered in the larger frame. This
44 * involves either cropping or padding.
46 * If you use autocrop there is little point in setting the other
47 * properties manually because they will be overridden if the caps change,
48 * but nothing stops you from doing so.
52 * gst-launch videotestsrc ! videobox autocrop=true ! \
53 * "video/x-raw-yuv, width=600, height=400" ! ffmpegcolorspace ! ximagesink
61 #include "gstvideobox.h"
62 #include "gstvideoboxorc.h"
67 #include <gst/controller/gstcontroller.h>
69 #include "gst/glib-compat-private.h"
71 GST_DEBUG_CATEGORY_STATIC (videobox_debug);
72 #define GST_CAT_DEFAULT videobox_debug
74 /* From videotestsrc.c */
75 static const guint8 yuv_sdtv_colors_Y[VIDEO_BOX_FILL_LAST] =
76 { 16, 145, 41, 81, 210, 235 };
77 static const guint8 yuv_sdtv_colors_U[VIDEO_BOX_FILL_LAST] =
78 { 128, 54, 240, 90, 16, 128 };
79 static const guint8 yuv_sdtv_colors_V[VIDEO_BOX_FILL_LAST] =
80 { 128, 34, 110, 240, 146, 128 };
82 static const guint8 yuv_hdtv_colors_Y[VIDEO_BOX_FILL_LAST] =
83 { 16, 173, 32, 63, 219, 235 };
84 static const guint8 yuv_hdtv_colors_U[VIDEO_BOX_FILL_LAST] =
85 { 128, 42, 240, 102, 16, 128 };
86 static const guint8 yuv_hdtv_colors_V[VIDEO_BOX_FILL_LAST] =
87 { 128, 26, 118, 240, 138, 128 };
89 static const guint8 rgb_colors_R[VIDEO_BOX_FILL_LAST] =
90 { 0, 0, 0, 255, 255, 255 };
91 static const guint8 rgb_colors_G[VIDEO_BOX_FILL_LAST] =
92 { 0, 255, 0, 0, 255, 255 };
93 static const guint8 rgb_colors_B[VIDEO_BOX_FILL_LAST] =
94 { 0, 0, 255, 0, 0, 255 };
96 /* Generated by -bad/ext/cog/generate_tables */
97 static const int cog_ycbcr_to_rgb_matrix_8bit_hdtv[] = {
99 298, -55, -136, 19681,
103 static const int cog_ycbcr_to_rgb_matrix_8bit_sdtv[] = {
105 298, -100, -208, 34707,
109 static const gint cog_rgb_to_ycbcr_matrix_8bit_hdtv[] = {
111 -26, -87, 112, 32768,
112 112, -102, -10, 32768,
115 static const gint cog_rgb_to_ycbcr_matrix_8bit_sdtv[] = {
117 -38, -74, 112, 32768,
118 112, -94, -18, 32768,
121 static const gint cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit[] = {
122 256, -30, -53, 10600,
127 static const gint cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit[] = {
133 static const gint cog_identity_matrix_8bit[] = {
139 #define APPLY_MATRIX(m,o,v1,v2,v3) ((m[o*4] * v1 + m[o*4+1] * v2 + m[o*4+2] * v3 + m[o*4+3]) >> 8)
142 fill_ayuv (GstVideoBoxFill fill_type, guint b_alpha, GstVideoFormat format,
143 guint8 * dest, gboolean sdtv, gint width, gint height)
147 b_alpha = CLAMP (b_alpha, 0, 255);
150 empty_pixel = GUINT32_FROM_BE ((b_alpha << 24) |
151 (yuv_sdtv_colors_Y[fill_type] << 16) |
152 (yuv_sdtv_colors_U[fill_type] << 8) | yuv_sdtv_colors_V[fill_type]);
154 empty_pixel = GUINT32_FROM_BE ((b_alpha << 24) |
155 (yuv_hdtv_colors_Y[fill_type] << 16) |
156 (yuv_hdtv_colors_U[fill_type] << 8) | yuv_hdtv_colors_V[fill_type]);
158 orc_splat_u32 ((guint32 *) dest, empty_pixel, width * height);
162 copy_ayuv_ayuv (guint i_alpha, GstVideoFormat dest_format, guint8 * dest,
163 gboolean dest_sdtv, gint dest_width, gint dest_height, gint dest_x,
164 gint dest_y, GstVideoFormat src_format, const guint8 * src,
165 gboolean src_sdtv, gint src_width, gint src_height, gint src_x, gint src_y,
169 gint src_stride = 4 * src_width;
170 gint dest_stride = 4 * dest_width;
172 dest = dest + dest_y * dest_width * 4 + dest_x * 4;
173 src = src + src_y * src_width * 4 + src_x * 4;
177 if (dest_sdtv != src_sdtv) {
182 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
183 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
185 for (i = 0; i < h; i++) {
186 for (j = 0; j < w; j += 4) {
188 dest[j] = (src[j] * i_alpha) >> 8;
192 dest[j + 1] = APPLY_MATRIX (matrix, 0, y, u, v);
193 dest[j + 2] = APPLY_MATRIX (matrix, 1, y, u, v);
194 dest[j + 3] = APPLY_MATRIX (matrix, 2, y, u, v);
200 for (i = 0; i < h; i++) {
201 for (j = 0; j < w; j += 4) {
203 dest[j] = (src[j] * i_alpha) >> 8;
204 dest[j + 1] = src[j + 1];
205 dest[j + 2] = src[j + 2];
206 dest[j + 3] = src[j + 3];
215 copy_ayuv_i420 (guint i_alpha, GstVideoFormat dest_format, guint8 * dest,
216 gboolean dest_sdtv, gint dest_width, gint dest_height, gint dest_x,
217 gint dest_y, GstVideoFormat src_format, const guint8 * src,
218 gboolean src_sdtv, gint src_width, gint src_height, gint src_x, gint src_y,
222 guint8 *destY, *destY2, *destU, *destV;
223 gint dest_strideY, dest_strideUV;
232 dest_strideY = gst_video_format_get_row_stride (dest_format, 0, dest_width);
233 dest_strideUV = gst_video_format_get_row_stride (dest_format, 1, dest_width);
235 src_stride = 4 * src_width;
238 dest + gst_video_format_get_component_offset (dest_format, 0,
239 dest_width, dest_height);
241 dest + gst_video_format_get_component_offset (dest_format, 1,
242 dest_width, dest_height);
244 dest + gst_video_format_get_component_offset (dest_format, 2,
245 dest_width, dest_height);
247 destY = destY + dest_y * dest_strideY + dest_x;
248 destY2 = (dest_y < dest_height) ? destY + dest_strideY : destY;
249 destU = destU + (dest_y / 2) * dest_strideUV + dest_x / 2;
250 destV = destV + (dest_y / 2) * dest_strideUV + dest_x / 2;
252 src = src + src_y * src_stride + src_x * 4;
253 src2 = (src_y < src_height) ? src + src_stride : src;
258 if (src_sdtv != dest_sdtv)
260 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
261 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
263 memcpy (matrix, cog_identity_matrix_8bit, 12 * sizeof (gint));
265 /* 1. Handle the first destination scanline specially if it
266 * doesn't start at the macro pixel boundary, i.e. blend
267 * with the background! */
268 if (dest_y % 2 == 1) {
269 /* 1.1. Handle the first destination pixel if it doesn't
270 * start at the macro pixel boundary, i.e. blend with
272 if (dest_x % 2 == 1) {
277 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
279 CLAMP ((3 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
282 CLAMP ((3 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 4, 0,
292 /* 1.2. Copy all macro pixels from the source to the destination
293 * but blend with the background because we're only filling
294 * the lower part of the macro pixels. */
295 for (; j < w - 1; j += 2) {
296 y1 = src[4 * y_idx + 1];
297 y2 = src[4 * y_idx + 4 + 1];
299 u1 = src[4 * y_idx + 2];
300 u2 = src[4 * y_idx + 4 + 2];
302 v1 = src[4 * y_idx + 3];
303 v2 = src[4 * y_idx + 4 + 3];
305 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
306 destY[y_idx + 1] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
307 destU[uv_idx] = CLAMP (
308 (2 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
309 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
310 destV[uv_idx] = CLAMP (
311 (2 * destV[uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
312 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
318 /* 1.3. Now copy the last pixel if one exists and blend it
319 * with the background because we only fill part of
320 * the macro pixel. In case this is the last pixel of
321 * the destination we will a larger part. */
322 if (j == w - 1 && j == dest_width - 1) {
323 y1 = src[4 * y_idx + 1];
324 u1 = src[4 * y_idx + 2];
325 v1 = src[4 * y_idx + 3];
327 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
328 destU[uv_idx] = CLAMP (
329 (destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
330 destV[uv_idx] = CLAMP (
331 (destV[uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 2, 0, 255);
332 } else if (j == w - 1) {
333 y1 = src[4 * y_idx + 1];
334 u1 = src[4 * y_idx + 2];
335 v1 = src[4 * y_idx + 3];
337 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
338 destU[uv_idx] = CLAMP (
339 (3 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
342 CLAMP ((3 * destV[uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 4,
346 destY += dest_strideY;
347 destY2 += dest_strideY;
348 destU += dest_strideUV;
349 destV += dest_strideUV;
357 /* 2. Copy all macro pixel scanlines, the destination scanline
358 * now starts at macro pixel boundary. */
359 for (; i < h - 1; i += 2) {
360 /* 2.1. Handle the first destination pixel if it doesn't
361 * start at the macro pixel boundary, i.e. blend with
363 if (dest_x % 2 == 1) {
365 y2 = src2[4 * 0 + 1];
367 u2 = src2[4 * 0 + 2];
369 v2 = src2[4 * 0 + 3];
371 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
372 destY2[0] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
374 (2 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1,
375 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
377 (2 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1,
378 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
386 /* 2.2. Copy all macro pixels from the source to the destination.
387 * All pixels now start at macro pixel boundary, i.e. no
388 * blending with the background is necessary. */
389 for (; j < w - 1; j += 2) {
390 y1 = src[4 * y_idx + 1];
391 y2 = src[4 * y_idx + 4 + 1];
392 y3 = src2[4 * y_idx + 1];
393 y4 = src2[4 * y_idx + 4 + 1];
395 u1 = src[4 * y_idx + 2];
396 u2 = src[4 * y_idx + 4 + 2];
397 u3 = src2[4 * y_idx + 2];
398 u4 = src2[4 * y_idx + 4 + 2];
400 v1 = src[4 * y_idx + 3];
401 v2 = src[4 * y_idx + 4 + 3];
402 v3 = src2[4 * y_idx + 3];
403 v4 = src2[4 * y_idx + 4 + 3];
405 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
406 destY[y_idx + 1] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
407 destY2[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y3, u3, v3), 0, 255);
408 destY2[y_idx + 1] = CLAMP (APPLY_MATRIX (matrix, 0, y4, u4, v4), 0, 255);
410 destU[uv_idx] = CLAMP (
411 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 1, y2,
412 u2, v2) + APPLY_MATRIX (matrix, 1, y3, u3,
413 v3) + APPLY_MATRIX (matrix, 1, y4, u4, v4)) / 4, 0, 255);
414 destV[uv_idx] = CLAMP (
415 (APPLY_MATRIX (matrix, 2, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
416 u2, v2) + APPLY_MATRIX (matrix, 2, y3, u3,
417 v3) + APPLY_MATRIX (matrix, 2, y4, u4, v4)) / 4, 0, 255);
423 /* 2.3. Now copy the last pixel if one exists and blend it
424 * with the background because we only fill part of
425 * the macro pixel. In case this is the last pixel of
426 * the destination we will a larger part. */
427 if (j == w - 1 && j == dest_width - 1) {
428 y1 = src[4 * y_idx + 1];
429 y2 = src2[4 * y_idx + 1];
431 u1 = src[4 * y_idx + 2];
432 u2 = src2[4 * y_idx + 2];
434 v1 = src[4 * y_idx + 3];
435 v2 = src2[4 * y_idx + 3];
437 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
438 destY2[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
439 destU[uv_idx] = CLAMP (
440 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
441 u2, v2)) / 2, 0, 255);
442 destV[uv_idx] = CLAMP (
443 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
444 u2, v2)) / 2, 0, 255);
445 } else if (j == w - 1) {
446 y1 = src[4 * y_idx + 1];
447 y2 = src2[4 * y_idx + 1];
449 u1 = src[4 * y_idx + 2];
450 u2 = src2[4 * y_idx + 2];
452 v1 = src[4 * y_idx + 3];
453 v2 = src2[4 * y_idx + 3];
455 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
456 destY2[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
457 destU[uv_idx] = CLAMP (
458 (2 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
459 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
460 destV[uv_idx] = CLAMP (
461 (2 * destV[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
462 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
465 destY += 2 * dest_strideY;
466 destY2 += 2 * dest_strideY;
467 destU += dest_strideUV;
468 destV += dest_strideUV;
469 src += 2 * src_stride;
470 src2 += 2 * src_stride;
473 /* 3. Handle the last scanline if one exists. This again
474 * doesn't start at macro pixel boundary but should
475 * only fill the upper part of the macro pixels. */
476 if (i == h - 1 && i == dest_height - 1) {
477 /* 3.1. Handle the first destination pixel if it doesn't
478 * start at the macro pixel boundary, i.e. blend with
480 if (dest_x % 2 == 1) {
485 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
487 CLAMP ((destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
489 CLAMP ((destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 2, 0, 255);
498 /* 3.2. Copy all macro pixels from the source to the destination
499 * but blend with the background because we're only filling
500 * the upper part of the macro pixels. */
501 for (; j < w - 1; j += 2) {
502 y1 = src[4 * y_idx + 1];
503 y2 = src[4 * y_idx + 4 + 1];
505 u1 = src[4 * y_idx + 2];
506 u2 = src[4 * y_idx + 4 + 2];
508 v1 = src[4 * y_idx + 3];
509 v2 = src[4 * y_idx + 4 + 3];
511 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
512 destY[y_idx + 1] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
514 destU[uv_idx] = CLAMP (
515 (2 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
516 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
517 destV[uv_idx] = CLAMP (
518 (2 * destV[uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
519 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
525 /* 3.3. Now copy the last pixel if one exists and blend it
526 * with the background because we only fill part of
527 * the macro pixel. In case this is the last pixel of
528 * the destination we will a larger part. */
529 if (j == w - 1 && j == dest_width - 1) {
530 y1 = src[4 * y_idx + 1];
531 u1 = src[4 * y_idx + 2];
532 v1 = src[4 * y_idx + 3];
534 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
535 destU[uv_idx] = CLAMP (
536 (destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
537 destV[uv_idx] = CLAMP (
538 (destV[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
539 } else if (j == w - 1) {
540 y1 = src[4 * y_idx + 1];
541 u1 = src[4 * y_idx + 2];
542 v1 = src[4 * y_idx + 3];
544 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
545 destU[uv_idx] = CLAMP (
546 (3 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
549 CLAMP ((3 * destV[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4,
552 } else if (i == h - 1) {
553 /* 3.1. Handle the first destination pixel if it doesn't
554 * start at the macro pixel boundary, i.e. blend with
556 if (dest_x % 2 == 1) {
561 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
563 CLAMP ((3 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
566 CLAMP ((3 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 4, 0,
576 /* 3.2. Copy all macro pixels from the source to the destination
577 * but blend with the background because we're only filling
578 * the upper part of the macro pixels. */
579 for (; j < w - 1; j += 2) {
580 y1 = src[4 * y_idx + 1];
581 y2 = src[4 * y_idx + 4 + 1];
583 u1 = src[4 * y_idx + 2];
584 u2 = src[4 * y_idx + 4 + 2];
586 v1 = src[4 * y_idx + 3];
587 v2 = src[4 * y_idx + 4 + 3];
589 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
590 destY[y_idx + 1] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
592 destU[uv_idx] = CLAMP (
593 (2 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
594 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
595 destV[uv_idx] = CLAMP (
596 (2 * destV[uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
597 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
603 /* 3.3. Now copy the last pixel if one exists and blend it
604 * with the background because we only fill part of
605 * the macro pixel. In case this is the last pixel of
606 * the destination we will a larger part. */
607 if (j == w - 1 && j == dest_width - 1) {
608 y1 = src[4 * y_idx + 1];
609 u1 = src[4 * y_idx + 2];
610 v1 = src[4 * y_idx + 3];
612 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
613 destU[uv_idx] = CLAMP (
614 (destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
615 destV[uv_idx] = CLAMP (
616 (destV[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
617 } else if (j == w - 1) {
618 y1 = src[4 * y_idx + 1];
619 u1 = src[4 * y_idx + 2];
620 v1 = src[4 * y_idx + 3];
622 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
623 destU[uv_idx] = CLAMP (
624 (3 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
627 CLAMP ((3 * destV[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4,
634 fill_planar_yuv (GstVideoBoxFill fill_type, guint b_alpha,
635 GstVideoFormat format, guint8 * dest, gboolean sdtv, gint width,
638 guint8 empty_pixel[3];
639 guint8 *destY, *destU, *destV;
640 gint strideY, strideUV;
641 gint heightY, heightUV;
644 empty_pixel[0] = yuv_sdtv_colors_Y[fill_type];
645 empty_pixel[1] = yuv_sdtv_colors_U[fill_type];
646 empty_pixel[2] = yuv_sdtv_colors_V[fill_type];
648 empty_pixel[0] = yuv_hdtv_colors_Y[fill_type];
649 empty_pixel[1] = yuv_hdtv_colors_U[fill_type];
650 empty_pixel[2] = yuv_hdtv_colors_V[fill_type];
653 strideY = gst_video_format_get_row_stride (format, 0, width);
654 strideUV = gst_video_format_get_row_stride (format, 1, width);
657 dest + gst_video_format_get_component_offset (format, 0, width, height);
659 dest + gst_video_format_get_component_offset (format, 1, width, height);
661 dest + gst_video_format_get_component_offset (format, 2, width, height);
663 heightY = gst_video_format_get_component_height (format, 0, height);
664 heightUV = gst_video_format_get_component_height (format, 1, height);
666 memset (destY, empty_pixel[0], strideY * heightY);
667 memset (destU, empty_pixel[1], strideUV * heightUV);
668 memset (destV, empty_pixel[2], strideUV * heightUV);
672 copy_y444_y444 (guint i_alpha, GstVideoFormat dest_format, guint8 * dest,
673 gboolean dest_sdtv, gint dest_width, gint dest_height, gint dest_x,
674 gint dest_y, GstVideoFormat src_format, const guint8 * src,
675 gboolean src_sdtv, gint src_width, gint src_height, gint src_x, gint src_y,
679 guint8 *destY, *destU, *destV;
680 const guint8 *srcY, *srcU, *srcV;
684 dest_stride = gst_video_format_get_row_stride (dest_format, 0, dest_width);
685 src_stride = gst_video_format_get_row_stride (src_format, 0, src_width);
688 dest + gst_video_format_get_component_offset (dest_format, 0,
689 dest_width, dest_height);
691 dest + gst_video_format_get_component_offset (dest_format, 1,
692 dest_width, dest_height);
694 dest + gst_video_format_get_component_offset (dest_format, 2,
695 dest_width, dest_height);
698 src + gst_video_format_get_component_offset (src_format, 0,
699 src_width, src_height);
701 src + gst_video_format_get_component_offset (src_format, 1,
702 src_width, src_height);
704 src + gst_video_format_get_component_offset (src_format, 2,
705 src_width, src_height);
707 destY = destY + dest_y * dest_stride + dest_x;
708 destU = destU + dest_y * dest_stride + dest_x;
709 destV = destV + dest_y * dest_stride + dest_x;
711 srcY = srcY + src_y * src_stride + src_x;
712 srcU = srcU + src_y * src_stride + src_x;
713 srcV = srcV + src_y * src_stride + src_x;
715 if (src_sdtv != dest_sdtv) {
720 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
721 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
723 for (i = 0; i < h; i++) {
724 for (j = 0; j < w; j++) {
725 y = APPLY_MATRIX (matrix, 0, srcY[j], srcU[j], srcV[j]);
726 u = APPLY_MATRIX (matrix, 1, srcY[j], srcU[j], srcV[j]);
727 v = APPLY_MATRIX (matrix, 2, srcY[j], srcU[j], srcV[j]);
733 destY += dest_stride;
734 destU += dest_stride;
735 destV += dest_stride;
742 for (i = 0; i < h; i++) {
743 memcpy (destY, srcY, w);
744 memcpy (destU, srcU, w);
745 memcpy (destV, srcV, w);
747 destY += dest_stride;
748 destU += dest_stride;
749 destV += dest_stride;
759 copy_y42b_y42b (guint i_alpha, GstVideoFormat dest_format, guint8 * dest,
760 gboolean dest_sdtv, gint dest_width, gint dest_height, gint dest_x,
761 gint dest_y, GstVideoFormat src_format, const guint8 * src,
762 gboolean src_sdtv, gint src_width, gint src_height, gint src_x, gint src_y,
766 guint8 *destY, *destU, *destV;
767 const guint8 *srcY, *srcU, *srcV;
768 gint dest_strideY, dest_strideUV;
769 gint src_strideY, src_strideUV;
770 gint src_y_idx, src_uv_idx;
771 gint dest_y_idx, dest_uv_idx;
777 dest_strideY = gst_video_format_get_row_stride (dest_format, 0, dest_width);
778 dest_strideUV = gst_video_format_get_row_stride (dest_format, 1, dest_width);
779 src_strideY = gst_video_format_get_row_stride (src_format, 0, src_width);
780 src_strideUV = gst_video_format_get_row_stride (src_format, 1, src_width);
783 dest + gst_video_format_get_component_offset (dest_format, 0,
784 dest_width, dest_height);
786 dest + gst_video_format_get_component_offset (dest_format, 1,
787 dest_width, dest_height);
789 dest + gst_video_format_get_component_offset (dest_format, 2,
790 dest_width, dest_height);
793 src + gst_video_format_get_component_offset (src_format, 0,
794 src_width, src_height);
796 src + gst_video_format_get_component_offset (src_format, 1,
797 src_width, src_height);
799 src + gst_video_format_get_component_offset (src_format, 2,
800 src_width, src_height);
803 destY = destY + dest_y * dest_strideY + dest_x;
804 destU = destU + dest_y * dest_strideUV + dest_x / 2;
805 destV = destV + dest_y * dest_strideUV + dest_x / 2;
807 srcY = srcY + src_y * src_strideY + src_x;
808 srcU = srcU + src_y * src_strideUV + src_x / 2;
809 srcV = srcV + src_y * src_strideUV + src_x / 2;
814 if (src_sdtv != dest_sdtv)
816 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
817 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
819 memcpy (matrix, cog_identity_matrix_8bit, 12 * sizeof (gint));
821 /* 1. Copy all macro pixel scanlines, the destination scanline
822 * now starts at macro pixel boundary. */
823 for (i = dest_y; i < h; i++) {
824 /* 1.1. Handle the first destination pixel if it doesn't
825 * start at the macro pixel boundary, i.e. blend with
827 if (dest_x % 2 == 1) {
832 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
834 (destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
836 (destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 2, 0, 255);
838 src_y_idx = dest_y_idx = dest_uv_idx = 1;
839 src_uv_idx = (src_x % 2) + 1;
842 src_y_idx = dest_y_idx = dest_uv_idx = 0;
843 src_uv_idx = (src_x % 2);
846 /* 1.2. Copy all macro pixels from the source to the destination.
847 * All pixels now start at macro pixel boundary, i.e. no
848 * blending with the background is necessary. */
849 for (; j < w - 1; j += 2) {
850 y1 = srcY[src_y_idx];
851 y2 = srcY[src_y_idx + 1];
853 u1 = srcU[src_uv_idx / 2];
854 v1 = srcV[src_uv_idx / 2];
856 u2 = srcU[src_uv_idx / 2];
857 v2 = srcV[src_uv_idx / 2];
860 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
861 destY[dest_y_idx + 1] =
862 CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
864 destU[dest_uv_idx] = CLAMP (
865 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 1, y2,
866 u2, v2)) / 2, 0, 255);
867 destV[dest_uv_idx] = CLAMP (
868 (APPLY_MATRIX (matrix, 2, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
869 u2, v2)) / 2, 0, 255);
876 /* 1.3. Now copy the last pixel if one exists and blend it
877 * with the background because we only fill part of
878 * the macro pixel. In case this is the last pixel of
879 * the destination we will a larger part. */
880 if (j == w - 1 && j == dest_width - 1) {
881 y1 = srcY[src_y_idx];
882 u1 = srcU[src_uv_idx / 2];
883 v1 = srcV[src_uv_idx / 2];
885 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
886 destU[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
887 destV[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
888 } else if (j == w - 1) {
889 y1 = srcY[src_y_idx];
890 u1 = srcU[src_uv_idx / 2];
891 v1 = srcV[src_uv_idx / 2];
893 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
894 destU[dest_uv_idx] = CLAMP (
895 (destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
897 destV[dest_uv_idx] = CLAMP (
898 (destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
902 destY += dest_strideY;
903 destU += dest_strideUV;
904 destV += dest_strideUV;
907 srcU += src_strideUV;
908 srcV += src_strideUV;
913 copy_y41b_y41b (guint i_alpha, GstVideoFormat dest_format, guint8 * dest,
914 gboolean dest_sdtv, gint dest_width, gint dest_height, gint dest_x,
915 gint dest_y, GstVideoFormat src_format, const guint8 * src,
916 gboolean src_sdtv, gint src_width, gint src_height, gint src_x, gint src_y,
920 guint8 *destY, *destU, *destV;
921 const guint8 *srcY, *srcU, *srcV;
922 gint dest_strideY, dest_strideUV;
923 gint src_strideY, src_strideUV;
924 gint src_y_idx, src_uv_idx;
925 gint dest_y_idx, dest_uv_idx;
931 dest_strideY = gst_video_format_get_row_stride (dest_format, 0, dest_width);
932 dest_strideUV = gst_video_format_get_row_stride (dest_format, 1, dest_width);
933 src_strideY = gst_video_format_get_row_stride (src_format, 0, src_width);
934 src_strideUV = gst_video_format_get_row_stride (src_format, 1, src_width);
937 dest + gst_video_format_get_component_offset (dest_format, 0,
938 dest_width, dest_height);
940 dest + gst_video_format_get_component_offset (dest_format, 1,
941 dest_width, dest_height);
943 dest + gst_video_format_get_component_offset (dest_format, 2,
944 dest_width, dest_height);
947 src + gst_video_format_get_component_offset (src_format, 0,
948 src_width, src_height);
950 src + gst_video_format_get_component_offset (src_format, 1,
951 src_width, src_height);
953 src + gst_video_format_get_component_offset (src_format, 2,
954 src_width, src_height);
957 destY = destY + dest_y * dest_strideY + dest_x;
958 destU = destU + dest_y * dest_strideUV + dest_x / 4;
959 destV = destV + dest_y * dest_strideUV + dest_x / 4;
961 srcY = srcY + src_y * src_strideY + src_x;
962 srcU = srcU + src_y * src_strideUV + src_x / 4;
963 srcV = srcV + src_y * src_strideUV + src_x / 4;
968 if (src_sdtv != dest_sdtv)
970 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
971 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
973 memcpy (matrix, cog_identity_matrix_8bit, 12 * sizeof (gint));
975 /* 1. Copy all macro pixel scanlines, the destination scanline
976 * now starts at macro pixel boundary. */
977 for (i = dest_y; i < h; i++) {
978 /* 1.1. Handle the first destination pixel if it doesn't
979 * start at the macro pixel boundary, i.e. blend with
981 if (dest_x % 4 == 1) {
988 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
989 destY[1] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u1, v1), 0, 255);
990 destY[2] = CLAMP (APPLY_MATRIX (matrix, 0, y3, u1, v1), 0, 255);
993 (destU[0] + APPLY_MATRIX (matrix, 1, y1, u1,
994 v1) + APPLY_MATRIX (matrix, 1, y2, u1,
995 v1) + APPLY_MATRIX (matrix, 1, y3, u1, v1)) / 4, 0, 255);
997 CLAMP ((destV[0] + APPLY_MATRIX (matrix, 2, y1, u1,
998 v1) + APPLY_MATRIX (matrix, 2, y2, u1,
999 v1) + APPLY_MATRIX (matrix, 2, y3, u1, v1)) / 4, 0, 255);
1002 src_y_idx = dest_y_idx = 3;
1004 src_uv_idx = (src_x % 4) + 3;
1005 } else if (dest_x % 4 == 2) {
1011 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1012 destY[1] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u1, v1), 0, 255);
1015 (2 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1,
1016 v1) + APPLY_MATRIX (matrix, 1, y2, u1, v1)) / 4, 0, 255);
1018 CLAMP ((2 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1,
1019 v1) + APPLY_MATRIX (matrix, 2, y2, u1, v1)) / 4, 0, 255);
1022 src_y_idx = dest_y_idx = 2;
1024 src_uv_idx = (src_x % 4) + 2;
1025 } else if (dest_x % 4 == 3) {
1030 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1033 (3 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0, 255);
1035 (3 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 4, 0, 255);
1038 src_y_idx = dest_y_idx = 1;
1040 src_uv_idx = (src_x % 4) + 1;
1043 src_y_idx = dest_y_idx = dest_uv_idx = 0;
1044 src_uv_idx = (src_x % 4);
1047 /* 1.2. Copy all macro pixels from the source to the destination.
1048 * All pixels now start at macro pixel boundary, i.e. no
1049 * blending with the background is necessary. */
1050 for (; j < w - 3; j += 4) {
1051 y1 = srcY[src_y_idx];
1052 y2 = srcY[src_y_idx + 1];
1053 y3 = srcY[src_y_idx + 2];
1054 y4 = srcY[src_y_idx + 3];
1056 u1 = srcU[src_uv_idx / 4];
1057 v1 = srcV[src_uv_idx / 4];
1059 u2 = srcU[src_uv_idx / 4];
1060 v2 = srcV[src_uv_idx / 4];
1062 u3 = srcU[src_uv_idx / 4];
1063 v3 = srcV[src_uv_idx / 4];
1065 u4 = srcU[src_uv_idx / 4];
1066 v4 = srcV[src_uv_idx / 4];
1069 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1070 destY[dest_y_idx + 1] =
1071 CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
1072 destY[dest_y_idx + 2] =
1073 CLAMP (APPLY_MATRIX (matrix, 0, y3, u3, v3), 0, 255);
1074 destY[dest_y_idx + 3] =
1075 CLAMP (APPLY_MATRIX (matrix, 0, y4, u4, v4), 0, 255);
1077 destU[dest_uv_idx] = CLAMP (
1078 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 1, y2,
1079 u2, v2) + APPLY_MATRIX (matrix, 1, y3, u3,
1080 v3) + APPLY_MATRIX (matrix, 1, y4, u4, v4)) / 4, 0, 255);
1081 destV[dest_uv_idx] =
1082 CLAMP ((APPLY_MATRIX (matrix, 2, y1, u1, v1) + APPLY_MATRIX (matrix,
1083 2, y2, u2, v2) + APPLY_MATRIX (matrix, 2, y3, u3,
1084 v3) + APPLY_MATRIX (matrix, 2, y4, u4, v4)) / 4, 0, 255);
1091 /* 1.3. Now copy the last pixel if one exists and blend it
1092 * with the background because we only fill part of
1093 * the macro pixel. In case this is the last pixel of
1094 * the destination we will a larger part. */
1095 if (j == w - 1 && j == dest_width - 1) {
1096 y1 = srcY[src_y_idx];
1097 u1 = srcU[src_uv_idx / 4];
1098 v1 = srcV[src_uv_idx / 4];
1100 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1101 destU[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
1102 destV[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
1103 } else if (j == w - 1) {
1104 y1 = srcY[src_y_idx];
1105 u1 = srcU[src_uv_idx / 4];
1106 v1 = srcV[src_uv_idx / 4];
1108 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1109 destU[dest_uv_idx] = CLAMP (
1110 (destU[dest_uv_idx] + 3 * APPLY_MATRIX (matrix, 1, y1, u1,
1112 destV[dest_uv_idx] = CLAMP (
1113 (destV[dest_uv_idx] + 3 * APPLY_MATRIX (matrix, 1, y1, u1,
1115 } else if (j == w - 2 && j == dest_width - 2) {
1116 y1 = srcY[src_y_idx];
1117 y2 = srcY[src_y_idx + 1];
1118 u1 = srcU[src_uv_idx / 4];
1119 v1 = srcV[src_uv_idx / 4];
1121 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1122 destY[dest_y_idx + 1] =
1123 CLAMP (APPLY_MATRIX (matrix, 0, y2, u1, v1), 0, 255);
1124 destU[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
1125 destV[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
1126 } else if (j == w - 2) {
1127 y1 = srcY[src_y_idx];
1128 y2 = srcY[src_y_idx + 1];
1129 u1 = srcU[src_uv_idx / 4];
1130 v1 = srcV[src_uv_idx / 4];
1132 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1133 destY[dest_y_idx + 1] =
1134 CLAMP (APPLY_MATRIX (matrix, 0, y2, u1, v1), 0, 255);
1135 destU[dest_uv_idx] =
1136 CLAMP ((destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1138 destV[dest_uv_idx] =
1139 CLAMP ((destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1141 } else if (j == w - 3 && j == dest_width - 3) {
1142 y1 = srcY[src_y_idx];
1143 y2 = srcY[src_y_idx + 1];
1144 y3 = srcY[src_y_idx + 2];
1145 u1 = srcU[src_uv_idx / 4];
1146 v1 = srcV[src_uv_idx / 4];
1148 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1149 destY[dest_y_idx + 1] =
1150 CLAMP (APPLY_MATRIX (matrix, 0, y2, u1, v1), 0, 255);
1151 destY[dest_y_idx + 2] =
1152 CLAMP (APPLY_MATRIX (matrix, 0, y3, u1, v1), 0, 255);
1153 destU[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
1154 destV[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
1155 } else if (j == w - 3) {
1156 y1 = srcY[src_y_idx];
1157 y2 = srcY[src_y_idx + 1];
1158 y3 = srcY[src_y_idx + 2];
1159 u1 = srcU[src_uv_idx / 4];
1160 v1 = srcV[src_uv_idx / 4];
1162 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1163 destY[dest_y_idx + 1] =
1164 CLAMP (APPLY_MATRIX (matrix, 0, y2, u1, v1), 0, 255);
1165 destY[dest_y_idx + 2] =
1166 CLAMP (APPLY_MATRIX (matrix, 0, y3, u1, v1), 0, 255);
1167 destU[dest_uv_idx] =
1168 CLAMP ((3 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1170 destV[dest_uv_idx] =
1171 CLAMP ((3 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1175 destY += dest_strideY;
1176 destU += dest_strideUV;
1177 destV += dest_strideUV;
1178 srcY += src_strideY;
1179 srcU += src_strideUV;
1180 srcV += src_strideUV;
/* copy_i420_i420:
 * Copies a w x h region of an I420 source frame into an I420 destination
 * frame at (dest_x, dest_y), converting between SDTV (BT.601) and HDTV
 * (BT.709) YCbCr flavours when src_sdtv != dest_sdtv (identity matrix
 * otherwise).  Because I420 subsamples chroma in 2x2 macro pixels, any
 * destination edge that does not fall on a macro-pixel boundary is blended
 * with the chroma already present in the destination ("the background")
 * using weighted averages (1/2, 1/4 or 3/4 weights depending on how much of
 * the macro pixel the copy covers).
 *
 * NOTE(review): this extract is missing a number of lines (the per-line
 * original numbering has gaps: the `static void` header, braces, some
 * declarations and CLAMP continuation lines), so several statements below
 * appear truncated.  Comments describe only what the visible code shows. */
1185 copy_i420_i420 (guint i_alpha, GstVideoFormat dest_format, guint8 * dest,
1186 gboolean dest_sdtv, gint dest_width, gint dest_height, gint dest_x,
1187 gint dest_y, GstVideoFormat src_format, const guint8 * src,
1188 gboolean src_sdtv, gint src_width, gint src_height, gint src_x, gint src_y,
1192 guint8 *destY, *destU, *destV;
1193 const guint8 *srcY, *srcU, *srcV;
1195 const guint8 *srcY2, *srcU2, *srcV2;
1196 gint dest_strideY, dest_strideUV;
1197 gint src_strideY, src_strideUV;
1198 gint src_y_idx, src_uv_idx;
1199 gint dest_y_idx, dest_uv_idx;
1201 gint y1, y2, y3, y4;
1202 gint u1, u2, u3, u4;
1203 gint v1, v2, v3, v4;
/* Plane strides and base offsets are computed for I420 for the Y plane and
 * from the caller-supplied formats for the chroma planes. */
1206 gst_video_format_get_row_stride (GST_VIDEO_FORMAT_I420, 0, dest_width);
1208 gst_video_format_get_row_stride (GST_VIDEO_FORMAT_I420, 1, dest_width);
1210 gst_video_format_get_row_stride (GST_VIDEO_FORMAT_I420, 0, src_width);
1212 gst_video_format_get_row_stride (GST_VIDEO_FORMAT_I420, 1, src_width);
1215 dest + gst_video_format_get_component_offset (GST_VIDEO_FORMAT_I420, 0,
1216 dest_width, dest_height);
1218 dest + gst_video_format_get_component_offset (dest_format, 1,
1219 dest_width, dest_height);
1221 dest + gst_video_format_get_component_offset (dest_format, 2,
1222 dest_width, dest_height);
1225 src + gst_video_format_get_component_offset (GST_VIDEO_FORMAT_I420, 0,
1226 src_width, src_height);
1228 src + gst_video_format_get_component_offset (src_format, 1,
1229 src_width, src_height);
1231 src + gst_video_format_get_component_offset (src_format, 2,
1232 src_width, src_height);
/* Advance the plane pointers to the requested rectangle; chroma planes are
 * half resolution in both directions, hence the / 2. */
1235 destY = destY + dest_y * dest_strideY + dest_x;
1236 destU = destU + (dest_y / 2) * dest_strideUV + dest_x / 2;
1237 destV = destV + (dest_y / 2) * dest_strideUV + dest_x / 2;
1239 srcY = srcY + src_y * src_strideY + src_x;
1240 srcU = srcU + (src_y / 2) * src_strideUV + src_x / 2;
1241 srcV = srcV + (src_y / 2) * src_strideUV + src_x / 2;
/* Second-scanline pointers used when copying whole 2-line macro pixels. */
1243 destY2 = destY + dest_strideY;
1244 srcY2 = srcY + src_strideY;
/* Pick the 8-bit colorimetry conversion matrix (rows: 0=Y', 1=Cb, 2=Cr),
 * or the identity matrix when both frames share the same flavour. */
1249 if (src_sdtv != dest_sdtv)
1251 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
1252 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
1254 memcpy (matrix, cog_identity_matrix_8bit, 12 * sizeof (gint));
1256 /* 1. Handle the first destination scanline specially if it
1257 * doesn't start at the macro pixel boundary, i.e. blend
1258 * with the background! */
1259 if (dest_y % 2 == 1) {
1260 /* 1.1. Handle the first destination pixel if it doesn't
1261 * start at the macro pixel boundary, i.e. blend with
1262 * the background! */
1263 if (dest_x % 2 == 1) {
1268 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1270 CLAMP ((3 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
1273 CLAMP ((3 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 4, 0,
1277 src_y_idx = dest_y_idx = dest_uv_idx = 1;
1278 src_uv_idx = (src_x % 2) + 1;
1281 src_y_idx = dest_y_idx = dest_uv_idx = 0;
1282 src_uv_idx = (src_x % 2);
1285 /* 1.2. Copy all macro pixels from the source to the destination
1286 * but blend with the background because we're only filling
1287 * the lower part of the macro pixels. */
1288 for (; j < w - 1; j += 2) {
1289 y1 = srcY[src_y_idx];
1290 y2 = srcY[src_y_idx + 1];
1292 u1 = srcU[src_uv_idx / 2];
1293 v1 = srcV[src_uv_idx / 2];
1295 u2 = srcU[src_uv_idx / 2];
1296 v2 = srcV[src_uv_idx / 2];
1299 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1300 destY[dest_y_idx + 1] =
1301 CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
/* Chroma: average of the two new samples with double weight on the
 * existing background value (2:1:1). */
1302 destU[dest_uv_idx] =
1303 CLAMP ((2 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1304 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
1305 destV[dest_uv_idx] =
1306 CLAMP ((2 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
1307 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
1314 /* 1.3. Now copy the last pixel if one exists and blend it
1315 * with the background because we only fill part of
1316 * the macro pixel. In case this is the last pixel of
1317 * the destination we will fill a larger part. */
1318 if (j == w - 1 && j == dest_width - 1) {
1319 y1 = srcY[src_y_idx];
1320 u1 = srcU[src_uv_idx / 2];
1321 v1 = srcV[src_uv_idx / 2];
1323 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1324 destU[dest_uv_idx] = CLAMP (
1325 (destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0,
1327 destV[dest_uv_idx] =
1328 CLAMP ((destV[dest_uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
1330 } else if (j == w - 1) {
1331 y1 = srcY[src_y_idx];
1332 u1 = srcU[src_uv_idx / 2];
1333 v1 = srcV[src_uv_idx / 2];
1335 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1336 destU[dest_uv_idx] = CLAMP (
1337 (3 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4,
1339 destV[dest_uv_idx] =
1340 CLAMP ((3 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
/* Advance to the next scanline; chroma rows only advance when the source
 * row moves across a macro-pixel boundary. */
1344 destY += dest_strideY;
1345 destY2 += dest_strideY;
1346 destU += dest_strideUV;
1347 destV += dest_strideUV;
1348 srcY += src_strideY;
1349 srcY2 += src_strideY;
1351 if (src_y % 2 == 0) {
1352 srcU += src_strideUV;
1353 srcV += src_strideUV;
1360 /* 2. Copy all macro pixel scanlines, the destination scanline
1361 * now starts at macro pixel boundary. */
1362 for (; i < h - 1; i += 2) {
1363 /* 2.1. Handle the first destination pixel if it doesn't
1364 * start at the macro pixel boundary, i.e. blend with
1365 * the background! */
1369 if (src_y % 2 == 1) {
1370 srcU2 += src_strideUV;
1371 srcV2 += src_strideUV;
1374 if (dest_x % 2 == 1) {
1382 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1383 destY2[0] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
1385 (2 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1,
1386 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
1388 (2 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1,
1389 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
1391 src_y_idx = dest_y_idx = dest_uv_idx = 1;
1392 src_uv_idx = (src_x % 2) + 1;
1395 src_y_idx = dest_y_idx = dest_uv_idx = 0;
1396 src_uv_idx = (src_x % 2);
1399 /* 2.2. Copy all macro pixels from the source to the destination.
1400 * All pixels now start at macro pixel boundary, i.e. no
1401 * blending with the background is necessary. */
1402 for (; j < w - 1; j += 2) {
1403 y1 = srcY[src_y_idx];
1404 y2 = srcY[src_y_idx + 1];
1405 y3 = srcY2[src_y_idx];
1406 y4 = srcY2[src_y_idx + 1];
1408 u1 = srcU[src_uv_idx / 2];
1409 u3 = srcU2[src_uv_idx / 2];
1410 v1 = srcV[src_uv_idx / 2];
1411 v3 = srcV2[src_uv_idx / 2];
1413 u2 = srcU[src_uv_idx / 2];
1414 u4 = srcU2[src_uv_idx / 2];
1415 v2 = srcV[src_uv_idx / 2];
1416 v4 = srcV2[src_uv_idx / 2];
1419 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1420 destY[dest_y_idx + 1] =
1421 CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
1422 destY2[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y3, u3, v3), 0, 255);
1423 destY2[dest_y_idx + 1] =
1424 CLAMP (APPLY_MATRIX (matrix, 0, y4, u4, v4), 0, 255);
/* Full macro pixel: chroma is the plain average of the four samples. */
1426 destU[dest_uv_idx] = CLAMP (
1427 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 1, y2,
1428 u2, v2) + APPLY_MATRIX (matrix, 1, y3, u3,
1429 v3) + APPLY_MATRIX (matrix, 1, y4, u4, v4)) / 4, 0, 255);
1430 destV[dest_uv_idx] = CLAMP (
1431 (APPLY_MATRIX (matrix, 2, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
1432 u2, v2) + APPLY_MATRIX (matrix, 2, y3, u3,
1433 v3) + APPLY_MATRIX (matrix, 2, y4, u4, v4)) / 4, 0, 255);
1440 /* 2.3. Now copy the last pixel if one exists and blend it
1441 * with the background because we only fill part of
1442 * the macro pixel. In case this is the last pixel of
1443 * the destination we will fill a larger part. */
1444 if (j == w - 1 && j == dest_width - 1) {
1445 y1 = srcY[src_y_idx];
1446 y2 = srcY2[src_y_idx];
1448 u1 = srcU[src_uv_idx / 2];
1449 u2 = srcU2[src_uv_idx / 2];
1451 v1 = srcV[src_uv_idx / 2];
1452 v2 = srcV2[src_uv_idx / 2];
1454 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1455 destY2[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
/* NOTE(review): suspected copy/paste bug — the U average mixes matrix
 * row 1 (Cb) and row 2 (Cr), and the V average below is the identical
 * expression.  Expected: row 1 for both U terms, row 2 for both V terms. */
1456 destU[dest_uv_idx] = CLAMP (
1457 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
1458 u2, v2)) / 2, 0, 255);
1459 destV[dest_uv_idx] = CLAMP (
1460 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
1461 u2, v2)) / 2, 0, 255);
1462 } else if (j == w - 1) {
1463 y1 = srcY[src_y_idx];
1464 y2 = srcY2[src_y_idx];
1466 u1 = srcU[src_uv_idx / 2];
1467 u2 = srcU2[src_uv_idx / 2];
1469 v1 = srcV[src_uv_idx / 2];
1470 v2 = srcV2[src_uv_idx / 2];
1472 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1473 destY2[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
/* NOTE(review): same mixed-row pattern here (row 1 + row 2 inside both
 * the U and the V blend) — verify against upstream gstvideobox.c. */
1474 destU[dest_uv_idx] = CLAMP (
1475 (2 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1476 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
1477 destV[dest_uv_idx] = CLAMP (
1478 (2 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1479 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
/* Advance two luma rows / one chroma row per macro-pixel scanline. */
1482 destY += 2 * dest_strideY;
1483 destY2 += 2 * dest_strideY;
1484 destU += dest_strideUV;
1485 destV += dest_strideUV;
1486 srcY += 2 * src_strideY;
1487 srcY2 += 2 * src_strideY;
1490 srcU += src_strideUV;
1491 srcV += src_strideUV;
1494 /* 3. Handle the last scanline if one exists. This again
1495 * doesn't start at macro pixel boundary but should
1496 * only fill the upper part of the macro pixels. */
1497 if (i == h - 1 && i == dest_height - 1) {
1498 /* 3.1. Handle the first destination pixel if it doesn't
1499 * start at the macro pixel boundary, i.e. blend with
1500 * the background! */
1501 if (dest_x % 2 == 1) {
1506 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1508 CLAMP ((destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
1510 CLAMP ((destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 2, 0, 255);
1513 src_y_idx = dest_y_idx = dest_uv_idx = 1;
1514 src_uv_idx = (src_x % 2) + 1;
1517 src_y_idx = dest_y_idx = dest_uv_idx = 0;
1518 src_uv_idx = (src_x % 2);
1521 /* 3.2. Copy all macro pixels from the source to the destination
1522 * but blend with the background because we're only filling
1523 * the upper part of the macro pixels. */
1524 for (; j < w - 1; j += 2) {
1525 y1 = srcY[src_y_idx];
1526 y2 = srcY[src_y_idx + 1];
1528 u1 = srcU[src_uv_idx / 2];
1529 v1 = srcV[src_uv_idx / 2];
1531 u2 = srcU[src_uv_idx / 2];
1532 v2 = srcV[src_uv_idx / 2];
1535 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1536 destY[dest_y_idx + 1] =
1537 CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
1539 destU[dest_uv_idx] = CLAMP (
1540 (2 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1541 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
1542 destV[dest_uv_idx] = CLAMP (
1543 (2 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
1544 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
1551 /* 3.3. Now copy the last pixel if one exists and blend it
1552 * with the background because we only fill part of
1553 * the macro pixel. In case this is the last pixel of
1554 * the destination we will fill a larger part. */
1555 if (j == w - 1 && j == dest_width - 1) {
1556 y1 = srcY[src_y_idx];
1557 u1 = srcU[src_uv_idx / 2];
1558 v1 = srcV[src_uv_idx / 2];
1560 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1561 destU[dest_uv_idx] = CLAMP (
1562 (destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0,
/* NOTE(review): destV blended with matrix row 1 (the Cb row) — suspected
 * typo; the Cr row is 2, as used in the analogous branches above. */
1564 destV[dest_uv_idx] =
1565 CLAMP ((destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1567 } else if (j == w - 1) {
1568 y1 = srcY[src_y_idx];
1569 u1 = srcU[src_uv_idx / 2];
1570 v1 = srcV[src_uv_idx / 2];
1572 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1573 destU[dest_uv_idx] = CLAMP (
1574 (3 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4,
/* NOTE(review): destV uses matrix row 1 again — suspected typo (cf. 1340). */
1576 destV[dest_uv_idx] =
1577 CLAMP ((3 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1580 } else if (i == h - 1) {
1581 /* 3.1. Handle the first destination pixel if it doesn't
1582 * start at the macro pixel boundary, i.e. blend with
1583 * the background! */
1584 if (dest_x % 2 == 1) {
1589 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1591 CLAMP ((3 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
1594 CLAMP ((3 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 4, 0,
1598 src_y_idx = dest_y_idx = dest_uv_idx = 1;
1599 src_uv_idx = (src_x % 2) + 1;
1602 src_y_idx = dest_y_idx = dest_uv_idx = 0;
1603 src_uv_idx = (src_x % 2);
1606 /* 3.2. Copy all macro pixels from the source to the destination
1607 * but blend with the background because we're only filling
1608 * the upper part of the macro pixels. */
1609 for (; j < w - 1; j += 2) {
1610 y1 = srcY[src_y_idx];
1611 y2 = srcY[src_y_idx + 1];
1613 u1 = srcU[src_uv_idx / 2];
1614 v1 = srcV[src_uv_idx / 2];
1616 u2 = srcU[src_uv_idx / 2];
1617 v2 = srcV[src_uv_idx / 2];
1620 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1621 destY[dest_y_idx + 1] =
1622 CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
1624 destU[dest_uv_idx] = CLAMP (
1625 (2 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1626 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
1627 destV[dest_uv_idx] = CLAMP (
1628 (2 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
1629 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
1636 /* 3.3. Now copy the last pixel if one exists and blend it
1637 * with the background because we only fill part of
1638 * the macro pixel. In case this is the last pixel of
1639 * the destination we will fill a larger part. */
1640 if (j == w - 1 && j == dest_width - 1) {
1641 y1 = srcY[src_y_idx];
1642 u1 = srcU[src_uv_idx / 2];
1643 v1 = srcV[src_uv_idx / 2];
1645 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1646 destU[dest_uv_idx] = CLAMP (
1647 (destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0,
/* NOTE(review): destV uses matrix row 1 — suspected typo, should be row 2. */
1649 destV[dest_uv_idx] =
1650 CLAMP ((destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1652 } else if (j == w - 1) {
1653 y1 = srcY[src_y_idx];
1654 u1 = srcU[src_uv_idx / 2];
1655 v1 = srcV[src_uv_idx / 2];
1657 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1658 destU[dest_uv_idx] = CLAMP (
1659 (3 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4,
/* NOTE(review): destV uses matrix row 1 — suspected typo, should be row 2. */
1661 destV[dest_uv_idx] =
1662 CLAMP ((3 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
/* copy_i420_ayuv:
 * Converts a w x h region of an I420 source into packed AYUV at
 * (dest_x, dest_y), writing the caller-supplied (clamped) i_alpha into every
 * A byte.  When src_sdtv != dest_sdtv each pixel is pushed through the
 * SDTV<->HDTV conversion matrix; otherwise the samples are copied verbatim
 * (the faster second loop).  Chroma is read at half horizontal resolution
 * via uv_idx / 2 and the chroma row pointers only advance on every other
 * source row.
 *
 * NOTE(review): the matrix path stores y1/u1/v1 into dest without CLAMP,
 * unlike the other copy functions in this file — values outside 0..255 would
 * be truncated by the guint8 store; verify against upstream.
 * NOTE(review): this extract is missing some lines (gaps in the per-line
 * original numbering), e.g. declarations and closing braces. */
1669 copy_i420_ayuv (guint i_alpha, GstVideoFormat dest_format, guint8 * dest,
1670 gboolean dest_sdtv, gint dest_width, gint dest_height, gint dest_x,
1671 gint dest_y, GstVideoFormat src_format, const guint8 * src,
1672 gboolean src_sdtv, gint src_width, gint src_height, gint src_x, gint src_y,
1675 const guint8 *srcY, *srcU, *srcV;
1676 gint src_strideY, src_strideUV;
1680 gst_video_format_get_row_stride (GST_VIDEO_FORMAT_I420, 0, src_width);
1682 gst_video_format_get_row_stride (GST_VIDEO_FORMAT_I420, 1, src_width);
1685 src + gst_video_format_get_component_offset (GST_VIDEO_FORMAT_I420, 0,
1686 src_width, src_height);
1688 src + gst_video_format_get_component_offset (src_format, 1,
1689 src_width, src_height);
1691 src + gst_video_format_get_component_offset (src_format, 2,
1692 src_width, src_height);
/* AYUV is 4 bytes per pixel, no row padding. */
1694 dest_stride = dest_width * 4;
1696 dest = dest + dest_y * dest_stride + dest_x * 4;
1698 srcY = srcY + src_y * src_strideY + src_x;
1699 srcU = srcU + (src_y / 2) * src_strideUV + src_x / 2;
1700 srcV = srcV + (src_y / 2) * src_strideUV + src_x / 2;
1702 i_alpha = CLAMP (i_alpha, 0, 255);
/* Colorimetry conversion needed: run every pixel through the matrix. */
1704 if (src_sdtv != dest_sdtv) {
1711 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
1712 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
1714 for (i = 0; i < h; i++) {
1715 for (j = 0, uv_idx = src_x % 2; j < w; j++, uv_idx++) {
1717 u = srcU[uv_idx / 2];
1718 v = srcV[uv_idx / 2];
1720 y1 = APPLY_MATRIX (matrix, 0, y, u, v);
1721 u1 = APPLY_MATRIX (matrix, 1, y, u, v);
1722 v1 = APPLY_MATRIX (matrix, 2, y, u, v);
1724 dest[4 * j + 0] = i_alpha;
1725 dest[4 * j + 1] = y1;
1726 dest[4 * j + 2] = u1;
1727 dest[4 * j + 3] = v1;
1729 dest += dest_stride;
1732 srcY += src_strideY;
1733 if (src_y % 2 == 0) {
1734 srcU += src_strideUV;
1735 srcV += src_strideUV;
/* Same colorimetry: plain upsample-and-pack without conversion. */
1742 for (i = 0; i < h; i++) {
1743 for (j = 0, uv_idx = src_x % 2; j < w; j++, uv_idx++) {
1745 u = srcU[uv_idx / 2];
1746 v = srcV[uv_idx / 2];
1748 dest[4 * j + 0] = i_alpha;
1749 dest[4 * j + 1] = y;
1750 dest[4 * j + 2] = u;
1751 dest[4 * j + 3] = v;
1753 dest += dest_stride;
1756 srcY += src_strideY;
1757 if (src_y % 2 == 0) {
1758 srcU += src_strideUV;
1759 srcV += src_strideUV;
/* fill_rgb32:
 * Fills a 32-bit RGB/BGR/ARGB-family frame with the border color selected by
 * fill_type.  The per-component byte offsets are queried from the format
 * (p[0]=alpha, p[1]=R, p[2]=G, p[3]=B), a single packed pixel is assembled
 * in little-endian order via GUINT32_FROM_LE, and the whole frame is splatted
 * in one Orc call.  The sdtv flag is unused here because the fill colors are
 * plain RGB.  Assumes a packed 4-byte-per-pixel frame with no row padding
 * (width * height pixels contiguous). */
1766 fill_rgb32 (GstVideoBoxFill fill_type, guint b_alpha, GstVideoFormat format,
1767 guint8 * dest, gboolean sdtv, gint width, gint height)
1769 guint32 empty_pixel;
1772 p[0] = gst_video_format_get_component_offset (format, 3, width, height);
1773 p[1] = gst_video_format_get_component_offset (format, 0, width, height);
1774 p[2] = gst_video_format_get_component_offset (format, 1, width, height);
1775 p[3] = gst_video_format_get_component_offset (format, 2, width, height);
1777 b_alpha = CLAMP (b_alpha, 0, 255);
1779 empty_pixel = GUINT32_FROM_LE ((b_alpha << (p[0] * 8)) |
1780 (rgb_colors_R[fill_type] << (p[1] * 8)) |
1781 (rgb_colors_G[fill_type] << (p[2] * 8)) |
1782 (rgb_colors_B[fill_type] << (p[3] * 8)));
1784 orc_splat_u32 ((guint32 *) dest, empty_pixel, width * height);
/* fill_rgb24:
 * Fills a 24-bit RGB/BGR frame with the border color selected by fill_type,
 * pixel by pixel, honoring the 4-byte row alignment (GST_ROUND_UP_4).
 * b_alpha and sdtv are accepted for signature parity with the other fill
 * functions but are unused: 24-bit formats carry no alpha, and the colors
 * are plain RGB.  p[0] (the alpha offset) is likewise queried but never
 * used below. */
1788 fill_rgb24 (GstVideoBoxFill fill_type, guint b_alpha, GstVideoFormat format,
1789 guint8 * dest, gboolean sdtv, gint width, gint height)
1791 gint dest_stride = GST_ROUND_UP_4 (width * 3);
1795 p[0] = gst_video_format_get_component_offset (format, 3, width, height);
1796 p[1] = gst_video_format_get_component_offset (format, 0, width, height);
1797 p[2] = gst_video_format_get_component_offset (format, 1, width, height);
1798 p[3] = gst_video_format_get_component_offset (format, 2, width, height);
1800 for (i = 0; i < height; i++) {
1801 for (j = 0; j < width; j++) {
1802 dest[3 * j + p[1]] = rgb_colors_R[fill_type];
1803 dest[3 * j + p[2]] = rgb_colors_G[fill_type];
1804 dest[3 * j + p[3]] = rgb_colors_B[fill_type];
1806 dest += dest_stride;
/* copy_rgb32:
 * Copies a w x h region between RGB-family frames at (dest_x, dest_y),
 * handling any mix of 32-bit (xRGB/ARGB variants, 4 bpp) and packed 24-bit
 * RGB/BGR (3 bpp, rows rounded up to 4 bytes).  Component byte offsets for
 * both formats are looked up per channel (index 0 = alpha, 1..3 = R,G,B).
 * Five cases: alpha->alpha (source alpha scaled by i_alpha >> 8),
 * no-alpha-4bpp -> alpha (constant i_alpha), packed-3bpp -> alpha,
 * 4bpp -> 4bpp without alpha, and the generic per-pixel fallback.
 * The sdtv flags are unused: no colorimetry conversion happens in RGB.
 *
 * NOTE(review): the 4-bpp loops iterate `j < w; j += 4` using j as a BYTE
 * offset, which only covers w/4 pixels if w is a pixel count — either w is
 * in bytes here or a `4 *` factor was lost from this extract; confirm
 * against upstream before relying on this.
 * NOTE(review): lines are missing from this extract (numbering gaps), e.g.
 * the p_out/p_in assignments' left-hand sides. */
1811 copy_rgb32 (guint i_alpha, GstVideoFormat dest_format, guint8 * dest,
1812 gboolean dest_sdtv, gint dest_width, gint dest_height, gint dest_x,
1813 gint dest_y, GstVideoFormat src_format, const guint8 * src,
1814 gboolean src_sdtv, gint src_width, gint src_height, gint src_x, gint src_y,
1818 gint src_stride, dest_stride;
1819 gboolean in_alpha, out_alpha;
1820 gint in_bpp, out_bpp;
1823 gboolean packed_out = (dest_format == GST_VIDEO_FORMAT_RGB
1824 || dest_format == GST_VIDEO_FORMAT_BGR);
1825 gboolean packed_in = (src_format == GST_VIDEO_FORMAT_RGB
1826 || src_format == GST_VIDEO_FORMAT_BGR);
1828 src_stride = (packed_in) ? GST_ROUND_UP_4 (3 * src_width) : 4 * src_width;
1829 dest_stride = (packed_out) ? GST_ROUND_UP_4 (3 * dest_width) : 4 * dest_width;
1830 in_bpp = (packed_in) ? 3 : 4;
1831 out_bpp = (packed_out) ? 3 : 4;
1833 out_alpha = gst_video_format_has_alpha (dest_format);
1835 gst_video_format_get_component_offset (dest_format, 3, dest_width,
1838 gst_video_format_get_component_offset (dest_format, 0, dest_width,
1841 gst_video_format_get_component_offset (dest_format, 1, dest_width,
1844 gst_video_format_get_component_offset (dest_format, 2, dest_width,
1847 in_alpha = gst_video_format_has_alpha (src_format);
1849 gst_video_format_get_component_offset (src_format, 3, src_width,
1852 gst_video_format_get_component_offset (src_format, 0, src_width,
1855 gst_video_format_get_component_offset (src_format, 1, src_width,
1858 gst_video_format_get_component_offset (src_format, 2, src_width,
1861 dest = dest + dest_y * dest_stride + dest_x * out_bpp;
1862 src = src + src_y * src_stride + src_x * in_bpp;
/* Case 1: both sides have alpha — scale source alpha by i_alpha/256. */
1864 if (in_alpha && out_alpha) {
1866 for (i = 0; i < h; i++) {
1867 for (j = 0; j < w; j += 4) {
1868 dest[j + p_out[0]] = (src[j + p_in[0]] * i_alpha) >> 8;
1869 dest[j + p_out[1]] = src[j + p_in[1]];
1870 dest[j + p_out[2]] = src[j + p_in[2]];
1871 dest[j + p_out[3]] = src[j + p_in[3]];
1873 dest += dest_stride;
/* Case 2: dest has alpha, 4-bpp source without it — write constant alpha. */
1876 } else if (out_alpha && !packed_in) {
1878 i_alpha = CLAMP (i_alpha, 0, 255);
1880 for (i = 0; i < h; i++) {
1881 for (j = 0; j < w; j += 4) {
1882 dest[j + p_out[0]] = i_alpha;
1883 dest[j + p_out[1]] = src[j + p_in[1]];
1884 dest[j + p_out[2]] = src[j + p_in[2]];
1885 dest[j + p_out[3]] = src[j + p_in[3]];
1887 dest += dest_stride;
/* Case 3: dest has alpha, packed 3-bpp source — per-pixel indexing. */
1890 } else if (out_alpha && packed_in) {
1891 i_alpha = CLAMP (i_alpha, 0, 255);
1893 for (i = 0; i < h; i++) {
1894 for (j = 0; j < w; j++) {
1895 dest[4 * j + p_out[0]] = i_alpha;
1896 dest[4 * j + p_out[1]] = src[in_bpp * j + p_in[1]];
1897 dest[4 * j + p_out[2]] = src[in_bpp * j + p_in[2]];
1898 dest[4 * j + p_out[3]] = src[in_bpp * j + p_in[3]];
1900 dest += dest_stride;
/* Case 4: both 4 bpp, no alpha output — copy the three color bytes. */
1903 } else if (!packed_out && !packed_in) {
1905 for (i = 0; i < h; i++) {
1906 for (j = 0; j < w; j += 4) {
1907 dest[j + p_out[1]] = src[j + p_in[1]];
1908 dest[j + p_out[2]] = src[j + p_in[2]];
1909 dest[j + p_out[3]] = src[j + p_in[3]];
1911 dest += dest_stride;
/* Case 5: generic fallback, per-pixel with each side's own bpp. */
1915 for (i = 0; i < h; i++) {
1916 for (j = 0; j < w; j++) {
1917 dest[out_bpp * j + p_out[1]] = src[in_bpp * j + p_in[1]];
1918 dest[out_bpp * j + p_out[2]] = src[in_bpp * j + p_in[2]];
1919 dest[out_bpp * j + p_out[3]] = src[in_bpp * j + p_in[3]];
1921 dest += dest_stride;
/* copy_rgb32_ayuv:
 * Converts a w x h RGB-family region (32-bit variants or packed 24-bit
 * RGB/BGR) to packed AYUV at (dest_x, dest_y), using the 8-bit RGB->YCbCr
 * matrix chosen by dest_sdtv (BT.601 vs BT.709).  Alpha handling: if the
 * source has alpha it is scaled by i_alpha >> 8; otherwise the clamped
 * i_alpha constant is written (the `a = i_alpha` assignments fall on lines
 * missing from this extract).
 *
 * NOTE(review): as in copy_rgb32, the 4-bpp loops step `j += 4` against
 * `j < w`, treating j as a byte offset — confirm w's unit against upstream.
 * NOTE(review): several lines are missing from this extract (numbering
 * gaps): declarations, p_in left-hand sides, dest[j + 0] alpha stores. */
1928 copy_rgb32_ayuv (guint i_alpha, GstVideoFormat dest_format, guint8 * dest,
1929 gboolean dest_sdtv, gint dest_width, gint dest_height, gint dest_x,
1930 gint dest_y, GstVideoFormat src_format, const guint8 * src,
1931 gboolean src_sdtv, gint src_width, gint src_height, gint src_x, gint src_y,
1935 gint src_stride, dest_stride;
1939 gboolean packed_in = (src_format == GST_VIDEO_FORMAT_RGB
1940 || src_format == GST_VIDEO_FORMAT_BGR);
1946 src_stride = (packed_in) ? GST_ROUND_UP_4 (3 * src_width) : 4 * src_width;
1947 dest_stride = 4 * dest_width;
1948 in_bpp = (packed_in) ? 3 : 4;
1950 in_alpha = gst_video_format_has_alpha (src_format);
1952 gst_video_format_get_component_offset (src_format, 3, src_width,
1955 gst_video_format_get_component_offset (src_format, 0, src_width,
1958 gst_video_format_get_component_offset (src_format, 1, src_width,
1961 gst_video_format_get_component_offset (src_format, 2, src_width,
/* RGB -> YCbCr matrix for the destination's colorimetry. */
1965 (dest_sdtv) ? cog_rgb_to_ycbcr_matrix_8bit_sdtv :
1966 cog_rgb_to_ycbcr_matrix_8bit_hdtv, 12 * sizeof (gint));
1968 dest = dest + dest_y * dest_stride + dest_x * 4;
1969 src = src + src_y * src_stride + src_x * in_bpp;
/* Case 1: source alpha present — scale it by i_alpha/256 per pixel. */
1973 for (i = 0; i < h; i++) {
1974 for (j = 0; j < w; j += 4) {
1975 a = (src[j + p_in[0]] * i_alpha) >> 8;
1976 r = src[j + p_in[1]];
1977 g = src[j + p_in[2]];
1978 b = src[j + p_in[3]];
1980 y = APPLY_MATRIX (matrix, 0, r, g, b);
1981 u = APPLY_MATRIX (matrix, 1, r, g, b);
1982 v = APPLY_MATRIX (matrix, 2, r, g, b);
1985 dest[j + 1] = CLAMP (y, 0, 255);
1986 dest[j + 2] = CLAMP (u, 0, 255);
1987 dest[j + 3] = CLAMP (v, 0, 255);
1989 dest += dest_stride;
/* Case 2: 4-bpp source without alpha — constant alpha. */
1992 } else if (!packed_in) {
1994 i_alpha = CLAMP (i_alpha, 0, 255);
1996 for (i = 0; i < h; i++) {
1997 for (j = 0; j < w; j += 4) {
1999 r = src[j + p_in[1]];
2000 g = src[j + p_in[2]];
2001 b = src[j + p_in[3]];
2003 y = APPLY_MATRIX (matrix, 0, r, g, b);
2004 u = APPLY_MATRIX (matrix, 1, r, g, b);
2005 v = APPLY_MATRIX (matrix, 2, r, g, b);
2008 dest[j + 1] = CLAMP (y, 0, 255);
2009 dest[j + 2] = CLAMP (u, 0, 255);
2010 dest[j + 3] = CLAMP (v, 0, 255);
2012 dest += dest_stride;
/* Case 3: packed 3-bpp source — per-pixel indexing, constant alpha. */
2016 i_alpha = CLAMP (i_alpha, 0, 255);
2018 for (i = 0; i < h; i++) {
2019 for (j = 0; j < w; j++) {
2021 r = src[in_bpp * j + p_in[1]];
2022 g = src[in_bpp * j + p_in[2]];
2023 b = src[in_bpp * j + p_in[3]];
2025 y = APPLY_MATRIX (matrix, 0, r, g, b);
2026 u = APPLY_MATRIX (matrix, 1, r, g, b);
2027 v = APPLY_MATRIX (matrix, 2, r, g, b);
2029 dest[4 * j + 0] = a;
2030 dest[4 * j + 1] = CLAMP (y, 0, 255);
2031 dest[4 * j + 2] = CLAMP (u, 0, 255);
2032 dest[4 * j + 3] = CLAMP (v, 0, 255);
2034 dest += dest_stride;
/* copy_ayuv_rgb32:
 * Converts a w x h packed-AYUV region to an RGB-family destination (32-bit
 * variants or packed 24-bit RGB/BGR) at (dest_x, dest_y), using the 8-bit
 * YCbCr->RGB matrix chosen by src_sdtv.  When the destination has alpha the
 * source alpha is scaled by i_alpha >> 8; destinations without alpha drop it.
 * R/G/B results are clamped to 0..255 before the byte stores.
 *
 * NOTE(review): the 4-bpp loops step `j += 4` against `j < w` (byte offset
 * vs pixel count) — same caveat as copy_rgb32; confirm against upstream.
 * NOTE(review): lines are missing from this extract (numbering gaps): the
 * y/u/v loads from src[j + 1..3] and several declarations. */
2041 copy_ayuv_rgb32 (guint i_alpha, GstVideoFormat dest_format, guint8 * dest,
2042 gboolean dest_sdtv, gint dest_width, gint dest_height, gint dest_x,
2043 gint dest_y, GstVideoFormat src_format, const guint8 * src,
2044 gboolean src_sdtv, gint src_width, gint src_height, gint src_x, gint src_y,
2048 gint src_stride, dest_stride;
2052 gboolean packed_out = (dest_format == GST_VIDEO_FORMAT_RGB
2053 || dest_format == GST_VIDEO_FORMAT_BGR);
2059 dest_stride = (packed_out) ? GST_ROUND_UP_4 (3 * dest_width) : 4 * dest_width;
2060 src_stride = 4 * src_width;
2061 out_bpp = (packed_out) ? 3 : 4;
2063 out_alpha = gst_video_format_has_alpha (dest_format);
2065 gst_video_format_get_component_offset (dest_format, 3, dest_width,
2068 gst_video_format_get_component_offset (dest_format, 0, dest_width,
2071 gst_video_format_get_component_offset (dest_format, 1, dest_width,
2074 gst_video_format_get_component_offset (dest_format, 2, dest_width,
/* YCbCr -> RGB matrix for the source's colorimetry. */
2078 (src_sdtv) ? cog_ycbcr_to_rgb_matrix_8bit_sdtv :
2079 cog_ycbcr_to_rgb_matrix_8bit_hdtv, 12 * sizeof (gint));
2081 dest = dest + dest_y * dest_stride + dest_x * out_bpp;
2082 src = src + src_y * src_stride + src_x * 4;
/* Case 1: destination keeps alpha — scale source alpha by i_alpha/256. */
2086 for (i = 0; i < h; i++) {
2087 for (j = 0; j < w; j += 4) {
2088 a = (src[j + 0] * i_alpha) >> 8;
2093 r = APPLY_MATRIX (matrix, 0, y, u, v);
2094 g = APPLY_MATRIX (matrix, 1, y, u, v);
2095 b = APPLY_MATRIX (matrix, 2, y, u, v);
2097 dest[j + p_out[0]] = a;
2098 dest[j + p_out[1]] = CLAMP (r, 0, 255);
2099 dest[j + p_out[2]] = CLAMP (g, 0, 255);
2100 dest[j + p_out[3]] = CLAMP (b, 0, 255);
2102 dest += dest_stride;
/* Case 2: 4-bpp destination without alpha — drop the A byte. */
2105 } else if (!packed_out) {
2107 for (i = 0; i < h; i++) {
2108 for (j = 0; j < w; j += 4) {
2113 r = APPLY_MATRIX (matrix, 0, y, u, v);
2114 g = APPLY_MATRIX (matrix, 1, y, u, v);
2115 b = APPLY_MATRIX (matrix, 2, y, u, v);
2117 dest[j + p_out[1]] = CLAMP (r, 0, 255);
2118 dest[j + p_out[2]] = CLAMP (g, 0, 255);
2119 dest[j + p_out[3]] = CLAMP (b, 0, 255);
2121 dest += dest_stride;
/* Case 3: packed 3-bpp destination — per-pixel indexing. */
2125 for (i = 0; i < h; i++) {
2126 for (j = 0; j < w; j++) {
2131 r = APPLY_MATRIX (matrix, 0, y, u, v);
2132 g = APPLY_MATRIX (matrix, 1, y, u, v);
2133 b = APPLY_MATRIX (matrix, 2, y, u, v);
2135 dest[out_bpp * j + p_out[1]] = CLAMP (r, 0, 255);
2136 dest[out_bpp * j + p_out[2]] = CLAMP (g, 0, 255);
2137 dest[out_bpp * j + p_out[3]] = CLAMP (b, 0, 255);
2139 dest += dest_stride;
/* fill_gray:
 * Fills a GRAY8 or GRAY16 (BE/LE) frame with the luma value of the border
 * color selected by fill_type.  GRAY8 uses a memset per row; GRAY16 shifts
 * the 8-bit luma into the high byte and writes with the matching-endian
 * 16-bit store.  Rows are padded to 4-byte alignment.
 *
 * NOTE(review): the sdtv parameter is ignored — yuv_sdtv_colors_Y is used
 * unconditionally, unlike fill_yuy2 below which selects sdtv/hdtv tables.
 * For gray this only affects the exact luma level; verify intent. */
2146 fill_gray (GstVideoBoxFill fill_type, guint b_alpha, GstVideoFormat format,
2147 guint8 * dest, gboolean sdtv, gint width, gint height)
2152 if (format == GST_VIDEO_FORMAT_GRAY8) {
2153 guint8 val = yuv_sdtv_colors_Y[fill_type];
2155 dest_stride = GST_ROUND_UP_4 (width);
2156 for (i = 0; i < height; i++) {
2157 memset (dest, val, width);
2158 dest += dest_stride;
/* 16-bit gray: luma in the high byte (val << 8), low byte zero. */
2161 guint16 val = yuv_sdtv_colors_Y[fill_type] << 8;
2163 dest_stride = GST_ROUND_UP_4 (width * 2);
2164 if (format == GST_VIDEO_FORMAT_GRAY16_BE) {
2165 for (i = 0; i < height; i++) {
2166 for (j = 0; j < width; j++) {
2167 GST_WRITE_UINT16_BE (dest + 2 * j, val);
2169 dest += dest_stride;
2172 for (i = 0; i < height; i++) {
2173 for (j = 0; j < width; j++) {
2174 GST_WRITE_UINT16_LE (dest + 2 * j, val);
2176 dest += dest_stride;
/* copy_packed_simple:
 * Copies a w x h region between two frames of the same packed, single-plane
 * format with a straight memcpy per row.  No alpha scaling and no
 * colorimetry conversion: i_alpha and the sdtv flags are unused.
 * pixel_stride is taken from the DESTINATION format and used to offset into
 * both frames — assumes src_format has the same bytes-per-pixel (true for
 * the same-format copies this helper is registered for; verify callers).
 * NOTE(review): the closing `src += src_stride;` of the loop falls on a
 * line missing from this extract (numbering gap after 2203). */
2183 copy_packed_simple (guint i_alpha, GstVideoFormat dest_format, guint8 * dest,
2184 gboolean dest_sdtv, gint dest_width, gint dest_height, gint dest_x,
2185 gint dest_y, GstVideoFormat src_format, const guint8 * src,
2186 gboolean src_sdtv, gint src_width, gint src_height, gint src_x, gint src_y,
2190 gint src_stride, dest_stride;
2191 gint pixel_stride, row_size;
2193 src_stride = gst_video_format_get_row_stride (src_format, 0, src_width);
2194 dest_stride = gst_video_format_get_row_stride (dest_format, 0, dest_width);
2195 pixel_stride = gst_video_format_get_pixel_stride (dest_format, 0);
2196 row_size = w * pixel_stride;
2198 dest = dest + dest_y * dest_stride + dest_x * pixel_stride;
2199 src = src + src_y * src_stride + src_x * pixel_stride;
2201 for (i = 0; i < h; i++) {
2202 memcpy (dest, src, row_size);
2203 dest += dest_stride;
/* fill_yuy2:
 * Fills a YUY2, YVYU or UYVY frame with the border color selected by
 * fill_type, choosing the SDTV or HDTV YCbCr table per the sdtv flag.
 * These 4:2:2 formats pack two pixels into 4 bytes, so the width is rounded
 * up to even (width += width % 2) and the inner loop writes one macro pixel
 * (two luma samples sharing one U and one V) per iteration, in each
 * format's byte order.  b_alpha is unused: none of these formats has alpha.
 * NOTE(review): the per-row `dest += stride;` lines fall on lines missing
 * from this extract (numbering gaps after 2228/2239/2250). */
2209 fill_yuy2 (GstVideoBoxFill fill_type, guint b_alpha, GstVideoFormat format,
2210 guint8 * dest, gboolean sdtv, gint width, gint height)
2214 gint stride = gst_video_format_get_row_stride (format, 0, width);
2216 y = (sdtv) ? yuv_sdtv_colors_Y[fill_type] : yuv_hdtv_colors_Y[fill_type];
2217 u = (sdtv) ? yuv_sdtv_colors_U[fill_type] : yuv_hdtv_colors_U[fill_type];
2218 v = (sdtv) ? yuv_sdtv_colors_V[fill_type] : yuv_hdtv_colors_V[fill_type];
2220 width = width + (width % 2);
2222 if (format == GST_VIDEO_FORMAT_YUY2) {
2223 for (i = 0; i < height; i++) {
2224 for (j = 0; j < width; j += 2) {
2225 dest[j * 2 + 0] = y;
2226 dest[j * 2 + 1] = u;
2227 dest[j * 2 + 2] = y;
2228 dest[j * 2 + 3] = v;
2233 } else if (format == GST_VIDEO_FORMAT_YVYU) {
2234 for (i = 0; i < height; i++) {
2235 for (j = 0; j < width; j += 2) {
2236 dest[j * 2 + 0] = y;
2237 dest[j * 2 + 1] = v;
2238 dest[j * 2 + 2] = y;
2239 dest[j * 2 + 3] = u;
2245 for (i = 0; i < height; i++) {
2246 for (j = 0; j < width; j += 2) {
2247 dest[j * 2 + 0] = u;
2248 dest[j * 2 + 1] = y;
2249 dest[j * 2 + 2] = v;
2250 dest[j * 2 + 3] = y;
/* copy_yuy2_yuy2:
 * Copy a w x h region between two packed-4:2:2 frames (YUY2/YVYU/UYVY).
 * When source and destination colorimetry differ (@src_sdtv != @dest_sdtv),
 * each sample is run through the 8-bit cog SDTV<->HDTV conversion matrix;
 * otherwise rows are copied verbatim with memcpy (fast path at the bottom).
 */
2259 copy_yuy2_yuy2 (guint i_alpha, GstVideoFormat dest_format, guint8 * dest,
2260 gboolean dest_sdtv, gint dest_width, gint dest_height, gint dest_x,
2261 gint dest_y, GstVideoFormat src_format, const guint8 * src,
2262 gboolean src_sdtv, gint src_width, gint src_height, gint src_x, gint src_y,
2266 gint src_stride, dest_stride;
2268 src_stride = gst_video_format_get_row_stride (src_format, 0, src_width);
2269 dest_stride = gst_video_format_get_row_stride (dest_format, 0, dest_width);
/* Force the x offsets onto even pixels so copies stay aligned to the
 * 2-pixel macropixel boundary of packed 4:2:2. */
2271 dest_x = (dest_x & ~1);
2272 src_x = (src_x & ~1);
2276 dest = dest + dest_y * dest_stride + dest_x * 2;
2277 src = src + src_y * src_stride + src_x * 2;
2279 if (src_sdtv != dest_sdtv) {
2285 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
2286 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
2288 if (src_format == GST_VIDEO_FORMAT_YUY2) {
2289 for (i = 0; i < h; i++) {
2290 for (j = 0; j < w; j += 2) {
2291 y1 = src[j * 2 + 0];
2292 y2 = src[j * 2 + 2];
2293 u1 = u2 = src[j * 2 + 1];
2294 v1 = v2 = src[j * 2 + 3];
2296 dest[j * 2 + 0] = APPLY_MATRIX (matrix, 0, y1, u1, v1);
2297 dest[j * 2 + 1] = APPLY_MATRIX (matrix, 1, y1, u1, v1);
/* NOTE(review): the second luma output is computed from y1 here (and in
 * the YVYU/UYVY branches below) although y2 was loaded for it — this
 * looks like a copy/paste bug; upstream converts the second pixel's luma
 * from y2.  Verify against the full source before changing. */
2298 dest[j * 2 + 2] = APPLY_MATRIX (matrix, 0, y1, u2, v2);
2299 dest[j * 2 + 3] = APPLY_MATRIX (matrix, 2, y2, u2, v2);
2301 dest += dest_stride;
2304 } else if (src_format == GST_VIDEO_FORMAT_YVYU) {
2305 for (i = 0; i < h; i++) {
2306 for (j = 0; j < w; j += 2) {
2307 y1 = src[j * 2 + 0];
2308 y2 = src[j * 2 + 2];
2309 v1 = v2 = src[j * 2 + 1];
2310 u1 = u2 = src[j * 2 + 3];
2312 dest[j * 2 + 0] = APPLY_MATRIX (matrix, 0, y1, u1, v1);
2313 dest[j * 2 + 1] = APPLY_MATRIX (matrix, 2, y1, u1, v1);
2314 dest[j * 2 + 2] = APPLY_MATRIX (matrix, 0, y1, u2, v2);
2315 dest[j * 2 + 3] = APPLY_MATRIX (matrix, 1, y2, u2, v2);
2317 dest += dest_stride;
2321 for (i = 0; i < h; i++) {
2322 for (j = 0; j < w; j += 2) {
2323 u1 = u2 = src[j * 2 + 0];
2324 v1 = v2 = src[j * 2 + 2];
2325 y1 = src[j * 2 + 1];
2326 y2 = src[j * 2 + 3];
2328 dest[j * 2 + 1] = APPLY_MATRIX (matrix, 0, y1, u1, v1);
2329 dest[j * 2 + 0] = APPLY_MATRIX (matrix, 1, y1, u1, v1);
2330 dest[j * 2 + 3] = APPLY_MATRIX (matrix, 0, y1, u2, v2);
2331 dest[j * 2 + 2] = APPLY_MATRIX (matrix, 2, y2, u2, v2);
2333 dest += dest_stride;
/* Same-colorimetry fast path: raw row copy, 2 bytes per pixel. */
2338 for (i = 0; i < h; i++) {
2339 memcpy (dest, src, w * 2);
2340 dest += dest_stride;
/* Property defaults: no boxing on any side, black fill, fully opaque
 * picture and border. */
2346 #define DEFAULT_LEFT 0
2347 #define DEFAULT_RIGHT 0
2348 #define DEFAULT_TOP 0
2349 #define DEFAULT_BOTTOM 0
2350 #define DEFAULT_FILL_TYPE VIDEO_BOX_FILL_BLACK
2351 #define DEFAULT_ALPHA 1.0
2352 #define DEFAULT_BORDER_ALPHA 1.0
/* Source pad template: all formats videobox can produce (packed/planar YUV,
 * 24/32-bit RGB variants with and without alpha, and 8/16-bit grayscale). */
2368 static GstStaticPadTemplate gst_video_box_src_template =
2369 GST_STATIC_PAD_TEMPLATE ("src",
2372 GST_STATIC_CAPS (GST_VIDEO_CAPS_YUV ("AYUV") ";"
2373 GST_VIDEO_CAPS_ARGB ";" GST_VIDEO_CAPS_BGRA ";"
2374 GST_VIDEO_CAPS_ABGR ";" GST_VIDEO_CAPS_RGBA ";"
2375 GST_VIDEO_CAPS_xRGB ";" GST_VIDEO_CAPS_BGRx ";"
2376 GST_VIDEO_CAPS_xBGR ";" GST_VIDEO_CAPS_RGBx ";"
2377 GST_VIDEO_CAPS_RGB ";" GST_VIDEO_CAPS_BGR ";"
2378 GST_VIDEO_CAPS_YUV ("Y444") ";"
2379 GST_VIDEO_CAPS_YUV ("Y42B") ";"
2380 GST_VIDEO_CAPS_YUV ("YUY2") ";"
2381 GST_VIDEO_CAPS_YUV ("YVYU") ";"
2382 GST_VIDEO_CAPS_YUV ("UYVY") ";"
2383 GST_VIDEO_CAPS_YUV ("I420") ";"
2384 GST_VIDEO_CAPS_YUV ("YV12") ";"
2385 GST_VIDEO_CAPS_YUV ("Y41B") ";"
2386 GST_VIDEO_CAPS_GRAY8 ";"
2387 GST_VIDEO_CAPS_GRAY16 ("BIG_ENDIAN") ";"
2388 GST_VIDEO_CAPS_GRAY16 ("LITTLE_ENDIAN"))
/* Sink pad template: identical format list to the src template — every
 * accepted input format is also a possible output format. */
2391 static GstStaticPadTemplate gst_video_box_sink_template =
2392 GST_STATIC_PAD_TEMPLATE ("sink",
2395 GST_STATIC_CAPS (GST_VIDEO_CAPS_YUV ("AYUV") ";"
2396 GST_VIDEO_CAPS_ARGB ";" GST_VIDEO_CAPS_BGRA ";"
2397 GST_VIDEO_CAPS_ABGR ";" GST_VIDEO_CAPS_RGBA ";"
2398 GST_VIDEO_CAPS_xRGB ";" GST_VIDEO_CAPS_BGRx ";"
2399 GST_VIDEO_CAPS_xBGR ";" GST_VIDEO_CAPS_RGBx ";"
2400 GST_VIDEO_CAPS_RGB ";" GST_VIDEO_CAPS_BGR ";"
2401 GST_VIDEO_CAPS_YUV ("Y444") ";"
2402 GST_VIDEO_CAPS_YUV ("Y42B") ";"
2403 GST_VIDEO_CAPS_YUV ("YUY2") ";"
2404 GST_VIDEO_CAPS_YUV ("YVYU") ";"
2405 GST_VIDEO_CAPS_YUV ("UYVY") ";"
2406 GST_VIDEO_CAPS_YUV ("I420") ";"
2407 GST_VIDEO_CAPS_YUV ("YV12") ";"
2408 GST_VIDEO_CAPS_YUV ("Y41B") ";"
2409 GST_VIDEO_CAPS_GRAY8 ";"
2410 GST_VIDEO_CAPS_GRAY16 ("BIG_ENDIAN") ";"
2411 GST_VIDEO_CAPS_GRAY16 ("LITTLE_ENDIAN"))
/* GST_BOILERPLATE generates the GObject type plumbing (get_type, parent
 * class pointer, base_init/class_init/init wiring) for GstVideoBox as a
 * subclass of GstBaseTransform.  Below: forward declarations for the
 * property accessors and the GstBaseTransform virtual methods implemented
 * further down in this file. */
2414 GST_BOILERPLATE (GstVideoBox, gst_video_box, GstBaseTransform,
2415 GST_TYPE_BASE_TRANSFORM);
2417 static void gst_video_box_set_property (GObject * object, guint prop_id,
2418 const GValue * value, GParamSpec * pspec);
2419 static void gst_video_box_get_property (GObject * object, guint prop_id,
2420 GValue * value, GParamSpec * pspec);
2422 static gboolean gst_video_box_recalc_transform (GstVideoBox * video_box);
2423 static GstCaps *gst_video_box_transform_caps (GstBaseTransform * trans,
2424 GstPadDirection direction, GstCaps * from);
2425 static gboolean gst_video_box_set_caps (GstBaseTransform * trans,
2426 GstCaps * in, GstCaps * out);
2427 static gboolean gst_video_box_get_unit_size (GstBaseTransform * trans,
2428 GstCaps * caps, guint * size);
2429 static GstFlowReturn gst_video_box_transform (GstBaseTransform * trans,
2430 GstBuffer * in, GstBuffer * out);
2431 static void gst_video_box_before_transform (GstBaseTransform * trans,
2433 static void gst_video_box_fixate_caps (GstBaseTransform * trans,
2434 GstPadDirection direction, GstCaps * caps, GstCaps * othercaps);
2435 static gboolean gst_video_box_src_event (GstBaseTransform * trans,
2438 #define GST_TYPE_VIDEO_BOX_FILL (gst_video_box_fill_get_type())
/* gst_video_box_fill_get_type:
 * Lazily registers and returns the GEnumType for the "fill" property
 * (border fill color).  NOTE(review): the one-time registration is guarded
 * only by a plain static — presumably first called during class_init before
 * any data threads exist; confirm if this can race. */
2440 gst_video_box_fill_get_type (void)
2442 static GType video_box_fill_type = 0;
2443 static const GEnumValue video_box_fill[] = {
2444 {VIDEO_BOX_FILL_BLACK, "Black", "black"},
2445 {VIDEO_BOX_FILL_GREEN, "Green", "green"},
2446 {VIDEO_BOX_FILL_BLUE, "Blue", "blue"},
2447 {VIDEO_BOX_FILL_RED, "Red", "red"},
2448 {VIDEO_BOX_FILL_YELLOW, "Yellow", "yellow"},
2449 {VIDEO_BOX_FILL_WHITE, "White", "white"},
2453 if (!video_box_fill_type) {
2454 video_box_fill_type =
2455 g_enum_register_static ("GstVideoBoxFill", video_box_fill);
2457 return video_box_fill_type;
/* gst_video_box_base_init:
 * Installs the element metadata (name, class, description, author) and the
 * static sink/src pad templates on the element class. */
2462 gst_video_box_base_init (gpointer g_class)
2464 GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);
2466 gst_element_class_set_details_simple (element_class, "Video box filter",
2467 "Filter/Effect/Video",
2468 "Resizes a video by adding borders or cropping",
2469 "Wim Taymans <wim@fluendo.com>");
2471 gst_element_class_add_static_pad_template (element_class,
2472 &gst_video_box_sink_template);
2473 gst_element_class_add_static_pad_template (element_class,
2474 &gst_video_box_src_template);
/* gst_video_box_finalize:
 * Releases the property mutex created in _init, then chains up to the
 * parent finalize. */
2478 gst_video_box_finalize (GObject * object)
2480 GstVideoBox *video_box = GST_VIDEO_BOX (object);
2482 if (video_box->mutex) {
2483 g_mutex_free (video_box->mutex);
2484 video_box->mutex = NULL;
2487 G_OBJECT_CLASS (parent_class)->finalize (object);
/* gst_video_box_class_init:
 * Wires up GObject property handling, installs the fill/left/right/top/
 * bottom/alpha/border-alpha/autocrop properties (most of them controllable
 * via GstController), and hooks the GstBaseTransform virtual methods. */
2491 gst_video_box_class_init (GstVideoBoxClass * klass)
2493 GObjectClass *gobject_class = (GObjectClass *) klass;
2494 GstBaseTransformClass *trans_class = (GstBaseTransformClass *) klass;
2496 gobject_class->set_property = gst_video_box_set_property;
2497 gobject_class->get_property = gst_video_box_get_property;
2498 gobject_class->finalize = gst_video_box_finalize;
2500 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_FILL_TYPE,
2501 g_param_spec_enum ("fill", "Fill", "How to fill the borders",
2502 GST_TYPE_VIDEO_BOX_FILL, DEFAULT_FILL_TYPE,
2503 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
2504 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_LEFT,
2505 g_param_spec_int ("left", "Left",
2506 "Pixels to box at left (<0 = add a border)", G_MININT, G_MAXINT,
2508 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
2509 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_RIGHT,
2510 g_param_spec_int ("right", "Right",
2511 "Pixels to box at right (<0 = add a border)", G_MININT, G_MAXINT,
2513 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
2514 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_TOP,
2515 g_param_spec_int ("top", "Top",
2516 "Pixels to box at top (<0 = add a border)", G_MININT, G_MAXINT,
2518 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
2519 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_BOTTOM,
2520 g_param_spec_int ("bottom", "Bottom",
2521 "Pixels to box at bottom (<0 = add a border)", G_MININT, G_MAXINT,
2523 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
2524 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_ALPHA,
2525 g_param_spec_double ("alpha", "Alpha", "Alpha value picture", 0.0, 1.0,
2527 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
2528 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_BORDER_ALPHA,
2529 g_param_spec_double ("border-alpha", "Border Alpha",
2530 "Alpha value of the border", 0.0, 1.0, DEFAULT_BORDER_ALPHA,
2531 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
2533 * GstVideoBox:autocrop
2535 * If set to %TRUE videobox will automatically crop/pad the input
2536 * video to be centered in the output.
2540 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_AUTOCROP,
2541 g_param_spec_boolean ("autocrop", "Auto crop",
2542 "Auto crop", FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
2544 trans_class->transform = GST_DEBUG_FUNCPTR (gst_video_box_transform);
2545 trans_class->before_transform =
2546 GST_DEBUG_FUNCPTR (gst_video_box_before_transform);
2547 trans_class->transform_caps =
2548 GST_DEBUG_FUNCPTR (gst_video_box_transform_caps);
2549 trans_class->set_caps = GST_DEBUG_FUNCPTR (gst_video_box_set_caps);
2550 trans_class->get_unit_size = GST_DEBUG_FUNCPTR (gst_video_box_get_unit_size);
2551 trans_class->fixate_caps = GST_DEBUG_FUNCPTR (gst_video_box_fixate_caps);
2552 trans_class->src_event = GST_DEBUG_FUNCPTR (gst_video_box_src_event);
/* gst_video_box_init:
 * Per-instance initialization: all box/crop offsets start at 0 (pure
 * passthrough), fill/alpha take their property defaults, autocrop is off,
 * and the mutex guarding property/caps state is created. */
2556 gst_video_box_init (GstVideoBox * video_box, GstVideoBoxClass * g_class)
2558 video_box->box_right = DEFAULT_RIGHT;
2559 video_box->box_left = DEFAULT_LEFT;
2560 video_box->box_top = DEFAULT_TOP;
2561 video_box->box_bottom = DEFAULT_BOTTOM;
2562 video_box->crop_right = 0;
2563 video_box->crop_left = 0;
2564 video_box->crop_top = 0;
2565 video_box->crop_bottom = 0;
2566 video_box->fill_type = DEFAULT_FILL_TYPE;
2567 video_box->alpha = DEFAULT_ALPHA;
2568 video_box->border_alpha = DEFAULT_BORDER_ALPHA;
2569 video_box->autocrop = FALSE;
2571 video_box->mutex = g_mutex_new ();
/* gst_video_box_set_property:
 * Property setter.  For each side, a positive box value means "crop this
 * many pixels" (crop_* = value, border_* = 0) and a negative value means
 * "add a border" (border_* = -value, crop_* = 0).  Runs under the instance
 * mutex; afterwards recomputes the passthrough decision and asks
 * basetransform to renegotiate.  NOTE(review): the case labels for
 * LEFT/RIGHT/TOP/BOTTOM are missing from this extraction. */
2575 gst_video_box_set_property (GObject * object, guint prop_id,
2576 const GValue * value, GParamSpec * pspec)
2578 GstVideoBox *video_box = GST_VIDEO_BOX (object);
2580 g_mutex_lock (video_box->mutex);
2583 video_box->box_left = g_value_get_int (value);
2584 if (video_box->box_left < 0) {
2585 video_box->border_left = -video_box->box_left;
2586 video_box->crop_left = 0;
2588 video_box->border_left = 0;
2589 video_box->crop_left = video_box->box_left;
2593 video_box->box_right = g_value_get_int (value);
2594 if (video_box->box_right < 0) {
2595 video_box->border_right = -video_box->box_right;
2596 video_box->crop_right = 0;
2598 video_box->border_right = 0;
2599 video_box->crop_right = video_box->box_right;
2603 video_box->box_top = g_value_get_int (value);
2604 if (video_box->box_top < 0) {
2605 video_box->border_top = -video_box->box_top;
2606 video_box->crop_top = 0;
2608 video_box->border_top = 0;
2609 video_box->crop_top = video_box->box_top;
2613 video_box->box_bottom = g_value_get_int (value);
2614 if (video_box->box_bottom < 0) {
2615 video_box->border_bottom = -video_box->box_bottom;
2616 video_box->crop_bottom = 0;
2618 video_box->border_bottom = 0;
2619 video_box->crop_bottom = video_box->box_bottom;
2622 case PROP_FILL_TYPE:
2623 video_box->fill_type = g_value_get_enum (value);
2626 video_box->alpha = g_value_get_double (value);
2628 case PROP_BORDER_ALPHA:
2629 video_box->border_alpha = g_value_get_double (value);
2632 video_box->autocrop = g_value_get_boolean (value);
2635 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
/* Property changes can flip us in/out of passthrough and change output
 * geometry, so recalc and force caps renegotiation. */
2638 gst_video_box_recalc_transform (video_box);
2640 GST_DEBUG_OBJECT (video_box, "Calling reconfigure");
2641 gst_base_transform_reconfigure (GST_BASE_TRANSFORM_CAST (video_box));
2643 g_mutex_unlock (video_box->mutex);
/* gst_video_box_autocrop:
 * Derives the per-side box values from the difference between negotiated
 * input and output sizes, splitting the difference evenly (half on each
 * side) so the picture stays centered.  The sign convention matches
 * set_property: positive = crop, negative = border.  NOTE(review): the
 * code adjusting box_right/box_bottom for odd width/height differences
 * (referenced by the comments below) is missing from this extraction. */
2647 gst_video_box_autocrop (GstVideoBox * video_box)
2649 gint crop_w = video_box->in_width - video_box->out_width;
2650 gint crop_h = video_box->in_height - video_box->out_height;
2652 video_box->box_left = crop_w / 2;
2653 if (video_box->box_left < 0) {
2654 video_box->border_left = -video_box->box_left;
2655 video_box->crop_left = 0;
2657 video_box->border_left = 0;
2658 video_box->crop_left = video_box->box_left;
2661 /* Round down/up for odd width differences */
2667 video_box->box_right = crop_w / 2;
2668 if (video_box->box_right < 0) {
2669 video_box->border_right = -video_box->box_right;
2670 video_box->crop_right = 0;
2672 video_box->border_right = 0;
2673 video_box->crop_right = video_box->box_right;
2676 video_box->box_top = crop_h / 2;
2677 if (video_box->box_top < 0) {
2678 video_box->border_top = -video_box->box_top;
2679 video_box->crop_top = 0;
2681 video_box->border_top = 0;
2682 video_box->crop_top = video_box->box_top;
2685 /* Round down/up for odd height differences */
2690 video_box->box_bottom = crop_h / 2;
2692 if (video_box->box_bottom < 0) {
2693 video_box->border_bottom = -video_box->box_bottom;
2694 video_box->crop_bottom = 0;
2696 video_box->border_bottom = 0;
2697 video_box->crop_bottom = video_box->box_bottom;
/* gst_video_box_get_property:
 * Property getter — returns the raw box_* values as set by the user (or
 * computed by autocrop), not the derived crop_*/border_* split. */
2702 gst_video_box_get_property (GObject * object, guint prop_id, GValue * value,
2705 GstVideoBox *video_box = GST_VIDEO_BOX (object);
2709 g_value_set_int (value, video_box->box_left);
2712 g_value_set_int (value, video_box->box_right);
2715 g_value_set_int (value, video_box->box_top);
2718 g_value_set_int (value, video_box->box_bottom);
2720 case PROP_FILL_TYPE:
2721 g_value_set_enum (value, video_box->fill_type);
2724 g_value_set_double (value, video_box->alpha);
2726 case PROP_BORDER_ALPHA:
2727 g_value_set_double (value, video_box->border_alpha);
2730 g_value_set_boolean (value, video_box->autocrop);
2733 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
2739 gst_video_box_transform_dimension (gint val, gint delta)
2741 gint64 new_val = (gint64) val + (gint64) delta;
2743 new_val = CLAMP (new_val, 1, G_MAXINT);
2745 return (gint) new_val;
/* gst_video_box_transform_dimension_value:
 * Applies @delta to a caps dimension GValue, handling plain ints, int
 * ranges, and lists (element-wise, dropping entries that fail).  Writes
 * the result into @dest_val and returns whether the transform succeeded.
 * NOTE(review): several lines (range-collapse check, list init, final
 * return) are missing from this extraction. */
2749 gst_video_box_transform_dimension_value (const GValue * src_val,
2750 gint delta, GValue * dest_val)
2752 gboolean ret = TRUE;
2754 g_value_init (dest_val, G_VALUE_TYPE (src_val));
2756 if (G_VALUE_HOLDS_INT (src_val)) {
2757 gint ival = g_value_get_int (src_val);
2759 ival = gst_video_box_transform_dimension (ival, delta);
2760 g_value_set_int (dest_val, ival);
2761 } else if (GST_VALUE_HOLDS_INT_RANGE (src_val)) {
2762 gint min = gst_value_get_int_range_min (src_val);
2763 gint max = gst_value_get_int_range_max (src_val);
2765 min = gst_video_box_transform_dimension (min, delta);
2766 max = gst_video_box_transform_dimension (max, delta);
2769 g_value_unset (dest_val);
2771 gst_value_set_int_range (dest_val, min, max);
2773 } else if (GST_VALUE_HOLDS_LIST (src_val)) {
2776 for (i = 0; i < gst_value_list_get_size (src_val); ++i) {
2777 const GValue *list_val;
2778 GValue newval = { 0, };
2780 list_val = gst_value_list_get_value (src_val, i);
2781 if (gst_video_box_transform_dimension_value (list_val, delta, &newval))
2782 gst_value_list_append_value (dest_val, &newval);
2783 g_value_unset (&newval);
/* An empty result list means no entry survived the transform: fail. */
2786 if (gst_value_list_get_size (dest_val) == 0) {
2787 g_value_unset (dest_val);
2791 g_value_unset (dest_val);
/* gst_video_box_transform_caps:
 * basetransform vfunc: computes the caps possible on the other pad given
 * @from.  Steps: (1) adjust width/height by the box offsets (sink->src
 * subtracts, src->sink adds; with autocrop the fields are removed
 * entirely); (2) expand the format — AYUV/I420/YV12 are inter-convertible
 * and AYUV additionally converts to/from 24/32-bit RGB, everything else is
 * passthrough-only; (3) intersect with the other pad's template caps. */
2799 gst_video_box_transform_caps (GstBaseTransform * trans,
2800 GstPadDirection direction, GstCaps * from)
2802 GstVideoBox *video_box = GST_VIDEO_BOX (trans);
2804 const GstCaps *templ;
2806 GstStructure *structure;
2809 to = gst_caps_copy (from);
2810 /* Just to be sure... */
2811 gst_caps_truncate (to);
2812 structure = gst_caps_get_structure (to, 0);
2814 /* Transform width/height */
2815 if (video_box->autocrop) {
2816 gst_structure_remove_field (structure, "width");
2817 gst_structure_remove_field (structure, "height");
2819 gint dw = 0, dh = 0;
2821 GValue w_val = { 0, };
2822 GValue h_val = { 0, };
2824 /* calculate width and height */
2825 if (direction == GST_PAD_SINK) {
2826 dw -= video_box->box_left;
2827 dw -= video_box->box_right;
2829 dw += video_box->box_left;
2830 dw += video_box->box_right;
2833 if (direction == GST_PAD_SINK) {
2834 dh -= video_box->box_top;
2835 dh -= video_box->box_bottom;
2837 dh += video_box->box_top;
2838 dh += video_box->box_bottom;
2841 v = gst_structure_get_value (structure, "width");
2842 if (!gst_video_box_transform_dimension_value (v, dw, &w_val)) {
2843 GST_WARNING_OBJECT (video_box, "could not tranform width value with dw=%d"
2844 ", caps structure=%" GST_PTR_FORMAT, dw, structure);
2845 gst_caps_unref (to);
2846 to = gst_caps_new_empty ();
2849 gst_structure_set_value (structure, "width", &w_val);
2851 v = gst_structure_get_value (structure, "height");
2852 if (!gst_video_box_transform_dimension_value (v, dh, &h_val)) {
2853 g_value_unset (&w_val);
2854 GST_WARNING_OBJECT (video_box,
2855 "could not tranform height value with dh=%d" ", caps structure=%"
2856 GST_PTR_FORMAT, dh, structure);
2857 gst_caps_unref (to);
2858 to = gst_caps_new_empty ();
2861 gst_structure_set_value (structure, "height", &h_val);
2862 g_value_unset (&w_val);
2863 g_value_unset (&h_val);
2866 /* Supported conversions:
2873 * AYUV->xRGB (24bpp, 32bpp, incl. alpha)
2874 * xRGB->xRGB (24bpp, 32bpp, from/to all variants, incl. alpha)
2875 * xRGB->AYUV (24bpp, 32bpp, incl. alpha)
2877 * Passthrough only for everything else.
2879 name = gst_structure_get_name (structure);
2880 if (g_str_equal (name, "video/x-raw-yuv")) {
2883 if (gst_structure_get_fourcc (structure, "format", &fourcc) &&
2884 (fourcc == GST_STR_FOURCC ("AYUV") ||
2885 fourcc == GST_STR_FOURCC ("I420") ||
2886 fourcc == GST_STR_FOURCC ("YV12"))) {
2887 GValue list = { 0, };
2888 GValue val = { 0, };
/* get rid of format (and colorimetry fields that no longer apply) */
2892 gst_structure_remove_field (structure, "format");
2893 gst_structure_remove_field (structure, "color-matrix");
2894 gst_structure_remove_field (structure, "chroma-site");
2896 s2 = gst_structure_copy (structure);
2898 g_value_init (&list, GST_TYPE_LIST);
2899 g_value_init (&val, GST_TYPE_FOURCC);
2900 gst_value_set_fourcc (&val, GST_STR_FOURCC ("AYUV"));
2901 gst_value_list_append_value (&list, &val);
2902 g_value_reset (&val);
2903 gst_value_set_fourcc (&val, GST_STR_FOURCC ("I420"));
2904 gst_value_list_append_value (&list, &val);
2905 g_value_reset (&val);
2906 gst_value_set_fourcc (&val, GST_STR_FOURCC ("YV12"));
2907 gst_value_list_append_value (&list, &val);
2908 g_value_unset (&val);
2909 gst_structure_set_value (structure, "format", &list);
2910 g_value_unset (&list);
2912 /* We can only convert to RGB if input is AYUV */
2913 if (fourcc == GST_STR_FOURCC ("AYUV")) {
2914 gst_structure_set_name (s2, "video/x-raw-rgb");
2915 g_value_init (&list, GST_TYPE_LIST);
2916 g_value_init (&val, G_TYPE_INT);
2917 g_value_set_int (&val, 32);
2918 gst_value_list_append_value (&list, &val);
2919 g_value_reset (&val);
2920 g_value_set_int (&val, 24);
2921 gst_value_list_append_value (&list, &val);
2922 g_value_unset (&val);
2923 gst_structure_set_value (s2, "depth", &list);
2924 gst_structure_set_value (s2, "bpp", &list);
2925 g_value_unset (&list);
2926 gst_caps_append_structure (to, s2);
2928 gst_structure_free (s2);
2931 } else if (g_str_equal (name, "video/x-raw-rgb")) {
2934 if (gst_structure_get_int (structure, "bpp", &bpp) &&
2935 (bpp == 32 || bpp == 24)) {
2936 GValue list = { 0, };
2937 GValue val = { 0, };
2940 /* get rid of format */
2941 gst_structure_remove_field (structure, "depth");
2942 gst_structure_remove_field (structure, "bpp");
2943 gst_structure_remove_field (structure, "red_mask");
2944 gst_structure_remove_field (structure, "green_mask");
2945 gst_structure_remove_field (structure, "blue_mask");
2946 gst_structure_remove_field (structure, "alpha_mask");
2948 s2 = gst_structure_copy (structure);
2950 g_value_init (&list, GST_TYPE_LIST);
2951 g_value_init (&val, G_TYPE_INT);
2952 g_value_set_int (&val, 32);
2953 gst_value_list_append_value (&list, &val);
2954 g_value_reset (&val);
2955 g_value_set_int (&val, 24);
2956 gst_value_list_append_value (&list, &val);
2957 g_value_unset (&val);
2958 gst_structure_set_value (structure, "depth", &list);
2959 gst_structure_set_value (structure, "bpp", &list);
2960 g_value_unset (&list);
2962 gst_structure_set_name (s2, "video/x-raw-yuv");
2963 gst_structure_set (s2, "format", GST_TYPE_FOURCC, GST_STR_FOURCC ("AYUV"),
2965 gst_caps_append_structure (to, s2);
2969 /* filter against set allowed caps on the pad */
2970 other = (direction == GST_PAD_SINK) ? trans->srcpad : trans->sinkpad;
2972 templ = gst_pad_get_pad_template_caps (other);
2973 ret = gst_caps_intersect (to, templ);
2974 gst_caps_unref (to);
2976 GST_DEBUG_OBJECT (video_box, "direction %d, transformed %" GST_PTR_FORMAT
2977 " to %" GST_PTR_FORMAT, direction, from, ret);
/* gst_video_box_recalc_transform:
 * Enables basetransform passthrough when no work is needed: identical
 * in/out format, no box offsets on any side, and matching SDTV/HDTV
 * colorimetry.  Otherwise passthrough is switched off. */
2983 gst_video_box_recalc_transform (GstVideoBox * video_box)
2985 gboolean res = TRUE;
2987 /* if we have the same format in and out and we don't need to perform any
2988 * cropping at all, we can just operate in passthrough mode */
2989 if (video_box->in_format == video_box->out_format &&
2990 video_box->box_left == 0 && video_box->box_right == 0 &&
2991 video_box->box_top == 0 && video_box->box_bottom == 0 &&
2992 video_box->in_sdtv == video_box->out_sdtv) {
2994 GST_LOG_OBJECT (video_box, "we are using passthrough");
2995 gst_base_transform_set_passthrough (GST_BASE_TRANSFORM_CAST (video_box),
2998 GST_LOG_OBJECT (video_box, "we are not using passthrough");
2999 gst_base_transform_set_passthrough (GST_BASE_TRANSFORM_CAST (video_box),
/* gst_video_box_select_processing_functions:
 * Dispatch table: picks the border-fill function from the output format
 * and the copy/convert function from the (input, output) format pair.
 * Returns TRUE only if both a fill and a copy implementation exist for
 * the negotiated combination — i.e. the conversion is supported. */
3006 gst_video_box_select_processing_functions (GstVideoBox * video_box)
3008 switch (video_box->out_format) {
3009 case GST_VIDEO_FORMAT_AYUV:
3010 video_box->fill = fill_ayuv;
3011 switch (video_box->in_format) {
3012 case GST_VIDEO_FORMAT_AYUV:
3013 video_box->copy = copy_ayuv_ayuv;
3015 case GST_VIDEO_FORMAT_I420:
3016 case GST_VIDEO_FORMAT_YV12:
3017 video_box->copy = copy_i420_ayuv;
3019 case GST_VIDEO_FORMAT_ARGB:
3020 case GST_VIDEO_FORMAT_ABGR:
3021 case GST_VIDEO_FORMAT_RGBA:
3022 case GST_VIDEO_FORMAT_BGRA:
3023 case GST_VIDEO_FORMAT_xRGB:
3024 case GST_VIDEO_FORMAT_xBGR:
3025 case GST_VIDEO_FORMAT_RGBx:
3026 case GST_VIDEO_FORMAT_BGRx:
3027 case GST_VIDEO_FORMAT_RGB:
3028 case GST_VIDEO_FORMAT_BGR:
3029 video_box->copy = copy_rgb32_ayuv;
3035 case GST_VIDEO_FORMAT_I420:
3036 case GST_VIDEO_FORMAT_YV12:
3037 video_box->fill = fill_planar_yuv;
3038 switch (video_box->in_format) {
3039 case GST_VIDEO_FORMAT_AYUV:
3040 video_box->copy = copy_ayuv_i420;
3042 case GST_VIDEO_FORMAT_I420:
3043 case GST_VIDEO_FORMAT_YV12:
3044 video_box->copy = copy_i420_i420;
3050 case GST_VIDEO_FORMAT_ARGB:
3051 case GST_VIDEO_FORMAT_ABGR:
3052 case GST_VIDEO_FORMAT_RGBA:
3053 case GST_VIDEO_FORMAT_BGRA:
3054 case GST_VIDEO_FORMAT_xRGB:
3055 case GST_VIDEO_FORMAT_xBGR:
3056 case GST_VIDEO_FORMAT_RGBx:
3057 case GST_VIDEO_FORMAT_BGRx:
3058 case GST_VIDEO_FORMAT_RGB:
3059 case GST_VIDEO_FORMAT_BGR:
/* 24bpp RGB/BGR needs the 3-byte fill; all other RGB variants are 4-byte. */
3060 video_box->fill = (video_box->out_format == GST_VIDEO_FORMAT_BGR
3061 || video_box->out_format ==
3062 GST_VIDEO_FORMAT_RGB) ? fill_rgb24 : fill_rgb32;
3063 switch (video_box->in_format) {
3064 case GST_VIDEO_FORMAT_ARGB:
3065 case GST_VIDEO_FORMAT_ABGR:
3066 case GST_VIDEO_FORMAT_RGBA:
3067 case GST_VIDEO_FORMAT_BGRA:
3068 case GST_VIDEO_FORMAT_xRGB:
3069 case GST_VIDEO_FORMAT_xBGR:
3070 case GST_VIDEO_FORMAT_RGBx:
3071 case GST_VIDEO_FORMAT_BGRx:
3072 case GST_VIDEO_FORMAT_RGB:
3073 case GST_VIDEO_FORMAT_BGR:
3074 video_box->copy = copy_rgb32;
3076 case GST_VIDEO_FORMAT_AYUV:
3077 video_box->copy = copy_ayuv_rgb32;
3082 case GST_VIDEO_FORMAT_GRAY8:
3083 case GST_VIDEO_FORMAT_GRAY16_BE:
3084 case GST_VIDEO_FORMAT_GRAY16_LE:
3085 video_box->fill = fill_gray;
3086 switch (video_box->in_format) {
3087 case GST_VIDEO_FORMAT_GRAY8:
3088 case GST_VIDEO_FORMAT_GRAY16_BE:
3089 case GST_VIDEO_FORMAT_GRAY16_LE:
3090 video_box->copy = copy_packed_simple;
3096 case GST_VIDEO_FORMAT_YUY2:
3097 case GST_VIDEO_FORMAT_YVYU:
3098 case GST_VIDEO_FORMAT_UYVY:
3099 video_box->fill = fill_yuy2;
3100 switch (video_box->in_format) {
3101 case GST_VIDEO_FORMAT_YUY2:
3102 case GST_VIDEO_FORMAT_YVYU:
3103 case GST_VIDEO_FORMAT_UYVY:
3104 video_box->copy = copy_yuy2_yuy2;
3110 case GST_VIDEO_FORMAT_Y444:
3111 case GST_VIDEO_FORMAT_Y42B:
3112 case GST_VIDEO_FORMAT_Y41B:
3113 video_box->fill = fill_planar_yuv;
3114 switch (video_box->in_format) {
3115 case GST_VIDEO_FORMAT_Y444:
3116 video_box->copy = copy_y444_y444;
3118 case GST_VIDEO_FORMAT_Y42B:
3119 video_box->copy = copy_y42b_y42b;
3121 case GST_VIDEO_FORMAT_Y41B:
3122 video_box->copy = copy_y41b_y41b;
3132 return video_box->fill != NULL && video_box->copy != NULL;
/* gst_video_box_set_caps:
 * basetransform vfunc: parses format/size/color-matrix from the negotiated
 * in and out caps (absent color-matrix defaults to SDTV), optionally runs
 * autocrop, refreshes the passthrough decision, and selects the fill/copy
 * implementations.  All under the instance mutex. */
3136 gst_video_box_set_caps (GstBaseTransform * trans, GstCaps * in, GstCaps * out)
3138 GstVideoBox *video_box = GST_VIDEO_BOX (trans);
3140 const gchar *matrix;
3142 g_mutex_lock (video_box->mutex);
3145 gst_video_format_parse_caps (in, &video_box->in_format,
3146 &video_box->in_width, &video_box->in_height);
3148 gst_video_format_parse_caps (out, &video_box->out_format,
3149 &video_box->out_width, &video_box->out_height);
3151 matrix = gst_video_parse_caps_color_matrix (in);
3152 video_box->in_sdtv = matrix ? g_str_equal (matrix, "sdtv") : TRUE;
3153 matrix = gst_video_parse_caps_color_matrix (out);
3154 video_box->out_sdtv = matrix ? g_str_equal (matrix, "sdtv") : TRUE;
3156 /* something wrong getting the caps */
3160 GST_DEBUG_OBJECT (trans, "Input w: %d h: %d", video_box->in_width,
3161 video_box->in_height);
3162 GST_DEBUG_OBJECT (trans, "Output w: %d h: %d", video_box->out_width,
3163 video_box->out_height);
3165 if (video_box->autocrop)
3166 gst_video_box_autocrop (video_box);
3168 /* recalc the transformation strategy */
3169 ret = gst_video_box_recalc_transform (video_box);
3172 ret = gst_video_box_select_processing_functions (video_box);
3173 g_mutex_unlock (video_box->mutex);
/* error path: log the offending caps and release the mutex before bailing */
3180 GST_DEBUG_OBJECT (video_box,
3181 "Invalid caps: %" GST_PTR_FORMAT " -> %" GST_PTR_FORMAT, in, out);
3182 g_mutex_unlock (video_box->mutex);
/* gst_video_box_get_unit_size:
 * basetransform vfunc: reports the byte size of one video frame for the
 * given caps via gst_video_format_get_size; fails on unparsable caps. */
3188 gst_video_box_get_unit_size (GstBaseTransform * trans, GstCaps * caps,
3191 GstVideoFormat format;
3197 ret = gst_video_format_parse_caps (caps, &format, &width, &height);
3199 GST_ERROR_OBJECT (trans, "Invalid caps: %" GST_PTR_FORMAT, caps);
3203 *size = gst_video_format_get_size (format, width, height);
3205 GST_LOG_OBJECT (trans, "Returning from _unit_size %d", *size);
/* gst_video_box_fixate_caps:
 * basetransform vfunc: nails down unfixed width/height in @othercaps to
 * the values nearest the already-known dimensions of @caps. */
3211 gst_video_box_fixate_caps (GstBaseTransform * trans,
3212 GstPadDirection direction, GstCaps * caps, GstCaps * othercaps)
3218 ret = gst_video_format_parse_caps (caps, NULL, &width, &height);
3222 s = gst_caps_get_structure (othercaps, 0);
3223 gst_structure_fixate_field_nearest_int (s, "width", width);
3224 gst_structure_fixate_field_nearest_int (s, "height", height);
/* gst_video_box_src_event:
 * basetransform vfunc: intercepts downstream navigation (mouse) events
 * and shifts the pointer coordinates by the left/top box offsets so they
 * map back onto the unboxed input picture, then forwards the (possibly
 * rewritten) event upstream via the parent implementation. */
3228 gst_video_box_src_event (GstBaseTransform * trans, GstEvent * event)
3230 GstVideoBox *video_box = GST_VIDEO_BOX (trans);
3231 GstStructure *new_structure;
3232 const GstStructure *structure;
3233 const gchar *event_name;
3237 GST_OBJECT_LOCK (video_box);
3238 if (GST_EVENT_TYPE (event) == GST_EVENT_NAVIGATION &&
3239 (video_box->box_left != 0 || video_box->box_top != 0)) {
3240 structure = gst_event_get_structure (event);
3241 event_name = gst_structure_get_string (structure, "event");
3244 (strcmp (event_name, "mouse-move") == 0 ||
3245 strcmp (event_name, "mouse-button-press") == 0 ||
3246 strcmp (event_name, "mouse-button-release") == 0)) {
3247 if (gst_structure_get_double (structure, "pointer_x", &pointer_x) &&
3248 gst_structure_get_double (structure, "pointer_y", &pointer_y)) {
3249 gdouble new_pointer_x, new_pointer_y;
3250 GstEvent *new_event;
3252 new_pointer_x = pointer_x + video_box->box_left;
3253 new_pointer_y = pointer_y + video_box->box_top;
/* Events are immutable once created: build a replacement with the
 * translated coordinates and drop the original. */
3255 new_structure = gst_structure_copy (structure);
3256 gst_structure_set (new_structure,
3257 "pointer_x", G_TYPE_DOUBLE, (gdouble) (new_pointer_x),
3258 "pointer_y", G_TYPE_DOUBLE, (gdouble) (new_pointer_y), NULL);
3260 new_event = gst_event_new_navigation (new_structure);
3261 gst_event_unref (event);
3264 GST_WARNING_OBJECT (video_box, "Failed to read navigation event");
3268 GST_OBJECT_UNLOCK (video_box);
3270 return GST_BASE_TRANSFORM_CLASS (parent_class)->src_event (trans, event);
/* gst_video_box_process:
 * Core frame worker.  Scales the 0.0-1.0 alpha properties to 0-255,
 * computes the visible crop rectangle from the per-side box values
 * (negative sides add borders, so they don't reduce the crop size), then
 * either: fills the whole output (nothing visible), copies 1:1 (no
 * boxing), or fills the borders and copies the cropped region at the
 * proper offset.  NOTE(review): the lines computing src_x/src_y/
 * dest_x/dest_y from the box values (orig. 3331-3346) are missing from
 * this extraction. */
3274 gst_video_box_process (GstVideoBox * video_box, const guint8 * src,
3277 guint b_alpha = CLAMP (video_box->border_alpha * 256, 0, 255);
3278 guint i_alpha = CLAMP (video_box->alpha * 256, 0, 255);
3279 GstVideoBoxFill fill_type = video_box->fill_type;
3280 gint br, bl, bt, bb, crop_w, crop_h;
3285 br = video_box->box_right;
3286 bl = video_box->box_left;
3287 bt = video_box->box_top;
3288 bb = video_box->box_bottom;
3290 if (br >= 0 && bl >= 0) {
3291 crop_w = video_box->in_width - (br + bl);
3292 } else if (br >= 0 && bl < 0) {
3293 crop_w = video_box->in_width - (br);
3294 } else if (br < 0 && bl >= 0) {
3295 crop_w = video_box->in_width - (bl);
3296 } else if (br < 0 && bl < 0) {
3297 crop_w = video_box->in_width;
3300 if (bb >= 0 && bt >= 0) {
3301 crop_h = video_box->in_height - (bb + bt);
3302 } else if (bb >= 0 && bt < 0) {
3303 crop_h = video_box->in_height - (bb);
3304 } else if (bb < 0 && bt >= 0) {
3305 crop_h = video_box->in_height - (bt);
3306 } else if (bb < 0 && bt < 0) {
3307 crop_h = video_box->in_height;
3310 GST_DEBUG_OBJECT (video_box, "Borders are: L:%d, R:%d, T:%d, B:%d", bl, br,
3312 GST_DEBUG_OBJECT (video_box, "Alpha value is: %u (frame) %u (border)",
/* Everything cropped away: output is pure border fill. */
3315 if (crop_h < 0 || crop_w < 0) {
3316 video_box->fill (fill_type, b_alpha, video_box->out_format, dest,
3317 video_box->out_sdtv, video_box->out_width, video_box->out_height);
3318 } else if (bb == 0 && bt == 0 && br == 0 && bl == 0) {
3319 video_box->copy (i_alpha, video_box->out_format, dest, video_box->out_sdtv,
3320 video_box->out_width, video_box->out_height, 0, 0, video_box->in_format,
3321 src, video_box->in_sdtv, video_box->in_width, video_box->in_height, 0,
3324 gint src_x = 0, src_y = 0;
3325 gint dest_x = 0, dest_y = 0;
3327 /* Fill everything if a border should be added somewhere */
3328 if (bt < 0 || bb < 0 || br < 0 || bl < 0)
3329 video_box->fill (fill_type, b_alpha, video_box->out_format, dest,
3330 video_box->out_sdtv, video_box->out_width, video_box->out_height);
3347 video_box->copy (i_alpha, video_box->out_format, dest, video_box->out_sdtv,
3348 video_box->out_width, video_box->out_height, dest_x, dest_y,
3349 video_box->in_format, src, video_box->in_sdtv, video_box->in_width,
3350 video_box->in_height, src_x, src_y, crop_w, crop_h);
3353 GST_LOG_OBJECT (video_box, "image created");
/* gst_video_box_before_transform:
 * basetransform vfunc: converts the buffer timestamp to stream time and
 * syncs the GstController-managed properties (left/right/top/bottom/
 * alpha/...) to that time before the frame is processed. */
3357 gst_video_box_before_transform (GstBaseTransform * trans, GstBuffer * in)
3359 GstVideoBox *video_box = GST_VIDEO_BOX (trans);
3360 GstClockTime timestamp, stream_time;
3362 timestamp = GST_BUFFER_TIMESTAMP (in);
3364 gst_segment_to_stream_time (&trans->segment, GST_FORMAT_TIME, timestamp);
3366 GST_DEBUG_OBJECT (video_box, "sync to %" GST_TIME_FORMAT,
3367 GST_TIME_ARGS (timestamp));
3369 if (GST_CLOCK_TIME_IS_VALID (stream_time))
3370 gst_object_sync_values (G_OBJECT (video_box), stream_time);
3373 static GstFlowReturn
/* gst_video_box_transform:
 * basetransform vfunc: thin wrapper that grabs the raw in/out buffer data
 * and runs gst_video_box_process under the property mutex, so concurrent
 * property changes can't race the frame processing. */
3374 gst_video_box_transform (GstBaseTransform * trans, GstBuffer * in,
3377 GstVideoBox *video_box = GST_VIDEO_BOX (trans);
3378 const guint8 *indata;
3381 indata = GST_BUFFER_DATA (in);
3382 outdata = GST_BUFFER_DATA (out);
3384 g_mutex_lock (video_box->mutex);
3385 gst_video_box_process (video_box, indata, outdata);
3386 g_mutex_unlock (video_box->mutex);
3390 /* FIXME: 0.11 merge with videocrop plugin */
/* plugin_init:
 * Plugin entry point: initializes the controller library (needed for the
 * controllable properties), sets up the debug category and registers the
 * "videobox" element. */
3392 plugin_init (GstPlugin * plugin)
3394 gst_controller_init (NULL, NULL);
3396 GST_DEBUG_CATEGORY_INIT (videobox_debug, "videobox", 0,
3397 "Resizes a video by adding borders or cropping");
3399 return gst_element_register (plugin, "videobox", GST_RANK_NONE,
3400 GST_TYPE_VIDEO_BOX);
/* Standard GStreamer plugin descriptor boilerplate. */
3403 GST_PLUGIN_DEFINE (GST_VERSION_MAJOR,
3406 "resizes a video by adding borders or cropping",
3407 plugin_init, VERSION, GST_LICENSE, GST_PACKAGE_NAME, GST_PACKAGE_ORIGIN)