2 * Copyright (C) 1999 Erik Walthinsen <omega@cse.ogi.edu>
3 * Copyright (C) 2006 Tim-Philipp Müller <tim centricular net>
4 * Copyright (C) 2010 Sebastian Dröge <sebastian.droege@collabora.co.uk>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Library General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Library General Public License for more details.
16 * You should have received a copy of the GNU Library General Public
17 * License along with this library; if not, write to the
18 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
19 * Boston, MA 02110-1301, USA.
22 * SECTION:element-videobox
24 * @see_also: #GstVideoCrop
26 * This plugin crops or enlarges the image. It takes 4 values as input, a
27 * top, bottom, left and right offset. Positive values will crop that much
28 * pixels from the respective border of the image, negative values will add
29 * that much pixels. When pixels are added, you can specify their color.
30 * Some predefined colors are usable with an enum property.
32 * The plugin is alpha channel aware and will try to negotiate with a format
33 * that supports alpha channels first. When alpha channel is active two
34 * other properties, alpha and border_alpha can be used to set the alpha
35 * values of the inner picture and the border respectively. An alpha value of
36 * 0.0 means total transparency, 1.0 is opaque.
38 * The videobox plugin has many uses such as doing a mosaic of pictures,
39 * letterboxing video, cutting out pieces of video, picture in picture, etc.
41 * Setting autocrop to true changes the behavior of the plugin so that
42 * caps determine crop properties rather than the other way around: given
43 * input and output dimensions, the crop values are selected so that the
44 * smaller frame is effectively centered in the larger frame. This
45 * involves either cropping or padding.
47 * If you use autocrop there is little point in setting the other
48 * properties manually because they will be overridden if the caps change,
49 * but nothing stops you from doing so.
53 * gst-launch-1.0 videotestsrc ! videobox autocrop=true ! \
54 * "video/x-raw, width=600, height=400" ! videoconvert ! ximagesink
62 #include "gstvideobox.h"
63 #include "gstvideoboxorc.h"
68 GST_DEBUG_CATEGORY_STATIC (videobox_debug);
69 #define GST_CAT_DEFAULT videobox_debug
71 /* From videotestsrc.c */
72 static const guint8 yuv_sdtv_colors_Y[VIDEO_BOX_FILL_LAST] =
73 { 16, 145, 41, 81, 210, 235 };
74 static const guint8 yuv_sdtv_colors_U[VIDEO_BOX_FILL_LAST] =
75 { 128, 54, 240, 90, 16, 128 };
76 static const guint8 yuv_sdtv_colors_V[VIDEO_BOX_FILL_LAST] =
77 { 128, 34, 110, 240, 146, 128 };
79 static const guint8 yuv_hdtv_colors_Y[VIDEO_BOX_FILL_LAST] =
80 { 16, 173, 32, 63, 219, 235 };
81 static const guint8 yuv_hdtv_colors_U[VIDEO_BOX_FILL_LAST] =
82 { 128, 42, 240, 102, 16, 128 };
83 static const guint8 yuv_hdtv_colors_V[VIDEO_BOX_FILL_LAST] =
84 { 128, 26, 118, 240, 138, 128 };
86 static const guint8 rgb_colors_R[VIDEO_BOX_FILL_LAST] =
87 { 0, 0, 0, 255, 255, 255 };
88 static const guint8 rgb_colors_G[VIDEO_BOX_FILL_LAST] =
89 { 0, 255, 0, 0, 255, 255 };
90 static const guint8 rgb_colors_B[VIDEO_BOX_FILL_LAST] =
91 { 0, 0, 255, 0, 0, 255 };
93 /* Generated by -bad/ext/cog/generate_tables */
94 static const int cog_ycbcr_to_rgb_matrix_8bit_hdtv[] = {
96 298, -55, -136, 19681,
100 static const int cog_ycbcr_to_rgb_matrix_8bit_sdtv[] = {
102 298, -100, -208, 34707,
106 static const gint cog_rgb_to_ycbcr_matrix_8bit_hdtv[] = {
108 -26, -87, 112, 32768,
109 112, -102, -10, 32768,
112 static const gint cog_rgb_to_ycbcr_matrix_8bit_sdtv[] = {
114 -38, -74, 112, 32768,
115 112, -94, -18, 32768,
118 static const gint cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit[] = {
119 256, -30, -53, 10600,
124 static const gint cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit[] = {
130 static const gint cog_identity_matrix_8bit[] = {
136 #define APPLY_MATRIX(m,o,v1,v2,v3) ((m[o*4] * v1 + m[o*4+1] * v2 + m[o*4+2] * v3 + m[o*4+3]) >> 8)
139 fill_ayuv (GstVideoBoxFill fill_type, guint b_alpha,
140 GstVideoFrame * frame, gboolean sdtv)
147 width = GST_VIDEO_FRAME_WIDTH (frame);
148 height = GST_VIDEO_FRAME_HEIGHT (frame);
150 b_alpha = MIN (b_alpha, 255);
153 empty_pixel = GUINT32_FROM_BE ((b_alpha << 24) |
154 (yuv_sdtv_colors_Y[fill_type] << 16) |
155 (yuv_sdtv_colors_U[fill_type] << 8) | yuv_sdtv_colors_V[fill_type]);
157 empty_pixel = GUINT32_FROM_BE ((b_alpha << 24) |
158 (yuv_hdtv_colors_Y[fill_type] << 16) |
159 (yuv_hdtv_colors_U[fill_type] << 8) | yuv_hdtv_colors_V[fill_type]);
161 dest = GST_VIDEO_FRAME_PLANE_DATA (frame, 0);
162 stride = GST_VIDEO_FRAME_PLANE_STRIDE (frame, 0);
164 if (G_LIKELY (stride == 4 * width))
165 video_box_orc_splat_u32 ((guint32 *) dest, empty_pixel, width * height);
167 for (; height; --height) {
168 video_box_orc_splat_u32 ((guint32 *) dest, empty_pixel, width);
175 copy_ayuv_ayuv (guint i_alpha, GstVideoFrame * dest_frame,
176 gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src_frame,
177 gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
184 src_stride = GST_VIDEO_FRAME_PLANE_STRIDE (src_frame, 0);
185 dest_stride = GST_VIDEO_FRAME_PLANE_STRIDE (dest_frame, 0);
187 src = GST_VIDEO_FRAME_PLANE_DATA (src_frame, 0);
188 dest = GST_VIDEO_FRAME_PLANE_DATA (dest_frame, 0);
190 dest = dest + dest_y * dest_stride + dest_x * 4;
191 src = src + src_y * src_stride + src_x * 4;
195 if (dest_sdtv != src_sdtv) {
200 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
201 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
203 for (i = 0; i < h; i++) {
204 for (j = 0; j < w; j += 4) {
206 dest[j] = (src[j] * i_alpha) >> 8;
210 dest[j + 1] = APPLY_MATRIX (matrix, 0, y, u, v);
211 dest[j + 2] = APPLY_MATRIX (matrix, 1, y, u, v);
212 dest[j + 3] = APPLY_MATRIX (matrix, 2, y, u, v);
218 for (i = 0; i < h; i++) {
219 for (j = 0; j < w; j += 4) {
221 dest[j] = (src[j] * i_alpha) >> 8;
222 dest[j + 1] = src[j + 1];
223 dest[j + 2] = src[j + 2];
224 dest[j + 3] = src[j + 3];
233 copy_ayuv_i420 (guint i_alpha, GstVideoFrame * dest_frame,
234 gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src_frame,
235 gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
238 guint8 *destY, *destY2, *destU, *destV;
239 gint dest_strideY, dest_strideU, dest_strideV;
248 gint dest_height, src_height, dest_width;
250 dest_height = GST_VIDEO_FRAME_HEIGHT (dest_frame);
251 dest_width = GST_VIDEO_FRAME_WIDTH (dest_frame);
252 src_height = GST_VIDEO_FRAME_HEIGHT (src_frame);
254 dest_strideY = GST_VIDEO_FRAME_COMP_STRIDE (dest_frame, 0);
255 dest_strideU = GST_VIDEO_FRAME_COMP_STRIDE (dest_frame, 1);
256 dest_strideV = GST_VIDEO_FRAME_COMP_STRIDE (dest_frame, 2);
258 src_stride = GST_VIDEO_FRAME_PLANE_STRIDE (src_frame, 0);
260 destY = GST_VIDEO_FRAME_COMP_DATA (dest_frame, 0);
261 destU = GST_VIDEO_FRAME_COMP_DATA (dest_frame, 1);
262 destV = GST_VIDEO_FRAME_COMP_DATA (dest_frame, 2);
264 destY = destY + dest_y * dest_strideY + dest_x;
265 destY2 = (dest_y < dest_height) ? destY + dest_strideY : destY;
266 destU = destU + (dest_y / 2) * dest_strideU + dest_x / 2;
267 destV = destV + (dest_y / 2) * dest_strideV + dest_x / 2;
269 src = GST_VIDEO_FRAME_PLANE_DATA (src_frame, 0);
270 src = src + src_y * src_stride + src_x * 4;
271 src2 = (src_y < src_height) ? src + src_stride : src;
276 if (src_sdtv != dest_sdtv)
278 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
279 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
281 memcpy (matrix, cog_identity_matrix_8bit, 12 * sizeof (gint));
283 /* 1. Handle the first destination scanline specially if it
284 * doesn't start at the macro pixel boundary, i.e. blend
285 * with the background! */
286 if (dest_y % 2 == 1) {
287 /* 1.1. Handle the first destination pixel if it doesn't
288 * start at the macro pixel boundary, i.e. blend with
290 if (dest_x % 2 == 1) {
295 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
297 CLAMP ((3 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
300 CLAMP ((3 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 4, 0,
310 /* 1.2. Copy all macro pixels from the source to the destination
311 * but blend with the background because we're only filling
312 * the lower part of the macro pixels. */
313 for (; j < w - 1; j += 2) {
314 y1 = src[4 * y_idx + 1];
315 y2 = src[4 * y_idx + 4 + 1];
317 u1 = src[4 * y_idx + 2];
318 u2 = src[4 * y_idx + 4 + 2];
320 v1 = src[4 * y_idx + 3];
321 v2 = src[4 * y_idx + 4 + 3];
323 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
324 destY[y_idx + 1] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
325 destU[uv_idx] = CLAMP (
326 (2 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
327 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
328 destV[uv_idx] = CLAMP (
329 (2 * destV[uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
330 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
336 /* 1.3. Now copy the last pixel if one exists and blend it
337 * with the background because we only fill part of
338 * the macro pixel. In case this is the last pixel of
339 * the destination we will a larger part. */
340 if (j == w - 1 && j == dest_width - 1) {
341 y1 = src[4 * y_idx + 1];
342 u1 = src[4 * y_idx + 2];
343 v1 = src[4 * y_idx + 3];
345 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
346 destU[uv_idx] = CLAMP (
347 (destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
348 destV[uv_idx] = CLAMP (
349 (destV[uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 2, 0, 255);
350 } else if (j == w - 1) {
351 y1 = src[4 * y_idx + 1];
352 u1 = src[4 * y_idx + 2];
353 v1 = src[4 * y_idx + 3];
355 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
356 destU[uv_idx] = CLAMP (
357 (3 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
360 CLAMP ((3 * destV[uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 4,
364 destY += dest_strideY;
365 destY2 += dest_strideY;
366 destU += dest_strideU;
367 destV += dest_strideV;
375 /* 2. Copy all macro pixel scanlines, the destination scanline
376 * now starts at macro pixel boundary. */
377 for (; i < h - 1; i += 2) {
378 /* 2.1. Handle the first destination pixel if it doesn't
379 * start at the macro pixel boundary, i.e. blend with
381 if (dest_x % 2 == 1) {
383 y2 = src2[4 * 0 + 1];
385 u2 = src2[4 * 0 + 2];
387 v2 = src2[4 * 0 + 3];
389 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
390 destY2[0] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
392 (2 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1,
393 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
395 (2 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1,
396 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
404 /* 2.2. Copy all macro pixels from the source to the destination.
405 * All pixels now start at macro pixel boundary, i.e. no
406 * blending with the background is necessary. */
407 for (; j < w - 1; j += 2) {
408 y1 = src[4 * y_idx + 1];
409 y2 = src[4 * y_idx + 4 + 1];
410 y3 = src2[4 * y_idx + 1];
411 y4 = src2[4 * y_idx + 4 + 1];
413 u1 = src[4 * y_idx + 2];
414 u2 = src[4 * y_idx + 4 + 2];
415 u3 = src2[4 * y_idx + 2];
416 u4 = src2[4 * y_idx + 4 + 2];
418 v1 = src[4 * y_idx + 3];
419 v2 = src[4 * y_idx + 4 + 3];
420 v3 = src2[4 * y_idx + 3];
421 v4 = src2[4 * y_idx + 4 + 3];
423 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
424 destY[y_idx + 1] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
425 destY2[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y3, u3, v3), 0, 255);
426 destY2[y_idx + 1] = CLAMP (APPLY_MATRIX (matrix, 0, y4, u4, v4), 0, 255);
428 destU[uv_idx] = CLAMP (
429 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 1, y2,
430 u2, v2) + APPLY_MATRIX (matrix, 1, y3, u3,
431 v3) + APPLY_MATRIX (matrix, 1, y4, u4, v4)) / 4, 0, 255);
432 destV[uv_idx] = CLAMP (
433 (APPLY_MATRIX (matrix, 2, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
434 u2, v2) + APPLY_MATRIX (matrix, 2, y3, u3,
435 v3) + APPLY_MATRIX (matrix, 2, y4, u4, v4)) / 4, 0, 255);
441 /* 2.3. Now copy the last pixel if one exists and blend it
442 * with the background because we only fill part of
443 * the macro pixel. In case this is the last pixel of
444 * the destination we will a larger part. */
445 if (j == w - 1 && j == dest_width - 1) {
446 y1 = src[4 * y_idx + 1];
447 y2 = src2[4 * y_idx + 1];
449 u1 = src[4 * y_idx + 2];
450 u2 = src2[4 * y_idx + 2];
452 v1 = src[4 * y_idx + 3];
453 v2 = src2[4 * y_idx + 3];
455 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
456 destY2[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
457 destU[uv_idx] = CLAMP (
458 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
459 u2, v2)) / 2, 0, 255);
460 destV[uv_idx] = CLAMP (
461 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
462 u2, v2)) / 2, 0, 255);
463 } else if (j == w - 1) {
464 y1 = src[4 * y_idx + 1];
465 y2 = src2[4 * y_idx + 1];
467 u1 = src[4 * y_idx + 2];
468 u2 = src2[4 * y_idx + 2];
470 v1 = src[4 * y_idx + 3];
471 v2 = src2[4 * y_idx + 3];
473 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
474 destY2[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
475 destU[uv_idx] = CLAMP (
476 (2 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
477 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
478 destV[uv_idx] = CLAMP (
479 (2 * destV[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
480 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
483 destY += 2 * dest_strideY;
484 destY2 += 2 * dest_strideY;
485 destU += dest_strideU;
486 destV += dest_strideV;
487 src += 2 * src_stride;
488 src2 += 2 * src_stride;
491 /* 3. Handle the last scanline if one exists. This again
492 * doesn't start at macro pixel boundary but should
493 * only fill the upper part of the macro pixels. */
494 if (i == h - 1 && i == dest_height - 1) {
495 /* 3.1. Handle the first destination pixel if it doesn't
496 * start at the macro pixel boundary, i.e. blend with
498 if (dest_x % 2 == 1) {
503 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
505 CLAMP ((destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
507 CLAMP ((destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 2, 0, 255);
516 /* 3.2. Copy all macro pixels from the source to the destination
517 * but blend with the background because we're only filling
518 * the upper part of the macro pixels. */
519 for (; j < w - 1; j += 2) {
520 y1 = src[4 * y_idx + 1];
521 y2 = src[4 * y_idx + 4 + 1];
523 u1 = src[4 * y_idx + 2];
524 u2 = src[4 * y_idx + 4 + 2];
526 v1 = src[4 * y_idx + 3];
527 v2 = src[4 * y_idx + 4 + 3];
529 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
530 destY[y_idx + 1] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
532 destU[uv_idx] = CLAMP (
533 (2 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
534 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
535 destV[uv_idx] = CLAMP (
536 (2 * destV[uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
537 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
543 /* 3.3. Now copy the last pixel if one exists and blend it
544 * with the background because we only fill part of
545 * the macro pixel. In case this is the last pixel of
546 * the destination we will a larger part. */
547 if (j == w - 1 && j == dest_width - 1) {
548 y1 = src[4 * y_idx + 1];
549 u1 = src[4 * y_idx + 2];
550 v1 = src[4 * y_idx + 3];
552 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
553 destU[uv_idx] = CLAMP (
554 (destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
555 destV[uv_idx] = CLAMP (
556 (destV[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
557 } else if (j == w - 1) {
558 y1 = src[4 * y_idx + 1];
559 u1 = src[4 * y_idx + 2];
560 v1 = src[4 * y_idx + 3];
562 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
563 destU[uv_idx] = CLAMP (
564 (3 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
567 CLAMP ((3 * destV[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4,
570 } else if (i == h - 1) {
571 /* 3.1. Handle the first destination pixel if it doesn't
572 * start at the macro pixel boundary, i.e. blend with
574 if (dest_x % 2 == 1) {
579 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
581 CLAMP ((3 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
584 CLAMP ((3 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 4, 0,
594 /* 3.2. Copy all macro pixels from the source to the destination
595 * but blend with the background because we're only filling
596 * the upper part of the macro pixels. */
597 for (; j < w - 1; j += 2) {
598 y1 = src[4 * y_idx + 1];
599 y2 = src[4 * y_idx + 4 + 1];
601 u1 = src[4 * y_idx + 2];
602 u2 = src[4 * y_idx + 4 + 2];
604 v1 = src[4 * y_idx + 3];
605 v2 = src[4 * y_idx + 4 + 3];
607 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
608 destY[y_idx + 1] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
610 destU[uv_idx] = CLAMP (
611 (2 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
612 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
613 destV[uv_idx] = CLAMP (
614 (2 * destV[uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
615 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
621 /* 3.3. Now copy the last pixel if one exists and blend it
622 * with the background because we only fill part of
623 * the macro pixel. In case this is the last pixel of
624 * the destination we will a larger part. */
625 if (j == w - 1 && j == dest_width - 1) {
626 y1 = src[4 * y_idx + 1];
627 u1 = src[4 * y_idx + 2];
628 v1 = src[4 * y_idx + 3];
630 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
631 destU[uv_idx] = CLAMP (
632 (destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
633 destV[uv_idx] = CLAMP (
634 (destV[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
635 } else if (j == w - 1) {
636 y1 = src[4 * y_idx + 1];
637 u1 = src[4 * y_idx + 2];
638 v1 = src[4 * y_idx + 3];
640 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
641 destU[uv_idx] = CLAMP (
642 (3 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
645 CLAMP ((3 * destV[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4,
652 fill_planar_yuv (GstVideoBoxFill fill_type, guint b_alpha,
653 GstVideoFrame * frame, gboolean sdtv)
655 guint8 empty_pixel[3];
656 guint8 *destY, *destU, *destV;
657 gint strideY, strideU, strideV;
658 gint heightY, heightU, heightV;
659 gint widthY, widthU, widthV;
662 empty_pixel[0] = yuv_sdtv_colors_Y[fill_type];
663 empty_pixel[1] = yuv_sdtv_colors_U[fill_type];
664 empty_pixel[2] = yuv_sdtv_colors_V[fill_type];
666 empty_pixel[0] = yuv_hdtv_colors_Y[fill_type];
667 empty_pixel[1] = yuv_hdtv_colors_U[fill_type];
668 empty_pixel[2] = yuv_hdtv_colors_V[fill_type];
671 strideY = GST_VIDEO_FRAME_COMP_STRIDE (frame, 0);
672 strideU = GST_VIDEO_FRAME_COMP_STRIDE (frame, 1);
673 strideV = GST_VIDEO_FRAME_COMP_STRIDE (frame, 2);
675 destY = GST_VIDEO_FRAME_COMP_DATA (frame, 0);
676 destU = GST_VIDEO_FRAME_COMP_DATA (frame, 1);
677 destV = GST_VIDEO_FRAME_COMP_DATA (frame, 2);
679 widthY = GST_VIDEO_FRAME_COMP_WIDTH (frame, 0);
680 widthU = GST_VIDEO_FRAME_COMP_WIDTH (frame, 1);
681 widthV = GST_VIDEO_FRAME_COMP_WIDTH (frame, 2);
683 heightY = GST_VIDEO_FRAME_COMP_HEIGHT (frame, 0);
684 heightU = GST_VIDEO_FRAME_COMP_HEIGHT (frame, 1);
685 heightV = GST_VIDEO_FRAME_COMP_HEIGHT (frame, 2);
687 if (strideY == widthY) {
688 memset (destY, empty_pixel[0], strideY * heightY);
689 } else if (heightY) {
690 for (; heightY; --heightY) {
691 memset (destY, empty_pixel[0], widthY);
695 if (strideU == widthU) {
696 memset (destU, empty_pixel[1], strideU * heightU);
697 } else if (heightU) {
698 for (; heightU; --heightU) {
699 memset (destU, empty_pixel[1], widthU);
703 if (strideV == widthV) {
704 memset (destV, empty_pixel[2], strideV * heightV);
705 } else if (heightV) {
706 for (; heightV; --heightV) {
707 memset (destV, empty_pixel[2], widthV);
714 copy_y444_y444 (guint i_alpha, GstVideoFrame * dest,
715 gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src,
716 gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
719 guint8 *destY, *destU, *destV;
720 const guint8 *srcY, *srcU, *srcV;
721 gint dest_strideY, dest_strideU, dest_strideV;
722 gint src_strideY, src_strideU, src_strideV;
724 dest_strideY = GST_VIDEO_FRAME_COMP_STRIDE (dest, 0);
725 dest_strideU = GST_VIDEO_FRAME_COMP_STRIDE (dest, 1);
726 dest_strideV = GST_VIDEO_FRAME_COMP_STRIDE (dest, 2);
728 src_strideY = GST_VIDEO_FRAME_COMP_STRIDE (src, 0);
729 src_strideU = GST_VIDEO_FRAME_COMP_STRIDE (src, 1);
730 src_strideV = GST_VIDEO_FRAME_COMP_STRIDE (src, 2);
732 destY = GST_VIDEO_FRAME_COMP_DATA (dest, 0);
733 destU = GST_VIDEO_FRAME_COMP_DATA (dest, 1);
734 destV = GST_VIDEO_FRAME_COMP_DATA (dest, 2);
736 srcY = GST_VIDEO_FRAME_COMP_DATA (src, 0);
737 srcU = GST_VIDEO_FRAME_COMP_DATA (src, 1);
738 srcV = GST_VIDEO_FRAME_COMP_DATA (src, 2);
740 destY = destY + dest_y * dest_strideY + dest_x;
741 destU = destU + dest_y * dest_strideU + dest_x;
742 destV = destV + dest_y * dest_strideV + dest_x;
744 srcY = srcY + src_y * src_strideY + src_x;
745 srcU = srcU + src_y * src_strideU + src_x;
746 srcV = srcV + src_y * src_strideV + src_x;
748 if (src_sdtv != dest_sdtv) {
753 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
754 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
756 for (i = 0; i < h; i++) {
757 for (j = 0; j < w; j++) {
758 y = APPLY_MATRIX (matrix, 0, srcY[j], srcU[j], srcV[j]);
759 u = APPLY_MATRIX (matrix, 1, srcY[j], srcU[j], srcV[j]);
760 v = APPLY_MATRIX (matrix, 2, srcY[j], srcU[j], srcV[j]);
766 destY += dest_strideY;
767 destU += dest_strideU;
768 destV += dest_strideV;
775 for (i = 0; i < h; i++) {
776 memcpy (destY, srcY, w);
777 memcpy (destU, srcU, w);
778 memcpy (destV, srcV, w);
780 destY += dest_strideY;
781 destU += dest_strideU;
782 destV += dest_strideV;
792 copy_y42b_y42b (guint i_alpha, GstVideoFrame * dest,
793 gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src,
794 gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
797 guint8 *destY, *destU, *destV;
798 const guint8 *srcY, *srcU, *srcV;
799 gint dest_strideY, dest_strideU, dest_strideV;
800 gint src_strideY, src_strideU, src_strideV;
801 gint src_y_idx, src_uv_idx;
802 gint dest_y_idx, dest_uv_idx;
809 dest_width = GST_VIDEO_FRAME_WIDTH (dest);
811 dest_strideY = GST_VIDEO_FRAME_COMP_STRIDE (dest, 0);
812 dest_strideU = GST_VIDEO_FRAME_COMP_STRIDE (dest, 1);
813 dest_strideV = GST_VIDEO_FRAME_COMP_STRIDE (dest, 2);
815 src_strideY = GST_VIDEO_FRAME_COMP_STRIDE (src, 0);
816 src_strideU = GST_VIDEO_FRAME_COMP_STRIDE (src, 1);
817 src_strideV = GST_VIDEO_FRAME_COMP_STRIDE (src, 2);
819 destY = GST_VIDEO_FRAME_COMP_DATA (dest, 0);
820 destU = GST_VIDEO_FRAME_COMP_DATA (dest, 1);
821 destV = GST_VIDEO_FRAME_COMP_DATA (dest, 2);
823 srcY = GST_VIDEO_FRAME_COMP_DATA (src, 0);
824 srcU = GST_VIDEO_FRAME_COMP_DATA (src, 1);
825 srcV = GST_VIDEO_FRAME_COMP_DATA (src, 2);
827 destY = destY + dest_y * dest_strideY + dest_x;
828 destU = destU + dest_y * dest_strideU + dest_x / 2;
829 destV = destV + dest_y * dest_strideV + dest_x / 2;
831 srcY = srcY + src_y * src_strideY + src_x;
832 srcU = srcU + src_y * src_strideU + src_x / 2;
833 srcV = srcV + src_y * src_strideV + src_x / 2;
838 if (src_sdtv != dest_sdtv)
840 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
841 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
843 memcpy (matrix, cog_identity_matrix_8bit, 12 * sizeof (gint));
845 /* 1. Copy all macro pixel scanlines, the destination scanline
846 * now starts at macro pixel boundary. */
847 for (i = dest_y; i < h; i++) {
848 /* 1.1. Handle the first destination pixel if it doesn't
849 * start at the macro pixel boundary, i.e. blend with
851 if (dest_x % 2 == 1) {
856 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
858 (destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
860 (destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 2, 0, 255);
862 src_y_idx = dest_y_idx = dest_uv_idx = 1;
863 src_uv_idx = (src_x % 2) + 1;
866 src_y_idx = dest_y_idx = dest_uv_idx = 0;
867 src_uv_idx = (src_x % 2);
870 /* 1.2. Copy all macro pixels from the source to the destination.
871 * All pixels now start at macro pixel boundary, i.e. no
872 * blending with the background is necessary. */
873 for (; j < w - 1; j += 2) {
874 y1 = srcY[src_y_idx];
875 y2 = srcY[src_y_idx + 1];
877 u1 = srcU[src_uv_idx / 2];
878 v1 = srcV[src_uv_idx / 2];
880 u2 = srcU[src_uv_idx / 2];
881 v2 = srcV[src_uv_idx / 2];
884 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
885 destY[dest_y_idx + 1] =
886 CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
888 destU[dest_uv_idx] = CLAMP (
889 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 1, y2,
890 u2, v2)) / 2, 0, 255);
891 destV[dest_uv_idx] = CLAMP (
892 (APPLY_MATRIX (matrix, 2, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
893 u2, v2)) / 2, 0, 255);
900 /* 1.3. Now copy the last pixel if one exists and blend it
901 * with the background because we only fill part of
902 * the macro pixel. In case this is the last pixel of
903 * the destination we will a larger part. */
904 if (j == w - 1 && j == dest_width - 1) {
905 y1 = srcY[src_y_idx];
906 u1 = srcU[src_uv_idx / 2];
907 v1 = srcV[src_uv_idx / 2];
909 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
910 destU[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
911 destV[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
912 } else if (j == w - 1) {
913 y1 = srcY[src_y_idx];
914 u1 = srcU[src_uv_idx / 2];
915 v1 = srcV[src_uv_idx / 2];
917 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
918 destU[dest_uv_idx] = CLAMP (
919 (destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
921 destV[dest_uv_idx] = CLAMP (
922 (destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
926 destY += dest_strideY;
927 destU += dest_strideU;
928 destV += dest_strideV;
937 copy_y41b_y41b (guint i_alpha, GstVideoFrame * dest,
938 gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src,
939 gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
942 guint8 *destY, *destU, *destV;
943 const guint8 *srcY, *srcU, *srcV;
944 gint dest_strideY, dest_strideU, dest_strideV;
945 gint src_strideY, src_strideU, src_strideV;
946 gint src_y_idx, src_uv_idx;
947 gint dest_y_idx, dest_uv_idx;
954 dest_width = GST_VIDEO_FRAME_WIDTH (dest);
956 dest_strideY = GST_VIDEO_FRAME_COMP_STRIDE (dest, 0);
957 dest_strideU = GST_VIDEO_FRAME_COMP_STRIDE (dest, 1);
958 dest_strideV = GST_VIDEO_FRAME_COMP_STRIDE (dest, 2);
960 src_strideY = GST_VIDEO_FRAME_COMP_STRIDE (src, 0);
961 src_strideU = GST_VIDEO_FRAME_COMP_STRIDE (src, 1);
962 src_strideV = GST_VIDEO_FRAME_COMP_STRIDE (src, 2);
964 destY = GST_VIDEO_FRAME_COMP_DATA (dest, 0);
965 destU = GST_VIDEO_FRAME_COMP_DATA (dest, 1);
966 destV = GST_VIDEO_FRAME_COMP_DATA (dest, 2);
968 srcY = GST_VIDEO_FRAME_COMP_DATA (src, 0);
969 srcU = GST_VIDEO_FRAME_COMP_DATA (src, 1);
970 srcV = GST_VIDEO_FRAME_COMP_DATA (src, 2);
972 destY = destY + dest_y * dest_strideY + dest_x;
973 destU = destU + dest_y * dest_strideU + dest_x / 4;
974 destV = destV + dest_y * dest_strideV + dest_x / 4;
976 srcY = srcY + src_y * src_strideY + src_x;
977 srcU = srcU + src_y * src_strideU + src_x / 4;
978 srcV = srcV + src_y * src_strideV + src_x / 4;
983 if (src_sdtv != dest_sdtv)
985 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
986 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
988 memcpy (matrix, cog_identity_matrix_8bit, 12 * sizeof (gint));
990 /* 1. Copy all macro pixel scanlines, the destination scanline
991 * now starts at macro pixel boundary. */
992 for (i = dest_y; i < h; i++) {
993 /* 1.1. Handle the first destination pixel if it doesn't
994 * start at the macro pixel boundary, i.e. blend with
996 if (dest_x % 4 == 1) {
1003 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1004 destY[1] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u1, v1), 0, 255);
1005 destY[2] = CLAMP (APPLY_MATRIX (matrix, 0, y3, u1, v1), 0, 255);
1008 (destU[0] + APPLY_MATRIX (matrix, 1, y1, u1,
1009 v1) + APPLY_MATRIX (matrix, 1, y2, u1,
1010 v1) + APPLY_MATRIX (matrix, 1, y3, u1, v1)) / 4, 0, 255);
1012 CLAMP ((destV[0] + APPLY_MATRIX (matrix, 2, y1, u1,
1013 v1) + APPLY_MATRIX (matrix, 2, y2, u1,
1014 v1) + APPLY_MATRIX (matrix, 2, y3, u1, v1)) / 4, 0, 255);
1017 src_y_idx = dest_y_idx = 3;
1019 src_uv_idx = (src_x % 4) + 3;
1020 } else if (dest_x % 4 == 2) {
1026 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1027 destY[1] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u1, v1), 0, 255);
1030 (2 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1,
1031 v1) + APPLY_MATRIX (matrix, 1, y2, u1, v1)) / 4, 0, 255);
1033 CLAMP ((2 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1,
1034 v1) + APPLY_MATRIX (matrix, 2, y2, u1, v1)) / 4, 0, 255);
1037 src_y_idx = dest_y_idx = 2;
1039 src_uv_idx = (src_x % 4) + 2;
1040 } else if (dest_x % 4 == 3) {
1045 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1048 (3 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0, 255);
1050 (3 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 4, 0, 255);
1053 src_y_idx = dest_y_idx = 1;
1055 src_uv_idx = (src_x % 4) + 1;
1058 src_y_idx = dest_y_idx = dest_uv_idx = 0;
1059 src_uv_idx = (src_x % 4);
1062 /* 1.2. Copy all macro pixels from the source to the destination.
1063 * All pixels now start at macro pixel boundary, i.e. no
1064 * blending with the background is necessary. */
1065 for (; j < w - 3; j += 4) {
1066 y1 = srcY[src_y_idx];
1067 y2 = srcY[src_y_idx + 1];
1068 y3 = srcY[src_y_idx + 2];
1069 y4 = srcY[src_y_idx + 3];
1071 u1 = srcU[src_uv_idx / 4];
1072 v1 = srcV[src_uv_idx / 4];
1074 u2 = srcU[src_uv_idx / 4];
1075 v2 = srcV[src_uv_idx / 4];
1077 u3 = srcU[src_uv_idx / 4];
1078 v3 = srcV[src_uv_idx / 4];
1080 u4 = srcU[src_uv_idx / 4];
1081 v4 = srcV[src_uv_idx / 4];
1084 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1085 destY[dest_y_idx + 1] =
1086 CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
1087 destY[dest_y_idx + 2] =
1088 CLAMP (APPLY_MATRIX (matrix, 0, y3, u3, v3), 0, 255);
1089 destY[dest_y_idx + 3] =
1090 CLAMP (APPLY_MATRIX (matrix, 0, y4, u4, v4), 0, 255);
1092 destU[dest_uv_idx] = CLAMP (
1093 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 1, y2,
1094 u2, v2) + APPLY_MATRIX (matrix, 1, y3, u3,
1095 v3) + APPLY_MATRIX (matrix, 1, y4, u4, v4)) / 4, 0, 255);
1096 destV[dest_uv_idx] =
1097 CLAMP ((APPLY_MATRIX (matrix, 2, y1, u1, v1) + APPLY_MATRIX (matrix,
1098 2, y2, u2, v2) + APPLY_MATRIX (matrix, 2, y3, u3,
1099 v3) + APPLY_MATRIX (matrix, 2, y4, u4, v4)) / 4, 0, 255);
1106 /* 1.3. Now copy the last pixel if one exists and blend it
1107 * with the background because we only fill part of
1108 * the macro pixel. In case this is the last pixel of
1109 * the destination we will a larger part. */
1110 if (j == w - 1 && j == dest_width - 1) {
1111 y1 = srcY[src_y_idx];
1112 u1 = srcU[src_uv_idx / 4];
1113 v1 = srcV[src_uv_idx / 4];
1115 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1116 destU[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
1117 destV[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
1118 } else if (j == w - 1) {
1119 y1 = srcY[src_y_idx];
1120 u1 = srcU[src_uv_idx / 4];
1121 v1 = srcV[src_uv_idx / 4];
1123 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1124 destU[dest_uv_idx] = CLAMP (
1125 (destU[dest_uv_idx] + 3 * APPLY_MATRIX (matrix, 1, y1, u1,
1127 destV[dest_uv_idx] = CLAMP (
1128 (destV[dest_uv_idx] + 3 * APPLY_MATRIX (matrix, 1, y1, u1,
1130 } else if (j == w - 2 && j == dest_width - 2) {
1131 y1 = srcY[src_y_idx];
1132 y2 = srcY[src_y_idx + 1];
1133 u1 = srcU[src_uv_idx / 4];
1134 v1 = srcV[src_uv_idx / 4];
1136 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1137 destY[dest_y_idx + 1] =
1138 CLAMP (APPLY_MATRIX (matrix, 0, y2, u1, v1), 0, 255);
1139 destU[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
1140 destV[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
1141 } else if (j == w - 2) {
1142 y1 = srcY[src_y_idx];
1143 y2 = srcY[src_y_idx + 1];
1144 u1 = srcU[src_uv_idx / 4];
1145 v1 = srcV[src_uv_idx / 4];
1147 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1148 destY[dest_y_idx + 1] =
1149 CLAMP (APPLY_MATRIX (matrix, 0, y2, u1, v1), 0, 255);
1150 destU[dest_uv_idx] =
1151 CLAMP ((destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1153 destV[dest_uv_idx] =
1154 CLAMP ((destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1156 } else if (j == w - 3 && j == dest_width - 3) {
1157 y1 = srcY[src_y_idx];
1158 y2 = srcY[src_y_idx + 1];
1159 y3 = srcY[src_y_idx + 2];
1160 u1 = srcU[src_uv_idx / 4];
1161 v1 = srcV[src_uv_idx / 4];
1163 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1164 destY[dest_y_idx + 1] =
1165 CLAMP (APPLY_MATRIX (matrix, 0, y2, u1, v1), 0, 255);
1166 destY[dest_y_idx + 2] =
1167 CLAMP (APPLY_MATRIX (matrix, 0, y3, u1, v1), 0, 255);
1168 destU[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
1169 destV[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
1170 } else if (j == w - 3) {
1171 y1 = srcY[src_y_idx];
1172 y2 = srcY[src_y_idx + 1];
1173 y3 = srcY[src_y_idx + 2];
1174 u1 = srcU[src_uv_idx / 4];
1175 v1 = srcV[src_uv_idx / 4];
1177 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1178 destY[dest_y_idx + 1] =
1179 CLAMP (APPLY_MATRIX (matrix, 0, y2, u1, v1), 0, 255);
1180 destY[dest_y_idx + 2] =
1181 CLAMP (APPLY_MATRIX (matrix, 0, y3, u1, v1), 0, 255);
1182 destU[dest_uv_idx] =
1183 CLAMP ((3 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1185 destV[dest_uv_idx] =
1186 CLAMP ((3 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1190 destY += dest_strideY;
1191 destU += dest_strideU;
1192 destV += dest_strideV;
1193 srcY += src_strideY;
1194 srcU += src_strideU;
1195 srcV += src_strideV;
/* copy_i420_i420:
 * Copy a w x h region of an I420 source frame into an I420 destination
 * frame at (dest_x, dest_y), converting between SDTV and HDTV YCbCr via
 * APPLY_MATRIX when the two colorimetries differ (identity matrix
 * otherwise).  I420 subsamples chroma 2x2, so partial macro pixels at the
 * region edges are blended with the chroma already in the destination.
 * NOTE(review): this listing appears to have lost lines in extraction
 * (declarations, braces, some statements); the code below is kept
 * byte-identical to the file as present.
 */
1200 copy_i420_i420 (guint i_alpha, GstVideoFrame * dest,
1201 gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src,
1202 gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
1205 guint8 *destY, *destU, *destV;
1206 const guint8 *srcY, *srcU, *srcV;
1208 const guint8 *srcY2, *srcU2, *srcV2;
1209 gint dest_strideY, dest_strideU, dest_strideV;
1210 gint src_strideY, src_strideU, src_strideV;
1211 gint src_y_idx, src_uv_idx;
1212 gint dest_y_idx, dest_uv_idx;
1214 gint y1, y2, y3, y4;
1215 gint u1, u2, u3, u4;
1216 gint v1, v2, v3, v4;
1217 gint dest_width, dest_height;
1219 dest_width = GST_VIDEO_FRAME_WIDTH (dest);
1220 dest_height = GST_VIDEO_FRAME_HEIGHT (dest);
1222 dest_strideY = GST_VIDEO_FRAME_COMP_STRIDE (dest, 0);
1223 dest_strideU = GST_VIDEO_FRAME_COMP_STRIDE (dest, 1);
1224 dest_strideV = GST_VIDEO_FRAME_COMP_STRIDE (dest, 2);
1226 src_strideY = GST_VIDEO_FRAME_COMP_STRIDE (src, 0);
1227 src_strideU = GST_VIDEO_FRAME_COMP_STRIDE (src, 1);
1228 src_strideV = GST_VIDEO_FRAME_COMP_STRIDE (src, 2);
1230 destY = GST_VIDEO_FRAME_COMP_DATA (dest, 0);
1231 destU = GST_VIDEO_FRAME_COMP_DATA (dest, 1);
1232 destV = GST_VIDEO_FRAME_COMP_DATA (dest, 2);
1234 srcY = GST_VIDEO_FRAME_COMP_DATA (src, 0);
1235 srcU = GST_VIDEO_FRAME_COMP_DATA (src, 1);
1236 srcV = GST_VIDEO_FRAME_COMP_DATA (src, 2);
/* Advance the plane pointers to the top-left corner of the region;
 * chroma planes use half coordinates because of the 2x2 subsampling. */
1238 destY = destY + dest_y * dest_strideY + dest_x;
1239 destU = destU + (dest_y / 2) * dest_strideU + dest_x / 2;
1240 destV = destV + (dest_y / 2) * dest_strideV + dest_x / 2;
1242 srcY = srcY + src_y * src_strideY + src_x;
1243 srcU = srcU + (src_y / 2) * src_strideU + src_x / 2;
1244 srcV = srcV + (src_y / 2) * src_strideV + src_x / 2;
/* Second-row pointers for processing two luma scanlines per chroma row. */
1246 destY2 = destY + dest_strideY;
1247 srcY2 = srcY + src_strideY;
/* Select the SDTV<->HDTV conversion matrix, or identity if none needed.
 * NOTE(review): the `memcpy (matrix,` line start appears elided here. */
1252 if (src_sdtv != dest_sdtv)
1254 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
1255 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
1257 memcpy (matrix, cog_identity_matrix_8bit, 12 * sizeof (gint));
1259 /* 1. Handle the first destination scanline specially if it
1260 * doesn't start at the macro pixel boundary, i.e. blend
1261 * with the background! */
1262 if (dest_y % 2 == 1) {
1263 /* 1.1. Handle the first destination pixel if it doesn't
1264 * start at the macro pixel boundary, i.e. blend with
1265 * the background! */
1266 if (dest_x % 2 == 1) {
1271 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1273 CLAMP ((3 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
1276 CLAMP ((3 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 4, 0,
1280 src_y_idx = dest_y_idx = dest_uv_idx = 1;
1281 src_uv_idx = (src_x % 2) + 1;
1284 src_y_idx = dest_y_idx = dest_uv_idx = 0;
1285 src_uv_idx = (src_x % 2);
1288 /* 1.2. Copy all macro pixels from the source to the destination
1289 * but blend with the background because we're only filling
1290 * the lower part of the macro pixels. */
1291 for (; j < w - 1; j += 2) {
1292 y1 = srcY[src_y_idx];
1293 y2 = srcY[src_y_idx + 1];
1295 u1 = srcU[src_uv_idx / 2];
1296 v1 = srcV[src_uv_idx / 2];
1298 u2 = srcU[src_uv_idx / 2];
1299 v2 = srcV[src_uv_idx / 2];
1302 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1303 destY[dest_y_idx + 1] =
1304 CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
1305 destU[dest_uv_idx] =
1306 CLAMP ((2 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1307 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
1308 destV[dest_uv_idx] =
1309 CLAMP ((2 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
1310 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
1317 /* 1.3. Now copy the last pixel if one exists and blend it
1318 * with the background because we only fill part of
1319 * the macro pixel. In case this is the last pixel of
1320 * the destination we will fill a larger part. */
1321 if (j == w - 1 && j == dest_width - 1) {
1322 y1 = srcY[src_y_idx];
1323 u1 = srcU[src_uv_idx / 2];
1324 v1 = srcV[src_uv_idx / 2];
1326 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1327 destU[dest_uv_idx] = CLAMP (
1328 (destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0,
1330 destV[dest_uv_idx] =
1331 CLAMP ((destV[dest_uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
1333 } else if (j == w - 1) {
1334 y1 = srcY[src_y_idx];
1335 u1 = srcU[src_uv_idx / 2];
1336 v1 = srcV[src_uv_idx / 2];
1338 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1339 destU[dest_uv_idx] = CLAMP (
1340 (3 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4,
1342 destV[dest_uv_idx] =
1343 CLAMP ((3 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
1347 destY += dest_strideY;
1348 destY2 += dest_strideY;
1349 destU += dest_strideU;
1350 destV += dest_strideV;
1351 srcY += src_strideY;
1352 srcY2 += src_strideY;
/* Chroma rows advance only every other luma row. */
1354 if (src_y % 2 == 0) {
1355 srcU += src_strideU;
1356 srcV += src_strideV;
1363 /* 2. Copy all macro pixel scanlines, the destination scanline
1364 * now starts at macro pixel boundary. */
1365 for (; i < h - 1; i += 2) {
1366 /* 2.1. Handle the first destination pixel if it doesn't
1367 * start at the macro pixel boundary, i.e. blend with
1368 * the background! */
1372 if (src_y % 2 == 1) {
1373 srcU2 += src_strideU;
1374 srcV2 += src_strideV;
1377 if (dest_x % 2 == 1) {
1385 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1386 destY2[0] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
1388 (2 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1,
1389 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
1391 (2 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1,
1392 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
1394 src_y_idx = dest_y_idx = dest_uv_idx = 1;
1395 src_uv_idx = (src_x % 2) + 1;
1398 src_y_idx = dest_y_idx = dest_uv_idx = 0;
1399 src_uv_idx = (src_x % 2);
1402 /* 2.2. Copy all macro pixels from the source to the destination.
1403 * All pixels now start at macro pixel boundary, i.e. no
1404 * blending with the background is necessary. */
1405 for (; j < w - 1; j += 2) {
1406 y1 = srcY[src_y_idx];
1407 y2 = srcY[src_y_idx + 1];
1408 y3 = srcY2[src_y_idx];
1409 y4 = srcY2[src_y_idx + 1];
1411 u1 = srcU[src_uv_idx / 2];
1412 u3 = srcU2[src_uv_idx / 2];
1413 v1 = srcV[src_uv_idx / 2];
1414 v3 = srcV2[src_uv_idx / 2];
1416 u2 = srcU[src_uv_idx / 2];
1417 u4 = srcU2[src_uv_idx / 2];
1418 v2 = srcV[src_uv_idx / 2];
1419 v4 = srcV2[src_uv_idx / 2];
1422 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1423 destY[dest_y_idx + 1] =
1424 CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
1425 destY2[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y3, u3, v3), 0, 255);
1426 destY2[dest_y_idx + 1] =
1427 CLAMP (APPLY_MATRIX (matrix, 0, y4, u4, v4), 0, 255);
1429 destU[dest_uv_idx] = CLAMP (
1430 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 1, y2,
1431 u2, v2) + APPLY_MATRIX (matrix, 1, y3, u3,
1432 v3) + APPLY_MATRIX (matrix, 1, y4, u4, v4)) / 4, 0, 255);
1433 destV[dest_uv_idx] = CLAMP (
1434 (APPLY_MATRIX (matrix, 2, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
1435 u2, v2) + APPLY_MATRIX (matrix, 2, y3, u3,
1436 v3) + APPLY_MATRIX (matrix, 2, y4, u4, v4)) / 4, 0, 255);
1443 /* 2.3. Now copy the last pixel if one exists and blend it
1444 * with the background because we only fill part of
1445 * the macro pixel. In case this is the last pixel of
1446 * the destination we will fill a larger part. */
1447 if (j == w - 1 && j == dest_width - 1) {
1448 y1 = srcY[src_y_idx];
1449 y2 = srcY2[src_y_idx];
1451 u1 = srcU[src_uv_idx / 2];
1452 u2 = srcU2[src_uv_idx / 2];
1454 v1 = srcV[src_uv_idx / 2];
1455 v2 = srcV2[src_uv_idx / 2];
1457 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1458 destY2[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
/* NOTE(review): the two averages below mix matrix rows 1 (U) and 2 (V).
 * Expected: both terms of destU use row 1 and both terms of destV use
 * row 2 — looks like a copy/paste slip; confirm against git history. */
1459 destU[dest_uv_idx] = CLAMP (
1460 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
1461 u2, v2)) / 2, 0, 255);
1462 destV[dest_uv_idx] = CLAMP (
1463 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
1464 u2, v2)) / 2, 0, 255);
1465 } else if (j == w - 1) {
1466 y1 = srcY[src_y_idx];
1467 y2 = srcY2[src_y_idx];
1469 u1 = srcU[src_uv_idx / 2];
1470 u2 = srcU2[src_uv_idx / 2];
1472 v1 = srcV[src_uv_idx / 2];
1473 v2 = srcV2[src_uv_idx / 2];
1475 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1476 destY2[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
/* NOTE(review): same mixed-row pattern as above — destU's second term
 * uses row 2 and destV's first term uses row 1; verify intent. */
1477 destU[dest_uv_idx] = CLAMP (
1478 (2 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1479 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
1480 destV[dest_uv_idx] = CLAMP (
1481 (2 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1482 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
1485 destY += 2 * dest_strideY;
1486 destY2 += 2 * dest_strideY;
1487 destU += dest_strideU;
1488 destV += dest_strideV;
1489 srcY += 2 * src_strideY;
1490 srcY2 += 2 * src_strideY;
1493 srcU += src_strideU;
1494 srcV += src_strideV;
1497 /* 3. Handle the last scanline if one exists. This again
1498 * doesn't start at macro pixel boundary but should
1499 * only fill the upper part of the macro pixels. */
1500 if (i == h - 1 && i == dest_height - 1) {
1501 /* 3.1. Handle the first destination pixel if it doesn't
1502 * start at the macro pixel boundary, i.e. blend with
1503 * the background! */
1504 if (dest_x % 2 == 1) {
1509 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1511 CLAMP ((destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
1513 CLAMP ((destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 2, 0, 255);
1516 src_y_idx = dest_y_idx = dest_uv_idx = 1;
1517 src_uv_idx = (src_x % 2) + 1;
1520 src_y_idx = dest_y_idx = dest_uv_idx = 0;
1521 src_uv_idx = (src_x % 2);
1524 /* 3.2. Copy all macro pixels from the source to the destination
1525 * but blend with the background because we're only filling
1526 * the upper part of the macro pixels. */
1527 for (; j < w - 1; j += 2) {
1528 y1 = srcY[src_y_idx];
1529 y2 = srcY[src_y_idx + 1];
1531 u1 = srcU[src_uv_idx / 2];
1532 v1 = srcV[src_uv_idx / 2];
1534 u2 = srcU[src_uv_idx / 2];
1535 v2 = srcV[src_uv_idx / 2];
1538 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1539 destY[dest_y_idx + 1] =
1540 CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
1542 destU[dest_uv_idx] = CLAMP (
1543 (2 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1544 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
1545 destV[dest_uv_idx] = CLAMP (
1546 (2 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
1547 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
1554 /* 3.3. Now copy the last pixel if one exists and blend it
1555 * with the background because we only fill part of
1556 * the macro pixel. In case this is the last pixel of
1557 * the destination we will fill a larger part. */
1558 if (j == w - 1 && j == dest_width - 1) {
1559 y1 = srcY[src_y_idx];
1560 u1 = srcU[src_uv_idx / 2];
1561 v1 = srcV[src_uv_idx / 2];
1563 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1564 destU[dest_uv_idx] = CLAMP (
1565 (destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0,
/* NOTE(review): destV below applies matrix row 1 (the U row); row 2 (V)
 * expected — likely copy/paste bug, confirm. */
1567 destV[dest_uv_idx] =
1568 CLAMP ((destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1570 } else if (j == w - 1) {
1571 y1 = srcY[src_y_idx];
1572 u1 = srcU[src_uv_idx / 2];
1573 v1 = srcV[src_uv_idx / 2];
1575 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1576 destU[dest_uv_idx] = CLAMP (
1577 (3 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4,
/* NOTE(review): destV uses matrix row 1 here too; row 2 expected. */
1579 destV[dest_uv_idx] =
1580 CLAMP ((3 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1583 } else if (i == h - 1) {
1584 /* 3.1. Handle the first destination pixel if it doesn't
1585 * start at the macro pixel boundary, i.e. blend with
1586 * the background! */
1587 if (dest_x % 2 == 1) {
1592 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1594 CLAMP ((3 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
1597 CLAMP ((3 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 4, 0,
1601 src_y_idx = dest_y_idx = dest_uv_idx = 1;
1602 src_uv_idx = (src_x % 2) + 1;
1605 src_y_idx = dest_y_idx = dest_uv_idx = 0;
1606 src_uv_idx = (src_x % 2);
1609 /* 3.2. Copy all macro pixels from the source to the destination
1610 * but blend with the background because we're only filling
1611 * the upper part of the macro pixels. */
1612 for (; j < w - 1; j += 2) {
1613 y1 = srcY[src_y_idx];
1614 y2 = srcY[src_y_idx + 1];
1616 u1 = srcU[src_uv_idx / 2];
1617 v1 = srcV[src_uv_idx / 2];
1619 u2 = srcU[src_uv_idx / 2];
1620 v2 = srcV[src_uv_idx / 2];
1623 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1624 destY[dest_y_idx + 1] =
1625 CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
1627 destU[dest_uv_idx] = CLAMP (
1628 (2 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1629 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
1630 destV[dest_uv_idx] = CLAMP (
1631 (2 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
1632 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
1639 /* 3.3. Now copy the last pixel if one exists and blend it
1640 * with the background because we only fill part of
1641 * the macro pixel. In case this is the last pixel of
1642 * the destination we will fill a larger part. */
1643 if (j == w - 1 && j == dest_width - 1) {
1644 y1 = srcY[src_y_idx];
1645 u1 = srcU[src_uv_idx / 2];
1646 v1 = srcV[src_uv_idx / 2];
1648 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1649 destU[dest_uv_idx] = CLAMP (
1650 (destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0,
/* NOTE(review): destV uses matrix row 1 (U row); row 2 expected. */
1652 destV[dest_uv_idx] =
1653 CLAMP ((destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1655 } else if (j == w - 1) {
1656 y1 = srcY[src_y_idx];
1657 u1 = srcU[src_uv_idx / 2];
1658 v1 = srcV[src_uv_idx / 2];
1660 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1661 destU[dest_uv_idx] = CLAMP (
1662 (3 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4,
/* NOTE(review): destV uses matrix row 1 (U row); row 2 expected. */
1664 destV[dest_uv_idx] =
1665 CLAMP ((3 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
/* copy_i420_ayuv:
 * Copy a w x h region from a planar I420 source into a packed AYUV
 * destination at (dest_x, dest_y).  Each source chroma sample covers a
 * 2x2 luma block and is replicated into the packed output.  i_alpha
 * (clamped to 255) becomes the A byte of every written pixel.  When
 * source and destination colorimetry differ, samples are converted with
 * APPLY_MATRIX; otherwise they are copied through unchanged.
 * NOTE(review): several lines (declarations, braces, and what is
 * presumably a `src_y++;` per row) appear elided from this listing; the
 * chroma-row advance below is gated on src_y, which is not visibly
 * updated — verify against the original file.
 */
1672 copy_i420_ayuv (guint i_alpha, GstVideoFrame * dest_frame,
1673 gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src_frame,
1674 gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
1676 const guint8 *srcY, *srcU, *srcV;
1677 gint src_strideY, src_strideU, src_strideV;
1681 src_strideY = GST_VIDEO_FRAME_COMP_STRIDE (src_frame, 0);
1682 src_strideU = GST_VIDEO_FRAME_COMP_STRIDE (src_frame, 1);
1683 src_strideV = GST_VIDEO_FRAME_COMP_STRIDE (src_frame, 2);
1685 srcY = GST_VIDEO_FRAME_COMP_DATA (src_frame, 0);
1686 srcU = GST_VIDEO_FRAME_COMP_DATA (src_frame, 1);
1687 srcV = GST_VIDEO_FRAME_COMP_DATA (src_frame, 2);
1689 dest_stride = GST_VIDEO_FRAME_PLANE_STRIDE (dest_frame, 0);
/* AYUV is 4 bytes per pixel, hence dest_x * 4. */
1691 dest = GST_VIDEO_FRAME_PLANE_DATA (dest_frame, 0);
1692 dest = dest + dest_y * dest_stride + dest_x * 4;
1694 srcY = srcY + src_y * src_strideY + src_x;
1695 srcU = srcU + (src_y / 2) * src_strideU + src_x / 2;
1696 srcV = srcV + (src_y / 2) * src_strideV + src_x / 2;
1698 i_alpha = MIN (i_alpha, 255);
/* Colorimetry conversion branch. */
1700 if (src_sdtv != dest_sdtv) {
1707 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
1708 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
1710 for (i = 0; i < h; i++) {
1711 for (j = 0, uv_idx = src_x % 2; j < w; j++, uv_idx++) {
1713 u = srcU[uv_idx / 2];
1714 v = srcV[uv_idx / 2];
1716 y1 = APPLY_MATRIX (matrix, 0, y, u, v);
1717 u1 = APPLY_MATRIX (matrix, 1, y, u, v);
1718 v1 = APPLY_MATRIX (matrix, 2, y, u, v);
1720 dest[4 * j + 0] = i_alpha;
1721 dest[4 * j + 1] = y1;
1722 dest[4 * j + 2] = u1;
1723 dest[4 * j + 3] = v1;
1725 dest += dest_stride;
1728 srcY += src_strideY;
/* Chroma rows advance only every other luma row. */
1729 if (src_y % 2 == 0) {
1730 srcU += src_strideU;
1731 srcV += src_strideV;
/* Pass-through branch: same colorimetry, no matrix needed. */
1738 for (i = 0; i < h; i++) {
1739 for (j = 0, uv_idx = src_x % 2; j < w; j++, uv_idx++) {
1741 u = srcU[uv_idx / 2];
1742 v = srcV[uv_idx / 2];
1744 dest[4 * j + 0] = i_alpha;
1745 dest[4 * j + 1] = y;
1746 dest[4 * j + 2] = u;
1747 dest[4 * j + 3] = v;
1749 dest += dest_stride;
1752 srcY += src_strideY;
1753 if (src_y % 2 == 0) {
1754 srcU += src_strideU;
1755 srcV += src_strideV;
1762 fill_rgb32 (GstVideoBoxFill fill_type, guint b_alpha,
1763 GstVideoFrame * frame, gboolean sdtv)
1765 guint32 empty_pixel;
1771 width = GST_VIDEO_FRAME_WIDTH (frame);
1772 height = GST_VIDEO_FRAME_HEIGHT (frame);
1774 dest = GST_VIDEO_FRAME_PLANE_DATA (frame, 0);
1775 stride = GST_VIDEO_FRAME_PLANE_STRIDE (frame, 0);
1777 p[0] = GST_VIDEO_FRAME_COMP_OFFSET (frame, 3);
1778 p[1] = GST_VIDEO_FRAME_COMP_OFFSET (frame, 0);
1779 p[2] = GST_VIDEO_FRAME_COMP_OFFSET (frame, 1);
1780 p[3] = GST_VIDEO_FRAME_COMP_OFFSET (frame, 2);
1782 b_alpha = MIN (b_alpha, 255);
1784 if (GST_VIDEO_FRAME_N_COMPONENTS (frame) == 4) {
1785 empty_pixel = GUINT32_FROM_LE ((b_alpha << (p[0] * 8)) |
1786 (rgb_colors_R[fill_type] << (p[1] * 8)) |
1787 (rgb_colors_G[fill_type] << (p[2] * 8)) |
1788 (rgb_colors_B[fill_type] << (p[3] * 8)));
1790 empty_pixel = GUINT32_FROM_LE (
1791 (rgb_colors_R[fill_type] << (p[1] * 8)) |
1792 (rgb_colors_G[fill_type] << (p[2] * 8)) |
1793 (rgb_colors_B[fill_type] << (p[3] * 8)));
1796 if (stride == width * 4) {
1797 video_box_orc_splat_u32 ((guint32 *) dest, empty_pixel, width * height);
1798 } else if (height) {
1799 for (; height; --height) {
1800 video_box_orc_splat_u32 ((guint32 *) dest, empty_pixel, width);
1807 fill_rgb24 (GstVideoBoxFill fill_type, guint b_alpha,
1808 GstVideoFrame * frame, gboolean sdtv)
1816 width = GST_VIDEO_FRAME_WIDTH (frame);
1817 height = GST_VIDEO_FRAME_HEIGHT (frame);
1819 dest = GST_VIDEO_FRAME_PLANE_DATA (frame, 0);
1820 dest_stride = GST_VIDEO_FRAME_PLANE_STRIDE (frame, 0);
1822 p[1] = GST_VIDEO_FRAME_COMP_OFFSET (frame, 0);
1823 p[2] = GST_VIDEO_FRAME_COMP_OFFSET (frame, 1);
1824 p[3] = GST_VIDEO_FRAME_COMP_OFFSET (frame, 2);
1826 for (i = 0; i < height; i++) {
1827 for (j = 0; j < width; j++) {
1828 dest[3 * j + p[1]] = rgb_colors_R[fill_type];
1829 dest[3 * j + p[2]] = rgb_colors_G[fill_type];
1830 dest[3 * j + p[3]] = rgb_colors_B[fill_type];
1832 dest += dest_stride;
1837 copy_rgb32 (guint i_alpha, GstVideoFrame * dest_frame,
1838 gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src_frame,
1839 gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
1842 gint src_stride, dest_stride;
1843 gboolean in_alpha, out_alpha;
1844 gint in_bpp, out_bpp;
1847 gboolean packed_out;
1851 src_stride = GST_VIDEO_FRAME_PLANE_STRIDE (src_frame, 0);
1852 dest_stride = GST_VIDEO_FRAME_PLANE_STRIDE (dest_frame, 0);
1853 in_bpp = GST_VIDEO_FRAME_COMP_PSTRIDE (src_frame, 0);
1854 out_bpp = GST_VIDEO_FRAME_COMP_PSTRIDE (dest_frame, 0);
1855 packed_in = (in_bpp < 4);
1856 packed_out = (out_bpp < 4);
1858 out_alpha = GST_VIDEO_INFO_HAS_ALPHA (&dest_frame->info);
1859 p_out[0] = GST_VIDEO_FRAME_COMP_OFFSET (dest_frame, 3);
1860 p_out[1] = GST_VIDEO_FRAME_COMP_OFFSET (dest_frame, 0);
1861 p_out[2] = GST_VIDEO_FRAME_COMP_OFFSET (dest_frame, 1);
1862 p_out[3] = GST_VIDEO_FRAME_COMP_OFFSET (dest_frame, 2);
1864 in_alpha = GST_VIDEO_INFO_HAS_ALPHA (&src_frame->info);
1865 p_in[0] = GST_VIDEO_FRAME_COMP_OFFSET (src_frame, 3);
1866 p_in[1] = GST_VIDEO_FRAME_COMP_OFFSET (src_frame, 0);
1867 p_in[2] = GST_VIDEO_FRAME_COMP_OFFSET (src_frame, 1);
1868 p_in[3] = GST_VIDEO_FRAME_COMP_OFFSET (src_frame, 2);
1870 dest = GST_VIDEO_FRAME_PLANE_DATA (dest_frame, 0);
1871 dest = dest + dest_y * dest_stride + dest_x * out_bpp;
1872 src = GST_VIDEO_FRAME_PLANE_DATA (src_frame, 0);
1873 src = src + src_y * src_stride + src_x * in_bpp;
1875 if (in_alpha && out_alpha) {
1877 for (i = 0; i < h; i++) {
1878 for (j = 0; j < w; j += 4) {
1879 dest[j + p_out[0]] = (src[j + p_in[0]] * i_alpha) >> 8;
1880 dest[j + p_out[1]] = src[j + p_in[1]];
1881 dest[j + p_out[2]] = src[j + p_in[2]];
1882 dest[j + p_out[3]] = src[j + p_in[3]];
1884 dest += dest_stride;
1887 } else if (out_alpha && !packed_in) {
1889 i_alpha = MIN (i_alpha, 255);
1891 for (i = 0; i < h; i++) {
1892 for (j = 0; j < w; j += 4) {
1893 dest[j + p_out[0]] = i_alpha;
1894 dest[j + p_out[1]] = src[j + p_in[1]];
1895 dest[j + p_out[2]] = src[j + p_in[2]];
1896 dest[j + p_out[3]] = src[j + p_in[3]];
1898 dest += dest_stride;
1901 } else if (out_alpha && packed_in) {
1902 i_alpha = MIN (i_alpha, 255);
1904 for (i = 0; i < h; i++) {
1905 for (j = 0; j < w; j++) {
1906 dest[4 * j + p_out[0]] = i_alpha;
1907 dest[4 * j + p_out[1]] = src[in_bpp * j + p_in[1]];
1908 dest[4 * j + p_out[2]] = src[in_bpp * j + p_in[2]];
1909 dest[4 * j + p_out[3]] = src[in_bpp * j + p_in[3]];
1911 dest += dest_stride;
1914 } else if (!packed_out && !packed_in) {
1916 for (i = 0; i < h; i++) {
1917 for (j = 0; j < w; j += 4) {
1918 dest[j + p_out[1]] = src[j + p_in[1]];
1919 dest[j + p_out[2]] = src[j + p_in[2]];
1920 dest[j + p_out[3]] = src[j + p_in[3]];
1922 dest += dest_stride;
1926 for (i = 0; i < h; i++) {
1927 for (j = 0; j < w; j++) {
1928 dest[out_bpp * j + p_out[1]] = src[in_bpp * j + p_in[1]];
1929 dest[out_bpp * j + p_out[2]] = src[in_bpp * j + p_in[2]];
1930 dest[out_bpp * j + p_out[3]] = src[in_bpp * j + p_in[3]];
1932 dest += dest_stride;
/* copy_rgb32_ayuv:
 * Copy a w x h region from an RGB-family source into a packed AYUV
 * destination at (dest_x, dest_y), converting colors with the
 * RGB->YCbCr matrix matching the destination's colorimetry.
 * NOTE(review): several structural lines appear elided from this
 * listing — the `if (in_alpha) {` guard before the first loop, the byte
 * scaling of w for the 4-bpp branches, the matrix declaration and the
 * `dest[j + 0] = a;` alpha stores.  Code is kept byte-identical to the
 * file as present; verify against the original.
 */
1939 copy_rgb32_ayuv (guint i_alpha, GstVideoFrame * dest_frame,
1940 gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src_frame,
1941 gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
1944 gint src_stride, dest_stride;
1955 dest_stride = GST_VIDEO_FRAME_PLANE_STRIDE (dest_frame, 0);
1956 src_stride = GST_VIDEO_FRAME_PLANE_STRIDE (src_frame, 0);
1957 in_bpp = GST_VIDEO_FRAME_COMP_PSTRIDE (src_frame, 0);
1958 packed_in = (in_bpp < 4);
/* Component byte offsets of A, R, G, B inside one source pixel. */
1960 in_alpha = GST_VIDEO_INFO_HAS_ALPHA (&src_frame->info);
1961 p_in[0] = GST_VIDEO_FRAME_COMP_OFFSET (src_frame, 3);
1962 p_in[1] = GST_VIDEO_FRAME_COMP_OFFSET (src_frame, 0);
1963 p_in[2] = GST_VIDEO_FRAME_COMP_OFFSET (src_frame, 1);
1964 p_in[3] = GST_VIDEO_FRAME_COMP_OFFSET (src_frame, 2);
/* RGB -> YCbCr matrix for the destination's colorimetry. */
1967 (dest_sdtv) ? cog_rgb_to_ycbcr_matrix_8bit_sdtv :
1968 cog_rgb_to_ycbcr_matrix_8bit_hdtv, 12 * sizeof (gint));
1970 dest = GST_VIDEO_FRAME_PLANE_DATA (dest_frame, 0);
1971 dest = dest + dest_y * dest_stride + dest_x * 4;
1972 src = GST_VIDEO_FRAME_PLANE_DATA (src_frame, 0);
1973 src = src + src_y * src_stride + src_x * in_bpp;
/* Branch 1: source has per-pixel alpha — scale it by i_alpha. */
1977 for (i = 0; i < h; i++) {
1978 for (j = 0; j < w; j += 4) {
1979 a = (src[j + p_in[0]] * i_alpha) >> 8;
1980 r = src[j + p_in[1]];
1981 g = src[j + p_in[2]];
1982 b = src[j + p_in[3]];
1984 y = APPLY_MATRIX (matrix, 0, r, g, b);
1985 u = APPLY_MATRIX (matrix, 1, r, g, b);
1986 v = APPLY_MATRIX (matrix, 2, r, g, b);
1989 dest[j + 1] = CLAMP (y, 0, 255);
1990 dest[j + 2] = CLAMP (u, 0, 255);
1991 dest[j + 3] = CLAMP (v, 0, 255);
1993 dest += dest_stride;
/* Branch 2: 4-bpp source without alpha — constant alpha. */
1996 } else if (!packed_in) {
1998 i_alpha = MIN (i_alpha, 255);
2000 for (i = 0; i < h; i++) {
2001 for (j = 0; j < w; j += 4) {
2003 r = src[j + p_in[1]];
2004 g = src[j + p_in[2]];
2005 b = src[j + p_in[3]];
2007 y = APPLY_MATRIX (matrix, 0, r, g, b);
2008 u = APPLY_MATRIX (matrix, 1, r, g, b);
2009 v = APPLY_MATRIX (matrix, 2, r, g, b);
2012 dest[j + 1] = CLAMP (y, 0, 255);
2013 dest[j + 2] = CLAMP (u, 0, 255);
2014 dest[j + 3] = CLAMP (v, 0, 255);
2016 dest += dest_stride;
/* Branch 3: 3-bpp (packed) source — per-pixel indexing by in_bpp. */
2020 i_alpha = MIN (i_alpha, 255);
2022 for (i = 0; i < h; i++) {
2023 for (j = 0; j < w; j++) {
2025 r = src[in_bpp * j + p_in[1]];
2026 g = src[in_bpp * j + p_in[2]];
2027 b = src[in_bpp * j + p_in[3]];
2029 y = APPLY_MATRIX (matrix, 0, r, g, b);
2030 u = APPLY_MATRIX (matrix, 1, r, g, b);
2031 v = APPLY_MATRIX (matrix, 2, r, g, b);
2033 dest[4 * j + 0] = a;
2034 dest[4 * j + 1] = CLAMP (y, 0, 255);
2035 dest[4 * j + 2] = CLAMP (u, 0, 255);
2036 dest[4 * j + 3] = CLAMP (v, 0, 255);
2038 dest += dest_stride;
/* copy_ayuv_rgb32:
 * Copy a w x h region from a packed AYUV source into an RGB-family
 * destination at (dest_x, dest_y), converting colors with the
 * YCbCr->RGB matrix matching the source's colorimetry.  When the
 * destination has alpha, the source alpha is scaled by i_alpha (>>8).
 * NOTE(review): this listing appears to have lost lines — the y/u/v
 * loads (src[j + 1..3]), the `if (out_alpha) {` guard and several
 * declarations/braces are not visible.  Code kept byte-identical to the
 * file as present; verify against the original.
 */
2045 copy_ayuv_rgb32 (guint i_alpha, GstVideoFrame * dest_frame,
2046 gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src_frame,
2047 gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
2050 gint src_stride, dest_stride;
2054 gboolean packed_out;
2061 src_stride = GST_VIDEO_FRAME_PLANE_STRIDE (src_frame, 0);
2062 dest_stride = GST_VIDEO_FRAME_PLANE_STRIDE (dest_frame, 0);
2063 out_bpp = GST_VIDEO_FRAME_COMP_PSTRIDE (dest_frame, 0);
2064 packed_out = (out_bpp < 4);
/* Component byte offsets of A, R, G, B inside one destination pixel. */
2066 out_alpha = GST_VIDEO_INFO_HAS_ALPHA (&dest_frame->info);
2067 p_out[0] = GST_VIDEO_FRAME_COMP_OFFSET (dest_frame, 3);
2068 p_out[1] = GST_VIDEO_FRAME_COMP_OFFSET (dest_frame, 0);
2069 p_out[2] = GST_VIDEO_FRAME_COMP_OFFSET (dest_frame, 1);
2070 p_out[3] = GST_VIDEO_FRAME_COMP_OFFSET (dest_frame, 2);
/* YCbCr -> RGB matrix for the source's colorimetry. */
2073 (src_sdtv) ? cog_ycbcr_to_rgb_matrix_8bit_sdtv :
2074 cog_ycbcr_to_rgb_matrix_8bit_hdtv, 12 * sizeof (gint));
2076 dest = GST_VIDEO_FRAME_PLANE_DATA (dest_frame, 0);
2077 dest = dest + dest_y * dest_stride + dest_x * out_bpp;
2078 src = GST_VIDEO_FRAME_PLANE_DATA (src_frame, 0);
2079 src = src + src_y * src_stride + src_x * 4;
/* Branch 1: destination has alpha — scale source alpha by i_alpha. */
2083 for (i = 0; i < h; i++) {
2084 for (j = 0; j < w; j += 4) {
2085 a = (src[j + 0] * i_alpha) >> 8;
2090 r = APPLY_MATRIX (matrix, 0, y, u, v);
2091 g = APPLY_MATRIX (matrix, 1, y, u, v);
2092 b = APPLY_MATRIX (matrix, 2, y, u, v);
2094 dest[j + p_out[0]] = a;
2095 dest[j + p_out[1]] = CLAMP (r, 0, 255);
2096 dest[j + p_out[2]] = CLAMP (g, 0, 255);
2097 dest[j + p_out[3]] = CLAMP (b, 0, 255);
2099 dest += dest_stride;
/* Branch 2: 4-bpp destination without alpha. */
2102 } else if (!packed_out) {
2104 for (i = 0; i < h; i++) {
2105 for (j = 0; j < w; j += 4) {
2110 r = APPLY_MATRIX (matrix, 0, y, u, v);
2111 g = APPLY_MATRIX (matrix, 1, y, u, v);
2112 b = APPLY_MATRIX (matrix, 2, y, u, v);
2114 dest[j + p_out[1]] = CLAMP (r, 0, 255);
2115 dest[j + p_out[2]] = CLAMP (g, 0, 255);
2116 dest[j + p_out[3]] = CLAMP (b, 0, 255);
2118 dest += dest_stride;
/* Branch 3: 3-bpp (packed) destination — per-pixel indexing. */
2122 for (i = 0; i < h; i++) {
2123 for (j = 0; j < w; j++) {
2128 r = APPLY_MATRIX (matrix, 0, y, u, v);
2129 g = APPLY_MATRIX (matrix, 1, y, u, v);
2130 b = APPLY_MATRIX (matrix, 2, y, u, v);
2132 dest[out_bpp * j + p_out[1]] = CLAMP (r, 0, 255);
2133 dest[out_bpp * j + p_out[2]] = CLAMP (g, 0, 255);
2134 dest[out_bpp * j + p_out[3]] = CLAMP (b, 0, 255);
2136 dest += dest_stride;
2143 fill_gray (GstVideoBoxFill fill_type, guint b_alpha,
2144 GstVideoFrame * frame, gboolean sdtv)
2150 GstVideoFormat format;
2152 format = GST_VIDEO_FRAME_FORMAT (frame);
2154 width = GST_VIDEO_FRAME_WIDTH (frame);
2155 height = GST_VIDEO_FRAME_HEIGHT (frame);
2157 dest = GST_VIDEO_FRAME_PLANE_DATA (frame, 0);
2158 dest_stride = GST_VIDEO_FRAME_PLANE_STRIDE (frame, 0);
2160 if (format == GST_VIDEO_FORMAT_GRAY8) {
2161 guint8 val = yuv_sdtv_colors_Y[fill_type];
2163 for (i = 0; i < height; i++) {
2164 memset (dest, val, width);
2165 dest += dest_stride;
2168 guint16 val = yuv_sdtv_colors_Y[fill_type] << 8;
2170 if (format == GST_VIDEO_FORMAT_GRAY16_BE) {
2171 for (i = 0; i < height; i++) {
2172 for (j = 0; j < width; j++) {
2173 GST_WRITE_UINT16_BE (dest + 2 * j, val);
2175 dest += dest_stride;
2178 for (i = 0; i < height; i++) {
2179 for (j = 0; j < width; j++) {
2180 GST_WRITE_UINT16_LE (dest + 2 * j, val);
2182 dest += dest_stride;
2189 copy_packed_simple (guint i_alpha, GstVideoFrame * dest_frame,
2190 gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src_frame,
2191 gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
2194 gint src_stride, dest_stride;
2195 gint pixel_stride, row_size;
2198 src_stride = GST_VIDEO_FRAME_PLANE_STRIDE (src_frame, 0);
2199 dest_stride = GST_VIDEO_FRAME_PLANE_STRIDE (dest_frame, 0);
2200 pixel_stride = GST_VIDEO_FRAME_COMP_PSTRIDE (dest_frame, 0);
2201 row_size = w * pixel_stride;
2203 dest = GST_VIDEO_FRAME_PLANE_DATA (dest_frame, 0);
2204 dest = dest + dest_y * dest_stride + dest_x * pixel_stride;
2205 src = GST_VIDEO_FRAME_PLANE_DATA (src_frame, 0);
2206 src = src + src_y * src_stride + src_x * pixel_stride;
2208 for (i = 0; i < h; i++) {
2209 memcpy (dest, src, row_size);
2210 dest += dest_stride;
2216 fill_yuy2 (GstVideoBoxFill fill_type, guint b_alpha,
2217 GstVideoFrame * frame, gboolean sdtv)
2224 GstVideoFormat format;
2226 format = GST_VIDEO_FRAME_FORMAT (frame);
2228 width = GST_VIDEO_FRAME_WIDTH (frame);
2229 height = GST_VIDEO_FRAME_HEIGHT (frame);
2231 dest = GST_VIDEO_FRAME_PLANE_DATA (frame, 0);
2232 stride = GST_VIDEO_FRAME_PLANE_STRIDE (frame, 0);
2234 y = (sdtv) ? yuv_sdtv_colors_Y[fill_type] : yuv_hdtv_colors_Y[fill_type];
2235 u = (sdtv) ? yuv_sdtv_colors_U[fill_type] : yuv_hdtv_colors_U[fill_type];
2236 v = (sdtv) ? yuv_sdtv_colors_V[fill_type] : yuv_hdtv_colors_V[fill_type];
2238 width = width + (width % 2);
2240 if (format == GST_VIDEO_FORMAT_YUY2) {
2241 for (i = 0; i < height; i++) {
2242 for (j = 0; j < width; j += 2) {
2243 dest[j * 2 + 0] = y;
2244 dest[j * 2 + 1] = u;
2245 dest[j * 2 + 2] = y;
2246 dest[j * 2 + 3] = v;
2251 } else if (format == GST_VIDEO_FORMAT_YVYU) {
2252 for (i = 0; i < height; i++) {
2253 for (j = 0; j < width; j += 2) {
2254 dest[j * 2 + 0] = y;
2255 dest[j * 2 + 1] = v;
2256 dest[j * 2 + 2] = y;
2257 dest[j * 2 + 3] = u;
2263 for (i = 0; i < height; i++) {
2264 for (j = 0; j < width; j += 2) {
2265 dest[j * 2 + 0] = u;
2266 dest[j * 2 + 1] = y;
2267 dest[j * 2 + 2] = v;
2268 dest[j * 2 + 3] = y;
2277 copy_yuy2_yuy2 (guint i_alpha, GstVideoFrame * dest_frame,
2278 gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src_frame,
2279 gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
2282 gint src_stride, dest_stride;
2284 GstVideoFormat src_format;
2286 src_format = GST_VIDEO_FRAME_FORMAT (src_frame);
2288 src_stride = GST_VIDEO_FRAME_PLANE_STRIDE (src_frame, 0);
2289 dest_stride = GST_VIDEO_FRAME_PLANE_STRIDE (dest_frame, 0);
2291 dest_x = (dest_x & ~1);
2292 src_x = (src_x & ~1);
2296 dest = GST_VIDEO_FRAME_PLANE_DATA (dest_frame, 0);
2297 dest = dest + dest_y * dest_stride + dest_x * 2;
2298 src = GST_VIDEO_FRAME_PLANE_DATA (src_frame, 0);
2299 src = src + src_y * src_stride + src_x * 2;
2301 if (src_sdtv != dest_sdtv) {
2307 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
2308 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
2310 if (src_format == GST_VIDEO_FORMAT_YUY2) {
2311 for (i = 0; i < h; i++) {
2312 for (j = 0; j < w; j += 2) {
2313 y1 = src[j * 2 + 0];
2314 y2 = src[j * 2 + 2];
2315 u1 = u2 = src[j * 2 + 1];
2316 v1 = v2 = src[j * 2 + 3];
2318 dest[j * 2 + 0] = APPLY_MATRIX (matrix, 0, y1, u1, v1);
2319 dest[j * 2 + 1] = APPLY_MATRIX (matrix, 1, y1, u1, v1);
2320 dest[j * 2 + 2] = APPLY_MATRIX (matrix, 0, y1, u2, v2);
2321 dest[j * 2 + 3] = APPLY_MATRIX (matrix, 2, y2, u2, v2);
2323 dest += dest_stride;
2326 } else if (src_format == GST_VIDEO_FORMAT_YVYU) {
2327 for (i = 0; i < h; i++) {
2328 for (j = 0; j < w; j += 2) {
2329 y1 = src[j * 2 + 0];
2330 y2 = src[j * 2 + 2];
2331 v1 = v2 = src[j * 2 + 1];
2332 u1 = u2 = src[j * 2 + 3];
2334 dest[j * 2 + 0] = APPLY_MATRIX (matrix, 0, y1, u1, v1);
2335 dest[j * 2 + 1] = APPLY_MATRIX (matrix, 2, y1, u1, v1);
2336 dest[j * 2 + 2] = APPLY_MATRIX (matrix, 0, y1, u2, v2);
2337 dest[j * 2 + 3] = APPLY_MATRIX (matrix, 1, y2, u2, v2);
2339 dest += dest_stride;
2343 for (i = 0; i < h; i++) {
2344 for (j = 0; j < w; j += 2) {
2345 u1 = u2 = src[j * 2 + 0];
2346 v1 = v2 = src[j * 2 + 2];
2347 y1 = src[j * 2 + 1];
2348 y2 = src[j * 2 + 3];
2350 dest[j * 2 + 1] = APPLY_MATRIX (matrix, 0, y1, u1, v1);
2351 dest[j * 2 + 0] = APPLY_MATRIX (matrix, 1, y1, u1, v1);
2352 dest[j * 2 + 3] = APPLY_MATRIX (matrix, 0, y1, u2, v2);
2353 dest[j * 2 + 2] = APPLY_MATRIX (matrix, 2, y2, u2, v2);
2355 dest += dest_stride;
2360 for (i = 0; i < h; i++) {
2361 memcpy (dest, src, w * 2);
2362 dest += dest_stride;
/* Default property values: no cropping/bordering on any side, black fill,
 * and fully opaque picture and border. */
#define DEFAULT_LEFT 0
#define DEFAULT_RIGHT 0
#define DEFAULT_TOP 0
#define DEFAULT_BOTTOM 0
#define DEFAULT_FILL_TYPE VIDEO_BOX_FILL_BLACK
#define DEFAULT_ALPHA 1.0
#define DEFAULT_BORDER_ALPHA 1.0
2390 static GstStaticPadTemplate gst_video_box_src_template =
2391 GST_STATIC_PAD_TEMPLATE ("src",
2394 GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("{ AYUV, "
2395 "ARGB, BGRA, ABGR, RGBA, xRGB, BGRx, xBGR, RGBx, RGB, BGR, "
2396 "Y444, Y42B, YUY2, YVYU, UYVY, I420, YV12, Y41B, "
2397 "GRAY8, GRAY16_BE, GRAY16_LE } "))
2400 static GstStaticPadTemplate gst_video_box_sink_template =
2401 GST_STATIC_PAD_TEMPLATE ("sink",
2404 GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("{ AYUV, "
2405 "ARGB, BGRA, ABGR, RGBA, xRGB, BGRx, xBGR, RGBx, RGB, BGR, "
2406 "Y444, Y42B, YUY2, YVYU, UYVY, I420, YV12, Y41B, "
2407 "GRAY8, GRAY16_BE, GRAY16_LE } "))
/* GObject boilerplate: derive GstVideoBox from GstVideoFilter and register
 * the "videobox" element together with its debug category. */
#define gst_video_box_parent_class parent_class
G_DEFINE_TYPE (GstVideoBox, gst_video_box, GST_TYPE_VIDEO_FILTER);
GST_ELEMENT_REGISTER_DEFINE_WITH_CODE (videobox, "videobox", GST_RANK_NONE,
    GST_TYPE_VIDEO_BOX, GST_DEBUG_CATEGORY_INIT (videobox_debug, "videobox", 0,
        "Resizes a video by adding borders or cropping"));
2416 static void gst_video_box_set_property (GObject * object, guint prop_id,
2417 const GValue * value, GParamSpec * pspec);
2418 static void gst_video_box_get_property (GObject * object, guint prop_id,
2419 GValue * value, GParamSpec * pspec);
2421 static gboolean gst_video_box_recalc_transform (GstVideoBox * video_box);
2422 static GstCaps *gst_video_box_transform_caps (GstBaseTransform * trans,
2423 GstPadDirection direction, GstCaps * from, GstCaps * filter);
2424 static void gst_video_box_before_transform (GstBaseTransform * trans,
2426 static gboolean gst_video_box_src_event (GstBaseTransform * trans,
2429 static gboolean gst_video_box_set_info (GstVideoFilter * vfilter, GstCaps * in,
2430 GstVideoInfo * in_info, GstCaps * out, GstVideoInfo * out_info);
2431 static GstFlowReturn gst_video_box_transform_frame (GstVideoFilter * vfilter,
2432 GstVideoFrame * in_frame, GstVideoFrame * out_frame);
2434 #define GST_TYPE_VIDEO_BOX_FILL (gst_video_box_fill_get_type())
2436 gst_video_box_fill_get_type (void)
2438 static GType video_box_fill_type = 0;
2439 static const GEnumValue video_box_fill[] = {
2440 {VIDEO_BOX_FILL_BLACK, "Black", "black"},
2441 {VIDEO_BOX_FILL_GREEN, "Green", "green"},
2442 {VIDEO_BOX_FILL_BLUE, "Blue", "blue"},
2443 {VIDEO_BOX_FILL_RED, "Red", "red"},
2444 {VIDEO_BOX_FILL_YELLOW, "Yellow", "yellow"},
2445 {VIDEO_BOX_FILL_WHITE, "White", "white"},
2449 if (!video_box_fill_type) {
2450 video_box_fill_type =
2451 g_enum_register_static ("GstVideoBoxFill", video_box_fill);
2453 return video_box_fill_type;
2457 gst_video_box_finalize (GObject * object)
2459 GstVideoBox *video_box = GST_VIDEO_BOX (object);
2461 g_mutex_clear (&video_box->mutex);
2463 G_OBJECT_CLASS (parent_class)->finalize (object);
2467 gst_video_box_class_init (GstVideoBoxClass * klass)
2469 GObjectClass *gobject_class = (GObjectClass *) klass;
2470 GstElementClass *element_class = (GstElementClass *) (klass);
2471 GstBaseTransformClass *trans_class = (GstBaseTransformClass *) klass;
2472 GstVideoFilterClass *vfilter_class = (GstVideoFilterClass *) klass;
2474 gobject_class->set_property = gst_video_box_set_property;
2475 gobject_class->get_property = gst_video_box_get_property;
2476 gobject_class->finalize = gst_video_box_finalize;
2478 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_FILL_TYPE,
2479 g_param_spec_enum ("fill", "Fill", "How to fill the borders",
2480 GST_TYPE_VIDEO_BOX_FILL, DEFAULT_FILL_TYPE,
2481 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
2482 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_LEFT,
2483 g_param_spec_int ("left", "Left",
2484 "Pixels to box at left (<0 = add a border)", G_MININT, G_MAXINT,
2486 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
2487 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_RIGHT,
2488 g_param_spec_int ("right", "Right",
2489 "Pixels to box at right (<0 = add a border)", G_MININT, G_MAXINT,
2491 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
2492 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_TOP,
2493 g_param_spec_int ("top", "Top",
2494 "Pixels to box at top (<0 = add a border)", G_MININT, G_MAXINT,
2496 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
2497 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_BOTTOM,
2498 g_param_spec_int ("bottom", "Bottom",
2499 "Pixels to box at bottom (<0 = add a border)", G_MININT, G_MAXINT,
2501 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
2502 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_ALPHA,
2503 g_param_spec_double ("alpha", "Alpha", "Alpha value picture", 0.0, 1.0,
2505 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
2506 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_BORDER_ALPHA,
2507 g_param_spec_double ("border-alpha", "Border Alpha",
2508 "Alpha value of the border", 0.0, 1.0, DEFAULT_BORDER_ALPHA,
2509 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
2511 * GstVideoBox:autocrop:
2513 * If set to %TRUE videobox will automatically crop/pad the input
2514 * video to be centered in the output.
2516 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_AUTOCROP,
2517 g_param_spec_boolean ("autocrop", "Auto crop",
2518 "Auto crop", FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
2520 trans_class->before_transform =
2521 GST_DEBUG_FUNCPTR (gst_video_box_before_transform);
2522 trans_class->transform_caps =
2523 GST_DEBUG_FUNCPTR (gst_video_box_transform_caps);
2524 trans_class->src_event = GST_DEBUG_FUNCPTR (gst_video_box_src_event);
2526 vfilter_class->set_info = GST_DEBUG_FUNCPTR (gst_video_box_set_info);
2527 vfilter_class->transform_frame =
2528 GST_DEBUG_FUNCPTR (gst_video_box_transform_frame);
2530 gst_element_class_set_static_metadata (element_class, "Video box filter",
2531 "Filter/Effect/Video",
2532 "Resizes a video by adding borders or cropping",
2533 "Wim Taymans <wim@fluendo.com>");
2535 gst_element_class_add_static_pad_template (element_class,
2536 &gst_video_box_sink_template);
2537 gst_element_class_add_static_pad_template (element_class,
2538 &gst_video_box_src_template);
2540 gst_type_mark_as_plugin_api (GST_TYPE_VIDEO_BOX_FILL, 0);
2544 gst_video_box_init (GstVideoBox * video_box)
2546 video_box->box_right = DEFAULT_RIGHT;
2547 video_box->box_left = DEFAULT_LEFT;
2548 video_box->box_top = DEFAULT_TOP;
2549 video_box->box_bottom = DEFAULT_BOTTOM;
2550 video_box->crop_right = 0;
2551 video_box->crop_left = 0;
2552 video_box->crop_top = 0;
2553 video_box->crop_bottom = 0;
2554 video_box->fill_type = DEFAULT_FILL_TYPE;
2555 video_box->alpha = DEFAULT_ALPHA;
2556 video_box->border_alpha = DEFAULT_BORDER_ALPHA;
2557 video_box->autocrop = FALSE;
2559 g_mutex_init (&video_box->mutex);
2563 gst_video_box_set_property (GObject * object, guint prop_id,
2564 const GValue * value, GParamSpec * pspec)
2566 GstVideoBox *video_box = GST_VIDEO_BOX (object);
2568 g_mutex_lock (&video_box->mutex);
2571 video_box->box_left = g_value_get_int (value);
2572 if (video_box->box_left < 0) {
2573 video_box->border_left = -video_box->box_left;
2574 video_box->crop_left = 0;
2576 video_box->border_left = 0;
2577 video_box->crop_left = video_box->box_left;
2581 video_box->box_right = g_value_get_int (value);
2582 if (video_box->box_right < 0) {
2583 video_box->border_right = -video_box->box_right;
2584 video_box->crop_right = 0;
2586 video_box->border_right = 0;
2587 video_box->crop_right = video_box->box_right;
2591 video_box->box_top = g_value_get_int (value);
2592 if (video_box->box_top < 0) {
2593 video_box->border_top = -video_box->box_top;
2594 video_box->crop_top = 0;
2596 video_box->border_top = 0;
2597 video_box->crop_top = video_box->box_top;
2601 video_box->box_bottom = g_value_get_int (value);
2602 if (video_box->box_bottom < 0) {
2603 video_box->border_bottom = -video_box->box_bottom;
2604 video_box->crop_bottom = 0;
2606 video_box->border_bottom = 0;
2607 video_box->crop_bottom = video_box->box_bottom;
2610 case PROP_FILL_TYPE:
2611 video_box->fill_type = g_value_get_enum (value);
2614 video_box->alpha = g_value_get_double (value);
2616 case PROP_BORDER_ALPHA:
2617 video_box->border_alpha = g_value_get_double (value);
2620 video_box->autocrop = g_value_get_boolean (value);
2623 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
2626 gst_video_box_recalc_transform (video_box);
2628 GST_DEBUG_OBJECT (video_box, "Calling reconfigure");
2629 gst_base_transform_reconfigure_src (GST_BASE_TRANSFORM_CAST (video_box));
2631 g_mutex_unlock (&video_box->mutex);
2635 gst_video_box_autocrop (GstVideoBox * video_box)
2637 gint crop_w = video_box->in_width - video_box->out_width;
2638 gint crop_h = video_box->in_height - video_box->out_height;
2640 video_box->box_left = crop_w / 2;
2641 if (video_box->box_left < 0) {
2642 video_box->border_left = -video_box->box_left;
2643 video_box->crop_left = 0;
2645 video_box->border_left = 0;
2646 video_box->crop_left = video_box->box_left;
2649 /* Round down/up for odd width differences */
2655 video_box->box_right = crop_w / 2;
2656 if (video_box->box_right < 0) {
2657 video_box->border_right = -video_box->box_right;
2658 video_box->crop_right = 0;
2660 video_box->border_right = 0;
2661 video_box->crop_right = video_box->box_right;
2664 video_box->box_top = crop_h / 2;
2665 if (video_box->box_top < 0) {
2666 video_box->border_top = -video_box->box_top;
2667 video_box->crop_top = 0;
2669 video_box->border_top = 0;
2670 video_box->crop_top = video_box->box_top;
2673 /* Round down/up for odd height differences */
2678 video_box->box_bottom = crop_h / 2;
2680 if (video_box->box_bottom < 0) {
2681 video_box->border_bottom = -video_box->box_bottom;
2682 video_box->crop_bottom = 0;
2684 video_box->border_bottom = 0;
2685 video_box->crop_bottom = video_box->box_bottom;
2690 gst_video_box_get_property (GObject * object, guint prop_id, GValue * value,
2693 GstVideoBox *video_box = GST_VIDEO_BOX (object);
2697 g_value_set_int (value, video_box->box_left);
2700 g_value_set_int (value, video_box->box_right);
2703 g_value_set_int (value, video_box->box_top);
2706 g_value_set_int (value, video_box->box_bottom);
2708 case PROP_FILL_TYPE:
2709 g_value_set_enum (value, video_box->fill_type);
2712 g_value_set_double (value, video_box->alpha);
2714 case PROP_BORDER_ALPHA:
2715 g_value_set_double (value, video_box->border_alpha);
2718 g_value_set_boolean (value, video_box->autocrop);
2721 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
2727 gst_video_box_transform_dimension (gint val, gint delta)
2729 gint64 new_val = (gint64) val + (gint64) delta;
2731 new_val = CLAMP (new_val, 1, G_MAXINT);
2733 return (gint) new_val;
2737 gst_video_box_transform_dimension_value (const GValue * src_val,
2738 gint delta, GValue * dest_val)
2740 gboolean ret = TRUE;
2742 g_value_init (dest_val, G_VALUE_TYPE (src_val));
2744 if (G_VALUE_HOLDS_INT (src_val)) {
2745 gint ival = g_value_get_int (src_val);
2747 ival = gst_video_box_transform_dimension (ival, delta);
2748 g_value_set_int (dest_val, ival);
2749 } else if (GST_VALUE_HOLDS_INT_RANGE (src_val)) {
2750 gint min = gst_value_get_int_range_min (src_val);
2751 gint max = gst_value_get_int_range_max (src_val);
2753 min = gst_video_box_transform_dimension (min, delta);
2754 max = gst_video_box_transform_dimension (max, delta);
2757 g_value_unset (dest_val);
2759 gst_value_set_int_range (dest_val, min, max);
2761 } else if (GST_VALUE_HOLDS_LIST (src_val)) {
2764 for (i = 0; i < gst_value_list_get_size (src_val); ++i) {
2765 const GValue *list_val;
2766 GValue newval = { 0, };
2768 list_val = gst_value_list_get_value (src_val, i);
2769 if (gst_video_box_transform_dimension_value (list_val, delta, &newval))
2770 gst_value_list_append_value (dest_val, &newval);
2771 g_value_unset (&newval);
2774 if (gst_value_list_get_size (dest_val) == 0) {
2775 g_value_unset (dest_val);
2779 g_value_unset (dest_val);
/* Transform caps across the element: adjust width/height by the configured
 * box_* deltas (or drop them entirely in autocrop mode, where any size is
 * acceptable) and widen the "format" field with the conversions this
 * element supports (AYUV<->I420/YV12, AYUV<->RGB variants), then intersect
 * with the opposite pad's template and the optional filter caps.
 * NOTE(review): this chunk is visibly truncated — the "static GstCaps *"
 * return-type line, local declarations (to/ret/templ/other/i/j/str/v),
 * several else branches, goto targets and closing braces are missing.
 * Comments below mark the obvious gaps; confirm against the upstream file
 * before building. */
gst_video_box_transform_caps (GstBaseTransform * trans,
    GstPadDirection direction, GstCaps * from, GstCaps * filter)
  GstVideoBox *video_box = GST_VIDEO_BOX (trans);
  GstStructure *structure;

  to = gst_caps_new_empty ();
  /* transform every structure of the source caps individually */
  for (i = 0; i < gst_caps_get_size (from); i++) {
    const GValue *fval, *lval;
    GValue list = { 0, };
    GValue val = { 0, };
    gboolean seen_yuv = FALSE, seen_rgb = FALSE;

    structure = gst_structure_copy (gst_caps_get_structure (from, i));

    /* Transform width/height */
    if (video_box->autocrop) {
      /* autocrop: the element adapts to whatever size is negotiated */
      gst_structure_remove_field (structure, "width");
      gst_structure_remove_field (structure, "height");
      /* NOTE(review): the following lines presumably live in the else {}
       * branch of the autocrop check */
      gint dw = 0, dh = 0;
      GValue w_val = { 0, };
      GValue h_val = { 0, };

      /* calculate width and height */
      if (direction == GST_PAD_SINK) {
        /* downstream: positive box values crop, so the output shrinks */
        dw -= video_box->box_left;
        dw -= video_box->box_right;
        /* NOTE(review): else branch (upstream direction) */
        dw += video_box->box_left;
        dw += video_box->box_right;

      if (direction == GST_PAD_SINK) {
        dh -= video_box->box_top;
        dh -= video_box->box_bottom;
        /* NOTE(review): else branch */
        dh += video_box->box_top;
        dh += video_box->box_bottom;

      v = gst_structure_get_value (structure, "width");
      if (!gst_video_box_transform_dimension_value (v, dw, &w_val)) {
        GST_WARNING_OBJECT (video_box,
            "could not transform width value with dw=%d" ", caps structure=%"
            GST_PTR_FORMAT, dw, structure);
      gst_structure_set_value (structure, "width", &w_val);

      v = gst_structure_get_value (structure, "height");
      if (!gst_video_box_transform_dimension_value (v, dh, &h_val)) {
        g_value_unset (&w_val);
        GST_WARNING_OBJECT (video_box,
            "could not transform height value with dh=%d" ", caps structure=%"
            GST_PTR_FORMAT, dh, structure);
      gst_structure_set_value (structure, "height", &h_val);
      g_value_unset (&w_val);
      g_value_unset (&h_val);

    /* Supported conversions:
     * AYUV->xRGB (24bpp, 32bpp, incl. alpha)
     * xRGB->xRGB (24bpp, 32bpp, from/to all variants, incl. alpha)
     * xRGB->AYUV (24bpp, 32bpp, incl. alpha)
     * Passthrough only for everything else.
     */
    fval = gst_structure_get_value (structure, "format");
    if (fval && GST_VALUE_HOLDS_LIST (fval)) {
      for (j = 0; j < gst_value_list_get_size (fval); j++) {
        lval = gst_value_list_get_value (fval, j);
        if ((str = g_value_get_string (lval))) {
          /* NOTE(review): the seen_yuv/seen_rgb assignments inside these
           * branches are missing from this chunk */
          if (strcmp (str, "AYUV") == 0) {
          } else if (strstr (str, "RGB") || strstr (str, "BGR")) {
          } else if (strcmp (str, "I420") == 0 || strcmp (str, "YV12") == 0) {
    } else if (fval && G_VALUE_HOLDS_STRING (fval)) {
      if ((str = g_value_get_string (fval))) {
        if (strcmp (str, "AYUV") == 0) {
        } else if (strstr (str, "RGB") || strstr (str, "BGR")) {
        } else if (strcmp (str, "I420") == 0 || strcmp (str, "YV12") == 0) {

    if (seen_yuv || seen_rgb) {
      /* AYUV is always offered as a conversion target */
      g_value_init (&list, GST_TYPE_LIST);

      g_value_init (&val, G_TYPE_STRING);
      g_value_set_string (&val, "AYUV");
      gst_value_list_append_value (&list, &val);
      g_value_unset (&val);

      /* NOTE(review): presumably guarded by if (seen_yuv) */
      g_value_init (&val, G_TYPE_STRING);
      g_value_set_string (&val, "I420");
      gst_value_list_append_value (&list, &val);
      g_value_reset (&val);
      g_value_set_string (&val, "YV12");
      gst_value_list_append_value (&list, &val);
      g_value_unset (&val);

      /* NOTE(review): presumably guarded by if (seen_rgb) */
      g_value_init (&val, G_TYPE_STRING);
      g_value_set_string (&val, "RGBx");
      gst_value_list_append_value (&list, &val);
      g_value_reset (&val);
      g_value_set_string (&val, "BGRx");
      gst_value_list_append_value (&list, &val);
      g_value_reset (&val);
      g_value_set_string (&val, "xRGB");
      gst_value_list_append_value (&list, &val);
      g_value_reset (&val);
      g_value_set_string (&val, "xBGR");
      gst_value_list_append_value (&list, &val);
      g_value_reset (&val);
      g_value_set_string (&val, "RGBA");
      gst_value_list_append_value (&list, &val);
      g_value_reset (&val);
      g_value_set_string (&val, "BGRA");
      gst_value_list_append_value (&list, &val);
      g_value_reset (&val);
      g_value_set_string (&val, "ARGB");
      gst_value_list_append_value (&list, &val);
      g_value_reset (&val);
      g_value_set_string (&val, "ABGR");
      gst_value_list_append_value (&list, &val);
      g_value_reset (&val);
      g_value_set_string (&val, "RGB");
      gst_value_list_append_value (&list, &val);
      g_value_reset (&val);
      g_value_set_string (&val, "BGR");
      gst_value_list_append_value (&list, &val);
      g_value_unset (&val);

      /* merge conversion targets with the original format value(s) */
      gst_value_list_merge (&val, fval, &list);
      gst_structure_set_value (structure, "format", &val);
      g_value_unset (&val);
      g_value_unset (&list);

    /* format list above makes for non-fixed caps;
     * so basetransform and peers will be enlisted to decide these parts,
     * otherwise leave as-is for passthrough case */
    gst_structure_remove_field (structure, "colorimetry");
    gst_structure_remove_field (structure, "chroma-site");

    gst_caps_append_structure (to, structure);

  /* filter against set allowed caps on the pad */
  other = (direction == GST_PAD_SINK) ? trans->srcpad : trans->sinkpad;
  templ = gst_pad_get_pad_template_caps (other);
  ret = gst_caps_intersect (to, templ);
  gst_caps_unref (to);
  gst_caps_unref (templ);

  GST_DEBUG_OBJECT (video_box, "direction %d, transformed %" GST_PTR_FORMAT
      " to %" GST_PTR_FORMAT, direction, from, ret);

  if (ret && filter) {
    GstCaps *intersection;

    GST_DEBUG_OBJECT (video_box, "Using filter caps %" GST_PTR_FORMAT, filter);
    /* NOTE(review): "intersection =" assignment line appears missing */
    gst_caps_intersect_full (filter, ret, GST_CAPS_INTERSECT_FIRST);
    gst_caps_unref (ret);
    GST_DEBUG_OBJECT (video_box, "Intersection %" GST_PTR_FORMAT, ret);

  /* error path: drop the failing structure and start over with empty caps */
  gst_structure_free (structure);
  gst_caps_unref (to);
  to = gst_caps_new_empty ();
2996 gst_video_box_recalc_transform (GstVideoBox * video_box)
2998 gboolean res = TRUE;
3000 /* if we have the same format in and out and we don't need to perform any
3001 * cropping at all, we can just operate in passthrough mode */
3002 if (video_box->in_format == video_box->out_format &&
3003 video_box->box_left == 0 && video_box->box_right == 0 &&
3004 video_box->box_top == 0 && video_box->box_bottom == 0 &&
3005 video_box->in_sdtv == video_box->out_sdtv) {
3007 GST_LOG_OBJECT (video_box, "we are using passthrough");
3008 gst_base_transform_set_passthrough (GST_BASE_TRANSFORM_CAST (video_box),
3011 GST_LOG_OBJECT (video_box, "we are not using passthrough");
3012 gst_base_transform_set_passthrough (GST_BASE_TRANSFORM_CAST (video_box),
3019 gst_video_box_select_processing_functions (GstVideoBox * video_box)
3021 switch (video_box->out_format) {
3022 case GST_VIDEO_FORMAT_AYUV:
3023 video_box->fill = fill_ayuv;
3024 switch (video_box->in_format) {
3025 case GST_VIDEO_FORMAT_AYUV:
3026 video_box->copy = copy_ayuv_ayuv;
3028 case GST_VIDEO_FORMAT_I420:
3029 case GST_VIDEO_FORMAT_YV12:
3030 video_box->copy = copy_i420_ayuv;
3032 case GST_VIDEO_FORMAT_ARGB:
3033 case GST_VIDEO_FORMAT_ABGR:
3034 case GST_VIDEO_FORMAT_RGBA:
3035 case GST_VIDEO_FORMAT_BGRA:
3036 case GST_VIDEO_FORMAT_xRGB:
3037 case GST_VIDEO_FORMAT_xBGR:
3038 case GST_VIDEO_FORMAT_RGBx:
3039 case GST_VIDEO_FORMAT_BGRx:
3040 case GST_VIDEO_FORMAT_RGB:
3041 case GST_VIDEO_FORMAT_BGR:
3042 video_box->copy = copy_rgb32_ayuv;
3048 case GST_VIDEO_FORMAT_I420:
3049 case GST_VIDEO_FORMAT_YV12:
3050 video_box->fill = fill_planar_yuv;
3051 switch (video_box->in_format) {
3052 case GST_VIDEO_FORMAT_AYUV:
3053 video_box->copy = copy_ayuv_i420;
3055 case GST_VIDEO_FORMAT_I420:
3056 case GST_VIDEO_FORMAT_YV12:
3057 video_box->copy = copy_i420_i420;
3063 case GST_VIDEO_FORMAT_ARGB:
3064 case GST_VIDEO_FORMAT_ABGR:
3065 case GST_VIDEO_FORMAT_RGBA:
3066 case GST_VIDEO_FORMAT_BGRA:
3067 case GST_VIDEO_FORMAT_xRGB:
3068 case GST_VIDEO_FORMAT_xBGR:
3069 case GST_VIDEO_FORMAT_RGBx:
3070 case GST_VIDEO_FORMAT_BGRx:
3071 case GST_VIDEO_FORMAT_RGB:
3072 case GST_VIDEO_FORMAT_BGR:
3073 video_box->fill = (video_box->out_format == GST_VIDEO_FORMAT_BGR
3074 || video_box->out_format ==
3075 GST_VIDEO_FORMAT_RGB) ? fill_rgb24 : fill_rgb32;
3076 switch (video_box->in_format) {
3077 case GST_VIDEO_FORMAT_ARGB:
3078 case GST_VIDEO_FORMAT_ABGR:
3079 case GST_VIDEO_FORMAT_RGBA:
3080 case GST_VIDEO_FORMAT_BGRA:
3081 case GST_VIDEO_FORMAT_xRGB:
3082 case GST_VIDEO_FORMAT_xBGR:
3083 case GST_VIDEO_FORMAT_RGBx:
3084 case GST_VIDEO_FORMAT_BGRx:
3085 case GST_VIDEO_FORMAT_RGB:
3086 case GST_VIDEO_FORMAT_BGR:
3087 video_box->copy = copy_rgb32;
3089 case GST_VIDEO_FORMAT_AYUV:
3090 video_box->copy = copy_ayuv_rgb32;
3095 case GST_VIDEO_FORMAT_GRAY8:
3096 case GST_VIDEO_FORMAT_GRAY16_BE:
3097 case GST_VIDEO_FORMAT_GRAY16_LE:
3098 video_box->fill = fill_gray;
3099 switch (video_box->in_format) {
3100 case GST_VIDEO_FORMAT_GRAY8:
3101 case GST_VIDEO_FORMAT_GRAY16_BE:
3102 case GST_VIDEO_FORMAT_GRAY16_LE:
3103 video_box->copy = copy_packed_simple;
3109 case GST_VIDEO_FORMAT_YUY2:
3110 case GST_VIDEO_FORMAT_YVYU:
3111 case GST_VIDEO_FORMAT_UYVY:
3112 video_box->fill = fill_yuy2;
3113 switch (video_box->in_format) {
3114 case GST_VIDEO_FORMAT_YUY2:
3115 case GST_VIDEO_FORMAT_YVYU:
3116 case GST_VIDEO_FORMAT_UYVY:
3117 video_box->copy = copy_yuy2_yuy2;
3123 case GST_VIDEO_FORMAT_Y444:
3124 case GST_VIDEO_FORMAT_Y42B:
3125 case GST_VIDEO_FORMAT_Y41B:
3126 video_box->fill = fill_planar_yuv;
3127 switch (video_box->in_format) {
3128 case GST_VIDEO_FORMAT_Y444:
3129 video_box->copy = copy_y444_y444;
3131 case GST_VIDEO_FORMAT_Y42B:
3132 video_box->copy = copy_y42b_y42b;
3134 case GST_VIDEO_FORMAT_Y41B:
3135 video_box->copy = copy_y41b_y41b;
3145 return video_box->fill != NULL && video_box->copy != NULL;
3149 gst_video_box_set_info (GstVideoFilter * vfilter, GstCaps * in,
3150 GstVideoInfo * in_info, GstCaps * out, GstVideoInfo * out_info)
3152 GstVideoBox *video_box = GST_VIDEO_BOX (vfilter);
3155 g_mutex_lock (&video_box->mutex);
3157 video_box->in_format = GST_VIDEO_INFO_FORMAT (in_info);
3158 video_box->in_width = GST_VIDEO_INFO_WIDTH (in_info);
3159 video_box->in_height = GST_VIDEO_INFO_HEIGHT (in_info);
3161 video_box->out_format = GST_VIDEO_INFO_FORMAT (out_info);
3162 video_box->out_width = GST_VIDEO_INFO_WIDTH (out_info);
3163 video_box->out_height = GST_VIDEO_INFO_HEIGHT (out_info);
3165 video_box->in_sdtv =
3166 in_info->colorimetry.matrix == GST_VIDEO_COLOR_MATRIX_BT601;
3167 video_box->out_sdtv =
3168 out_info->colorimetry.matrix == GST_VIDEO_COLOR_MATRIX_BT601;
3170 GST_DEBUG_OBJECT (video_box, "Input w: %d h: %d", video_box->in_width,
3171 video_box->in_height);
3172 GST_DEBUG_OBJECT (video_box, "Output w: %d h: %d", video_box->out_width,
3173 video_box->out_height);
3175 if (video_box->autocrop)
3176 gst_video_box_autocrop (video_box);
3178 /* recalc the transformation strategy */
3179 ret = gst_video_box_recalc_transform (video_box);
3182 ret = gst_video_box_select_processing_functions (video_box);
3183 g_mutex_unlock (&video_box->mutex);
3189 gst_video_box_src_event (GstBaseTransform * trans, GstEvent * event)
3191 GstVideoBox *video_box = GST_VIDEO_BOX (trans);
3192 GstNavigationEventType type;
3196 GST_OBJECT_LOCK (video_box);
3197 type = gst_navigation_event_get_type (event);
3198 if (GST_EVENT_TYPE (event) == GST_EVENT_NAVIGATION &&
3199 (video_box->box_left != 0 || video_box->box_top != 0) &&
3200 (type == GST_NAVIGATION_EVENT_MOUSE_MOVE
3201 || type == GST_NAVIGATION_EVENT_MOUSE_BUTTON_PRESS
3202 || type == GST_NAVIGATION_EVENT_MOUSE_BUTTON_RELEASE)) {
3203 if (gst_navigation_event_get_coordinates (event, &pointer_x, &pointer_y)) {
3204 gdouble new_pointer_x, new_pointer_y;
3206 event = gst_event_make_writable (event);
3207 new_pointer_x = pointer_x + video_box->box_left;
3208 new_pointer_y = pointer_y + video_box->box_top;
3210 gst_navigation_event_set_coordinates (event, new_pointer_x,
3213 GST_WARNING_OBJECT (video_box, "Failed to read navigation event");
3216 GST_OBJECT_UNLOCK (video_box);
3218 return GST_BASE_TRANSFORM_CLASS (parent_class)->src_event (trans, event);
3222 gst_video_box_process (GstVideoBox * video_box, GstVideoFrame * in,
3223 GstVideoFrame * out)
3225 guint b_alpha = CLAMP (video_box->border_alpha * 256, 0, 255);
3226 guint i_alpha = CLAMP (video_box->alpha * 256, 0, 255);
3227 GstVideoBoxFill fill_type = video_box->fill_type;
3228 gint br, bl, bt, bb, crop_w, crop_h;
3233 br = video_box->box_right;
3234 bl = video_box->box_left;
3235 bt = video_box->box_top;
3236 bb = video_box->box_bottom;
3238 if (br >= 0 && bl >= 0) {
3239 crop_w = video_box->in_width - (br + bl);
3240 } else if (br >= 0 && bl < 0) {
3241 crop_w = video_box->in_width - (br);
3242 } else if (br < 0 && bl >= 0) {
3243 crop_w = video_box->in_width - (bl);
3244 } else if (br < 0 && bl < 0) {
3245 crop_w = video_box->in_width;
3248 if (bb >= 0 && bt >= 0) {
3249 crop_h = video_box->in_height - (bb + bt);
3250 } else if (bb >= 0 && bt < 0) {
3251 crop_h = video_box->in_height - (bb);
3252 } else if (bb < 0 && bt >= 0) {
3253 crop_h = video_box->in_height - (bt);
3254 } else if (bb < 0 && bt < 0) {
3255 crop_h = video_box->in_height;
3258 GST_DEBUG_OBJECT (video_box, "Borders are: L:%d, R:%d, T:%d, B:%d", bl, br,
3260 GST_DEBUG_OBJECT (video_box, "Alpha value is: %u (frame) %u (border)",
3263 if (crop_h < 0 || crop_w < 0) {
3264 video_box->fill (fill_type, b_alpha, out, video_box->out_sdtv);
3265 } else if (bb == 0 && bt == 0 && br == 0 && bl == 0) {
3266 video_box->copy (i_alpha, out, video_box->out_sdtv, 0, 0, in,
3267 video_box->in_sdtv, 0, 0, crop_w, crop_h);
3269 gint src_x = 0, src_y = 0;
3270 gint dest_x = 0, dest_y = 0;
3272 /* Fill everything if a border should be added somewhere */
3273 if (bt < 0 || bb < 0 || br < 0 || bl < 0)
3274 video_box->fill (fill_type, b_alpha, out, video_box->out_sdtv);
3291 video_box->copy (i_alpha, out, video_box->out_sdtv, dest_x, dest_y,
3292 in, video_box->in_sdtv, src_x, src_y, crop_w, crop_h);
3295 GST_LOG_OBJECT (video_box, "image created");
3299 gst_video_box_before_transform (GstBaseTransform * trans, GstBuffer * in)
3301 GstVideoBox *video_box = GST_VIDEO_BOX (trans);
3302 GstClockTime timestamp, stream_time;
3304 timestamp = GST_BUFFER_TIMESTAMP (in);
3306 gst_segment_to_stream_time (&trans->segment, GST_FORMAT_TIME, timestamp);
3308 GST_DEBUG_OBJECT (video_box, "sync to %" GST_TIME_FORMAT,
3309 GST_TIME_ARGS (timestamp));
3311 if (GST_CLOCK_TIME_IS_VALID (stream_time))
3312 gst_object_sync_values (GST_OBJECT (video_box), stream_time);
3315 static GstFlowReturn
3316 gst_video_box_transform_frame (GstVideoFilter * vfilter,
3317 GstVideoFrame * in_frame, GstVideoFrame * out_frame)
3319 GstVideoBox *video_box = GST_VIDEO_BOX (vfilter);
3321 g_mutex_lock (&video_box->mutex);
3322 gst_video_box_process (video_box, in_frame, out_frame);
3323 g_mutex_unlock (&video_box->mutex);
3327 /* FIXME: 0.11 merge with videocrop plugin */
3329 plugin_init (GstPlugin * plugin)
3331 return GST_ELEMENT_REGISTER (videobox, plugin);
3334 GST_PLUGIN_DEFINE (GST_VERSION_MAJOR,
3337 "resizes a video by adding borders or cropping",
3338 plugin_init, VERSION, GST_LICENSE, GST_PACKAGE_NAME, GST_PACKAGE_ORIGIN)