2 * Copyright (C) 1999 Erik Walthinsen <omega@cse.ogi.edu>
3 * Copyright (C) 2006 Tim-Philipp Müller <tim centricular net>
4 * Copyright (C) 2010 Sebastian Dröge <sebastian.droege@collabora.co.uk>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Library General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Library General Public License for more details.
16 * You should have received a copy of the GNU Library General Public
17 * License along with this library; if not, write to the
18 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19 * Boston, MA 02111-1307, USA.
22 * SECTION:element-videobox
23 * @see_also: #GstVideoCrop
25 * This plugin crops or enlarges the image. It takes 4 values as input, a
26 * top, bottom, left and right offset. Positive values will crop that much
27 * pixels from the respective border of the image, negative values will add
28 * that much pixels. When pixels are added, you can specify their color.
29 * Some predefined colors are usable with an enum property.
31 * The plugin is alpha channel aware and will try to negotiate with a format
32 * that supports alpha channels first. When alpha channel is active two
33 * other properties, alpha and border_alpha can be used to set the alpha
34 * values of the inner picture and the border respectively. An alpha value of
35 * 0.0 means total transparency, 1.0 is opaque.
37 * The videobox plugin has many uses such as doing a mosaic of pictures,
38 * letterboxing video, cutting out pieces of video, picture in picture, etc..
40 * Setting autocrop to true changes the behavior of the plugin so that
41 * caps determine crop properties rather than the other way around: given
42 * input and output dimensions, the crop values are selected so that the
43 * smaller frame is effectively centered in the larger frame. This
44 * involves either cropping or padding.
46 * If you use autocrop there is little point in setting the other
47 * properties manually because they will be overridden if the caps change,
48 * but nothing stops you from doing so.
52 * gst-launch videotestsrc ! videobox autocrop=true ! \
53 * "video/x-raw-yuv, width=600, height=400" ! ffmpegcolorspace ! ximagesink
61 #include "gstvideobox.h"
62 #include "gstvideoboxorc.h"
/* Debug category for this element; GST_CAT_DEFAULT makes it the target of
 * all GST_DEBUG/GST_LOG/GST_WARNING calls in this file. */
67 GST_DEBUG_CATEGORY_STATIC (videobox_debug);
68 #define GST_CAT_DEFAULT videobox_debug
70 /* From videotestsrc.c */
/* Fill-color lookup tables, one entry per GstVideoBoxFill value
 * (VIDEO_BOX_FILL_LAST entries).  Separate Y/U/V tables exist for
 * SDTV (BT.601) and HDTV (BT.709) colorimetry. */
71 static const guint8 yuv_sdtv_colors_Y[VIDEO_BOX_FILL_LAST] =
72 { 16, 145, 41, 81, 210, 235 };
73 static const guint8 yuv_sdtv_colors_U[VIDEO_BOX_FILL_LAST] =
74 { 128, 54, 240, 90, 16, 128 };
75 static const guint8 yuv_sdtv_colors_V[VIDEO_BOX_FILL_LAST] =
76 { 128, 34, 110, 240, 146, 128 };
78 static const guint8 yuv_hdtv_colors_Y[VIDEO_BOX_FILL_LAST] =
79 { 16, 173, 32, 63, 219, 235 };
80 static const guint8 yuv_hdtv_colors_U[VIDEO_BOX_FILL_LAST] =
81 { 128, 42, 240, 102, 16, 128 };
82 static const guint8 yuv_hdtv_colors_V[VIDEO_BOX_FILL_LAST] =
83 { 128, 26, 118, 240, 138, 128 };
/* RGB equivalents of the same fill colors */
85 static const guint8 rgb_colors_R[VIDEO_BOX_FILL_LAST] =
86 { 0, 0, 0, 255, 255, 255 };
87 static const guint8 rgb_colors_G[VIDEO_BOX_FILL_LAST] =
88 { 0, 255, 0, 0, 255, 255 };
89 static const guint8 rgb_colors_B[VIDEO_BOX_FILL_LAST] =
90 { 0, 0, 255, 0, 0, 255 };
92 /* Generated by -bad/ext/cog/generate_tables */
/* 4x3 fixed-point color conversion matrices, stored row-major as
 * 4 coefficients per row; applied via APPLY_MATRIX below with a
 * >>8 fixed-point scale.  NOTE(review): only some rows are visible
 * in this listing — the full 12-entry initializers are assumed. */
93 static const int cog_ycbcr_to_rgb_matrix_8bit_hdtv[] = {
95 298, -55, -136, 19681,
99 static const int cog_ycbcr_to_rgb_matrix_8bit_sdtv[] = {
101 298, -100, -208, 34707,
105 static const gint cog_rgb_to_ycbcr_matrix_8bit_hdtv[] = {
107 -26, -87, 112, 32768,
108 112, -102, -10, 32768,
111 static const gint cog_rgb_to_ycbcr_matrix_8bit_sdtv[] = {
113 -38, -74, 112, 32768,
114 112, -94, -18, 32768,
117 static const gint cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit[] = {
118 256, -30, -53, 10600,
123 static const gint cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit[] = {
129 static const gint cog_identity_matrix_8bit[] = {
/* Apply row o (0=Y/R, 1=U/G, 2=V/B) of matrix m to the triple
 * (v1,v2,v3); coefficients are 8-bit fixed point, hence the >>8. */
135 #define APPLY_MATRIX(m,o,v1,v2,v3) ((m[o*4] * v1 + m[o*4+1] * v2 + m[o*4+2] * v3 + m[o*4+3]) >> 8)
/* Fill an AYUV buffer with a solid border color: the color selected by
 * @fill_type from the SDTV or HDTV table (per @sdtv), with @b_alpha in
 * the top byte.  The 32-bit pixel is splatted over width*height pixels
 * with Orc. */
138 fill_ayuv (GstVideoBoxFill fill_type, guint b_alpha, GstVideoFormat format,
139 guint8 * dest, gboolean sdtv, gint width, gint height)
/* clamp the border alpha into the valid 8-bit range */
143 b_alpha = CLAMP (b_alpha, 0, 255);
/* pack A|Y|U|V big-endian so byte order in memory is A,Y,U,V */
146 empty_pixel = GUINT32_FROM_BE ((b_alpha << 24) |
147 (yuv_sdtv_colors_Y[fill_type] << 16) |
148 (yuv_sdtv_colors_U[fill_type] << 8) | yuv_sdtv_colors_V[fill_type]);
150 empty_pixel = GUINT32_FROM_BE ((b_alpha << 24) |
151 (yuv_hdtv_colors_Y[fill_type] << 16) |
152 (yuv_hdtv_colors_U[fill_type] << 8) | yuv_hdtv_colors_V[fill_type]);
/* replicate the packed pixel across the whole frame */
154 orc_splat_u32 ((guint32 *) dest, empty_pixel, width * height);
/* Copy a w x h region of AYUV pixels from (src_x,src_y) in @src to
 * (dest_x,dest_y) in @dest, scaling the alpha byte by @i_alpha
 * (8-bit fixed point: (a * i_alpha) >> 8).  If source and destination
 * colorimetry differ (@src_sdtv != @dest_sdtv), Y/U/V are converted
 * through the appropriate SDTV<->HDTV matrix; otherwise bytes are
 * copied through unchanged. */
158 copy_ayuv_ayuv (guint i_alpha, GstVideoFormat dest_format, guint8 * dest,
159 gboolean dest_sdtv, gint dest_width, gint dest_height, gint dest_x,
160 gint dest_y, GstVideoFormat src_format, const guint8 * src,
161 gboolean src_sdtv, gint src_width, gint src_height, gint src_x, gint src_y,
/* AYUV is packed 4 bytes per pixel, so stride == 4 * width here
 * (assumes no row padding — TODO confirm for this element's buffers) */
165 gint src_stride = 4 * src_width;
166 gint dest_stride = 4 * dest_width;
/* advance both pointers to the top-left corner of the copied region */
168 dest = dest + dest_y * dest_width * 4 + dest_x * 4;
169 src = src + src_y * src_width * 4 + src_x * 4;
173 if (dest_sdtv != src_sdtv) {
/* pick the direction of the colorimetry conversion */
178 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
179 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
181 for (i = 0; i < h; i++) {
182 for (j = 0; j < w; j += 4) {
/* byte 0 is alpha: scale it by i_alpha */
184 dest[j] = (src[j] * i_alpha) >> 8;
/* bytes 1..3 are Y,U,V: run them through the matrix */
188 dest[j + 1] = APPLY_MATRIX (matrix, 0, y, u, v);
189 dest[j + 2] = APPLY_MATRIX (matrix, 1, y, u, v);
190 dest[j + 3] = APPLY_MATRIX (matrix, 2, y, u, v);
/* same colorimetry: alpha scaled, Y/U/V copied verbatim */
196 for (i = 0; i < h; i++) {
197 for (j = 0; j < w; j += 4) {
199 dest[j] = (src[j] * i_alpha) >> 8;
200 dest[j + 1] = src[j + 1];
201 dest[j + 2] = src[j + 2];
202 dest[j + 3] = src[j + 3];
/* Copy a region of packed AYUV source into planar I420 (4:2:0)
 * destination, converting colorimetry if needed and blending partial
 * 2x2 macro pixels with the existing destination chroma.  Alpha in the
 * source is dropped (I420 has no alpha plane).  src/src2 are the two
 * source scanlines feeding one destination macro-pixel row. */
211 copy_ayuv_i420 (guint i_alpha, GstVideoFormat dest_format, guint8 * dest,
212 gboolean dest_sdtv, gint dest_width, gint dest_height, gint dest_x,
213 gint dest_y, GstVideoFormat src_format, const guint8 * src,
214 gboolean src_sdtv, gint src_width, gint src_height, gint src_x, gint src_y,
218 guint8 *destY, *destY2, *destU, *destV;
219 gint dest_strideY, dest_strideUV;
228 dest_strideY = gst_video_format_get_row_stride (dest_format, 0, dest_width);
229 dest_strideUV = gst_video_format_get_row_stride (dest_format, 1, dest_width);
/* packed AYUV: 4 bytes per pixel */
231 src_stride = 4 * src_width;
/* locate the three destination planes */
234 dest + gst_video_format_get_component_offset (dest_format, 0,
235 dest_width, dest_height);
237 dest + gst_video_format_get_component_offset (dest_format, 1,
238 dest_width, dest_height);
240 dest + gst_video_format_get_component_offset (dest_format, 2,
241 dest_width, dest_height);
/* move to the target position; chroma planes are subsampled 2x2 */
243 destY = destY + dest_y * dest_strideY + dest_x;
244 destY2 = (dest_y < dest_height) ? destY + dest_strideY : destY;
245 destU = destU + (dest_y / 2) * dest_strideUV + dest_x / 2;
246 destV = destV + (dest_y / 2) * dest_strideUV + dest_x / 2;
248 src = src + src_y * src_stride + src_x * 4;
/* src2 is the next scanline, clamped at the bottom edge */
249 src2 = (src_y < src_height) ? src + src_stride : src;
/* select a conversion matrix, or identity when colorimetry matches */
254 if (src_sdtv != dest_sdtv)
256 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
257 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
259 memcpy (matrix, cog_identity_matrix_8bit, 12 * sizeof (gint));
261 /* 1. Handle the first destination scanline specially if it
262 * doesn't start at the macro pixel boundary, i.e. blend
263 * with the background! */
264 if (dest_y % 2 == 1) {
265 /* 1.1. Handle the first destination pixel if it doesn't
266 * start at the macro pixel boundary, i.e. blend with
268 if (dest_x % 2 == 1) {
273 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
/* 1 of 4 sub-pixels of the macro pixel: weight new chroma 1/4 */
275 CLAMP ((3 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
278 CLAMP ((3 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 4, 0,
288 /* 1.2. Copy all macro pixels from the source to the destination
289 * but blend with the background because we're only filling
290 * the lower part of the macro pixels. */
291 for (; j < w - 1; j += 2) {
292 y1 = src[4 * y_idx + 1];
293 y2 = src[4 * y_idx + 4 + 1];
295 u1 = src[4 * y_idx + 2];
296 u2 = src[4 * y_idx + 4 + 2];
298 v1 = src[4 * y_idx + 3];
299 v2 = src[4 * y_idx + 4 + 3];
301 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
302 destY[y_idx + 1] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
/* lower half of macro pixel filled: 50/50 blend with background */
303 destU[uv_idx] = CLAMP (
304 (2 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
305 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
306 destV[uv_idx] = CLAMP (
307 (2 * destV[uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
308 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
314 /* 1.3. Now copy the last pixel if one exists and blend it
315 * with the background because we only fill part of
316 * the macro pixel. In case this is the last pixel of
317 * the destination we will fill a larger part. */
318 if (j == w - 1 && j == dest_width - 1) {
319 y1 = src[4 * y_idx + 1];
320 u1 = src[4 * y_idx + 2];
321 v1 = src[4 * y_idx + 3];
323 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
324 destU[uv_idx] = CLAMP (
325 (destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
326 destV[uv_idx] = CLAMP (
327 (destV[uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 2, 0, 255);
328 } else if (j == w - 1) {
329 y1 = src[4 * y_idx + 1];
330 u1 = src[4 * y_idx + 2];
331 v1 = src[4 * y_idx + 3];
333 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
334 destU[uv_idx] = CLAMP (
335 (3 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
338 CLAMP ((3 * destV[uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 4,
/* advance to the next (macro-pixel-aligned) scanline pair */
342 destY += dest_strideY;
343 destY2 += dest_strideY;
344 destU += dest_strideUV;
345 destV += dest_strideUV;
353 /* 2. Copy all macro pixel scanlines, the destination scanline
354 * now starts at macro pixel boundary. */
355 for (; i < h - 1; i += 2) {
356 /* 2.1. Handle the first destination pixel if it doesn't
357 * start at the macro pixel boundary, i.e. blend with
359 if (dest_x % 2 == 1) {
361 y2 = src2[4 * 0 + 1];
363 u2 = src2[4 * 0 + 2];
365 v2 = src2[4 * 0 + 3];
367 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
368 destY2[0] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
370 (2 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1,
371 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
373 (2 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1,
374 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
382 /* 2.2. Copy all macro pixels from the source to the destination.
383 * All pixels now start at macro pixel boundary, i.e. no
384 * blending with the background is necessary. */
385 for (; j < w - 1; j += 2) {
386 y1 = src[4 * y_idx + 1];
387 y2 = src[4 * y_idx + 4 + 1];
388 y3 = src2[4 * y_idx + 1];
389 y4 = src2[4 * y_idx + 4 + 1];
391 u1 = src[4 * y_idx + 2];
392 u2 = src[4 * y_idx + 4 + 2];
393 u3 = src2[4 * y_idx + 2];
394 u4 = src2[4 * y_idx + 4 + 2];
396 v1 = src[4 * y_idx + 3];
397 v2 = src[4 * y_idx + 4 + 3];
398 v3 = src2[4 * y_idx + 3];
399 v4 = src2[4 * y_idx + 4 + 3];
401 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
402 destY[y_idx + 1] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
403 destY2[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y3, u3, v3), 0, 255);
404 destY2[y_idx + 1] = CLAMP (APPLY_MATRIX (matrix, 0, y4, u4, v4), 0, 255);
/* full macro pixel: chroma is the average of all four sub-pixels */
406 destU[uv_idx] = CLAMP (
407 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 1, y2,
408 u2, v2) + APPLY_MATRIX (matrix, 1, y3, u3,
409 v3) + APPLY_MATRIX (matrix, 1, y4, u4, v4)) / 4, 0, 255);
410 destV[uv_idx] = CLAMP (
411 (APPLY_MATRIX (matrix, 2, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
412 u2, v2) + APPLY_MATRIX (matrix, 2, y3, u3,
413 v3) + APPLY_MATRIX (matrix, 2, y4, u4, v4)) / 4, 0, 255);
419 /* 2.3. Now copy the last pixel if one exists and blend it
420 * with the background because we only fill part of
421 * the macro pixel. In case this is the last pixel of
422 * the destination we will fill a larger part. */
423 if (j == w - 1 && j == dest_width - 1) {
424 y1 = src[4 * y_idx + 1];
425 y2 = src2[4 * y_idx + 1];
427 u1 = src[4 * y_idx + 2];
428 u2 = src2[4 * y_idx + 2];
430 v1 = src[4 * y_idx + 3];
431 v2 = src2[4 * y_idx + 3];
433 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
434 destY2[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
/* NOTE(review): matrix rows look mixed here — destU averages row 1
 * (U) for pixel 1 with row 2 (V) for pixel 2, and destV does the
 * same.  Expected: destU uses row 1 for both, destV row 2 for both
 * (as in section 2.2 above).  Confirm against upstream gstvideobox.c. */
435 destU[uv_idx] = CLAMP (
436 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
437 u2, v2)) / 2, 0, 255);
438 destV[uv_idx] = CLAMP (
439 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
440 u2, v2)) / 2, 0, 255);
441 } else if (j == w - 1) {
442 y1 = src[4 * y_idx + 1];
443 y2 = src2[4 * y_idx + 1];
445 u1 = src[4 * y_idx + 2];
446 u2 = src2[4 * y_idx + 2];
448 v1 = src[4 * y_idx + 3];
449 v2 = src2[4 * y_idx + 3];
451 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
452 destY2[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
/* NOTE(review): same suspected row mix-up as above (rows 1 and 2
 * combined in both the U and V blends) — confirm. */
453 destU[uv_idx] = CLAMP (
454 (2 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
455 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
456 destV[uv_idx] = CLAMP (
457 (2 * destV[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
458 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
/* step two source/destination luma rows, one chroma row */
461 destY += 2 * dest_strideY;
462 destY2 += 2 * dest_strideY;
463 destU += dest_strideUV;
464 destV += dest_strideUV;
465 src += 2 * src_stride;
466 src2 += 2 * src_stride;
469 /* 3. Handle the last scanline if one exists. This again
470 * doesn't start at macro pixel boundary but should
471 * only fill the upper part of the macro pixels. */
472 if (i == h - 1 && i == dest_height - 1) {
473 /* 3.1. Handle the first destination pixel if it doesn't
474 * start at the macro pixel boundary, i.e. blend with
476 if (dest_x % 2 == 1) {
481 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
483 CLAMP ((destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
485 CLAMP ((destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 2, 0, 255);
494 /* 3.2. Copy all macro pixels from the source to the destination
495 * but blend with the background because we're only filling
496 * the upper part of the macro pixels. */
497 for (; j < w - 1; j += 2) {
498 y1 = src[4 * y_idx + 1];
499 y2 = src[4 * y_idx + 4 + 1];
501 u1 = src[4 * y_idx + 2];
502 u2 = src[4 * y_idx + 4 + 2];
504 v1 = src[4 * y_idx + 3];
505 v2 = src[4 * y_idx + 4 + 3];
507 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
508 destY[y_idx + 1] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
510 destU[uv_idx] = CLAMP (
511 (2 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
512 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
513 destV[uv_idx] = CLAMP (
514 (2 * destV[uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
515 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
521 /* 3.3. Now copy the last pixel if one exists and blend it
522 * with the background because we only fill part of
523 * the macro pixel. In case this is the last pixel of
524 * the destination we will fill a larger part. */
525 if (j == w - 1 && j == dest_width - 1) {
526 y1 = src[4 * y_idx + 1];
527 u1 = src[4 * y_idx + 2];
528 v1 = src[4 * y_idx + 3];
530 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
531 destU[uv_idx] = CLAMP (
532 (destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
/* NOTE(review): destV blends with matrix row 1 (the U row); the
 * parallel code in 1.3 uses row 2 here — looks like a copy/paste
 * slip, confirm against upstream. */
533 destV[uv_idx] = CLAMP (
534 (destV[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
535 } else if (j == w - 1) {
536 y1 = src[4 * y_idx + 1];
537 u1 = src[4 * y_idx + 2];
538 v1 = src[4 * y_idx + 3];
540 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
541 destU[uv_idx] = CLAMP (
542 (3 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
/* NOTE(review): row 1 used for V again — expected row 2; confirm. */
545 CLAMP ((3 * destV[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4,
548 } else if (i == h - 1) {
549 /* 3.1. Handle the first destination pixel if it doesn't
550 * start at the macro pixel boundary, i.e. blend with
552 if (dest_x % 2 == 1) {
557 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
559 CLAMP ((3 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
562 CLAMP ((3 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 4, 0,
572 /* 3.2. Copy all macro pixels from the source to the destination
573 * but blend with the background because we're only filling
574 * the upper part of the macro pixels. */
575 for (; j < w - 1; j += 2) {
576 y1 = src[4 * y_idx + 1];
577 y2 = src[4 * y_idx + 4 + 1];
579 u1 = src[4 * y_idx + 2];
580 u2 = src[4 * y_idx + 4 + 2];
582 v1 = src[4 * y_idx + 3];
583 v2 = src[4 * y_idx + 4 + 3];
585 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
586 destY[y_idx + 1] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
588 destU[uv_idx] = CLAMP (
589 (2 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
590 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
591 destV[uv_idx] = CLAMP (
592 (2 * destV[uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
593 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
599 /* 3.3. Now copy the last pixel if one exists and blend it
600 * with the background because we only fill part of
601 * the macro pixel. In case this is the last pixel of
602 * the destination we will fill a larger part. */
603 if (j == w - 1 && j == dest_width - 1) {
604 y1 = src[4 * y_idx + 1];
605 u1 = src[4 * y_idx + 2];
606 v1 = src[4 * y_idx + 3];
608 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
609 destU[uv_idx] = CLAMP (
610 (destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
/* NOTE(review): row 1 used for V — expected row 2; confirm. */
611 destV[uv_idx] = CLAMP (
612 (destV[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
613 } else if (j == w - 1) {
614 y1 = src[4 * y_idx + 1];
615 u1 = src[4 * y_idx + 2];
616 v1 = src[4 * y_idx + 3];
618 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
619 destU[uv_idx] = CLAMP (
620 (3 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
/* NOTE(review): row 1 used for V — expected row 2; confirm. */
623 CLAMP ((3 * destV[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4,
/* Fill a planar YUV frame (e.g. I420/Y42B/Y444) with the solid color
 * selected by @fill_type, using the SDTV or HDTV color table per @sdtv.
 * Each plane is memset with its single component value; no alpha plane
 * exists in these formats (b_alpha is accepted for API symmetry). */
630 fill_planar_yuv (GstVideoBoxFill fill_type, guint b_alpha,
631 GstVideoFormat format, guint8 * dest, gboolean sdtv, gint width,
634 guint8 empty_pixel[3];
635 guint8 *destY, *destU, *destV;
636 gint strideY, strideUV;
637 gint heightY, heightUV;
/* pick the Y/U/V triple for the requested colorimetry */
640 empty_pixel[0] = yuv_sdtv_colors_Y[fill_type];
641 empty_pixel[1] = yuv_sdtv_colors_U[fill_type];
642 empty_pixel[2] = yuv_sdtv_colors_V[fill_type];
644 empty_pixel[0] = yuv_hdtv_colors_Y[fill_type];
645 empty_pixel[1] = yuv_hdtv_colors_U[fill_type];
646 empty_pixel[2] = yuv_hdtv_colors_V[fill_type];
649 strideY = gst_video_format_get_row_stride (format, 0, width);
650 strideUV = gst_video_format_get_row_stride (format, 1, width);
/* plane base pointers from the format's component offsets */
653 dest + gst_video_format_get_component_offset (format, 0, width, height);
655 dest + gst_video_format_get_component_offset (format, 1, width, height);
657 dest + gst_video_format_get_component_offset (format, 2, width, height);
659 heightY = gst_video_format_get_component_height (format, 0, height);
660 heightUV = gst_video_format_get_component_height (format, 1, height);
/* one memset per plane; stride*height covers any row padding too */
662 memset (destY, empty_pixel[0], strideY * heightY);
663 memset (destU, empty_pixel[1], strideUV * heightUV);
664 memset (destV, empty_pixel[2], strideUV * heightUV);
/* Copy a w x h region between two Y444 (4:4:4 planar) frames.  With no
 * chroma subsampling there are no partial macro pixels: either convert
 * every pixel through the SDTV<->HDTV matrix, or memcpy each plane row
 * when the colorimetry already matches.  i_alpha is unused (no alpha
 * plane in Y444). */
668 copy_y444_y444 (guint i_alpha, GstVideoFormat dest_format, guint8 * dest,
669 gboolean dest_sdtv, gint dest_width, gint dest_height, gint dest_x,
670 gint dest_y, GstVideoFormat src_format, const guint8 * src,
671 gboolean src_sdtv, gint src_width, gint src_height, gint src_x, gint src_y,
675 guint8 *destY, *destU, *destV;
676 const guint8 *srcY, *srcU, *srcV;
/* all three planes share one stride in Y444 */
680 dest_stride = gst_video_format_get_row_stride (dest_format, 0, dest_width);
681 src_stride = gst_video_format_get_row_stride (src_format, 0, src_width);
684 dest + gst_video_format_get_component_offset (dest_format, 0,
685 dest_width, dest_height);
687 dest + gst_video_format_get_component_offset (dest_format, 1,
688 dest_width, dest_height);
690 dest + gst_video_format_get_component_offset (dest_format, 2,
691 dest_width, dest_height);
694 src + gst_video_format_get_component_offset (src_format, 0,
695 src_width, src_height);
697 src + gst_video_format_get_component_offset (src_format, 1,
698 src_width, src_height);
700 src + gst_video_format_get_component_offset (src_format, 2,
701 src_width, src_height);
/* advance every plane pointer to the copied region's origin */
703 destY = destY + dest_y * dest_stride + dest_x;
704 destU = destU + dest_y * dest_stride + dest_x;
705 destV = destV + dest_y * dest_stride + dest_x;
707 srcY = srcY + src_y * src_stride + src_x;
708 srcU = srcU + src_y * src_stride + src_x;
709 srcV = srcV + src_y * src_stride + src_x;
711 if (src_sdtv != dest_sdtv) {
716 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
717 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
719 for (i = 0; i < h; i++) {
720 for (j = 0; j < w; j++) {
/* per-pixel colorimetry conversion, one matrix row per component */
721 y = APPLY_MATRIX (matrix, 0, srcY[j], srcU[j], srcV[j]);
722 u = APPLY_MATRIX (matrix, 1, srcY[j], srcU[j], srcV[j]);
723 v = APPLY_MATRIX (matrix, 2, srcY[j], srcU[j], srcV[j]);
729 destY += dest_stride;
730 destU += dest_stride;
731 destV += dest_stride;
/* same colorimetry: straight per-row copies of all three planes */
738 for (i = 0; i < h; i++) {
739 memcpy (destY, srcY, w);
740 memcpy (destU, srcU, w);
741 memcpy (destV, srcV, w);
743 destY += dest_stride;
744 destU += dest_stride;
745 destV += dest_stride;
/* Copy a region between two Y42B (4:2:2 planar) frames.  Chroma is
 * subsampled horizontally only, so each scanline is handled as 2x1
 * macro pixels: leading/trailing half macro pixels are blended with
 * the existing destination chroma, full macro pixels are averaged
 * from both luma samples.  Colorimetry conversion goes through the
 * selected matrix (identity if src/dest match). */
755 copy_y42b_y42b (guint i_alpha, GstVideoFormat dest_format, guint8 * dest,
756 gboolean dest_sdtv, gint dest_width, gint dest_height, gint dest_x,
757 gint dest_y, GstVideoFormat src_format, const guint8 * src,
758 gboolean src_sdtv, gint src_width, gint src_height, gint src_x, gint src_y,
762 guint8 *destY, *destU, *destV;
763 const guint8 *srcY, *srcU, *srcV;
764 gint dest_strideY, dest_strideUV;
765 gint src_strideY, src_strideUV;
766 gint src_y_idx, src_uv_idx;
767 gint dest_y_idx, dest_uv_idx;
773 dest_strideY = gst_video_format_get_row_stride (dest_format, 0, dest_width);
774 dest_strideUV = gst_video_format_get_row_stride (dest_format, 1, dest_width);
775 src_strideY = gst_video_format_get_row_stride (src_format, 0, src_width);
776 src_strideUV = gst_video_format_get_row_stride (src_format, 1, src_width);
779 dest + gst_video_format_get_component_offset (dest_format, 0,
780 dest_width, dest_height);
782 dest + gst_video_format_get_component_offset (dest_format, 1,
783 dest_width, dest_height);
785 dest + gst_video_format_get_component_offset (dest_format, 2,
786 dest_width, dest_height);
789 src + gst_video_format_get_component_offset (src_format, 0,
790 src_width, src_height);
792 src + gst_video_format_get_component_offset (src_format, 1,
793 src_width, src_height);
795 src + gst_video_format_get_component_offset (src_format, 2,
796 src_width, src_height);
/* chroma is half-width in Y42B, hence the /2 on x offsets */
799 destY = destY + dest_y * dest_strideY + dest_x;
800 destU = destU + dest_y * dest_strideUV + dest_x / 2;
801 destV = destV + dest_y * dest_strideUV + dest_x / 2;
803 srcY = srcY + src_y * src_strideY + src_x;
804 srcU = srcU + src_y * src_strideUV + src_x / 2;
805 srcV = srcV + src_y * src_strideUV + src_x / 2;
810 if (src_sdtv != dest_sdtv)
812 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
813 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
815 memcpy (matrix, cog_identity_matrix_8bit, 12 * sizeof (gint));
817 /* 1. Copy all macro pixel scanlines, the destination scanline
818 * now starts at macro pixel boundary. */
819 for (i = dest_y; i < h; i++) {
820 /* 1.1. Handle the first destination pixel if it doesn't
821 * start at the macro pixel boundary, i.e. blend with
823 if (dest_x % 2 == 1) {
828 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
/* half macro pixel: 50/50 blend with existing chroma */
830 (destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
832 (destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 2, 0, 255);
834 src_y_idx = dest_y_idx = dest_uv_idx = 1;
835 src_uv_idx = (src_x % 2) + 1;
838 src_y_idx = dest_y_idx = dest_uv_idx = 0;
839 src_uv_idx = (src_x % 2);
842 /* 1.2. Copy all macro pixels from the source to the destination.
843 * All pixels now start at macro pixel boundary, i.e. no
844 * blending with the background is necessary. */
845 for (; j < w - 1; j += 2) {
846 y1 = srcY[src_y_idx];
847 y2 = srcY[src_y_idx + 1];
849 u1 = srcU[src_uv_idx / 2];
850 v1 = srcV[src_uv_idx / 2];
852 u2 = srcU[src_uv_idx / 2];
853 v2 = srcV[src_uv_idx / 2];
856 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
857 destY[dest_y_idx + 1] =
858 CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
/* full macro pixel: average the two converted chroma samples */
860 destU[dest_uv_idx] = CLAMP (
861 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 1, y2,
862 u2, v2)) / 2, 0, 255);
863 destV[dest_uv_idx] = CLAMP (
864 (APPLY_MATRIX (matrix, 2, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
865 u2, v2)) / 2, 0, 255);
872 /* 1.3. Now copy the last pixel if one exists and blend it
873 * with the background because we only fill part of
874 * the macro pixel. In case this is the last pixel of
875 * the destination we will fill a larger part. */
876 if (j == w - 1 && j == dest_width - 1) {
877 y1 = srcY[src_y_idx];
878 u1 = srcU[src_uv_idx / 2];
879 v1 = srcV[src_uv_idx / 2];
881 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
882 destU[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
/* NOTE(review): destV uses matrix row 1 (the U row); section 1.2
 * uses row 2 for V — looks like a copy/paste slip, confirm
 * against upstream gstvideobox.c. */
883 destV[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
884 } else if (j == w - 1) {
885 y1 = srcY[src_y_idx];
886 u1 = srcU[src_uv_idx / 2];
887 v1 = srcV[src_uv_idx / 2];
889 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
890 destU[dest_uv_idx] = CLAMP (
891 (destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
/* NOTE(review): row 1 used for V again here — expected row 2; confirm. */
893 destV[dest_uv_idx] = CLAMP (
894 (destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
/* advance one scanline in every plane (4:2:2: chroma advances per row) */
898 destY += dest_strideY;
899 destU += dest_strideUV;
900 destV += dest_strideUV;
903 srcU += src_strideUV;
904 srcV += src_strideUV;
909 copy_y41b_y41b (guint i_alpha, GstVideoFormat dest_format, guint8 * dest,
910 gboolean dest_sdtv, gint dest_width, gint dest_height, gint dest_x,
911 gint dest_y, GstVideoFormat src_format, const guint8 * src,
912 gboolean src_sdtv, gint src_width, gint src_height, gint src_x, gint src_y,
916 guint8 *destY, *destU, *destV;
917 const guint8 *srcY, *srcU, *srcV;
918 gint dest_strideY, dest_strideUV;
919 gint src_strideY, src_strideUV;
920 gint src_y_idx, src_uv_idx;
921 gint dest_y_idx, dest_uv_idx;
927 dest_strideY = gst_video_format_get_row_stride (dest_format, 0, dest_width);
928 dest_strideUV = gst_video_format_get_row_stride (dest_format, 1, dest_width);
929 src_strideY = gst_video_format_get_row_stride (src_format, 0, src_width);
930 src_strideUV = gst_video_format_get_row_stride (src_format, 1, src_width);
933 dest + gst_video_format_get_component_offset (dest_format, 0,
934 dest_width, dest_height);
936 dest + gst_video_format_get_component_offset (dest_format, 1,
937 dest_width, dest_height);
939 dest + gst_video_format_get_component_offset (dest_format, 2,
940 dest_width, dest_height);
943 src + gst_video_format_get_component_offset (src_format, 0,
944 src_width, src_height);
946 src + gst_video_format_get_component_offset (src_format, 1,
947 src_width, src_height);
949 src + gst_video_format_get_component_offset (src_format, 2,
950 src_width, src_height);
953 destY = destY + dest_y * dest_strideY + dest_x;
954 destU = destU + dest_y * dest_strideUV + dest_x / 4;
955 destV = destV + dest_y * dest_strideUV + dest_x / 4;
957 srcY = srcY + src_y * src_strideY + src_x;
958 srcU = srcU + src_y * src_strideUV + src_x / 4;
959 srcV = srcV + src_y * src_strideUV + src_x / 4;
964 if (src_sdtv != dest_sdtv)
966 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
967 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
969 memcpy (matrix, cog_identity_matrix_8bit, 12 * sizeof (gint));
971 /* 1. Copy all macro pixel scanlines, the destination scanline
972 * now starts at macro pixel boundary. */
973 for (i = dest_y; i < h; i++) {
974 /* 1.1. Handle the first destination pixel if it doesn't
975 * start at the macro pixel boundary, i.e. blend with
977 if (dest_x % 4 == 1) {
984 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
985 destY[1] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u1, v1), 0, 255);
986 destY[2] = CLAMP (APPLY_MATRIX (matrix, 0, y3, u1, v1), 0, 255);
989 (destU[0] + APPLY_MATRIX (matrix, 1, y1, u1,
990 v1) + APPLY_MATRIX (matrix, 1, y2, u1,
991 v1) + APPLY_MATRIX (matrix, 1, y3, u1, v1)) / 4, 0, 255);
993 CLAMP ((destV[0] + APPLY_MATRIX (matrix, 2, y1, u1,
994 v1) + APPLY_MATRIX (matrix, 2, y2, u1,
995 v1) + APPLY_MATRIX (matrix, 2, y3, u1, v1)) / 4, 0, 255);
998 src_y_idx = dest_y_idx = 3;
1000 src_uv_idx = (src_x % 4) + 3;
1001 } else if (dest_x % 4 == 2) {
1007 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1008 destY[1] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u1, v1), 0, 255);
1011 (2 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1,
1012 v1) + APPLY_MATRIX (matrix, 1, y2, u1, v1)) / 4, 0, 255);
1014 CLAMP ((2 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1,
1015 v1) + APPLY_MATRIX (matrix, 2, y2, u1, v1)) / 4, 0, 255);
1018 src_y_idx = dest_y_idx = 2;
1020 src_uv_idx = (src_x % 4) + 2;
1021 } else if (dest_x % 4 == 3) {
1026 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1029 (3 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0, 255);
1031 (3 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 4, 0, 255);
1034 src_y_idx = dest_y_idx = 1;
1036 src_uv_idx = (src_x % 4) + 1;
1039 src_y_idx = dest_y_idx = dest_uv_idx = 0;
1040 src_uv_idx = (src_x % 4);
1043 /* 1.2. Copy all macro pixels from the source to the destination.
1044 * All pixels now start at macro pixel boundary, i.e. no
1045 * blending with the background is necessary. */
1046 for (; j < w - 3; j += 4) {
1047 y1 = srcY[src_y_idx];
1048 y2 = srcY[src_y_idx + 1];
1049 y3 = srcY[src_y_idx + 2];
1050 y4 = srcY[src_y_idx + 3];
1052 u1 = srcU[src_uv_idx / 4];
1053 v1 = srcV[src_uv_idx / 4];
1055 u2 = srcU[src_uv_idx / 4];
1056 v2 = srcV[src_uv_idx / 4];
1058 u3 = srcU[src_uv_idx / 4];
1059 v3 = srcV[src_uv_idx / 4];
1061 u4 = srcU[src_uv_idx / 4];
1062 v4 = srcV[src_uv_idx / 4];
1065 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1066 destY[dest_y_idx + 1] =
1067 CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
1068 destY[dest_y_idx + 2] =
1069 CLAMP (APPLY_MATRIX (matrix, 0, y3, u3, v3), 0, 255);
1070 destY[dest_y_idx + 3] =
1071 CLAMP (APPLY_MATRIX (matrix, 0, y4, u4, v4), 0, 255);
1073 destU[dest_uv_idx] = CLAMP (
1074 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 1, y2,
1075 u2, v2) + APPLY_MATRIX (matrix, 1, y3, u3,
1076 v3) + APPLY_MATRIX (matrix, 1, y4, u4, v4)) / 4, 0, 255);
1077 destV[dest_uv_idx] =
1078 CLAMP ((APPLY_MATRIX (matrix, 2, y1, u1, v1) + APPLY_MATRIX (matrix,
1079 2, y2, u2, v2) + APPLY_MATRIX (matrix, 2, y3, u3,
1080 v3) + APPLY_MATRIX (matrix, 2, y4, u4, v4)) / 4, 0, 255);
1087 /* 1.3. Now copy the last pixel if one exists and blend it
1088 * with the background because we only fill part of
1089 * the macro pixel. In case this is the last pixel of
1090 * the destination we will a larger part. */
1091 if (j == w - 1 && j == dest_width - 1) {
1092 y1 = srcY[src_y_idx];
1093 u1 = srcU[src_uv_idx / 4];
1094 v1 = srcV[src_uv_idx / 4];
1096 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1097 destU[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
1098 destV[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
1099 } else if (j == w - 1) {
1100 y1 = srcY[src_y_idx];
1101 u1 = srcU[src_uv_idx / 4];
1102 v1 = srcV[src_uv_idx / 4];
1104 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1105 destU[dest_uv_idx] = CLAMP (
1106 (destU[dest_uv_idx] + 3 * APPLY_MATRIX (matrix, 1, y1, u1,
1108 destV[dest_uv_idx] = CLAMP (
1109 (destV[dest_uv_idx] + 3 * APPLY_MATRIX (matrix, 1, y1, u1,
1111 } else if (j == w - 2 && j == dest_width - 2) {
1112 y1 = srcY[src_y_idx];
1113 y2 = srcY[src_y_idx + 1];
1114 u1 = srcU[src_uv_idx / 4];
1115 v1 = srcV[src_uv_idx / 4];
1117 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1118 destY[dest_y_idx + 1] =
1119 CLAMP (APPLY_MATRIX (matrix, 0, y2, u1, v1), 0, 255);
1120 destU[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
1121 destV[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
1122 } else if (j == w - 2) {
1123 y1 = srcY[src_y_idx];
1124 y2 = srcY[src_y_idx + 1];
1125 u1 = srcU[src_uv_idx / 4];
1126 v1 = srcV[src_uv_idx / 4];
1128 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1129 destY[dest_y_idx + 1] =
1130 CLAMP (APPLY_MATRIX (matrix, 0, y2, u1, v1), 0, 255);
1131 destU[dest_uv_idx] =
1132 CLAMP ((destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1134 destV[dest_uv_idx] =
1135 CLAMP ((destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1137 } else if (j == w - 3 && j == dest_width - 3) {
1138 y1 = srcY[src_y_idx];
1139 y2 = srcY[src_y_idx + 1];
1140 y3 = srcY[src_y_idx + 2];
1141 u1 = srcU[src_uv_idx / 4];
1142 v1 = srcV[src_uv_idx / 4];
1144 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1145 destY[dest_y_idx + 1] =
1146 CLAMP (APPLY_MATRIX (matrix, 0, y2, u1, v1), 0, 255);
1147 destY[dest_y_idx + 2] =
1148 CLAMP (APPLY_MATRIX (matrix, 0, y3, u1, v1), 0, 255);
1149 destU[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
1150 destV[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
1151 } else if (j == w - 3) {
1152 y1 = srcY[src_y_idx];
1153 y2 = srcY[src_y_idx + 1];
1154 y3 = srcY[src_y_idx + 2];
1155 u1 = srcU[src_uv_idx / 4];
1156 v1 = srcV[src_uv_idx / 4];
1158 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1159 destY[dest_y_idx + 1] =
1160 CLAMP (APPLY_MATRIX (matrix, 0, y2, u1, v1), 0, 255);
1161 destY[dest_y_idx + 2] =
1162 CLAMP (APPLY_MATRIX (matrix, 0, y3, u1, v1), 0, 255);
1163 destU[dest_uv_idx] =
1164 CLAMP ((3 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1166 destV[dest_uv_idx] =
1167 CLAMP ((3 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1171 destY += dest_strideY;
1172 destU += dest_strideUV;
1173 destV += dest_strideUV;
1174 srcY += src_strideY;
1175 srcU += src_strideUV;
1176 srcV += src_strideUV;
/* Copy/convert a w x h rectangle of I420 video from (src_x, src_y) in src
 * to (dest_x, dest_y) in dest, converting between SDTV and HDTV YCbCr when
 * the two colorimetries differ (identity matrix otherwise).  Because I420
 * chroma is subsampled 2x2, rectangle edges that fall inside a macro pixel
 * are blended with the chroma already present in the destination
 * ("background").
 *
 * NOTE(review): several destV assignments below apply APPLY_MATRIX row 1
 * (the U row) where row 2 (the V row) would be expected — this looks like a
 * copy/paste slip; confirm against upstream before relying on exact colors.
 */
1181 copy_i420_i420 (guint i_alpha, GstVideoFormat dest_format, guint8 * dest,
1182 gboolean dest_sdtv, gint dest_width, gint dest_height, gint dest_x,
1183 gint dest_y, GstVideoFormat src_format, const guint8 * src,
1184 gboolean src_sdtv, gint src_width, gint src_height, gint src_x, gint src_y,
1188 guint8 *destY, *destU, *destV;
1189 const guint8 *srcY, *srcU, *srcV;
1191 const guint8 *srcY2, *srcU2, *srcV2;
1192 gint dest_strideY, dest_strideUV;
1193 gint src_strideY, src_strideUV;
1194 gint src_y_idx, src_uv_idx;
1195 gint dest_y_idx, dest_uv_idx;
1197 gint y1, y2, y3, y4;
1198 gint u1, u2, u3, u4;
1199 gint v1, v2, v3, v4;
/* Plane strides and base offsets are computed for I420 layout. */
1202 gst_video_format_get_row_stride (GST_VIDEO_FORMAT_I420, 0, dest_width);
1204 gst_video_format_get_row_stride (GST_VIDEO_FORMAT_I420, 1, dest_width);
1206 gst_video_format_get_row_stride (GST_VIDEO_FORMAT_I420, 0, src_width);
1208 gst_video_format_get_row_stride (GST_VIDEO_FORMAT_I420, 1, src_width);
1211 dest + gst_video_format_get_component_offset (GST_VIDEO_FORMAT_I420, 0,
1212 dest_width, dest_height);
1214 dest + gst_video_format_get_component_offset (dest_format, 1,
1215 dest_width, dest_height);
1217 dest + gst_video_format_get_component_offset (dest_format, 2,
1218 dest_width, dest_height);
1221 src + gst_video_format_get_component_offset (GST_VIDEO_FORMAT_I420, 0,
1222 src_width, src_height);
1224 src + gst_video_format_get_component_offset (src_format, 1,
1225 src_width, src_height);
1227 src + gst_video_format_get_component_offset (src_format, 2,
1228 src_width, src_height);
/* Advance each plane pointer to the top-left of the rectangle; chroma
 * coordinates are halved because of 2x2 subsampling. */
1231 destY = destY + dest_y * dest_strideY + dest_x;
1232 destU = destU + (dest_y / 2) * dest_strideUV + dest_x / 2;
1233 destV = destV + (dest_y / 2) * dest_strideUV + dest_x / 2;
1235 srcY = srcY + src_y * src_strideY + src_x;
1236 srcU = srcU + (src_y / 2) * src_strideUV + src_x / 2;
1237 srcV = srcV + (src_y / 2) * src_strideUV + src_x / 2;
/* Secondary row pointers for processing two luma scanlines per step. */
1239 destY2 = destY + dest_strideY;
1240 srcY2 = srcY + src_strideY;
/* Pick the 8-bit colorimetry conversion matrix (or identity). */
1245 if (src_sdtv != dest_sdtv)
1247 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
1248 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
1250 memcpy (matrix, cog_identity_matrix_8bit, 12 * sizeof (gint));
1252 /* 1. Handle the first destination scanline specially if it
1253 * doesn't start at the macro pixel boundary, i.e. blend
1254 * with the background! */
1255 if (dest_y % 2 == 1) {
1256 /* 1.1. Handle the first destination pixel if it doesn't
1257 * start at the macro pixel boundary, i.e. blend with
1258 * the background! */
1259 if (dest_x % 2 == 1) {
1264 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1266 CLAMP ((3 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
1269 CLAMP ((3 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 4, 0,
1273 src_y_idx = dest_y_idx = dest_uv_idx = 1;
1274 src_uv_idx = (src_x % 2) + 1;
1277 src_y_idx = dest_y_idx = dest_uv_idx = 0;
1278 src_uv_idx = (src_x % 2);
1281 /* 1.2. Copy all macro pixels from the source to the destination
1282 * but blend with the background because we're only filling
1283 * the lower part of the macro pixels. */
1284 for (; j < w - 1; j += 2) {
1285 y1 = srcY[src_y_idx];
1286 y2 = srcY[src_y_idx + 1];
1288 u1 = srcU[src_uv_idx / 2];
1289 v1 = srcV[src_uv_idx / 2];
1291 u2 = srcU[src_uv_idx / 2];
1292 v2 = srcV[src_uv_idx / 2];
1295 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1296 destY[dest_y_idx + 1] =
1297 CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
1298 destU[dest_uv_idx] =
1299 CLAMP ((2 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1300 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
1301 destV[dest_uv_idx] =
1302 CLAMP ((2 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
1303 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
1310 /* 1.3. Now copy the last pixel if one exists and blend it
1311 * with the background because we only fill part of
1312 * the macro pixel. In case this is the last pixel of
1313 * the destination we will fill a larger part. */
1314 if (j == w - 1 && j == dest_width - 1) {
1315 y1 = srcY[src_y_idx];
1316 u1 = srcU[src_uv_idx / 2];
1317 v1 = srcV[src_uv_idx / 2];
1319 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1320 destU[dest_uv_idx] = CLAMP (
1321 (destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0,
1323 destV[dest_uv_idx] =
1324 CLAMP ((destV[dest_uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
1326 } else if (j == w - 1) {
1327 y1 = srcY[src_y_idx];
1328 u1 = srcU[src_uv_idx / 2];
1329 v1 = srcV[src_uv_idx / 2];
1331 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1332 destU[dest_uv_idx] = CLAMP (
1333 (3 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4,
1335 destV[dest_uv_idx] =
1336 CLAMP ((3 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
/* Advance to the next scanline pair; chroma rows advance only when the
 * source row parity says a new chroma row begins. */
1340 destY += dest_strideY;
1341 destY2 += dest_strideY;
1342 destU += dest_strideUV;
1343 destV += dest_strideUV;
1344 srcY += src_strideY;
1345 srcY2 += src_strideY;
1347 if (src_y % 2 == 0) {
1348 srcU += src_strideUV;
1349 srcV += src_strideUV;
1356 /* 2. Copy all macro pixel scanlines, the destination scanline
1357 * now starts at macro pixel boundary. */
1358 for (; i < h - 1; i += 2) {
1359 /* 2.1. Handle the first destination pixel if it doesn't
1360 * start at the macro pixel boundary, i.e. blend with
1361 * the background! */
1365 if (src_y % 2 == 1) {
1366 srcU2 += src_strideUV;
1367 srcV2 += src_strideUV;
1370 if (dest_x % 2 == 1) {
1378 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1379 destY2[0] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
1381 (2 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1,
1382 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
1384 (2 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1,
1385 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
1387 src_y_idx = dest_y_idx = dest_uv_idx = 1;
1388 src_uv_idx = (src_x % 2) + 1;
1391 src_y_idx = dest_y_idx = dest_uv_idx = 0;
1392 src_uv_idx = (src_x % 2);
1395 /* 2.2. Copy all macro pixels from the source to the destination.
1396 * All pixels now start at macro pixel boundary, i.e. no
1397 * blending with the background is necessary. */
1398 for (; j < w - 1; j += 2) {
1399 y1 = srcY[src_y_idx];
1400 y2 = srcY[src_y_idx + 1];
1401 y3 = srcY2[src_y_idx];
1402 y4 = srcY2[src_y_idx + 1];
1404 u1 = srcU[src_uv_idx / 2];
1405 u3 = srcU2[src_uv_idx / 2];
1406 v1 = srcV[src_uv_idx / 2];
1407 v3 = srcV2[src_uv_idx / 2];
1409 u2 = srcU[src_uv_idx / 2];
1410 u4 = srcU2[src_uv_idx / 2];
1411 v2 = srcV[src_uv_idx / 2];
1412 v4 = srcV2[src_uv_idx / 2];
1415 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1416 destY[dest_y_idx + 1] =
1417 CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
1418 destY2[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y3, u3, v3), 0, 255);
1419 destY2[dest_y_idx + 1] =
1420 CLAMP (APPLY_MATRIX (matrix, 0, y4, u4, v4), 0, 255);
1422 destU[dest_uv_idx] = CLAMP (
1423 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 1, y2,
1424 u2, v2) + APPLY_MATRIX (matrix, 1, y3, u3,
1425 v3) + APPLY_MATRIX (matrix, 1, y4, u4, v4)) / 4, 0, 255);
1426 destV[dest_uv_idx] = CLAMP (
1427 (APPLY_MATRIX (matrix, 2, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
1428 u2, v2) + APPLY_MATRIX (matrix, 2, y3, u3,
1429 v3) + APPLY_MATRIX (matrix, 2, y4, u4, v4)) / 4, 0, 255);
1436 /* 2.3. Now copy the last pixel if one exists and blend it
1437 * with the background because we only fill part of
1438 * the macro pixel. In case this is the last pixel of
1439 * the destination we will fill a larger part. */
1440 if (j == w - 1 && j == dest_width - 1) {
1441 y1 = srcY[src_y_idx];
1442 y2 = srcY2[src_y_idx];
1444 u1 = srcU[src_uv_idx / 2];
1445 u2 = srcU2[src_uv_idx / 2];
1447 v1 = srcV[src_uv_idx / 2];
1448 v2 = srcV2[src_uv_idx / 2];
1450 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1451 destY2[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
/* NOTE(review): the next two averages mix matrix rows 1 and 2 for the
 * same channel — both terms should presumably use the channel's own row.
 * Confirm against upstream. */
1452 destU[dest_uv_idx] = CLAMP (
1453 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
1454 u2, v2)) / 2, 0, 255);
1455 destV[dest_uv_idx] = CLAMP (
1456 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
1457 u2, v2)) / 2, 0, 255);
1458 } else if (j == w - 1) {
1459 y1 = srcY[src_y_idx];
1460 y2 = srcY2[src_y_idx];
1462 u1 = srcU[src_uv_idx / 2];
1463 u2 = srcU2[src_uv_idx / 2];
1465 v1 = srcV[src_uv_idx / 2];
1466 v2 = srcV2[src_uv_idx / 2];
1468 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1469 destY2[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
1470 destU[dest_uv_idx] = CLAMP (
1471 (2 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1472 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
1473 destV[dest_uv_idx] = CLAMP (
1474 (2 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1475 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
1478 destY += 2 * dest_strideY;
1479 destY2 += 2 * dest_strideY;
1480 destU += dest_strideUV;
1481 destV += dest_strideUV;
1482 srcY += 2 * src_strideY;
1483 srcY2 += 2 * src_strideY;
1486 srcU += src_strideUV;
1487 srcV += src_strideUV;
1490 /* 3. Handle the last scanline if one exists. This again
1491 * doesn't start at macro pixel boundary but should
1492 * only fill the upper part of the macro pixels. */
1493 if (i == h - 1 && i == dest_height - 1) {
1494 /* 3.1. Handle the first destination pixel if it doesn't
1495 * start at the macro pixel boundary, i.e. blend with
1496 * the background! */
1497 if (dest_x % 2 == 1) {
1502 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1504 CLAMP ((destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
1506 CLAMP ((destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 2, 0, 255);
1509 src_y_idx = dest_y_idx = dest_uv_idx = 1;
1510 src_uv_idx = (src_x % 2) + 1;
1513 src_y_idx = dest_y_idx = dest_uv_idx = 0;
1514 src_uv_idx = (src_x % 2);
1517 /* 3.2. Copy all macro pixels from the source to the destination
1518 * but blend with the background because we're only filling
1519 * the upper part of the macro pixels. */
1520 for (; j < w - 1; j += 2) {
1521 y1 = srcY[src_y_idx];
1522 y2 = srcY[src_y_idx + 1];
1524 u1 = srcU[src_uv_idx / 2];
1525 v1 = srcV[src_uv_idx / 2];
1527 u2 = srcU[src_uv_idx / 2];
1528 v2 = srcV[src_uv_idx / 2];
1531 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1532 destY[dest_y_idx + 1] =
1533 CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
1535 destU[dest_uv_idx] = CLAMP (
1536 (2 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1537 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
1538 destV[dest_uv_idx] = CLAMP (
1539 (2 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
1540 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
1547 /* 3.3. Now copy the last pixel if one exists and blend it
1548 * with the background because we only fill part of
1549 * the macro pixel. In case this is the last pixel of
1550 * the destination we will fill a larger part. */
1551 if (j == w - 1 && j == dest_width - 1) {
1552 y1 = srcY[src_y_idx];
1553 u1 = srcU[src_uv_idx / 2];
1554 v1 = srcV[src_uv_idx / 2];
1556 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1557 destU[dest_uv_idx] = CLAMP (
1558 (destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0,
/* NOTE(review): destV below uses matrix row 1 (U row) — see header note. */
1560 destV[dest_uv_idx] =
1561 CLAMP ((destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1563 } else if (j == w - 1) {
1564 y1 = srcY[src_y_idx];
1565 u1 = srcU[src_uv_idx / 2];
1566 v1 = srcV[src_uv_idx / 2];
1568 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1569 destU[dest_uv_idx] = CLAMP (
1570 (3 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4,
1572 destV[dest_uv_idx] =
1573 CLAMP ((3 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1576 } else if (i == h - 1) {
1577 /* 3.1. Handle the first destination pixel if it doesn't
1578 * start at the macro pixel boundary, i.e. blend with
1579 * the background! */
1580 if (dest_x % 2 == 1) {
1585 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1587 CLAMP ((3 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
1590 CLAMP ((3 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 4, 0,
1594 src_y_idx = dest_y_idx = dest_uv_idx = 1;
1595 src_uv_idx = (src_x % 2) + 1;
1598 src_y_idx = dest_y_idx = dest_uv_idx = 0;
1599 src_uv_idx = (src_x % 2);
1602 /* 3.2. Copy all macro pixels from the source to the destination
1603 * but blend with the background because we're only filling
1604 * the upper part of the macro pixels. */
1605 for (; j < w - 1; j += 2) {
1606 y1 = srcY[src_y_idx];
1607 y2 = srcY[src_y_idx + 1];
1609 u1 = srcU[src_uv_idx / 2];
1610 v1 = srcV[src_uv_idx / 2];
1612 u2 = srcU[src_uv_idx / 2];
1613 v2 = srcV[src_uv_idx / 2];
1616 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1617 destY[dest_y_idx + 1] =
1618 CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
1620 destU[dest_uv_idx] = CLAMP (
1621 (2 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1622 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
1623 destV[dest_uv_idx] = CLAMP (
1624 (2 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
1625 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
1632 /* 3.3. Now copy the last pixel if one exists and blend it
1633 * with the background because we only fill part of
1634 * the macro pixel. In case this is the last pixel of
1635 * the destination we will fill a larger part. */
1636 if (j == w - 1 && j == dest_width - 1) {
1637 y1 = srcY[src_y_idx];
1638 u1 = srcU[src_uv_idx / 2];
1639 v1 = srcV[src_uv_idx / 2];
1641 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1642 destU[dest_uv_idx] = CLAMP (
1643 (destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0,
/* NOTE(review): destV below uses matrix row 1 (U row) — see header note. */
1645 destV[dest_uv_idx] =
1646 CLAMP ((destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1648 } else if (j == w - 1) {
1649 y1 = srcY[src_y_idx];
1650 u1 = srcU[src_uv_idx / 2];
1651 v1 = srcV[src_uv_idx / 2];
1653 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1654 destU[dest_uv_idx] = CLAMP (
1655 (3 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4,
1657 destV[dest_uv_idx] =
1658 CLAMP ((3 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
/* Copy a w x h rectangle of I420 video into a packed AYUV destination,
 * writing the global alpha i_alpha into every pixel and converting between
 * SDTV and HDTV YCbCr when the source and destination colorimetries differ.
 *
 * NOTE(review): the `if (src_y % 2 == 0)` chroma-row advance reads as
 * loop-invariant here; presumably src_y is incremented per scanline in
 * statements not visible in this excerpt — verify against upstream. */
1665 copy_i420_ayuv (guint i_alpha, GstVideoFormat dest_format, guint8 * dest,
1666 gboolean dest_sdtv, gint dest_width, gint dest_height, gint dest_x,
1667 gint dest_y, GstVideoFormat src_format, const guint8 * src,
1668 gboolean src_sdtv, gint src_width, gint src_height, gint src_x, gint src_y,
1671 const guint8 *srcY, *srcU, *srcV;
1672 gint src_strideY, src_strideUV;
/* Source plane strides and offsets follow the I420 layout. */
1676 gst_video_format_get_row_stride (GST_VIDEO_FORMAT_I420, 0, src_width);
1678 gst_video_format_get_row_stride (GST_VIDEO_FORMAT_I420, 1, src_width);
1681 src + gst_video_format_get_component_offset (GST_VIDEO_FORMAT_I420, 0,
1682 src_width, src_height);
1684 src + gst_video_format_get_component_offset (src_format, 1,
1685 src_width, src_height);
1687 src + gst_video_format_get_component_offset (src_format, 2,
1688 src_width, src_height);
/* AYUV is 4 bytes per pixel, tightly packed. */
1690 dest_stride = dest_width * 4;
1692 dest = dest + dest_y * dest_stride + dest_x * 4;
1694 srcY = srcY + src_y * src_strideY + src_x;
1695 srcU = srcU + (src_y / 2) * src_strideUV + src_x / 2;
1696 srcV = srcV + (src_y / 2) * src_strideUV + src_x / 2;
1698 i_alpha = CLAMP (i_alpha, 0, 255);
/* Conversion path: apply the colorimetry matrix per pixel. */
1700 if (src_sdtv != dest_sdtv) {
1707 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
1708 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
1710 for (i = 0; i < h; i++) {
1711 for (j = 0, uv_idx = src_x % 2; j < w; j++, uv_idx++) {
1713 u = srcU[uv_idx / 2];
1714 v = srcV[uv_idx / 2];
1716 y1 = APPLY_MATRIX (matrix, 0, y, u, v);
1717 u1 = APPLY_MATRIX (matrix, 1, y, u, v);
1718 v1 = APPLY_MATRIX (matrix, 2, y, u, v);
1720 dest[4 * j + 0] = i_alpha;
1721 dest[4 * j + 1] = y1;
1722 dest[4 * j + 2] = u1;
1723 dest[4 * j + 3] = v1;
1725 dest += dest_stride;
1728 srcY += src_strideY;
1729 if (src_y % 2 == 0) {
1730 srcU += src_strideUV;
1731 srcV += src_strideUV;
/* Fast path: same colorimetry, straight repack into AYUV. */
1738 for (i = 0; i < h; i++) {
1739 for (j = 0, uv_idx = src_x % 2; j < w; j++, uv_idx++) {
1741 u = srcU[uv_idx / 2];
1742 v = srcV[uv_idx / 2];
1744 dest[4 * j + 0] = i_alpha;
1745 dest[4 * j + 1] = y;
1746 dest[4 * j + 2] = u;
1747 dest[4 * j + 3] = v;
1749 dest += dest_stride;
1752 srcY += src_strideY;
1753 if (src_y % 2 == 0) {
1754 srcU += src_strideUV;
1755 srcV += src_strideUV;
1762 fill_rgb32 (GstVideoBoxFill fill_type, guint b_alpha, GstVideoFormat format,
1763 guint8 * dest, gboolean sdtv, gint width, gint height)
1765 guint32 empty_pixel;
1768 p[0] = gst_video_format_get_component_offset (format, 3, width, height);
1769 p[1] = gst_video_format_get_component_offset (format, 0, width, height);
1770 p[2] = gst_video_format_get_component_offset (format, 1, width, height);
1771 p[3] = gst_video_format_get_component_offset (format, 2, width, height);
1773 b_alpha = CLAMP (b_alpha, 0, 255);
1775 empty_pixel = GUINT32_FROM_LE ((b_alpha << (p[0] * 8)) |
1776 (rgb_colors_R[fill_type] << (p[1] * 8)) |
1777 (rgb_colors_G[fill_type] << (p[2] * 8)) |
1778 (rgb_colors_B[fill_type] << (p[3] * 8)));
1780 orc_splat_u32 ((guint32 *) dest, empty_pixel, width * height);
1784 fill_rgb24 (GstVideoBoxFill fill_type, guint b_alpha, GstVideoFormat format,
1785 guint8 * dest, gboolean sdtv, gint width, gint height)
1787 gint dest_stride = GST_ROUND_UP_4 (width * 3);
1791 p[0] = gst_video_format_get_component_offset (format, 3, width, height);
1792 p[1] = gst_video_format_get_component_offset (format, 0, width, height);
1793 p[2] = gst_video_format_get_component_offset (format, 1, width, height);
1794 p[3] = gst_video_format_get_component_offset (format, 2, width, height);
1796 for (i = 0; i < height; i++) {
1797 for (j = 0; j < width; j++) {
1798 dest[3 * j + p[1]] = rgb_colors_R[fill_type];
1799 dest[3 * j + p[2]] = rgb_colors_G[fill_type];
1800 dest[3 * j + p[3]] = rgb_colors_B[fill_type];
1802 dest += dest_stride;
/* Copy a w x h rectangle between RGB frames that are either 32-bit
 * (with or without alpha) or 24-bit packed (RGB/BGR).  One of five loop
 * variants is selected depending on whether each side has alpha and
 * whether each side is 3-byte packed.  When both sides carry alpha the
 * source alpha is scaled by i_alpha; when only the output has alpha,
 * i_alpha is written as a constant.
 *
 * NOTE(review): the `j += 4` loops index bytes while the loop bound `w`
 * reads as a pixel count; presumably w is rescaled (e.g. `w *= 4`) in
 * statements not visible in this excerpt — verify against upstream. */
1807 copy_rgb32 (guint i_alpha, GstVideoFormat dest_format, guint8 * dest,
1808 gboolean dest_sdtv, gint dest_width, gint dest_height, gint dest_x,
1809 gint dest_y, GstVideoFormat src_format, const guint8 * src,
1810 gboolean src_sdtv, gint src_width, gint src_height, gint src_x, gint src_y,
1814 gint src_stride, dest_stride;
1815 gboolean in_alpha, out_alpha;
1816 gint in_bpp, out_bpp;
1819 gboolean packed_out = (dest_format == GST_VIDEO_FORMAT_RGB
1820 || dest_format == GST_VIDEO_FORMAT_BGR);
1821 gboolean packed_in = (src_format == GST_VIDEO_FORMAT_RGB
1822 || src_format == GST_VIDEO_FORMAT_BGR);
/* Strides: 24-bit rows are padded to 4 bytes, 32-bit rows are dense. */
1824 src_stride = (packed_in) ? GST_ROUND_UP_4 (3 * src_width) : 4 * src_width;
1825 dest_stride = (packed_out) ? GST_ROUND_UP_4 (3 * dest_width) : 4 * dest_width;
1826 in_bpp = (packed_in) ? 3 : 4;
1827 out_bpp = (packed_out) ? 3 : 4;
/* Component byte offsets: index 0 = alpha, 1 = R, 2 = G, 3 = B. */
1829 out_alpha = gst_video_format_has_alpha (dest_format);
1831 gst_video_format_get_component_offset (dest_format, 3, dest_width,
1834 gst_video_format_get_component_offset (dest_format, 0, dest_width,
1837 gst_video_format_get_component_offset (dest_format, 1, dest_width,
1840 gst_video_format_get_component_offset (dest_format, 2, dest_width,
1843 in_alpha = gst_video_format_has_alpha (src_format);
1845 gst_video_format_get_component_offset (src_format, 3, src_width,
1848 gst_video_format_get_component_offset (src_format, 0, src_width,
1851 gst_video_format_get_component_offset (src_format, 1, src_width,
1854 gst_video_format_get_component_offset (src_format, 2, src_width,
1857 dest = dest + dest_y * dest_stride + dest_x * out_bpp;
1858 src = src + src_y * src_stride + src_x * in_bpp;
/* Both sides have alpha: scale source alpha by i_alpha (>> 8). */
1860 if (in_alpha && out_alpha) {
1862 for (i = 0; i < h; i++) {
1863 for (j = 0; j < w; j += 4) {
1864 dest[j + p_out[0]] = (src[j + p_in[0]] * i_alpha) >> 8;
1865 dest[j + p_out[1]] = src[j + p_in[1]];
1866 dest[j + p_out[2]] = src[j + p_in[2]];
1867 dest[j + p_out[3]] = src[j + p_in[3]];
1869 dest += dest_stride;
/* Output has alpha, 32-bit input without usable alpha: constant alpha. */
1872 } else if (out_alpha && !packed_in) {
1874 i_alpha = CLAMP (i_alpha, 0, 255);
1876 for (i = 0; i < h; i++) {
1877 for (j = 0; j < w; j += 4) {
1878 dest[j + p_out[0]] = i_alpha;
1879 dest[j + p_out[1]] = src[j + p_in[1]];
1880 dest[j + p_out[2]] = src[j + p_in[2]];
1881 dest[j + p_out[3]] = src[j + p_in[3]];
1883 dest += dest_stride;
/* Output has alpha, 3-byte packed input: per-pixel indexing. */
1886 } else if (out_alpha && packed_in) {
1887 i_alpha = CLAMP (i_alpha, 0, 255);
1889 for (i = 0; i < h; i++) {
1890 for (j = 0; j < w; j++) {
1891 dest[4 * j + p_out[0]] = i_alpha;
1892 dest[4 * j + p_out[1]] = src[in_bpp * j + p_in[1]];
1893 dest[4 * j + p_out[2]] = src[in_bpp * j + p_in[2]];
1894 dest[4 * j + p_out[3]] = src[in_bpp * j + p_in[3]];
1896 dest += dest_stride;
/* No alpha on either side, both 32-bit: copy RGB bytes only. */
1899 } else if (!packed_out && !packed_in) {
1901 for (i = 0; i < h; i++) {
1902 for (j = 0; j < w; j += 4) {
1903 dest[j + p_out[1]] = src[j + p_in[1]];
1904 dest[j + p_out[2]] = src[j + p_in[2]];
1905 dest[j + p_out[3]] = src[j + p_in[3]];
1907 dest += dest_stride;
/* Mixed 24/32-bit without output alpha: per-pixel indexing. */
1911 for (i = 0; i < h; i++) {
1912 for (j = 0; j < w; j++) {
1913 dest[out_bpp * j + p_out[1]] = src[in_bpp * j + p_in[1]];
1914 dest[out_bpp * j + p_out[2]] = src[in_bpp * j + p_in[2]];
1915 dest[out_bpp * j + p_out[3]] = src[in_bpp * j + p_in[3]];
1917 dest += dest_stride;
/* Copy a w x h rectangle from a 24/32-bit RGB frame into a packed AYUV
 * destination, converting colors with the RGB->YCbCr matrix matching the
 * destination colorimetry (SDTV or HDTV).  Source alpha, when present, is
 * scaled by i_alpha; otherwise i_alpha is written directly. */
1924 copy_rgb32_ayuv (guint i_alpha, GstVideoFormat dest_format, guint8 * dest,
1925 gboolean dest_sdtv, gint dest_width, gint dest_height, gint dest_x,
1926 gint dest_y, GstVideoFormat src_format, const guint8 * src,
1927 gboolean src_sdtv, gint src_width, gint src_height, gint src_x, gint src_y,
1931 gint src_stride, dest_stride;
1935 gboolean packed_in = (src_format == GST_VIDEO_FORMAT_RGB
1936 || src_format == GST_VIDEO_FORMAT_BGR);
1942 src_stride = (packed_in) ? GST_ROUND_UP_4 (3 * src_width) : 4 * src_width;
1943 dest_stride = 4 * dest_width;
1944 in_bpp = (packed_in) ? 3 : 4;
/* Source component byte offsets: 0 = alpha, 1 = R, 2 = G, 3 = B. */
1946 in_alpha = gst_video_format_has_alpha (src_format);
1948 gst_video_format_get_component_offset (src_format, 3, src_width,
1951 gst_video_format_get_component_offset (src_format, 0, src_width,
1954 gst_video_format_get_component_offset (src_format, 1, src_width,
1957 gst_video_format_get_component_offset (src_format, 2, src_width,
/* RGB->YCbCr matrix for the destination's colorimetry. */
1961 (dest_sdtv) ? cog_rgb_to_ycbcr_matrix_8bit_sdtv :
1962 cog_rgb_to_ycbcr_matrix_8bit_hdtv, 12 * sizeof (gint));
1964 dest = dest + dest_y * dest_stride + dest_x * 4;
1965 src = src + src_y * src_stride + src_x * in_bpp;
/* Source has alpha: scale it by i_alpha per pixel. */
1969 for (i = 0; i < h; i++) {
1970 for (j = 0; j < w; j += 4) {
1971 a = (src[j + p_in[0]] * i_alpha) >> 8;
1972 r = src[j + p_in[1]];
1973 g = src[j + p_in[2]];
1974 b = src[j + p_in[3]];
1976 y = APPLY_MATRIX (matrix, 0, r, g, b);
1977 u = APPLY_MATRIX (matrix, 1, r, g, b);
1978 v = APPLY_MATRIX (matrix, 2, r, g, b);
1981 dest[j + 1] = CLAMP (y, 0, 255);
1982 dest[j + 2] = CLAMP (u, 0, 255);
1983 dest[j + 3] = CLAMP (v, 0, 255);
1985 dest += dest_stride;
/* 32-bit source without alpha: write constant i_alpha. */
1988 } else if (!packed_in) {
1990 i_alpha = CLAMP (i_alpha, 0, 255);
1992 for (i = 0; i < h; i++) {
1993 for (j = 0; j < w; j += 4) {
1995 r = src[j + p_in[1]];
1996 g = src[j + p_in[2]];
1997 b = src[j + p_in[3]];
1999 y = APPLY_MATRIX (matrix, 0, r, g, b);
2000 u = APPLY_MATRIX (matrix, 1, r, g, b);
2001 v = APPLY_MATRIX (matrix, 2, r, g, b);
2004 dest[j + 1] = CLAMP (y, 0, 255);
2005 dest[j + 2] = CLAMP (u, 0, 255);
2006 dest[j + 3] = CLAMP (v, 0, 255);
2008 dest += dest_stride;
/* 3-byte packed source: per-pixel indexing. */
2012 i_alpha = CLAMP (i_alpha, 0, 255);
2014 for (i = 0; i < h; i++) {
2015 for (j = 0; j < w; j++) {
2017 r = src[in_bpp * j + p_in[1]];
2018 g = src[in_bpp * j + p_in[2]];
2019 b = src[in_bpp * j + p_in[3]];
2021 y = APPLY_MATRIX (matrix, 0, r, g, b);
2022 u = APPLY_MATRIX (matrix, 1, r, g, b);
2023 v = APPLY_MATRIX (matrix, 2, r, g, b);
2025 dest[4 * j + 0] = a;
2026 dest[4 * j + 1] = CLAMP (y, 0, 255);
2027 dest[4 * j + 2] = CLAMP (u, 0, 255);
2028 dest[4 * j + 3] = CLAMP (v, 0, 255);
2030 dest += dest_stride;
/* Copy a w x h rectangle from a packed AYUV frame into a 24/32-bit RGB
 * destination, converting colors with the YCbCr->RGB matrix matching the
 * source colorimetry (SDTV or HDTV).  AYUV alpha, when the output has an
 * alpha channel, is scaled by i_alpha; otherwise it is dropped. */
2037 copy_ayuv_rgb32 (guint i_alpha, GstVideoFormat dest_format, guint8 * dest,
2038 gboolean dest_sdtv, gint dest_width, gint dest_height, gint dest_x,
2039 gint dest_y, GstVideoFormat src_format, const guint8 * src,
2040 gboolean src_sdtv, gint src_width, gint src_height, gint src_x, gint src_y,
2044 gint src_stride, dest_stride;
2048 gboolean packed_out = (dest_format == GST_VIDEO_FORMAT_RGB
2049 || dest_format == GST_VIDEO_FORMAT_BGR);
2055 dest_stride = (packed_out) ? GST_ROUND_UP_4 (3 * dest_width) : 4 * dest_width;
2056 src_stride = 4 * src_width;
2057 out_bpp = (packed_out) ? 3 : 4;
/* Destination component byte offsets: 0 = alpha, 1 = R, 2 = G, 3 = B. */
2059 out_alpha = gst_video_format_has_alpha (dest_format);
2061 gst_video_format_get_component_offset (dest_format, 3, dest_width,
2064 gst_video_format_get_component_offset (dest_format, 0, dest_width,
2067 gst_video_format_get_component_offset (dest_format, 1, dest_width,
2070 gst_video_format_get_component_offset (dest_format, 2, dest_width,
/* YCbCr->RGB matrix for the source's colorimetry. */
2074 (src_sdtv) ? cog_ycbcr_to_rgb_matrix_8bit_sdtv :
2075 cog_ycbcr_to_rgb_matrix_8bit_hdtv, 12 * sizeof (gint));
2077 dest = dest + dest_y * dest_stride + dest_x * out_bpp;
2078 src = src + src_y * src_stride + src_x * 4;
/* Output has alpha: scale the AYUV alpha byte by i_alpha. */
2082 for (i = 0; i < h; i++) {
2083 for (j = 0; j < w; j += 4) {
2084 a = (src[j + 0] * i_alpha) >> 8;
2089 r = APPLY_MATRIX (matrix, 0, y, u, v);
2090 g = APPLY_MATRIX (matrix, 1, y, u, v);
2091 b = APPLY_MATRIX (matrix, 2, y, u, v);
2093 dest[j + p_out[0]] = a;
2094 dest[j + p_out[1]] = CLAMP (r, 0, 255);
2095 dest[j + p_out[2]] = CLAMP (g, 0, 255);
2096 dest[j + p_out[3]] = CLAMP (b, 0, 255);
2098 dest += dest_stride;
/* 32-bit output without alpha: convert and drop alpha. */
2101 } else if (!packed_out) {
2103 for (i = 0; i < h; i++) {
2104 for (j = 0; j < w; j += 4) {
2109 r = APPLY_MATRIX (matrix, 0, y, u, v);
2110 g = APPLY_MATRIX (matrix, 1, y, u, v);
2111 b = APPLY_MATRIX (matrix, 2, y, u, v);
2113 dest[j + p_out[1]] = CLAMP (r, 0, 255);
2114 dest[j + p_out[2]] = CLAMP (g, 0, 255);
2115 dest[j + p_out[3]] = CLAMP (b, 0, 255);
2117 dest += dest_stride;
/* 3-byte packed output: per-pixel indexing. */
2121 for (i = 0; i < h; i++) {
2122 for (j = 0; j < w; j++) {
2127 r = APPLY_MATRIX (matrix, 0, y, u, v);
2128 g = APPLY_MATRIX (matrix, 1, y, u, v);
2129 b = APPLY_MATRIX (matrix, 2, y, u, v);
2131 dest[out_bpp * j + p_out[1]] = CLAMP (r, 0, 255);
2132 dest[out_bpp * j + p_out[2]] = CLAMP (g, 0, 255);
2133 dest[out_bpp * j + p_out[3]] = CLAMP (b, 0, 255);
2135 dest += dest_stride;
2142 fill_gray (GstVideoBoxFill fill_type, guint b_alpha, GstVideoFormat format,
2143 guint8 * dest, gboolean sdtv, gint width, gint height)
2148 if (format == GST_VIDEO_FORMAT_GRAY8) {
2149 guint8 val = yuv_sdtv_colors_Y[fill_type];
2151 dest_stride = GST_ROUND_UP_4 (width);
2152 for (i = 0; i < height; i++) {
2153 memset (dest, val, width);
2154 dest += dest_stride;
2157 guint16 val = yuv_sdtv_colors_Y[fill_type] << 8;
2159 dest_stride = GST_ROUND_UP_4 (width * 2);
2160 if (format == GST_VIDEO_FORMAT_GRAY16_BE) {
2161 for (i = 0; i < height; i++) {
2162 for (j = 0; j < width; j++) {
2163 GST_WRITE_UINT16_BE (dest + 2 * j, val);
2165 dest += dest_stride;
2168 for (i = 0; i < height; i++) {
2169 for (j = 0; j < width; j++) {
2170 GST_WRITE_UINT16_LE (dest + 2 * j, val);
2172 dest += dest_stride;
2179 copy_packed_simple (guint i_alpha, GstVideoFormat dest_format, guint8 * dest,
2180 gboolean dest_sdtv, gint dest_width, gint dest_height, gint dest_x,
2181 gint dest_y, GstVideoFormat src_format, const guint8 * src,
2182 gboolean src_sdtv, gint src_width, gint src_height, gint src_x, gint src_y,
2186 gint src_stride, dest_stride;
2187 gint pixel_stride, row_size;
2189 src_stride = gst_video_format_get_row_stride (src_format, 0, src_width);
2190 dest_stride = gst_video_format_get_row_stride (dest_format, 0, dest_width);
2191 pixel_stride = gst_video_format_get_pixel_stride (dest_format, 0);
2192 row_size = w * pixel_stride;
2194 dest = dest + dest_y * dest_stride + dest_x * pixel_stride;
2195 src = src + src_y * src_stride + src_x * pixel_stride;
2197 for (i = 0; i < h; i++) {
2198 memcpy (dest, src, row_size);
2199 dest += dest_stride;
2205 fill_yuy2 (GstVideoBoxFill fill_type, guint b_alpha, GstVideoFormat format,
2206 guint8 * dest, gboolean sdtv, gint width, gint height)
2210 gint stride = gst_video_format_get_row_stride (format, 0, width);
2212 y = (sdtv) ? yuv_sdtv_colors_Y[fill_type] : yuv_hdtv_colors_Y[fill_type];
2213 u = (sdtv) ? yuv_sdtv_colors_U[fill_type] : yuv_hdtv_colors_U[fill_type];
2214 v = (sdtv) ? yuv_sdtv_colors_V[fill_type] : yuv_hdtv_colors_V[fill_type];
2216 width = width + (width % 2);
2218 if (format == GST_VIDEO_FORMAT_YUY2) {
2219 for (i = 0; i < height; i++) {
2220 for (j = 0; j < width; j += 2) {
2221 dest[j * 2 + 0] = y;
2222 dest[j * 2 + 1] = u;
2223 dest[j * 2 + 2] = y;
2224 dest[j * 2 + 3] = v;
2229 } else if (format == GST_VIDEO_FORMAT_YVYU) {
2230 for (i = 0; i < height; i++) {
2231 for (j = 0; j < width; j += 2) {
2232 dest[j * 2 + 0] = y;
2233 dest[j * 2 + 1] = v;
2234 dest[j * 2 + 2] = y;
2235 dest[j * 2 + 3] = u;
2241 for (i = 0; i < height; i++) {
2242 for (j = 0; j < width; j += 2) {
2243 dest[j * 2 + 0] = u;
2244 dest[j * 2 + 1] = y;
2245 dest[j * 2 + 2] = v;
2246 dest[j * 2 + 3] = y;
/* copy_yuy2_yuy2: copy a w x h pixel region between packed-4:2:2 frames
 * (YUY2/YVYU/UYVY) at the given src/dest offsets.  When the source and
 * destination colorimetry differ (src_sdtv != dest_sdtv) each sample is
 * converted through an 8-bit BT.601<->BT.709 matrix; otherwise rows are
 * copied verbatim with memcpy.  @i_alpha is accepted but unused: these
 * formats have no alpha.  NOTE(review): elided extraction -- leading numeric
 * tokens are original line numbers; braces and the per-row src advance are
 * not visible here. */
2255 copy_yuy2_yuy2 (guint i_alpha, GstVideoFormat dest_format, guint8 * dest,
2256 gboolean dest_sdtv, gint dest_width, gint dest_height, gint dest_x,
2257 gint dest_y, GstVideoFormat src_format, const guint8 * src,
2258 gboolean src_sdtv, gint src_width, gint src_height, gint src_x, gint src_y,
2262 gint src_stride, dest_stride;
2264 src_stride = gst_video_format_get_row_stride (src_format, 0, src_width);
2265 dest_stride = gst_video_format_get_row_stride (dest_format, 0, dest_width);
/* Force both x offsets onto an even pixel so we stay aligned to 4:2:2
 * macropixel (2-pixel) boundaries. */
2267 dest_x = (dest_x & ~1);
2268 src_x = (src_x & ~1);
/* Each pixel occupies 2 bytes in packed 4:2:2. */
2272 dest = dest + dest_y * dest_stride + dest_x * 2;
2273 src = src + src_y * src_stride + src_x * 2;
/* Colorimetry differs: run every sample through the conversion matrix. */
2275 if (src_sdtv != dest_sdtv) {
2281 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
2282 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
2284 if (src_format == GST_VIDEO_FORMAT_YUY2) {
2285 for (i = 0; i < h; i++) {
2286 for (j = 0; j < w; j += 2) {
2287 y1 = src[j * 2 + 0];
2288 y2 = src[j * 2 + 2];
/* Chroma is shared by both pixels of the macropixel. */
2289 u1 = u2 = src[j * 2 + 1];
2290 v1 = v2 = src[j * 2 + 3];
2292 dest[j * 2 + 0] = APPLY_MATRIX (matrix, 0, y1, u1, v1);
2293 dest[j * 2 + 1] = APPLY_MATRIX (matrix, 1, y1, u1, v1);
/* NOTE(review): y2 is loaded above but never used for the second luma
 * sample -- this looks like it should be y2, not y1; confirm upstream. */
2294 dest[j * 2 + 2] = APPLY_MATRIX (matrix, 0, y1, u2, v2);
2295 dest[j * 2 + 3] = APPLY_MATRIX (matrix, 2, y2, u2, v2);
2297 dest += dest_stride;
2300 } else if (src_format == GST_VIDEO_FORMAT_YVYU) {
2301 for (i = 0; i < h; i++) {
2302 for (j = 0; j < w; j += 2) {
2303 y1 = src[j * 2 + 0];
2304 y2 = src[j * 2 + 2];
2305 v1 = v2 = src[j * 2 + 1];
2306 u1 = u2 = src[j * 2 + 3];
2308 dest[j * 2 + 0] = APPLY_MATRIX (matrix, 0, y1, u1, v1);
2309 dest[j * 2 + 1] = APPLY_MATRIX (matrix, 2, y1, u1, v1);
/* NOTE(review): same suspected y1-vs-y2 mixup as in the YUY2 branch. */
2310 dest[j * 2 + 2] = APPLY_MATRIX (matrix, 0, y1, u2, v2);
2311 dest[j * 2 + 3] = APPLY_MATRIX (matrix, 1, y2, u2, v2);
2313 dest += dest_stride;
/* Remaining case: UYVY, chroma bytes first (U Y0 V Y1). */
2317 for (i = 0; i < h; i++) {
2318 for (j = 0; j < w; j += 2) {
2319 u1 = u2 = src[j * 2 + 0];
2320 v1 = v2 = src[j * 2 + 2];
2321 y1 = src[j * 2 + 1];
2322 y2 = src[j * 2 + 3];
2324 dest[j * 2 + 1] = APPLY_MATRIX (matrix, 0, y1, u1, v1);
2325 dest[j * 2 + 0] = APPLY_MATRIX (matrix, 1, y1, u1, v1);
/* NOTE(review): same suspected y1-vs-y2 mixup as in the YUY2 branch. */
2326 dest[j * 2 + 3] = APPLY_MATRIX (matrix, 0, y1, u2, v2);
2327 dest[j * 2 + 2] = APPLY_MATRIX (matrix, 2, y2, u2, v2);
2329 dest += dest_stride;
/* Same colorimetry: plain row-by-row copy, 2 bytes per pixel. */
2334 for (i = 0; i < h; i++) {
2335 memcpy (dest, src, w * 2);
2336 dest += dest_stride;
2342 #define DEFAULT_LEFT 0
2343 #define DEFAULT_RIGHT 0
2344 #define DEFAULT_TOP 0
2345 #define DEFAULT_BOTTOM 0
2346 #define DEFAULT_FILL_TYPE VIDEO_BOX_FILL_BLACK
2347 #define DEFAULT_ALPHA 1.0
2348 #define DEFAULT_BORDER_ALPHA 1.0
/* Source pad template: every raw video format videobox can produce --
 * alpha-capable AYUV/ARGB variants listed first so alpha-preserving
 * negotiation is preferred, then opaque RGB, planar/packed YUV and gray. */
2364 static GstStaticPadTemplate gst_video_box_src_template =
2365 GST_STATIC_PAD_TEMPLATE ("src",
2368 GST_STATIC_CAPS (GST_VIDEO_CAPS_YUV ("AYUV") ";"
2369 GST_VIDEO_CAPS_ARGB ";" GST_VIDEO_CAPS_BGRA ";"
2370 GST_VIDEO_CAPS_ABGR ";" GST_VIDEO_CAPS_RGBA ";"
2371 GST_VIDEO_CAPS_xRGB ";" GST_VIDEO_CAPS_BGRx ";"
2372 GST_VIDEO_CAPS_xBGR ";" GST_VIDEO_CAPS_RGBx ";"
2373 GST_VIDEO_CAPS_RGB ";" GST_VIDEO_CAPS_BGR ";"
2374 GST_VIDEO_CAPS_YUV ("Y444") ";"
2375 GST_VIDEO_CAPS_YUV ("Y42B") ";"
2376 GST_VIDEO_CAPS_YUV ("YUY2") ";"
2377 GST_VIDEO_CAPS_YUV ("YVYU") ";"
2378 GST_VIDEO_CAPS_YUV ("UYVY") ";"
2379 GST_VIDEO_CAPS_YUV ("I420") ";"
2380 GST_VIDEO_CAPS_YUV ("YV12") ";"
2381 GST_VIDEO_CAPS_YUV ("Y41B") ";"
2382 GST_VIDEO_CAPS_GRAY8 ";"
2383 GST_VIDEO_CAPS_GRAY16 ("BIG_ENDIAN") ";"
2384 GST_VIDEO_CAPS_GRAY16 ("LITTLE_ENDIAN"))
/* Sink pad template: identical format list to the source template -- videobox
 * accepts exactly the formats it can output. */
2387 static GstStaticPadTemplate gst_video_box_sink_template =
2388 GST_STATIC_PAD_TEMPLATE ("sink",
2391 GST_STATIC_CAPS (GST_VIDEO_CAPS_YUV ("AYUV") ";"
2392 GST_VIDEO_CAPS_ARGB ";" GST_VIDEO_CAPS_BGRA ";"
2393 GST_VIDEO_CAPS_ABGR ";" GST_VIDEO_CAPS_RGBA ";"
2394 GST_VIDEO_CAPS_xRGB ";" GST_VIDEO_CAPS_BGRx ";"
2395 GST_VIDEO_CAPS_xBGR ";" GST_VIDEO_CAPS_RGBx ";"
2396 GST_VIDEO_CAPS_RGB ";" GST_VIDEO_CAPS_BGR ";"
2397 GST_VIDEO_CAPS_YUV ("Y444") ";"
2398 GST_VIDEO_CAPS_YUV ("Y42B") ";"
2399 GST_VIDEO_CAPS_YUV ("YUY2") ";"
2400 GST_VIDEO_CAPS_YUV ("YVYU") ";"
2401 GST_VIDEO_CAPS_YUV ("UYVY") ";"
2402 GST_VIDEO_CAPS_YUV ("I420") ";"
2403 GST_VIDEO_CAPS_YUV ("YV12") ";"
2404 GST_VIDEO_CAPS_YUV ("Y41B") ";"
2405 GST_VIDEO_CAPS_GRAY8 ";"
2406 GST_VIDEO_CAPS_GRAY16 ("BIG_ENDIAN") ";"
2407 GST_VIDEO_CAPS_GRAY16 ("LITTLE_ENDIAN"))
2410 GST_BOILERPLATE (GstVideoBox, gst_video_box, GstBaseTransform,
2411 GST_TYPE_BASE_TRANSFORM);
2413 static void gst_video_box_set_property (GObject * object, guint prop_id,
2414 const GValue * value, GParamSpec * pspec);
2415 static void gst_video_box_get_property (GObject * object, guint prop_id,
2416 GValue * value, GParamSpec * pspec);
2418 static gboolean gst_video_box_recalc_transform (GstVideoBox * video_box);
2419 static GstCaps *gst_video_box_transform_caps (GstBaseTransform * trans,
2420 GstPadDirection direction, GstCaps * from);
2421 static gboolean gst_video_box_set_caps (GstBaseTransform * trans,
2422 GstCaps * in, GstCaps * out);
2423 static gboolean gst_video_box_get_unit_size (GstBaseTransform * trans,
2424 GstCaps * caps, guint * size);
2425 static GstFlowReturn gst_video_box_transform (GstBaseTransform * trans,
2426 GstBuffer * in, GstBuffer * out);
2427 static void gst_video_box_before_transform (GstBaseTransform * trans,
2429 static void gst_video_box_fixate_caps (GstBaseTransform * trans,
2430 GstPadDirection direction, GstCaps * caps, GstCaps * othercaps);
2431 static gboolean gst_video_box_src_event (GstBaseTransform * trans,
2434 #define GST_TYPE_VIDEO_BOX_FILL (gst_video_box_fill_get_type())
/* Lazily register and return the GType for the GstVideoBoxFill enum used by
 * the "fill" property.  NOTE(review): the registration is guarded only by a
 * plain static -- presumably first called from class_init in a
 * single-threaded context; not thread-safe as written, confirm. */
2436 gst_video_box_fill_get_type (void)
2438 static GType video_box_fill_type = 0;
/* Nick/name table for the selectable border colours. */
2439 static const GEnumValue video_box_fill[] = {
2440 {VIDEO_BOX_FILL_BLACK, "Black", "black"},
2441 {VIDEO_BOX_FILL_GREEN, "Green", "green"},
2442 {VIDEO_BOX_FILL_BLUE, "Blue", "blue"},
2443 {VIDEO_BOX_FILL_RED, "Red", "red"},
2444 {VIDEO_BOX_FILL_YELLOW, "Yellow", "yellow"},
2445 {VIDEO_BOX_FILL_WHITE, "White", "white"},
2449 if (!video_box_fill_type) {
2450 video_box_fill_type =
2451 g_enum_register_static ("GstVideoBoxFill", video_box_fill);
2453 return video_box_fill_type;
/* GObject base_init: register the element's details (name, klass, blurb,
 * author) and both static pad templates with the element class. */
2458 gst_video_box_base_init (gpointer g_class)
2460 GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);
2462 gst_element_class_set_details_simple (element_class, "Video box filter",
2463 "Filter/Effect/Video",
2464 "Resizes a video by adding borders or cropping",
2465 "Wim Taymans <wim@fluendo.com>");
2467 gst_element_class_add_pad_template (element_class,
2468 gst_static_pad_template_get (&gst_video_box_sink_template));
2469 gst_element_class_add_pad_template (element_class,
2470 gst_static_pad_template_get (&gst_video_box_src_template));
2474 gst_video_box_finalize (GObject * object)
2476 GstVideoBox *video_box = GST_VIDEO_BOX (object);
2478 if (video_box->mutex) {
2479 g_mutex_free (video_box->mutex);
2480 video_box->mutex = NULL;
2483 G_OBJECT_CLASS (parent_class)->finalize (object);
/* GObject class_init: wire up property handlers, install the element's
 * properties (all box/fill/alpha properties are GstController-controllable;
 * "autocrop" is not) and register the GstBaseTransform virtual methods. */
2487 gst_video_box_class_init (GstVideoBoxClass * klass)
2489 GObjectClass *gobject_class = (GObjectClass *) klass;
2490 GstBaseTransformClass *trans_class = (GstBaseTransformClass *) klass;
2492 gobject_class->set_property = gst_video_box_set_property;
2493 gobject_class->get_property = gst_video_box_get_property;
2494 gobject_class->finalize = gst_video_box_finalize;
/* Border fill colour, one of the GstVideoBoxFill enum values. */
2496 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_FILL_TYPE,
2497 g_param_spec_enum ("fill", "Fill", "How to fill the borders",
2498 GST_TYPE_VIDEO_BOX_FILL, DEFAULT_FILL_TYPE,
2499 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
/* Per-edge box values: positive crops, negative adds a border. */
2500 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_LEFT,
2501 g_param_spec_int ("left", "Left",
2502 "Pixels to box at left (<0 = add a border)", G_MININT, G_MAXINT,
2504 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
2505 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_RIGHT,
2506 g_param_spec_int ("right", "Right",
2507 "Pixels to box at right (<0 = add a border)", G_MININT, G_MAXINT,
2509 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
2510 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_TOP,
2511 g_param_spec_int ("top", "Top",
2512 "Pixels to box at top (<0 = add a border)", G_MININT, G_MAXINT,
2514 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
2515 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_BOTTOM,
2516 g_param_spec_int ("bottom", "Bottom",
2517 "Pixels to box at bottom (<0 = add a border)", G_MININT, G_MAXINT,
2519 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
/* Alpha of the inner picture and of the border, 0.0 = transparent. */
2520 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_ALPHA,
2521 g_param_spec_double ("alpha", "Alpha", "Alpha value picture", 0.0, 1.0,
2523 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
2524 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_BORDER_ALPHA,
2525 g_param_spec_double ("border-alpha", "Border Alpha",
2526 "Alpha value of the border", 0.0, 1.0, DEFAULT_BORDER_ALPHA,
2527 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
2529 * GstVideoBox:autocrop
2531 * If set to %TRUE videobox will automatically crop/pad the input
2532 * video to be centered in the output.
2536 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_AUTOCROP,
2537 g_param_spec_boolean ("autocrop", "Auto crop",
2538 "Auto crop", FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
/* GstBaseTransform vmethods implemented by this element. */
2540 trans_class->transform = GST_DEBUG_FUNCPTR (gst_video_box_transform);
2541 trans_class->before_transform =
2542 GST_DEBUG_FUNCPTR (gst_video_box_before_transform);
2543 trans_class->transform_caps =
2544 GST_DEBUG_FUNCPTR (gst_video_box_transform_caps);
2545 trans_class->set_caps = GST_DEBUG_FUNCPTR (gst_video_box_set_caps);
2546 trans_class->get_unit_size = GST_DEBUG_FUNCPTR (gst_video_box_get_unit_size);
2547 trans_class->fixate_caps = GST_DEBUG_FUNCPTR (gst_video_box_fixate_caps);
2548 trans_class->src_event = GST_DEBUG_FUNCPTR (gst_video_box_src_event);
/* Instance init: set all box/crop values to their defaults (no crop, no
 * border, fully opaque) and create the mutex that protects the property
 * state against the streaming thread. */
2552 gst_video_box_init (GstVideoBox * video_box, GstVideoBoxClass * g_class)
2554 video_box->box_right = DEFAULT_RIGHT;
2555 video_box->box_left = DEFAULT_LEFT;
2556 video_box->box_top = DEFAULT_TOP;
2557 video_box->box_bottom = DEFAULT_BOTTOM;
2558 video_box->crop_right = 0;
2559 video_box->crop_left = 0;
2560 video_box->crop_top = 0;
2561 video_box->crop_bottom = 0;
2562 video_box->fill_type = DEFAULT_FILL_TYPE;
2563 video_box->alpha = DEFAULT_ALPHA;
2564 video_box->border_alpha = DEFAULT_BORDER_ALPHA;
2565 video_box->autocrop = FALSE;
2567 video_box->mutex = g_mutex_new ();
/* Property setter.  Each box value is split into a crop amount (positive
 * values) or a border amount (negative values); afterwards the passthrough
 * decision is recomputed and basetransform is asked to renegotiate.  All of
 * it runs under the instance mutex so the streaming thread never sees a
 * half-updated state.  NOTE(review): elided extraction -- the switch/case
 * skeleton and break statements are not visible here. */
2571 gst_video_box_set_property (GObject * object, guint prop_id,
2572 const GValue * value, GParamSpec * pspec)
2574 GstVideoBox *video_box = GST_VIDEO_BOX (object);
2576 g_mutex_lock (video_box->mutex);
/* left: <0 adds a left border, >=0 crops from the left. */
2579 video_box->box_left = g_value_get_int (value);
2580 if (video_box->box_left < 0) {
2581 video_box->border_left = -video_box->box_left;
2582 video_box->crop_left = 0;
2584 video_box->border_left = 0;
2585 video_box->crop_left = video_box->box_left;
/* right: same sign convention. */
2589 video_box->box_right = g_value_get_int (value);
2590 if (video_box->box_right < 0) {
2591 video_box->border_right = -video_box->box_right;
2592 video_box->crop_right = 0;
2594 video_box->border_right = 0;
2595 video_box->crop_right = video_box->box_right;
/* top: same sign convention. */
2599 video_box->box_top = g_value_get_int (value);
2600 if (video_box->box_top < 0) {
2601 video_box->border_top = -video_box->box_top;
2602 video_box->crop_top = 0;
2604 video_box->border_top = 0;
2605 video_box->crop_top = video_box->box_top;
/* bottom: same sign convention. */
2609 video_box->box_bottom = g_value_get_int (value);
2610 if (video_box->box_bottom < 0) {
2611 video_box->border_bottom = -video_box->box_bottom;
2612 video_box->crop_bottom = 0;
2614 video_box->border_bottom = 0;
2615 video_box->crop_bottom = video_box->box_bottom;
2618 case PROP_FILL_TYPE:
2619 video_box->fill_type = g_value_get_enum (value);
2622 video_box->alpha = g_value_get_double (value);
2624 case PROP_BORDER_ALPHA:
2625 video_box->border_alpha = g_value_get_double (value);
2628 video_box->autocrop = g_value_get_boolean (value);
2631 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
/* Re-evaluate passthrough and force caps renegotiation for the new values. */
2634 gst_video_box_recalc_transform (video_box);
2636 GST_DEBUG_OBJECT (video_box, "Calling reconfigure");
2637 gst_base_transform_reconfigure (GST_BASE_TRANSFORM_CAST (video_box));
2639 g_mutex_unlock (video_box->mutex);
/* Derive box/crop/border values so the input frame ends up centered in the
 * output frame: half the width/height difference goes to each side.
 * NOTE(review): elided extraction -- the adjustments for odd differences
 * (see the "Round down/up" comments) are not visible in this chunk. */
2643 gst_video_box_autocrop (GstVideoBox * video_box)
/* Positive crop_w/crop_h means the input is larger and must be cropped;
 * negative means borders must be added. */
2645 gint crop_w = video_box->in_width - video_box->out_width;
2646 gint crop_h = video_box->in_height - video_box->out_height;
2648 video_box->box_left = crop_w / 2;
2649 if (video_box->box_left < 0) {
2650 video_box->border_left = -video_box->box_left;
2651 video_box->crop_left = 0;
2653 video_box->border_left = 0;
2654 video_box->crop_left = video_box->box_left;
2657 /* Round down/up for odd width differences */
2663 video_box->box_right = crop_w / 2;
2664 if (video_box->box_right < 0) {
2665 video_box->border_right = -video_box->box_right;
2666 video_box->crop_right = 0;
2668 video_box->border_right = 0;
2669 video_box->crop_right = video_box->box_right;
2672 video_box->box_top = crop_h / 2;
2673 if (video_box->box_top < 0) {
2674 video_box->border_top = -video_box->box_top;
2675 video_box->crop_top = 0;
2677 video_box->border_top = 0;
2678 video_box->crop_top = video_box->box_top;
2681 /* Round down/up for odd height differences */
2686 video_box->box_bottom = crop_h / 2;
2688 if (video_box->box_bottom < 0) {
2689 video_box->border_bottom = -video_box->box_bottom;
2690 video_box->crop_bottom = 0;
2692 video_box->border_bottom = 0;
2693 video_box->crop_bottom = video_box->box_bottom;
/* Property getter: straight read-out of the stored box/fill/alpha/autocrop
 * values.  NOTE(review): elided extraction -- most case labels and breaks
 * are not visible here. */
2698 gst_video_box_get_property (GObject * object, guint prop_id, GValue * value,
2701 GstVideoBox *video_box = GST_VIDEO_BOX (object);
2705 g_value_set_int (value, video_box->box_left);
2708 g_value_set_int (value, video_box->box_right);
2711 g_value_set_int (value, video_box->box_top);
2714 g_value_set_int (value, video_box->box_bottom);
2716 case PROP_FILL_TYPE:
2717 g_value_set_enum (value, video_box->fill_type);
2720 g_value_set_double (value, video_box->alpha);
2722 case PROP_BORDER_ALPHA:
2723 g_value_set_double (value, video_box->border_alpha);
2726 g_value_set_boolean (value, video_box->autocrop);
2729 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
2735 gst_video_box_transform_dimension (gint val, gint delta)
2737 gint64 new_val = (gint64) val + (gint64) delta;
2739 new_val = CLAMP (new_val, 1, G_MAXINT);
2741 return (gint) new_val;
/* Apply @delta to a caps dimension GValue: handles a plain int, an int
 * range, and a list of values (transformed element-wise).  On failure the
 * half-built dest_val is unset; the visible g_value_unset() calls mark the
 * failure paths.  NOTE(review): elided extraction -- the FALSE assignments,
 * list initialisation and the return statement are not visible here. */
2745 gst_video_box_transform_dimension_value (const GValue * src_val,
2746 gint delta, GValue * dest_val)
2748 gboolean ret = TRUE;
2750 g_value_init (dest_val, G_VALUE_TYPE (src_val));
/* Simple int: shift and clamp. */
2752 if (G_VALUE_HOLDS_INT (src_val)) {
2753 gint ival = g_value_get_int (src_val);
2755 ival = gst_video_box_transform_dimension (ival, delta);
2756 g_value_set_int (dest_val, ival);
/* Int range: shift and clamp both endpoints. */
2757 } else if (GST_VALUE_HOLDS_INT_RANGE (src_val)) {
2758 gint min = gst_value_get_int_range_min (src_val);
2759 gint max = gst_value_get_int_range_max (src_val);
2761 min = gst_video_box_transform_dimension (min, delta);
2762 max = gst_video_box_transform_dimension (max, delta);
2765 g_value_unset (dest_val);
2767 gst_value_set_int_range (dest_val, min, max);
/* List: transform each element, keeping only the ones that succeeded. */
2769 } else if (GST_VALUE_HOLDS_LIST (src_val)) {
2772 for (i = 0; i < gst_value_list_get_size (src_val); ++i) {
2773 const GValue *list_val;
2774 GValue newval = { 0, };
2776 list_val = gst_value_list_get_value (src_val, i);
2777 if (gst_video_box_transform_dimension_value (list_val, delta, &newval))
2778 gst_value_list_append_value (dest_val, &newval);
2779 g_value_unset (&newval);
/* An empty result list means the transform failed entirely. */
2782 if (gst_value_list_get_size (dest_val) == 0) {
2783 g_value_unset (dest_val);
2787 g_value_unset (dest_val);
/* transform_caps vmethod: compute what caps the opposite pad can handle.
 * Width/height are shifted by the configured box values (or left open when
 * autocrop is on), the supported format conversions are added, and the
 * result is intersected with the opposite pad's template caps.
 * NOTE(review): elided extraction -- variable declarations, braces and some
 * statements are not visible in this chunk. */
2795 gst_video_box_transform_caps (GstBaseTransform * trans,
2796 GstPadDirection direction, GstCaps * from)
2798 GstVideoBox *video_box = GST_VIDEO_BOX (trans);
2800 const GstCaps *templ;
2802 GstStructure *structure;
2805 to = gst_caps_copy (from);
2806 /* Just to be sure... */
2807 gst_caps_truncate (to);
2808 structure = gst_caps_get_structure (to, 0);
2810 /* Transform width/height */
/* In autocrop mode the caps dictate the crop, so any size is acceptable. */
2811 if (video_box->autocrop) {
2812 gst_structure_remove_field (structure, "width");
2813 gst_structure_remove_field (structure, "height");
2815 gint dw = 0, dh = 0;
2817 GValue w_val = { 0, };
2818 GValue h_val = { 0, };
2820 /* calculate width and height */
/* Going downstream the box values are subtracted, going upstream added. */
2821 if (direction == GST_PAD_SINK) {
2822 dw -= video_box->box_left;
2823 dw -= video_box->box_right;
2825 dw += video_box->box_left;
2826 dw += video_box->box_right;
2829 if (direction == GST_PAD_SINK) {
2830 dh -= video_box->box_top;
2831 dh -= video_box->box_bottom;
2833 dh += video_box->box_top;
2834 dh += video_box->box_bottom;
2837 v = gst_structure_get_value (structure, "width");
2838 if (!gst_video_box_transform_dimension_value (v, dw, &w_val)) {
2839 GST_WARNING_OBJECT (video_box, "could not tranform width value with dw=%d"
2840 ", caps structure=%" GST_PTR_FORMAT, dw, structure);
2841 gst_caps_unref (to);
2842 to = gst_caps_new_empty ();
2845 gst_structure_set_value (structure, "width", &w_val);
2847 v = gst_structure_get_value (structure, "height");
2848 if (!gst_video_box_transform_dimension_value (v, dh, &h_val)) {
2849 g_value_unset (&w_val);
2850 GST_WARNING_OBJECT (video_box,
2851 "could not tranform height value with dh=%d" ", caps structure=%"
2852 GST_PTR_FORMAT, dh, structure);
2853 gst_caps_unref (to);
2854 to = gst_caps_new_empty ();
2857 gst_structure_set_value (structure, "height", &h_val);
2858 g_value_unset (&w_val);
2859 g_value_unset (&h_val);
2862 /* Supported conversions:
2869 * AYUV->xRGB (24bpp, 32bpp, incl. alpha)
2870 * xRGB->xRGB (24bpp, 32bpp, from/to all variants, incl. alpha)
2871 * xRGB->AYUV (24bpp, 32bpp, incl. alpha)
2873 * Passthrough only for everything else.
2875 name = gst_structure_get_name (structure);
/* YUV input: AYUV/I420/YV12 can convert among each other ... */
2876 if (g_str_equal (name, "video/x-raw-yuv")) {
2879 if (gst_structure_get_fourcc (structure, "format", &fourcc) &&
2880 (fourcc == GST_STR_FOURCC ("AYUV") ||
2881 fourcc == GST_STR_FOURCC ("I420") ||
2882 fourcc == GST_STR_FOURCC ("YV12"))) {
2883 GValue list = { 0, };
2884 GValue val = { 0, };
2887 /* get rid of format */
2888 gst_structure_remove_field (structure, "format");
2889 gst_structure_remove_field (structure, "color-matrix");
2890 gst_structure_remove_field (structure, "chroma-site");
2892 s2 = gst_structure_copy (structure);
2894 g_value_init (&list, GST_TYPE_LIST);
2895 g_value_init (&val, GST_TYPE_FOURCC);
2896 gst_value_set_fourcc (&val, GST_STR_FOURCC ("AYUV"));
2897 gst_value_list_append_value (&list, &val);
2898 g_value_reset (&val);
2899 gst_value_set_fourcc (&val, GST_STR_FOURCC ("I420"));
2900 gst_value_list_append_value (&list, &val);
2901 g_value_reset (&val);
2902 gst_value_set_fourcc (&val, GST_STR_FOURCC ("YV12"));
2903 gst_value_list_append_value (&list, &val);
2904 g_value_unset (&val);
2905 gst_structure_set_value (structure, "format", &list);
2906 g_value_unset (&list);
2908 /* We can only convert to RGB if input is AYUV */
2909 if (fourcc == GST_STR_FOURCC ("AYUV")) {
/* ... and AYUV additionally to 24/32 bpp RGB variants. */
2910 gst_structure_set_name (s2, "video/x-raw-rgb");
2911 g_value_init (&list, GST_TYPE_LIST);
2912 g_value_init (&val, G_TYPE_INT);
2913 g_value_set_int (&val, 32);
2914 gst_value_list_append_value (&list, &val);
2915 g_value_reset (&val);
2916 g_value_set_int (&val, 24);
2917 gst_value_list_append_value (&list, &val);
2918 g_value_unset (&val);
2919 gst_structure_set_value (s2, "depth", &list);
2920 gst_structure_set_value (s2, "bpp", &list);
2921 g_value_unset (&list);
2922 gst_caps_append_structure (to, s2);
2924 gst_structure_free (s2);
/* 24/32 bpp RGB input: any RGB variant and AYUV are reachable. */
2927 } else if (g_str_equal (name, "video/x-raw-rgb")) {
2930 if (gst_structure_get_int (structure, "bpp", &bpp) &&
2931 (bpp == 32 || bpp == 24)) {
2932 GValue list = { 0, };
2933 GValue val = { 0, };
2936 /* get rid of format */
2937 gst_structure_remove_field (structure, "depth");
2938 gst_structure_remove_field (structure, "bpp");
2939 gst_structure_remove_field (structure, "red_mask");
2940 gst_structure_remove_field (structure, "green_mask");
2941 gst_structure_remove_field (structure, "blue_mask");
2942 gst_structure_remove_field (structure, "alpha_mask");
2944 s2 = gst_structure_copy (structure);
2946 g_value_init (&list, GST_TYPE_LIST);
2947 g_value_init (&val, G_TYPE_INT);
2948 g_value_set_int (&val, 32);
2949 gst_value_list_append_value (&list, &val);
2950 g_value_reset (&val);
2951 g_value_set_int (&val, 24);
2952 gst_value_list_append_value (&list, &val);
2953 g_value_unset (&val);
2954 gst_structure_set_value (structure, "depth", &list);
2955 gst_structure_set_value (structure, "bpp", &list);
2956 g_value_unset (&list);
2958 gst_structure_set_name (s2, "video/x-raw-yuv");
2959 gst_structure_set (s2, "format", GST_TYPE_FOURCC, GST_STR_FOURCC ("AYUV"),
2961 gst_caps_append_structure (to, s2);
2965 /* filter against set allowed caps on the pad */
2966 other = (direction == GST_PAD_SINK) ? trans->srcpad : trans->sinkpad;
2968 templ = gst_pad_get_pad_template_caps (other);
2969 ret = gst_caps_intersect (to, templ);
2970 gst_caps_unref (to);
2972 GST_DEBUG_OBJECT (video_box, "direction %d, transformed %" GST_PTR_FORMAT
2973 " to %" GST_PTR_FORMAT, direction, from, ret);
/* Decide whether the element can run in passthrough mode: only when format,
 * colorimetry and all four box values leave the frame untouched.
 * NOTE(review): elided extraction -- the TRUE/FALSE arguments of the
 * set_passthrough calls and the return statement are not visible here. */
2979 gst_video_box_recalc_transform (GstVideoBox * video_box)
2981 gboolean res = TRUE;
2983 /* if we have the same format in and out and we don't need to perform any
2984 * cropping at all, we can just operate in passthrough mode */
2985 if (video_box->in_format == video_box->out_format &&
2986 video_box->box_left == 0 && video_box->box_right == 0 &&
2987 video_box->box_top == 0 && video_box->box_bottom == 0 &&
2988 video_box->in_sdtv == video_box->out_sdtv) {
2990 GST_LOG_OBJECT (video_box, "we are using passthrough");
2991 gst_base_transform_set_passthrough (GST_BASE_TRANSFORM_CAST (video_box),
2994 GST_LOG_OBJECT (video_box, "we are not using passthrough");
2995 gst_base_transform_set_passthrough (GST_BASE_TRANSFORM_CAST (video_box),
/* Pick the fill() and copy() implementations matching the negotiated
 * output/input format pair.  Returns TRUE only when both function pointers
 * could be set, i.e. the conversion is supported.  NOTE(review): elided
 * extraction -- break statements, default labels and closing braces of the
 * nested switches are not visible here. */
3002 gst_video_box_select_processing_functions (GstVideoBox * video_box)
3004 switch (video_box->out_format) {
/* AYUV out: from AYUV, planar 4:2:0, or any 24/32 bpp RGB variant. */
3005 case GST_VIDEO_FORMAT_AYUV:
3006 video_box->fill = fill_ayuv;
3007 switch (video_box->in_format) {
3008 case GST_VIDEO_FORMAT_AYUV:
3009 video_box->copy = copy_ayuv_ayuv;
3011 case GST_VIDEO_FORMAT_I420:
3012 case GST_VIDEO_FORMAT_YV12:
3013 video_box->copy = copy_i420_ayuv;
3015 case GST_VIDEO_FORMAT_ARGB:
3016 case GST_VIDEO_FORMAT_ABGR:
3017 case GST_VIDEO_FORMAT_RGBA:
3018 case GST_VIDEO_FORMAT_BGRA:
3019 case GST_VIDEO_FORMAT_xRGB:
3020 case GST_VIDEO_FORMAT_xBGR:
3021 case GST_VIDEO_FORMAT_RGBx:
3022 case GST_VIDEO_FORMAT_BGRx:
3023 case GST_VIDEO_FORMAT_RGB:
3024 case GST_VIDEO_FORMAT_BGR:
3025 video_box->copy = copy_rgb32_ayuv;
/* Planar 4:2:0 out: from AYUV or planar 4:2:0. */
3031 case GST_VIDEO_FORMAT_I420:
3032 case GST_VIDEO_FORMAT_YV12:
3033 video_box->fill = fill_planar_yuv;
3034 switch (video_box->in_format) {
3035 case GST_VIDEO_FORMAT_AYUV:
3036 video_box->copy = copy_ayuv_i420;
3038 case GST_VIDEO_FORMAT_I420:
3039 case GST_VIDEO_FORMAT_YV12:
3040 video_box->copy = copy_i420_i420;
/* RGB out: 24 bpp formats get the 3-byte fill, the rest the 4-byte fill. */
3046 case GST_VIDEO_FORMAT_ARGB:
3047 case GST_VIDEO_FORMAT_ABGR:
3048 case GST_VIDEO_FORMAT_RGBA:
3049 case GST_VIDEO_FORMAT_BGRA:
3050 case GST_VIDEO_FORMAT_xRGB:
3051 case GST_VIDEO_FORMAT_xBGR:
3052 case GST_VIDEO_FORMAT_RGBx:
3053 case GST_VIDEO_FORMAT_BGRx:
3054 case GST_VIDEO_FORMAT_RGB:
3055 case GST_VIDEO_FORMAT_BGR:
3056 video_box->fill = (video_box->out_format == GST_VIDEO_FORMAT_BGR
3057 || video_box->out_format ==
3058 GST_VIDEO_FORMAT_RGB) ? fill_rgb24 : fill_rgb32;
3059 switch (video_box->in_format) {
3060 case GST_VIDEO_FORMAT_ARGB:
3061 case GST_VIDEO_FORMAT_ABGR:
3062 case GST_VIDEO_FORMAT_RGBA:
3063 case GST_VIDEO_FORMAT_BGRA:
3064 case GST_VIDEO_FORMAT_xRGB:
3065 case GST_VIDEO_FORMAT_xBGR:
3066 case GST_VIDEO_FORMAT_RGBx:
3067 case GST_VIDEO_FORMAT_BGRx:
3068 case GST_VIDEO_FORMAT_RGB:
3069 case GST_VIDEO_FORMAT_BGR:
3070 video_box->copy = copy_rgb32;
3072 case GST_VIDEO_FORMAT_AYUV:
3073 video_box->copy = copy_ayuv_rgb32;
/* Gray out: only gray-to-gray copies. */
3078 case GST_VIDEO_FORMAT_GRAY8:
3079 case GST_VIDEO_FORMAT_GRAY16_BE:
3080 case GST_VIDEO_FORMAT_GRAY16_LE:
3081 video_box->fill = fill_gray;
3082 switch (video_box->in_format) {
3083 case GST_VIDEO_FORMAT_GRAY8:
3084 case GST_VIDEO_FORMAT_GRAY16_BE:
3085 case GST_VIDEO_FORMAT_GRAY16_LE:
3086 video_box->copy = copy_packed_simple;
/* Packed 4:2:2 out: only from packed 4:2:2 input. */
3092 case GST_VIDEO_FORMAT_YUY2:
3093 case GST_VIDEO_FORMAT_YVYU:
3094 case GST_VIDEO_FORMAT_UYVY:
3095 video_box->fill = fill_yuy2;
3096 switch (video_box->in_format) {
3097 case GST_VIDEO_FORMAT_YUY2:
3098 case GST_VIDEO_FORMAT_YVYU:
3099 case GST_VIDEO_FORMAT_UYVY:
3100 video_box->copy = copy_yuy2_yuy2;
/* Other planar YUV out: only same-format copies. */
3106 case GST_VIDEO_FORMAT_Y444:
3107 case GST_VIDEO_FORMAT_Y42B:
3108 case GST_VIDEO_FORMAT_Y41B:
3109 video_box->fill = fill_planar_yuv;
3110 switch (video_box->in_format) {
3111 case GST_VIDEO_FORMAT_Y444:
3112 video_box->copy = copy_y444_y444;
3114 case GST_VIDEO_FORMAT_Y42B:
3115 video_box->copy = copy_y42b_y42b;
3117 case GST_VIDEO_FORMAT_Y41B:
3118 video_box->copy = copy_y41b_y41b;
/* Both pointers must be set for the conversion to be usable. */
3128 return video_box->fill != NULL && video_box->copy != NULL;
/* set_caps vmethod: parse format/size/colorimetry from the negotiated input
 * and output caps, apply autocrop if enabled, recompute passthrough and pick
 * the processing functions -- all under the instance mutex.  NOTE(review):
 * elided extraction -- the parse-failure checks and return statements are
 * not visible here; the trailing lines are the error path. */
3132 gst_video_box_set_caps (GstBaseTransform * trans, GstCaps * in, GstCaps * out)
3134 GstVideoBox *video_box = GST_VIDEO_BOX (trans);
3136 const gchar *matrix;
3138 g_mutex_lock (video_box->mutex);
3141 gst_video_format_parse_caps (in, &video_box->in_format,
3142 &video_box->in_width, &video_box->in_height);
3144 gst_video_format_parse_caps (out, &video_box->out_format,
3145 &video_box->out_width, &video_box->out_height);
/* Missing color-matrix defaults to SDTV (BT.601). */
3147 matrix = gst_video_parse_caps_color_matrix (in);
3148 video_box->in_sdtv = matrix ? g_str_equal (matrix, "sdtv") : TRUE;
3149 matrix = gst_video_parse_caps_color_matrix (out);
3150 video_box->out_sdtv = matrix ? g_str_equal (matrix, "sdtv") : TRUE;
3152 /* something wrong getting the caps */
3156 GST_DEBUG_OBJECT (trans, "Input w: %d h: %d", video_box->in_width,
3157 video_box->in_height);
3158 GST_DEBUG_OBJECT (trans, "Output w: %d h: %d", video_box->out_width,
3159 video_box->out_height);
/* Autocrop derives the box values from the negotiated sizes. */
3161 if (video_box->autocrop)
3162 gst_video_box_autocrop (video_box);
3164 /* recalc the transformation strategy */
3165 ret = gst_video_box_recalc_transform (video_box);
3168 ret = gst_video_box_select_processing_functions (video_box);
3169 g_mutex_unlock (video_box->mutex);
/* Error path: log the offending caps and release the mutex. */
3176 GST_DEBUG_OBJECT (video_box,
3177 "Invalid caps: %" GST_PTR_FORMAT " -> %" GST_PTR_FORMAT, in, out);
3178 g_mutex_unlock (video_box->mutex);
/* get_unit_size vmethod: one unit is one full video frame; size is computed
 * from the parsed format and dimensions.  NOTE(review): elided extraction --
 * the size out-parameter and return statements are not visible here. */
3184 gst_video_box_get_unit_size (GstBaseTransform * trans, GstCaps * caps,
3187 GstVideoFormat format;
3193 ret = gst_video_format_parse_caps (caps, &format, &width, &height);
3195 GST_ERROR_OBJECT (trans, "Invalid caps: %" GST_PTR_FORMAT, caps);
3199 *size = gst_video_format_get_size (format, width, height);
3201 GST_LOG_OBJECT (trans, "Returning from _unit_size %d", *size);
/* fixate_caps vmethod: nudge the unfixed othercaps' width/height towards the
 * dimensions of the already-fixed caps on the other side. */
3207 gst_video_box_fixate_caps (GstBaseTransform * trans,
3208 GstPadDirection direction, GstCaps * caps, GstCaps * othercaps)
3214 ret = gst_video_format_parse_caps (caps, NULL, &width, &height);
3218 s = gst_caps_get_structure (othercaps, 0);
3219 gst_structure_fixate_field_nearest_int (s, "width", width);
3220 gst_structure_fixate_field_nearest_int (s, "height", height);
/* src_event vmethod: translate pointer coordinates in upstream-bound
 * navigation events from output-frame space back into input-frame space by
 * adding the left/top box offsets, then chain up to the parent class. */
3224 gst_video_box_src_event (GstBaseTransform * trans, GstEvent * event)
3226 GstVideoBox *video_box = GST_VIDEO_BOX (trans);
3227 GstStructure *new_structure;
3228 const GstStructure *structure;
3229 const gchar *event_name;
/* Lock while reading box_left/box_top against concurrent property sets. */
3233 GST_OBJECT_LOCK (video_box);
/* Only rewrite the event if there actually is a horizontal/vertical shift. */
3234 if (GST_EVENT_TYPE (event) == GST_EVENT_NAVIGATION &&
3235 (video_box->box_left != 0 || video_box->box_top != 0)) {
3236 structure = gst_event_get_structure (event);
3237 event_name = gst_structure_get_string (structure, "event");
3240 (strcmp (event_name, "mouse-move") == 0 ||
3241 strcmp (event_name, "mouse-button-press") == 0 ||
3242 strcmp (event_name, "mouse-button-release") == 0)) {
3243 if (gst_structure_get_double (structure, "pointer_x", &pointer_x) &&
3244 gst_structure_get_double (structure, "pointer_y", &pointer_y)) {
3245 gdouble new_pointer_x, new_pointer_y;
3246 GstEvent *new_event;
/* Shift the pointer by the crop/border offset at the top-left corner. */
3248 new_pointer_x = pointer_x + video_box->box_left;
3249 new_pointer_y = pointer_y + video_box->box_top;
/* Events are immutable: build a replacement with the adjusted fields. */
3251 new_structure = gst_structure_copy (structure);
3252 gst_structure_set (new_structure,
3253 "pointer_x", G_TYPE_DOUBLE, (gdouble) (new_pointer_x),
3254 "pointer_y", G_TYPE_DOUBLE, (gdouble) (new_pointer_y), NULL);
3256 new_event = gst_event_new_navigation (new_structure);
3257 gst_event_unref (event);
3260 GST_WARNING_OBJECT (video_box, "Failed to read navigation event");
3264 GST_OBJECT_UNLOCK (video_box);
3266 return GST_BASE_TRANSFORM_CLASS (parent_class)->src_event (trans, event);
/* Core frame processing: compute the size of the region that survives
 * cropping, fill the output with the border colour where needed, then copy
 * the cropped input region to its place in the output frame.  Alpha values
 * are scaled from [0.0,1.0] doubles to [0,256] integers for the blenders.
 * NOTE(review): elided extraction -- the src_x/src_y/dest_x/dest_y offset
 * computations between lines 3326 and 3343 are not visible here. */
3270 gst_video_box_process (GstVideoBox * video_box, const guint8 * src,
3273 guint b_alpha = CLAMP (video_box->border_alpha * 256, 0, 256);
3274 guint i_alpha = CLAMP (video_box->alpha * 256, 0, 256);
3275 GstVideoBoxFill fill_type = video_box->fill_type;
3276 gint br, bl, bt, bb, crop_w, crop_h;
3281 br = video_box->box_right;
3282 bl = video_box->box_left;
3283 bt = video_box->box_top;
3284 bb = video_box->box_bottom;
/* Width kept from the input: only positive (cropping) values shrink it;
 * negative values are borders and do not consume input pixels. */
3286 if (br >= 0 && bl >= 0) {
3287 crop_w = video_box->in_width - (br + bl);
3288 } else if (br >= 0 && bl < 0) {
3289 crop_w = video_box->in_width - (br);
3290 } else if (br < 0 && bl >= 0) {
3291 crop_w = video_box->in_width - (bl);
3292 } else if (br < 0 && bl < 0) {
3293 crop_w = video_box->in_width;
/* Same logic for the vertical direction. */
3296 if (bb >= 0 && bt >= 0) {
3297 crop_h = video_box->in_height - (bb + bt);
3298 } else if (bb >= 0 && bt < 0) {
3299 crop_h = video_box->in_height - (bb);
3300 } else if (bb < 0 && bt >= 0) {
3301 crop_h = video_box->in_height - (bt);
3302 } else if (bb < 0 && bt < 0) {
3303 crop_h = video_box->in_height;
3306 GST_DEBUG_OBJECT (video_box, "Borders are: L:%d, R:%d, T:%d, B:%d", bl, br,
3308 GST_DEBUG_OBJECT (video_box, "Alpha value is: %u (frame) %u (border)",
/* Nothing of the input survives: emit a pure border-colour frame. */
3311 if (crop_h < 0 || crop_w < 0) {
3312 video_box->fill (fill_type, b_alpha, video_box->out_format, dest,
3313 video_box->out_sdtv, video_box->out_width, video_box->out_height);
/* No boxing at all: plain full-frame copy (format/colorimetry may differ). */
3314 } else if (bb == 0 && bt == 0 && br == 0 && bl == 0) {
3315 video_box->copy (i_alpha, video_box->out_format, dest, video_box->out_sdtv,
3316 video_box->out_width, video_box->out_height, 0, 0, video_box->in_format,
3317 src, video_box->in_sdtv, video_box->in_width, video_box->in_height, 0,
3320 gint src_x = 0, src_y = 0;
3321 gint dest_x = 0, dest_y = 0;
3323 /* Fill everything if a border should be added somewhere */
3324 if (bt < 0 || bb < 0 || br < 0 || bl < 0)
3325 video_box->fill (fill_type, b_alpha, video_box->out_format, dest,
3326 video_box->out_sdtv, video_box->out_width, video_box->out_height);
/* Copy the surviving crop_w x crop_h region to its offset in the output. */
3343 video_box->copy (i_alpha, video_box->out_format, dest, video_box->out_sdtv,
3344 video_box->out_width, video_box->out_height, dest_x, dest_y,
3345 video_box->in_format, src, video_box->in_sdtv, video_box->in_width,
3346 video_box->in_height, src_x, src_y, crop_w, crop_h);
3349 GST_LOG_OBJECT (video_box, "image created");
/* before_transform vmethod: convert the buffer timestamp to stream time and
 * sync any GstController-animated properties (box values, alpha, fill) to
 * that time before the frame is processed. */
3353 gst_video_box_before_transform (GstBaseTransform * trans, GstBuffer * in)
3355 GstVideoBox *video_box = GST_VIDEO_BOX (trans);
3356 GstClockTime timestamp, stream_time;
3358 timestamp = GST_BUFFER_TIMESTAMP (in);
3360 gst_segment_to_stream_time (&trans->segment, GST_FORMAT_TIME, timestamp);
3362 GST_DEBUG_OBJECT (video_box, "sync to %" GST_TIME_FORMAT,
3363 GST_TIME_ARGS (timestamp));
/* Skip controller sync for buffers without a valid stream time. */
3365 if (GST_CLOCK_TIME_IS_VALID (stream_time))
3366 gst_object_sync_values (GST_OBJECT (video_box), stream_time);
3369 static GstFlowReturn
/* transform vmethod: run gst_video_box_process() on the raw buffer data.
 * The instance mutex serializes against concurrent property changes. */
3370 gst_video_box_transform (GstBaseTransform * trans, GstBuffer * in,
3373 GstVideoBox *video_box = GST_VIDEO_BOX (trans);
3374 const guint8 *indata;
3377 indata = GST_BUFFER_DATA (in);
3378 outdata = GST_BUFFER_DATA (out);
3380 g_mutex_lock (video_box->mutex);
3381 gst_video_box_process (video_box, indata, outdata);
3382 g_mutex_unlock (video_box->mutex);
/* FIXME: 0.11 merge with videocrop plugin */
/* Plugin entry point: set up the debug category and register the element. */
3388 plugin_init (GstPlugin * plugin)
3390 GST_DEBUG_CATEGORY_INIT (videobox_debug, "videobox", 0,
3391 "Resizes a video by adding borders or cropping");
3393 return gst_element_register (plugin, "videobox", GST_RANK_NONE,
3394 GST_TYPE_VIDEO_BOX);
3397 GST_PLUGIN_DEFINE (GST_VERSION_MAJOR,
3400 "resizes a video by adding borders or cropping",
3401 plugin_init, VERSION, GST_LICENSE, GST_PACKAGE_NAME, GST_PACKAGE_ORIGIN)