2 * Copyright (C) 1999 Erik Walthinsen <omega@cse.ogi.edu>
3 * Copyright (C) 2006 Tim-Philipp Müller <tim centricular net>
4 * Copyright (C) 2010 Sebastian Dröge <sebastian.droege@collabora.co.uk>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Library General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Library General Public License for more details.
16 * You should have received a copy of the GNU Library General Public
17 * License along with this library; if not, write to the
18 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
19 * Boston, MA 02110-1301, USA.
22 * SECTION:element-videobox
23 * @see_also: #GstVideoCrop
25 * This plugin crops or enlarges the image. It takes 4 values as input, a
26 * top, bottom, left and right offset. Positive values will crop that many
27 * pixels from the respective border of the image, negative values will add
28 * that many pixels. When pixels are added, you can specify their color.
29 * Some predefined colors are usable with an enum property.
31 * The plugin is alpha channel aware and will try to negotiate with a format
32 * that supports alpha channels first. When alpha channel is active two
33 * other properties, alpha and border_alpha can be used to set the alpha
34 * values of the inner picture and the border respectively. An alpha value of
35 * 0.0 means total transparency, 1.0 is opaque.
37 * The videobox plugin has many uses such as doing a mosaic of pictures,
38 * letterboxing video, cutting out pieces of video, picture in picture, etc..
40 * Setting autocrop to true changes the behavior of the plugin so that
41 * caps determine crop properties rather than the other way around: given
42 * input and output dimensions, the crop values are selected so that the
43 * smaller frame is effectively centered in the larger frame. This
44 * involves either cropping or padding.
46 * If you use autocrop there is little point in setting the other
47 * properties manually because they will be overridden if the caps change,
48 * but nothing stops you from doing so.
52 * gst-launch-1.0 videotestsrc ! videobox autocrop=true ! \
53 * "video/x-raw, width=600, height=400" ! videoconvert ! ximagesink
61 #include "gstvideobox.h"
62 #include "gstvideoboxorc.h"
67 GST_DEBUG_CATEGORY_STATIC (videobox_debug);
68 #define GST_CAT_DEFAULT videobox_debug
/* Border fill-color component tables, indexed by GstVideoBoxFill.
 * One 8-bit component value per predefined fill color; SDTV tables use
 * BT.601 colorimetry, HDTV tables use BT.709. */
70 /* From videotestsrc.c */
71 static const guint8 yuv_sdtv_colors_Y[VIDEO_BOX_FILL_LAST] =
72 { 16, 145, 41, 81, 210, 235 };
73 static const guint8 yuv_sdtv_colors_U[VIDEO_BOX_FILL_LAST] =
74 { 128, 54, 240, 90, 16, 128 };
75 static const guint8 yuv_sdtv_colors_V[VIDEO_BOX_FILL_LAST] =
76 { 128, 34, 110, 240, 146, 128 };
78 static const guint8 yuv_hdtv_colors_Y[VIDEO_BOX_FILL_LAST] =
79 { 16, 173, 32, 63, 219, 235 };
80 static const guint8 yuv_hdtv_colors_U[VIDEO_BOX_FILL_LAST] =
81 { 128, 42, 240, 102, 16, 128 };
82 static const guint8 yuv_hdtv_colors_V[VIDEO_BOX_FILL_LAST] =
83 { 128, 26, 118, 240, 138, 128 };
85 static const guint8 rgb_colors_R[VIDEO_BOX_FILL_LAST] =
86 { 0, 0, 0, 255, 255, 255 };
87 static const guint8 rgb_colors_G[VIDEO_BOX_FILL_LAST] =
88 { 0, 255, 0, 0, 255, 255 };
89 static const guint8 rgb_colors_B[VIDEO_BOX_FILL_LAST] =
90 { 0, 0, 255, 0, 0, 255 };
/* Fixed-point color-conversion matrices: 4 coefficients per output
 * component (3 multipliers + 1 offset), 8 fractional bits — consumed
 * via APPLY_MATRIX below.
 * NOTE(review): several matrix rows are missing from this excerpt of
 * the file; do not edit these tables without the complete source. */
92 /* Generated by -bad/ext/cog/generate_tables */
93 static const int cog_ycbcr_to_rgb_matrix_8bit_hdtv[] = {
95 298, -55, -136, 19681,
99 static const int cog_ycbcr_to_rgb_matrix_8bit_sdtv[] = {
101 298, -100, -208, 34707,
105 static const gint cog_rgb_to_ycbcr_matrix_8bit_hdtv[] = {
107 -26, -87, 112, 32768,
108 112, -102, -10, 32768,
111 static const gint cog_rgb_to_ycbcr_matrix_8bit_sdtv[] = {
113 -38, -74, 112, 32768,
114 112, -94, -18, 32768,
117 static const gint cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit[] = {
118 256, -30, -53, 10600,
123 static const gint cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit[] = {
129 static const gint cog_identity_matrix_8bit[] = {
/* Apply row `o` of a 4x3 fixed-point matrix to (v1,v2,v3); the >> 8
 * removes the 8 fractional bits. Result is NOT clamped — callers CLAMP. */
135 #define APPLY_MATRIX(m,o,v1,v2,v3) ((m[o*4] * v1 + m[o*4+1] * v2 + m[o*4+2] * v3 + m[o*4+3]) >> 8)
/* fill_ayuv:
 * Fill an entire packed-AYUV frame with the selected border color.
 * @fill_type: index into the yuv_*_colors tables
 * @b_alpha: border alpha, clamped to 0..255
 * @frame: destination AYUV frame
 * @sdtv: TRUE selects the BT.601 (SDTV) color table, FALSE BT.709 (HDTV)
 * NOTE(review): interior lines are missing from this excerpt (local
 * declarations, the if/else selecting the SDTV/HDTV branch, the row
 * advance and closing braces); visible code is kept verbatim. */
138 fill_ayuv (GstVideoBoxFill fill_type, guint b_alpha,
139 GstVideoFrame * frame, gboolean sdtv)
146 width = GST_VIDEO_FRAME_WIDTH (frame);
147 height = GST_VIDEO_FRAME_HEIGHT (frame);
149 b_alpha = CLAMP (b_alpha, 0, 255);
/* Pack A|Y|U|V into one big-endian 32-bit pixel (SDTV color table). */
152 empty_pixel = GUINT32_FROM_BE ((b_alpha << 24) |
153 (yuv_sdtv_colors_Y[fill_type] << 16) |
154 (yuv_sdtv_colors_U[fill_type] << 8) | yuv_sdtv_colors_V[fill_type]);
/* Same packing with the HDTV color table. */
156 empty_pixel = GUINT32_FROM_BE ((b_alpha << 24) |
157 (yuv_hdtv_colors_Y[fill_type] << 16) |
158 (yuv_hdtv_colors_U[fill_type] << 8) | yuv_hdtv_colors_V[fill_type]);
160 dest = GST_VIDEO_FRAME_PLANE_DATA (frame, 0);
161 stride = GST_VIDEO_FRAME_PLANE_STRIDE (frame, 0);
/* Fast path: plane is contiguous, splat the whole frame in one call;
 * otherwise splat one row per iteration. */
163 if (G_LIKELY (stride == 4 * width))
164 video_box_orc_splat_u32 ((guint32 *) dest, empty_pixel, width * height);
166 for (; height; --height) {
167 video_box_orc_splat_u32 ((guint32 *) dest, empty_pixel, width);
/* copy_ayuv_ayuv:
 * Copy a w x h region from an AYUV frame into an AYUV frame at
 * (dest_x, dest_y), scaling the source alpha by i_alpha (>> 8) and
 * converting SDTV<->HDTV colorimetry per-pixel when the two frames
 * disagree; otherwise Y/U/V bytes are copied through unchanged.
 * NOTE(review): interior lines (locals, matrix memcpy lead-in, loop
 * row advances, closing braces) are missing from this excerpt. */
174 copy_ayuv_ayuv (guint i_alpha, GstVideoFrame * dest_frame,
175 gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src_frame,
176 gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
183 src_stride = GST_VIDEO_FRAME_PLANE_STRIDE (src_frame, 0);
184 dest_stride = GST_VIDEO_FRAME_PLANE_STRIDE (dest_frame, 0);
186 src = GST_VIDEO_FRAME_PLANE_DATA (src_frame, 0);
187 dest = GST_VIDEO_FRAME_PLANE_DATA (dest_frame, 0);
/* Advance both pointers to the top-left corner of the copied region;
 * 4 bytes per AYUV pixel. */
189 dest = dest + dest_y * dest_stride + dest_x * 4;
190 src = src + src_y * src_stride + src_x * 4;
/* Colorimetry differs: pick the HDTV->SDTV or SDTV->HDTV matrix and
 * convert each pixel while copying. */
194 if (dest_sdtv != src_sdtv) {
199 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
200 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
202 for (i = 0; i < h; i++) {
203 for (j = 0; j < w; j += 4) {
/* Byte 0 is alpha: scale it by i_alpha (fixed-point, 8 bits). */
205 dest[j] = (src[j] * i_alpha) >> 8;
209 dest[j + 1] = APPLY_MATRIX (matrix, 0, y, u, v);
210 dest[j + 2] = APPLY_MATRIX (matrix, 1, y, u, v);
211 dest[j + 3] = APPLY_MATRIX (matrix, 2, y, u, v);
/* Same colorimetry: straight copy, only alpha is rescaled. */
217 for (i = 0; i < h; i++) {
218 for (j = 0; j < w; j += 4) {
220 dest[j] = (src[j] * i_alpha) >> 8;
221 dest[j + 1] = src[j + 1];
222 dest[j + 2] = src[j + 2];
223 dest[j + 3] = src[j + 3];
/* copy_ayuv_i420:
 * Copy a w x h region from a packed AYUV frame into a planar I420
 * frame at (dest_x, dest_y). Because I420 subsamples chroma 2x2,
 * regions that do not start/end on macro-pixel (2x2) boundaries are
 * blended with the chroma already present in the destination, using
 * weights proportional to the covered fraction of the macro pixel.
 * Source alpha is ignored; colorimetry is converted via `matrix` when
 * src_sdtv != dest_sdtv, else the identity matrix is used.
 * NOTE(review): many interior lines (locals, some branch keywords,
 * index resets, closing braces) are missing from this excerpt; the
 * visible code is kept verbatim. */
232 copy_ayuv_i420 (guint i_alpha, GstVideoFrame * dest_frame,
233 gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src_frame,
234 gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
237 guint8 *destY, *destY2, *destU, *destV;
238 gint dest_strideY, dest_strideU, dest_strideV;
247 gint dest_height, src_height, dest_width;
249 dest_height = GST_VIDEO_FRAME_HEIGHT (dest_frame);
250 dest_width = GST_VIDEO_FRAME_WIDTH (dest_frame);
251 src_height = GST_VIDEO_FRAME_HEIGHT (src_frame);
253 dest_strideY = GST_VIDEO_FRAME_COMP_STRIDE (dest_frame, 0);
254 dest_strideU = GST_VIDEO_FRAME_COMP_STRIDE (dest_frame, 1);
255 dest_strideV = GST_VIDEO_FRAME_COMP_STRIDE (dest_frame, 2);
257 src_stride = GST_VIDEO_FRAME_PLANE_STRIDE (src_frame, 0);
259 destY = GST_VIDEO_FRAME_COMP_DATA (dest_frame, 0);
260 destU = GST_VIDEO_FRAME_COMP_DATA (dest_frame, 1);
261 destV = GST_VIDEO_FRAME_COMP_DATA (dest_frame, 2);
/* destY2/src2 point at the second luma row of the current 2-row
 * macro-pixel band; clamped to the same row at the frame edge. */
263 destY = destY + dest_y * dest_strideY + dest_x;
264 destY2 = (dest_y < dest_height) ? destY + dest_strideY : destY;
265 destU = destU + (dest_y / 2) * dest_strideU + dest_x / 2;
266 destV = destV + (dest_y / 2) * dest_strideV + dest_x / 2;
268 src = GST_VIDEO_FRAME_PLANE_DATA (src_frame, 0);
269 src = src + src_y * src_stride + src_x * 4;
270 src2 = (src_y < src_height) ? src + src_stride : src;
/* Select the conversion matrix once; identity when colorimetry matches. */
275 if (src_sdtv != dest_sdtv)
277 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
278 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
280 memcpy (matrix, cog_identity_matrix_8bit, 12 * sizeof (gint));
282 /* 1. Handle the first destination scanline specially if it
283 * doesn't start at the macro pixel boundary, i.e. blend
284 * with the background! */
285 if (dest_y % 2 == 1) {
286 /* 1.1. Handle the first destination pixel if it doesn't
287 * start at the macro pixel boundary, i.e. blend with
289 if (dest_x % 2 == 1) {
294 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
/* Only 1/4 of the 2x2 macro pixel is covered: weight old chroma 3:1. */
296 CLAMP ((3 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
299 CLAMP ((3 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 4, 0,
309 /* 1.2. Copy all macro pixels from the source to the destination
310 * but blend with the background because we're only filling
311 * the lower part of the macro pixels. */
312 for (; j < w - 1; j += 2) {
313 y1 = src[4 * y_idx + 1];
314 y2 = src[4 * y_idx + 4 + 1];
316 u1 = src[4 * y_idx + 2];
317 u2 = src[4 * y_idx + 4 + 2];
319 v1 = src[4 * y_idx + 3];
320 v2 = src[4 * y_idx + 4 + 3];
322 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
323 destY[y_idx + 1] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
324 destU[uv_idx] = CLAMP (
325 (2 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
326 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
327 destV[uv_idx] = CLAMP (
328 (2 * destV[uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
329 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
335 /* 1.3. Now copy the last pixel if one exists and blend it
336 * with the background because we only fill part of
337 * the macro pixel. In case this is the last pixel of
338 * the destination we will fill a larger part. */
339 if (j == w - 1 && j == dest_width - 1) {
340 y1 = src[4 * y_idx + 1];
341 u1 = src[4 * y_idx + 2];
342 v1 = src[4 * y_idx + 3];
344 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
345 destU[uv_idx] = CLAMP (
346 (destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
347 destV[uv_idx] = CLAMP (
348 (destV[uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 2, 0, 255);
349 } else if (j == w - 1) {
350 y1 = src[4 * y_idx + 1];
351 u1 = src[4 * y_idx + 2];
352 v1 = src[4 * y_idx + 3];
354 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
355 destU[uv_idx] = CLAMP (
356 (3 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
359 CLAMP ((3 * destV[uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 4,
/* Advance one luma row (chroma rows advance at full stride since we
 * consumed the bottom half of this macro-pixel band). */
363 destY += dest_strideY;
364 destY2 += dest_strideY;
365 destU += dest_strideU;
366 destV += dest_strideV;
374 /* 2. Copy all macro pixel scanlines, the destination scanline
375 * now starts at macro pixel boundary. */
376 for (; i < h - 1; i += 2) {
377 /* 2.1. Handle the first destination pixel if it doesn't
378 * start at the macro pixel boundary, i.e. blend with
380 if (dest_x % 2 == 1) {
382 y2 = src2[4 * 0 + 1];
384 u2 = src2[4 * 0 + 2];
386 v2 = src2[4 * 0 + 3];
388 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
389 destY2[0] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
391 (2 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1,
392 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
394 (2 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1,
395 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
403 /* 2.2. Copy all macro pixels from the source to the destination.
404 * All pixels now start at macro pixel boundary, i.e. no
405 * blending with the background is necessary. */
406 for (; j < w - 1; j += 2) {
407 y1 = src[4 * y_idx + 1];
408 y2 = src[4 * y_idx + 4 + 1];
409 y3 = src2[4 * y_idx + 1];
410 y4 = src2[4 * y_idx + 4 + 1];
412 u1 = src[4 * y_idx + 2];
413 u2 = src[4 * y_idx + 4 + 2];
414 u3 = src2[4 * y_idx + 2];
415 u4 = src2[4 * y_idx + 4 + 2];
417 v1 = src[4 * y_idx + 3];
418 v2 = src[4 * y_idx + 4 + 3];
419 v3 = src2[4 * y_idx + 3];
420 v4 = src2[4 * y_idx + 4 + 3];
422 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
423 destY[y_idx + 1] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
424 destY2[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y3, u3, v3), 0, 255);
425 destY2[y_idx + 1] = CLAMP (APPLY_MATRIX (matrix, 0, y4, u4, v4), 0, 255);
/* Full 2x2 coverage: chroma is the average of the four converted samples. */
427 destU[uv_idx] = CLAMP (
428 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 1, y2,
429 u2, v2) + APPLY_MATRIX (matrix, 1, y3, u3,
430 v3) + APPLY_MATRIX (matrix, 1, y4, u4, v4)) / 4, 0, 255);
431 destV[uv_idx] = CLAMP (
432 (APPLY_MATRIX (matrix, 2, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
433 u2, v2) + APPLY_MATRIX (matrix, 2, y3, u3,
434 v3) + APPLY_MATRIX (matrix, 2, y4, u4, v4)) / 4, 0, 255);
440 /* 2.3. Now copy the last pixel if one exists and blend it
441 * with the background because we only fill part of
442 * the macro pixel. In case this is the last pixel of
443 * the destination we will fill a larger part. */
444 if (j == w - 1 && j == dest_width - 1) {
445 y1 = src[4 * y_idx + 1];
446 y2 = src2[4 * y_idx + 1];
448 u1 = src[4 * y_idx + 2];
449 u2 = src2[4 * y_idx + 2];
451 v1 = src[4 * y_idx + 3];
452 v2 = src2[4 * y_idx + 3];
454 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
455 destY2[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
/* NOTE(review): both U and V below mix matrix rows 1 and 2 — looks
 * like a copy-paste slip (U should use row 1 twice, V row 2 twice);
 * verify against the upstream file before changing. */
456 destU[uv_idx] = CLAMP (
457 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
458 u2, v2)) / 2, 0, 255);
459 destV[uv_idx] = CLAMP (
460 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
461 u2, v2)) / 2, 0, 255);
462 } else if (j == w - 1) {
463 y1 = src[4 * y_idx + 1];
464 y2 = src2[4 * y_idx + 1];
466 u1 = src[4 * y_idx + 2];
467 u2 = src2[4 * y_idx + 2];
469 v1 = src[4 * y_idx + 3];
470 v2 = src2[4 * y_idx + 3];
472 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
473 destY2[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
474 destU[uv_idx] = CLAMP (
475 (2 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
476 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
477 destV[uv_idx] = CLAMP (
478 (2 * destV[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
479 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
/* Advance a full 2-row band: two luma rows, one chroma row. */
482 destY += 2 * dest_strideY;
483 destY2 += 2 * dest_strideY;
484 destU += dest_strideU;
485 destV += dest_strideV;
486 src += 2 * src_stride;
487 src2 += 2 * src_stride;
490 /* 3. Handle the last scanline if one exists. This again
491 * doesn't start at macro pixel boundary but should
492 * only fill the upper part of the macro pixels. */
493 if (i == h - 1 && i == dest_height - 1) {
494 /* 3.1. Handle the first destination pixel if it doesn't
495 * start at the macro pixel boundary, i.e. blend with
497 if (dest_x % 2 == 1) {
502 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
504 CLAMP ((destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
506 CLAMP ((destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 2, 0, 255);
515 /* 3.2. Copy all macro pixels from the source to the destination
516 * but blend with the background because we're only filling
517 * the upper part of the macro pixels. */
518 for (; j < w - 1; j += 2) {
519 y1 = src[4 * y_idx + 1];
520 y2 = src[4 * y_idx + 4 + 1];
522 u1 = src[4 * y_idx + 2];
523 u2 = src[4 * y_idx + 4 + 2];
525 v1 = src[4 * y_idx + 3];
526 v2 = src[4 * y_idx + 4 + 3];
528 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
529 destY[y_idx + 1] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
531 destU[uv_idx] = CLAMP (
532 (2 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
533 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
534 destV[uv_idx] = CLAMP (
535 (2 * destV[uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
536 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
542 /* 3.3. Now copy the last pixel if one exists and blend it
543 * with the background because we only fill part of
544 * the macro pixel. In case this is the last pixel of
545 * the destination we will fill a larger part. */
546 if (j == w - 1 && j == dest_width - 1) {
547 y1 = src[4 * y_idx + 1];
548 u1 = src[4 * y_idx + 2];
549 v1 = src[4 * y_idx + 3];
551 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
552 destU[uv_idx] = CLAMP (
553 (destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
/* NOTE(review): V below uses matrix row 1; the parallel code above
 * uses row 2 for V — likely a copy-paste bug, confirm upstream. */
554 destV[uv_idx] = CLAMP (
555 (destV[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
556 } else if (j == w - 1) {
557 y1 = src[4 * y_idx + 1];
558 u1 = src[4 * y_idx + 2];
559 v1 = src[4 * y_idx + 3];
561 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
562 destU[uv_idx] = CLAMP (
563 (3 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
566 CLAMP ((3 * destV[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4,
569 } else if (i == h - 1) {
570 /* 3.1. Handle the first destination pixel if it doesn't
571 * start at the macro pixel boundary, i.e. blend with
573 if (dest_x % 2 == 1) {
578 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
580 CLAMP ((3 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
583 CLAMP ((3 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 4, 0,
593 /* 3.2. Copy all macro pixels from the source to the destination
594 * but blend with the background because we're only filling
595 * the upper part of the macro pixels. */
596 for (; j < w - 1; j += 2) {
597 y1 = src[4 * y_idx + 1];
598 y2 = src[4 * y_idx + 4 + 1];
600 u1 = src[4 * y_idx + 2];
601 u2 = src[4 * y_idx + 4 + 2];
603 v1 = src[4 * y_idx + 3];
604 v2 = src[4 * y_idx + 4 + 3];
606 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
607 destY[y_idx + 1] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
609 destU[uv_idx] = CLAMP (
610 (2 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
611 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
612 destV[uv_idx] = CLAMP (
613 (2 * destV[uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
614 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
620 /* 3.3. Now copy the last pixel if one exists and blend it
621 * with the background because we only fill part of
622 * the macro pixel. In case this is the last pixel of
623 * the destination we will fill a larger part. */
624 if (j == w - 1 && j == dest_width - 1) {
625 y1 = src[4 * y_idx + 1];
626 u1 = src[4 * y_idx + 2];
627 v1 = src[4 * y_idx + 3];
629 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
630 destU[uv_idx] = CLAMP (
631 (destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
/* NOTE(review): row 1 used for V here too — see note above. */
632 destV[uv_idx] = CLAMP (
633 (destV[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
634 } else if (j == w - 1) {
635 y1 = src[4 * y_idx + 1];
636 u1 = src[4 * y_idx + 2];
637 v1 = src[4 * y_idx + 3];
639 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
640 destU[uv_idx] = CLAMP (
641 (3 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
644 CLAMP ((3 * destV[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4,
/* fill_planar_yuv:
 * Fill all three planes of a planar YUV frame (I420/Y444/Y42B/...)
 * with the selected border color; alpha is unused for these formats.
 * @fill_type: index into the yuv_*_colors tables
 * @sdtv: TRUE selects BT.601 colors, FALSE BT.709
 * NOTE(review): the if/else around the SDTV/HDTV table selection and
 * the per-row pointer advances/braces are missing from this excerpt. */
651 fill_planar_yuv (GstVideoBoxFill fill_type, guint b_alpha,
652 GstVideoFrame * frame, gboolean sdtv)
654 guint8 empty_pixel[3];
655 guint8 *destY, *destU, *destV;
656 gint strideY, strideU, strideV;
657 gint heightY, heightU, heightV;
658 gint widthY, widthU, widthV;
661 empty_pixel[0] = yuv_sdtv_colors_Y[fill_type];
662 empty_pixel[1] = yuv_sdtv_colors_U[fill_type];
663 empty_pixel[2] = yuv_sdtv_colors_V[fill_type];
665 empty_pixel[0] = yuv_hdtv_colors_Y[fill_type];
666 empty_pixel[1] = yuv_hdtv_colors_U[fill_type];
667 empty_pixel[2] = yuv_hdtv_colors_V[fill_type];
670 strideY = GST_VIDEO_FRAME_COMP_STRIDE (frame, 0);
671 strideU = GST_VIDEO_FRAME_COMP_STRIDE (frame, 1);
672 strideV = GST_VIDEO_FRAME_COMP_STRIDE (frame, 2);
674 destY = GST_VIDEO_FRAME_COMP_DATA (frame, 0);
675 destU = GST_VIDEO_FRAME_COMP_DATA (frame, 1);
676 destV = GST_VIDEO_FRAME_COMP_DATA (frame, 2);
678 widthY = GST_VIDEO_FRAME_COMP_WIDTH (frame, 0);
679 widthU = GST_VIDEO_FRAME_COMP_WIDTH (frame, 1);
680 widthV = GST_VIDEO_FRAME_COMP_WIDTH (frame, 2);
682 heightY = GST_VIDEO_FRAME_COMP_HEIGHT (frame, 0);
683 heightU = GST_VIDEO_FRAME_COMP_HEIGHT (frame, 1);
684 heightV = GST_VIDEO_FRAME_COMP_HEIGHT (frame, 2);
/* Per plane: one big memset when the plane is contiguous, otherwise
 * one memset per row. */
686 if (strideY == widthY) {
687 memset (destY, empty_pixel[0], strideY * heightY);
688 } else if (heightY) {
689 for (; heightY; --heightY) {
690 memset (destY, empty_pixel[0], widthY);
694 if (strideU == widthU) {
695 memset (destU, empty_pixel[1], strideU * heightU);
696 } else if (heightU) {
697 for (; heightU; --heightU) {
698 memset (destU, empty_pixel[1], widthU);
702 if (strideV == widthV) {
703 memset (destV, empty_pixel[2], strideV * heightV);
704 } else if (heightV) {
705 for (; heightV; --heightV) {
706 memset (destV, empty_pixel[2], widthV);
/* copy_y444_y444:
 * Copy a w x h region between two Y444 frames. Y444 has no chroma
 * subsampling, so no macro-pixel blending is needed: either convert
 * per-sample through the colorimetry matrix, or memcpy each row of
 * each plane. i_alpha is unused (no alpha in Y444).
 * NOTE(review): interior lines (locals, matrix memcpy lead-in, the
 * converted-sample stores, src pointer advances, braces) are missing
 * from this excerpt. */
713 copy_y444_y444 (guint i_alpha, GstVideoFrame * dest,
714 gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src,
715 gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
718 guint8 *destY, *destU, *destV;
719 const guint8 *srcY, *srcU, *srcV;
720 gint dest_strideY, dest_strideU, dest_strideV;
721 gint src_strideY, src_strideU, src_strideV;
723 dest_strideY = GST_VIDEO_FRAME_COMP_STRIDE (dest, 0);
724 dest_strideU = GST_VIDEO_FRAME_COMP_STRIDE (dest, 1);
725 dest_strideV = GST_VIDEO_FRAME_COMP_STRIDE (dest, 2);
727 src_strideY = GST_VIDEO_FRAME_COMP_STRIDE (src, 0);
728 src_strideU = GST_VIDEO_FRAME_COMP_STRIDE (src, 1);
729 src_strideV = GST_VIDEO_FRAME_COMP_STRIDE (src, 2);
731 destY = GST_VIDEO_FRAME_COMP_DATA (dest, 0);
732 destU = GST_VIDEO_FRAME_COMP_DATA (dest, 1);
733 destV = GST_VIDEO_FRAME_COMP_DATA (dest, 2);
735 srcY = GST_VIDEO_FRAME_COMP_DATA (src, 0);
736 srcU = GST_VIDEO_FRAME_COMP_DATA (src, 1);
737 srcV = GST_VIDEO_FRAME_COMP_DATA (src, 2);
/* Position all six plane pointers at the region's top-left corner. */
739 destY = destY + dest_y * dest_strideY + dest_x;
740 destU = destU + dest_y * dest_strideU + dest_x;
741 destV = destV + dest_y * dest_strideV + dest_x;
743 srcY = srcY + src_y * src_strideY + src_x;
744 srcU = srcU + src_y * src_strideU + src_x;
745 srcV = srcV + src_y * src_strideV + src_x;
/* Colorimetry differs: convert every sample through the matrix. */
747 if (src_sdtv != dest_sdtv) {
752 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
753 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
755 for (i = 0; i < h; i++) {
756 for (j = 0; j < w; j++) {
757 y = APPLY_MATRIX (matrix, 0, srcY[j], srcU[j], srcV[j]);
758 u = APPLY_MATRIX (matrix, 1, srcY[j], srcU[j], srcV[j]);
759 v = APPLY_MATRIX (matrix, 2, srcY[j], srcU[j], srcV[j]);
765 destY += dest_strideY;
766 destU += dest_strideU;
767 destV += dest_strideV;
/* Same colorimetry: straight row-by-row copy of all three planes. */
774 for (i = 0; i < h; i++) {
775 memcpy (destY, srcY, w);
776 memcpy (destU, srcU, w);
777 memcpy (destV, srcV, w);
779 destY += dest_strideY;
780 destU += dest_strideU;
781 destV += dest_strideV;
/* copy_y42b_y42b:
 * Copy a w x h region between two Y42B (4:2:2) frames. Chroma is
 * subsampled 2x horizontally only, so each scanline is handled
 * independently; horizontal macro-pixel (2x1) edges are blended with
 * the chroma already in the destination. i_alpha is unused.
 * NOTE(review): interior lines (locals, matrix memcpy lead-in, some
 * stores and pointer advances, braces) are missing from this excerpt. */
791 copy_y42b_y42b (guint i_alpha, GstVideoFrame * dest,
792 gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src,
793 gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
796 guint8 *destY, *destU, *destV;
797 const guint8 *srcY, *srcU, *srcV;
798 gint dest_strideY, dest_strideU, dest_strideV;
799 gint src_strideY, src_strideU, src_strideV;
800 gint src_y_idx, src_uv_idx;
801 gint dest_y_idx, dest_uv_idx;
808 dest_width = GST_VIDEO_FRAME_WIDTH (dest);
810 dest_strideY = GST_VIDEO_FRAME_COMP_STRIDE (dest, 0);
811 dest_strideU = GST_VIDEO_FRAME_COMP_STRIDE (dest, 1);
812 dest_strideV = GST_VIDEO_FRAME_COMP_STRIDE (dest, 2);
814 src_strideY = GST_VIDEO_FRAME_COMP_STRIDE (src, 0);
815 src_strideU = GST_VIDEO_FRAME_COMP_STRIDE (src, 1);
816 src_strideV = GST_VIDEO_FRAME_COMP_STRIDE (src, 2);
818 destY = GST_VIDEO_FRAME_COMP_DATA (dest, 0);
819 destU = GST_VIDEO_FRAME_COMP_DATA (dest, 1);
820 destV = GST_VIDEO_FRAME_COMP_DATA (dest, 2);
822 srcY = GST_VIDEO_FRAME_COMP_DATA (src, 0);
823 srcU = GST_VIDEO_FRAME_COMP_DATA (src, 1);
824 srcV = GST_VIDEO_FRAME_COMP_DATA (src, 2);
/* Chroma planes are half-width in 4:2:2, hence the /2 on x offsets. */
826 destY = destY + dest_y * dest_strideY + dest_x;
827 destU = destU + dest_y * dest_strideU + dest_x / 2;
828 destV = destV + dest_y * dest_strideV + dest_x / 2;
830 srcY = srcY + src_y * src_strideY + src_x;
831 srcU = srcU + src_y * src_strideU + src_x / 2;
832 srcV = srcV + src_y * src_strideV + src_x / 2;
/* Select the conversion matrix once; identity when colorimetry matches. */
837 if (src_sdtv != dest_sdtv)
839 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
840 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
842 memcpy (matrix, cog_identity_matrix_8bit, 12 * sizeof (gint));
844 /* 1. Copy all macro pixel scanlines, the destination scanline
845 * now starts at macro pixel boundary. */
846 for (i = dest_y; i < h; i++) {
847 /* 1.1. Handle the first destination pixel if it doesn't
848 * start at the macro pixel boundary, i.e. blend with
850 if (dest_x % 2 == 1) {
855 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
857 (destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
859 (destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 2, 0, 255);
861 src_y_idx = dest_y_idx = dest_uv_idx = 1;
862 src_uv_idx = (src_x % 2) + 1;
865 src_y_idx = dest_y_idx = dest_uv_idx = 0;
866 src_uv_idx = (src_x % 2);
869 /* 1.2. Copy all macro pixels from the source to the destination.
870 * All pixels now start at macro pixel boundary, i.e. no
871 * blending with the background is necessary. */
872 for (; j < w - 1; j += 2) {
873 y1 = srcY[src_y_idx];
874 y2 = srcY[src_y_idx + 1];
876 u1 = srcU[src_uv_idx / 2];
877 v1 = srcV[src_uv_idx / 2];
879 u2 = srcU[src_uv_idx / 2];
880 v2 = srcV[src_uv_idx / 2];
883 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
884 destY[dest_y_idx + 1] =
885 CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
/* Full 2x1 coverage: chroma is the average of both converted samples. */
887 destU[dest_uv_idx] = CLAMP (
888 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 1, y2,
889 u2, v2)) / 2, 0, 255);
890 destV[dest_uv_idx] = CLAMP (
891 (APPLY_MATRIX (matrix, 2, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
892 u2, v2)) / 2, 0, 255);
899 /* 1.3. Now copy the last pixel if one exists and blend it
900 * with the background because we only fill part of
901 * the macro pixel. In case this is the last pixel of
902 * the destination we will fill a larger part. */
903 if (j == w - 1 && j == dest_width - 1) {
904 y1 = srcY[src_y_idx];
905 u1 = srcU[src_uv_idx / 2];
906 v1 = srcV[src_uv_idx / 2];
908 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
909 destU[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
/* NOTE(review): V below uses matrix row 1 — row 2 is used for V
 * everywhere else; likely a copy-paste bug, confirm upstream. */
910 destV[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
911 } else if (j == w - 1) {
912 y1 = srcY[src_y_idx];
913 u1 = srcU[src_uv_idx / 2];
914 v1 = srcV[src_uv_idx / 2];
916 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
917 destU[dest_uv_idx] = CLAMP (
918 (destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
920 destV[dest_uv_idx] = CLAMP (
921 (destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
/* Advance one row in every plane (4:2:2: chroma rows match luma rows). */
925 destY += dest_strideY;
926 destU += dest_strideU;
927 destV += dest_strideV;
936 copy_y41b_y41b (guint i_alpha, GstVideoFrame * dest,
937 gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src,
938 gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
941 guint8 *destY, *destU, *destV;
942 const guint8 *srcY, *srcU, *srcV;
943 gint dest_strideY, dest_strideU, dest_strideV;
944 gint src_strideY, src_strideU, src_strideV;
945 gint src_y_idx, src_uv_idx;
946 gint dest_y_idx, dest_uv_idx;
953 dest_width = GST_VIDEO_FRAME_WIDTH (dest);
955 dest_strideY = GST_VIDEO_FRAME_COMP_STRIDE (dest, 0);
956 dest_strideU = GST_VIDEO_FRAME_COMP_STRIDE (dest, 1);
957 dest_strideV = GST_VIDEO_FRAME_COMP_STRIDE (dest, 2);
959 src_strideY = GST_VIDEO_FRAME_COMP_STRIDE (src, 0);
960 src_strideU = GST_VIDEO_FRAME_COMP_STRIDE (src, 1);
961 src_strideV = GST_VIDEO_FRAME_COMP_STRIDE (src, 2);
963 destY = GST_VIDEO_FRAME_COMP_DATA (dest, 0);
964 destU = GST_VIDEO_FRAME_COMP_DATA (dest, 1);
965 destV = GST_VIDEO_FRAME_COMP_DATA (dest, 2);
967 srcY = GST_VIDEO_FRAME_COMP_DATA (src, 0);
968 srcU = GST_VIDEO_FRAME_COMP_DATA (src, 1);
969 srcV = GST_VIDEO_FRAME_COMP_DATA (src, 2);
971 destY = destY + dest_y * dest_strideY + dest_x;
972 destU = destU + dest_y * dest_strideU + dest_x / 4;
973 destV = destV + dest_y * dest_strideV + dest_x / 4;
975 srcY = srcY + src_y * src_strideY + src_x;
976 srcU = srcU + src_y * src_strideU + src_x / 4;
977 srcV = srcV + src_y * src_strideV + src_x / 4;
982 if (src_sdtv != dest_sdtv)
984 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
985 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
987 memcpy (matrix, cog_identity_matrix_8bit, 12 * sizeof (gint));
989 /* 1. Copy all macro pixel scanlines, the destination scanline
990 * now starts at macro pixel boundary. */
991 for (i = dest_y; i < h; i++) {
992 /* 1.1. Handle the first destination pixel if it doesn't
993 * start at the macro pixel boundary, i.e. blend with
995 if (dest_x % 4 == 1) {
1002 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1003 destY[1] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u1, v1), 0, 255);
1004 destY[2] = CLAMP (APPLY_MATRIX (matrix, 0, y3, u1, v1), 0, 255);
1007 (destU[0] + APPLY_MATRIX (matrix, 1, y1, u1,
1008 v1) + APPLY_MATRIX (matrix, 1, y2, u1,
1009 v1) + APPLY_MATRIX (matrix, 1, y3, u1, v1)) / 4, 0, 255);
1011 CLAMP ((destV[0] + APPLY_MATRIX (matrix, 2, y1, u1,
1012 v1) + APPLY_MATRIX (matrix, 2, y2, u1,
1013 v1) + APPLY_MATRIX (matrix, 2, y3, u1, v1)) / 4, 0, 255);
1016 src_y_idx = dest_y_idx = 3;
1018 src_uv_idx = (src_x % 4) + 3;
1019 } else if (dest_x % 4 == 2) {
1025 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1026 destY[1] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u1, v1), 0, 255);
1029 (2 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1,
1030 v1) + APPLY_MATRIX (matrix, 1, y2, u1, v1)) / 4, 0, 255);
1032 CLAMP ((2 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1,
1033 v1) + APPLY_MATRIX (matrix, 2, y2, u1, v1)) / 4, 0, 255);
1036 src_y_idx = dest_y_idx = 2;
1038 src_uv_idx = (src_x % 4) + 2;
1039 } else if (dest_x % 4 == 3) {
1044 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1047 (3 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0, 255);
1049 (3 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 4, 0, 255);
1052 src_y_idx = dest_y_idx = 1;
1054 src_uv_idx = (src_x % 4) + 1;
1057 src_y_idx = dest_y_idx = dest_uv_idx = 0;
1058 src_uv_idx = (src_x % 4);
1061 /* 1.2. Copy all macro pixels from the source to the destination.
1062 * All pixels now start at macro pixel boundary, i.e. no
1063 * blending with the background is necessary. */
1064 for (; j < w - 3; j += 4) {
1065 y1 = srcY[src_y_idx];
1066 y2 = srcY[src_y_idx + 1];
1067 y3 = srcY[src_y_idx + 2];
1068 y4 = srcY[src_y_idx + 3];
1070 u1 = srcU[src_uv_idx / 4];
1071 v1 = srcV[src_uv_idx / 4];
1073 u2 = srcU[src_uv_idx / 4];
1074 v2 = srcV[src_uv_idx / 4];
1076 u3 = srcU[src_uv_idx / 4];
1077 v3 = srcV[src_uv_idx / 4];
1079 u4 = srcU[src_uv_idx / 4];
1080 v4 = srcV[src_uv_idx / 4];
1083 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1084 destY[dest_y_idx + 1] =
1085 CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
1086 destY[dest_y_idx + 2] =
1087 CLAMP (APPLY_MATRIX (matrix, 0, y3, u3, v3), 0, 255);
1088 destY[dest_y_idx + 3] =
1089 CLAMP (APPLY_MATRIX (matrix, 0, y4, u4, v4), 0, 255);
1091 destU[dest_uv_idx] = CLAMP (
1092 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 1, y2,
1093 u2, v2) + APPLY_MATRIX (matrix, 1, y3, u3,
1094 v3) + APPLY_MATRIX (matrix, 1, y4, u4, v4)) / 4, 0, 255);
1095 destV[dest_uv_idx] =
1096 CLAMP ((APPLY_MATRIX (matrix, 2, y1, u1, v1) + APPLY_MATRIX (matrix,
1097 2, y2, u2, v2) + APPLY_MATRIX (matrix, 2, y3, u3,
1098 v3) + APPLY_MATRIX (matrix, 2, y4, u4, v4)) / 4, 0, 255);
1105 /* 1.3. Now copy the last pixel if one exists and blend it
1106 * with the background because we only fill part of
1107 * the macro pixel. In case this is the last pixel of
1108 * the destination we will a larger part. */
1109 if (j == w - 1 && j == dest_width - 1) {
1110 y1 = srcY[src_y_idx];
1111 u1 = srcU[src_uv_idx / 4];
1112 v1 = srcV[src_uv_idx / 4];
1114 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1115 destU[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
1116 destV[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
1117 } else if (j == w - 1) {
1118 y1 = srcY[src_y_idx];
1119 u1 = srcU[src_uv_idx / 4];
1120 v1 = srcV[src_uv_idx / 4];
1122 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1123 destU[dest_uv_idx] = CLAMP (
1124 (destU[dest_uv_idx] + 3 * APPLY_MATRIX (matrix, 1, y1, u1,
1126 destV[dest_uv_idx] = CLAMP (
1127 (destV[dest_uv_idx] + 3 * APPLY_MATRIX (matrix, 1, y1, u1,
1129 } else if (j == w - 2 && j == dest_width - 2) {
1130 y1 = srcY[src_y_idx];
1131 y2 = srcY[src_y_idx + 1];
1132 u1 = srcU[src_uv_idx / 4];
1133 v1 = srcV[src_uv_idx / 4];
1135 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1136 destY[dest_y_idx + 1] =
1137 CLAMP (APPLY_MATRIX (matrix, 0, y2, u1, v1), 0, 255);
1138 destU[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
1139 destV[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
1140 } else if (j == w - 2) {
1141 y1 = srcY[src_y_idx];
1142 y2 = srcY[src_y_idx + 1];
1143 u1 = srcU[src_uv_idx / 4];
1144 v1 = srcV[src_uv_idx / 4];
1146 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1147 destY[dest_y_idx + 1] =
1148 CLAMP (APPLY_MATRIX (matrix, 0, y2, u1, v1), 0, 255);
1149 destU[dest_uv_idx] =
1150 CLAMP ((destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1152 destV[dest_uv_idx] =
1153 CLAMP ((destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1155 } else if (j == w - 3 && j == dest_width - 3) {
1156 y1 = srcY[src_y_idx];
1157 y2 = srcY[src_y_idx + 1];
1158 y3 = srcY[src_y_idx + 2];
1159 u1 = srcU[src_uv_idx / 4];
1160 v1 = srcV[src_uv_idx / 4];
1162 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1163 destY[dest_y_idx + 1] =
1164 CLAMP (APPLY_MATRIX (matrix, 0, y2, u1, v1), 0, 255);
1165 destY[dest_y_idx + 2] =
1166 CLAMP (APPLY_MATRIX (matrix, 0, y3, u1, v1), 0, 255);
1167 destU[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
1168 destV[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
1169 } else if (j == w - 3) {
1170 y1 = srcY[src_y_idx];
1171 y2 = srcY[src_y_idx + 1];
1172 y3 = srcY[src_y_idx + 2];
1173 u1 = srcU[src_uv_idx / 4];
1174 v1 = srcV[src_uv_idx / 4];
1176 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1177 destY[dest_y_idx + 1] =
1178 CLAMP (APPLY_MATRIX (matrix, 0, y2, u1, v1), 0, 255);
1179 destY[dest_y_idx + 2] =
1180 CLAMP (APPLY_MATRIX (matrix, 0, y3, u1, v1), 0, 255);
1181 destU[dest_uv_idx] =
1182 CLAMP ((3 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1184 destV[dest_uv_idx] =
1185 CLAMP ((3 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1189 destY += dest_strideY;
1190 destU += dest_strideU;
1191 destV += dest_strideV;
1192 srcY += src_strideY;
1193 srcU += src_strideU;
1194 srcV += src_strideV;
/* copy_i420_i420:
 * Copies a w x h pixel region from an I420 source frame into an I420
 * destination frame at (dest_x, dest_y), converting between SDTV and HDTV
 * YCbCr colorimetry when src_sdtv != dest_sdtv (identity matrix otherwise).
 * I420 chroma is subsampled 2x2, so rows/columns that do not start or end
 * on a 2x2 macro pixel boundary blend the new chroma with the chroma
 * already present in the destination (weighted averages below).
 *
 * NOTE(review): this extract is missing lines -- the numeric prefixes are
 * the original file's line numbers and show gaps (among the dropped lines:
 * the matrix[12] declaration, i/j initialisation, per-iteration index
 * increments and several closing braces).  All code tokens below are left
 * untouched; only comments were added.
 */
1199 copy_i420_i420 (guint i_alpha, GstVideoFrame * dest,
1200     gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src,
1201     gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
1204 guint8 *destY, *destU, *destV;
1205 const guint8 *srcY, *srcU, *srcV;
1207 const guint8 *srcY2, *srcU2, *srcV2;
1208 gint dest_strideY, dest_strideU, dest_strideV;
1209 gint src_strideY, src_strideU, src_strideV;
1210 gint src_y_idx, src_uv_idx;
1211 gint dest_y_idx, dest_uv_idx;
1213 gint y1, y2, y3, y4;
1214 gint u1, u2, u3, u4;
1215 gint v1, v2, v3, v4;
1216 gint dest_width, dest_height;
1218 dest_width = GST_VIDEO_FRAME_WIDTH (dest);
1219 dest_height = GST_VIDEO_FRAME_HEIGHT (dest);
1221 dest_strideY = GST_VIDEO_FRAME_COMP_STRIDE (dest, 0);
1222 dest_strideU = GST_VIDEO_FRAME_COMP_STRIDE (dest, 1);
1223 dest_strideV = GST_VIDEO_FRAME_COMP_STRIDE (dest, 2);
1225 src_strideY = GST_VIDEO_FRAME_COMP_STRIDE (src, 0);
1226 src_strideU = GST_VIDEO_FRAME_COMP_STRIDE (src, 1);
1227 src_strideV = GST_VIDEO_FRAME_COMP_STRIDE (src, 2);
1229 destY = GST_VIDEO_FRAME_COMP_DATA (dest, 0);
1230 destU = GST_VIDEO_FRAME_COMP_DATA (dest, 1);
1231 destV = GST_VIDEO_FRAME_COMP_DATA (dest, 2);
1233 srcY = GST_VIDEO_FRAME_COMP_DATA (src, 0);
1234 srcU = GST_VIDEO_FRAME_COMP_DATA (src, 1);
1235 srcV = GST_VIDEO_FRAME_COMP_DATA (src, 2);
/* Position the plane pointers at the requested region; the chroma planes
 * are half resolution in both directions, hence the / 2. */
1237 destY = destY + dest_y * dest_strideY + dest_x;
1238 destU = destU + (dest_y / 2) * dest_strideU + dest_x / 2;
1239 destV = destV + (dest_y / 2) * dest_strideV + dest_x / 2;
1241 srcY = srcY + src_y * src_strideY + src_x;
1242 srcU = srcU + (src_y / 2) * src_strideU + src_x / 2;
1243 srcV = srcV + (src_y / 2) * src_strideV + src_x / 2;
/* Second-row luma pointers so a whole 2x2 macro pixel can be handled
 * per iteration in section 2. */
1245 destY2 = destY + dest_strideY;
1246 srcY2 = srcY + src_strideY;
/* Select the colorimetry conversion matrix (identity when no conversion
 * is needed). */
1251 if (src_sdtv != dest_sdtv)
1253 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
1254 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
1256 memcpy (matrix, cog_identity_matrix_8bit, 12 * sizeof (gint));
1258 /* 1. Handle the first destination scanline specially if it
1259 * doesn't start at the macro pixel boundary, i.e. blend
1260 * with the background! */
1261 if (dest_y % 2 == 1) {
1262 /* 1.1. Handle the first destination pixel if it doesn't
1263 * start at the macro pixel boundary, i.e. blend with
1264 * the background! */
1265 if (dest_x % 2 == 1) {
1270 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
/* 3:1 blend towards the existing chroma -- only 1 of the 4 luma samples
 * of this macro pixel is being overwritten. */
1272 CLAMP ((3 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
1275 CLAMP ((3 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 4, 0,
1279 src_y_idx = dest_y_idx = dest_uv_idx = 1;
1280 src_uv_idx = (src_x % 2) + 1;
1283 src_y_idx = dest_y_idx = dest_uv_idx = 0;
1284 src_uv_idx = (src_x % 2);
1287 /* 1.2. Copy all macro pixels from the source to the destination
1288 * but blend with the background because we're only filling
1289 * the lower part of the macro pixels. */
1290 for (; j < w - 1; j += 2) {
1291 y1 = srcY[src_y_idx];
1292 y2 = srcY[src_y_idx + 1];
1294 u1 = srcU[src_uv_idx / 2];
1295 v1 = srcV[src_uv_idx / 2];
1297 u2 = srcU[src_uv_idx / 2];
1298 v2 = srcV[src_uv_idx / 2];
1301 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1302 destY[dest_y_idx + 1] =
1303 CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
/* Half old chroma, half the average of the two new samples. */
1304 destU[dest_uv_idx] =
1305 CLAMP ((2 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1306 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
1307 destV[dest_uv_idx] =
1308 CLAMP ((2 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
1309 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
1316 /* 1.3. Now copy the last pixel if one exists and blend it
1317 * with the background because we only fill part of
1318 * the macro pixel. In case this is the last pixel of
1319 * the destination we will fill a larger part. */
1320 if (j == w - 1 && j == dest_width - 1) {
1321 y1 = srcY[src_y_idx];
1322 u1 = srcU[src_uv_idx / 2];
1323 v1 = srcV[src_uv_idx / 2];
1325 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1326 destU[dest_uv_idx] = CLAMP (
1327 (destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0,
1329 destV[dest_uv_idx] =
1330 CLAMP ((destV[dest_uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
1332 } else if (j == w - 1) {
1333 y1 = srcY[src_y_idx];
1334 u1 = srcU[src_uv_idx / 2];
1335 v1 = srcV[src_uv_idx / 2];
1337 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1338 destU[dest_uv_idx] = CLAMP (
1339 (3 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4,
1341 destV[dest_uv_idx] =
1342 CLAMP ((3 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
/* Advance all plane pointers by one luma row. */
1346 destY += dest_strideY;
1347 destY2 += dest_strideY;
1348 destU += dest_strideU;
1349 destV += dest_strideV;
1350 srcY += src_strideY;
1351 srcY2 += src_strideY;
/* Chroma rows advance only every second luma row. */
1353 if (src_y % 2 == 0) {
1354 srcU += src_strideU;
1355 srcV += src_strideV;
1362 /* 2. Copy all macro pixel scanlines, the destination scanline
1363 * now starts at macro pixel boundary. */
1364 for (; i < h - 1; i += 2) {
1365 /* 2.1. Handle the first destination pixel if it doesn't
1366 * start at the macro pixel boundary, i.e. blend with
1367 * the background! */
1371 if (src_y % 2 == 1) {
1372 srcU2 += src_strideU;
1373 srcV2 += src_strideV;
1376 if (dest_x % 2 == 1) {
1384 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1385 destY2[0] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
1387 (2 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1,
1388 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
1390 (2 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1,
1391 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
1393 src_y_idx = dest_y_idx = dest_uv_idx = 1;
1394 src_uv_idx = (src_x % 2) + 1;
1397 src_y_idx = dest_y_idx = dest_uv_idx = 0;
1398 src_uv_idx = (src_x % 2);
1401 /* 2.2. Copy all macro pixels from the source to the destination.
1402 * All pixels now start at macro pixel boundary, i.e. no
1403 * blending with the background is necessary. */
1404 for (; j < w - 1; j += 2) {
1405 y1 = srcY[src_y_idx];
1406 y2 = srcY[src_y_idx + 1];
1407 y3 = srcY2[src_y_idx];
1408 y4 = srcY2[src_y_idx + 1];
1410 u1 = srcU[src_uv_idx / 2];
1411 u3 = srcU2[src_uv_idx / 2];
1412 v1 = srcV[src_uv_idx / 2];
1413 v3 = srcV2[src_uv_idx / 2];
1415 u2 = srcU[src_uv_idx / 2];
1416 u4 = srcU2[src_uv_idx / 2];
1417 v2 = srcV[src_uv_idx / 2];
1418 v4 = srcV2[src_uv_idx / 2];
1421 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1422 destY[dest_y_idx + 1] =
1423 CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
1424 destY2[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y3, u3, v3), 0, 255);
1425 destY2[dest_y_idx + 1] =
1426 CLAMP (APPLY_MATRIX (matrix, 0, y4, u4, v4), 0, 255);
/* Full macro pixel: destination chroma is the average of the four
 * converted samples, no blending with existing data. */
1428 destU[dest_uv_idx] = CLAMP (
1429 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 1, y2,
1430 u2, v2) + APPLY_MATRIX (matrix, 1, y3, u3,
1431 v3) + APPLY_MATRIX (matrix, 1, y4, u4, v4)) / 4, 0, 255);
1432 destV[dest_uv_idx] = CLAMP (
1433 (APPLY_MATRIX (matrix, 2, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
1434 u2, v2) + APPLY_MATRIX (matrix, 2, y3, u3,
1435 v3) + APPLY_MATRIX (matrix, 2, y4, u4, v4)) / 4, 0, 255);
1442 /* 2.3. Now copy the last pixel if one exists and blend it
1443 * with the background because we only fill part of
1444 * the macro pixel. In case this is the last pixel of
1445 * the destination we will fill a larger part. */
1446 if (j == w - 1 && j == dest_width - 1) {
1447 y1 = srcY[src_y_idx];
1448 y2 = srcY2[src_y_idx];
1450 u1 = srcU[src_uv_idx / 2];
1451 u2 = srcU2[src_uv_idx / 2];
1453 v1 = srcV[src_uv_idx / 2];
1454 v2 = srcV2[src_uv_idx / 2];
1456 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1457 destY2[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
/* NOTE(review): both U and V below average matrix rows 1 and 2 with
 * identical operands, so destU and destV get the same value here.
 * Every other branch uses row 1 for U and row 2 for V consistently --
 * this looks like a copy-paste bug (rows should be 1/1 for U and 2/2
 * for V). */
1458 destU[dest_uv_idx] = CLAMP (
1459 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
1460 u2, v2)) / 2, 0, 255);
1461 destV[dest_uv_idx] = CLAMP (
1462 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
1463 u2, v2)) / 2, 0, 255);
1464 } else if (j == w - 1) {
1465 y1 = srcY[src_y_idx];
1466 y2 = srcY2[src_y_idx];
1468 u1 = srcU[src_uv_idx / 2];
1469 u2 = srcU2[src_uv_idx / 2];
1471 v1 = srcV[src_uv_idx / 2];
1472 v2 = srcV2[src_uv_idx / 2];
1474 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1475 destY2[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
/* NOTE(review): same row-1/row-2 mixing as above -- the second sample's
 * contribution to U uses the V row and vice versa; suspected bug. */
1476 destU[dest_uv_idx] = CLAMP (
1477 (2 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1478 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
1479 destV[dest_uv_idx] = CLAMP (
1480 (2 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1481 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
/* Two luma rows were consumed per iteration, one chroma row. */
1484 destY += 2 * dest_strideY;
1485 destY2 += 2 * dest_strideY;
1486 destU += dest_strideU;
1487 destV += dest_strideV;
1488 srcY += 2 * src_strideY;
1489 srcY2 += 2 * src_strideY;
1492 srcU += src_strideU;
1493 srcV += src_strideV;
1496 /* 3. Handle the last scanline if one exists. This again
1497 * doesn't start at macro pixel boundary but should
1498 * only fill the upper part of the macro pixels. */
1499 if (i == h - 1 && i == dest_height - 1) {
1500 /* 3.1. Handle the first destination pixel if it doesn't
1501 * start at the macro pixel boundary, i.e. blend with
1502 * the background! */
1503 if (dest_x % 2 == 1) {
1508 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1510 CLAMP ((destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
1512 CLAMP ((destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 2, 0, 255);
1515 src_y_idx = dest_y_idx = dest_uv_idx = 1;
1516 src_uv_idx = (src_x % 2) + 1;
1519 src_y_idx = dest_y_idx = dest_uv_idx = 0;
1520 src_uv_idx = (src_x % 2);
1523 /* 3.2. Copy all macro pixels from the source to the destination
1524 * but blend with the background because we're only filling
1525 * the upper part of the macro pixels. */
1526 for (; j < w - 1; j += 2) {
1527 y1 = srcY[src_y_idx];
1528 y2 = srcY[src_y_idx + 1];
1530 u1 = srcU[src_uv_idx / 2];
1531 v1 = srcV[src_uv_idx / 2];
1533 u2 = srcU[src_uv_idx / 2];
1534 v2 = srcV[src_uv_idx / 2];
1537 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1538 destY[dest_y_idx + 1] =
1539 CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
1541 destU[dest_uv_idx] = CLAMP (
1542 (2 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1543 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
1544 destV[dest_uv_idx] = CLAMP (
1545 (2 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
1546 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
1553 /* 3.3. Now copy the last pixel if one exists and blend it
1554 * with the background because we only fill part of
1555 * the macro pixel. In case this is the last pixel of
1556 * the destination we will fill a larger part. */
1557 if (j == w - 1 && j == dest_width - 1) {
1558 y1 = srcY[src_y_idx];
1559 u1 = srcU[src_uv_idx / 2];
1560 v1 = srcV[src_uv_idx / 2];
1562 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1563 destU[dest_uv_idx] = CLAMP (
1564 (destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0,
/* NOTE(review): V is blended with matrix row 1 (the U row) here; the
 * analogous branch in section 1.3 uses row 2 -- suspected copy-paste
 * bug. */
1566 destV[dest_uv_idx] =
1567 CLAMP ((destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1569 } else if (j == w - 1) {
1570 y1 = srcY[src_y_idx];
1571 u1 = srcU[src_uv_idx / 2];
1572 v1 = srcV[src_uv_idx / 2];
1574 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1575 destU[dest_uv_idx] = CLAMP (
1576 (3 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4,
/* NOTE(review): matrix row 1 used for V again -- see note above. */
1578 destV[dest_uv_idx] =
1579 CLAMP ((3 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1582 } else if (i == h - 1) {
1583 /* 3.1. Handle the first destination pixel if it doesn't
1584 * start at the macro pixel boundary, i.e. blend with
1585 * the background! */
1586 if (dest_x % 2 == 1) {
1591 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1593 CLAMP ((3 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
1596 CLAMP ((3 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 4, 0,
1600 src_y_idx = dest_y_idx = dest_uv_idx = 1;
1601 src_uv_idx = (src_x % 2) + 1;
1604 src_y_idx = dest_y_idx = dest_uv_idx = 0;
1605 src_uv_idx = (src_x % 2);
1608 /* 3.2. Copy all macro pixels from the source to the destination
1609 * but blend with the background because we're only filling
1610 * the upper part of the macro pixels. */
1611 for (; j < w - 1; j += 2) {
1612 y1 = srcY[src_y_idx];
1613 y2 = srcY[src_y_idx + 1];
1615 u1 = srcU[src_uv_idx / 2];
1616 v1 = srcV[src_uv_idx / 2];
1618 u2 = srcU[src_uv_idx / 2];
1619 v2 = srcV[src_uv_idx / 2];
1622 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1623 destY[dest_y_idx + 1] =
1624 CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
1626 destU[dest_uv_idx] = CLAMP (
1627 (2 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1628 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
1629 destV[dest_uv_idx] = CLAMP (
1630 (2 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
1631 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
1638 /* 3.3. Now copy the last pixel if one exists and blend it
1639 * with the background because we only fill part of
1640 * the macro pixel. In case this is the last pixel of
1641 * the destination we will fill a larger part. */
1642 if (j == w - 1 && j == dest_width - 1) {
1643 y1 = srcY[src_y_idx];
1644 u1 = srcU[src_uv_idx / 2];
1645 v1 = srcV[src_uv_idx / 2];
1647 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1648 destU[dest_uv_idx] = CLAMP (
1649 (destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0,
/* NOTE(review): matrix row 1 used for V -- suspected copy-paste bug,
 * compare section 1.3 which uses row 2. */
1651 destV[dest_uv_idx] =
1652 CLAMP ((destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1654 } else if (j == w - 1) {
1655 y1 = srcY[src_y_idx];
1656 u1 = srcU[src_uv_idx / 2];
1657 v1 = srcV[src_uv_idx / 2];
1659 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1660 destU[dest_uv_idx] = CLAMP (
1661 (3 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4,
/* NOTE(review): matrix row 1 used for V -- see note above. */
1663 destV[dest_uv_idx] =
1664 CLAMP ((3 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1671 copy_i420_ayuv (guint i_alpha, GstVideoFrame * dest_frame,
1672 gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src_frame,
1673 gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
1675 const guint8 *srcY, *srcU, *srcV;
1676 gint src_strideY, src_strideU, src_strideV;
1680 src_strideY = GST_VIDEO_FRAME_COMP_STRIDE (src_frame, 0);
1681 src_strideU = GST_VIDEO_FRAME_COMP_STRIDE (src_frame, 1);
1682 src_strideV = GST_VIDEO_FRAME_COMP_STRIDE (src_frame, 2);
1684 srcY = GST_VIDEO_FRAME_COMP_DATA (src_frame, 0);
1685 srcU = GST_VIDEO_FRAME_COMP_DATA (src_frame, 1);
1686 srcV = GST_VIDEO_FRAME_COMP_DATA (src_frame, 2);
1688 dest_stride = GST_VIDEO_FRAME_PLANE_STRIDE (dest_frame, 0);
1690 dest = GST_VIDEO_FRAME_PLANE_DATA (dest_frame, 0);
1691 dest = dest + dest_y * dest_stride + dest_x * 4;
1693 srcY = srcY + src_y * src_strideY + src_x;
1694 srcU = srcU + (src_y / 2) * src_strideU + src_x / 2;
1695 srcV = srcV + (src_y / 2) * src_strideV + src_x / 2;
1697 i_alpha = CLAMP (i_alpha, 0, 255);
1699 if (src_sdtv != dest_sdtv) {
1706 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
1707 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
1709 for (i = 0; i < h; i++) {
1710 for (j = 0, uv_idx = src_x % 2; j < w; j++, uv_idx++) {
1712 u = srcU[uv_idx / 2];
1713 v = srcV[uv_idx / 2];
1715 y1 = APPLY_MATRIX (matrix, 0, y, u, v);
1716 u1 = APPLY_MATRIX (matrix, 1, y, u, v);
1717 v1 = APPLY_MATRIX (matrix, 2, y, u, v);
1719 dest[4 * j + 0] = i_alpha;
1720 dest[4 * j + 1] = y1;
1721 dest[4 * j + 2] = u1;
1722 dest[4 * j + 3] = v1;
1724 dest += dest_stride;
1727 srcY += src_strideY;
1728 if (src_y % 2 == 0) {
1729 srcU += src_strideU;
1730 srcV += src_strideV;
1737 for (i = 0; i < h; i++) {
1738 for (j = 0, uv_idx = src_x % 2; j < w; j++, uv_idx++) {
1740 u = srcU[uv_idx / 2];
1741 v = srcV[uv_idx / 2];
1743 dest[4 * j + 0] = i_alpha;
1744 dest[4 * j + 1] = y;
1745 dest[4 * j + 2] = u;
1746 dest[4 * j + 3] = v;
1748 dest += dest_stride;
1751 srcY += src_strideY;
1752 if (src_y % 2 == 0) {
1753 srcU += src_strideU;
1754 srcV += src_strideV;
1761 fill_rgb32 (GstVideoBoxFill fill_type, guint b_alpha,
1762 GstVideoFrame * frame, gboolean sdtv)
1764 guint32 empty_pixel;
1770 width = GST_VIDEO_FRAME_WIDTH (frame);
1771 height = GST_VIDEO_FRAME_HEIGHT (frame);
1773 dest = GST_VIDEO_FRAME_PLANE_DATA (frame, 0);
1774 stride = GST_VIDEO_FRAME_PLANE_STRIDE (frame, 0);
1776 p[0] = GST_VIDEO_FRAME_COMP_OFFSET (frame, 3);
1777 p[1] = GST_VIDEO_FRAME_COMP_OFFSET (frame, 0);
1778 p[2] = GST_VIDEO_FRAME_COMP_OFFSET (frame, 1);
1779 p[3] = GST_VIDEO_FRAME_COMP_OFFSET (frame, 2);
1781 b_alpha = CLAMP (b_alpha, 0, 255);
1783 if (GST_VIDEO_FRAME_N_COMPONENTS (frame) == 4) {
1784 empty_pixel = GUINT32_FROM_LE ((b_alpha << (p[0] * 8)) |
1785 (rgb_colors_R[fill_type] << (p[1] * 8)) |
1786 (rgb_colors_G[fill_type] << (p[2] * 8)) |
1787 (rgb_colors_B[fill_type] << (p[3] * 8)));
1789 empty_pixel = GUINT32_FROM_LE (
1790 (rgb_colors_R[fill_type] << (p[1] * 8)) |
1791 (rgb_colors_G[fill_type] << (p[2] * 8)) |
1792 (rgb_colors_B[fill_type] << (p[3] * 8)));
1795 if (stride == width * 4) {
1796 video_box_orc_splat_u32 ((guint32 *) dest, empty_pixel, width * height);
1797 } else if (height) {
1798 for (; height; --height) {
1799 video_box_orc_splat_u32 ((guint32 *) dest, empty_pixel, width);
1806 fill_rgb24 (GstVideoBoxFill fill_type, guint b_alpha,
1807 GstVideoFrame * frame, gboolean sdtv)
1815 width = GST_VIDEO_FRAME_WIDTH (frame);
1816 height = GST_VIDEO_FRAME_HEIGHT (frame);
1818 dest = GST_VIDEO_FRAME_PLANE_DATA (frame, 0);
1819 dest_stride = GST_VIDEO_FRAME_PLANE_STRIDE (frame, 0);
1821 p[1] = GST_VIDEO_FRAME_COMP_OFFSET (frame, 0);
1822 p[2] = GST_VIDEO_FRAME_COMP_OFFSET (frame, 1);
1823 p[3] = GST_VIDEO_FRAME_COMP_OFFSET (frame, 2);
1825 for (i = 0; i < height; i++) {
1826 for (j = 0; j < width; j++) {
1827 dest[3 * j + p[1]] = rgb_colors_R[fill_type];
1828 dest[3 * j + p[2]] = rgb_colors_G[fill_type];
1829 dest[3 * j + p[3]] = rgb_colors_B[fill_type];
1831 dest += dest_stride;
/* copy_rgb32:
 * Copies a w x h pixel region between two RGB frames at (dest_x, dest_y),
 * remapping component order via the per-frame component offsets and
 * handling every combination of 3/4 bytes-per-pixel ("packed") and
 * with/without alpha.  No colorimetry conversion is done for RGB.
 *
 * NOTE(review): this extract has dropped lines (the numeric prefixes show
 * gaps): the declarations of i, j, p_in, p_out, packed_in, src and dest,
 * the closing braces, and -- in the byte-offset branches that iterate
 * `j += 4` -- a preceding `w *= 4;` plus a per-row `src += src_stride;`
 * appear to be among them.  Confirm against the full file; code tokens
 * below are left untouched.
 */
1836 copy_rgb32 (guint i_alpha, GstVideoFrame * dest_frame,
1837 gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src_frame,
1838 gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
1841 gint src_stride, dest_stride;
1842 gboolean in_alpha, out_alpha;
1843 gint in_bpp, out_bpp;
1846 gboolean packed_out;
1850 src_stride = GST_VIDEO_FRAME_PLANE_STRIDE (src_frame, 0);
1851 dest_stride = GST_VIDEO_FRAME_PLANE_STRIDE (dest_frame, 0);
1852 in_bpp = GST_VIDEO_FRAME_COMP_PSTRIDE (src_frame, 0);
1853 out_bpp = GST_VIDEO_FRAME_COMP_PSTRIDE (dest_frame, 0);
/* "packed" here means 3 bytes per pixel (RGB24-style, no padding byte) */
1854 packed_in = (in_bpp < 4);
1855 packed_out = (out_bpp < 4);
/* p_*[0] = alpha byte offset, p_*[1..3] = R, G, B byte offsets */
1857 out_alpha = GST_VIDEO_INFO_HAS_ALPHA (&dest_frame->info);
1858 p_out[0] = GST_VIDEO_FRAME_COMP_OFFSET (dest_frame, 3);
1859 p_out[1] = GST_VIDEO_FRAME_COMP_OFFSET (dest_frame, 0);
1860 p_out[2] = GST_VIDEO_FRAME_COMP_OFFSET (dest_frame, 1);
1861 p_out[3] = GST_VIDEO_FRAME_COMP_OFFSET (dest_frame, 2);
1863 in_alpha = GST_VIDEO_INFO_HAS_ALPHA (&src_frame->info);
1864 p_in[0] = GST_VIDEO_FRAME_COMP_OFFSET (src_frame, 3);
1865 p_in[1] = GST_VIDEO_FRAME_COMP_OFFSET (src_frame, 0);
1866 p_in[2] = GST_VIDEO_FRAME_COMP_OFFSET (src_frame, 1);
1867 p_in[3] = GST_VIDEO_FRAME_COMP_OFFSET (src_frame, 2);
1869 dest = GST_VIDEO_FRAME_PLANE_DATA (dest_frame, 0);
1870 dest = dest + dest_y * dest_stride + dest_x * out_bpp;
1871 src = GST_VIDEO_FRAME_PLANE_DATA (src_frame, 0);
1872 src = src + src_y * src_stride + src_x * in_bpp;
/* Case 1: both sides carry alpha -- scale source alpha by i_alpha. */
1874 if (in_alpha && out_alpha) {
1876 for (i = 0; i < h; i++) {
1877 for (j = 0; j < w; j += 4) {
1878 dest[j + p_out[0]] = (src[j + p_in[0]] * i_alpha) >> 8;
1879 dest[j + p_out[1]] = src[j + p_in[1]];
1880 dest[j + p_out[2]] = src[j + p_in[2]];
1881 dest[j + p_out[3]] = src[j + p_in[3]];
1883 dest += dest_stride;
/* Case 2: opaque 4-bpp source into alpha destination -- write i_alpha. */
1886 } else if (out_alpha && !packed_in) {
1888 i_alpha = CLAMP (i_alpha, 0, 255);
1890 for (i = 0; i < h; i++) {
1891 for (j = 0; j < w; j += 4) {
1892 dest[j + p_out[0]] = i_alpha;
1893 dest[j + p_out[1]] = src[j + p_in[1]];
1894 dest[j + p_out[2]] = src[j + p_in[2]];
1895 dest[j + p_out[3]] = src[j + p_in[3]];
1897 dest += dest_stride;
/* Case 3: 3-bpp source into alpha destination -- per-pixel indexing. */
1900 } else if (out_alpha && packed_in) {
1901 i_alpha = CLAMP (i_alpha, 0, 255);
1903 for (i = 0; i < h; i++) {
1904 for (j = 0; j < w; j++) {
1905 dest[4 * j + p_out[0]] = i_alpha;
1906 dest[4 * j + p_out[1]] = src[in_bpp * j + p_in[1]];
1907 dest[4 * j + p_out[2]] = src[in_bpp * j + p_in[2]];
1908 dest[4 * j + p_out[3]] = src[in_bpp * j + p_in[3]];
1910 dest += dest_stride;
/* Case 4: both 4 bpp, destination has no alpha -- copy RGB only. */
1913 } else if (!packed_out && !packed_in) {
1915 for (i = 0; i < h; i++) {
1916 for (j = 0; j < w; j += 4) {
1917 dest[j + p_out[1]] = src[j + p_in[1]];
1918 dest[j + p_out[2]] = src[j + p_in[2]];
1919 dest[j + p_out[3]] = src[j + p_in[3]];
1921 dest += dest_stride;
/* Case 5: generic fallback with per-pixel strides on both sides. */
1925 for (i = 0; i < h; i++) {
1926 for (j = 0; j < w; j++) {
1927 dest[out_bpp * j + p_out[1]] = src[in_bpp * j + p_in[1]];
1928 dest[out_bpp * j + p_out[2]] = src[in_bpp * j + p_in[2]];
1929 dest[out_bpp * j + p_out[3]] = src[in_bpp * j + p_in[3]];
1931 dest += dest_stride;
/* copy_rgb32_ayuv:
 * Copies a w x h RGB(A) region into an AYUV destination at
 * (dest_x, dest_y), converting each pixel through the RGB->YCbCr matrix
 * that matches the destination colorimetry (@dest_sdtv).
 *
 * NOTE(review): this extract has dropped lines (see the gaps in the
 * numeric prefixes): local declarations, the `memcpy (matrix,` opener,
 * the branch header before the first loop (presumably
 * `if (in_alpha) {` with a `w *= 4;` byte pre-scale, matching the
 * `j += 4` byte iteration), the `dest[j + 0] = a;` / `a = i_alpha;`
 * assignments, per-row `src += src_stride;` and closing braces.
 * Confirm against the full file; code tokens below are left untouched.
 */
1938 copy_rgb32_ayuv (guint i_alpha, GstVideoFrame * dest_frame,
1939 gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src_frame,
1940 gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
1943 gint src_stride, dest_stride;
1954 dest_stride = GST_VIDEO_FRAME_PLANE_STRIDE (dest_frame, 0);
1955 src_stride = GST_VIDEO_FRAME_PLANE_STRIDE (src_frame, 0);
1956 in_bpp = GST_VIDEO_FRAME_COMP_PSTRIDE (src_frame, 0);
1957 packed_in = (in_bpp < 4);
/* p_in[0] = alpha byte offset, p_in[1..3] = R, G, B byte offsets */
1959 in_alpha = GST_VIDEO_INFO_HAS_ALPHA (&src_frame->info);
1960 p_in[0] = GST_VIDEO_FRAME_COMP_OFFSET (src_frame, 3);
1961 p_in[1] = GST_VIDEO_FRAME_COMP_OFFSET (src_frame, 0);
1962 p_in[2] = GST_VIDEO_FRAME_COMP_OFFSET (src_frame, 1);
1963 p_in[3] = GST_VIDEO_FRAME_COMP_OFFSET (src_frame, 2);
/* RGB -> YCbCr matrix matching the destination's colorimetry */
1966 (dest_sdtv) ? cog_rgb_to_ycbcr_matrix_8bit_sdtv :
1967 cog_rgb_to_ycbcr_matrix_8bit_hdtv, 12 * sizeof (gint));
1969 dest = GST_VIDEO_FRAME_PLANE_DATA (dest_frame, 0);
1970 dest = dest + dest_y * dest_stride + dest_x * 4;
1971 src = GST_VIDEO_FRAME_PLANE_DATA (src_frame, 0);
1972 src = src + src_y * src_stride + src_x * in_bpp;
/* Branch 1: source has alpha -- scale it by i_alpha per pixel. */
1976 for (i = 0; i < h; i++) {
1977 for (j = 0; j < w; j += 4) {
1978 a = (src[j + p_in[0]] * i_alpha) >> 8;
1979 r = src[j + p_in[1]];
1980 g = src[j + p_in[2]];
1981 b = src[j + p_in[3]];
1983 y = APPLY_MATRIX (matrix, 0, r, g, b);
1984 u = APPLY_MATRIX (matrix, 1, r, g, b);
1985 v = APPLY_MATRIX (matrix, 2, r, g, b);
1988 dest[j + 1] = CLAMP (y, 0, 255);
1989 dest[j + 2] = CLAMP (u, 0, 255);
1990 dest[j + 3] = CLAMP (v, 0, 255);
1992 dest += dest_stride;
/* Branch 2: opaque 4-bpp source -- constant alpha i_alpha. */
1995 } else if (!packed_in) {
1997 i_alpha = CLAMP (i_alpha, 0, 255);
1999 for (i = 0; i < h; i++) {
2000 for (j = 0; j < w; j += 4) {
2002 r = src[j + p_in[1]];
2003 g = src[j + p_in[2]];
2004 b = src[j + p_in[3]];
2006 y = APPLY_MATRIX (matrix, 0, r, g, b);
2007 u = APPLY_MATRIX (matrix, 1, r, g, b);
2008 v = APPLY_MATRIX (matrix, 2, r, g, b);
2011 dest[j + 1] = CLAMP (y, 0, 255);
2012 dest[j + 2] = CLAMP (u, 0, 255);
2013 dest[j + 3] = CLAMP (v, 0, 255);
2015 dest += dest_stride;
/* Branch 3: 3-bpp (packed) source -- per-pixel indexing. */
2019 i_alpha = CLAMP (i_alpha, 0, 255);
2021 for (i = 0; i < h; i++) {
2022 for (j = 0; j < w; j++) {
2024 r = src[in_bpp * j + p_in[1]];
2025 g = src[in_bpp * j + p_in[2]];
2026 b = src[in_bpp * j + p_in[3]];
2028 y = APPLY_MATRIX (matrix, 0, r, g, b);
2029 u = APPLY_MATRIX (matrix, 1, r, g, b);
2030 v = APPLY_MATRIX (matrix, 2, r, g, b);
2032 dest[4 * j + 0] = a;
2033 dest[4 * j + 1] = CLAMP (y, 0, 255);
2034 dest[4 * j + 2] = CLAMP (u, 0, 255);
2035 dest[4 * j + 3] = CLAMP (v, 0, 255);
2037 dest += dest_stride;
/* copy_ayuv_rgb32:
 * Copies a w x h AYUV region into an RGB(A) destination at
 * (dest_x, dest_y), converting each pixel through the YCbCr->RGB matrix
 * that matches the source colorimetry (@src_sdtv).
 *
 * NOTE(review): this extract has dropped lines (see the gaps in the
 * numeric prefixes): local declarations, the `memcpy (matrix,` opener,
 * the branch headers/conditions in front of the loops, the
 * `y = src[j + 1]; u = src[j + 2]; v = src[j + 3];` loads inside each
 * loop, per-row `src += src_stride;` and closing braces.  Confirm
 * against the full file; code tokens below are left untouched.
 */
2044 copy_ayuv_rgb32 (guint i_alpha, GstVideoFrame * dest_frame,
2045 gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src_frame,
2046 gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
2049 gint src_stride, dest_stride;
2053 gboolean packed_out;
2060 src_stride = GST_VIDEO_FRAME_PLANE_STRIDE (src_frame, 0);
2061 dest_stride = GST_VIDEO_FRAME_PLANE_STRIDE (dest_frame, 0);
2062 out_bpp = GST_VIDEO_FRAME_COMP_PSTRIDE (dest_frame, 0);
/* "packed" destination = 3 bytes per pixel (no alpha/padding byte) */
2063 packed_out = (out_bpp < 4);
/* p_out[0] = alpha byte offset, p_out[1..3] = R, G, B byte offsets */
2065 out_alpha = GST_VIDEO_INFO_HAS_ALPHA (&dest_frame->info);
2066 p_out[0] = GST_VIDEO_FRAME_COMP_OFFSET (dest_frame, 3);
2067 p_out[1] = GST_VIDEO_FRAME_COMP_OFFSET (dest_frame, 0);
2068 p_out[2] = GST_VIDEO_FRAME_COMP_OFFSET (dest_frame, 1);
2069 p_out[3] = GST_VIDEO_FRAME_COMP_OFFSET (dest_frame, 2);
/* YCbCr -> RGB matrix matching the source's colorimetry */
2072 (src_sdtv) ? cog_ycbcr_to_rgb_matrix_8bit_sdtv :
2073 cog_ycbcr_to_rgb_matrix_8bit_hdtv, 12 * sizeof (gint));
2075 dest = GST_VIDEO_FRAME_PLANE_DATA (dest_frame, 0);
2076 dest = dest + dest_y * dest_stride + dest_x * out_bpp;
2077 src = GST_VIDEO_FRAME_PLANE_DATA (src_frame, 0);
2078 src = src + src_y * src_stride + src_x * 4;
/* Branch 1: destination has alpha -- source alpha scaled by i_alpha. */
2082 for (i = 0; i < h; i++) {
2083 for (j = 0; j < w; j += 4) {
2084 a = (src[j + 0] * i_alpha) >> 8;
2089 r = APPLY_MATRIX (matrix, 0, y, u, v);
2090 g = APPLY_MATRIX (matrix, 1, y, u, v);
2091 b = APPLY_MATRIX (matrix, 2, y, u, v);
2093 dest[j + p_out[0]] = a;
2094 dest[j + p_out[1]] = CLAMP (r, 0, 255);
2095 dest[j + p_out[2]] = CLAMP (g, 0, 255);
2096 dest[j + p_out[3]] = CLAMP (b, 0, 255);
2098 dest += dest_stride;
/* Branch 2: 4-bpp destination without alpha -- RGB only. */
2101 } else if (!packed_out) {
2103 for (i = 0; i < h; i++) {
2104 for (j = 0; j < w; j += 4) {
2109 r = APPLY_MATRIX (matrix, 0, y, u, v);
2110 g = APPLY_MATRIX (matrix, 1, y, u, v);
2111 b = APPLY_MATRIX (matrix, 2, y, u, v);
2113 dest[j + p_out[1]] = CLAMP (r, 0, 255);
2114 dest[j + p_out[2]] = CLAMP (g, 0, 255);
2115 dest[j + p_out[3]] = CLAMP (b, 0, 255);
2117 dest += dest_stride;
/* Branch 3: 3-bpp destination -- per-pixel indexing with out_bpp. */
2121 for (i = 0; i < h; i++) {
2122 for (j = 0; j < w; j++) {
2127 r = APPLY_MATRIX (matrix, 0, y, u, v);
2128 g = APPLY_MATRIX (matrix, 1, y, u, v);
2129 b = APPLY_MATRIX (matrix, 2, y, u, v);
2131 dest[out_bpp * j + p_out[1]] = CLAMP (r, 0, 255);
2132 dest[out_bpp * j + p_out[2]] = CLAMP (g, 0, 255);
2133 dest[out_bpp * j + p_out[3]] = CLAMP (b, 0, 255);
2135 dest += dest_stride;
2142 fill_gray (GstVideoBoxFill fill_type, guint b_alpha,
2143 GstVideoFrame * frame, gboolean sdtv)
2149 GstVideoFormat format;
2151 format = GST_VIDEO_FRAME_FORMAT (frame);
2153 width = GST_VIDEO_FRAME_WIDTH (frame);
2154 height = GST_VIDEO_FRAME_HEIGHT (frame);
2156 dest = GST_VIDEO_FRAME_PLANE_DATA (frame, 0);
2157 dest_stride = GST_VIDEO_FRAME_PLANE_STRIDE (frame, 0);
2159 if (format == GST_VIDEO_FORMAT_GRAY8) {
2160 guint8 val = yuv_sdtv_colors_Y[fill_type];
2162 for (i = 0; i < height; i++) {
2163 memset (dest, val, width);
2164 dest += dest_stride;
2167 guint16 val = yuv_sdtv_colors_Y[fill_type] << 8;
2169 if (format == GST_VIDEO_FORMAT_GRAY16_BE) {
2170 for (i = 0; i < height; i++) {
2171 for (j = 0; j < width; j++) {
2172 GST_WRITE_UINT16_BE (dest + 2 * j, val);
2174 dest += dest_stride;
2177 for (i = 0; i < height; i++) {
2178 for (j = 0; j < width; j++) {
2179 GST_WRITE_UINT16_LE (dest + 2 * j, val);
2181 dest += dest_stride;
2188 copy_packed_simple (guint i_alpha, GstVideoFrame * dest_frame,
2189 gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src_frame,
2190 gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
2193 gint src_stride, dest_stride;
2194 gint pixel_stride, row_size;
2197 src_stride = GST_VIDEO_FRAME_PLANE_STRIDE (src_frame, 0);
2198 dest_stride = GST_VIDEO_FRAME_PLANE_STRIDE (dest_frame, 0);
2199 pixel_stride = GST_VIDEO_FRAME_COMP_PSTRIDE (dest_frame, 0);
2200 row_size = w * pixel_stride;
2202 dest = GST_VIDEO_FRAME_PLANE_DATA (dest_frame, 0);
2203 dest = dest + dest_y * dest_stride + dest_x * pixel_stride;
2204 src = GST_VIDEO_FRAME_PLANE_DATA (src_frame, 0);
2205 src = src + src_y * src_stride + src_x * pixel_stride;
2207 for (i = 0; i < h; i++) {
2208 memcpy (dest, src, row_size);
2209 dest += dest_stride;
/* fill_yuy2:
 * Fills a packed-4:2:2 frame (YUY2, YVYU or UYVY) with the solid border
 * color selected by fill_type, using the SDTV (BT.601) or HDTV (BT.709)
 * Y'CbCr triplet depending on the frame's colorimetry.  b_alpha is unused:
 * these formats carry no alpha channel.
 * NOTE(review): lines appear missing from this extraction (the UYVY branch
 * header and the per-row `dest += stride;` advances). */
2215 fill_yuy2 (GstVideoBoxFill fill_type, guint b_alpha,
2216 GstVideoFrame * frame, gboolean sdtv)
2223 GstVideoFormat format;
2225 format = GST_VIDEO_FRAME_FORMAT (frame);
2227 width = GST_VIDEO_FRAME_WIDTH (frame);
2228 height = GST_VIDEO_FRAME_HEIGHT (frame);
2230 dest = GST_VIDEO_FRAME_PLANE_DATA (frame, 0);
2231 stride = GST_VIDEO_FRAME_PLANE_STRIDE (frame, 0);
/* Look up the fill color in the colorimetry-matched tables. */
2233 y = (sdtv) ? yuv_sdtv_colors_Y[fill_type] : yuv_hdtv_colors_Y[fill_type];
2234 u = (sdtv) ? yuv_sdtv_colors_U[fill_type] : yuv_hdtv_colors_U[fill_type];
2235 v = (sdtv) ? yuv_sdtv_colors_V[fill_type] : yuv_hdtv_colors_V[fill_type];
/* Round width up to even so whole macropixels are written. */
2237 width = width + (width % 2);
2239 if (format == GST_VIDEO_FORMAT_YUY2) {
/* YUY2 macropixel layout: Y0 U Y1 V */
2240 for (i = 0; i < height; i++) {
2241 for (j = 0; j < width; j += 2) {
2242 dest[j * 2 + 0] = y;
2243 dest[j * 2 + 1] = u;
2244 dest[j * 2 + 2] = y;
2245 dest[j * 2 + 3] = v;
2250 } else if (format == GST_VIDEO_FORMAT_YVYU) {
/* YVYU macropixel layout: Y0 V Y1 U */
2251 for (i = 0; i < height; i++) {
2252 for (j = 0; j < width; j += 2) {
2253 dest[j * 2 + 0] = y;
2254 dest[j * 2 + 1] = v;
2255 dest[j * 2 + 2] = y;
2256 dest[j * 2 + 3] = u;
/* else: UYVY macropixel layout: U Y0 V Y1 */
2262 for (i = 0; i < height; i++) {
2263 for (j = 0; j < width; j += 2) {
2264 dest[j * 2 + 0] = u;
2265 dest[j * 2 + 1] = y;
2266 dest[j * 2 + 2] = v;
2267 dest[j * 2 + 3] = y;
2276 copy_yuy2_yuy2 (guint i_alpha, GstVideoFrame * dest_frame,
2277 gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src_frame,
2278 gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
2281 gint src_stride, dest_stride;
2283 GstVideoFormat src_format;
2285 src_format = GST_VIDEO_FRAME_FORMAT (src_frame);
2287 src_stride = GST_VIDEO_FRAME_PLANE_STRIDE (src_frame, 0);
2288 dest_stride = GST_VIDEO_FRAME_PLANE_STRIDE (dest_frame, 0);
2290 dest_x = (dest_x & ~1);
2291 src_x = (src_x & ~1);
2295 dest = GST_VIDEO_FRAME_PLANE_DATA (dest_frame, 0);
2296 dest = dest + dest_y * dest_stride + dest_x * 2;
2297 src = GST_VIDEO_FRAME_PLANE_DATA (src_frame, 0);
2298 src = src + src_y * src_stride + src_x * 2;
2300 if (src_sdtv != dest_sdtv) {
2306 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
2307 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
2309 if (src_format == GST_VIDEO_FORMAT_YUY2) {
2310 for (i = 0; i < h; i++) {
2311 for (j = 0; j < w; j += 2) {
2312 y1 = src[j * 2 + 0];
2313 y2 = src[j * 2 + 2];
2314 u1 = u2 = src[j * 2 + 1];
2315 v1 = v2 = src[j * 2 + 3];
2317 dest[j * 2 + 0] = APPLY_MATRIX (matrix, 0, y1, u1, v1);
2318 dest[j * 2 + 1] = APPLY_MATRIX (matrix, 1, y1, u1, v1);
2319 dest[j * 2 + 2] = APPLY_MATRIX (matrix, 0, y1, u2, v2);
2320 dest[j * 2 + 3] = APPLY_MATRIX (matrix, 2, y2, u2, v2);
2322 dest += dest_stride;
2325 } else if (src_format == GST_VIDEO_FORMAT_YVYU) {
2326 for (i = 0; i < h; i++) {
2327 for (j = 0; j < w; j += 2) {
2328 y1 = src[j * 2 + 0];
2329 y2 = src[j * 2 + 2];
2330 v1 = v2 = src[j * 2 + 1];
2331 u1 = u2 = src[j * 2 + 3];
2333 dest[j * 2 + 0] = APPLY_MATRIX (matrix, 0, y1, u1, v1);
2334 dest[j * 2 + 1] = APPLY_MATRIX (matrix, 2, y1, u1, v1);
2335 dest[j * 2 + 2] = APPLY_MATRIX (matrix, 0, y1, u2, v2);
2336 dest[j * 2 + 3] = APPLY_MATRIX (matrix, 1, y2, u2, v2);
2338 dest += dest_stride;
2342 for (i = 0; i < h; i++) {
2343 for (j = 0; j < w; j += 2) {
2344 u1 = u2 = src[j * 2 + 0];
2345 v1 = v2 = src[j * 2 + 2];
2346 y1 = src[j * 2 + 1];
2347 y2 = src[j * 2 + 3];
2349 dest[j * 2 + 1] = APPLY_MATRIX (matrix, 0, y1, u1, v1);
2350 dest[j * 2 + 0] = APPLY_MATRIX (matrix, 1, y1, u1, v1);
2351 dest[j * 2 + 3] = APPLY_MATRIX (matrix, 0, y1, u2, v2);
2352 dest[j * 2 + 2] = APPLY_MATRIX (matrix, 2, y2, u2, v2);
2354 dest += dest_stride;
2359 for (i = 0; i < h; i++) {
2360 memcpy (dest, src, w * 2);
2361 dest += dest_stride;
/* Default values for the element's properties: no cropping/border on any
 * side, black fill, fully opaque picture and border. */
2367 #define DEFAULT_LEFT 0
2368 #define DEFAULT_RIGHT 0
2369 #define DEFAULT_TOP 0
2370 #define DEFAULT_BOTTOM 0
2371 #define DEFAULT_FILL_TYPE VIDEO_BOX_FILL_BLACK
2372 #define DEFAULT_ALPHA 1.0
2373 #define DEFAULT_BORDER_ALPHA 1.0
/* Source and sink pad templates: both sides accept the same set of raw
 * video formats (alpha-capable formats listed first so alpha-preserving
 * negotiation is preferred). */
2389 static GstStaticPadTemplate gst_video_box_src_template =
2390 GST_STATIC_PAD_TEMPLATE ("src",
2393 GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("{ AYUV, "
2394 "ARGB, BGRA, ABGR, RGBA, xRGB, BGRx, xBGR, RGBx, RGB, BGR, "
2395 "Y444, Y42B, YUY2, YVYU, UYVY, I420, YV12, Y41B, "
2396 "GRAY8, GRAY16_BE, GRAY16_LE } "))
2399 static GstStaticPadTemplate gst_video_box_sink_template =
2400 GST_STATIC_PAD_TEMPLATE ("sink",
2403 GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("{ AYUV, "
2404 "ARGB, BGRA, ABGR, RGBA, xRGB, BGRx, xBGR, RGBx, RGB, BGR, "
2405 "Y444, Y42B, YUY2, YVYU, UYVY, I420, YV12, Y41B, "
2406 "GRAY8, GRAY16_BE, GRAY16_LE } "))
/* GObject type boilerplate and forward declarations for the vmethod
 * implementations registered in class_init below. */
2409 #define gst_video_box_parent_class parent_class
2410 G_DEFINE_TYPE (GstVideoBox, gst_video_box, GST_TYPE_VIDEO_FILTER);
2412 static void gst_video_box_set_property (GObject * object, guint prop_id,
2413 const GValue * value, GParamSpec * pspec);
2414 static void gst_video_box_get_property (GObject * object, guint prop_id,
2415 GValue * value, GParamSpec * pspec);
2417 static gboolean gst_video_box_recalc_transform (GstVideoBox * video_box);
2418 static GstCaps *gst_video_box_transform_caps (GstBaseTransform * trans,
2419 GstPadDirection direction, GstCaps * from, GstCaps * filter);
2420 static void gst_video_box_before_transform (GstBaseTransform * trans,
2422 static gboolean gst_video_box_src_event (GstBaseTransform * trans,
2425 static gboolean gst_video_box_set_info (GstVideoFilter * vfilter, GstCaps * in,
2426 GstVideoInfo * in_info, GstCaps * out, GstVideoInfo * out_info);
2427 static GstFlowReturn gst_video_box_transform_frame (GstVideoFilter * vfilter,
2428 GstVideoFrame * in_frame, GstVideoFrame * out_frame);
2430 #define GST_TYPE_VIDEO_BOX_FILL (gst_video_box_fill_get_type())
/* gst_video_box_fill_get_type:
 * Lazily registers and returns the GEnum type for the "fill" property
 * (border color).  g_enum_register_static is effectively idempotent here
 * because the result is cached in a static. */
2432 gst_video_box_fill_get_type (void)
2434 static GType video_box_fill_type = 0;
2435 static const GEnumValue video_box_fill[] = {
2436 {VIDEO_BOX_FILL_BLACK, "Black", "black"},
2437 {VIDEO_BOX_FILL_GREEN, "Green", "green"},
2438 {VIDEO_BOX_FILL_BLUE, "Blue", "blue"},
2439 {VIDEO_BOX_FILL_RED, "Red", "red"},
2440 {VIDEO_BOX_FILL_YELLOW, "Yellow", "yellow"},
2441 {VIDEO_BOX_FILL_WHITE, "White", "white"},
2445 if (!video_box_fill_type) {
2446 video_box_fill_type =
2447 g_enum_register_static ("GstVideoBoxFill", video_box_fill);
2449 return video_box_fill_type;
/* gst_video_box_finalize:
 * Releases the instance mutex and chains up to the parent finalizer. */
2453 gst_video_box_finalize (GObject * object)
2455 GstVideoBox *video_box = GST_VIDEO_BOX (object);
2457 g_mutex_clear (&video_box->mutex);
2459 G_OBJECT_CLASS (parent_class)->finalize (object);
/* gst_video_box_class_init:
 * Installs the element's properties (all controllable except "autocrop"),
 * wires up the GstBaseTransform/GstVideoFilter vmethods, and registers the
 * element metadata and pad templates. */
2463 gst_video_box_class_init (GstVideoBoxClass * klass)
2465 GObjectClass *gobject_class = (GObjectClass *) klass;
2466 GstElementClass *element_class = (GstElementClass *) (klass);
2467 GstBaseTransformClass *trans_class = (GstBaseTransformClass *) klass;
2468 GstVideoFilterClass *vfilter_class = (GstVideoFilterClass *) klass;
2470 gobject_class->set_property = gst_video_box_set_property;
2471 gobject_class->get_property = gst_video_box_get_property;
2472 gobject_class->finalize = gst_video_box_finalize;
2474 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_FILL_TYPE,
2475 g_param_spec_enum ("fill", "Fill", "How to fill the borders",
2476 GST_TYPE_VIDEO_BOX_FILL, DEFAULT_FILL_TYPE,
2477 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
/* The four box properties take any gint: positive crops, negative adds a
 * border of that many pixels. */
2478 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_LEFT,
2479 g_param_spec_int ("left", "Left",
2480 "Pixels to box at left (<0 = add a border)", G_MININT, G_MAXINT,
2482 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
2483 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_RIGHT,
2484 g_param_spec_int ("right", "Right",
2485 "Pixels to box at right (<0 = add a border)", G_MININT, G_MAXINT,
2487 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
2488 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_TOP,
2489 g_param_spec_int ("top", "Top",
2490 "Pixels to box at top (<0 = add a border)", G_MININT, G_MAXINT,
2492 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
2493 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_BOTTOM,
2494 g_param_spec_int ("bottom", "Bottom",
2495 "Pixels to box at bottom (<0 = add a border)", G_MININT, G_MAXINT,
2497 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
2498 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_ALPHA,
2499 g_param_spec_double ("alpha", "Alpha", "Alpha value picture", 0.0, 1.0,
2501 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
2502 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_BORDER_ALPHA,
2503 g_param_spec_double ("border-alpha", "Border Alpha",
2504 "Alpha value of the border", 0.0, 1.0, DEFAULT_BORDER_ALPHA,
2505 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
2507 * GstVideoBox:autocrop:
2509 * If set to %TRUE videobox will automatically crop/pad the input
2510 * video to be centered in the output.
2512 g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_AUTOCROP,
2513 g_param_spec_boolean ("autocrop", "Auto crop",
2514 "Auto crop", FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
2516 trans_class->before_transform =
2517 GST_DEBUG_FUNCPTR (gst_video_box_before_transform);
2518 trans_class->transform_caps =
2519 GST_DEBUG_FUNCPTR (gst_video_box_transform_caps);
2520 trans_class->src_event = GST_DEBUG_FUNCPTR (gst_video_box_src_event);
2522 vfilter_class->set_info = GST_DEBUG_FUNCPTR (gst_video_box_set_info);
2523 vfilter_class->transform_frame =
2524 GST_DEBUG_FUNCPTR (gst_video_box_transform_frame);
2526 gst_element_class_set_static_metadata (element_class, "Video box filter",
2527 "Filter/Effect/Video",
2528 "Resizes a video by adding borders or cropping",
2529 "Wim Taymans <wim@fluendo.com>");
2531 gst_element_class_add_static_pad_template (element_class,
2532 &gst_video_box_sink_template);
2533 gst_element_class_add_static_pad_template (element_class,
2534 &gst_video_box_src_template);
/* gst_video_box_init:
 * Initializes an instance with the property defaults (no crop/border,
 * black fill, opaque) and creates the mutex that serializes property
 * changes against frame processing. */
2538 gst_video_box_init (GstVideoBox * video_box)
2540 video_box->box_right = DEFAULT_RIGHT;
2541 video_box->box_left = DEFAULT_LEFT;
2542 video_box->box_top = DEFAULT_TOP;
2543 video_box->box_bottom = DEFAULT_BOTTOM;
2544 video_box->crop_right = 0;
2545 video_box->crop_left = 0;
2546 video_box->crop_top = 0;
2547 video_box->crop_bottom = 0;
2548 video_box->fill_type = DEFAULT_FILL_TYPE;
2549 video_box->alpha = DEFAULT_ALPHA;
2550 video_box->border_alpha = DEFAULT_BORDER_ALPHA;
2551 video_box->autocrop = FALSE;
2553 g_mutex_init (&video_box->mutex);
/* gst_video_box_set_property:
 * Property setter.  Each box value is split into its derived border_*
 * (negative input => border of that many pixels) and crop_* (positive
 * input) fields.  After any change the transform is recalculated and a
 * src-side renegotiation is requested, all under the instance mutex.
 * NOTE(review): case labels for LEFT/RIGHT/TOP/BOTTOM/ALPHA/AUTOCROP and
 * the switch statement itself appear to have been dropped by the
 * extraction. */
2557 gst_video_box_set_property (GObject * object, guint prop_id,
2558 const GValue * value, GParamSpec * pspec)
2560 GstVideoBox *video_box = GST_VIDEO_BOX (object);
2562 g_mutex_lock (&video_box->mutex);
2565 video_box->box_left = g_value_get_int (value);
2566 if (video_box->box_left < 0) {
2567 video_box->border_left = -video_box->box_left;
2568 video_box->crop_left = 0;
2570 video_box->border_left = 0;
2571 video_box->crop_left = video_box->box_left;
2575 video_box->box_right = g_value_get_int (value);
2576 if (video_box->box_right < 0) {
2577 video_box->border_right = -video_box->box_right;
2578 video_box->crop_right = 0;
2580 video_box->border_right = 0;
2581 video_box->crop_right = video_box->box_right;
2585 video_box->box_top = g_value_get_int (value);
2586 if (video_box->box_top < 0) {
2587 video_box->border_top = -video_box->box_top;
2588 video_box->crop_top = 0;
2590 video_box->border_top = 0;
2591 video_box->crop_top = video_box->box_top;
2595 video_box->box_bottom = g_value_get_int (value);
2596 if (video_box->box_bottom < 0) {
2597 video_box->border_bottom = -video_box->box_bottom;
2598 video_box->crop_bottom = 0;
2600 video_box->border_bottom = 0;
2601 video_box->crop_bottom = video_box->box_bottom;
2604 case PROP_FILL_TYPE:
2605 video_box->fill_type = g_value_get_enum (value);
2608 video_box->alpha = g_value_get_double (value);
2610 case PROP_BORDER_ALPHA:
2611 video_box->border_alpha = g_value_get_double (value);
2614 video_box->autocrop = g_value_get_boolean (value);
2617 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
/* Update passthrough decision and ask basetransform to renegotiate. */
2620 gst_video_box_recalc_transform (video_box);
2622 GST_DEBUG_OBJECT (video_box, "Calling reconfigure");
2623 gst_base_transform_reconfigure_src (GST_BASE_TRANSFORM_CAST (video_box));
2625 g_mutex_unlock (&video_box->mutex);
/* gst_video_box_autocrop:
 * Derives the box_* / border_* / crop_* values automatically so the input
 * is centered in the negotiated output: half the width/height difference
 * goes to each side.  Called from set_info when the "autocrop" property
 * is enabled.
 * NOTE(review): the odd-difference rounding adjustments referenced by the
 * comments below appear to have been dropped by the extraction. */
2629 gst_video_box_autocrop (GstVideoBox * video_box)
2631 gint crop_w = video_box->in_width - video_box->out_width;
2632 gint crop_h = video_box->in_height - video_box->out_height;
2634 video_box->box_left = crop_w / 2;
2635 if (video_box->box_left < 0) {
2636 video_box->border_left = -video_box->box_left;
2637 video_box->crop_left = 0;
2639 video_box->border_left = 0;
2640 video_box->crop_left = video_box->box_left;
2643 /* Round down/up for odd width differences */
2649 video_box->box_right = crop_w / 2;
2650 if (video_box->box_right < 0) {
2651 video_box->border_right = -video_box->box_right;
2652 video_box->crop_right = 0;
2654 video_box->border_right = 0;
2655 video_box->crop_right = video_box->box_right;
2658 video_box->box_top = crop_h / 2;
2659 if (video_box->box_top < 0) {
2660 video_box->border_top = -video_box->box_top;
2661 video_box->crop_top = 0;
2663 video_box->border_top = 0;
2664 video_box->crop_top = video_box->box_top;
2667 /* Round down/up for odd height differences */
2672 video_box->box_bottom = crop_h / 2;
2674 if (video_box->box_bottom < 0) {
2675 video_box->border_bottom = -video_box->box_bottom;
2676 video_box->crop_bottom = 0;
2678 video_box->border_bottom = 0;
2679 video_box->crop_bottom = video_box->box_bottom;
/* gst_video_box_get_property:
 * Property getter; returns the user-set box values (not the derived
 * border_*/crop_* fields).
 * NOTE(review): the switch statement and several case labels appear to
 * have been dropped by the extraction. */
2684 gst_video_box_get_property (GObject * object, guint prop_id, GValue * value,
2687 GstVideoBox *video_box = GST_VIDEO_BOX (object);
2691 g_value_set_int (value, video_box->box_left);
2694 g_value_set_int (value, video_box->box_right);
2697 g_value_set_int (value, video_box->box_top);
2700 g_value_set_int (value, video_box->box_bottom);
2702 case PROP_FILL_TYPE:
2703 g_value_set_enum (value, video_box->fill_type);
2706 g_value_set_double (value, video_box->alpha);
2708 case PROP_BORDER_ALPHA:
2709 g_value_set_double (value, video_box->border_alpha);
2712 g_value_set_boolean (value, video_box->autocrop);
2715 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
/* gst_video_box_transform_dimension:
 * Applies a signed pixel delta to a dimension, doing the addition in 64
 * bits to avoid gint overflow, and clamps the result into [1, G_MAXINT]
 * so a valid caps dimension is always produced. */
2721 gst_video_box_transform_dimension (gint val, gint delta)
2723 gint64 new_val = (gint64) val + (gint64) delta;
2725 new_val = CLAMP (new_val, 1, G_MAXINT);
2727 return (gint) new_val;
/* gst_video_box_transform_dimension_value:
 * Applies a pixel delta to a caps dimension GValue, handling plain ints,
 * int ranges and lists (recursively).  Returns TRUE and initializes
 * dest_val on success; on failure (unsupported type, or a list/range that
 * collapses to nothing) dest_val is unset and FALSE is returned.
 * NOTE(review): some lines (e.g. the range-collapse handling between the
 * min/max computation and gst_value_set_int_range, and the trailing
 * return) appear to have been dropped by the extraction. */
2731 gst_video_box_transform_dimension_value (const GValue * src_val,
2732 gint delta, GValue * dest_val)
2734 gboolean ret = TRUE;
2736 g_value_init (dest_val, G_VALUE_TYPE (src_val));
2738 if (G_VALUE_HOLDS_INT (src_val)) {
2739 gint ival = g_value_get_int (src_val);
2741 ival = gst_video_box_transform_dimension (ival, delta);
2742 g_value_set_int (dest_val, ival);
2743 } else if (GST_VALUE_HOLDS_INT_RANGE (src_val)) {
2744 gint min = gst_value_get_int_range_min (src_val);
2745 gint max = gst_value_get_int_range_max (src_val);
2747 min = gst_video_box_transform_dimension (min, delta);
2748 max = gst_video_box_transform_dimension (max, delta);
2751 g_value_unset (dest_val);
2753 gst_value_set_int_range (dest_val, min, max);
2755 } else if (GST_VALUE_HOLDS_LIST (src_val)) {
/* Transform each list entry; entries that fail are simply skipped. */
2758 for (i = 0; i < gst_value_list_get_size (src_val); ++i) {
2759 const GValue *list_val;
2760 GValue newval = { 0, };
2762 list_val = gst_value_list_get_value (src_val, i);
2763 if (gst_video_box_transform_dimension_value (list_val, delta, &newval))
2764 gst_value_list_append_value (dest_val, &newval);
2765 g_value_unset (&newval);
/* An empty result list means nothing survived the transform. */
2768 if (gst_value_list_get_size (dest_val) == 0) {
2769 g_value_unset (dest_val);
2773 g_value_unset (dest_val);
/* gst_video_box_transform_caps:
 * GstBaseTransform transform_caps vmethod.  For each structure of the
 * input caps it (a) adjusts width/height by the configured box values (or
 * removes them entirely in autocrop mode, where caps drive the crop), and
 * (b) widens the "format" field with the conversions this element can
 * perform, then intersects with the peer template and the optional filter
 * caps.
 * NOTE(review): the declarations block, some branch bodies and the error
 * path surrounding the trailing gst_structure_free appear to have been
 * dropped by the extraction. */
2781 gst_video_box_transform_caps (GstBaseTransform * trans,
2782 GstPadDirection direction, GstCaps * from, GstCaps * filter)
2784 GstVideoBox *video_box = GST_VIDEO_BOX (trans);
2787 GstStructure *structure;
2791 to = gst_caps_new_empty ();
2792 for (i = 0; i < gst_caps_get_size (from); i++) {
2793 const GValue *fval, *lval;
2794 GValue list = { 0, };
2795 GValue val = { 0, };
2796 gboolean seen_yuv = FALSE, seen_rgb = FALSE;
2799 structure = gst_structure_copy (gst_caps_get_structure (from, i));
2801 /* Transform width/height */
2802 if (video_box->autocrop) {
2803 gst_structure_remove_field (structure, "width");
2804 gst_structure_remove_field (structure, "height");
2806 gint dw = 0, dh = 0;
2808 GValue w_val = { 0, };
2809 GValue h_val = { 0, };
2811 /* calculate width and height */
/* Sink direction: output shrinks by the box values; src direction:
 * input grows by them (the inverse transform). */
2812 if (direction == GST_PAD_SINK) {
2813 dw -= video_box->box_left;
2814 dw -= video_box->box_right;
2816 dw += video_box->box_left;
2817 dw += video_box->box_right;
2820 if (direction == GST_PAD_SINK) {
2821 dh -= video_box->box_top;
2822 dh -= video_box->box_bottom;
2824 dh += video_box->box_top;
2825 dh += video_box->box_bottom;
2828 v = gst_structure_get_value (structure, "width");
2829 if (!gst_video_box_transform_dimension_value (v, dw, &w_val)) {
2830 GST_WARNING_OBJECT (video_box,
2831 "could not tranform width value with dw=%d" ", caps structure=%"
2832 GST_PTR_FORMAT, dw, structure);
2835 gst_structure_set_value (structure, "width", &w_val);
2837 v = gst_structure_get_value (structure, "height");
2838 if (!gst_video_box_transform_dimension_value (v, dh, &h_val)) {
2839 g_value_unset (&w_val);
2840 GST_WARNING_OBJECT (video_box,
2841 "could not tranform height value with dh=%d" ", caps structure=%"
2842 GST_PTR_FORMAT, dh, structure);
2845 gst_structure_set_value (structure, "height", &h_val);
2846 g_value_unset (&w_val);
2847 g_value_unset (&h_val);
2850 /* Supported conversions:
2857 * AYUV->xRGB (24bpp, 32bpp, incl. alpha)
2858 * xRGB->xRGB (24bpp, 32bpp, from/to all variants, incl. alpha)
2859 * xRGB->AYUV (24bpp, 32bpp, incl. alpha)
2861 * Passthrough only for everything else.
/* Scan the structure's format(s) to see which conversion families
 * apply; handles both a list of formats and a single string. */
2863 fval = gst_structure_get_value (structure, "format");
2864 if (fval && GST_VALUE_HOLDS_LIST (fval)) {
2865 for (j = 0; j < gst_value_list_get_size (fval); j++) {
2866 lval = gst_value_list_get_value (fval, j);
2867 if ((str = g_value_get_string (lval))) {
2868 if (strcmp (str, "AYUV") == 0) {
2872 } else if (strstr (str, "RGB") || strstr (str, "BGR")) {
2874 } else if (strcmp (str, "I420") == 0 || strcmp (str, "YV12") == 0) {
2879 } else if (fval && G_VALUE_HOLDS_STRING (fval)) {
2880 if ((str = g_value_get_string (fval))) {
2881 if (strcmp (str, "AYUV") == 0) {
2884 } else if (strstr (str, "RGB") || strstr (str, "BGR")) {
2886 } else if (strcmp (str, "I420") == 0 || strcmp (str, "YV12") == 0) {
/* Build the widened format list for the structures we can convert. */
2892 if (seen_yuv || seen_rgb) {
2893 g_value_init (&list, GST_TYPE_LIST);
2895 g_value_init (&val, G_TYPE_STRING);
2896 g_value_set_string (&val, "AYUV");
2897 gst_value_list_append_value (&list, &val);
2898 g_value_unset (&val);
2901 g_value_init (&val, G_TYPE_STRING);
2902 g_value_set_string (&val, "I420");
2903 gst_value_list_append_value (&list, &val);
2904 g_value_reset (&val);
2905 g_value_set_string (&val, "YV12");
2906 gst_value_list_append_value (&list, &val);
2907 g_value_unset (&val);
2910 g_value_init (&val, G_TYPE_STRING);
2911 g_value_set_string (&val, "RGBx");
2912 gst_value_list_append_value (&list, &val);
2913 g_value_reset (&val);
2914 g_value_set_string (&val, "BGRx");
2915 gst_value_list_append_value (&list, &val);
2916 g_value_reset (&val);
2917 g_value_set_string (&val, "xRGB");
2918 gst_value_list_append_value (&list, &val);
2919 g_value_reset (&val);
2920 g_value_set_string (&val, "xBGR");
2921 gst_value_list_append_value (&list, &val);
2922 g_value_reset (&val);
2923 g_value_set_string (&val, "RGBA");
2924 gst_value_list_append_value (&list, &val);
2925 g_value_reset (&val);
2926 g_value_set_string (&val, "BGRA");
2927 gst_value_list_append_value (&list, &val);
2928 g_value_reset (&val);
2929 g_value_set_string (&val, "ARGB");
2930 gst_value_list_append_value (&list, &val);
2931 g_value_reset (&val);
2932 g_value_set_string (&val, "ABGR");
2933 gst_value_list_append_value (&list, &val);
2934 g_value_reset (&val);
2935 g_value_set_string (&val, "RGB");
2936 gst_value_list_append_value (&list, &val);
2937 g_value_reset (&val);
2938 g_value_set_string (&val, "BGR");
2939 gst_value_list_append_value (&list, &val);
2940 g_value_unset (&val);
2942 gst_value_list_merge (&val, fval, &list);
2943 gst_structure_set_value (structure, "format", &val);
2944 g_value_unset (&val);
2945 g_value_unset (&list);
/* Colorimetry/chroma-site may change with the format, so drop them. */
2948 gst_structure_remove_field (structure, "colorimetry");
2949 gst_structure_remove_field (structure, "chroma-site");
2951 gst_caps_append_structure (to, structure);
2954 /* filter against set allowed caps on the pad */
2955 other = (direction == GST_PAD_SINK) ? trans->srcpad : trans->sinkpad;
2956 templ = gst_pad_get_pad_template_caps (other);
2957 ret = gst_caps_intersect (to, templ);
2958 gst_caps_unref (to);
2959 gst_caps_unref (templ);
2961 GST_DEBUG_OBJECT (video_box, "direction %d, transformed %" GST_PTR_FORMAT
2962 " to %" GST_PTR_FORMAT, direction, from, ret);
2964 if (ret && filter) {
2965 GstCaps *intersection;
2967 GST_DEBUG_OBJECT (video_box, "Using filter caps %" GST_PTR_FORMAT, filter);
2969 gst_caps_intersect_full (filter, ret, GST_CAPS_INTERSECT_FIRST);
2970 gst_caps_unref (ret);
2972 GST_DEBUG_OBJECT (video_box, "Intersection %" GST_PTR_FORMAT, ret);
/* Error path: drop the failed structure and restart with empty caps. */
2980 gst_structure_free (structure);
2981 gst_caps_unref (to);
2982 to = gst_caps_new_empty ();
/* gst_video_box_recalc_transform:
 * Enables basetransform passthrough when input and output share the same
 * format and colorimetry and all four box values are zero (nothing to
 * crop, pad or convert); disables it otherwise.  Returns TRUE. */
2988 gst_video_box_recalc_transform (GstVideoBox * video_box)
2990 gboolean res = TRUE;
2992 /* if we have the same format in and out and we don't need to perform any
2993 * cropping at all, we can just operate in passthrough mode */
2994 if (video_box->in_format == video_box->out_format &&
2995 video_box->box_left == 0 && video_box->box_right == 0 &&
2996 video_box->box_top == 0 && video_box->box_bottom == 0 &&
2997 video_box->in_sdtv == video_box->out_sdtv) {
2999 GST_LOG_OBJECT (video_box, "we are using passthrough");
3000 gst_base_transform_set_passthrough (GST_BASE_TRANSFORM_CAST (video_box),
3003 GST_LOG_OBJECT (video_box, "we are not using passthrough");
3004 gst_base_transform_set_passthrough (GST_BASE_TRANSFORM_CAST (video_box),
/* gst_video_box_select_processing_functions:
 * Dispatches on the negotiated output format (then input format) to pick
 * the fill function used for borders and the copy function used for the
 * picture region.  Returns TRUE only if both were assigned, i.e. the
 * in/out format pair is one of the supported conversions.
 * NOTE(review): break statements and default cases appear to have been
 * dropped by the extraction. */
3011 gst_video_box_select_processing_functions (GstVideoBox * video_box)
3013 switch (video_box->out_format) {
3014 case GST_VIDEO_FORMAT_AYUV:
3015 video_box->fill = fill_ayuv;
3016 switch (video_box->in_format) {
3017 case GST_VIDEO_FORMAT_AYUV:
3018 video_box->copy = copy_ayuv_ayuv;
3020 case GST_VIDEO_FORMAT_I420:
3021 case GST_VIDEO_FORMAT_YV12:
3022 video_box->copy = copy_i420_ayuv;
3024 case GST_VIDEO_FORMAT_ARGB:
3025 case GST_VIDEO_FORMAT_ABGR:
3026 case GST_VIDEO_FORMAT_RGBA:
3027 case GST_VIDEO_FORMAT_BGRA:
3028 case GST_VIDEO_FORMAT_xRGB:
3029 case GST_VIDEO_FORMAT_xBGR:
3030 case GST_VIDEO_FORMAT_RGBx:
3031 case GST_VIDEO_FORMAT_BGRx:
3032 case GST_VIDEO_FORMAT_RGB:
3033 case GST_VIDEO_FORMAT_BGR:
3034 video_box->copy = copy_rgb32_ayuv;
3040 case GST_VIDEO_FORMAT_I420:
3041 case GST_VIDEO_FORMAT_YV12:
3042 video_box->fill = fill_planar_yuv;
3043 switch (video_box->in_format) {
3044 case GST_VIDEO_FORMAT_AYUV:
3045 video_box->copy = copy_ayuv_i420;
3047 case GST_VIDEO_FORMAT_I420:
3048 case GST_VIDEO_FORMAT_YV12:
3049 video_box->copy = copy_i420_i420;
3055 case GST_VIDEO_FORMAT_ARGB:
3056 case GST_VIDEO_FORMAT_ABGR:
3057 case GST_VIDEO_FORMAT_RGBA:
3058 case GST_VIDEO_FORMAT_BGRA:
3059 case GST_VIDEO_FORMAT_xRGB:
3060 case GST_VIDEO_FORMAT_xBGR:
3061 case GST_VIDEO_FORMAT_RGBx:
3062 case GST_VIDEO_FORMAT_BGRx:
3063 case GST_VIDEO_FORMAT_RGB:
3064 case GST_VIDEO_FORMAT_BGR:
/* 24bpp RGB/BGR needs the 3-byte fill; the rest use the 4-byte one. */
3065 video_box->fill = (video_box->out_format == GST_VIDEO_FORMAT_BGR
3066 || video_box->out_format ==
3067 GST_VIDEO_FORMAT_RGB) ? fill_rgb24 : fill_rgb32;
3068 switch (video_box->in_format) {
3069 case GST_VIDEO_FORMAT_ARGB:
3070 case GST_VIDEO_FORMAT_ABGR:
3071 case GST_VIDEO_FORMAT_RGBA:
3072 case GST_VIDEO_FORMAT_BGRA:
3073 case GST_VIDEO_FORMAT_xRGB:
3074 case GST_VIDEO_FORMAT_xBGR:
3075 case GST_VIDEO_FORMAT_RGBx:
3076 case GST_VIDEO_FORMAT_BGRx:
3077 case GST_VIDEO_FORMAT_RGB:
3078 case GST_VIDEO_FORMAT_BGR:
3079 video_box->copy = copy_rgb32;
3081 case GST_VIDEO_FORMAT_AYUV:
3082 video_box->copy = copy_ayuv_rgb32;
3087 case GST_VIDEO_FORMAT_GRAY8:
3088 case GST_VIDEO_FORMAT_GRAY16_BE:
3089 case GST_VIDEO_FORMAT_GRAY16_LE:
3090 video_box->fill = fill_gray;
3091 switch (video_box->in_format) {
3092 case GST_VIDEO_FORMAT_GRAY8:
3093 case GST_VIDEO_FORMAT_GRAY16_BE:
3094 case GST_VIDEO_FORMAT_GRAY16_LE:
3095 video_box->copy = copy_packed_simple;
3101 case GST_VIDEO_FORMAT_YUY2:
3102 case GST_VIDEO_FORMAT_YVYU:
3103 case GST_VIDEO_FORMAT_UYVY:
3104 video_box->fill = fill_yuy2;
3105 switch (video_box->in_format) {
3106 case GST_VIDEO_FORMAT_YUY2:
3107 case GST_VIDEO_FORMAT_YVYU:
3108 case GST_VIDEO_FORMAT_UYVY:
3109 video_box->copy = copy_yuy2_yuy2;
3115 case GST_VIDEO_FORMAT_Y444:
3116 case GST_VIDEO_FORMAT_Y42B:
3117 case GST_VIDEO_FORMAT_Y41B:
3118 video_box->fill = fill_planar_yuv;
3119 switch (video_box->in_format) {
3120 case GST_VIDEO_FORMAT_Y444:
3121 video_box->copy = copy_y444_y444;
3123 case GST_VIDEO_FORMAT_Y42B:
3124 video_box->copy = copy_y42b_y42b;
3126 case GST_VIDEO_FORMAT_Y41B:
3127 video_box->copy = copy_y41b_y41b;
3137 return video_box->fill != NULL && video_box->copy != NULL;
/* gst_video_box_set_info:
 * GstVideoFilter set_info vmethod.  Caches the negotiated in/out format,
 * dimensions and colorimetry (sdtv == BT.601), re-derives the box values
 * when autocrop is on, then recalculates passthrough and selects the
 * fill/copy function pair.  Runs under the instance mutex. */
3141 gst_video_box_set_info (GstVideoFilter * vfilter, GstCaps * in,
3142 GstVideoInfo * in_info, GstCaps * out, GstVideoInfo * out_info)
3144 GstVideoBox *video_box = GST_VIDEO_BOX (vfilter);
3147 g_mutex_lock (&video_box->mutex);
3149 video_box->in_format = GST_VIDEO_INFO_FORMAT (in_info);
3150 video_box->in_width = GST_VIDEO_INFO_WIDTH (in_info);
3151 video_box->in_height = GST_VIDEO_INFO_HEIGHT (in_info);
3153 video_box->out_format = GST_VIDEO_INFO_FORMAT (out_info);
3154 video_box->out_width = GST_VIDEO_INFO_WIDTH (out_info);
3155 video_box->out_height = GST_VIDEO_INFO_HEIGHT (out_info);
3157 video_box->in_sdtv =
3158 in_info->colorimetry.matrix == GST_VIDEO_COLOR_MATRIX_BT601;
3159 video_box->out_sdtv =
3160 out_info->colorimetry.matrix == GST_VIDEO_COLOR_MATRIX_BT601;
3162 GST_DEBUG_OBJECT (video_box, "Input w: %d h: %d", video_box->in_width,
3163 video_box->in_height);
3164 GST_DEBUG_OBJECT (video_box, "Output w: %d h: %d", video_box->out_width,
3165 video_box->out_height);
3167 if (video_box->autocrop)
3168 gst_video_box_autocrop (video_box);
3170 /* recalc the transformation strategy */
3171 ret = gst_video_box_recalc_transform (video_box);
3174 ret = gst_video_box_select_processing_functions (video_box);
3175 g_mutex_unlock (&video_box->mutex);
/* gst_video_box_src_event:
 * GstBaseTransform src_event vmethod.  For navigation mouse events it
 * translates the pointer coordinates from output space back into input
 * space by adding the left/top box offsets, rebuilding the event with the
 * adjusted structure, then chains up. */
3181 gst_video_box_src_event (GstBaseTransform * trans, GstEvent * event)
3183 GstVideoBox *video_box = GST_VIDEO_BOX (trans);
3184 GstStructure *new_structure;
3185 const GstStructure *structure;
3186 const gchar *event_name;
3190 GST_OBJECT_LOCK (video_box);
/* Only bother when there is actually a horizontal/vertical offset. */
3191 if (GST_EVENT_TYPE (event) == GST_EVENT_NAVIGATION &&
3192 (video_box->box_left != 0 || video_box->box_top != 0)) {
3193 structure = gst_event_get_structure (event);
3194 event_name = gst_structure_get_string (structure, "event");
3197 (strcmp (event_name, "mouse-move") == 0 ||
3198 strcmp (event_name, "mouse-button-press") == 0 ||
3199 strcmp (event_name, "mouse-button-release") == 0)) {
3200 if (gst_structure_get_double (structure, "pointer_x", &pointer_x) &&
3201 gst_structure_get_double (structure, "pointer_y", &pointer_y)) {
3202 gdouble new_pointer_x, new_pointer_y;
3203 GstEvent *new_event;
3205 new_pointer_x = pointer_x + video_box->box_left;
3206 new_pointer_y = pointer_y + video_box->box_top;
3208 new_structure = gst_structure_copy (structure);
3209 gst_structure_set (new_structure,
3210 "pointer_x", G_TYPE_DOUBLE, (gdouble) (new_pointer_x),
3211 "pointer_y", G_TYPE_DOUBLE, (gdouble) (new_pointer_y), NULL);
3213 new_event = gst_event_new_navigation (new_structure);
3214 gst_event_unref (event);
3217 GST_WARNING_OBJECT (video_box, "Failed to read navigation event");
3221 GST_OBJECT_UNLOCK (video_box);
3223 return GST_BASE_TRANSFORM_CLASS (parent_class)->src_event (trans, event);
/* gst_video_box_process:
 * Core per-frame work.  Computes the size of the source region that
 * survives cropping (crop_w x crop_h), fills the output with the border
 * color when any border is needed (or when the crop leaves nothing), and
 * copies the surviving region via the pre-selected copy function.
 * Alpha doubles are scaled to 0..255 here.
 * NOTE(review): the computation of src_x/src_y/dest_x/dest_y from the
 * positive/negative box values (original lines ~3280-3295) appears to
 * have been dropped by the extraction. */
3227 gst_video_box_process (GstVideoBox * video_box, GstVideoFrame * in,
3228 GstVideoFrame * out)
3230 guint b_alpha = CLAMP (video_box->border_alpha * 256, 0, 255);
3231 guint i_alpha = CLAMP (video_box->alpha * 256, 0, 255);
3232 GstVideoBoxFill fill_type = video_box->fill_type;
3233 gint br, bl, bt, bb, crop_w, crop_h;
3238 br = video_box->box_right;
3239 bl = video_box->box_left;
3240 bt = video_box->box_top;
3241 bb = video_box->box_bottom;
/* Only positive (cropping) box values reduce the copied region;
 * negative values become borders and leave the source size alone. */
3243 if (br >= 0 && bl >= 0) {
3244 crop_w = video_box->in_width - (br + bl);
3245 } else if (br >= 0 && bl < 0) {
3246 crop_w = video_box->in_width - (br);
3247 } else if (br < 0 && bl >= 0) {
3248 crop_w = video_box->in_width - (bl);
3249 } else if (br < 0 && bl < 0) {
3250 crop_w = video_box->in_width;
3253 if (bb >= 0 && bt >= 0) {
3254 crop_h = video_box->in_height - (bb + bt);
3255 } else if (bb >= 0 && bt < 0) {
3256 crop_h = video_box->in_height - (bb);
3257 } else if (bb < 0 && bt >= 0) {
3258 crop_h = video_box->in_height - (bt);
3259 } else if (bb < 0 && bt < 0) {
3260 crop_h = video_box->in_height;
3263 GST_DEBUG_OBJECT (video_box, "Borders are: L:%d, R:%d, T:%d, B:%d", bl, br,
3265 GST_DEBUG_OBJECT (video_box, "Alpha value is: %u (frame) %u (border)",
3268 if (crop_h < 0 || crop_w < 0) {
/* Everything is cropped away: output is pure border fill. */
3269 video_box->fill (fill_type, b_alpha, out, video_box->out_sdtv);
3270 } else if (bb == 0 && bt == 0 && br == 0 && bl == 0) {
/* No box at all: plain full-frame copy (possibly converting). */
3271 video_box->copy (i_alpha, out, video_box->out_sdtv, 0, 0, in,
3272 video_box->in_sdtv, 0, 0, crop_w, crop_h);
3274 gint src_x = 0, src_y = 0;
3275 gint dest_x = 0, dest_y = 0;
3277 /* Fill everything if a border should be added somewhere */
3278 if (bt < 0 || bb < 0 || br < 0 || bl < 0)
3279 video_box->fill (fill_type, b_alpha, out, video_box->out_sdtv);
3296 video_box->copy (i_alpha, out, video_box->out_sdtv, dest_x, dest_y,
3297 in, video_box->in_sdtv, src_x, src_y, crop_w, crop_h);
3300 GST_LOG_OBJECT (video_box, "image created");
/* gst_video_box_before_transform:
 * Syncs controllable properties (GstController) to the buffer's stream
 * time before each frame is processed. */
3304 gst_video_box_before_transform (GstBaseTransform * trans, GstBuffer * in)
3306 GstVideoBox *video_box = GST_VIDEO_BOX (trans);
3307 GstClockTime timestamp, stream_time;
3309 timestamp = GST_BUFFER_TIMESTAMP (in);
3311 gst_segment_to_stream_time (&trans->segment, GST_FORMAT_TIME, timestamp);
3313 GST_DEBUG_OBJECT (video_box, "sync to %" GST_TIME_FORMAT,
3314 GST_TIME_ARGS (timestamp));
3316 if (GST_CLOCK_TIME_IS_VALID (stream_time))
3317 gst_object_sync_values (GST_OBJECT (video_box), stream_time);
/* gst_video_box_transform_frame:
 * GstVideoFilter transform_frame vmethod: runs the boxing under the
 * instance mutex so property changes can't race frame processing. */
3320 static GstFlowReturn
3321 gst_video_box_transform_frame (GstVideoFilter * vfilter,
3322 GstVideoFrame * in_frame, GstVideoFrame * out_frame)
3324 GstVideoBox *video_box = GST_VIDEO_BOX (vfilter);
3326 g_mutex_lock (&video_box->mutex);
3327 gst_video_box_process (video_box, in_frame, out_frame);
3328 g_mutex_unlock (&video_box->mutex);
3332 /* FIXME: 0.11 merge with videocrop plugin */
/* plugin_init: registers the debug category and the "videobox" element. */
3334 plugin_init (GstPlugin * plugin)
3336 GST_DEBUG_CATEGORY_INIT (videobox_debug, "videobox", 0,
3337 "Resizes a video by adding borders or cropping");
3339 return gst_element_register (plugin, "videobox", GST_RANK_NONE,
3340 GST_TYPE_VIDEO_BOX);
/* Standard GStreamer plugin entry-point definition. */
3343 GST_PLUGIN_DEFINE (GST_VERSION_MAJOR,
3346 "resizes a video by adding borders or cropping",
3347 plugin_init, VERSION, GST_LICENSE, GST_PACKAGE_NAME, GST_PACKAGE_ORIGIN)