2 * Copyright (C) 2010 David Schleef <ds@schleef.org>
3 * Copyright (C) 2010 Sebastian Dröge <sebastian.droege@collabora.co.uk>
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Library General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Library General Public License for more details.
15 * You should have received a copy of the GNU Library General Public
16 * License along with this library; if not, write to the
17 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
18 * Boston, MA 02110-1301, USA.
32 #include "video-converter.h"
38 #include <gst/allocators/gsttizenmemory.h>
41 #include "video-orc.h"
44 * SECTION:videoconverter
45 * @title: GstVideoConverter
46 * @short_description: Generic video conversion
48 * This object is used to convert video frames from one format to another.
49 * The object can perform conversion of:
61 * (c) (convert Y'CbCr to R'G'B')
64 * (f) colorspace convert through XYZ
67 * (i) (convert R'G'B' to Y'CbCr)
68 * (j) chroma downsample
73 * (a) range truncate, range expand
74 * (b) full upsample, 1-1 non-cosited upsample, no upsample
82 * (j) 1-1 cosited downsample, no downsample
86 * 1 : a -> -> -> -> e -> f -> g -> -> -> -> k
87 * 2 : a -> -> -> -> e -> f* -> g -> -> -> -> k
88 * 3 : a -> -> -> -> e* -> f* -> g* -> -> -> -> k
89 * 4 : a -> b -> -> -> e -> f -> g -> -> -> j -> k
90 * 5 : a -> b -> -> -> e* -> f* -> g* -> -> -> j -> k
91 * 6 : a -> b -> c -> d -> e -> f -> g -> h -> i -> j -> k
92 * 7 : a -> b -> c -> d -> e* -> f* -> g* -> h -> i -> j -> k
94 * 8 : a -> b -> c -> d -> e* -> f* -> g* -> h -> i -> j -> k
95 * 9 : a -> b -> c -> d -> e* -> f* -> g* -> h -> i -> j -> k
96 * 10 : a -> b -> c -> d -> e* -> f* -> g* -> h -> i -> j -> k
99 #ifndef GST_DISABLE_GST_DEBUG
100 #define GST_CAT_DEFAULT ensure_debug_category()
101 static GstDebugCategory *
102 ensure_debug_category (void)
104 static gsize cat_gonce = 0;
106 if (g_once_init_enter (&cat_gonce)) {
109 cat_done = (gsize) _gst_debug_category_new ("video-converter", 0,
110 "video-converter object");
112 g_once_init_leave (&cat_gonce, cat_done);
115 return (GstDebugCategory *) cat_gonce;
118 #define ensure_debug_category() /* NOOP */
119 #endif /* GST_DISABLE_GST_DEBUG */
121 typedef void (*GstParallelizedTaskFunc) (gpointer user_data);
123 typedef struct _GstParallelizedTaskRunner GstParallelizedTaskRunner;
124 typedef struct _GstParallelizedTaskThread GstParallelizedTaskThread;
126 struct _GstParallelizedTaskThread
128 GstParallelizedTaskRunner *runner;
133 struct _GstParallelizedTaskRunner
137 GstParallelizedTaskThread *threads;
139 GstParallelizedTaskFunc func;
143 GCond cond_todo, cond_done;
/* gst_parallelized_task_thread_func:
 * Worker loop for one pooled conversion thread.  Protocol, all under
 * runner->lock:
 *  - announce readiness by bumping n_done and signalling cond_done
 *    (the runner waits for n_threads - 1 workers; slot 0 belongs to
 *    the thread calling run() and has no worker of its own);
 *  - sleep on cond_todo until n_todo holds a task index or quit is set;
 *  - claim one task with n_todo--, run runner->func on its task_data,
 *    report completion, and loop.
 * NOTE(review): this listing is a truncated excerpt — the enclosing
 * loop, local declarations and the quit/exit path are not visible;
 * all code tokens below are kept exactly as found. */
149 gst_parallelized_task_thread_func (gpointer data)
151 GstParallelizedTaskThread *self = data;
/* Tizen patch: pin this worker to CPU self->idx; a failure is only
 * logged, not fatal. */
156 pthread_t thread = pthread_self ();
161 CPU_SET (self->idx, &cpuset);
162 if ((r = pthread_setaffinity_np (thread, sizeof (cpuset), &cpuset)) != 0)
163 GST_ERROR ("Failed to set thread affinity for thread %d: %s", self->idx,
/* Tell the runner this worker is up and idle. */
169 g_mutex_lock (&self->runner->lock);
170 self->runner->n_done++;
171 if (self->runner->n_done == self->runner->n_threads - 1)
172 g_cond_signal (&self->runner->cond_done);
/* Wait for published work (n_todo != -1) or shutdown. */
177 while (self->runner->n_todo == -1 && !self->runner->quit)
178 g_cond_wait (&self->runner->cond_todo, &self->runner->lock);
180 if (self->runner->quit)
/* Claim one task index; n_todo may legitimately reach -1 (empty). */
183 idx = self->runner->n_todo--;
184 g_assert (self->runner->n_todo >= -1);
185 g_mutex_unlock (&self->runner->lock);
187 g_assert (self->runner->func != NULL);
/* Run the claimed task outside the lock. */
189 self->runner->func (self->runner->task_data[idx]);
/* Report completion; cond_done wakes the caller waiting in run(). */
191 g_mutex_lock (&self->runner->lock);
192 self->runner->n_done++;
193 if (self->runner->n_done == self->runner->n_threads - 1)
194 g_cond_signal (&self->runner->cond_done);
197 g_mutex_unlock (&self->runner->lock);
203 gst_parallelized_task_runner_free (GstParallelizedTaskRunner * self)
207 g_mutex_lock (&self->lock);
209 g_cond_broadcast (&self->cond_todo);
210 g_mutex_unlock (&self->lock);
212 for (i = 1; i < self->n_threads; i++) {
213 if (!self->threads[i].thread)
216 g_thread_join (self->threads[i].thread);
219 g_mutex_clear (&self->lock);
220 g_cond_clear (&self->cond_todo);
221 g_cond_clear (&self->cond_done);
222 g_free (self->threads);
226 static GstParallelizedTaskRunner *
/* gst_parallelized_task_runner_new:
 * Create a runner with @n_threads workers; 0 means one per processor
 * (g_get_num_processors()).  Slot 0 is reserved for the thread that
 * calls run(), so only n_threads - 1 GThreads are spawned, and
 * construction blocks until all of them have checked in.  On thread
 * creation failure the partially-built runner is freed.
 * NOTE(review): truncated listing — the error label, some declarations
 * and the return statement are not visible; tokens kept as found. */
227 gst_parallelized_task_runner_new (guint n_threads)
229 GstParallelizedTaskRunner *self;
/* 0 requested: use one thread per available processor. */
234 n_threads = g_get_num_processors ();
236 self = g_new0 (GstParallelizedTaskRunner, 1);
237 self->n_threads = n_threads;
238 self->threads = g_new0 (GstParallelizedTaskThread, n_threads);
243 g_mutex_init (&self->lock);
244 g_cond_init (&self->cond_todo);
245 g_cond_init (&self->cond_done);
247 /* Set when scheduling a job */
249 self->task_data = NULL;
251 for (i = 0; i < n_threads; i++) {
252 self->threads[i].runner = self;
253 self->threads[i].idx = i;
255 /* First thread is the one calling run() */
257 self->threads[i].thread =
258 g_thread_try_new ("videoconvert", gst_parallelized_task_thread_func,
259 &self->threads[i], &err);
260 if (!self->threads[i].thread)
/* Wait until every spawned worker has signalled readiness. */
265 g_mutex_lock (&self->lock);
266 while (self->n_done < self->n_threads - 1)
267 g_cond_wait (&self->cond_done, &self->lock);
269 g_mutex_unlock (&self->lock);
/* Error path: log, then tear the half-built runner down again. */
275 GST_ERROR ("Failed to start thread %u: %s", i, err->message);
276 g_clear_error (&err);
278 gst_parallelized_task_runner_free (self);
284 gst_parallelized_task_runner_run (GstParallelizedTaskRunner * self,
285 GstParallelizedTaskFunc func, gpointer * task_data)
287 guint n_threads = self->n_threads;
290 self->task_data = task_data;
293 g_mutex_lock (&self->lock);
294 self->n_todo = self->n_threads - 2;
296 g_cond_broadcast (&self->cond_todo);
297 g_mutex_unlock (&self->lock);
300 self->func (self->task_data[self->n_threads - 1]);
303 g_mutex_lock (&self->lock);
304 while (self->n_done < self->n_threads - 1)
305 g_cond_wait (&self->cond_done, &self->lock);
307 g_mutex_unlock (&self->lock);
311 self->task_data = NULL;
314 typedef struct _GstLineCache GstLineCache;
317 #define SCALE_F ((float) (1 << SCALE))
319 typedef struct _MatrixData MatrixData;
334 void (*matrix_func) (MatrixData * data, gpointer pixels);
337 typedef struct _GammaData GammaData;
341 gpointer gamma_table;
343 void (*gamma_func) (GammaData * data, gpointer dest, gpointer src);
349 ALPHA_MODE_COPY = (1 << 0),
350 ALPHA_MODE_SET = (1 << 1),
351 ALPHA_MODE_MULT = (1 << 2)
361 GDestroyNotify notify;
364 typedef void (*FastConvertFunc) (GstVideoConverter * convert,
365 const GstVideoFrame * src, GstVideoFrame * dest, gint plane);
367 struct _GstVideoConverter
371 GstVideoInfo in_info;
372 GstVideoInfo out_info;
387 gint current_pstride;
390 GstVideoFormat current_format;
393 GstStructure *config;
395 GstParallelizedTaskRunner *conversion_runner;
399 gboolean fill_border;
404 AlphaMode alpha_mode;
406 void (*convert) (GstVideoConverter * convert, const GstVideoFrame * src,
407 GstVideoFrame * dest);
409 /* data for unpack */
410 GstLineCache **unpack_lines;
411 GstVideoFormat unpack_format;
414 gboolean identity_unpack;
417 /* chroma upsample */
418 GstLineCache **upsample_lines;
419 GstVideoChromaResample **upsample;
420 GstVideoChromaResample **upsample_p;
421 GstVideoChromaResample **upsample_i;
426 GstLineCache **to_RGB_lines;
427 MatrixData to_RGB_matrix;
432 GstLineCache **hscale_lines;
433 GstVideoScaler **h_scaler;
435 GstLineCache **vscale_lines;
436 GstVideoScaler **v_scaler;
437 GstVideoScaler **v_scaler_p;
438 GstVideoScaler **v_scaler_i;
442 /* color space conversion */
443 GstLineCache **convert_lines;
444 MatrixData convert_matrix;
448 /* alpha correction */
449 GstLineCache **alpha_lines;
450 void (*alpha_func) (GstVideoConverter * convert, gpointer pixels, gint width);
455 GstLineCache **to_YUV_lines;
456 MatrixData to_YUV_matrix;
458 /* chroma downsample */
459 GstLineCache **downsample_lines;
460 GstVideoChromaResample **downsample;
461 GstVideoChromaResample **downsample_p;
462 GstVideoChromaResample **downsample_i;
467 GstLineCache **dither_lines;
468 GstVideoDither **dither;
471 GstLineCache **pack_lines;
473 GstVideoFormat pack_format;
476 gboolean identity_pack;
478 gconstpointer pack_pal;
481 const GstVideoFrame *src;
485 GstVideoFormat fformat[4];
497 GstVideoScaler **scaler;
501 GstVideoScaler **scaler;
503 FastConvertFunc fconvert[4];
506 typedef gpointer (*GstLineCacheAllocLineFunc) (GstLineCache * cache, gint idx,
508 typedef gboolean (*GstLineCacheNeedLineFunc) (GstLineCache * cache, gint idx,
509 gint out_line, gint in_line, gpointer user_data);
518 gboolean write_input;
520 gboolean alloc_writable;
522 GstLineCacheNeedLineFunc need_line;
524 gpointer need_line_data;
525 GDestroyNotify need_line_notify;
529 GstLineCacheAllocLineFunc alloc_line;
530 gpointer alloc_line_data;
531 GDestroyNotify alloc_line_notify;
534 static GstLineCache *
535 gst_line_cache_new (GstLineCache * prev)
537 GstLineCache *result;
539 result = g_slice_new0 (GstLineCache);
540 result->lines = g_ptr_array_new ();
547 gst_line_cache_clear (GstLineCache * cache)
549 g_return_if_fail (cache != NULL);
551 g_ptr_array_set_size (cache->lines, 0);
556 gst_line_cache_free (GstLineCache * cache)
558 if (cache->need_line_notify)
559 cache->need_line_notify (cache->need_line_data);
560 if (cache->alloc_line_notify)
561 cache->alloc_line_notify (cache->alloc_line_data);
562 gst_line_cache_clear (cache);
563 g_ptr_array_unref (cache->lines);
564 g_slice_free (GstLineCache, cache);
568 gst_line_cache_set_need_line_func (GstLineCache * cache,
569 GstLineCacheNeedLineFunc need_line, gint idx, gpointer user_data,
570 GDestroyNotify notify)
572 cache->need_line = need_line;
573 cache->need_line_idx = idx;
574 cache->need_line_data = user_data;
575 cache->need_line_notify = notify;
579 gst_line_cache_set_alloc_line_func (GstLineCache * cache,
580 GstLineCacheAllocLineFunc alloc_line, gpointer user_data,
581 GDestroyNotify notify)
583 cache->alloc_line = alloc_line;
584 cache->alloc_line_data = user_data;
585 cache->alloc_line_notify = notify;
588 /* keep this much backlog for interlaced video */
/* gst_line_cache_get_lines:
 * Return an array of @n_lines consecutive cached line pointers
 * starting at @in_line, producing missing lines through the need_line
 * callback.  Lines older than the backlog window are evicted; a
 * request before cache->first resets the cache.  Returns NULL when
 * lines cannot be produced.  NOTE(review): truncated listing — the
 * BACKLOG constant, the retry loop and several declarations are not
 * visible; tokens kept exactly as found. */
592 gst_line_cache_get_lines (GstLineCache * cache, gint idx, gint out_line,
593 gint in_line, gint n_lines)
/* Evict lines that fell out of the backlog window. */
595 if (cache->first + cache->backlog < in_line) {
597 MIN (in_line - (cache->first + cache->backlog), cache->lines->len);
599 g_ptr_array_remove_range (cache->lines, 0, to_remove);
601 cache->first += to_remove;
602 } else if (in_line < cache->first) {
/* Request before the window: drop everything, restart at in_line. */
603 gst_line_cache_clear (cache);
604 cache->first = in_line;
/* Fast path: the requested range is fully cached. */
610 if (cache->first <= in_line
611 && in_line + n_lines <= cache->first + (gint) cache->lines->len) {
612 return cache->lines->pdata + (in_line - cache->first);
615 if (cache->need_line == NULL)
/* Ask the producer for the next missing line (output-space index). */
618 oline = out_line + cache->first + cache->lines->len - in_line;
620 if (!cache->need_line (cache, idx, oline, cache->first + cache->lines->len,
621 cache->need_line_data))
624 GST_DEBUG ("no lines");
629 gst_line_cache_add_line (GstLineCache * cache, gint idx, gpointer line)
631 if (cache->first + cache->lines->len != idx) {
632 gst_line_cache_clear (cache);
635 g_ptr_array_add (cache->lines, line);
639 gst_line_cache_alloc_line (GstLineCache * cache, gint idx)
643 if (cache->alloc_line)
644 res = cache->alloc_line (cache, idx, cache->alloc_line_data);
651 static void video_converter_generic (GstVideoConverter * convert,
652 const GstVideoFrame * src, GstVideoFrame * dest);
653 static gboolean video_converter_lookup_fastpath (GstVideoConverter * convert);
654 static void video_converter_compute_matrix (GstVideoConverter * convert);
655 static void video_converter_compute_resample (GstVideoConverter * convert,
658 static gpointer get_dest_line (GstLineCache * cache, gint idx,
661 static gboolean do_unpack_lines (GstLineCache * cache, gint idx, gint out_line,
662 gint in_line, gpointer user_data);
663 static gboolean do_downsample_lines (GstLineCache * cache, gint idx,
664 gint out_line, gint in_line, gpointer user_data);
665 static gboolean do_convert_to_RGB_lines (GstLineCache * cache, gint idx,
666 gint out_line, gint in_line, gpointer user_data);
667 static gboolean do_convert_lines (GstLineCache * cache, gint idx, gint out_line,
668 gint in_line, gpointer user_data);
669 static gboolean do_alpha_lines (GstLineCache * cache, gint idx, gint out_line,
670 gint in_line, gpointer user_data);
671 static gboolean do_convert_to_YUV_lines (GstLineCache * cache, gint idx,
672 gint out_line, gint in_line, gpointer user_data);
673 static gboolean do_upsample_lines (GstLineCache * cache, gint idx,
674 gint out_line, gint in_line, gpointer user_data);
675 static gboolean do_vscale_lines (GstLineCache * cache, gint idx, gint out_line,
676 gint in_line, gpointer user_data);
677 static gboolean do_hscale_lines (GstLineCache * cache, gint idx, gint out_line,
678 gint in_line, gpointer user_data);
679 static gboolean do_dither_lines (GstLineCache * cache, gint idx, gint out_line,
680 gint in_line, gpointer user_data);
682 static ConverterAlloc *
683 converter_alloc_new (guint stride, guint n_lines, gpointer user_data,
684 GDestroyNotify notify)
686 ConverterAlloc *alloc;
688 GST_DEBUG ("stride %d, n_lines %d", stride, n_lines);
689 alloc = g_slice_new0 (ConverterAlloc);
690 alloc->data = g_malloc (stride * n_lines);
691 alloc->stride = stride;
692 alloc->n_lines = n_lines;
694 alloc->user_data = user_data;
695 alloc->notify = notify;
701 converter_alloc_free (ConverterAlloc * alloc)
704 alloc->notify (alloc->user_data);
705 g_free (alloc->data);
706 g_slice_free (ConverterAlloc, alloc);
710 setup_border_alloc (GstVideoConverter * convert, ConverterAlloc * alloc)
714 if (convert->borderline) {
715 for (i = 0; i < alloc->n_lines; i++)
716 memcpy (&alloc->data[i * alloc->stride], convert->borderline,
722 get_temp_line (GstLineCache * cache, gint idx, gpointer user_data)
724 ConverterAlloc *alloc = user_data;
727 GST_DEBUG ("get temp line %d (%p %d)", idx, alloc, alloc->idx);
728 tmpline = &alloc->data[alloc->stride * alloc->idx];
729 alloc->idx = (alloc->idx + 1) % alloc->n_lines;
735 get_border_temp_line (GstLineCache * cache, gint idx, gpointer user_data)
737 ConverterAlloc *alloc = user_data;
738 GstVideoConverter *convert = alloc->user_data;
741 GST_DEBUG ("get temp line %d (%p %d)", idx, alloc, alloc->idx);
742 tmpline = &alloc->data[alloc->stride * alloc->idx] +
743 (convert->out_x * convert->pack_pstride);
744 alloc->idx = (alloc->idx + 1) % alloc->n_lines;
750 get_opt_int (GstVideoConverter * convert, const gchar * opt, gint def)
753 if (!gst_structure_get_int (convert->config, opt, &res))
759 get_opt_uint (GstVideoConverter * convert, const gchar * opt, guint def)
762 if (!gst_structure_get_uint (convert->config, opt, &res))
768 get_opt_double (GstVideoConverter * convert, const gchar * opt, gdouble def)
771 if (!gst_structure_get_double (convert->config, opt, &res))
777 get_opt_bool (GstVideoConverter * convert, const gchar * opt, gboolean def)
780 if (!gst_structure_get_boolean (convert->config, opt, &res))
786 get_opt_enum (GstVideoConverter * convert, const gchar * opt, GType type,
790 if (!gst_structure_get_enum (convert->config, opt, type, &res))
795 #define DEFAULT_OPT_FILL_BORDER TRUE
796 #define DEFAULT_OPT_ALPHA_VALUE 1.0
797 /* options copy, set, mult */
798 #define DEFAULT_OPT_ALPHA_MODE GST_VIDEO_ALPHA_MODE_COPY
799 #define DEFAULT_OPT_BORDER_ARGB 0xff000000
800 /* options full, input-only, output-only, none */
801 #define DEFAULT_OPT_MATRIX_MODE GST_VIDEO_MATRIX_MODE_FULL
803 #define DEFAULT_OPT_GAMMA_MODE GST_VIDEO_GAMMA_MODE_NONE
804 /* none, merge-only, fast */
805 #define DEFAULT_OPT_PRIMARIES_MODE GST_VIDEO_PRIMARIES_MODE_NONE
806 /* options full, upsample-only, downsample-only, none */
807 #define DEFAULT_OPT_CHROMA_MODE GST_VIDEO_CHROMA_MODE_FULL
808 #define DEFAULT_OPT_RESAMPLER_METHOD GST_VIDEO_RESAMPLER_METHOD_CUBIC
809 #define DEFAULT_OPT_CHROMA_RESAMPLER_METHOD GST_VIDEO_RESAMPLER_METHOD_LINEAR
810 #define DEFAULT_OPT_RESAMPLER_TAPS 0
811 #define DEFAULT_OPT_DITHER_METHOD GST_VIDEO_DITHER_BAYER
812 #define DEFAULT_OPT_DITHER_QUANTIZATION 1
814 #define GET_OPT_FILL_BORDER(c) get_opt_bool(c, \
815 GST_VIDEO_CONVERTER_OPT_FILL_BORDER, DEFAULT_OPT_FILL_BORDER)
816 #define GET_OPT_ALPHA_VALUE(c) get_opt_double(c, \
817 GST_VIDEO_CONVERTER_OPT_ALPHA_VALUE, DEFAULT_OPT_ALPHA_VALUE)
818 #define GET_OPT_ALPHA_MODE(c) get_opt_enum(c, \
819 GST_VIDEO_CONVERTER_OPT_ALPHA_MODE, GST_TYPE_VIDEO_ALPHA_MODE, DEFAULT_OPT_ALPHA_MODE)
820 #define GET_OPT_BORDER_ARGB(c) get_opt_uint(c, \
821 GST_VIDEO_CONVERTER_OPT_BORDER_ARGB, DEFAULT_OPT_BORDER_ARGB)
822 #define GET_OPT_MATRIX_MODE(c) get_opt_enum(c, \
823 GST_VIDEO_CONVERTER_OPT_MATRIX_MODE, GST_TYPE_VIDEO_MATRIX_MODE, DEFAULT_OPT_MATRIX_MODE)
824 #define GET_OPT_GAMMA_MODE(c) get_opt_enum(c, \
825 GST_VIDEO_CONVERTER_OPT_GAMMA_MODE, GST_TYPE_VIDEO_GAMMA_MODE, DEFAULT_OPT_GAMMA_MODE)
826 #define GET_OPT_PRIMARIES_MODE(c) get_opt_enum(c, \
827 GST_VIDEO_CONVERTER_OPT_PRIMARIES_MODE, GST_TYPE_VIDEO_PRIMARIES_MODE, DEFAULT_OPT_PRIMARIES_MODE)
828 #define GET_OPT_CHROMA_MODE(c) get_opt_enum(c, \
829 GST_VIDEO_CONVERTER_OPT_CHROMA_MODE, GST_TYPE_VIDEO_CHROMA_MODE, DEFAULT_OPT_CHROMA_MODE)
830 #define GET_OPT_RESAMPLER_METHOD(c) get_opt_enum(c, \
831 GST_VIDEO_CONVERTER_OPT_RESAMPLER_METHOD, GST_TYPE_VIDEO_RESAMPLER_METHOD, \
832 DEFAULT_OPT_RESAMPLER_METHOD)
833 #define GET_OPT_CHROMA_RESAMPLER_METHOD(c) get_opt_enum(c, \
834 GST_VIDEO_CONVERTER_OPT_CHROMA_RESAMPLER_METHOD, GST_TYPE_VIDEO_RESAMPLER_METHOD, \
835 DEFAULT_OPT_CHROMA_RESAMPLER_METHOD)
836 #define GET_OPT_RESAMPLER_TAPS(c) get_opt_uint(c, \
837 GST_VIDEO_CONVERTER_OPT_RESAMPLER_TAPS, DEFAULT_OPT_RESAMPLER_TAPS)
838 #define GET_OPT_DITHER_METHOD(c) get_opt_enum(c, \
839 GST_VIDEO_CONVERTER_OPT_DITHER_METHOD, GST_TYPE_VIDEO_DITHER_METHOD, \
840 DEFAULT_OPT_DITHER_METHOD)
841 #define GET_OPT_DITHER_QUANTIZATION(c) get_opt_uint(c, \
842 GST_VIDEO_CONVERTER_OPT_DITHER_QUANTIZATION, DEFAULT_OPT_DITHER_QUANTIZATION)
844 #define CHECK_ALPHA_COPY(c) (GET_OPT_ALPHA_MODE(c) == GST_VIDEO_ALPHA_MODE_COPY)
845 #define CHECK_ALPHA_SET(c) (GET_OPT_ALPHA_MODE(c) == GST_VIDEO_ALPHA_MODE_SET)
846 #define CHECK_ALPHA_MULT(c) (GET_OPT_ALPHA_MODE(c) == GST_VIDEO_ALPHA_MODE_MULT)
848 #define CHECK_MATRIX_FULL(c) (GET_OPT_MATRIX_MODE(c) == GST_VIDEO_MATRIX_MODE_FULL)
849 #define CHECK_MATRIX_INPUT(c) (GET_OPT_MATRIX_MODE(c) == GST_VIDEO_MATRIX_MODE_INPUT_ONLY)
850 #define CHECK_MATRIX_OUTPUT(c) (GET_OPT_MATRIX_MODE(c) == GST_VIDEO_MATRIX_MODE_OUTPUT_ONLY)
851 #define CHECK_MATRIX_NONE(c) (GET_OPT_MATRIX_MODE(c) == GST_VIDEO_MATRIX_MODE_NONE)
853 #define CHECK_GAMMA_NONE(c) (GET_OPT_GAMMA_MODE(c) == GST_VIDEO_GAMMA_MODE_NONE)
854 #define CHECK_GAMMA_REMAP(c) (GET_OPT_GAMMA_MODE(c) == GST_VIDEO_GAMMA_MODE_REMAP)
856 #define CHECK_PRIMARIES_NONE(c) (GET_OPT_PRIMARIES_MODE(c) == GST_VIDEO_PRIMARIES_MODE_NONE)
857 #define CHECK_PRIMARIES_MERGE(c) (GET_OPT_PRIMARIES_MODE(c) == GST_VIDEO_PRIMARIES_MODE_MERGE_ONLY)
858 #define CHECK_PRIMARIES_FAST(c) (GET_OPT_PRIMARIES_MODE(c) == GST_VIDEO_PRIMARIES_MODE_FAST)
860 #define CHECK_CHROMA_FULL(c) (GET_OPT_CHROMA_MODE(c) == GST_VIDEO_CHROMA_MODE_FULL)
861 #define CHECK_CHROMA_UPSAMPLE(c) (GET_OPT_CHROMA_MODE(c) == GST_VIDEO_CHROMA_MODE_UPSAMPLE_ONLY)
862 #define CHECK_CHROMA_DOWNSAMPLE(c) (GET_OPT_CHROMA_MODE(c) == GST_VIDEO_CHROMA_MODE_DOWNSAMPLE_ONLY)
863 #define CHECK_CHROMA_NONE(c) (GET_OPT_CHROMA_MODE(c) == GST_VIDEO_CHROMA_MODE_NONE)
865 static GstLineCache *
/* chain_unpack_line:
 * First stage of the conversion chain for plane-set @idx: create the
 * head line cache that unpacks input lines to the intermediate
 * unpack_format.  When the input format already equals the unpack
 * format the unpack is an identity copy.  NOTE(review): truncated
 * listing — some declarations and the return are not visible. */
866 chain_unpack_line (GstVideoConverter * convert, gint idx)
871 info = &convert->in_info;
873 convert->current_format = convert->unpack_format;
874 convert->current_bits = convert->unpack_bits;
/* pixel stride: 4 components x bits/8 bytes, i.e. bits >> 1 */
875 convert->current_pstride = convert->current_bits >> 1;
877 convert->unpack_pstride = convert->current_pstride;
878 convert->identity_unpack = (convert->current_format == info->finfo->format);
880 GST_DEBUG ("chain unpack line format %s, pstride %d, identity_unpack %d",
881 gst_video_format_to_string (convert->current_format),
882 convert->current_pstride, convert->identity_unpack);
884 prev = convert->unpack_lines[idx] = gst_line_cache_new (NULL);
885 prev->write_input = FALSE;
886 prev->pass_alloc = FALSE;
888 prev->stride = convert->current_pstride * convert->current_width;
889 gst_line_cache_set_need_line_func (prev, do_unpack_lines, idx, convert, NULL);
894 static GstLineCache *
/* chain_upsample:
 * Optionally insert a chroma-upsampling stage after @prev for
 * plane-set @idx.  The stage is only added when
 * video_converter_compute_resample() produced a progressive or
 * interlaced upsampler; otherwise the chain is left unchanged.
 * NOTE(review): truncated listing — n_lines/backlog setup and the
 * return statement are not visible. */
895 chain_upsample (GstVideoConverter * convert, GstLineCache * prev, gint idx)
897 video_converter_compute_resample (convert, idx);
899 if (convert->upsample_p[idx] || convert->upsample_i[idx]) {
900 GST_DEBUG ("chain upsample");
901 prev = convert->upsample_lines[idx] = gst_line_cache_new (prev);
/* the upsampler writes in place on its input lines */
902 prev->write_input = TRUE;
903 prev->pass_alloc = TRUE;
905 prev->stride = convert->current_pstride * convert->current_width;
906 gst_line_cache_set_need_line_func (prev,
907 do_upsample_lines, idx, convert, NULL);
913 color_matrix_set_identity (MatrixData * m)
917 for (i = 0; i < 4; i++) {
918 for (j = 0; j < 4; j++) {
919 m->dm[i][j] = (i == j);
925 color_matrix_copy (MatrixData * d, const MatrixData * s)
929 for (i = 0; i < 4; i++)
930 for (j = 0; j < 4; j++)
931 d->dm[i][j] = s->dm[i][j];
934 /* Perform 4x4 matrix multiplication:
935 * - @dst@ = @a@ * @b@
936 * - @dst@ may be a pointer to @a@ andor @b@
939 color_matrix_multiply (MatrixData * dst, MatrixData * a, MatrixData * b)
944 for (i = 0; i < 4; i++) {
945 for (j = 0; j < 4; j++) {
947 for (k = 0; k < 4; k++) {
948 x += a->dm[i][k] * b->dm[k][j];
953 color_matrix_copy (dst, &tmp);
957 color_matrix_invert (MatrixData * d, MatrixData * s)
963 color_matrix_set_identity (&tmp);
964 for (j = 0; j < 3; j++) {
965 for (i = 0; i < 3; i++) {
967 s->dm[(i + 1) % 3][(j + 1) % 3] * s->dm[(i + 2) % 3][(j + 2) % 3] -
968 s->dm[(i + 1) % 3][(j + 2) % 3] * s->dm[(i + 2) % 3][(j + 1) % 3];
972 tmp.dm[0][0] * s->dm[0][0] + tmp.dm[0][1] * s->dm[1][0] +
973 tmp.dm[0][2] * s->dm[2][0];
974 for (j = 0; j < 3; j++) {
975 for (i = 0; i < 3; i++) {
979 color_matrix_copy (d, &tmp);
983 color_matrix_offset_components (MatrixData * m, double a1, double a2, double a3)
987 color_matrix_set_identity (&a);
991 color_matrix_multiply (m, &a, m);
995 color_matrix_scale_components (MatrixData * m, double a1, double a2, double a3)
999 color_matrix_set_identity (&a);
1003 color_matrix_multiply (m, &a, m);
1007 color_matrix_debug (const MatrixData * s)
1009 GST_DEBUG ("[%f %f %f %f]", s->dm[0][0], s->dm[0][1], s->dm[0][2],
1011 GST_DEBUG ("[%f %f %f %f]", s->dm[1][0], s->dm[1][1], s->dm[1][2],
1013 GST_DEBUG ("[%f %f %f %f]", s->dm[2][0], s->dm[2][1], s->dm[2][2],
1015 GST_DEBUG ("[%f %f %f %f]", s->dm[3][0], s->dm[3][1], s->dm[3][2],
1020 color_matrix_convert (MatrixData * s)
1024 for (i = 0; i < 4; i++)
1025 for (j = 0; j < 4; j++)
1026 s->im[i][j] = rint (s->dm[i][j]);
1028 GST_DEBUG ("[%6d %6d %6d %6d]", s->im[0][0], s->im[0][1], s->im[0][2],
1030 GST_DEBUG ("[%6d %6d %6d %6d]", s->im[1][0], s->im[1][1], s->im[1][2],
1032 GST_DEBUG ("[%6d %6d %6d %6d]", s->im[2][0], s->im[2][1], s->im[2][2],
1034 GST_DEBUG ("[%6d %6d %6d %6d]", s->im[3][0], s->im[3][1], s->im[3][2],
1039 color_matrix_YCbCr_to_RGB (MatrixData * m, double Kr, double Kb)
1041 double Kg = 1.0 - Kr - Kb;
1044 {1., 0., 2 * (1 - Kr), 0.},
1045 {1., -2 * Kb * (1 - Kb) / Kg, -2 * Kr * (1 - Kr) / Kg, 0.},
1046 {1., 2 * (1 - Kb), 0., 0.},
1051 color_matrix_multiply (m, &k, m);
1055 color_matrix_RGB_to_YCbCr (MatrixData * m, double Kr, double Kb)
1057 double Kg = 1.0 - Kr - Kb;
1066 x = 1 / (2 * (1 - Kb));
1067 k.dm[1][0] = -x * Kr;
1068 k.dm[1][1] = -x * Kg;
1069 k.dm[1][2] = x * (1 - Kb);
1072 x = 1 / (2 * (1 - Kr));
1073 k.dm[2][0] = x * (1 - Kr);
1074 k.dm[2][1] = -x * Kg;
1075 k.dm[2][2] = -x * Kb;
1083 color_matrix_multiply (m, &k, m);
1087 color_matrix_RGB_to_XYZ (MatrixData * dst, double Rx, double Ry, double Gx,
/* Build the linear-RGB -> CIE XYZ matrix from the chromaticity
 * coordinates of the R/G/B primaries and the white point (Wx, Wy).
 * The primaries form the column basis; the white point fixes the
 * per-channel scale factors sx/sy/sz via the inverted basis.
 * NOTE(review): truncated listing — several matrix-entry assignments
 * and the final column scaling are not visible. */
1088 double Gy, double Bx, double By, double Wx, double Wy)
1094 color_matrix_set_identity (&m);
/* z-coordinate of each primary: z = 1 - x - y */
1098 m.dm[2][0] = (1.0 - Rx - Ry);
1101 m.dm[2][1] = (1.0 - Gx - Gy);
1104 m.dm[2][2] = (1.0 - Bx - By);
1106 color_matrix_invert (&im, &m);
/* white point in XYZ, with Y normalized */
1110 wz = (1.0 - Wx - Wy) / Wy;
1112 sx = im.dm[0][0] * wx + im.dm[0][1] * wy + im.dm[0][2] * wz;
1113 sy = im.dm[1][0] * wx + im.dm[1][1] * wy + im.dm[1][2] * wz;
1114 sz = im.dm[2][0] * wx + im.dm[2][1] * wy + im.dm[2][2] * wz;
1126 color_matrix_copy (dst, &m);
1130 videoconvert_convert_init_tables (MatrixData * data)
1134 data->t_r = g_new (gint64, 256);
1135 data->t_g = g_new (gint64, 256);
1136 data->t_b = g_new (gint64, 256);
1138 for (i = 0; i < 256; i++) {
1139 gint64 r = 0, g = 0, b = 0;
1141 for (j = 0; j < 3; j++) {
1142 r = (r << 16) + data->im[j][0] * i;
1143 g = (g << 16) + data->im[j][1] * i;
1144 b = (b << 16) + data->im[j][2] * i;
1150 data->t_c = ((gint64) data->im[0][3] << 32)
1151 + ((gint64) data->im[1][3] << 16)
1152 + ((gint64) data->im[2][3] << 0);
1156 _custom_video_orc_matrix8 (guint8 * ORC_RESTRICT d1,
1157 const guint8 * ORC_RESTRICT s1, orc_int64 p1, orc_int64 p2, orc_int64 p3,
1158 orc_int64 p4, int n)
1163 gint a00, a01, a02, a03;
1164 gint a10, a11, a12, a13;
1165 gint a20, a21, a22, a23;
1167 a00 = (gint16) (p1 >> 16);
1168 a01 = (gint16) (p2 >> 16);
1169 a02 = (gint16) (p3 >> 16);
1170 a03 = (gint16) (p4 >> 16);
1171 a10 = (gint16) (p1 >> 32);
1172 a11 = (gint16) (p2 >> 32);
1173 a12 = (gint16) (p3 >> 32);
1174 a13 = (gint16) (p4 >> 32);
1175 a20 = (gint16) (p1 >> 48);
1176 a21 = (gint16) (p2 >> 48);
1177 a22 = (gint16) (p3 >> 48);
1178 a23 = (gint16) (p4 >> 48);
1180 for (i = 0; i < n; i++) {
1185 y = ((a00 * r + a01 * g + a02 * b) >> SCALE) + a03;
1186 u = ((a10 * r + a11 * g + a12 * b) >> SCALE) + a13;
1187 v = ((a20 * r + a21 * g + a22 * b) >> SCALE) + a23;
1189 d1[i * 4 + 1] = CLAMP (y, 0, 255);
1190 d1[i * 4 + 2] = CLAMP (u, 0, 255);
1191 d1[i * 4 + 3] = CLAMP (v, 0, 255);
1196 video_converter_matrix8 (MatrixData * data, gpointer pixels)
1198 gpointer d = pixels;
1199 video_orc_matrix8 (d, pixels, data->orc_p1, data->orc_p2,
1200 data->orc_p3, data->orc_p4, data->width);
1204 video_converter_matrix8_table (MatrixData * data, gpointer pixels)
/* Table-driven 8-bit matrix: one gint64 lookup per input channel
 * (tables built by videoconvert_convert_init_tables) summed with the
 * constant t_c; the three outputs are then extracted from the packed
 * 16-bit lanes.  Valid only when is_no_clip_matrix() holds, since no
 * clamping is performed.  NOTE(review): truncated listing — the pixel
 * pointer declaration and the r/g/b loads are not visible. */
1206 gint i, width = data->width * 4;
1208 gint64 c = data->t_c;
1212 for (i = 0; i < width; i += 4) {
1217 x = data->t_r[r] + data->t_g[g] + data->t_b[b] + c;
1219 p[i + 1] = x >> (32 + SCALE);
1220 p[i + 2] = x >> (16 + SCALE);
1221 p[i + 3] = x >> (0 + SCALE);
1226 video_converter_matrix8_AYUV_ARGB (MatrixData * data, gpointer pixels)
1228 gpointer d = pixels;
1230 video_orc_convert_AYUV_ARGB (d, 0, pixels, 0,
1231 data->im[0][0], data->im[0][2],
1232 data->im[2][1], data->im[1][1], data->im[1][2], data->width, 1);
1236 is_ayuv_to_rgb_matrix (MatrixData * data)
1238 if (data->im[0][0] != data->im[1][0] || data->im[1][0] != data->im[2][0])
1241 if (data->im[0][1] != 0 || data->im[2][2] != 0)
1248 is_identity_matrix (MatrixData * data)
/* Accept any "scaled identity": every diagonal entry equal to
 * im[0][0] (except im[3][3], which must be exactly 1) and zero
 * everywhere else.  NOTE(review): truncated listing — declarations and
 * the return statements are not visible; tokens kept as found. */
1251 gint c = data->im[0][0];
1253 /* not really checking identity because of rounding errors but given
1254 * the conversions we do we just check for anything that looks like:
1261 for (i = 0; i < 4; i++) {
1262 for (j = 0; j < 4; j++) {
1264 if (i == 3 && data->im[i][j] != 1)
1266 else if (data->im[i][j] != c)
1268 } else if (data->im[i][j] != 0)
1276 is_no_clip_matrix (MatrixData * data)
/* Return TRUE when applying the fixed-point matrix to each of the
 * eight test RGB triples keeps every output component inside
 * [0, 255], i.e. no clamping is needed and the table-based fast path
 * may be used.  NOTE(review): truncated listing — the table rows
 * (presumably the 0/255 corner combinations — confirm against
 * upstream) and the return statements are not visible. */
1279 static const guint8 test[8][3] = {
1290 for (i = 0; i < 8; i++) {
1298 y = (data->im[0][0] * r + data->im[0][1] * g +
1299 data->im[0][2] * b + data->im[0][3]) >> SCALE;
1300 u = (data->im[1][0] * r + data->im[1][1] * g +
1301 data->im[1][2] * b + data->im[1][3]) >> SCALE;
1302 v = (data->im[2][0] * r + data->im[2][1] * g +
1303 data->im[2][2] * b + data->im[2][3]) >> SCALE;
1305 if (y != CLAMP (y, 0, 255) || u != CLAMP (u, 0, 255)
1306 || v != CLAMP (v, 0, 255))
1313 video_converter_matrix16 (MatrixData * data, gpointer pixels)
1318 guint16 *p = pixels;
1319 gint width = data->width;
1321 for (i = 0; i < width; i++) {
1326 y = (data->im[0][0] * r + data->im[0][1] * g +
1327 data->im[0][2] * b + data->im[0][3]) >> SCALE;
1328 u = (data->im[1][0] * r + data->im[1][1] * g +
1329 data->im[1][2] * b + data->im[1][3]) >> SCALE;
1330 v = (data->im[2][0] * r + data->im[2][1] * g +
1331 data->im[2][2] * b + data->im[2][3]) >> SCALE;
1333 p[i * 4 + 1] = CLAMP (y, 0, 65535);
1334 p[i * 4 + 2] = CLAMP (u, 0, 65535);
1335 p[i * 4 + 3] = CLAMP (v, 0, 65535);
1341 prepare_matrix (GstVideoConverter * convert, MatrixData * data)
/* Finalize @data for per-line use: scale into fixed point (SCALE
 * bits), round to integers, and select the fastest matrix_func for
 * the current bit depth — identity matrices are skipped entirely;
 * 8-bit input can use the AYUV->ARGB orc kernel, the per-channel
 * lookup table, or the generic 8-bit orc kernel; everything else uses
 * the C 16-bit path.  NOTE(review): truncated listing — some
 * declarations, braces and early returns are not visible. */
1343 if (is_identity_matrix (data))
1346 color_matrix_scale_components (data, SCALE_F, SCALE_F, SCALE_F);
1347 color_matrix_convert (data);
1349 data->width = convert->current_width;
1351 if (convert->current_bits == 8) {
1352 if (!convert->unpack_rgb && convert->pack_rgb
1353 && is_ayuv_to_rgb_matrix (data)) {
1354 GST_DEBUG ("use fast AYUV -> RGB matrix");
1355 data->matrix_func = video_converter_matrix8_AYUV_ARGB;
1356 } else if (is_no_clip_matrix (data)) {
1357 GST_DEBUG ("use 8bit table");
1358 data->matrix_func = video_converter_matrix8_table;
1359 videoconvert_convert_init_tables (data);
1363 GST_DEBUG ("use 8bit matrix");
1364 data->matrix_func = video_converter_matrix8;
/* pack the three matrix columns into 16-bit lanes of the 64-bit orc
 * parameters p1..p3 */
1366 data->orc_p1 = (((guint64) (guint16) data->im[2][0]) << 48) |
1367 (((guint64) (guint16) data->im[1][0]) << 32) |
1368 (((guint64) (guint16) data->im[0][0]) << 16);
1369 data->orc_p2 = (((guint64) (guint16) data->im[2][1]) << 48) |
1370 (((guint64) (guint16) data->im[1][1]) << 32) |
1371 (((guint64) (guint16) data->im[0][1]) << 16);
1372 data->orc_p3 = (((guint64) (guint16) data->im[2][2]) << 48) |
1373 (((guint64) (guint16) data->im[1][2]) << 32) |
1374 (((guint64) (guint16) data->im[0][2]) << 16);
/* the constant offsets are stored unscaled (SCALE shifted out) */
1376 a03 = data->im[0][3] >> SCALE;
1377 a13 = data->im[1][3] >> SCALE;
1378 a23 = data->im[2][3] >> SCALE;
1380 data->orc_p4 = (((guint64) (guint16) a23) << 48) |
1381 (((guint64) (guint16) a13) << 32) | (((guint64) (guint16) a03) << 16);
1384 GST_DEBUG ("use 16bit matrix");
1385 data->matrix_func = video_converter_matrix16;
1390 compute_matrix_to_RGB (GstVideoConverter * convert, MatrixData * data)
1393 gdouble Kr = 0, Kb = 0;
1395 info = &convert->in_info;
1398 const GstVideoFormatInfo *uinfo;
1399 gint offset[4], scale[4];
1401 uinfo = gst_video_format_get_info (convert->unpack_format);
1403 /* bring color components to [0..1.0] range */
1404 gst_video_color_range_offsets (info->colorimetry.range, uinfo, offset,
1407 color_matrix_offset_components (data, -offset[0], -offset[1], -offset[2]);
1408 color_matrix_scale_components (data, 1 / ((float) scale[0]),
1409 1 / ((float) scale[1]), 1 / ((float) scale[2]));
1412 if (!convert->unpack_rgb && !CHECK_MATRIX_NONE (convert)) {
1413 if (CHECK_MATRIX_OUTPUT (convert))
1414 info = &convert->out_info;
1416 /* bring components to R'G'B' space */
1417 if (gst_video_color_matrix_get_Kr_Kb (info->colorimetry.matrix, &Kr, &Kb))
1418 color_matrix_YCbCr_to_RGB (data, Kr, Kb);
1420 color_matrix_debug (data);
1424 compute_matrix_to_YUV (GstVideoConverter * convert, MatrixData * data,
1428 gdouble Kr = 0, Kb = 0;
1430 if (force || (!convert->pack_rgb && !CHECK_MATRIX_NONE (convert))) {
1431 if (CHECK_MATRIX_INPUT (convert))
1432 info = &convert->in_info;
1434 info = &convert->out_info;
1436 /* bring components to YCbCr space */
1437 if (gst_video_color_matrix_get_Kr_Kb (info->colorimetry.matrix, &Kr, &Kb))
1438 color_matrix_RGB_to_YCbCr (data, Kr, Kb);
1441 info = &convert->out_info;
1444 const GstVideoFormatInfo *uinfo;
1445 gint offset[4], scale[4];
1447 uinfo = gst_video_format_get_info (convert->pack_format);
1449 /* bring color components to nominal range */
1450 gst_video_color_range_offsets (info->colorimetry.range, uinfo, offset,
1453 color_matrix_scale_components (data, (float) scale[0], (float) scale[1],
1455 color_matrix_offset_components (data, offset[0], offset[1], offset[2]);
1458 color_matrix_debug (data);
/* Gamma-decode one line of 8-bit AYUV/ARGB-style pixels (4 components
 * per pixel) into 16-bit. Components 1..3 go through the 256-entry
 * gamma table; component 0 (alpha) is widened 8->16 by bit replication
 * and is NOT gamma-mapped. */
1463 gamma_convert_u8_u16 (GammaData * data, gpointer dest, gpointer src)
1468 guint16 *table = data->gamma_table;
1469 gint width = data->width * 4;
1471 for (i = 0; i < width; i += 4) {
1472 d[i + 0] = (s[i] << 8) | s[i];
1473 d[i + 1] = table[s[i + 1]];
1474 d[i + 2] = table[s[i + 2]];
1475 d[i + 3] = table[s[i + 3]];
/* Gamma-encode one line of 16-bit pixels down to 8-bit. Components
 * 1..3 go through the 65536-entry 8-bit gamma table; component 0
 * (alpha) is simply narrowed by dropping the low byte. */
1480 gamma_convert_u16_u8 (GammaData * data, gpointer dest, gpointer src)
1485 guint8 *table = data->gamma_table;
1486 gint width = data->width * 4;
1488 for (i = 0; i < width; i += 4) {
1489 d[i + 0] = s[i] >> 8;
1490 d[i + 1] = table[s[i + 1]];
1491 d[i + 2] = table[s[i + 2]];
1492 d[i + 3] = table[s[i + 3]];
/* Gamma-map one line of 16-bit pixels in place width-wise: components
 * 1..3 go through the 65536-entry 16-bit gamma table. (The component-0
 * copy is on a source line elided from this view.) */
1497 gamma_convert_u16_u16 (GammaData * data, gpointer dest, gpointer src)
1502 guint16 *table = data->gamma_table;
1503 gint width = data->width * 4;
1505 for (i = 0; i < width; i += 4) {
1507 d[i + 1] = table[s[i + 1]];
1508 d[i + 2] = table[s[i + 2]];
1509 d[i + 3] = table[s[i + 3]];
/* Build the gamma-decode stage: allocate the decode lookup table for
 * the input transfer function and select the 8->16 or 16->16 line
 * converter. After this stage the pipeline works on 16-bit ARGB64. */
1514 setup_gamma_decode (GstVideoConverter * convert)
1516 GstVideoTransferFunction func;
1520 func = convert->in_info.colorimetry.transfer;
1522 convert->gamma_dec.width = convert->current_width;
1523 if (convert->current_bits == 8) {
1524 GST_DEBUG ("gamma decode 8->16: %d", func);
1525 convert->gamma_dec.gamma_func = gamma_convert_u8_u16;
1526 t = convert->gamma_dec.gamma_table = g_malloc (sizeof (guint16) * 256);
/* table maps each 8-bit code to the linear value scaled to 16 bits */
1528 for (i = 0; i < 256; i++)
1529 t[i] = rint (gst_video_color_transfer_decode (func, i / 255.0) * 65535.0);
1531 GST_DEBUG ("gamma decode 16->16: %d", func);
1532 convert->gamma_dec.gamma_func = gamma_convert_u16_u16;
1533 t = convert->gamma_dec.gamma_table = g_malloc (sizeof (guint16) * 65536);
1535 for (i = 0; i < 65536; i++)
1537 rint (gst_video_color_transfer_decode (func, i / 65535.0) * 65535.0);
/* pipeline state is now 16-bit, 8 bytes per pixel, ARGB64 */
1539 convert->current_bits = 16;
1540 convert->current_pstride = 8;
1541 convert->current_format = GST_VIDEO_FORMAT_ARGB64;
/* Build the gamma-encode stage for the output transfer function:
 * allocate a 65536-entry encode table and select the 16->8 or 16->16
 * line converter according to @target_bits. */
1545 setup_gamma_encode (GstVideoConverter * convert, gint target_bits)
1547 GstVideoTransferFunction func;
1550 func = convert->out_info.colorimetry.transfer;
1552 convert->gamma_enc.width = convert->current_width;
1553 if (target_bits == 8) {
1556 GST_DEBUG ("gamma encode 16->8: %d", func);
1557 convert->gamma_enc.gamma_func = gamma_convert_u16_u8;
1558 t = convert->gamma_enc.gamma_table = g_malloc (sizeof (guint8) * 65536);
1560 for (i = 0; i < 65536; i++)
1561 t[i] = rint (gst_video_color_transfer_encode (func, i / 65535.0) * 255.0);
1565 GST_DEBUG ("gamma encode 16->16: %d", func);
1566 convert->gamma_enc.gamma_func = gamma_convert_u16_u16;
1567 t = convert->gamma_enc.gamma_table = g_malloc (sizeof (guint16) * 65536);
1569 for (i = 0; i < 65536; i++)
1571 rint (gst_video_color_transfer_encode (func, i / 65535.0) * 65535.0);
/* Chain the "convert to RGB" stage onto @prev: for non-RGB unpack
 * formats, build and prepare the to-RGB matrix (scaled to the current
 * bit depth) and append a line cache running do_convert_to_RGB_lines.
 * When gamma remapping is requested, also chain the gamma decode. */
1575 static GstLineCache *
1576 chain_convert_to_RGB (GstVideoConverter * convert, GstLineCache * prev,
1581 do_gamma = CHECK_GAMMA_REMAP (convert);
1586 if (!convert->unpack_rgb) {
1587 color_matrix_set_identity (&convert->to_RGB_matrix);
1588 compute_matrix_to_RGB (convert, &convert->to_RGB_matrix);
1590 /* matrix is in 0..1 range, scale to current bits */
1591 GST_DEBUG ("chain RGB convert");
1592 scale = 1 << convert->current_bits;
1593 color_matrix_scale_components (&convert->to_RGB_matrix,
1594 (float) scale, (float) scale, (float) scale);
1596 prepare_matrix (convert, &convert->to_RGB_matrix);
/* format after conversion depends on the working bit depth */
1598 if (convert->current_bits == 8)
1599 convert->current_format = GST_VIDEO_FORMAT_ARGB;
1601 convert->current_format = GST_VIDEO_FORMAT_ARGB64;
1604 prev = convert->to_RGB_lines[idx] = gst_line_cache_new (prev);
1605 prev->write_input = TRUE;
1606 prev->pass_alloc = FALSE;
1608 prev->stride = convert->current_pstride * convert->current_width;
1609 gst_line_cache_set_need_line_func (prev,
1610 do_convert_to_RGB_lines, idx, convert, NULL);
1612 GST_DEBUG ("chain gamma decode");
1613 setup_gamma_decode (convert);
/* Chain the horizontal scaling stage: create the per-thread h-scaler
 * from the configured resampler method/taps, record the format being
 * scaled, and append a line cache running do_hscale_lines. Updates
 * current_width to the output width. */
1618 static GstLineCache *
1619 chain_hscale (GstVideoConverter * convert, GstLineCache * prev, gint idx)
1624 method = GET_OPT_RESAMPLER_METHOD (convert);
1625 taps = GET_OPT_RESAMPLER_TAPS (convert);
1627 convert->h_scaler[idx] =
1628 gst_video_scaler_new (method, GST_VIDEO_SCALER_FLAG_NONE, taps,
1629 convert->in_width, convert->out_width, convert->config);
/* query the actual number of taps the scaler settled on */
1631 gst_video_scaler_get_coeff (convert->h_scaler[idx], 0, NULL, &taps);
1633 GST_DEBUG ("chain hscale %d->%d, taps %d, method %d",
1634 convert->in_width, convert->out_width, taps, method);
1636 convert->current_width = convert->out_width;
1637 convert->h_scale_format = convert->current_format;
1639 prev = convert->hscale_lines[idx] = gst_line_cache_new (prev);
1640 prev->write_input = FALSE;
1641 prev->pass_alloc = FALSE;
1643 prev->stride = convert->current_pstride * convert->current_width;
1644 gst_line_cache_set_need_line_func (prev, do_hscale_lines, idx, convert, NULL);
/* Chain the vertical scaling stage: create a progressive v-scaler and,
 * for interlaced input, an additional interlaced v-scaler. The line
 * cache needs as many input lines as the larger of the two tap counts;
 * pass_alloc is only possible with a single tap (pure line copy). */
1649 static GstLineCache *
1650 chain_vscale (GstVideoConverter * convert, GstLineCache * prev, gint idx)
1653 guint taps, taps_i = 0;
1656 method = GET_OPT_RESAMPLER_METHOD (convert);
1657 taps = GET_OPT_RESAMPLER_TAPS (convert);
1659 if (GST_VIDEO_INFO_IS_INTERLACED (&convert->in_info)) {
1660 convert->v_scaler_i[idx] =
1661 gst_video_scaler_new (method, GST_VIDEO_SCALER_FLAG_INTERLACED,
1662 taps, convert->in_height, convert->out_height, convert->config);
1664 gst_video_scaler_get_coeff (convert->v_scaler_i[idx], 0, NULL, &taps_i);
1667 convert->v_scaler_p[idx] =
1668 gst_video_scaler_new (method, 0, taps, convert->in_height,
1669 convert->out_height, convert->config);
1670 convert->v_scale_width = convert->current_width;
1671 convert->v_scale_format = convert->current_format;
1672 convert->current_height = convert->out_height;
1674 gst_video_scaler_get_coeff (convert->v_scaler_p[idx], 0, NULL, &taps);
1676 GST_DEBUG ("chain vscale %d->%d, taps %d, method %d, backlog %d",
1677 convert->in_height, convert->out_height, taps, method, backlog);
/* the upstream cache must retain 'backlog' already-consumed lines */
1679 prev->backlog = backlog;
1680 prev = convert->vscale_lines[idx] = gst_line_cache_new (prev);
1681 prev->pass_alloc = (taps == 1);
1682 prev->write_input = FALSE;
1683 prev->n_lines = MAX (taps_i, taps);
1684 prev->stride = convert->current_pstride * convert->current_width;
1685 gst_line_cache_set_need_line_func (prev, do_vscale_lines, idx, convert, NULL);
/* Chain h- and v-scaling in whichever order produces the fewest
 * intermediate pixels. Called twice per pipeline: once for downscaling
 * (force=FALSE, only scales when output is smaller) and once with
 * force=TRUE to perform any remaining (up)scaling. */
1690 static GstLineCache *
1691 chain_scale (GstVideoConverter * convert, GstLineCache * prev, gboolean force,
/* s0..s3: pixel counts of the four possible intermediate sizes */
1694 gint s0, s1, s2, s3;
1696 s0 = convert->current_width * convert->current_height;
1697 s3 = convert->out_width * convert->out_height;
1699 GST_DEBUG ("in pixels %d <> out pixels %d", s0, s3);
1701 if (s3 <= s0 || force) {
1702 /* we are making the image smaller or are forced to resample */
1703 s1 = convert->out_width * convert->current_height;
1704 s2 = convert->current_width * convert->out_height;
1706 GST_DEBUG ("%d <> %d", s1, s2);
1709 /* h scaling first produces less pixels */
1710 if (convert->current_width != convert->out_width)
1711 prev = chain_hscale (convert, prev, idx);
1712 if (convert->current_height != convert->out_height)
1713 prev = chain_vscale (convert, prev, idx);
1715 /* v scaling first produces less pixels */
1716 if (convert->current_height != convert->out_height)
1717 prev = chain_vscale (convert, prev, idx);
1718 if (convert->current_width != convert->out_width)
1719 prev = chain_hscale (convert, prev, idx);
/* Chain the central colorspace-conversion stage. Decides whether a
 * conversion matrix is needed at all (matrix, primaries or bit depth
 * differ), builds the combined convert_matrix (including a primaries
 * conversion through XYZ when the primaries differ), and appends a
 * line cache running do_convert_lines when a conversion is required. */
1725 static GstLineCache *
1726 chain_convert (GstVideoConverter * convert, GstLineCache * prev, gint idx)
1728 gboolean do_gamma, do_conversion, pass_alloc = FALSE;
1729 gboolean same_matrix, same_primaries, same_bits;
1732 same_bits = convert->unpack_bits == convert->pack_bits;
1733 if (CHECK_MATRIX_NONE (convert)) {
1737 convert->in_info.colorimetry.matrix ==
1738 convert->out_info.colorimetry.matrix;
1741 if (CHECK_PRIMARIES_NONE (convert)) {
1742 same_primaries = TRUE;
1745 convert->in_info.colorimetry.primaries ==
1746 convert->out_info.colorimetry.primaries;
1749 GST_DEBUG ("matrix %d -> %d (%d)", convert->in_info.colorimetry.matrix,
1750 convert->out_info.colorimetry.matrix, same_matrix);
1751 GST_DEBUG ("bits %d -> %d (%d)", convert->unpack_bits, convert->pack_bits,
1753 GST_DEBUG ("primaries %d -> %d (%d)", convert->in_info.colorimetry.primaries,
1754 convert->out_info.colorimetry.primaries, same_primaries);
1756 color_matrix_set_identity (&convert->convert_matrix);
1758 if (!same_primaries) {
1759 const GstVideoColorPrimariesInfo *pi;
1761 /* Convert from RGB_input to RGB_output via XYZ
1762 * res = XYZ_to_RGB_output ( RGB_to_XYZ_input ( input ) )
1763 * or in matricial form:
1764 * RGB_output = XYZ_to_RGB_output_matrix * RGB_TO_XYZ_input_matrix * RGB_input
1766 * The RGB_input is the pre-existing convert_matrix
1767 * The convert_matrix will become the RGB_output
1770 /* Convert input RGB to XYZ */
1771 pi = gst_video_color_primaries_get_info (convert->in_info.colorimetry.
1773 /* Get the RGB_TO_XYZ_input_matrix */
1774 color_matrix_RGB_to_XYZ (&p1, pi->Rx, pi->Ry, pi->Gx, pi->Gy, pi->Bx,
1775 pi->By, pi->Wx, pi->Wy);
1776 GST_DEBUG ("to XYZ matrix");
1777 color_matrix_debug (&p1);
1778 GST_DEBUG ("current matrix");
1779 /* convert_matrix = RGB_TO_XYZ_input_matrix * input_RGB */
1780 color_matrix_multiply (&convert->convert_matrix, &convert->convert_matrix,
1782 color_matrix_debug (&convert->convert_matrix);
1784 /* Convert XYZ to output RGB */
1785 pi = gst_video_color_primaries_get_info (convert->out_info.colorimetry.
1787 /* Calculate the XYZ_to_RGB_output_matrix
1788 * * Get the RGB_TO_XYZ_output_matrix
1792 color_matrix_RGB_to_XYZ (&p2, pi->Rx, pi->Ry, pi->Gx, pi->Gy, pi->Bx,
1793 pi->By, pi->Wx, pi->Wy);
/* invert to get XYZ->RGB for the output primaries */
1794 color_matrix_invert (&p2, &p2);
1795 GST_DEBUG ("to RGB matrix");
1796 color_matrix_debug (&p2);
1798 * convert_matrix = XYZ_to_RGB_output_matrix * RGB_TO_XYZ_input_matrix * RGB_input
1799 * = XYZ_to_RGB_output_matrix * convert_matrix
1800 * = p2 * convert_matrix
1802 color_matrix_multiply (&convert->convert_matrix, &p2,
1803 &convert->convert_matrix);
1804 GST_DEBUG ("current matrix");
1805 color_matrix_debug (&convert->convert_matrix);
1808 do_gamma = CHECK_GAMMA_REMAP (convert);
1811 convert->in_bits = convert->unpack_bits;
1812 convert->out_bits = convert->pack_bits;
1814 if (!same_bits || !same_matrix || !same_primaries) {
1815 /* no gamma, combine all conversions into 1 */
1816 if (convert->in_bits < convert->out_bits) {
/* pre-scale down so the matrix result lands in the wider output range */
1817 gint scale = 1 << (convert->out_bits - convert->in_bits);
1818 color_matrix_scale_components (&convert->convert_matrix,
1819 1 / (float) scale, 1 / (float) scale, 1 / (float) scale);
1821 GST_DEBUG ("to RGB matrix");
1822 compute_matrix_to_RGB (convert, &convert->convert_matrix);
1823 GST_DEBUG ("current matrix");
1824 color_matrix_debug (&convert->convert_matrix);
1826 GST_DEBUG ("to YUV matrix");
1827 compute_matrix_to_YUV (convert, &convert->convert_matrix, FALSE);
1828 GST_DEBUG ("current matrix");
1829 color_matrix_debug (&convert->convert_matrix);
1830 if (convert->in_bits > convert->out_bits) {
1831 gint scale = 1 << (convert->in_bits - convert->out_bits);
1832 color_matrix_scale_components (&convert->convert_matrix,
1833 (float) scale, (float) scale, (float) scale);
1835 convert->current_bits = MAX (convert->in_bits, convert->out_bits);
1837 do_conversion = TRUE;
1838 if (!same_matrix || !same_primaries)
1839 prepare_matrix (convert, &convert->convert_matrix);
1840 if (convert->in_bits == convert->out_bits)
1843 do_conversion = FALSE;
1845 convert->current_bits = convert->pack_bits;
1846 convert->current_format = convert->pack_format;
/* pstride in bytes: 4 for 8-bit (ARGB), 8 for 16-bit (ARGB64) */
1847 convert->current_pstride = convert->current_bits >> 1;
1849 /* we did gamma, just do colorspace conversion if needed */
1850 if (same_primaries) {
1851 do_conversion = FALSE;
1853 prepare_matrix (convert, &convert->convert_matrix);
1854 convert->in_bits = convert->out_bits = 16;
1856 do_conversion = TRUE;
1860 if (do_conversion) {
1861 GST_DEBUG ("chain conversion");
1862 prev = convert->convert_lines[idx] = gst_line_cache_new (prev);
1863 prev->write_input = TRUE;
1864 prev->pass_alloc = pass_alloc;
1866 prev->stride = convert->current_pstride * convert->current_width;
1867 gst_line_cache_set_need_line_func (prev,
1868 do_convert_lines, idx, convert, NULL);
/* Overwrite the 8-bit alpha component of each pixel in one line with
 * the configured alpha value, clamped to 255. (The loop body writing
 * p[i * 4] is on a source line elided from this view.) */
1874 convert_set_alpha_u8 (GstVideoConverter * convert, gpointer pixels, gint width)
1877 guint8 alpha = MIN (convert->alpha_value, 255);
1880 for (i = 0; i < width; i++)
/* Overwrite the 16-bit alpha component of each pixel in one line.
 * The 8-bit alpha value is widened to 16 bits by bit replication
 * (alpha | alpha << 8). */
1885 convert_set_alpha_u16 (GstVideoConverter * convert, gpointer pixels, gint width)
1887 guint16 *p = pixels;
1891 alpha = MIN (convert->alpha_value, 255);
1892 alpha |= alpha << 8;
1894 for (i = 0; i < width; i++)
/* Multiply the 8-bit alpha component of each pixel by the configured
 * alpha value (scaled by 255), clamping the result to [0, 255]. */
1899 convert_mult_alpha_u8 (GstVideoConverter * convert, gpointer pixels, gint width)
1902 guint alpha = convert->alpha_value;
1905 for (i = 0; i < width; i++) {
1906 gint a = (p[i * 4] * alpha) / 255;
1907 p[i * 4] = CLAMP (a, 0, 255);
/* Multiply the 16-bit alpha component of each pixel by the configured
 * alpha value (scaled by 255), clamping the result to [0, 65535]. */
1912 convert_mult_alpha_u16 (GstVideoConverter * convert, gpointer pixels,
1915 guint16 *p = pixels;
1916 guint alpha = convert->alpha_value;
1919 for (i = 0; i < width; i++) {
1920 gint a = (p[i * 4] * alpha) / 255;
1921 p[i * 4] = CLAMP (a, 0, 65535);
/* Chain the alpha-processing stage: select the SET or MULT line
 * function for the current bit depth and append a line cache running
 * do_alpha_lines. NONE/COPY modes need no processing and return early
 * (return on an elided line). */
1925 static GstLineCache *
1926 chain_alpha (GstVideoConverter * convert, GstLineCache * prev, gint idx)
1928 switch (convert->alpha_mode) {
1929 case ALPHA_MODE_NONE:
1930 case ALPHA_MODE_COPY:
1933 case ALPHA_MODE_SET:
1934 if (convert->current_bits == 8)
1935 convert->alpha_func = convert_set_alpha_u8;
1937 convert->alpha_func = convert_set_alpha_u16;
1939 case ALPHA_MODE_MULT:
1940 if (convert->current_bits == 8)
1941 convert->alpha_func = convert_mult_alpha_u8;
1943 convert->alpha_func = convert_mult_alpha_u16;
1947 GST_DEBUG ("chain alpha mode %d", convert->alpha_mode);
1948 prev = convert->alpha_lines[idx] = gst_line_cache_new (prev);
1949 prev->write_input = TRUE;
/* alpha is modified in place, so the upstream allocation can be reused */
1950 prev->pass_alloc = TRUE;
1952 prev->stride = convert->current_pstride * convert->current_width;
1953 gst_line_cache_set_need_line_func (prev, do_alpha_lines, idx, convert, NULL);
/* Chain the "convert to YUV" stage: when gamma remapping, first chain
 * the gamma encode back to pack_bits; for non-RGB pack formats, build
 * and prepare the to-YUV matrix (scaled from the working range down to
 * pack bits) and append a line cache running do_convert_to_YUV_lines. */
1958 static GstLineCache *
1959 chain_convert_to_YUV (GstVideoConverter * convert, GstLineCache * prev,
1964 do_gamma = CHECK_GAMMA_REMAP (convert);
1969 GST_DEBUG ("chain gamma encode");
1970 setup_gamma_encode (convert, convert->pack_bits);
1972 convert->current_bits = convert->pack_bits;
1973 convert->current_pstride = convert->current_bits >> 1;
1975 if (!convert->pack_rgb) {
1976 color_matrix_set_identity (&convert->to_YUV_matrix);
1977 compute_matrix_to_YUV (convert, &convert->to_YUV_matrix, FALSE);
1979 /* matrix is in 0..255 range, scale to pack bits */
1980 GST_DEBUG ("chain YUV convert");
1981 scale = 1 << convert->pack_bits;
1982 color_matrix_scale_components (&convert->to_YUV_matrix,
1983 1 / (float) scale, 1 / (float) scale, 1 / (float) scale);
1984 prepare_matrix (convert, &convert->to_YUV_matrix);
1986 convert->current_format = convert->pack_format;
1988 prev = convert->to_YUV_lines[idx] = gst_line_cache_new (prev);
1989 prev->write_input = FALSE;
1990 prev->pass_alloc = FALSE;
1992 prev->stride = convert->current_pstride * convert->current_width;
1993 gst_line_cache_set_need_line_func (prev,
1994 do_convert_to_YUV_lines, idx, convert, NULL);
/* Chain the chroma-downsampling stage, but only when a progressive or
 * interlaced downsampler was created for this thread; otherwise @prev
 * is returned unchanged. */
2000 static GstLineCache *
2001 chain_downsample (GstVideoConverter * convert, GstLineCache * prev, gint idx)
2003 if (convert->downsample_p[idx] || convert->downsample_i[idx]) {
2004 GST_DEBUG ("chain downsample");
2005 prev = convert->downsample_lines[idx] = gst_line_cache_new (prev);
2006 prev->write_input = TRUE;
2007 prev->pass_alloc = TRUE;
2009 prev->stride = convert->current_pstride * convert->current_width;
2010 gst_line_cache_set_need_line_func (prev,
2011 do_downsample_lines, idx, convert, NULL);
/* Chain the dithering stage: compute the quantization step for each
 * component from the working bit depth vs the output depth, create a
 * GstVideoDither for the pack format when needed, and append a line
 * cache running do_dither_lines. Returns @prev unchanged when the
 * dither method is NONE (return on an elided line). */
2016 static GstLineCache *
2017 chain_dither (GstVideoConverter * convert, GstLineCache * prev, gint idx)
2020 gboolean do_dither = FALSE;
2021 GstVideoDitherFlags flags = 0;
2022 GstVideoDitherMethod method;
2023 guint quant[4], target_quant;
2025 method = GET_OPT_DITHER_METHOD (convert);
2026 if (method == GST_VIDEO_DITHER_NONE)
2029 target_quant = GET_OPT_DITHER_QUANTIZATION (convert);
2030 GST_DEBUG ("method %d, target-quantization %d", method, target_quant);
2032 if (convert->pack_pal) {
2039 for (i = 0; i < GST_VIDEO_MAX_COMPONENTS; i++) {
2042 depth = convert->out_info.finfo->depth[i];
2049 if (convert->current_bits >= depth) {
/* quantization step = 2^(lost bits); raise to target_quant if larger */
2050 quant[i] = 1 << (convert->current_bits - depth);
2051 if (target_quant > quant[i]) {
2052 flags |= GST_VIDEO_DITHER_FLAG_QUANTIZE;
2053 quant[i] = target_quant;
2064 GST_DEBUG ("chain dither");
2066 convert->dither[idx] = gst_video_dither_new (method,
2067 flags, convert->pack_format, quant, convert->current_width);
2069 prev = convert->dither_lines[idx] = gst_line_cache_new (prev);
2070 prev->write_input = TRUE;
2071 prev->pass_alloc = TRUE;
2073 prev->stride = convert->current_pstride * convert->current_width;
2074 gst_line_cache_set_need_line_func (prev, do_dither_lines, idx, convert,
/* Final stage: record the pack parameters. identity_pack is TRUE when
 * the output format equals its own unpack format, i.e. packing is a
 * no-op and lines can be written directly to the destination. */
2080 static GstLineCache *
2081 chain_pack (GstVideoConverter * convert, GstLineCache * prev, gint idx)
2083 convert->pack_nlines = convert->out_info.finfo->pack_lines;
2084 convert->pack_pstride = convert->current_pstride;
2085 convert->identity_pack =
2086 (convert->out_info.finfo->format ==
2087 convert->out_info.finfo->unpack_format);
2088 GST_DEBUG ("chain pack line format %s, pstride %d, identity_pack %d (%d %d)",
2089 gst_video_format_to_string (convert->current_format),
2090 convert->current_pstride, convert->identity_pack,
2091 convert->out_info.finfo->format, convert->out_info.finfo->unpack_format);
/* Wire up line allocators for every cache in each per-thread chain.
 * Walks each chain twice: first to propagate the maximum number of
 * lines needed between non-pass_alloc boundaries, then to assign an
 * alloc-line function per cache — writing straight into the
 * destination when packing is identity, otherwise into pooled temp
 * lines sized for the widest of input/output plus the border offset. */
2097 setup_allocators (GstVideoConverter * convert)
2099 GstLineCache *cache, *prev;
2100 GstLineCacheAllocLineFunc alloc_line;
2101 gboolean alloc_writable;
2103 GDestroyNotify notify;
2107 width = MAX (convert->in_maxwidth, convert->out_maxwidth);
2108 width += convert->out_x;
2110 for (i = 0; i < convert->conversion_runner->n_threads; i++) {
2111 /* start with using dest lines if we can directly write into it */
2112 if (convert->identity_pack) {
2113 alloc_line = get_dest_line;
2114 alloc_writable = TRUE;
2115 user_data = convert;
2119 converter_alloc_new (sizeof (guint16) * width * 4, 4 + BACKLOG,
2121 setup_border_alloc (convert, user_data);
2122 notify = (GDestroyNotify) converter_alloc_free;
2123 alloc_line = get_border_temp_line;
2124 /* when we add a border, we need to write */
2125 alloc_writable = convert->borderline != NULL;
2128 /* First step, try to calculate how many temp lines we need. Go backwards,
2129 * keep track of the maximum number of lines we need for each intermediate
2131 for (prev = cache = convert->pack_lines[i]; cache; cache = cache->prev) {
2132 GST_DEBUG ("looking at cache %p, %d lines, %d backlog", cache,
2133 cache->n_lines, cache->backlog);
2134 prev->n_lines = MAX (prev->n_lines, cache->n_lines);
2135 if (!cache->pass_alloc) {
2136 GST_DEBUG ("cache %p, needs %d lines", prev, prev->n_lines);
2141 /* now walk backwards, we try to write into the dest lines directly
2142 * and keep track if the source needs to be writable */
2143 for (cache = convert->pack_lines[i]; cache; cache = cache->prev) {
2144 gst_line_cache_set_alloc_line_func (cache, alloc_line, user_data, notify);
2145 cache->alloc_writable = alloc_writable;
2147 /* make sure only one cache frees the allocator */
2150 if (!cache->pass_alloc) {
2151 /* can't pass allocator, make new temp line allocator */
2153 converter_alloc_new (sizeof (guint16) * width * 4,
2154 cache->n_lines + cache->backlog, convert, NULL);
2155 notify = (GDestroyNotify) converter_alloc_free;
2156 alloc_line = get_temp_line;
2157 alloc_writable = FALSE;
2159 /* if someone writes to the input, we need a writable line from the
2161 if (cache->write_input)
2162 alloc_writable = TRUE;
2164 /* free leftover allocator */
/* Prepare the fill line used for borders when the output rectangle is
 * smaller than the output frame. Converts the configured ARGB border
 * color to the output colorspace (via the to-YUV matrix for YUV
 * outputs), splats it across a full line, and for single-plane
 * formats also packs a group of border pixels into convert->borders.
 * When no border fill is needed, borderline is set to NULL. */
2171 setup_borderline (GstVideoConverter * convert)
2175 width = MAX (convert->in_maxwidth, convert->out_maxwidth);
2176 width += convert->out_x;
2178 if (convert->fill_border && (convert->out_height < convert->out_maxheight ||
2179 convert->out_width < convert->out_maxwidth)) {
2182 const GstVideoFormatInfo *out_finfo;
2183 gpointer planes[GST_VIDEO_MAX_PLANES];
2184 gint strides[GST_VIDEO_MAX_PLANES];
2186 convert->borderline = g_malloc0 (sizeof (guint16) * width * 4);
2188 out_finfo = convert->out_info.finfo;
2190 if (GST_VIDEO_INFO_IS_YUV (&convert->out_info)) {
2195 /* Get Color matrix. */
2196 color_matrix_set_identity (&cm);
2197 compute_matrix_to_YUV (convert, &cm, TRUE);
2198 color_matrix_convert (&cm);
2200 border_val = GINT32_FROM_BE (convert->border_argb);
/* unpack the big-endian ARGB words into components */
2202 b = (0xFF000000 & border_val) >> 24;
2203 g = (0x00FF0000 & border_val) >> 16;
2204 r = (0x0000FF00 & border_val) >> 8;
2205 a = (0x000000FF & border_val);
/* apply the integer matrix (8-bit fixed point, hence >> 8) */
2207 y = 16 + ((r * cm.im[0][0] + g * cm.im[0][1] + b * cm.im[0][2]) >> 8);
2208 u = 128 + ((r * cm.im[1][0] + g * cm.im[1][1] + b * cm.im[1][2]) >> 8);
2209 v = 128 + ((r * cm.im[2][0] + g * cm.im[2][1] + b * cm.im[2][2]) >> 8);
2211 a = CLAMP (a, 0, 255);
2212 y = CLAMP (y, 0, 255);
2213 u = CLAMP (u, 0, 255);
2214 v = CLAMP (v, 0, 255);
2216 border_val = a | (y << 8) | (u << 16) | ((guint32) v << 24);
2218 border_val = GINT32_FROM_BE (convert->border_argb);
2220 if (convert->pack_bits == 8)
2221 video_orc_splat_u32 (convert->borderline, border_val, width);
2223 video_orc_splat2_u64 (convert->borderline, border_val, width);
2225 /* convert pixels */
2226 for (i = 0; i < out_finfo->n_planes; i++) {
2227 planes[i] = &convert->borders[i];
2228 strides[i] = sizeof (guint64);
2231 if (out_finfo->n_planes == 1) {
2232 /* for packed formats, convert based on subsampling so that we
2233 * get a complete group of pixels */
2234 for (i = 0; i < out_finfo->n_components; i++) {
2235 w_sub = MAX (w_sub, out_finfo->w_sub[i]);
2238 out_finfo->pack_func (out_finfo, GST_VIDEO_PACK_FLAG_NONE,
2239 convert->borderline, 0, planes, strides,
2240 GST_VIDEO_CHROMA_SITE_UNKNOWN, 0, 1 << w_sub);
2242 convert->borderline = NULL;
/* Decide how the alpha channel is processed, from the input/output
 * formats and the configured alpha mode and value:
 *  - no output alpha -> NONE
 *  - explicit COPY   -> COPY
 *  - MULT with value 1.0 collapses to COPY, otherwise MULT
 *  - default: value 1.0 -> NONE (unpack handles it), else SET */
2247 convert_get_alpha_mode (GstVideoConverter * convert)
2249 gboolean in_alpha, out_alpha;
2251 in_alpha = GST_VIDEO_INFO_HAS_ALPHA (&convert->in_info);
2252 out_alpha = GST_VIDEO_INFO_HAS_ALPHA (&convert->out_info);
2254 /* no output alpha, do nothing */
2256 return ALPHA_MODE_NONE;
2260 if (CHECK_ALPHA_COPY (convert))
2261 return ALPHA_MODE_COPY;
2263 if (CHECK_ALPHA_MULT (convert)) {
2264 if (GET_OPT_ALPHA_VALUE (convert) == 1.0)
2265 return ALPHA_MODE_COPY;
2267 return ALPHA_MODE_MULT;
2270 /* nothing special, this is what unpack etc does automatically */
2271 if (GET_OPT_ALPHA_VALUE (convert) == 1.0)
2272 return ALPHA_MODE_NONE;
2274 /* everything else becomes SET */
2275 return ALPHA_MODE_SET;
2279 * gst_video_converter_new: (skip)
2280 * @in_info: a #GstVideoInfo
2281 * @out_info: a #GstVideoInfo
2282 * @config: (transfer full): a #GstStructure with configuration options
2284 * Create a new converter object to convert between @in_info and @out_info
2287 * Returns: a #GstVideoConverter or %NULL if conversion is not possible.
/* Construct a converter: validate that no framerate/interlace
 * conversion is requested, read the crop/placement/border/alpha
 * options, normalize RGB colorimetry, size the thread pool, try a
 * format-specific fastpath, and otherwise build the generic per-thread
 * line-cache pipelines (unpack -> upsample -> to-RGB -> downscale ->
 * convert -> alpha -> upscale -> to-YUV -> downsample -> dither ->
 * pack) plus border line and allocators. */
2292 gst_video_converter_new (GstVideoInfo * in_info, GstVideoInfo * out_info,
2293 GstStructure * config)
2295 GstVideoConverter *convert;
2297 const GstVideoFormatInfo *fin, *fout, *finfo;
2298 gdouble alpha_value;
2301 g_return_val_if_fail (in_info != NULL, NULL);
2302 g_return_val_if_fail (out_info != NULL, NULL);
2303 /* we won't ever do framerate conversion */
2304 g_return_val_if_fail (in_info->fps_n == out_info->fps_n, NULL);
2305 g_return_val_if_fail (in_info->fps_d == out_info->fps_d, NULL);
2306 /* we won't ever do deinterlace */
2307 g_return_val_if_fail (in_info->interlace_mode == out_info->interlace_mode,
2310 convert = g_slice_new0 (GstVideoConverter);
2312 fin = in_info->finfo;
2313 fout = out_info->finfo;
2315 convert->in_info = *in_info;
2316 convert->out_info = *out_info;
2318 /* default config */
2319 convert->config = gst_structure_new_empty ("GstVideoConverter");
2321 gst_video_converter_set_config (convert, config);
2323 convert->in_maxwidth = GST_VIDEO_INFO_WIDTH (in_info);
2324 convert->in_maxheight = GST_VIDEO_INFO_HEIGHT (in_info);
2325 convert->out_maxwidth = GST_VIDEO_INFO_WIDTH (out_info);
2326 convert->out_maxheight = GST_VIDEO_INFO_HEIGHT (out_info);
2328 convert->in_x = get_opt_int (convert, GST_VIDEO_CONVERTER_OPT_SRC_X, 0);
2329 convert->in_y = get_opt_int (convert, GST_VIDEO_CONVERTER_OPT_SRC_Y, 0);
/* align the crop origin to the chroma subsampling of plane 1 */
2330 convert->in_x &= ~((1 << fin->w_sub[1]) - 1);
2331 convert->in_y &= ~((1 << fin->h_sub[1]) - 1);
2333 convert->in_width = get_opt_int (convert,
2334 GST_VIDEO_CONVERTER_OPT_SRC_WIDTH, convert->in_maxwidth - convert->in_x);
2335 convert->in_height = get_opt_int (convert,
2336 GST_VIDEO_CONVERTER_OPT_SRC_HEIGHT,
2337 convert->in_maxheight - convert->in_y);
/* clip the source rectangle against the frame bounds */
2340 MIN (convert->in_width, convert->in_maxwidth - convert->in_x);
2341 convert->in_height =
2342 MIN (convert->in_height, convert->in_maxheight - convert->in_y);
2344 convert->out_x = get_opt_int (convert, GST_VIDEO_CONVERTER_OPT_DEST_X, 0);
2345 convert->out_y = get_opt_int (convert, GST_VIDEO_CONVERTER_OPT_DEST_Y, 0);
2346 convert->out_x &= ~((1 << fout->w_sub[1]) - 1);
2347 convert->out_y &= ~((1 << fout->h_sub[1]) - 1);
2349 convert->out_width = get_opt_int (convert,
2350 GST_VIDEO_CONVERTER_OPT_DEST_WIDTH,
2351 convert->out_maxwidth - convert->out_x);
2352 convert->out_height =
2353 get_opt_int (convert, GST_VIDEO_CONVERTER_OPT_DEST_HEIGHT,
2354 convert->out_maxheight - convert->out_y);
2356 convert->out_width =
2357 MIN (convert->out_width, convert->out_maxwidth - convert->out_x);
2358 convert->out_height =
2359 MIN (convert->out_height, convert->out_maxheight - convert->out_y);
2361 convert->fill_border = GET_OPT_FILL_BORDER (convert);
2362 convert->border_argb = get_opt_uint (convert,
2363 GST_VIDEO_CONVERTER_OPT_BORDER_ARGB, DEFAULT_OPT_BORDER_ARGB);
2365 alpha_value = GET_OPT_ALPHA_VALUE (convert);
2366 convert->alpha_value = 255 * alpha_value;
2367 convert->alpha_mode = convert_get_alpha_mode (convert);
2369 convert->unpack_format = in_info->finfo->unpack_format;
2370 finfo = gst_video_format_get_info (convert->unpack_format);
2371 convert->unpack_bits = GST_VIDEO_FORMAT_INFO_DEPTH (finfo, 0);
2372 convert->unpack_rgb = GST_VIDEO_FORMAT_INFO_IS_RGB (finfo);
2373 if (convert->unpack_rgb
2374 && in_info->colorimetry.matrix != GST_VIDEO_COLOR_MATRIX_RGB) {
2375 /* force identity matrix for RGB input */
2376 GST_WARNING ("invalid matrix %d for input RGB format, using RGB",
2377 in_info->colorimetry.matrix);
2378 convert->in_info.colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_RGB;
2381 convert->pack_format = out_info->finfo->unpack_format;
2382 finfo = gst_video_format_get_info (convert->pack_format);
2383 convert->pack_bits = GST_VIDEO_FORMAT_INFO_DEPTH (finfo, 0);
2384 convert->pack_rgb = GST_VIDEO_FORMAT_INFO_IS_RGB (finfo);
2386 gst_video_format_get_palette (GST_VIDEO_INFO_FORMAT (out_info),
2387 &convert->pack_palsize);
2388 if (convert->pack_rgb
2389 && out_info->colorimetry.matrix != GST_VIDEO_COLOR_MATRIX_RGB) {
2390 /* force identity matrix for RGB output */
2391 GST_WARNING ("invalid matrix %d for output RGB format, using RGB",
2392 out_info->colorimetry.matrix);
2393 convert->out_info.colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_RGB;
2396 n_threads = get_opt_uint (convert, GST_VIDEO_CONVERTER_OPT_THREADS, 1);
2397 if (n_threads == 0 || n_threads > g_get_num_processors ())
2398 n_threads = g_get_num_processors ();
2399 /* Magic number of 200 lines */
2400 if (MAX (convert->out_height, convert->in_height) / n_threads < 200)
2401 n_threads = (MAX (convert->out_height, convert->in_height) + 199) / 200;
2402 convert->conversion_runner = gst_parallelized_task_runner_new (n_threads);
/* a fastpath handles the conversion directly; skip the generic setup */
2404 if (video_converter_lookup_fastpath (convert))
2407 if (in_info->finfo->unpack_func == NULL)
2408 goto no_unpack_func;
2410 if (out_info->finfo->pack_func == NULL)
2413 convert->convert = video_converter_generic;
/* per-thread arrays of scalers, resamplers and line caches */
2415 convert->upsample_p = g_new0 (GstVideoChromaResample *, n_threads);
2416 convert->upsample_i = g_new0 (GstVideoChromaResample *, n_threads);
2417 convert->downsample_p = g_new0 (GstVideoChromaResample *, n_threads);
2418 convert->downsample_i = g_new0 (GstVideoChromaResample *, n_threads);
2419 convert->v_scaler_p = g_new0 (GstVideoScaler *, n_threads);
2420 convert->v_scaler_i = g_new0 (GstVideoScaler *, n_threads);
2421 convert->h_scaler = g_new0 (GstVideoScaler *, n_threads);
2422 convert->unpack_lines = g_new0 (GstLineCache *, n_threads);
2423 convert->pack_lines = g_new0 (GstLineCache *, n_threads);
2424 convert->upsample_lines = g_new0 (GstLineCache *, n_threads);
2425 convert->to_RGB_lines = g_new0 (GstLineCache *, n_threads);
2426 convert->hscale_lines = g_new0 (GstLineCache *, n_threads);
2427 convert->vscale_lines = g_new0 (GstLineCache *, n_threads);
2428 convert->convert_lines = g_new0 (GstLineCache *, n_threads);
2429 convert->alpha_lines = g_new0 (GstLineCache *, n_threads);
2430 convert->to_YUV_lines = g_new0 (GstLineCache *, n_threads);
2431 convert->downsample_lines = g_new0 (GstLineCache *, n_threads);
2432 convert->dither_lines = g_new0 (GstLineCache *, n_threads);
2433 convert->dither = g_new0 (GstVideoDither *, n_threads);
2435 for (i = 0; i < n_threads; i++) {
/* each chain_* step consumes/updates the current_* working state */
2436 convert->current_format = GST_VIDEO_INFO_FORMAT (in_info);
2437 convert->current_width = convert->in_width;
2438 convert->current_height = convert->in_height;
2441 prev = chain_unpack_line (convert, i);
2442 /* upsample chroma */
2443 prev = chain_upsample (convert, prev, i);
2444 /* convert to gamma decoded RGB */
2445 prev = chain_convert_to_RGB (convert, prev, i);
2446 /* do all downscaling */
2447 prev = chain_scale (convert, prev, FALSE, i);
2448 /* do conversion between color spaces */
2449 prev = chain_convert (convert, prev, i);
2450 /* do alpha channels */
2451 prev = chain_alpha (convert, prev, i);
2452 /* do all remaining (up)scaling */
2453 prev = chain_scale (convert, prev, TRUE, i);
2454 /* convert to gamma encoded Y'Cb'Cr' */
2455 prev = chain_convert_to_YUV (convert, prev, i);
2456 /* downsample chroma */
2457 prev = chain_downsample (convert, prev, i);
2459 prev = chain_dither (convert, prev, i);
2460 /* pack into final format */
2461 convert->pack_lines[i] = chain_pack (convert, prev, i);
2464 setup_borderline (convert);
2465 /* now figure out allocators */
2466 setup_allocators (convert);
2474 GST_ERROR ("no unpack_func for format %s",
2475 gst_video_format_to_string (GST_VIDEO_INFO_FORMAT (in_info)));
2476 gst_video_converter_free (convert);
2481 GST_ERROR ("no pack_func for format %s",
2482 gst_video_format_to_string (GST_VIDEO_INFO_FORMAT (out_info)));
2483 gst_video_converter_free (convert);
2489 clear_matrix_data (MatrixData * data)
2497 * gst_video_converter_free:
2498 * @convert: a #GstVideoConverter
2505 gst_video_converter_free (GstVideoConverter * convert)
2509 g_return_if_fail (convert != NULL);
2511 for (i = 0; i < convert->conversion_runner->n_threads; i++) {
2512 if (convert->upsample_p && convert->upsample_p[i])
2513 gst_video_chroma_resample_free (convert->upsample_p[i]);
2514 if (convert->upsample_i && convert->upsample_i[i])
2515 gst_video_chroma_resample_free (convert->upsample_i[i]);
2516 if (convert->downsample_p && convert->downsample_p[i])
2517 gst_video_chroma_resample_free (convert->downsample_p[i]);
2518 if (convert->downsample_i && convert->downsample_i[i])
2519 gst_video_chroma_resample_free (convert->downsample_i[i]);
2520 if (convert->v_scaler_p && convert->v_scaler_p[i])
2521 gst_video_scaler_free (convert->v_scaler_p[i]);
2522 if (convert->v_scaler_i && convert->v_scaler_i[i])
2523 gst_video_scaler_free (convert->v_scaler_i[i]);
2524 if (convert->h_scaler && convert->h_scaler[i])
2525 gst_video_scaler_free (convert->h_scaler[i]);
2526 if (convert->unpack_lines && convert->unpack_lines[i])
2527 gst_line_cache_free (convert->unpack_lines[i]);
2528 if (convert->upsample_lines && convert->upsample_lines[i])
2529 gst_line_cache_free (convert->upsample_lines[i]);
2530 if (convert->to_RGB_lines && convert->to_RGB_lines[i])
2531 gst_line_cache_free (convert->to_RGB_lines[i]);
2532 if (convert->hscale_lines && convert->hscale_lines[i])
2533 gst_line_cache_free (convert->hscale_lines[i]);
2534 if (convert->vscale_lines && convert->vscale_lines[i])
2535 gst_line_cache_free (convert->vscale_lines[i]);
2536 if (convert->convert_lines && convert->convert_lines[i])
2537 gst_line_cache_free (convert->convert_lines[i]);
2538 if (convert->alpha_lines && convert->alpha_lines[i])
2539 gst_line_cache_free (convert->alpha_lines[i]);
2540 if (convert->to_YUV_lines && convert->to_YUV_lines[i])
2541 gst_line_cache_free (convert->to_YUV_lines[i]);
2542 if (convert->downsample_lines && convert->downsample_lines[i])
2543 gst_line_cache_free (convert->downsample_lines[i]);
2544 if (convert->dither_lines && convert->dither_lines[i])
2545 gst_line_cache_free (convert->dither_lines[i]);
2546 if (convert->dither && convert->dither[i])
2547 gst_video_dither_free (convert->dither[i]);
2549 g_free (convert->upsample_p);
2550 g_free (convert->upsample_i);
2551 g_free (convert->downsample_p);
2552 g_free (convert->downsample_i);
2553 g_free (convert->v_scaler_p);
2554 g_free (convert->v_scaler_i);
2555 g_free (convert->h_scaler);
2556 g_free (convert->unpack_lines);
2557 g_free (convert->pack_lines);
2558 g_free (convert->upsample_lines);
2559 g_free (convert->to_RGB_lines);
2560 g_free (convert->hscale_lines);
2561 g_free (convert->vscale_lines);
2562 g_free (convert->convert_lines);
2563 g_free (convert->alpha_lines);
2564 g_free (convert->to_YUV_lines);
2565 g_free (convert->downsample_lines);
2566 g_free (convert->dither_lines);
2567 g_free (convert->dither);
2569 g_free (convert->gamma_dec.gamma_table);
2570 g_free (convert->gamma_enc.gamma_table);
2572 if (convert->tmpline) {
2573 for (i = 0; i < convert->conversion_runner->n_threads; i++)
2574 g_free (convert->tmpline[i]);
2575 g_free (convert->tmpline);
2578 g_free (convert->borderline);
2580 if (convert->config)
2581 gst_structure_free (convert->config);
2583 for (i = 0; i < 4; i++) {
2584 for (j = 0; j < convert->conversion_runner->n_threads; j++) {
2585 if (convert->fv_scaler[i].scaler)
2586 gst_video_scaler_free (convert->fv_scaler[i].scaler[j]);
2587 if (convert->fh_scaler[i].scaler)
2588 gst_video_scaler_free (convert->fh_scaler[i].scaler[j]);
2590 g_free (convert->fv_scaler[i].scaler);
2591 g_free (convert->fh_scaler[i].scaler);
2594 if (convert->conversion_runner)
2595 gst_parallelized_task_runner_free (convert->conversion_runner);
2597 clear_matrix_data (&convert->to_RGB_matrix);
2598 clear_matrix_data (&convert->convert_matrix);
2599 clear_matrix_data (&convert->to_YUV_matrix);
2601 g_slice_free (GstVideoConverter, convert);
2605 copy_config (GQuark field_id, const GValue * value, gpointer user_data)
2607 GstVideoConverter *convert = user_data;
2609 gst_structure_id_set_value (convert->config, field_id, value);
2615 * gst_video_converter_set_config:
2616 * @convert: a #GstVideoConverter
2617 * @config: (transfer full): a #GstStructure
2619 * Set @config as extra configuraion for @convert.
2621 * If the parameters in @config can not be set exactly, this function returns
2622 * %FALSE and will try to update as much state as possible. The new state can
2623 * then be retrieved and refined with gst_video_converter_get_config().
2625 * Look at the #GST_VIDEO_CONVERTER_OPT_* fields to check valid configuration
2626 * option and values.
2628 * Returns: %TRUE when @config could be set.
2633 gst_video_converter_set_config (GstVideoConverter * convert,
2634 GstStructure * config)
2636 g_return_val_if_fail (convert != NULL, FALSE);
2637 g_return_val_if_fail (config != NULL, FALSE);
2639 gst_structure_foreach (config, copy_config, convert);
2640 gst_structure_free (config);
2646 * gst_video_converter_get_config:
2647 * @convert: a #GstVideoConverter
2649 * Get the current configuration of @convert.
2651 * Returns: a #GstStructure that remains valid for as long as @convert is valid
2652 * or until gst_video_converter_set_config() is called.
2654 const GstStructure *
2655 gst_video_converter_get_config (GstVideoConverter * convert)
2657 g_return_val_if_fail (convert != NULL, NULL);
2659 return convert->config;
2663 * gst_video_converter_frame:
2664 * @convert: a #GstVideoConverter
2665 * @dest: a #GstVideoFrame
2666 * @src: a #GstVideoFrame
2668 * Convert the pixels of @src into @dest using @convert.
2673 gst_video_converter_frame (GstVideoConverter * convert,
2674 const GstVideoFrame * src, GstVideoFrame * dest)
2676 g_return_if_fail (convert != NULL);
2677 g_return_if_fail (src != NULL);
2678 g_return_if_fail (dest != NULL);
2680 convert->convert (convert, src, dest);
2684 video_converter_compute_matrix (GstVideoConverter * convert)
2686 MatrixData *dst = &convert->convert_matrix;
2688 color_matrix_set_identity (dst);
2689 compute_matrix_to_RGB (convert, dst);
2690 compute_matrix_to_YUV (convert, dst, FALSE);
2692 convert->current_bits = 8;
2693 prepare_matrix (convert, dst);
2697 video_converter_compute_resample (GstVideoConverter * convert, gint idx)
2699 GstVideoInfo *in_info, *out_info;
2700 const GstVideoFormatInfo *sfinfo, *dfinfo;
2702 if (CHECK_CHROMA_NONE (convert))
2705 in_info = &convert->in_info;
2706 out_info = &convert->out_info;
2708 sfinfo = in_info->finfo;
2709 dfinfo = out_info->finfo;
2711 GST_DEBUG ("site: %d->%d, w_sub: %d->%d, h_sub: %d->%d", in_info->chroma_site,
2712 out_info->chroma_site, sfinfo->w_sub[2], dfinfo->w_sub[2],
2713 sfinfo->h_sub[2], dfinfo->h_sub[2]);
2715 if (sfinfo->w_sub[2] != dfinfo->w_sub[2] ||
2716 sfinfo->h_sub[2] != dfinfo->h_sub[2] ||
2717 in_info->chroma_site != out_info->chroma_site ||
2718 in_info->width != out_info->width ||
2719 in_info->height != out_info->height) {
2720 if (GST_VIDEO_INFO_IS_INTERLACED (in_info)) {
2721 if (!CHECK_CHROMA_DOWNSAMPLE (convert))
2722 convert->upsample_i[idx] = gst_video_chroma_resample_new (0,
2723 in_info->chroma_site, GST_VIDEO_CHROMA_FLAG_INTERLACED,
2724 sfinfo->unpack_format, sfinfo->w_sub[2], sfinfo->h_sub[2]);
2725 if (!CHECK_CHROMA_UPSAMPLE (convert))
2726 convert->downsample_i[idx] =
2727 gst_video_chroma_resample_new (0, out_info->chroma_site,
2728 GST_VIDEO_CHROMA_FLAG_INTERLACED, dfinfo->unpack_format,
2729 -dfinfo->w_sub[2], -dfinfo->h_sub[2]);
2731 if (!CHECK_CHROMA_DOWNSAMPLE (convert))
2732 convert->upsample_p[idx] = gst_video_chroma_resample_new (0,
2733 in_info->chroma_site, 0, sfinfo->unpack_format, sfinfo->w_sub[2],
2735 if (!CHECK_CHROMA_UPSAMPLE (convert))
2736 convert->downsample_p[idx] = gst_video_chroma_resample_new (0,
2737 out_info->chroma_site, 0, dfinfo->unpack_format, -dfinfo->w_sub[2],
/* Helpers to address raw scanlines inside a GstVideoFrame. */
2742 #define FRAME_GET_PLANE_STRIDE(frame, plane) \
2743 GST_VIDEO_FRAME_PLANE_STRIDE (frame, plane)
/* Pointer to the start of scanline @line in plane @plane. */
2744 #define FRAME_GET_PLANE_LINE(frame, plane, line) \
2745 (gpointer)(((guint8*)(GST_VIDEO_FRAME_PLANE_DATA (frame, plane))) + \
2746 FRAME_GET_PLANE_STRIDE (frame, plane) * (line))
/* Same accessors keyed by component (Y/U/V/A) instead of plane index. */
2748 #define FRAME_GET_COMP_STRIDE(frame, comp) \
2749 GST_VIDEO_FRAME_COMP_STRIDE (frame, comp)
2750 #define FRAME_GET_COMP_LINE(frame, comp, line) \
2751 (gpointer)(((guint8*)(GST_VIDEO_FRAME_COMP_DATA (frame, comp))) + \
2752 FRAME_GET_COMP_STRIDE (frame, comp) * (line))
/* Shorthands for plane 0 (packed formats). */
2754 #define FRAME_GET_STRIDE(frame) FRAME_GET_PLANE_STRIDE (frame, 0)
2755 #define FRAME_GET_LINE(frame,line) FRAME_GET_PLANE_LINE (frame, 0, line)
/* Per-component line accessors used by the planar fast paths. */
2757 #define FRAME_GET_Y_LINE(frame,line) FRAME_GET_COMP_LINE(frame, GST_VIDEO_COMP_Y, line)
2758 #define FRAME_GET_U_LINE(frame,line) FRAME_GET_COMP_LINE(frame, GST_VIDEO_COMP_U, line)
2759 #define FRAME_GET_V_LINE(frame,line) FRAME_GET_COMP_LINE(frame, GST_VIDEO_COMP_V, line)
2760 #define FRAME_GET_A_LINE(frame,line) FRAME_GET_COMP_LINE(frame, GST_VIDEO_COMP_A, line)
/* Per-component stride accessors. */
2762 #define FRAME_GET_Y_STRIDE(frame) FRAME_GET_COMP_STRIDE(frame, GST_VIDEO_COMP_Y)
2763 #define FRAME_GET_U_STRIDE(frame) FRAME_GET_COMP_STRIDE(frame, GST_VIDEO_COMP_U)
2764 #define FRAME_GET_V_STRIDE(frame) FRAME_GET_COMP_STRIDE(frame, GST_VIDEO_COMP_V)
2765 #define FRAME_GET_A_STRIDE(frame) FRAME_GET_COMP_STRIDE(frame, GST_VIDEO_COMP_A)
2768 #define UNPACK_FRAME(frame,dest,line,x,width) \
2769 frame->info.finfo->unpack_func (frame->info.finfo, \
2770 (GST_VIDEO_FRAME_IS_INTERLACED (frame) ? \
2771 GST_VIDEO_PACK_FLAG_INTERLACED : \
2772 GST_VIDEO_PACK_FLAG_NONE), \
2773 dest, frame->data, frame->info.stride, x, \
2775 #define PACK_FRAME(frame,src,line,width) \
2776 frame->info.finfo->pack_func (frame->info.finfo, \
2777 (GST_VIDEO_FRAME_IS_INTERLACED (frame) ? \
2778 GST_VIDEO_PACK_FLAG_INTERLACED : \
2779 GST_VIDEO_PACK_FLAG_NONE), \
2780 src, 0, frame->data, frame->info.stride, \
2781 frame->info.chroma_site, line, width);
2784 get_dest_line (GstLineCache * cache, gint idx, gpointer user_data)
2786 GstVideoConverter *convert = user_data;
2788 gint pstride = convert->pack_pstride;
2789 gint out_x = convert->out_x;
2792 cline = CLAMP (idx, 0, convert->out_maxheight - 1);
2794 line = FRAME_GET_LINE (convert->dest, cline);
2795 GST_DEBUG ("get dest line %d %p", cline, line);
2797 if (convert->borderline) {
2798 gint r_border = (out_x + convert->out_width) * pstride;
2799 gint rb_width = convert->out_maxwidth * pstride - r_border;
2800 gint lb_width = out_x * pstride;
2802 memcpy (line, convert->borderline, lb_width);
2803 memcpy (line + r_border, convert->borderline, rb_width);
2805 line += out_x * pstride;
2811 do_unpack_lines (GstLineCache * cache, gint idx, gint out_line, gint in_line,
2814 GstVideoConverter *convert = user_data;
2818 cline = CLAMP (in_line + convert->in_y, 0, convert->in_maxheight - 1);
2820 if (cache->alloc_writable || !convert->identity_unpack) {
2821 tmpline = gst_line_cache_alloc_line (cache, out_line);
2822 GST_DEBUG ("unpack line %d (%u) %p", in_line, cline, tmpline);
2823 UNPACK_FRAME (convert->src, tmpline, cline, convert->in_x,
2826 tmpline = ((guint8 *) FRAME_GET_LINE (convert->src, cline)) +
2827 convert->in_x * convert->unpack_pstride;
2828 GST_DEBUG ("get src line %d (%u) %p", in_line, cline, tmpline);
2830 gst_line_cache_add_line (cache, in_line, tmpline);
2836 do_upsample_lines (GstLineCache * cache, gint idx, gint out_line, gint in_line,
2839 GstVideoConverter *convert = user_data;
2841 gint i, start_line, n_lines;
2843 n_lines = convert->up_n_lines;
2844 start_line = in_line;
2845 if (start_line < n_lines + convert->up_offset) {
2846 start_line += convert->up_offset;
2847 out_line += convert->up_offset;
2850 /* get the lines needed for chroma upsample */
2852 gst_line_cache_get_lines (cache->prev, idx, out_line, start_line,
2855 if (convert->upsample) {
2856 GST_DEBUG ("doing upsample %d-%d %p", start_line, start_line + n_lines - 1,
2858 gst_video_chroma_resample (convert->upsample[idx], lines,
2862 for (i = 0; i < n_lines; i++)
2863 gst_line_cache_add_line (cache, start_line + i, lines[i]);
2869 do_convert_to_RGB_lines (GstLineCache * cache, gint idx, gint out_line,
2870 gint in_line, gpointer user_data)
2872 GstVideoConverter *convert = user_data;
2873 MatrixData *data = &convert->to_RGB_matrix;
2874 gpointer *lines, destline;
2876 lines = gst_line_cache_get_lines (cache->prev, idx, out_line, in_line, 1);
2877 destline = lines[0];
2879 if (data->matrix_func) {
2880 GST_DEBUG ("to RGB line %d %p", in_line, destline);
2881 data->matrix_func (data, destline);
2883 if (convert->gamma_dec.gamma_func) {
2884 destline = gst_line_cache_alloc_line (cache, out_line);
2886 GST_DEBUG ("gamma decode line %d %p->%p", in_line, lines[0], destline);
2887 convert->gamma_dec.gamma_func (&convert->gamma_dec, destline, lines[0]);
2889 gst_line_cache_add_line (cache, in_line, destline);
2895 do_hscale_lines (GstLineCache * cache, gint idx, gint out_line, gint in_line,
2898 GstVideoConverter *convert = user_data;
2899 gpointer *lines, destline;
2901 lines = gst_line_cache_get_lines (cache->prev, idx, out_line, in_line, 1);
2903 destline = gst_line_cache_alloc_line (cache, out_line);
2905 GST_DEBUG ("hresample line %d %p->%p", in_line, lines[0], destline);
2906 gst_video_scaler_horizontal (convert->h_scaler[idx], convert->h_scale_format,
2907 lines[0], destline, 0, convert->out_width);
2909 gst_line_cache_add_line (cache, in_line, destline);
2915 do_vscale_lines (GstLineCache * cache, gint idx, gint out_line, gint in_line,
2918 GstVideoConverter *convert = user_data;
2919 gpointer *lines, destline;
2920 guint sline, n_lines;
2923 cline = CLAMP (in_line, 0, convert->out_height - 1);
2925 gst_video_scaler_get_coeff (convert->v_scaler[idx], cline, &sline, &n_lines);
2926 lines = gst_line_cache_get_lines (cache->prev, idx, out_line, sline, n_lines);
2928 destline = gst_line_cache_alloc_line (cache, out_line);
2930 GST_DEBUG ("vresample line %d %d-%d %p->%p", in_line, sline,
2931 sline + n_lines - 1, lines[0], destline);
2932 gst_video_scaler_vertical (convert->v_scaler[idx], convert->v_scale_format,
2933 lines, destline, cline, convert->v_scale_width);
2935 gst_line_cache_add_line (cache, in_line, destline);
2941 do_convert_lines (GstLineCache * cache, gint idx, gint out_line, gint in_line,
2944 GstVideoConverter *convert = user_data;
2945 MatrixData *data = &convert->convert_matrix;
2946 gpointer *lines, destline;
2947 guint in_bits, out_bits;
2950 lines = gst_line_cache_get_lines (cache->prev, idx, out_line, in_line, 1);
2952 destline = lines[0];
2954 in_bits = convert->in_bits;
2955 out_bits = convert->out_bits;
2957 width = MIN (convert->in_width, convert->out_width);
2959 if (out_bits == 16 || in_bits == 16) {
2960 gpointer srcline = lines[0];
2962 if (out_bits != in_bits)
2963 destline = gst_line_cache_alloc_line (cache, out_line);
2965 /* FIXME, we can scale in the conversion matrix */
2967 GST_DEBUG ("8->16 line %d %p->%p", in_line, srcline, destline);
2968 video_orc_convert_u8_to_u16 (destline, srcline, width * 4);
2972 if (data->matrix_func) {
2973 GST_DEBUG ("matrix line %d %p", in_line, srcline);
2974 data->matrix_func (data, srcline);
2977 /* FIXME, dither here */
2978 if (out_bits == 8) {
2979 GST_DEBUG ("16->8 line %d %p->%p", in_line, srcline, destline);
2980 video_orc_convert_u16_to_u8 (destline, srcline, width * 4);
2983 if (data->matrix_func) {
2984 GST_DEBUG ("matrix line %d %p", in_line, destline);
2985 data->matrix_func (data, destline);
2988 gst_line_cache_add_line (cache, in_line, destline);
2994 do_alpha_lines (GstLineCache * cache, gint idx, gint out_line, gint in_line,
2997 gpointer *lines, destline;
2998 GstVideoConverter *convert = user_data;
2999 gint width = MIN (convert->in_width, convert->out_width);
3001 lines = gst_line_cache_get_lines (cache->prev, idx, out_line, in_line, 1);
3002 destline = lines[0];
3004 GST_DEBUG ("alpha line %d %p", in_line, destline);
3005 convert->alpha_func (convert, destline, width);
3007 gst_line_cache_add_line (cache, in_line, destline);
3013 do_convert_to_YUV_lines (GstLineCache * cache, gint idx, gint out_line,
3014 gint in_line, gpointer user_data)
3016 GstVideoConverter *convert = user_data;
3017 MatrixData *data = &convert->to_YUV_matrix;
3018 gpointer *lines, destline;
3020 lines = gst_line_cache_get_lines (cache->prev, idx, out_line, in_line, 1);
3021 destline = lines[0];
3023 if (convert->gamma_enc.gamma_func) {
3024 destline = gst_line_cache_alloc_line (cache, out_line);
3026 GST_DEBUG ("gamma encode line %d %p->%p", in_line, lines[0], destline);
3027 convert->gamma_enc.gamma_func (&convert->gamma_enc, destline, lines[0]);
3029 if (data->matrix_func) {
3030 GST_DEBUG ("to YUV line %d %p", in_line, destline);
3031 data->matrix_func (data, destline);
3033 gst_line_cache_add_line (cache, in_line, destline);
3039 do_downsample_lines (GstLineCache * cache, gint idx, gint out_line,
3040 gint in_line, gpointer user_data)
3042 GstVideoConverter *convert = user_data;
3044 gint i, start_line, n_lines;
3046 n_lines = convert->down_n_lines;
3047 start_line = in_line;
3048 if (start_line < n_lines + convert->down_offset)
3049 start_line += convert->down_offset;
3051 /* get the lines needed for chroma downsample */
3053 gst_line_cache_get_lines (cache->prev, idx, out_line, start_line,
3056 if (convert->downsample) {
3057 GST_DEBUG ("downsample line %d %d-%d %p", in_line, start_line,
3058 start_line + n_lines - 1, lines[0]);
3059 gst_video_chroma_resample (convert->downsample[idx], lines,
3060 convert->out_width);
3063 for (i = 0; i < n_lines; i++)
3064 gst_line_cache_add_line (cache, start_line + i, lines[i]);
3070 do_dither_lines (GstLineCache * cache, gint idx, gint out_line, gint in_line,
3073 GstVideoConverter *convert = user_data;
3074 gpointer *lines, destline;
3076 lines = gst_line_cache_get_lines (cache->prev, idx, out_line, in_line, 1);
3077 destline = lines[0];
3079 if (convert->dither) {
3080 GST_DEBUG ("Dither line %d %p", in_line, destline);
3081 gst_video_dither_line (convert->dither[idx], destline, 0, out_line,
3082 convert->out_width);
3084 gst_line_cache_add_line (cache, in_line, destline);
3091 GstLineCache *pack_lines;
3094 gint pack_lines_count;
3096 gboolean identity_pack;
3097 gint lb_width, out_maxwidth;
3098 GstVideoFrame *dest;
3102 convert_generic_task (ConvertTask * task)
3106 for (i = task->h_0; i < task->h_1; i += task->pack_lines_count) {
3109 /* load the lines needed to pack */
3111 gst_line_cache_get_lines (task->pack_lines, task->idx, i + task->out_y,
3112 i, task->pack_lines_count);
3114 if (!task->identity_pack) {
3115 /* take away the border */
3116 guint8 *l = ((guint8 *) lines[0]) - task->lb_width;
3117 /* and pack into destination */
3118 GST_DEBUG ("pack line %d %p (%p)", i + task->out_y, lines[0], l);
3119 PACK_FRAME (task->dest, l, i + task->out_y, task->out_maxwidth);
3125 video_converter_generic (GstVideoConverter * convert, const GstVideoFrame * src,
3126 GstVideoFrame * dest)
3129 gint out_maxwidth, out_maxheight;
3130 gint out_x, out_y, out_height;
3131 gint pack_lines, pstride;
3134 ConvertTask **tasks_p;
3136 gint lines_per_thread;
3138 out_height = convert->out_height;
3139 out_maxwidth = convert->out_maxwidth;
3140 out_maxheight = convert->out_maxheight;
3142 out_x = convert->out_x;
3143 out_y = convert->out_y;
3146 convert->dest = dest;
3148 if (GST_VIDEO_FRAME_IS_INTERLACED (src)) {
3149 GST_DEBUG ("setup interlaced frame");
3150 convert->upsample = convert->upsample_i;
3151 convert->downsample = convert->downsample_i;
3152 convert->v_scaler = convert->v_scaler_i;
3154 GST_DEBUG ("setup progressive frame");
3155 convert->upsample = convert->upsample_p;
3156 convert->downsample = convert->downsample_p;
3157 convert->v_scaler = convert->v_scaler_p;
3159 if (convert->upsample[0]) {
3160 gst_video_chroma_resample_get_info (convert->upsample[0],
3161 &convert->up_n_lines, &convert->up_offset);
3163 convert->up_n_lines = 1;
3164 convert->up_offset = 0;
3166 if (convert->downsample[0]) {
3167 gst_video_chroma_resample_get_info (convert->downsample[0],
3168 &convert->down_n_lines, &convert->down_offset);
3170 convert->down_n_lines = 1;
3171 convert->down_offset = 0;
3174 pack_lines = convert->pack_nlines; /* only 1 for now */
3175 pstride = convert->pack_pstride;
3177 lb_width = out_x * pstride;
3179 if (convert->borderline) {
3180 /* FIXME we should try to avoid PACK_FRAME */
3181 for (i = 0; i < out_y; i++)
3182 PACK_FRAME (dest, convert->borderline, i, out_maxwidth);
3185 n_threads = convert->conversion_runner->n_threads;
3186 tasks = g_newa (ConvertTask, n_threads);
3187 tasks_p = g_newa (ConvertTask *, n_threads);
3190 GST_ROUND_UP_N ((out_height + n_threads - 1) / n_threads, pack_lines);
3192 for (i = 0; i < n_threads; i++) {
3193 tasks[i].dest = dest;
3194 tasks[i].pack_lines = convert->pack_lines[i];
3196 tasks[i].pack_lines_count = pack_lines;
3197 tasks[i].out_y = out_y;
3198 tasks[i].identity_pack = convert->identity_pack;
3199 tasks[i].lb_width = lb_width;
3200 tasks[i].out_maxwidth = out_maxwidth;
3202 tasks[i].h_0 = i * lines_per_thread;
3203 tasks[i].h_1 = MIN ((i + 1) * lines_per_thread, out_height);
3205 tasks_p[i] = &tasks[i];
3208 gst_parallelized_task_runner_run (convert->conversion_runner,
3209 (GstParallelizedTaskFunc) convert_generic_task, (gpointer) tasks_p);
3211 if (convert->borderline) {
3212 for (i = out_y + out_height; i < out_maxheight; i++)
3213 PACK_FRAME (dest, convert->borderline, i, out_maxwidth);
3215 if (convert->pack_pal) {
3216 memcpy (GST_VIDEO_FRAME_PLANE_DATA (dest, 1), convert->pack_pal,
3217 convert->pack_palsize);
3221 static void convert_fill_border (GstVideoConverter * convert,
3222 GstVideoFrame * dest);
3226 #define GET_LINE_OFFSETS(interlaced,line,l1,l2) \
3228 l1 = (line & 2 ? line - 1 : line); \
3237 const GstVideoFrame *src;
3238 GstVideoFrame *dest;
3239 gint height_0, height_1;
3242 gboolean interlaced;
3252 convert_I420_YUY2_task (FConvertTask * task)
3257 for (i = task->height_0; i < task->height_1; i += 2) {
3258 GET_LINE_OFFSETS (task->interlaced, i, l1, l2);
3260 video_orc_convert_I420_YUY2 (FRAME_GET_LINE (task->dest, l1),
3261 FRAME_GET_LINE (task->dest, l2),
3262 FRAME_GET_Y_LINE (task->src, l1),
3263 FRAME_GET_Y_LINE (task->src, l2),
3264 FRAME_GET_U_LINE (task->src, i >> 1),
3265 FRAME_GET_V_LINE (task->src, i >> 1), (task->width + 1) / 2);
3270 convert_I420_YUY2 (GstVideoConverter * convert, const GstVideoFrame * src,
3271 GstVideoFrame * dest)
3274 gint width = convert->in_width;
3275 gint height = convert->in_height;
3276 gboolean interlaced = GST_VIDEO_FRAME_IS_INTERLACED (src);
3278 FConvertTask *tasks;
3279 FConvertTask **tasks_p;
3281 gint lines_per_thread;
3283 /* I420 has half as many chroma lines, as such we have to
3284 * always merge two into one. For non-interlaced these are
3285 * the two next to each other, for interlaced one is skipped
3288 h2 = GST_ROUND_DOWN_4 (height);
3290 h2 = GST_ROUND_DOWN_2 (height);
3292 n_threads = convert->conversion_runner->n_threads;
3293 tasks = g_newa (FConvertTask, n_threads);
3294 tasks_p = g_newa (FConvertTask *, n_threads);
3296 lines_per_thread = GST_ROUND_UP_2 ((h2 + n_threads - 1) / n_threads);
3298 for (i = 0; i < n_threads; i++) {
3300 tasks[i].dest = dest;
3302 tasks[i].interlaced = interlaced;
3303 tasks[i].width = width;
3305 tasks[i].height_0 = i * lines_per_thread;
3306 tasks[i].height_1 = tasks[i].height_0 + lines_per_thread;
3307 tasks[i].height_1 = MIN (h2, tasks[i].height_1);
3309 tasks_p[i] = &tasks[i];
3312 gst_parallelized_task_runner_run (convert->conversion_runner,
3313 (GstParallelizedTaskFunc) convert_I420_YUY2_task, (gpointer) tasks_p);
3315 /* now handle last lines. For interlaced these are up to 3 */
3317 for (i = h2; i < height; i++) {
3318 UNPACK_FRAME (src, convert->tmpline[0], i, convert->in_x, width);
3319 PACK_FRAME (dest, convert->tmpline[0], i, width);
3325 convert_I420_UYVY_task (FConvertTask * task)
3330 for (i = task->height_0; i < task->height_1; i += 2) {
3331 GET_LINE_OFFSETS (task->interlaced, i, l1, l2);
3333 video_orc_convert_I420_UYVY (FRAME_GET_LINE (task->dest, l1),
3334 FRAME_GET_LINE (task->dest, l2),
3335 FRAME_GET_Y_LINE (task->src, l1),
3336 FRAME_GET_Y_LINE (task->src, l2),
3337 FRAME_GET_U_LINE (task->src, i >> 1),
3338 FRAME_GET_V_LINE (task->src, i >> 1), (task->width + 1) / 2);
3343 convert_I420_UYVY (GstVideoConverter * convert, const GstVideoFrame * src,
3344 GstVideoFrame * dest)
3347 gint width = convert->in_width;
3348 gint height = convert->in_height;
3349 gboolean interlaced = GST_VIDEO_FRAME_IS_INTERLACED (src);
3351 FConvertTask *tasks;
3352 FConvertTask **tasks_p;
3354 gint lines_per_thread;
3356 /* I420 has half as many chroma lines, as such we have to
3357 * always merge two into one. For non-interlaced these are
3358 * the two next to each other, for interlaced one is skipped
3361 h2 = GST_ROUND_DOWN_4 (height);
3363 h2 = GST_ROUND_DOWN_2 (height);
3365 n_threads = convert->conversion_runner->n_threads;
3366 tasks = g_newa (FConvertTask, n_threads);
3367 tasks_p = g_newa (FConvertTask *, n_threads);
3369 lines_per_thread = GST_ROUND_UP_2 ((h2 + n_threads - 1) / n_threads);
3371 for (i = 0; i < n_threads; i++) {
3373 tasks[i].dest = dest;
3375 tasks[i].interlaced = interlaced;
3376 tasks[i].width = width;
3378 tasks[i].height_0 = i * lines_per_thread;
3379 tasks[i].height_1 = tasks[i].height_0 + lines_per_thread;
3380 tasks[i].height_1 = MIN (h2, tasks[i].height_1);
3382 tasks_p[i] = &tasks[i];
3385 gst_parallelized_task_runner_run (convert->conversion_runner,
3386 (GstParallelizedTaskFunc) convert_I420_UYVY_task, (gpointer) tasks_p);
3388 /* now handle last lines. For interlaced these are up to 3 */
3390 for (i = h2; i < height; i++) {
3391 UNPACK_FRAME (src, convert->tmpline[0], i, convert->in_x, width);
3392 PACK_FRAME (dest, convert->tmpline[0], i, width);
3398 convert_I420_AYUV_task (FConvertTask * task)
3403 for (i = task->height_0; i < task->height_1; i += 2) {
3404 GET_LINE_OFFSETS (task->interlaced, i, l1, l2);
3406 video_orc_convert_I420_AYUV (FRAME_GET_LINE (task->dest, l1),
3407 FRAME_GET_LINE (task->dest, l2),
3408 FRAME_GET_Y_LINE (task->src, l1),
3409 FRAME_GET_Y_LINE (task->src, l2),
3410 FRAME_GET_U_LINE (task->src, i >> 1), FRAME_GET_V_LINE (task->src,
3411 i >> 1), task->alpha, task->width);
3416 convert_I420_AYUV (GstVideoConverter * convert, const GstVideoFrame * src,
3417 GstVideoFrame * dest)
3420 gint width = convert->in_width;
3421 gint height = convert->in_height;
3422 gboolean interlaced = GST_VIDEO_FRAME_IS_INTERLACED (src);
3423 guint8 alpha = MIN (convert->alpha_value, 255);
3425 FConvertTask *tasks;
3426 FConvertTask **tasks_p;
3428 gint lines_per_thread;
3430 /* I420 has half as many chroma lines, as such we have to
3431 * always merge two into one. For non-interlaced these are
3432 * the two next to each other, for interlaced one is skipped
3435 h2 = GST_ROUND_DOWN_4 (height);
3437 h2 = GST_ROUND_DOWN_2 (height);
3440 n_threads = convert->conversion_runner->n_threads;
3441 tasks = g_newa (FConvertTask, n_threads);
3442 tasks_p = g_newa (FConvertTask *, n_threads);
3444 lines_per_thread = GST_ROUND_UP_2 ((h2 + n_threads - 1) / n_threads);
3446 for (i = 0; i < n_threads; i++) {
3448 tasks[i].dest = dest;
3450 tasks[i].interlaced = interlaced;
3451 tasks[i].width = width;
3452 tasks[i].alpha = alpha;
3454 tasks[i].height_0 = i * lines_per_thread;
3455 tasks[i].height_1 = tasks[i].height_0 + lines_per_thread;
3456 tasks[i].height_1 = MIN (h2, tasks[i].height_1);
3458 tasks_p[i] = &tasks[i];
3461 gst_parallelized_task_runner_run (convert->conversion_runner,
3462 (GstParallelizedTaskFunc) convert_I420_AYUV_task, (gpointer) tasks_p);
3464 /* now handle last lines. For interlaced these are up to 3 */
3466 for (i = h2; i < height; i++) {
3467 UNPACK_FRAME (src, convert->tmpline[0], i, convert->in_x, width);
3469 convert_set_alpha_u8 (convert, convert->tmpline[0], width);
3470 PACK_FRAME (dest, convert->tmpline[0], i, width);
3476 convert_YUY2_I420_task (FConvertTask * task)
3481 for (i = task->height_0; i < task->height_1; i += 2) {
3482 GET_LINE_OFFSETS (task->interlaced, i, l1, l2);
3484 video_orc_convert_YUY2_I420 (FRAME_GET_Y_LINE (task->dest, l1),
3485 FRAME_GET_Y_LINE (task->dest, l2),
3486 FRAME_GET_U_LINE (task->dest, i >> 1),
3487 FRAME_GET_V_LINE (task->dest, i >> 1),
3488 FRAME_GET_LINE (task->src, l1), FRAME_GET_LINE (task->src, l2),
3489 (task->width + 1) / 2);
3494 convert_YUY2_I420 (GstVideoConverter * convert, const GstVideoFrame * src,
3495 GstVideoFrame * dest)
3498 gint width = convert->in_width;
3499 gint height = convert->in_height;
3500 gboolean interlaced = GST_VIDEO_FRAME_IS_INTERLACED (src);
3502 FConvertTask *tasks;
3503 FConvertTask **tasks_p;
3505 gint lines_per_thread;
3507 /* I420 has half as many chroma lines, as such we have to
3508 * always merge two into one. For non-interlaced these are
3509 * the two next to each other, for interlaced one is skipped
3512 h2 = GST_ROUND_DOWN_4 (height);
3514 h2 = GST_ROUND_DOWN_2 (height);
3516 n_threads = convert->conversion_runner->n_threads;
3517 tasks = g_newa (FConvertTask, n_threads);
3518 tasks_p = g_newa (FConvertTask *, n_threads);
3520 lines_per_thread = GST_ROUND_UP_2 ((h2 + n_threads - 1) / n_threads);
3522 for (i = 0; i < n_threads; i++) {
3524 tasks[i].dest = dest;
3526 tasks[i].interlaced = interlaced;
3527 tasks[i].width = width;
3529 tasks[i].height_0 = i * lines_per_thread;
3530 tasks[i].height_1 = tasks[i].height_0 + lines_per_thread;
3531 tasks[i].height_1 = MIN (h2, tasks[i].height_1);
3533 tasks_p[i] = &tasks[i];
3536 gst_parallelized_task_runner_run (convert->conversion_runner,
3537 (GstParallelizedTaskFunc) convert_YUY2_I420_task, (gpointer) tasks_p);
3539 /* now handle last lines. For interlaced these are up to 3 */
3541 for (i = h2; i < height; i++) {
3542 UNPACK_FRAME (src, convert->tmpline[0], i, convert->in_x, width);
3543 PACK_FRAME (dest, convert->tmpline[0], i, width);
3550 const guint8 *s, *s2, *su, *sv;
3551 guint8 *d, *d2, *du, *dv;
3552 gint sstride, sustride, svstride;
3553 gint dstride, dustride, dvstride;
3557 } FConvertPlaneTask;
3560 convert_YUY2_AYUV_task (FConvertPlaneTask * task)
3562 video_orc_convert_YUY2_AYUV (task->d, task->dstride, task->s,
3563 task->sstride, task->alpha, (task->width + 1) / 2, task->height);
3567 convert_YUY2_AYUV (GstVideoConverter * convert, const GstVideoFrame * src,
3568 GstVideoFrame * dest)
3570 gint width = convert->in_width;
3571 gint height = convert->in_height;
3573 guint8 alpha = MIN (convert->alpha_value, 255);
3574 FConvertPlaneTask *tasks;
3575 FConvertPlaneTask **tasks_p;
3577 gint lines_per_thread;
3580 s = FRAME_GET_LINE (src, convert->in_y);
3581 s += (GST_ROUND_UP_2 (convert->in_x) * 2);
3582 d = FRAME_GET_LINE (dest, convert->out_y);
3583 d += (convert->out_x * 4);
3585 n_threads = convert->conversion_runner->n_threads;
3586 tasks = g_newa (FConvertPlaneTask, n_threads);
3587 tasks_p = g_newa (FConvertPlaneTask *, n_threads);
3589 lines_per_thread = (height + n_threads - 1) / n_threads;
3591 for (i = 0; i < n_threads; i++) {
3592 tasks[i].dstride = FRAME_GET_STRIDE (dest);
3593 tasks[i].sstride = FRAME_GET_STRIDE (src);
3594 tasks[i].d = d + i * lines_per_thread * tasks[i].dstride;
3595 tasks[i].s = s + i * lines_per_thread * tasks[i].sstride;
3597 tasks[i].width = width;
3598 tasks[i].height = (i + 1) * lines_per_thread;
3599 tasks[i].height = MIN (tasks[i].height, height);
3600 tasks[i].height -= i * lines_per_thread;
3601 tasks[i].alpha = alpha;
3603 tasks_p[i] = &tasks[i];
3606 gst_parallelized_task_runner_run (convert->conversion_runner,
3607 (GstParallelizedTaskFunc) convert_YUY2_AYUV_task, (gpointer) tasks_p);
3609 convert_fill_border (convert, dest);
3613 convert_YUY2_Y42B_task (FConvertPlaneTask * task)
3615 video_orc_convert_YUY2_Y42B (task->d, task->dstride, task->du,
3616 task->dustride, task->dv, task->dvstride,
3617 task->s, task->sstride, (task->width + 1) / 2, task->height);
3621 convert_YUY2_Y42B (GstVideoConverter * convert, const GstVideoFrame * src,
3622 GstVideoFrame * dest)
3624 gint width = convert->in_width;
3625 gint height = convert->in_height;
3626 guint8 *s, *dy, *du, *dv;
3627 FConvertPlaneTask *tasks;
3628 FConvertPlaneTask **tasks_p;
3630 gint lines_per_thread;
3633 s = FRAME_GET_LINE (src, convert->in_y);
3634 s += (GST_ROUND_UP_2 (convert->in_x) * 2);
3636 dy = FRAME_GET_Y_LINE (dest, convert->out_y);
3637 dy += convert->out_x;
3638 du = FRAME_GET_U_LINE (dest, convert->out_y);
3639 du += convert->out_x >> 1;
3640 dv = FRAME_GET_V_LINE (dest, convert->out_y);
3641 dv += convert->out_x >> 1;
3643 n_threads = convert->conversion_runner->n_threads;
3644 tasks = g_newa (FConvertPlaneTask, n_threads);
3645 tasks_p = g_newa (FConvertPlaneTask *, n_threads);
3647 lines_per_thread = (height + n_threads - 1) / n_threads;
3649 for (i = 0; i < n_threads; i++) {
3650 tasks[i].dstride = FRAME_GET_Y_STRIDE (dest);
3651 tasks[i].dustride = FRAME_GET_U_STRIDE (dest);
3652 tasks[i].dvstride = FRAME_GET_V_STRIDE (dest);
3653 tasks[i].sstride = FRAME_GET_STRIDE (src);
3654 tasks[i].d = dy + i * lines_per_thread * tasks[i].dstride;
3655 tasks[i].du = du + i * lines_per_thread * tasks[i].dustride;
3656 tasks[i].dv = dv + i * lines_per_thread * tasks[i].dvstride;
3657 tasks[i].s = s + i * lines_per_thread * tasks[i].sstride;
3659 tasks[i].width = width;
3660 tasks[i].height = (i + 1) * lines_per_thread;
3661 tasks[i].height = MIN (tasks[i].height, height);
3662 tasks[i].height -= i * lines_per_thread;
3664 tasks_p[i] = &tasks[i];
3667 gst_parallelized_task_runner_run (convert->conversion_runner,
3668 (GstParallelizedTaskFunc) convert_YUY2_Y42B_task, (gpointer) tasks_p);
3670 convert_fill_border (convert, dest);
3674 convert_YUY2_Y444_task (FConvertPlaneTask * task)
3676 video_orc_convert_YUY2_Y444 (task->d,
3677 task->dstride, task->du,
3678 task->dustride, task->dv,
3679 task->dvstride, task->s,
3680 task->sstride, (task->width + 1) / 2, task->height);
3684 convert_YUY2_Y444 (GstVideoConverter * convert, const GstVideoFrame * src,
3685 GstVideoFrame * dest)
3687 gint width = convert->in_width;
3688 gint height = convert->in_height;
3689 guint8 *s, *dy, *du, *dv;
3690 FConvertPlaneTask *tasks;
3691 FConvertPlaneTask **tasks_p;
3693 gint lines_per_thread;
3696 s = FRAME_GET_LINE (src, convert->in_y);
3697 s += (GST_ROUND_UP_2 (convert->in_x) * 2);
3699 dy = FRAME_GET_Y_LINE (dest, convert->out_y);
3700 dy += convert->out_x;
3701 du = FRAME_GET_U_LINE (dest, convert->out_y);
3702 du += convert->out_x;
3703 dv = FRAME_GET_V_LINE (dest, convert->out_y);
3704 dv += convert->out_x;
3706 n_threads = convert->conversion_runner->n_threads;
3707 tasks = g_newa (FConvertPlaneTask, n_threads);
3708 tasks_p = g_newa (FConvertPlaneTask *, n_threads);
3710 lines_per_thread = (height + n_threads - 1) / n_threads;
3712 for (i = 0; i < n_threads; i++) {
3713 tasks[i].dstride = FRAME_GET_Y_STRIDE (dest);
3714 tasks[i].dustride = FRAME_GET_U_STRIDE (dest);
3715 tasks[i].dvstride = FRAME_GET_V_STRIDE (dest);
3716 tasks[i].sstride = FRAME_GET_STRIDE (src);
3717 tasks[i].d = dy + i * lines_per_thread * tasks[i].dstride;
3718 tasks[i].du = du + i * lines_per_thread * tasks[i].dustride;
3719 tasks[i].dv = dv + i * lines_per_thread * tasks[i].dvstride;
3720 tasks[i].s = s + i * lines_per_thread * tasks[i].sstride;
3722 tasks[i].width = width;
3723 tasks[i].height = (i + 1) * lines_per_thread;
3724 tasks[i].height = MIN (tasks[i].height, height);
3725 tasks[i].height -= i * lines_per_thread;
3727 tasks_p[i] = &tasks[i];
3730 gst_parallelized_task_runner_run (convert->conversion_runner,
3731 (GstParallelizedTaskFunc) convert_YUY2_Y444_task, (gpointer) tasks_p);
3733 convert_fill_border (convert, dest);
3737 convert_UYVY_I420_task (FConvertTask * task)
3742 for (i = task->height_0; i < task->height_1; i += 2) {
3743 GET_LINE_OFFSETS (task->interlaced, i, l1, l2);
3745 video_orc_convert_UYVY_I420 (FRAME_GET_COMP_LINE (task->dest, 0, l1),
3746 FRAME_GET_COMP_LINE (task->dest, 0, l2),
3747 FRAME_GET_COMP_LINE (task->dest, 1, i >> 1),
3748 FRAME_GET_COMP_LINE (task->dest, 2, i >> 1),
3749 FRAME_GET_LINE (task->src, l1), FRAME_GET_LINE (task->src, l2),
3750 (task->width + 1) / 2);
3755 convert_UYVY_I420 (GstVideoConverter * convert, const GstVideoFrame * src,
3756 GstVideoFrame * dest)
3759 gint width = convert->in_width;
3760 gint height = convert->in_height;
3761 gboolean interlaced = GST_VIDEO_FRAME_IS_INTERLACED (src);
3763 FConvertTask *tasks;
3764 FConvertTask **tasks_p;
3766 gint lines_per_thread;
3768 /* I420 has half as many chroma lines, as such we have to
3769 * always merge two into one. For non-interlaced these are
3770 * the two next to each other, for interlaced one is skipped
3773 h2 = GST_ROUND_DOWN_4 (height);
3775 h2 = GST_ROUND_DOWN_2 (height);
3777 n_threads = convert->conversion_runner->n_threads;
3778 tasks = g_newa (FConvertTask, n_threads);
3779 tasks_p = g_newa (FConvertTask *, n_threads);
3781 lines_per_thread = GST_ROUND_UP_2 ((h2 + n_threads - 1) / n_threads);
3783 for (i = 0; i < n_threads; i++) {
3785 tasks[i].dest = dest;
3787 tasks[i].interlaced = interlaced;
3788 tasks[i].width = width;
3790 tasks[i].height_0 = i * lines_per_thread;
3791 tasks[i].height_1 = tasks[i].height_0 + lines_per_thread;
3792 tasks[i].height_1 = MIN (h2, tasks[i].height_1);
3794 tasks_p[i] = &tasks[i];
3797 gst_parallelized_task_runner_run (convert->conversion_runner,
3798 (GstParallelizedTaskFunc) convert_UYVY_I420_task, (gpointer) tasks_p);
3800 /* now handle last lines. For interlaced these are up to 3 */
3802 for (i = h2; i < height; i++) {
3803 UNPACK_FRAME (src, convert->tmpline[0], i, convert->in_x, width);
3804 PACK_FRAME (dest, convert->tmpline[0], i, width);
3810 convert_UYVY_AYUV_task (FConvertPlaneTask * task)
3812 video_orc_convert_UYVY_AYUV (task->d, task->dstride, task->s,
3813 task->sstride, task->alpha, (task->width + 1) / 2, task->height);
3817 convert_UYVY_AYUV (GstVideoConverter * convert, const GstVideoFrame * src,
3818 GstVideoFrame * dest)
3820 gint width = convert->in_width;
3821 gint height = convert->in_height;
3823 guint8 alpha = MIN (convert->alpha_value, 255);
3824 FConvertPlaneTask *tasks;
3825 FConvertPlaneTask **tasks_p;
3827 gint lines_per_thread;
3830 s = FRAME_GET_LINE (src, convert->in_y);
3831 s += (GST_ROUND_UP_2 (convert->in_x) * 2);
3832 d = FRAME_GET_LINE (dest, convert->out_y);
3833 d += (convert->out_x * 4);
3835 n_threads = convert->conversion_runner->n_threads;
3836 tasks = g_newa (FConvertPlaneTask, n_threads);
3837 tasks_p = g_newa (FConvertPlaneTask *, n_threads);
3839 lines_per_thread = (height + n_threads - 1) / n_threads;
3841 for (i = 0; i < n_threads; i++) {
3842 tasks[i].dstride = FRAME_GET_STRIDE (dest);
3843 tasks[i].sstride = FRAME_GET_STRIDE (src);
3844 tasks[i].d = d + i * lines_per_thread * tasks[i].dstride;
3845 tasks[i].s = s + i * lines_per_thread * tasks[i].sstride;
3847 tasks[i].width = width;
3848 tasks[i].height = (i + 1) * lines_per_thread;
3849 tasks[i].height = MIN (tasks[i].height, height);
3850 tasks[i].height -= i * lines_per_thread;
3851 tasks[i].alpha = alpha;
3853 tasks_p[i] = &tasks[i];
3856 gst_parallelized_task_runner_run (convert->conversion_runner,
3857 (GstParallelizedTaskFunc) convert_UYVY_AYUV_task, (gpointer) tasks_p);
3859 convert_fill_border (convert, dest);
3863 convert_UYVY_YUY2_task (FConvertPlaneTask * task)
3865 video_orc_convert_UYVY_YUY2 (task->d, task->dstride, task->s,
3866 task->sstride, (task->width + 1) / 2, task->height);
3870 convert_UYVY_YUY2 (GstVideoConverter * convert, const GstVideoFrame * src,
3871 GstVideoFrame * dest)
3873 gint width = convert->in_width;
3874 gint height = convert->in_height;
3876 FConvertPlaneTask *tasks;
3877 FConvertPlaneTask **tasks_p;
3879 gint lines_per_thread;
3882 s = FRAME_GET_LINE (src, convert->in_y);
3883 s += (GST_ROUND_UP_2 (convert->in_x) * 2);
3884 d = FRAME_GET_LINE (dest, convert->out_y);
3885 d += (GST_ROUND_UP_2 (convert->out_x) * 2);
3887 n_threads = convert->conversion_runner->n_threads;
3888 tasks = g_newa (FConvertPlaneTask, n_threads);
3889 tasks_p = g_newa (FConvertPlaneTask *, n_threads);
3891 lines_per_thread = (height + n_threads - 1) / n_threads;
3893 for (i = 0; i < n_threads; i++) {
3894 tasks[i].dstride = FRAME_GET_STRIDE (dest);
3895 tasks[i].sstride = FRAME_GET_STRIDE (src);
3896 tasks[i].d = d + i * lines_per_thread * tasks[i].dstride;
3897 tasks[i].s = s + i * lines_per_thread * tasks[i].sstride;
3899 tasks[i].width = width;
3900 tasks[i].height = (i + 1) * lines_per_thread;
3901 tasks[i].height = MIN (tasks[i].height, height);
3902 tasks[i].height -= i * lines_per_thread;
3904 tasks_p[i] = &tasks[i];
3907 gst_parallelized_task_runner_run (convert->conversion_runner,
3908 (GstParallelizedTaskFunc) convert_UYVY_YUY2_task, (gpointer) tasks_p);
3910 convert_fill_border (convert, dest);
3914 convert_UYVY_Y42B_task (FConvertPlaneTask * task)
3916 video_orc_convert_UYVY_Y42B (task->d, task->dstride, task->du,
3917 task->dustride, task->dv, task->dvstride,
3918 task->s, task->sstride, (task->width + 1) / 2, task->height);
3922 convert_UYVY_Y42B (GstVideoConverter * convert, const GstVideoFrame * src,
3923 GstVideoFrame * dest)
3925 gint width = convert->in_width;
3926 gint height = convert->in_height;
3927 guint8 *s, *dy, *du, *dv;
3928 FConvertPlaneTask *tasks;
3929 FConvertPlaneTask **tasks_p;
3931 gint lines_per_thread;
3934 s = FRAME_GET_LINE (src, convert->in_y);
3935 s += (GST_ROUND_UP_2 (convert->in_x) * 2);
3937 dy = FRAME_GET_Y_LINE (dest, convert->out_y);
3938 dy += convert->out_x;
3939 du = FRAME_GET_U_LINE (dest, convert->out_y);
3940 du += convert->out_x >> 1;
3941 dv = FRAME_GET_V_LINE (dest, convert->out_y);
3942 dv += convert->out_x >> 1;
3944 n_threads = convert->conversion_runner->n_threads;
3945 tasks = g_newa (FConvertPlaneTask, n_threads);
3946 tasks_p = g_newa (FConvertPlaneTask *, n_threads);
3948 lines_per_thread = (height + n_threads - 1) / n_threads;
3950 for (i = 0; i < n_threads; i++) {
3951 tasks[i].dstride = FRAME_GET_Y_STRIDE (dest);
3952 tasks[i].dustride = FRAME_GET_U_STRIDE (dest);
3953 tasks[i].dvstride = FRAME_GET_V_STRIDE (dest);
3954 tasks[i].sstride = FRAME_GET_STRIDE (src);
3955 tasks[i].d = dy + i * lines_per_thread * tasks[i].dstride;
3956 tasks[i].du = du + i * lines_per_thread * tasks[i].dustride;
3957 tasks[i].dv = dv + i * lines_per_thread * tasks[i].dvstride;
3958 tasks[i].s = s + i * lines_per_thread * tasks[i].sstride;
3960 tasks[i].width = width;
3961 tasks[i].height = (i + 1) * lines_per_thread;
3962 tasks[i].height = MIN (tasks[i].height, height);
3963 tasks[i].height -= i * lines_per_thread;
3965 tasks_p[i] = &tasks[i];
3968 gst_parallelized_task_runner_run (convert->conversion_runner,
3969 (GstParallelizedTaskFunc) convert_UYVY_Y42B_task, (gpointer) tasks_p);
3971 convert_fill_border (convert, dest);
3975 convert_UYVY_Y444_task (FConvertPlaneTask * task)
3977 video_orc_convert_UYVY_Y444 (task->d,
3978 task->dstride, task->du,
3979 task->dustride, task->dv,
3980 task->dvstride, task->s,
3981 task->sstride, (task->width + 1) / 2, task->height);
3985 convert_UYVY_Y444 (GstVideoConverter * convert, const GstVideoFrame * src,
3986 GstVideoFrame * dest)
3988 gint width = convert->in_width;
3989 gint height = convert->in_height;
3990 guint8 *s, *dy, *du, *dv;
3991 FConvertPlaneTask *tasks;
3992 FConvertPlaneTask **tasks_p;
3994 gint lines_per_thread;
3997 s = FRAME_GET_LINE (src, convert->in_y);
3998 s += (GST_ROUND_UP_2 (convert->in_x) * 2);
4000 dy = FRAME_GET_Y_LINE (dest, convert->out_y);
4001 dy += convert->out_x;
4002 du = FRAME_GET_U_LINE (dest, convert->out_y);
4003 du += convert->out_x;
4004 dv = FRAME_GET_V_LINE (dest, convert->out_y);
4005 dv += convert->out_x;
4007 n_threads = convert->conversion_runner->n_threads;
4008 tasks = g_newa (FConvertPlaneTask, n_threads);
4009 tasks_p = g_newa (FConvertPlaneTask *, n_threads);
4011 lines_per_thread = (height + n_threads - 1) / n_threads;
4013 for (i = 0; i < n_threads; i++) {
4014 tasks[i].dstride = FRAME_GET_Y_STRIDE (dest);
4015 tasks[i].dustride = FRAME_GET_U_STRIDE (dest);
4016 tasks[i].dvstride = FRAME_GET_V_STRIDE (dest);
4017 tasks[i].sstride = FRAME_GET_STRIDE (src);
4018 tasks[i].d = dy + i * lines_per_thread * tasks[i].dstride;
4019 tasks[i].du = du + i * lines_per_thread * tasks[i].dustride;
4020 tasks[i].dv = dv + i * lines_per_thread * tasks[i].dvstride;
4021 tasks[i].s = s + i * lines_per_thread * tasks[i].sstride;
4023 tasks[i].width = width;
4024 tasks[i].height = (i + 1) * lines_per_thread;
4025 tasks[i].height = MIN (tasks[i].height, height);
4026 tasks[i].height -= i * lines_per_thread;
4028 tasks_p[i] = &tasks[i];
4031 gst_parallelized_task_runner_run (convert->conversion_runner,
4032 (GstParallelizedTaskFunc) convert_UYVY_Y444_task, (gpointer) tasks_p);
4034 convert_fill_border (convert, dest);
4038 convert_UYVY_GRAY8_task (FConvertPlaneTask * task)
4040 video_orc_convert_UYVY_GRAY8 (task->d, task->dstride, (guint16 *) task->s,
4041 task->sstride, task->width, task->height);
4045 convert_UYVY_GRAY8 (GstVideoConverter * convert, const GstVideoFrame * src,
4046 GstVideoFrame * dest)
4048 gint width = convert->in_width;
4049 gint height = convert->in_height;
4052 FConvertPlaneTask *tasks;
4053 FConvertPlaneTask **tasks_p;
4055 gint lines_per_thread;
4058 s = GST_VIDEO_FRAME_PLANE_DATA (src, 0);
4059 d = GST_VIDEO_FRAME_PLANE_DATA (dest, 0);
4061 n_threads = convert->conversion_runner->n_threads;
4062 tasks = g_newa (FConvertPlaneTask, n_threads);
4063 tasks_p = g_newa (FConvertPlaneTask *, n_threads);
4065 lines_per_thread = (height + n_threads - 1) / n_threads;
4067 for (i = 0; i < n_threads; i++) {
4068 tasks[i].dstride = FRAME_GET_STRIDE (dest);
4069 tasks[i].sstride = FRAME_GET_STRIDE (src);
4070 tasks[i].d = d + i * lines_per_thread * tasks[i].dstride;
4071 tasks[i].s = s + i * lines_per_thread * tasks[i].sstride;
4073 tasks[i].width = width;
4074 tasks[i].height = (i + 1) * lines_per_thread;
4075 tasks[i].height = MIN (tasks[i].height, height);
4076 tasks[i].height -= i * lines_per_thread;
4078 tasks_p[i] = &tasks[i];
4081 gst_parallelized_task_runner_run (convert->conversion_runner,
4082 (GstParallelizedTaskFunc) convert_UYVY_GRAY8_task, (gpointer) tasks_p);
4084 convert_fill_border (convert, dest);
4088 convert_AYUV_I420_task (FConvertPlaneTask * task)
4090 video_orc_convert_AYUV_I420 (task->d,
4091 2 * task->dstride, task->d2,
4092 2 * task->dstride, task->du,
4093 task->dustride, task->dv,
4094 task->dvstride, task->s,
4095 2 * task->sstride, task->s2,
4096 2 * task->sstride, task->width / 2, task->height / 2);
4100 convert_AYUV_I420 (GstVideoConverter * convert, const GstVideoFrame * src,
4101 GstVideoFrame * dest)
4103 gint width = convert->in_width;
4104 gint height = convert->in_height;
4105 guint8 *s1, *s2, *dy1, *dy2, *du, *dv;
4106 FConvertPlaneTask *tasks;
4107 FConvertPlaneTask **tasks_p;
4109 gint lines_per_thread;
4112 s1 = FRAME_GET_LINE (src, convert->in_y + 0);
4113 s1 += convert->in_x * 4;
4114 s2 = FRAME_GET_LINE (src, convert->in_y + 1);
4115 s2 += convert->in_x * 4;
4117 dy1 = FRAME_GET_Y_LINE (dest, convert->out_y + 0);
4118 dy1 += convert->out_x;
4119 dy2 = FRAME_GET_Y_LINE (dest, convert->out_y + 1);
4120 dy2 += convert->out_x;
4121 du = FRAME_GET_U_LINE (dest, convert->out_y >> 1);
4122 du += convert->out_x >> 1;
4123 dv = FRAME_GET_V_LINE (dest, convert->out_y >> 1);
4124 dv += convert->out_x >> 1;
4126 /* only for even width/height */
4128 n_threads = convert->conversion_runner->n_threads;
4129 tasks = g_newa (FConvertPlaneTask, n_threads);
4130 tasks_p = g_newa (FConvertPlaneTask *, n_threads);
4132 lines_per_thread = GST_ROUND_UP_2 ((height + n_threads - 1) / n_threads);
4134 for (i = 0; i < n_threads; i++) {
4135 tasks[i].dstride = FRAME_GET_Y_STRIDE (dest);
4136 tasks[i].dustride = FRAME_GET_U_STRIDE (dest);
4137 tasks[i].dvstride = FRAME_GET_V_STRIDE (dest);
4138 tasks[i].sstride = FRAME_GET_STRIDE (src);
4139 tasks[i].d = dy1 + i * lines_per_thread * tasks[i].dstride;
4140 tasks[i].d2 = dy2 + i * lines_per_thread * tasks[i].dstride;
4141 tasks[i].du = du + i * lines_per_thread * tasks[i].dustride / 2;
4142 tasks[i].dv = dv + i * lines_per_thread * tasks[i].dvstride / 2;
4143 tasks[i].s = s1 + i * lines_per_thread * tasks[i].sstride;
4144 tasks[i].s2 = s2 + i * lines_per_thread * tasks[i].sstride;
4146 tasks[i].width = width;
4147 tasks[i].height = (i + 1) * lines_per_thread;
4148 tasks[i].height = MIN (tasks[i].height, height);
4149 tasks[i].height -= i * lines_per_thread;
4151 tasks_p[i] = &tasks[i];
4154 gst_parallelized_task_runner_run (convert->conversion_runner,
4155 (GstParallelizedTaskFunc) convert_AYUV_I420_task, (gpointer) tasks_p);
4157 convert_fill_border (convert, dest);
4161 convert_AYUV_YUY2_task (FConvertPlaneTask * task)
4163 video_orc_convert_AYUV_YUY2 (task->d, task->dstride, task->s,
4164 task->sstride, task->width / 2, task->height);
4168 convert_AYUV_YUY2 (GstVideoConverter * convert, const GstVideoFrame * src,
4169 GstVideoFrame * dest)
4171 gint width = convert->in_width;
4172 gint height = convert->in_height;
4174 FConvertPlaneTask *tasks;
4175 FConvertPlaneTask **tasks_p;
4177 gint lines_per_thread;
4180 s = FRAME_GET_LINE (src, convert->in_y);
4181 s += convert->in_x * 4;
4182 d = FRAME_GET_LINE (dest, convert->out_y);
4183 d += (GST_ROUND_UP_2 (convert->out_x) * 2);
4185 /* only for even width */
4186 n_threads = convert->conversion_runner->n_threads;
4187 tasks = g_newa (FConvertPlaneTask, n_threads);
4188 tasks_p = g_newa (FConvertPlaneTask *, n_threads);
4190 lines_per_thread = (height + n_threads - 1) / n_threads;
4192 for (i = 0; i < n_threads; i++) {
4193 tasks[i].dstride = FRAME_GET_STRIDE (dest);
4194 tasks[i].sstride = FRAME_GET_STRIDE (src);
4195 tasks[i].d = d + i * lines_per_thread * tasks[i].dstride;
4196 tasks[i].s = s + i * lines_per_thread * tasks[i].sstride;
4198 tasks[i].width = width;
4199 tasks[i].height = (i + 1) * lines_per_thread;
4200 tasks[i].height = MIN (tasks[i].height, height);
4201 tasks[i].height -= i * lines_per_thread;
4203 tasks_p[i] = &tasks[i];
4206 gst_parallelized_task_runner_run (convert->conversion_runner,
4207 (GstParallelizedTaskFunc) convert_AYUV_YUY2_task, (gpointer) tasks_p);
4209 convert_fill_border (convert, dest);
4213 convert_AYUV_UYVY_task (FConvertPlaneTask * task)
4215 video_orc_convert_AYUV_UYVY (task->d, task->dstride, task->s,
4216 task->sstride, task->width / 2, task->height);
4220 convert_AYUV_UYVY (GstVideoConverter * convert, const GstVideoFrame * src,
4221 GstVideoFrame * dest)
4223 gint width = convert->in_width;
4224 gint height = convert->in_height;
4226 FConvertPlaneTask *tasks;
4227 FConvertPlaneTask **tasks_p;
4229 gint lines_per_thread;
4232 s = FRAME_GET_LINE (src, convert->in_y);
4233 s += convert->in_x * 4;
4234 d = FRAME_GET_LINE (dest, convert->out_y);
4235 d += (GST_ROUND_UP_2 (convert->out_x) * 2);
4237 /* only for even width */
4238 n_threads = convert->conversion_runner->n_threads;
4239 tasks = g_newa (FConvertPlaneTask, n_threads);
4240 tasks_p = g_newa (FConvertPlaneTask *, n_threads);
4242 lines_per_thread = (height + n_threads - 1) / n_threads;
4244 for (i = 0; i < n_threads; i++) {
4245 tasks[i].dstride = FRAME_GET_STRIDE (dest);
4246 tasks[i].sstride = FRAME_GET_STRIDE (src);
4247 tasks[i].d = d + i * lines_per_thread * tasks[i].dstride;
4248 tasks[i].s = s + i * lines_per_thread * tasks[i].sstride;
4250 tasks[i].width = width;
4251 tasks[i].height = (i + 1) * lines_per_thread;
4252 tasks[i].height = MIN (tasks[i].height, height);
4253 tasks[i].height -= i * lines_per_thread;
4255 tasks_p[i] = &tasks[i];
4258 gst_parallelized_task_runner_run (convert->conversion_runner,
4259 (GstParallelizedTaskFunc) convert_AYUV_UYVY_task, (gpointer) tasks_p);
4261 convert_fill_border (convert, dest);
4265 convert_AYUV_Y42B_task (FConvertPlaneTask * task)
4267 video_orc_convert_AYUV_Y42B (task->d, task->dstride, task->du,
4268 task->dustride, task->dv, task->dvstride,
4269 task->s, task->sstride, task->width / 2, task->height);
4273 convert_AYUV_Y42B (GstVideoConverter * convert, const GstVideoFrame * src,
4274 GstVideoFrame * dest)
4276 gint width = convert->in_width;
4277 gint height = convert->in_height;
4278 guint8 *s, *dy, *du, *dv;
4279 FConvertPlaneTask *tasks;
4280 FConvertPlaneTask **tasks_p;
4282 gint lines_per_thread;
4285 s = FRAME_GET_LINE (src, convert->in_y);
4286 s += convert->in_x * 4;
4288 dy = FRAME_GET_Y_LINE (dest, convert->out_y);
4289 dy += convert->out_x;
4290 du = FRAME_GET_U_LINE (dest, convert->out_y);
4291 du += convert->out_x >> 1;
4292 dv = FRAME_GET_V_LINE (dest, convert->out_y);
4293 dv += convert->out_x >> 1;
4295 /* only works for even width */
4296 n_threads = convert->conversion_runner->n_threads;
4297 tasks = g_newa (FConvertPlaneTask, n_threads);
4298 tasks_p = g_newa (FConvertPlaneTask *, n_threads);
4300 lines_per_thread = (height + n_threads - 1) / n_threads;
4302 for (i = 0; i < n_threads; i++) {
4303 tasks[i].dstride = FRAME_GET_Y_STRIDE (dest);
4304 tasks[i].dustride = FRAME_GET_U_STRIDE (dest);
4305 tasks[i].dvstride = FRAME_GET_V_STRIDE (dest);
4306 tasks[i].sstride = FRAME_GET_STRIDE (src);
4307 tasks[i].d = dy + i * lines_per_thread * tasks[i].dstride;
4308 tasks[i].du = du + i * lines_per_thread * tasks[i].dustride;
4309 tasks[i].dv = dv + i * lines_per_thread * tasks[i].dvstride;
4310 tasks[i].s = s + i * lines_per_thread * tasks[i].sstride;
4312 tasks[i].width = width;
4313 tasks[i].height = (i + 1) * lines_per_thread;
4314 tasks[i].height = MIN (tasks[i].height, height);
4315 tasks[i].height -= i * lines_per_thread;
4317 tasks_p[i] = &tasks[i];
4320 gst_parallelized_task_runner_run (convert->conversion_runner,
4321 (GstParallelizedTaskFunc) convert_AYUV_Y42B_task, (gpointer) tasks_p);
4323 convert_fill_border (convert, dest);
4327 convert_AYUV_Y444_task (FConvertPlaneTask * task)
4329 video_orc_convert_AYUV_Y444 (task->d, task->dstride, task->du,
4330 task->dustride, task->dv, task->dvstride,
4331 task->s, task->sstride, task->width, task->height);
4335 convert_AYUV_Y444 (GstVideoConverter * convert, const GstVideoFrame * src,
4336 GstVideoFrame * dest)
4338 gint width = convert->in_width;
4339 gint height = convert->in_height;
4340 guint8 *s, *dy, *du, *dv;
4341 FConvertPlaneTask *tasks;
4342 FConvertPlaneTask **tasks_p;
4344 gint lines_per_thread;
4347 s = FRAME_GET_LINE (src, convert->in_y);
4348 s += convert->in_x * 4;
4350 dy = FRAME_GET_Y_LINE (dest, convert->out_y);
4351 dy += convert->out_x;
4352 du = FRAME_GET_U_LINE (dest, convert->out_y);
4353 du += convert->out_x;
4354 dv = FRAME_GET_V_LINE (dest, convert->out_y);
4355 dv += convert->out_x;
4357 n_threads = convert->conversion_runner->n_threads;
4358 tasks = g_newa (FConvertPlaneTask, n_threads);
4359 tasks_p = g_newa (FConvertPlaneTask *, n_threads);
4361 lines_per_thread = (height + n_threads - 1) / n_threads;
4363 for (i = 0; i < n_threads; i++) {
4364 tasks[i].dstride = FRAME_GET_Y_STRIDE (dest);
4365 tasks[i].dustride = FRAME_GET_U_STRIDE (dest);
4366 tasks[i].dvstride = FRAME_GET_V_STRIDE (dest);
4367 tasks[i].sstride = FRAME_GET_STRIDE (src);
4368 tasks[i].d = dy + i * lines_per_thread * tasks[i].dstride;
4369 tasks[i].du = du + i * lines_per_thread * tasks[i].dustride;
4370 tasks[i].dv = dv + i * lines_per_thread * tasks[i].dvstride;
4371 tasks[i].s = s + i * lines_per_thread * tasks[i].sstride;
4373 tasks[i].width = width;
4374 tasks[i].height = (i + 1) * lines_per_thread;
4375 tasks[i].height = MIN (tasks[i].height, height);
4376 tasks[i].height -= i * lines_per_thread;
4378 tasks_p[i] = &tasks[i];
4381 gst_parallelized_task_runner_run (convert->conversion_runner,
4382 (GstParallelizedTaskFunc) convert_AYUV_Y444_task, (gpointer) tasks_p);
4383 convert_fill_border (convert, dest);
4387 convert_Y42B_YUY2_task (FConvertPlaneTask * task)
4389 video_orc_convert_Y42B_YUY2 (task->d, task->dstride,
4390 task->s, task->sstride,
4391 task->su, task->sustride,
4392 task->sv, task->svstride, (task->width + 1) / 2, task->height);
4396 convert_Y42B_YUY2 (GstVideoConverter * convert, const GstVideoFrame * src,
4397 GstVideoFrame * dest)
4399 gint width = convert->in_width;
4400 gint height = convert->in_height;
4401 guint8 *sy, *su, *sv, *d;
4402 FConvertPlaneTask *tasks;
4403 FConvertPlaneTask **tasks_p;
4405 gint lines_per_thread;
4408 sy = FRAME_GET_Y_LINE (src, convert->in_y);
4409 sy += convert->in_x;
4410 su = FRAME_GET_U_LINE (src, convert->in_y);
4411 su += convert->in_x >> 1;
4412 sv = FRAME_GET_V_LINE (src, convert->in_y);
4413 sv += convert->in_x >> 1;
4415 d = FRAME_GET_LINE (dest, convert->out_y);
4416 d += (GST_ROUND_UP_2 (convert->out_x) * 2);
4418 n_threads = convert->conversion_runner->n_threads;
4419 tasks = g_newa (FConvertPlaneTask, n_threads);
4420 tasks_p = g_newa (FConvertPlaneTask *, n_threads);
4422 lines_per_thread = (height + n_threads - 1) / n_threads;
4424 for (i = 0; i < n_threads; i++) {
4425 tasks[i].dstride = FRAME_GET_STRIDE (dest);
4426 tasks[i].sstride = FRAME_GET_Y_STRIDE (src);
4427 tasks[i].sustride = FRAME_GET_U_STRIDE (src);
4428 tasks[i].svstride = FRAME_GET_V_STRIDE (src);
4429 tasks[i].d = d + i * lines_per_thread * tasks[i].dstride;
4430 tasks[i].s = sy + i * lines_per_thread * tasks[i].sstride;
4431 tasks[i].su = su + i * lines_per_thread * tasks[i].sustride;
4432 tasks[i].sv = sv + i * lines_per_thread * tasks[i].svstride;
4434 tasks[i].width = width;
4435 tasks[i].height = (i + 1) * lines_per_thread;
4436 tasks[i].height = MIN (tasks[i].height, height);
4437 tasks[i].height -= i * lines_per_thread;
4439 tasks_p[i] = &tasks[i];
4442 gst_parallelized_task_runner_run (convert->conversion_runner,
4443 (GstParallelizedTaskFunc) convert_Y42B_YUY2_task, (gpointer) tasks_p);
4445 convert_fill_border (convert, dest);
4449 convert_Y42B_UYVY_task (FConvertPlaneTask * task)
4451 video_orc_convert_Y42B_UYVY (task->d, task->dstride,
4452 task->s, task->sstride,
4453 task->su, task->sustride,
4454 task->sv, task->svstride, (task->width + 1) / 2, task->height);
4458 convert_Y42B_UYVY (GstVideoConverter * convert, const GstVideoFrame * src,
4459 GstVideoFrame * dest)
4461 gint width = convert->in_width;
4462 gint height = convert->in_height;
4463 guint8 *sy, *su, *sv, *d;
4464 FConvertPlaneTask *tasks;
4465 FConvertPlaneTask **tasks_p;
4467 gint lines_per_thread;
4470 sy = FRAME_GET_Y_LINE (src, convert->in_y);
4471 sy += convert->in_x;
4472 su = FRAME_GET_U_LINE (src, convert->in_y);
4473 su += convert->in_x >> 1;
4474 sv = FRAME_GET_V_LINE (src, convert->in_y);
4475 sv += convert->in_x >> 1;
4477 d = FRAME_GET_LINE (dest, convert->out_y);
4478 d += (GST_ROUND_UP_2 (convert->out_x) * 2);
4480 n_threads = convert->conversion_runner->n_threads;
4481 tasks = g_newa (FConvertPlaneTask, n_threads);
4482 tasks_p = g_newa (FConvertPlaneTask *, n_threads);
4484 lines_per_thread = (height + n_threads - 1) / n_threads;
4486 for (i = 0; i < n_threads; i++) {
4487 tasks[i].dstride = FRAME_GET_STRIDE (dest);
4488 tasks[i].sstride = FRAME_GET_Y_STRIDE (src);
4489 tasks[i].sustride = FRAME_GET_U_STRIDE (src);
4490 tasks[i].svstride = FRAME_GET_V_STRIDE (src);
4491 tasks[i].d = d + i * lines_per_thread * tasks[i].dstride;
4492 tasks[i].s = sy + i * lines_per_thread * tasks[i].sstride;
4493 tasks[i].su = su + i * lines_per_thread * tasks[i].sustride;
4494 tasks[i].sv = sv + i * lines_per_thread * tasks[i].svstride;
4496 tasks[i].width = width;
4497 tasks[i].height = (i + 1) * lines_per_thread;
4498 tasks[i].height = MIN (tasks[i].height, height);
4499 tasks[i].height -= i * lines_per_thread;
4501 tasks_p[i] = &tasks[i];
4504 gst_parallelized_task_runner_run (convert->conversion_runner,
4505 (GstParallelizedTaskFunc) convert_Y42B_UYVY_task, (gpointer) tasks_p);
4507 convert_fill_border (convert, dest);
4511 convert_Y42B_AYUV_task (FConvertPlaneTask * task)
4513 video_orc_convert_Y42B_AYUV (task->d, task->dstride, task->s,
4517 task->sv, task->svstride, task->alpha, task->width / 2, task->height);
4521 convert_Y42B_AYUV (GstVideoConverter * convert, const GstVideoFrame * src,
4522 GstVideoFrame * dest)
4524 gint width = convert->in_width;
4525 gint height = convert->in_height;
4526 guint8 *sy, *su, *sv, *d;
4527 guint8 alpha = MIN (convert->alpha_value, 255);
4528 FConvertPlaneTask *tasks;
4529 FConvertPlaneTask **tasks_p;
4531 gint lines_per_thread;
4534 sy = FRAME_GET_Y_LINE (src, convert->in_y);
4535 sy += convert->in_x;
4536 su = FRAME_GET_U_LINE (src, convert->in_y);
4537 su += convert->in_x >> 1;
4538 sv = FRAME_GET_V_LINE (src, convert->in_y);
4539 sv += convert->in_x >> 1;
4541 d = FRAME_GET_LINE (dest, convert->out_y);
4542 d += convert->out_x * 4;
4544 /* only for even width */
4545 n_threads = convert->conversion_runner->n_threads;
4546 tasks = g_newa (FConvertPlaneTask, n_threads);
4547 tasks_p = g_newa (FConvertPlaneTask *, n_threads);
4549 lines_per_thread = (height + n_threads - 1) / n_threads;
4551 for (i = 0; i < n_threads; i++) {
4552 tasks[i].dstride = FRAME_GET_STRIDE (dest);
4553 tasks[i].sstride = FRAME_GET_Y_STRIDE (src);
4554 tasks[i].sustride = FRAME_GET_U_STRIDE (src);
4555 tasks[i].svstride = FRAME_GET_V_STRIDE (src);
4556 tasks[i].d = d + i * lines_per_thread * tasks[i].dstride;
4557 tasks[i].s = sy + i * lines_per_thread * tasks[i].sstride;
4558 tasks[i].su = su + i * lines_per_thread * tasks[i].sustride;
4559 tasks[i].sv = sv + i * lines_per_thread * tasks[i].svstride;
4561 tasks[i].width = width;
4562 tasks[i].height = (i + 1) * lines_per_thread;
4563 tasks[i].height = MIN (tasks[i].height, height);
4564 tasks[i].height -= i * lines_per_thread;
4565 tasks[i].alpha = alpha;
4567 tasks_p[i] = &tasks[i];
4570 gst_parallelized_task_runner_run (convert->conversion_runner,
4571 (GstParallelizedTaskFunc) convert_Y42B_AYUV_task, (gpointer) tasks_p);
4573 convert_fill_border (convert, dest);
4577 convert_Y444_YUY2_task (FConvertPlaneTask * task)
4579 video_orc_convert_Y444_YUY2 (task->d, task->dstride, task->s,
4582 task->sustride, task->sv, task->svstride, task->width / 2, task->height);
4586 convert_Y444_YUY2 (GstVideoConverter * convert, const GstVideoFrame * src,
4587 GstVideoFrame * dest)
4589 gint width = convert->in_width;
4590 gint height = convert->in_height;
4591 guint8 *sy, *su, *sv, *d;
4592 FConvertPlaneTask *tasks;
4593 FConvertPlaneTask **tasks_p;
4595 gint lines_per_thread;
4598 sy = FRAME_GET_Y_LINE (src, convert->in_y);
4599 sy += convert->in_x;
4600 su = FRAME_GET_U_LINE (src, convert->in_y);
4601 su += convert->in_x;
4602 sv = FRAME_GET_V_LINE (src, convert->in_y);
4603 sv += convert->in_x;
4605 d = FRAME_GET_LINE (dest, convert->out_y);
4606 d += (GST_ROUND_UP_2 (convert->out_x) * 2);
4608 n_threads = convert->conversion_runner->n_threads;
4609 tasks = g_newa (FConvertPlaneTask, n_threads);
4610 tasks_p = g_newa (FConvertPlaneTask *, n_threads);
4612 lines_per_thread = (height + n_threads - 1) / n_threads;
4614 for (i = 0; i < n_threads; i++) {
4615 tasks[i].dstride = FRAME_GET_STRIDE (dest);
4616 tasks[i].sstride = FRAME_GET_Y_STRIDE (src);
4617 tasks[i].sustride = FRAME_GET_U_STRIDE (src);
4618 tasks[i].svstride = FRAME_GET_V_STRIDE (src);
4619 tasks[i].d = d + i * lines_per_thread * tasks[i].dstride;
4620 tasks[i].s = sy + i * lines_per_thread * tasks[i].sstride;
4621 tasks[i].su = su + i * lines_per_thread * tasks[i].sustride;
4622 tasks[i].sv = sv + i * lines_per_thread * tasks[i].svstride;
4624 tasks[i].width = width;
4625 tasks[i].height = (i + 1) * lines_per_thread;
4626 tasks[i].height = MIN (tasks[i].height, height);
4627 tasks[i].height -= i * lines_per_thread;
4629 tasks_p[i] = &tasks[i];
4632 gst_parallelized_task_runner_run (convert->conversion_runner,
4633 (GstParallelizedTaskFunc) convert_Y444_YUY2_task, (gpointer) tasks_p);
4635 convert_fill_border (convert, dest);
/* convert_Y444_UYVY_task:
 * Worker: packs one horizontal band of planar Y444 into packed UYVY via
 * the Orc kernel.  width/2 because UYVY is processed in 2-pixel
 * macropixels (U Y0 V Y1). */
4639 convert_Y444_UYVY_task (FConvertPlaneTask * task)
4641 video_orc_convert_Y444_UYVY (task->d, task->dstride, task->s,
4644 task->sustride, task->sv, task->svstride, task->width / 2, task->height);
/* convert_Y444_UYVY:
 * Fast-path Y444 -> UYVY conversion.  Splits the frame into n_threads
 * horizontal bands and runs convert_Y444_UYVY_task on each in parallel,
 * then fills any configured border. */
4648 convert_Y444_UYVY (GstVideoConverter * convert, const GstVideoFrame * src,
4649 GstVideoFrame * dest)
4651 gint width = convert->in_width;
4652 gint height = convert->in_height;
4653 guint8 *sy, *su, *sv, *d;
4654 FConvertPlaneTask *tasks;
4655 FConvertPlaneTask **tasks_p;
4657 gint lines_per_thread;
/* Source plane pointers, offset to the configured input crop rectangle. */
4660 sy = FRAME_GET_Y_LINE (src, convert->in_y);
4661 sy += convert->in_x;
4662 su = FRAME_GET_U_LINE (src, convert->in_y);
4663 su += convert->in_x;
4664 sv = FRAME_GET_V_LINE (src, convert->in_y);
4665 sv += convert->in_x;
4667 d = FRAME_GET_LINE (dest, convert->out_y);
/* out_x rounded up to a macropixel boundary; 2 bytes per pixel in UYVY. */
4668 d += (GST_ROUND_UP_2 (convert->out_x) * 2);
4670 n_threads = convert->conversion_runner->n_threads;
4671 tasks = g_newa (FConvertPlaneTask, n_threads);
4672 tasks_p = g_newa (FConvertPlaneTask *, n_threads);
4674 lines_per_thread = (height + n_threads - 1) / n_threads;
4676 for (i = 0; i < n_threads; i++) {
4677 tasks[i].dstride = FRAME_GET_STRIDE (dest);
4678 tasks[i].sstride = FRAME_GET_Y_STRIDE (src);
4679 tasks[i].sustride = FRAME_GET_U_STRIDE (src);
4680 tasks[i].svstride = FRAME_GET_V_STRIDE (src);
4681 tasks[i].d = d + i * lines_per_thread * tasks[i].dstride;
4682 tasks[i].s = sy + i * lines_per_thread * tasks[i].sstride;
4683 tasks[i].su = su + i * lines_per_thread * tasks[i].sustride;
4684 tasks[i].sv = sv + i * lines_per_thread * tasks[i].svstride;
4686 tasks[i].width = width;
/* Clamp the last band so the per-thread heights sum exactly to `height`. */
4687 tasks[i].height = (i + 1) * lines_per_thread;
4688 tasks[i].height = MIN (tasks[i].height, height);
4689 tasks[i].height -= i * lines_per_thread;
4691 tasks_p[i] = &tasks[i];
4694 gst_parallelized_task_runner_run (convert->conversion_runner,
4695 (GstParallelizedTaskFunc) convert_Y444_UYVY_task, (gpointer) tasks_p);
4697 convert_fill_border (convert, dest);
/* convert_Y444_AYUV_task:
 * Worker: packs one horizontal band of planar Y444 into packed AYUV,
 * filling the A channel with the constant task->alpha. */
4701 convert_Y444_AYUV_task (FConvertPlaneTask * task)
4703 video_orc_convert_Y444_AYUV (task->d, task->dstride, task->s,
4707 task->sv, task->svstride, task->alpha, task->width, task->height);
/* convert_Y444_AYUV:
 * Fast-path Y444 -> AYUV conversion; same multi-threaded band layout as
 * the other Y444 fast paths, plus a constant alpha value. */
4711 convert_Y444_AYUV (GstVideoConverter * convert, const GstVideoFrame * src,
4712 GstVideoFrame * dest)
4714 gint width = convert->in_width;
4715 gint height = convert->in_height;
4716 guint8 *sy, *su, *sv, *d;
/* Configured alpha clamped to the valid 8-bit range. */
4717 guint8 alpha = MIN (convert->alpha_value, 255);
4718 FConvertPlaneTask *tasks;
4719 FConvertPlaneTask **tasks_p;
4721 gint lines_per_thread;
4724 sy = FRAME_GET_Y_LINE (src, convert->in_y);
4725 sy += convert->in_x;
4726 su = FRAME_GET_U_LINE (src, convert->in_y);
4727 su += convert->in_x;
4728 sv = FRAME_GET_V_LINE (src, convert->in_y);
4729 sv += convert->in_x;
4731 d = FRAME_GET_LINE (dest, convert->out_y);
/* 4 bytes per pixel in AYUV. */
4732 d += convert->out_x * 4;
4734 n_threads = convert->conversion_runner->n_threads;
4735 tasks = g_newa (FConvertPlaneTask, n_threads);
4736 tasks_p = g_newa (FConvertPlaneTask *, n_threads);
4738 lines_per_thread = (height + n_threads - 1) / n_threads;
4740 for (i = 0; i < n_threads; i++) {
4741 tasks[i].dstride = FRAME_GET_STRIDE (dest);
4742 tasks[i].sstride = FRAME_GET_Y_STRIDE (src);
4743 tasks[i].sustride = FRAME_GET_U_STRIDE (src);
4744 tasks[i].svstride = FRAME_GET_V_STRIDE (src);
4745 tasks[i].d = d + i * lines_per_thread * tasks[i].dstride;
4746 tasks[i].s = sy + i * lines_per_thread * tasks[i].sstride;
4747 tasks[i].su = su + i * lines_per_thread * tasks[i].sustride;
4748 tasks[i].sv = sv + i * lines_per_thread * tasks[i].svstride;
4750 tasks[i].width = width;
/* Clamp the last band so the per-thread heights sum exactly to `height`. */
4751 tasks[i].height = (i + 1) * lines_per_thread;
4752 tasks[i].height = MIN (tasks[i].height, height);
4753 tasks[i].height -= i * lines_per_thread;
4754 tasks[i].alpha = alpha;
4756 tasks_p[i] = &tasks[i];
4759 gst_parallelized_task_runner_run (convert->conversion_runner,
4760 (GstParallelizedTaskFunc) convert_Y444_AYUV_task, (gpointer) tasks_p);
4762 convert_fill_border (convert, dest);
/* Little-endian variants of the packed AYUV -> packed RGB fast paths.
 * The matching #else/#endif is further down in the file. */
4765 #if G_BYTE_ORDER == G_LITTLE_ENDIAN
/* convert_AYUV_ARGB_task:
 * Worker: color-converts one band of AYUV to ARGB using the Orc kernel
 * and the precomputed integer matrix coefficients in task->data->im. */
4767 convert_AYUV_ARGB_task (FConvertPlaneTask * task)
4769 video_orc_convert_AYUV_ARGB (task->d, task->dstride, task->s,
4770 task->sstride, task->data->im[0][0], task->data->im[0][2],
4771 task->data->im[2][1], task->data->im[1][1], task->data->im[1][2],
4772 task->width, task->height);
/* convert_AYUV_ARGB:
 * Fast-path AYUV -> ARGB: split the frame into horizontal bands and run
 * the worker on each thread of the conversion runner. */
4776 convert_AYUV_ARGB (GstVideoConverter * convert, const GstVideoFrame * src,
4777 GstVideoFrame * dest)
4779 gint width = convert->in_width;
4780 gint height = convert->in_height;
4781 MatrixData *data = &convert->convert_matrix;
4783 FConvertPlaneTask *tasks;
4784 FConvertPlaneTask **tasks_p;
4786 gint lines_per_thread;
/* Both formats are 4 bytes per pixel; offset to the crop rectangles. */
4789 s = FRAME_GET_LINE (src, convert->in_y);
4790 s += (convert->in_x * 4);
4791 d = FRAME_GET_LINE (dest, convert->out_y);
4792 d += (convert->out_x * 4);
4794 n_threads = convert->conversion_runner->n_threads;
4795 tasks = g_newa (FConvertPlaneTask, n_threads);
4796 tasks_p = g_newa (FConvertPlaneTask *, n_threads);
4798 lines_per_thread = (height + n_threads - 1) / n_threads;
4800 for (i = 0; i < n_threads; i++) {
4801 tasks[i].dstride = FRAME_GET_STRIDE (dest);
4802 tasks[i].sstride = FRAME_GET_STRIDE (src);
4803 tasks[i].d = d + i * lines_per_thread * tasks[i].dstride;
4804 tasks[i].s = s + i * lines_per_thread * tasks[i].sstride;
4806 tasks[i].width = width;
/* Clamp the last band so the per-thread heights sum exactly to `height`. */
4807 tasks[i].height = (i + 1) * lines_per_thread;
4808 tasks[i].height = MIN (tasks[i].height, height);
4809 tasks[i].height -= i * lines_per_thread;
4810 tasks[i].data = data;
4812 tasks_p[i] = &tasks[i];
4815 gst_parallelized_task_runner_run (convert->conversion_runner,
4816 (GstParallelizedTaskFunc) convert_AYUV_ARGB_task, (gpointer) tasks_p);
4818 convert_fill_border (convert, dest);
/* convert_AYUV_BGRA_task:
 * Worker: same as the ARGB worker but emits BGRA byte order. */
4822 convert_AYUV_BGRA_task (FConvertPlaneTask * task)
4824 video_orc_convert_AYUV_BGRA (task->d, task->dstride, task->s,
4825 task->sstride, task->data->im[0][0], task->data->im[0][2],
4826 task->data->im[2][1], task->data->im[1][1], task->data->im[1][2],
4827 task->width, task->height);
/* convert_AYUV_BGRA:
 * Fast-path AYUV -> BGRA; identical banding scheme to convert_AYUV_ARGB,
 * only the Orc kernel differs. */
4831 convert_AYUV_BGRA (GstVideoConverter * convert, const GstVideoFrame * src,
4832 GstVideoFrame * dest)
4834 gint width = convert->in_width;
4835 gint height = convert->in_height;
4836 MatrixData *data = &convert->convert_matrix;
4838 FConvertPlaneTask *tasks;
4839 FConvertPlaneTask **tasks_p;
4841 gint lines_per_thread;
4844 s = FRAME_GET_LINE (src, convert->in_y);
4845 s += (convert->in_x * 4);
4846 d = FRAME_GET_LINE (dest, convert->out_y);
4847 d += (convert->out_x * 4);
4849 n_threads = convert->conversion_runner->n_threads;
4850 tasks = g_newa (FConvertPlaneTask, n_threads);
4851 tasks_p = g_newa (FConvertPlaneTask *, n_threads);
4853 lines_per_thread = (height + n_threads - 1) / n_threads;
4855 for (i = 0; i < n_threads; i++) {
4856 tasks[i].dstride = FRAME_GET_STRIDE (dest);
4857 tasks[i].sstride = FRAME_GET_STRIDE (src);
4858 tasks[i].d = d + i * lines_per_thread * tasks[i].dstride;
4859 tasks[i].s = s + i * lines_per_thread * tasks[i].sstride;
4861 tasks[i].width = width;
/* Clamp the last band so the per-thread heights sum exactly to `height`. */
4862 tasks[i].height = (i + 1) * lines_per_thread;
4863 tasks[i].height = MIN (tasks[i].height, height);
4864 tasks[i].height -= i * lines_per_thread;
4865 tasks[i].data = data;
4867 tasks_p[i] = &tasks[i];
4870 gst_parallelized_task_runner_run (convert->conversion_runner,
4871 (GstParallelizedTaskFunc) convert_AYUV_BGRA_task, (gpointer) tasks_p);
4873 convert_fill_border (convert, dest);
/* convert_AYUV_ABGR_task:
 * Worker: same as the ARGB worker but emits ABGR byte order. */
4877 convert_AYUV_ABGR_task (FConvertPlaneTask * task)
4879 video_orc_convert_AYUV_ABGR (task->d, task->dstride, task->s,
4880 task->sstride, task->data->im[0][0], task->data->im[0][2],
4881 task->data->im[2][1], task->data->im[1][1], task->data->im[1][2],
4882 task->width, task->height);
/* convert_AYUV_ABGR:
 * Fast-path AYUV -> ABGR; identical banding scheme to convert_AYUV_ARGB,
 * only the Orc kernel differs. */
4886 convert_AYUV_ABGR (GstVideoConverter * convert, const GstVideoFrame * src,
4887 GstVideoFrame * dest)
4889 gint width = convert->in_width;
4890 gint height = convert->in_height;
4891 MatrixData *data = &convert->convert_matrix;
4893 FConvertPlaneTask *tasks;
4894 FConvertPlaneTask **tasks_p;
4896 gint lines_per_thread;
4899 s = FRAME_GET_LINE (src, convert->in_y);
4900 s += (convert->in_x * 4);
4901 d = FRAME_GET_LINE (dest, convert->out_y);
4902 d += (convert->out_x * 4);
4904 n_threads = convert->conversion_runner->n_threads;
4905 tasks = g_newa (FConvertPlaneTask, n_threads);
4906 tasks_p = g_newa (FConvertPlaneTask *, n_threads);
4908 lines_per_thread = (height + n_threads - 1) / n_threads;
4910 for (i = 0; i < n_threads; i++) {
4911 tasks[i].dstride = FRAME_GET_STRIDE (dest);
4912 tasks[i].sstride = FRAME_GET_STRIDE (src);
4913 tasks[i].d = d + i * lines_per_thread * tasks[i].dstride;
4914 tasks[i].s = s + i * lines_per_thread * tasks[i].sstride;
4916 tasks[i].width = width;
/* Clamp the last band so the per-thread heights sum exactly to `height`. */
4917 tasks[i].height = (i + 1) * lines_per_thread;
4918 tasks[i].height = MIN (tasks[i].height, height);
4919 tasks[i].height -= i * lines_per_thread;
4920 tasks[i].data = data;
4922 tasks_p[i] = &tasks[i];
4925 gst_parallelized_task_runner_run (convert->conversion_runner,
4926 (GstParallelizedTaskFunc) convert_AYUV_ABGR_task, (gpointer) tasks_p);
4928 convert_fill_border (convert, dest);
/* convert_AYUV_RGBA_task:
 * Worker: same as the ARGB worker but emits RGBA byte order. */
4932 convert_AYUV_RGBA_task (FConvertPlaneTask * task)
4934 video_orc_convert_AYUV_RGBA (task->d, task->dstride, task->s,
4935 task->sstride, task->data->im[0][0], task->data->im[0][2],
4936 task->data->im[2][1], task->data->im[1][1], task->data->im[1][2],
4937 task->width, task->height);
/* convert_AYUV_RGBA:
 * Fast-path AYUV -> RGBA; identical banding scheme to convert_AYUV_ARGB,
 * only the Orc kernel differs. */
4941 convert_AYUV_RGBA (GstVideoConverter * convert, const GstVideoFrame * src,
4942 GstVideoFrame * dest)
4944 gint width = convert->in_width;
4945 gint height = convert->in_height;
4946 MatrixData *data = &convert->convert_matrix;
4948 FConvertPlaneTask *tasks;
4949 FConvertPlaneTask **tasks_p;
4951 gint lines_per_thread;
4954 s = FRAME_GET_LINE (src, convert->in_y);
4955 s += (convert->in_x * 4);
4956 d = FRAME_GET_LINE (dest, convert->out_y);
4957 d += (convert->out_x * 4);
4959 n_threads = convert->conversion_runner->n_threads;
4960 tasks = g_newa (FConvertPlaneTask, n_threads);
4961 tasks_p = g_newa (FConvertPlaneTask *, n_threads);
4963 lines_per_thread = (height + n_threads - 1) / n_threads;
4965 for (i = 0; i < n_threads; i++) {
4966 tasks[i].dstride = FRAME_GET_STRIDE (dest);
4967 tasks[i].sstride = FRAME_GET_STRIDE (src);
4968 tasks[i].d = d + i * lines_per_thread * tasks[i].dstride;
4969 tasks[i].s = s + i * lines_per_thread * tasks[i].sstride;
4971 tasks[i].width = width;
/* Clamp the last band so the per-thread heights sum exactly to `height`. */
4972 tasks[i].height = (i + 1) * lines_per_thread;
4973 tasks[i].height = MIN (tasks[i].height, height);
4974 tasks[i].height -= i * lines_per_thread;
4975 tasks[i].data = data;
4977 tasks_p[i] = &tasks[i];
4980 gst_parallelized_task_runner_run (convert->conversion_runner,
4981 (GstParallelizedTaskFunc) convert_AYUV_RGBA_task, (gpointer) tasks_p);
4983 convert_fill_border (convert, dest);
/* convert_I420_BGRA_task:
 * Worker: converts the [height_0, height_1) line range of I420 to BGRA.
 * Works line-by-line because chroma is vertically subsampled: each output
 * line re-fetches the U/V line at (line >> 1). */
4988 convert_I420_BGRA_task (FConvertTask * task)
4992 for (i = task->height_0; i < task->height_1; i++) {
4993 guint8 *sy, *su, *sv, *d;
4995 d = FRAME_GET_LINE (task->dest, i + task->out_y);
4996 d += (task->out_x * 4);
4997 sy = FRAME_GET_Y_LINE (task->src, i + task->in_y);
4999 su = FRAME_GET_U_LINE (task->src, (i + task->in_y) >> 1);
5000 su += (task->in_x >> 1);
5001 sv = FRAME_GET_V_LINE (task->src, (i + task->in_y) >> 1);
5002 sv += (task->in_x >> 1);
/* BGRA memory layout: pick the Orc kernel matching host byte order. */
5004 #if G_BYTE_ORDER == G_LITTLE_ENDIAN
5005 video_orc_convert_I420_BGRA (d, sy, su, sv,
5006 task->data->im[0][0], task->data->im[0][2],
5007 task->data->im[2][1], task->data->im[1][1], task->data->im[1][2],
5010 video_orc_convert_I420_ARGB (d, sy, su, sv,
5011 task->data->im[0][0], task->data->im[0][2],
5012 task->data->im[2][1], task->data->im[1][1], task->data->im[1][2],
/* convert_I420_BGRA:
 * Fast-path I420 -> BGRA.  Each thread gets a [height_0, height_1) line
 * range instead of precomputed pointers, since the worker resolves plane
 * lines itself. */
5019 convert_I420_BGRA (GstVideoConverter * convert, const GstVideoFrame * src,
5020 GstVideoFrame * dest)
5023 gint width = convert->in_width;
5024 gint height = convert->in_height;
5025 MatrixData *data = &convert->convert_matrix;
5026 FConvertTask *tasks;
5027 FConvertTask **tasks_p;
5029 gint lines_per_thread;
5031 n_threads = convert->conversion_runner->n_threads;
5032 tasks = g_newa (FConvertTask, n_threads);
5033 tasks_p = g_newa (FConvertTask *, n_threads);
5035 lines_per_thread = (height + n_threads - 1) / n_threads;
5037 for (i = 0; i < n_threads; i++) {
5039 tasks[i].dest = dest;
5041 tasks[i].width = width;
5042 tasks[i].data = data;
5043 tasks[i].in_x = convert->in_x;
5044 tasks[i].in_y = convert->in_y;
5045 tasks[i].out_x = convert->out_x;
5046 tasks[i].out_y = convert->out_y;
/* Half-open line range for this thread, clamped to the frame height. */
5048 tasks[i].height_0 = i * lines_per_thread;
5049 tasks[i].height_1 = tasks[i].height_0 + lines_per_thread;
5050 tasks[i].height_1 = MIN (height, tasks[i].height_1);
5052 tasks_p[i] = &tasks[i];
5055 gst_parallelized_task_runner_run (convert->conversion_runner,
5056 (GstParallelizedTaskFunc) convert_I420_BGRA_task, (gpointer) tasks_p);
5058 convert_fill_border (convert, dest);
/* convert_I420_ARGB_task:
 * Worker: mirror of the BGRA worker with the byte-order kernel choice
 * swapped, so ARGB memory layout is produced on either endianness. */
5062 convert_I420_ARGB_task (FConvertTask * task)
5066 for (i = task->height_0; i < task->height_1; i++) {
5067 guint8 *sy, *su, *sv, *d;
5069 d = FRAME_GET_LINE (task->dest, i + task->out_y);
5070 d += (task->out_x * 4);
5071 sy = FRAME_GET_Y_LINE (task->src, i + task->in_y);
5073 su = FRAME_GET_U_LINE (task->src, (i + task->in_y) >> 1);
5074 su += (task->in_x >> 1);
5075 sv = FRAME_GET_V_LINE (task->src, (i + task->in_y) >> 1);
5076 sv += (task->in_x >> 1);
5078 #if G_BYTE_ORDER == G_LITTLE_ENDIAN
5079 video_orc_convert_I420_ARGB (d, sy, su, sv,
5080 task->data->im[0][0], task->data->im[0][2],
5081 task->data->im[2][1], task->data->im[1][1], task->data->im[1][2],
5084 video_orc_convert_I420_BGRA (d, sy, su, sv,
5085 task->data->im[0][0], task->data->im[0][2],
5086 task->data->im[2][1], task->data->im[1][1], task->data->im[1][2],
/* convert_I420_ARGB:
 * Fast-path I420 -> ARGB; identical thread/line-range setup to
 * convert_I420_BGRA, only the worker differs. */
5093 convert_I420_ARGB (GstVideoConverter * convert, const GstVideoFrame * src,
5094 GstVideoFrame * dest)
5097 gint width = convert->in_width;
5098 gint height = convert->in_height;
5099 MatrixData *data = &convert->convert_matrix;
5100 FConvertTask *tasks;
5101 FConvertTask **tasks_p;
5103 gint lines_per_thread;
5105 n_threads = convert->conversion_runner->n_threads;
5106 tasks = g_newa (FConvertTask, n_threads);
5107 tasks_p = g_newa (FConvertTask *, n_threads);
5109 lines_per_thread = (height + n_threads - 1) / n_threads;
5111 for (i = 0; i < n_threads; i++) {
5113 tasks[i].dest = dest;
5115 tasks[i].width = width;
5116 tasks[i].data = data;
5117 tasks[i].in_x = convert->in_x;
5118 tasks[i].in_y = convert->in_y;
5119 tasks[i].out_x = convert->out_x;
5120 tasks[i].out_y = convert->out_y;
/* Half-open line range for this thread, clamped to the frame height. */
5122 tasks[i].height_0 = i * lines_per_thread;
5123 tasks[i].height_1 = tasks[i].height_0 + lines_per_thread;
5124 tasks[i].height_1 = MIN (height, tasks[i].height_1);
5126 tasks_p[i] = &tasks[i];
5129 gst_parallelized_task_runner_run (convert->conversion_runner,
5130 (GstParallelizedTaskFunc) convert_I420_ARGB_task, (gpointer) tasks_p);
5132 convert_fill_border (convert, dest);
/* convert_I420_pack_ARGB_task:
 * Worker: converts each I420 line to ARGB in task->tmpline, then uses the
 * destination format's pack_func to write it out — this covers packed RGB
 * variants that have no dedicated Orc fast path. */
5136 convert_I420_pack_ARGB_task (FConvertTask * task)
5139 gpointer d[GST_VIDEO_MAX_PLANES];
5141 d[0] = FRAME_GET_LINE (task->dest, 0);
5144 task->out_x * GST_VIDEO_FORMAT_INFO_PSTRIDE (task->dest->info.finfo, 0);
5146 for (i = task->height_0; i < task->height_1; i++) {
5147 guint8 *sy, *su, *sv;
5149 sy = FRAME_GET_Y_LINE (task->src, i + task->in_y);
5151 su = FRAME_GET_U_LINE (task->src, (i + task->in_y) >> 1);
5152 su += (task->in_x >> 1);
5153 sv = FRAME_GET_V_LINE (task->src, (i + task->in_y) >> 1);
5154 sv += (task->in_x >> 1);
/* Produce host-order ARGB in the temporary line buffer. */
5156 #if G_BYTE_ORDER == G_LITTLE_ENDIAN
5157 video_orc_convert_I420_ARGB (task->tmpline, sy, su, sv,
5158 task->data->im[0][0], task->data->im[0][2],
5159 task->data->im[2][1], task->data->im[1][1], task->data->im[1][2],
5162 video_orc_convert_I420_BGRA (task->tmpline, sy, su, sv,
5163 task->data->im[0][0], task->data->im[0][2],
5164 task->data->im[2][1], task->data->im[1][1], task->data->im[1][2],
/* Let the destination format pack the ARGB line into its own layout. */
5167 task->dest->info.finfo->pack_func (task->dest->info.finfo,
5168 (GST_VIDEO_FRAME_IS_INTERLACED (task->dest) ?
5169 GST_VIDEO_PACK_FLAG_INTERLACED :
5170 GST_VIDEO_PACK_FLAG_NONE),
5171 task->tmpline, 0, d, task->dest->info.stride,
5172 task->dest->info.chroma_site, i + task->out_y, task->width);
/* convert_I420_pack_ARGB:
 * Generic I420 -> packed-RGB path; each thread additionally gets its own
 * tmpline buffer so the workers never share scratch memory. */
5177 convert_I420_pack_ARGB (GstVideoConverter * convert, const GstVideoFrame * src,
5178 GstVideoFrame * dest)
5181 gint width = convert->in_width;
5182 gint height = convert->in_height;
5183 MatrixData *data = &convert->convert_matrix;
5184 FConvertTask *tasks;
5185 FConvertTask **tasks_p;
5187 gint lines_per_thread;
5189 n_threads = convert->conversion_runner->n_threads;
5190 tasks = g_newa (FConvertTask, n_threads);
5191 tasks_p = g_newa (FConvertTask *, n_threads);
5193 lines_per_thread = (height + n_threads - 1) / n_threads;
5195 for (i = 0; i < n_threads; i++) {
5197 tasks[i].dest = dest;
5199 tasks[i].width = width;
5200 tasks[i].data = data;
5201 tasks[i].in_x = convert->in_x;
5202 tasks[i].in_y = convert->in_y;
5203 tasks[i].out_x = convert->out_x;
5204 tasks[i].out_y = convert->out_y;
/* Per-thread scratch line, allocated elsewhere on the converter. */
5205 tasks[i].tmpline = convert->tmpline[i];
5207 tasks[i].height_0 = i * lines_per_thread;
5208 tasks[i].height_1 = tasks[i].height_0 + lines_per_thread;
5209 tasks[i].height_1 = MIN (height, tasks[i].height_1);
5211 tasks_p[i] = &tasks[i];
5214 gst_parallelized_task_runner_run (convert->conversion_runner,
5215 (GstParallelizedTaskFunc) convert_I420_pack_ARGB_task,
5216 (gpointer) tasks_p);
5218 convert_fill_border (convert, dest);
/* memset_u24: fill `n` 3-byte pixels with the color in col[0..2]
 * (loop body elided in this view — presumably writes col per pixel). */
5222 memset_u24 (guint8 * data, guint8 col[3], unsigned int n)
5226 for (i = 0; i < n; i++) {
/* memset_u32_16: fill pairs of 16-bit values (e.g. YUY2-style macropixels)
 * with the 4-byte pattern in col; steps by 2 per iteration.
 * (loop body elided in this view.) */
5235 memset_u32_16 (guint8 * data, guint8 col[4], unsigned int n)
5239 for (i = 0; i < n; i += 2) {
/* MAKE_BORDER_FUNC(func):
 * Statement template used by convert_fill_border.  With k, dest, col,
 * out_* and pstride in scope, it paints the border around the converted
 * rectangle using `func` as the pixel-run filler:
 *   - full rows above out_y and below out_y + out_height,
 *   - the left (lb_width) and right (rb_width) margins of each row in
 *     between.
 * No comments are placed on the continued lines below: a trailing
 * backslash inside a comment would break the macro. */
5250 #define MAKE_BORDER_FUNC(func) \
5251 for (i = 0; i < out_y; i++) \
5252 func (FRAME_GET_PLANE_LINE (dest, k, i), col, out_maxwidth); \
5253 if (rb_width || lb_width) { \
5254 for (i = 0; i < out_height; i++) { \
5255 guint8 *d = FRAME_GET_PLANE_LINE (dest, k, i + out_y); \
5257 func (d, col, lb_width); \
5259 func (d + (pstride * r_border), col, rb_width); \
5262 for (i = out_y + out_height; i < out_maxheight; i++) \
5263 func (FRAME_GET_PLANE_LINE (dest, k, i), col, out_maxwidth); \
/* convert_fill_border:
 * Paint the configured border color around the converted rectangle of
 * every plane of @dest.  No-op unless border filling was requested and a
 * border line was prepared.  Per plane it scales the output rectangle to
 * the plane's subsampling and dispatches on the pixel-group size to the
 * matching fill primitive via MAKE_BORDER_FUNC. */
5266 convert_fill_border (GstVideoConverter * convert, GstVideoFrame * dest)
5269 const GstVideoFormatInfo *out_finfo;
5271 if (!convert->fill_border || !convert->borderline)
5274 out_finfo = convert->out_info.finfo;
5276 n_planes = GST_VIDEO_FRAME_N_PLANES (dest);
5278 for (k = 0; k < n_planes; k++) {
5279 gint i, out_x, out_y, out_width, out_height, pstride, pgroup;
5280 gint r_border, lb_width, rb_width;
5281 gint out_maxwidth, out_maxheight;
/* Scale the output rectangle to this plane's subsampling. */
5284 out_x = GST_VIDEO_FORMAT_INFO_SCALE_WIDTH (out_finfo, k, convert->out_x);
5285 out_y = GST_VIDEO_FORMAT_INFO_SCALE_HEIGHT (out_finfo, k, convert->out_y);
5287 GST_VIDEO_FORMAT_INFO_SCALE_WIDTH (out_finfo, k, convert->out_width);
5289 GST_VIDEO_FORMAT_INFO_SCALE_HEIGHT (out_finfo, k, convert->out_height);
5291 GST_VIDEO_FORMAT_INFO_SCALE_WIDTH (out_finfo, k, convert->out_maxwidth);
5293 GST_VIDEO_FORMAT_INFO_SCALE_HEIGHT (out_finfo, k,
5294 convert->out_maxheight);
5296 pstride = GST_VIDEO_FORMAT_INFO_PSTRIDE (out_finfo, k);
/* Packed-YUV formats work on 2-pixel macropixels: widen accordingly. */
5298 switch (GST_VIDEO_FORMAT_INFO_FORMAT (out_finfo)) {
5299 case GST_VIDEO_FORMAT_YUY2:
5300 case GST_VIDEO_FORMAT_YVYU:
5301 case GST_VIDEO_FORMAT_UYVY:
5303 out_maxwidth = GST_ROUND_UP_2 (out_maxwidth);
/* Left/right border widths around the converted rectangle. */
5310 r_border = out_x + out_width;
5311 rb_width = out_maxwidth - r_border;
5314 borders = &convert->borders[k];
/* Dispatch on the pixel-group byte size (selector lines elided in this
 * view — presumably a switch on pgroup). */
5319 guint8 col = ((guint8 *) borders)[0];
5320 MAKE_BORDER_FUNC (memset);
5325 guint16 col = ((guint16 *) borders)[0];
5326 MAKE_BORDER_FUNC (video_orc_splat_u16);
5332 col[0] = ((guint8 *) borders)[0];
5333 col[1] = ((guint8 *) borders)[1];
5334 col[2] = ((guint8 *) borders)[2];
5335 MAKE_BORDER_FUNC (memset_u24);
5340 guint32 col = ((guint32 *) borders)[0];
5341 MAKE_BORDER_FUNC (video_orc_splat_u32);
5346 guint64 col = ((guint64 *) borders)[0];
5347 MAKE_BORDER_FUNC (video_orc_splat_u64);
/* Packed 4:2:2: swap the chroma bytes when the right border starts on an
 * odd pixel so the U/V phase stays correct. */
5353 col[0] = ((guint8 *) borders)[0];
5354 col[2] = ((guint8 *) borders)[2];
5355 col[1] = ((guint8 *) borders)[r_border & 1 ? 3 : 1];
5356 col[3] = ((guint8 *) borders)[r_border & 1 ? 1 : 3];
5357 MAKE_BORDER_FUNC (memset_u32_16);
/* Fields of the per-thread simple-scale task struct (struct head elided
 * in this view): source pointers — s2 is the second input line for the
 * vertical 2:1 paths — and the source/destination strides. */
5368 const guint8 *s, *s2;
5370 gint sstride, dstride;
/* convert_plane_fill_task:
 * Worker: fill a width x height rectangle with a constant value using the
 * 2D memset Orc kernel. */
5376 convert_plane_fill_task (FSimpleScaleTask * task)
5378 video_orc_memset_2d (task->d, task->dstride,
5379 task->fill, task->width, task->height);
/* convert_plane_fill:
 * Fill destination plane @plane with the constant convert->ffill[plane]
 * (used e.g. to write fixed chroma when the source has none), split into
 * per-thread horizontal bands. */
5383 convert_plane_fill (GstVideoConverter * convert,
5384 const GstVideoFrame * src, GstVideoFrame * dest, gint plane)
5387 FSimpleScaleTask *tasks;
5388 FSimpleScaleTask **tasks_p;
5390 gint lines_per_thread;
5393 d = FRAME_GET_PLANE_LINE (dest, plane, convert->fout_y[plane]);
5394 d += convert->fout_x[plane];
5396 n_threads = convert->conversion_runner->n_threads;
5397 tasks = g_newa (FSimpleScaleTask, n_threads);
5398 tasks_p = g_newa (FSimpleScaleTask *, n_threads);
5399 lines_per_thread = (convert->fout_height[plane] + n_threads - 1) / n_threads;
5401 for (i = 0; i < n_threads; i++) {
/* NOTE(review): the band offset advances by fout_width while the fill
 * kernel advances by dstride — confirm width == stride is guaranteed
 * here, otherwise bands land at the wrong lines when dstride differs. */
5402 tasks[i].d = d + i * lines_per_thread * convert->fout_width[plane];
5404 tasks[i].fill = convert->ffill[plane];
5405 tasks[i].width = convert->fout_width[plane];
/* Clamp the last band so the per-thread heights sum to the plane height. */
5406 tasks[i].height = (i + 1) * lines_per_thread;
5407 tasks[i].height = MIN (tasks[i].height, convert->fout_height[plane]);
5408 tasks[i].height -= i * lines_per_thread;
5409 tasks[i].dstride = FRAME_GET_PLANE_STRIDE (dest, plane);
5411 tasks_p[i] = &tasks[i];
5414 gst_parallelized_task_runner_run (convert->conversion_runner,
5415 (GstParallelizedTaskFunc) convert_plane_fill_task, (gpointer) tasks_p);
/* convert_plane_h_double_task:
 * Worker: horizontally upsample a band 2x (4:2:2 -> 4:4:4 chroma);
 * width/2 is the number of input chroma pairs. */
5419 convert_plane_h_double_task (FSimpleScaleTask * task)
5421 video_orc_planar_chroma_422_444 (task->d,
5422 task->dstride, task->s, task->sstride, task->width / 2, task->height);
/* convert_plane_h_double:
 * Double the horizontal resolution of source plane fsplane[plane] into
 * destination plane @plane, banded across the converter's threads. */
5426 convert_plane_h_double (GstVideoConverter * convert,
5427 const GstVideoFrame * src, GstVideoFrame * dest, gint plane)
5430 gint splane = convert->fsplane[plane];
5431 FSimpleScaleTask *tasks;
5432 FSimpleScaleTask **tasks_p;
5434 gint lines_per_thread;
5437 s = FRAME_GET_PLANE_LINE (src, splane, convert->fin_y[splane]);
5438 s += convert->fin_x[splane];
5439 d = FRAME_GET_PLANE_LINE (dest, plane, convert->fout_y[plane]);
5440 d += convert->fout_x[plane];
5442 n_threads = convert->conversion_runner->n_threads;
5443 tasks = g_newa (FSimpleScaleTask, n_threads);
5444 tasks_p = g_newa (FSimpleScaleTask *, n_threads);
5445 lines_per_thread = (convert->fout_height[plane] + n_threads - 1) / n_threads;
5447 for (i = 0; i < n_threads; i++) {
5448 tasks[i].dstride = FRAME_GET_PLANE_STRIDE (dest, plane);
5449 tasks[i].sstride = FRAME_GET_PLANE_STRIDE (src, splane);
5451 tasks[i].d = d + i * lines_per_thread * tasks[i].dstride;
5452 tasks[i].s = s + i * lines_per_thread * tasks[i].sstride;
5454 tasks[i].width = convert->fout_width[plane];
/* Clamp the last band so the per-thread heights sum to the plane height. */
5455 tasks[i].height = (i + 1) * lines_per_thread;
5456 tasks[i].height = MIN (tasks[i].height, convert->fout_height[plane]);
5457 tasks[i].height -= i * lines_per_thread;
5459 tasks_p[i] = &tasks[i];
5462 gst_parallelized_task_runner_run (convert->conversion_runner,
5463 (GstParallelizedTaskFunc) convert_plane_h_double_task,
5464 (gpointer) tasks_p);
/* convert_plane_h_halve_task:
 * Worker: horizontally downsample a band 2x (4:4:4 -> 4:2:2 chroma);
 * width here is the number of output samples. */
5468 convert_plane_h_halve_task (FSimpleScaleTask * task)
5470 video_orc_planar_chroma_444_422 (task->d,
5471 task->dstride, task->s, task->sstride, task->width, task->height);
/* convert_plane_h_halve:
 * Halve the horizontal resolution of source plane fsplane[plane] into
 * destination plane @plane, banded across the converter's threads. */
5475 convert_plane_h_halve (GstVideoConverter * convert,
5476 const GstVideoFrame * src, GstVideoFrame * dest, gint plane)
5479 gint splane = convert->fsplane[plane];
5480 FSimpleScaleTask *tasks;
5481 FSimpleScaleTask **tasks_p;
5483 gint lines_per_thread;
5486 s = FRAME_GET_PLANE_LINE (src, splane, convert->fin_y[splane]);
5487 s += convert->fin_x[splane];
5488 d = FRAME_GET_PLANE_LINE (dest, plane, convert->fout_y[plane]);
5489 d += convert->fout_x[plane];
5491 n_threads = convert->conversion_runner->n_threads;
5492 tasks = g_newa (FSimpleScaleTask, n_threads);
5493 tasks_p = g_newa (FSimpleScaleTask *, n_threads);
5494 lines_per_thread = (convert->fout_height[plane] + n_threads - 1) / n_threads;
5496 for (i = 0; i < n_threads; i++) {
5497 tasks[i].dstride = FRAME_GET_PLANE_STRIDE (dest, plane);
5498 tasks[i].sstride = FRAME_GET_PLANE_STRIDE (src, splane);
5500 tasks[i].d = d + i * lines_per_thread * tasks[i].dstride;
5501 tasks[i].s = s + i * lines_per_thread * tasks[i].sstride;
5503 tasks[i].width = convert->fout_width[plane];
/* Clamp the last band so the per-thread heights sum to the plane height. */
5504 tasks[i].height = (i + 1) * lines_per_thread;
5505 tasks[i].height = MIN (tasks[i].height, convert->fout_height[plane]);
5506 tasks[i].height -= i * lines_per_thread;
5508 tasks_p[i] = &tasks[i];
5511 gst_parallelized_task_runner_run (convert->conversion_runner,
5512 (GstParallelizedTaskFunc) convert_plane_h_halve_task, (gpointer) tasks_p);
/* convert_plane_v_double_task:
 * Worker: vertically upsample a band 2x (4:2:0 -> 4:2:2 chroma).  The
 * kernel writes two interleaved output lines (d and d2) per source line,
 * hence the 2*dstride line pitch and height/2 source lines. */
5516 convert_plane_v_double_task (FSimpleScaleTask * task)
5518 video_orc_planar_chroma_420_422 (task->d, 2 * task->dstride, task->d2,
5519 2 * task->dstride, task->s, task->sstride, task->width, task->height / 2)
;
/* convert_plane_v_double:
 * Double the vertical resolution of source plane fsplane[plane] into
 * destination plane @plane.  d1/d2 point at even/odd output lines. */
5523 convert_plane_v_double (GstVideoConverter * convert,
5524 const GstVideoFrame * src, GstVideoFrame * dest, gint plane)
5526 guint8 *s, *d1, *d2;
5527 gint ds, splane = convert->fsplane[plane];
5528 FSimpleScaleTask *tasks;
5529 FSimpleScaleTask **tasks_p;
5531 gint lines_per_thread;
5534 s = FRAME_GET_PLANE_LINE (src, splane, convert->fin_y[splane]);
5535 s += convert->fin_x[splane];
5536 d1 = FRAME_GET_PLANE_LINE (dest, plane, convert->fout_y[plane]);
5537 d1 += convert->fout_x[plane];
5538 d2 = FRAME_GET_PLANE_LINE (dest, plane, convert->fout_y[plane] + 1);
5539 d2 += convert->fout_x[plane];
5540 ds = FRAME_GET_PLANE_STRIDE (dest, plane);
5542 n_threads = convert->conversion_runner->n_threads;
5543 tasks = g_newa (FSimpleScaleTask, n_threads);
5544 tasks_p = g_newa (FSimpleScaleTask *, n_threads);
/* Band height rounded up to even so every band starts on a line pair. */
5546 GST_ROUND_UP_2 ((convert->fout_height[plane] + n_threads -
5549 for (i = 0; i < n_threads; i++) {
5550 tasks[i].d = d1 + i * lines_per_thread * ds;
5551 tasks[i].d2 = d2 + i * lines_per_thread * ds;
5552 tasks[i].dstride = ds;
5553 tasks[i].sstride = FRAME_GET_PLANE_STRIDE (src, splane);
/* Source advances half as fast: one input line per two output lines. */
5554 tasks[i].s = s + i * lines_per_thread * tasks[i].sstride / 2;
5556 tasks[i].width = convert->fout_width[plane];
/* Clamp the last band so the per-thread heights sum to the plane height. */
5557 tasks[i].height = (i + 1) * lines_per_thread;
5558 tasks[i].height = MIN (tasks[i].height, convert->fout_height[plane]);
5559 tasks[i].height -= i * lines_per_thread;
5561 tasks_p[i] = &tasks[i];
5564 gst_parallelized_task_runner_run (convert->conversion_runner,
5565 (GstParallelizedTaskFunc) convert_plane_v_double_task,
5566 (gpointer) tasks_p);
/* convert_plane_v_halve_task:
 * Worker: vertically downsample a band 2x (4:2:2 -> 4:2:0 chroma),
 * averaging line pairs s/s2; input pitch is 2*sstride per output line.
 * (the trailing height argument line is elided in this view.) */
5570 convert_plane_v_halve_task (FSimpleScaleTask * task)
5572 video_orc_planar_chroma_422_420 (task->d, task->dstride, task->s,
5573 2 * task->sstride, task->s2, 2 * task->sstride, task->width,
/* convert_plane_v_halve:
 * Halve the vertical resolution of source plane fsplane[plane] into
 * destination plane @plane.  s1/s2 point at consecutive source lines. */
5578 convert_plane_v_halve (GstVideoConverter * convert,
5579 const GstVideoFrame * src, GstVideoFrame * dest, gint plane)
5581 guint8 *s1, *s2, *d;
5582 gint ss, ds, splane = convert->fsplane[plane];
5583 FSimpleScaleTask *tasks;
5584 FSimpleScaleTask **tasks_p;
5586 gint lines_per_thread;
5589 s1 = FRAME_GET_PLANE_LINE (src, splane, convert->fin_y[splane]);
5590 s1 += convert->fin_x[splane];
5591 s2 = FRAME_GET_PLANE_LINE (src, splane, convert->fin_y[splane] + 1);
5592 s2 += convert->fin_x[splane];
5593 d = FRAME_GET_PLANE_LINE (dest, plane, convert->fout_y[plane]);
5594 d += convert->fout_x[plane];
5596 ss = FRAME_GET_PLANE_STRIDE (src, splane);
5597 ds = FRAME_GET_PLANE_STRIDE (dest, plane);
5599 n_threads = convert->conversion_runner->n_threads;
5600 tasks = g_newa (FSimpleScaleTask, n_threads);
5601 tasks_p = g_newa (FSimpleScaleTask *, n_threads);
5602 lines_per_thread = (convert->fout_height[plane] + n_threads - 1) / n_threads;
5604 for (i = 0; i < n_threads; i++) {
5605 tasks[i].d = d + i * lines_per_thread * ds;
5606 tasks[i].dstride = ds;
/* Source advances two input lines per output line. */
5607 tasks[i].s = s1 + i * lines_per_thread * ss * 2;
5608 tasks[i].s2 = s2 + i * lines_per_thread * ss * 2;
5609 tasks[i].sstride = ss;
5611 tasks[i].width = convert->fout_width[plane];
/* Clamp the last band so the per-thread heights sum to the plane height. */
5612 tasks[i].height = (i + 1) * lines_per_thread;
5613 tasks[i].height = MIN (tasks[i].height, convert->fout_height[plane]);
5614 tasks[i].height -= i * lines_per_thread;
5616 tasks_p[i] = &tasks[i];
5619 gst_parallelized_task_runner_run (convert->conversion_runner,
5620 (GstParallelizedTaskFunc) convert_plane_v_halve_task, (gpointer) tasks_p);
/* convert_plane_hv_double_task:
 * Worker: upsample a band 2x both horizontally and vertically
 * (4:2:0 -> 4:4:4 chroma); (width+1)/2 input pairs, two interleaved
 * output lines (d, d2) per source line. */
5624 convert_plane_hv_double_task (FSimpleScaleTask * task)
5626 video_orc_planar_chroma_420_444 (task->d, 2 * task->dstride, task->d2,
5627 2 * task->dstride, task->s, task->sstride, (task->width + 1) / 2,
/* convert_plane_hv_double:
 * Double both dimensions of source plane fsplane[plane] into destination
 * plane @plane.  d1/d2 point at even/odd output lines. */
5632 convert_plane_hv_double (GstVideoConverter * convert,
5633 const GstVideoFrame * src, GstVideoFrame * dest, gint plane)
5635 guint8 *s, *d1, *d2;
5636 gint ss, ds, splane = convert->fsplane[plane];
5637 FSimpleScaleTask *tasks;
5638 FSimpleScaleTask **tasks_p;
5640 gint lines_per_thread;
5643 s = FRAME_GET_PLANE_LINE (src, splane, convert->fin_y[splane]);
5644 s += convert->fin_x[splane];
5645 d1 = FRAME_GET_PLANE_LINE (dest, plane, convert->fout_y[plane]);
5646 d1 += convert->fout_x[plane];
5647 d2 = FRAME_GET_PLANE_LINE (dest, plane, convert->fout_y[plane] + 1);
5648 d2 += convert->fout_x[plane];
5649 ss = FRAME_GET_PLANE_STRIDE (src, splane);
5650 ds = FRAME_GET_PLANE_STRIDE (dest, plane);
5652 n_threads = convert->conversion_runner->n_threads;
5653 tasks = g_newa (FSimpleScaleTask, n_threads);
5654 tasks_p = g_newa (FSimpleScaleTask *, n_threads);
/* Band height rounded up to even so every band starts on a line pair. */
5656 GST_ROUND_UP_2 ((convert->fout_height[plane] + n_threads -
5659 for (i = 0; i < n_threads; i++) {
5660 tasks[i].d = d1 + i * lines_per_thread * ds;
5661 tasks[i].d2 = d2 + i * lines_per_thread * ds;
5662 tasks[i].dstride = ds;
5663 tasks[i].sstride = ss;
/* Source advances half as fast: one input line per two output lines. */
5664 tasks[i].s = s + i * lines_per_thread * ss / 2;
5666 tasks[i].width = convert->fout_width[plane];
/* Clamp the last band so the per-thread heights sum to the plane height. */
5667 tasks[i].height = (i + 1) * lines_per_thread;
5668 tasks[i].height = MIN (tasks[i].height, convert->fout_height[plane]);
5669 tasks[i].height -= i * lines_per_thread;
5671 tasks_p[i] = &tasks[i];
5674 gst_parallelized_task_runner_run (convert->conversion_runner,
5675 (GstParallelizedTaskFunc) convert_plane_hv_double_task,
5676 (gpointer) tasks_p);
/* convert_plane_hv_halve_task:
 * Worker: downsample a band 2x both horizontally and vertically
 * (4:4:4 -> 4:2:0 chroma), averaging line pairs s/s2.
 * (the trailing height argument line is elided in this view.) */
5680 convert_plane_hv_halve_task (FSimpleScaleTask * task)
5682 video_orc_planar_chroma_444_420 (task->d, task->dstride, task->s,
5683 2 * task->sstride, task->s2, 2 * task->sstride, task->width,
/* convert_plane_hv_halve:
 * Halve both dimensions of source plane fsplane[plane] into destination
 * plane @plane.  s1/s2 point at consecutive source lines. */
5688 convert_plane_hv_halve (GstVideoConverter * convert,
5689 const GstVideoFrame * src, GstVideoFrame * dest, gint plane)
5691 guint8 *s1, *s2, *d;
5692 gint ss, ds, splane = convert->fsplane[plane];
5693 FSimpleScaleTask *tasks;
5694 FSimpleScaleTask **tasks_p;
5696 gint lines_per_thread;
5699 s1 = FRAME_GET_PLANE_LINE (src, splane, convert->fin_y[splane]);
5700 s1 += convert->fin_x[splane];
5701 s2 = FRAME_GET_PLANE_LINE (src, splane, convert->fin_y[splane] + 1);
5702 s2 += convert->fin_x[splane];
5703 d = FRAME_GET_PLANE_LINE (dest, plane, convert->fout_y[plane]);
5704 d += convert->fout_x[plane];
5705 ss = FRAME_GET_PLANE_STRIDE (src, splane);
5706 ds = FRAME_GET_PLANE_STRIDE (dest, plane);
5708 n_threads = convert->conversion_runner->n_threads;
5709 tasks = g_newa (FSimpleScaleTask, n_threads);
5710 tasks_p = g_newa (FSimpleScaleTask *, n_threads);
5711 lines_per_thread = (convert->fout_height[plane] + n_threads - 1) / n_threads;
5713 for (i = 0; i < n_threads; i++) {
5714 tasks[i].d = d + i * lines_per_thread * ds;
5715 tasks[i].dstride = ds;
/* Source advances two input lines per output line. */
5716 tasks[i].s = s1 + i * lines_per_thread * ss * 2;
5717 tasks[i].s2 = s2 + i * lines_per_thread * ss * 2;
5718 tasks[i].sstride = ss;
5720 tasks[i].width = convert->fout_width[plane];
/* Clamp the last band so the per-thread heights sum to the plane height. */
5721 tasks[i].height = (i + 1) * lines_per_thread;
5722 tasks[i].height = MIN (tasks[i].height, convert->fout_height[plane]);
5723 tasks[i].height -= i * lines_per_thread;
5725 tasks_p[i] = &tasks[i];
5728 gst_parallelized_task_runner_run (convert->conversion_runner,
5729 (GstParallelizedTaskFunc) convert_plane_hv_halve_task,
5730 (gpointer) tasks_p);
/* Fields of the per-thread generic-scale task struct (struct head elided
 * in this view): the per-thread horizontal/vertical scalers, the scale
 * format, and the plane strides. */
5735 GstVideoScaler *h_scaler, *v_scaler;
5736 GstVideoFormat format;
5739 gint sstride, dstride;
/* convert_plane_hv_task:
 * Worker: run the generic 2D scaler over the [y, h) output line range of
 * one plane. */
5744 convert_plane_hv_task (FScaleTask * task)
5746 gst_video_scaler_2d (task->h_scaler, task->v_scaler, task->format,
5747 (guint8 *) task->s, task->sstride,
5748 task->d, task->dstride, task->x, task->y, task->w, task->h);
/* convert_plane_hv:
 * Generic arbitrary-ratio scaling of source plane fsplane[plane] into
 * destination plane @plane.  Each thread gets its own pre-built
 * horizontal/vertical scaler instance (or NULL when that direction needs
 * no scaling) and a [y, h) output line range. */
5752 convert_plane_hv (GstVideoConverter * convert,
5753 const GstVideoFrame * src, GstVideoFrame * dest, gint plane)
5755 gint in_x, in_y, out_x, out_y, out_width, out_height;
5756 GstVideoFormat format;
5757 gint splane = convert->fsplane[plane];
5759 gint sstride, dstride;
5761 FScaleTask **tasks_p;
5762 gint i, n_threads, lines_per_thread;
5764 in_x = convert->fin_x[splane];
5765 in_y = convert->fin_y[splane];
5766 out_x = convert->fout_x[plane];
5767 out_y = convert->fout_y[plane];
5768 out_width = convert->fout_width[plane];
5769 out_height = convert->fout_height[plane];
5770 format = convert->fformat[plane];
5772 s = FRAME_GET_PLANE_LINE (src, splane, in_y);
5774 d = FRAME_GET_PLANE_LINE (dest, plane, out_y);
5777 sstride = FRAME_GET_PLANE_STRIDE (src, splane);
5778 dstride = FRAME_GET_PLANE_STRIDE (dest, plane);
5780 n_threads = convert->conversion_runner->n_threads;
5781 tasks = g_newa (FScaleTask, n_threads);
5782 tasks_p = g_newa (FScaleTask *, n_threads);
5784 lines_per_thread = (out_height + n_threads - 1) / n_threads;
5786 for (i = 0; i < n_threads; i++) {
/* Per-thread scaler instances; NULL when no scaling in that direction. */
5788 convert->fh_scaler[plane].scaler ? convert->
5789 fh_scaler[plane].scaler[i] : NULL;
5791 convert->fv_scaler[plane].scaler ? convert->
5792 fv_scaler[plane].scaler[i] : NULL;
5793 tasks[i].format = format;
5796 tasks[i].sstride = sstride;
5797 tasks[i].dstride = dstride;
5800 tasks[i].w = out_width;
/* y is this band's first output line, h its (clamped) end line. */
5802 tasks[i].y = i * lines_per_thread;
5803 tasks[i].h = tasks[i].y + lines_per_thread;
5804 tasks[i].h = MIN (out_height, tasks[i].h);
5806 tasks_p[i] = &tasks[i];
5809 gst_parallelized_task_runner_run (convert->conversion_runner,
5810 (GstParallelizedTaskFunc) convert_plane_hv_task, (gpointer) tasks_p);
/* Generic plane-scaling conversion: run the per-plane fconvert function
 * configured by setup_scale() on every destination plane, then fill the
 * configured border area of the destination frame. */
5814 convert_scale_planes (GstVideoConverter * convert,
5815 const GstVideoFrame * src, GstVideoFrame * dest)
5819 n_planes = GST_VIDEO_FRAME_N_PLANES (dest);
5820 for (i = 0; i < n_planes; i++) {
/* fconvert[i] may be NULL for planes that need no processing. */
5821 if (convert->fconvert[i])
5822 convert->fconvert[i] (convert, src, dest, i);
5824 convert_fill_border (convert, dest);
/* Convert I420 (planar Y/U/V 4:2:0) to the Tizen SN12 layout (semi-planar
 * Y plane + interleaved UV plane, NV12-like).  Two luma rows are handled
 * per iteration so a single chroma row serves both.
 * NOTE(review): the destination pointer advances and the UV interleaving
 * loop body fall outside this visible span — the apparent double
 * memcpy to mY presumably has an increment between them; confirm against
 * the full source. */
5829 convert_I420_SN12 (GstVideoConverter * convert, const GstVideoFrame * src,
5830 GstVideoFrame * dest )
5832 guint8 *mY, *mUV, *Y, *U, *V;
5835 gboolean interlaced = GST_VIDEO_FRAME_IS_INTERLACED (src);
5837 gint width = convert->in_width;
5838 gint height = convert->in_height;
5840 mY = mUV = Y = U = V = NULL;
5841 mY = GST_VIDEO_FRAME_PLANE_DATA (dest, 0);
5842 mUV = GST_VIDEO_FRAME_PLANE_DATA (dest, 1);
/* Only full row pairs are processed here; an odd trailing row, if any,
 * would be handled outside this visible span. */
5844 for (i = 0; i < GST_ROUND_DOWN_2 (height); i += 2) {
/* For interlaced frames l1/l2 select the matching field lines. */
5845 GET_LINE_OFFSETS (interlaced, i, l1, l2);
5847 Y = FRAME_GET_Y_LINE (src, l1);
5848 memcpy(mY, Y, width);
5850 Y = FRAME_GET_Y_LINE (src, l2);
5851 memcpy(mY, Y, width);
/* One chroma line per two luma lines (4:2:0 subsampling); the loop
 * below walks half-width chroma samples, rounding up for odd widths. */
5854 U = FRAME_GET_U_LINE (src, i >> 1);
5855 V = FRAME_GET_V_LINE (src, i >> 1);
5856 for (j = 0; j < (width + 1) / 2; j++) {
/* Map a video format to the per-plane format used by the plane scaler:
 * 8-bit planar formats scale each plane as GRAY8, 16-bit gray as
 * GRAY16_BE, packed YUV/RGB formats scale as themselves, and semi-planar
 * formats use GRAY8 for the Y plane and NV12 for the interleaved chroma
 * plane.  Formats with no per-plane scaling support hit the assertion.
 * NOTE(review): the switch opener, break statements and the
 * "res = format" lines for the packed group are outside this visible
 * span. */
5864 static GstVideoFormat
5865 get_scale_format (GstVideoFormat format, gint plane)
5867 GstVideoFormat res = GST_VIDEO_FORMAT_UNKNOWN;
/* 8-bit planar / grayscale: every plane is scaled as GRAY8. */
5870 case GST_VIDEO_FORMAT_I420:
5871 case GST_VIDEO_FORMAT_YV12:
5872 case GST_VIDEO_FORMAT_Y41B:
5873 case GST_VIDEO_FORMAT_Y42B:
5874 case GST_VIDEO_FORMAT_Y444:
5875 case GST_VIDEO_FORMAT_GRAY8:
5876 case GST_VIDEO_FORMAT_A420:
5877 case GST_VIDEO_FORMAT_YUV9:
5878 case GST_VIDEO_FORMAT_YVU9:
5879 case GST_VIDEO_FORMAT_GBR:
5880 case GST_VIDEO_FORMAT_GBRA:
5881 res = GST_VIDEO_FORMAT_GRAY8;
/* 16-bit grayscale: both endiannesses scale as GRAY16_BE. */
5883 case GST_VIDEO_FORMAT_GRAY16_BE:
5884 case GST_VIDEO_FORMAT_GRAY16_LE:
5885 res = GST_VIDEO_FORMAT_GRAY16_BE;
/* Packed YUV / RGB single-plane formats: scaled in their own format. */
5887 case GST_VIDEO_FORMAT_YUY2:
5888 case GST_VIDEO_FORMAT_UYVY:
5889 case GST_VIDEO_FORMAT_VYUY:
5890 case GST_VIDEO_FORMAT_YVYU:
5891 case GST_VIDEO_FORMAT_AYUV:
5892 case GST_VIDEO_FORMAT_VUYA:
5893 case GST_VIDEO_FORMAT_RGBx:
5894 case GST_VIDEO_FORMAT_BGRx:
5895 case GST_VIDEO_FORMAT_xRGB:
5896 case GST_VIDEO_FORMAT_xBGR:
5897 case GST_VIDEO_FORMAT_RGBA:
5898 case GST_VIDEO_FORMAT_BGRA:
5899 case GST_VIDEO_FORMAT_ARGB:
5900 case GST_VIDEO_FORMAT_ABGR:
5901 case GST_VIDEO_FORMAT_RGB:
5902 case GST_VIDEO_FORMAT_BGR:
5903 case GST_VIDEO_FORMAT_v308:
5904 case GST_VIDEO_FORMAT_IYU2:
5905 case GST_VIDEO_FORMAT_ARGB64:
5906 case GST_VIDEO_FORMAT_AYUV64:
/* 15/16-bit RGB: two bytes per pixel, scaled with the NV12 (2-byte
 * unit) scaler. */
5909 case GST_VIDEO_FORMAT_RGB15:
5910 case GST_VIDEO_FORMAT_BGR15:
5911 case GST_VIDEO_FORMAT_RGB16:
5912 case GST_VIDEO_FORMAT_BGR16:
5913 res = GST_VIDEO_FORMAT_NV12;
/* Semi-planar: plane 0 (luma) scales as GRAY8, the interleaved chroma
 * plane scales as NV12. */
5915 case GST_VIDEO_FORMAT_NV12:
5916 case GST_VIDEO_FORMAT_NV21:
5917 case GST_VIDEO_FORMAT_NV16:
5918 case GST_VIDEO_FORMAT_NV61:
5919 case GST_VIDEO_FORMAT_NV24:
5920 res = plane == 0 ? GST_VIDEO_FORMAT_GRAY8 : GST_VIDEO_FORMAT_NV12;
/* Everything below has no per-plane scale support; reaching here with
 * one of these formats is a programming error. */
5922 case GST_VIDEO_FORMAT_UNKNOWN:
5923 case GST_VIDEO_FORMAT_ENCODED:
5924 case GST_VIDEO_FORMAT_v210:
5925 case GST_VIDEO_FORMAT_v216:
5926 case GST_VIDEO_FORMAT_Y210:
5927 case GST_VIDEO_FORMAT_Y410:
5928 case GST_VIDEO_FORMAT_UYVP:
5929 case GST_VIDEO_FORMAT_RGB8P:
5930 case GST_VIDEO_FORMAT_IYU1:
5931 case GST_VIDEO_FORMAT_r210:
5932 case GST_VIDEO_FORMAT_I420_10BE:
5933 case GST_VIDEO_FORMAT_I420_10LE:
5934 case GST_VIDEO_FORMAT_I422_10BE:
5935 case GST_VIDEO_FORMAT_I422_10LE:
5936 case GST_VIDEO_FORMAT_Y444_10BE:
5937 case GST_VIDEO_FORMAT_Y444_10LE:
5938 case GST_VIDEO_FORMAT_I420_12BE:
5939 case GST_VIDEO_FORMAT_I420_12LE:
5940 case GST_VIDEO_FORMAT_I422_12BE:
5941 case GST_VIDEO_FORMAT_I422_12LE:
5942 case GST_VIDEO_FORMAT_Y444_12BE:
5943 case GST_VIDEO_FORMAT_Y444_12LE:
5944 case GST_VIDEO_FORMAT_GBR_10BE:
5945 case GST_VIDEO_FORMAT_GBR_10LE:
5946 case GST_VIDEO_FORMAT_GBRA_10BE:
5947 case GST_VIDEO_FORMAT_GBRA_10LE:
5948 case GST_VIDEO_FORMAT_GBR_12BE:
5949 case GST_VIDEO_FORMAT_GBR_12LE:
5950 case GST_VIDEO_FORMAT_GBRA_12BE:
5951 case GST_VIDEO_FORMAT_GBRA_12LE:
5952 case GST_VIDEO_FORMAT_NV12_64Z32:
5953 case GST_VIDEO_FORMAT_A420_10BE:
5954 case GST_VIDEO_FORMAT_A420_10LE:
5955 case GST_VIDEO_FORMAT_A422_10BE:
5956 case GST_VIDEO_FORMAT_A422_10LE:
5957 case GST_VIDEO_FORMAT_A444_10BE:
5958 case GST_VIDEO_FORMAT_A444_10LE:
5959 case GST_VIDEO_FORMAT_P010_10BE:
5960 case GST_VIDEO_FORMAT_P010_10LE:
5961 case GST_VIDEO_FORMAT_GRAY10_LE32:
5962 case GST_VIDEO_FORMAT_NV12_10LE32:
5963 case GST_VIDEO_FORMAT_NV16_10LE32:
5964 case GST_VIDEO_FORMAT_NV12_10LE40:
5965 case GST_VIDEO_FORMAT_BGR10A2_LE:
/* Tizen hardware formats: handled by dedicated paths, never scaled. */
5967 case GST_VIDEO_FORMAT_SN12:
5968 case GST_VIDEO_FORMAT_ST12:
5971 g_assert_not_reached ();
#ifdef TIZEN_FEATURE_VIDEO_MODIFICATION
/* Return whether the format packs Y and UV samples interleaved in a
 * single plane (packed 4:2:2), which requires combined Y/UV horizontal
 * scalers in setup_scale().
 * NOTE(review): the return statements and default case fall outside this
 * visible span. */
5983 is_merge_yuv (GstVideoInfo * info)
5985 switch (GST_VIDEO_INFO_FORMAT (info)) {
5986 case GST_VIDEO_FORMAT_YUY2:
5987 case GST_VIDEO_FORMAT_YVYU:
5988 case GST_VIDEO_FORMAT_UYVY:
5989 case GST_VIDEO_FORMAT_VYUY:
/* Configure the per-plane scaling state (fh_scaler/fv_scaler, fconvert,
 * fin_*/fout_* geometry, fformat, fsplane) used later by
 * convert_scale_planes().  Chooses specialized halve/double fast paths
 * where applicable and creates one scaler instance per runner thread
 * otherwise.
 * NOTE(review): this chunk is missing many original lines (braces,
 * declarations, else branches, interlace ternaries); comments describe
 * only what is visible here. */
5997 setup_scale (GstVideoConverter * convert)
6000 gint method, cr_method, in_width, in_height, out_width, out_height;
6002 GstVideoInfo *in_info, *out_info;
6003 const GstVideoFormatInfo *in_finfo, *out_finfo;
6004 GstVideoFormat in_format, out_format;
6005 gboolean interlaced;
6006 guint n_threads = convert->conversion_runner->n_threads;
6008 in_info = &convert->in_info;
6009 out_info = &convert->out_info;
6011 in_finfo = in_info->finfo;
6012 out_finfo = out_info->finfo;
6014 n_planes = GST_VIDEO_INFO_N_PLANES (out_info);
6016 interlaced = GST_VIDEO_INFO_IS_INTERLACED (&convert->in_info);
/* Resampler options come from the converter's config structure. */
6018 method = GET_OPT_RESAMPLER_METHOD (convert);
6019 if (method == GST_VIDEO_RESAMPLER_METHOD_NEAREST)
6022 cr_method = GET_OPT_CHROMA_RESAMPLER_METHOD (convert);
6023 taps = GET_OPT_RESAMPLER_TAPS (convert);
6025 in_format = GST_VIDEO_INFO_FORMAT (in_info);
6026 out_format = GST_VIDEO_INFO_FORMAT (out_info);
/* Tizen: SN12 frames are converted by a dedicated path, so no scaler
 * setup is performed for them. */
6029 if(out_format == GST_VIDEO_FORMAT_SN12) {
6030 /* do nothing for SN12 output format */
6034 switch (in_format) {
/* 2-byte-per-pixel RGB and 16-bit gray only support nearest
 * resampling; bail out (in a missing line) for other methods. */
6035 case GST_VIDEO_FORMAT_RGB15:
6036 case GST_VIDEO_FORMAT_RGB16:
6037 case GST_VIDEO_FORMAT_BGR15:
6038 case GST_VIDEO_FORMAT_BGR16:
6039 #if G_BYTE_ORDER == G_LITTLE_ENDIAN
6040 case GST_VIDEO_FORMAT_GRAY16_BE:
6042 case GST_VIDEO_FORMAT_GRAY16_LE:
6044 if (method != GST_VIDEO_RESAMPLER_METHOD_NEAREST) {
6045 GST_DEBUG ("%s only with nearest resampling",
6046 gst_video_format_to_string (in_format));
6051 case GST_VIDEO_FORMAT_SN12:
6052 return TRUE; /* do nothing for SN12 format */
6058 in_width = convert->in_width;
6059 in_height = convert->in_height;
6060 out_width = convert->out_width;
6061 out_height = convert->out_height;
/* Single-plane non-gray output (packed YUV/RGB): configure plane 0
 * directly instead of iterating components. */
6063 if (n_planes == 1 && !GST_VIDEO_FORMAT_INFO_IS_GRAY (out_finfo)) {
/* Packed 4:2:2 needs a combined Y+UV horizontal scaler per thread. */
6067 if (is_merge_yuv (in_info)) {
6068 GstVideoScaler *y_scaler, *uv_scaler;
6070 if (in_width != out_width) {
6071 convert->fh_scaler[0].scaler = g_new (GstVideoScaler *, n_threads);
6072 for (j = 0; j < n_threads; j++) {
6074 gst_video_scaler_new (method, GST_VIDEO_SCALER_FLAG_NONE, taps,
6075 GST_VIDEO_FORMAT_INFO_SCALE_WIDTH (in_finfo, GST_VIDEO_COMP_Y,
6076 in_width), GST_VIDEO_FORMAT_INFO_SCALE_WIDTH (out_finfo,
6077 GST_VIDEO_COMP_Y, out_width), convert->config),
6079 gst_video_scaler_new (method, GST_VIDEO_SCALER_FLAG_NONE,
6080 gst_video_scaler_get_max_taps (y_scaler),
6081 GST_VIDEO_FORMAT_INFO_SCALE_WIDTH (in_finfo, GST_VIDEO_COMP_U,
6082 in_width), GST_VIDEO_FORMAT_INFO_SCALE_WIDTH (out_finfo,
6083 GST_VIDEO_COMP_U, out_width), convert->config);
/* The separate Y and UV scalers are merged into one packed-YUV
 * scaler and then freed. */
6085 convert->fh_scaler[0].scaler[j] =
6086 gst_video_scaler_combine_packed_YUV (y_scaler, uv_scaler,
6087 in_format, out_format);
6089 gst_video_scaler_free (y_scaler);
6090 gst_video_scaler_free (uv_scaler);
6093 convert->fh_scaler[0].scaler = NULL;
/* Packed YUV x offsets are forced even so Y/UV pairs stay aligned. */
6096 pstride = GST_VIDEO_FORMAT_INFO_PSTRIDE (out_finfo, GST_VIDEO_COMP_Y);
6097 convert->fin_x[0] = GST_ROUND_UP_2 (convert->in_x) * pstride;
6098 convert->fout_x[0] = GST_ROUND_UP_2 (convert->out_x) * pstride;
6101 if (in_width != out_width && in_width != 0 && out_width != 0) {
6102 convert->fh_scaler[0].scaler = g_new (GstVideoScaler *, n_threads);
6103 for (j = 0; j < n_threads; j++) {
6104 convert->fh_scaler[0].scaler[j] =
6105 gst_video_scaler_new (method, GST_VIDEO_SCALER_FLAG_NONE, taps,
6106 in_width, out_width, convert->config);
6109 convert->fh_scaler[0].scaler = NULL;
6112 pstride = GST_VIDEO_FORMAT_INFO_PSTRIDE (out_finfo, GST_VIDEO_COMP_R);
6113 convert->fin_x[0] = convert->in_x * pstride;
6114 convert->fout_x[0] = convert->out_x * pstride;
/* Vertical scaler for the single packed plane, one per thread. */
6117 if (in_height != out_height && in_height != 0 && out_height != 0) {
6118 convert->fv_scaler[0].scaler = g_new (GstVideoScaler *, n_threads);
6120 for (j = 0; j < n_threads; j++) {
6121 convert->fv_scaler[0].scaler[j] =
6122 gst_video_scaler_new (method,
6124 GST_VIDEO_SCALER_FLAG_INTERLACED : GST_VIDEO_SCALER_FLAG_NONE, taps,
6125 in_height, out_height, convert->config);
6128 convert->fv_scaler[0].scaler = NULL;
6131 convert->fin_y[0] = convert->in_y;
6132 convert->fout_y[0] = convert->out_y;
6133 convert->fout_width[0] = out_width;
6134 convert->fout_height[0] = out_height;
6135 convert->fconvert[0] = convert_plane_hv;
6136 convert->fformat[0] = get_scale_format (in_format, 0);
6137 convert->fsplane[0] = 0;
/* Planar / semi-planar output: configure each destination plane. */
6139 for (i = 0; i < n_planes; i++) {
6140 gint comp, n_comp, j, iw, ih, ow, oh, pstride;
6141 gboolean need_v_scaler, need_h_scaler;
6142 GstStructure *config;
6143 gint resample_method;
6145 n_comp = GST_VIDEO_FORMAT_INFO_N_COMPONENTS (in_finfo);
6147 /* find the component in this plane and map it to the plane of
6150 for (j = 0; j < n_comp; j++) {
6151 if (GST_VIDEO_FORMAT_INFO_PLANE (out_finfo, j) == i) {
/* Plane dimensions after component subsampling on both sides. */
6157 iw = GST_VIDEO_FORMAT_INFO_SCALE_WIDTH (in_finfo, i, in_width);
6158 ih = GST_VIDEO_FORMAT_INFO_SCALE_HEIGHT (in_finfo, i, in_height);
6159 ow = GST_VIDEO_FORMAT_INFO_SCALE_WIDTH (out_finfo, i, out_width);
6160 oh = GST_VIDEO_FORMAT_INFO_SCALE_HEIGHT (out_finfo, i, out_height);
6162 GST_DEBUG ("plane %d: %dx%d -> %dx%d", i, iw, ih, ow, oh);
6164 convert->fout_width[i] = ow;
6165 convert->fout_height[i] = oh;
/* Crop/placement offsets, scaled to this plane's subsampling and
 * converted to bytes via the pixel stride for x. */
6167 pstride = GST_VIDEO_FORMAT_INFO_PSTRIDE (out_finfo, i);
6169 GST_VIDEO_FORMAT_INFO_SCALE_WIDTH (in_finfo, i, convert->in_x);
6170 convert->fin_x[i] *= pstride;
6172 GST_VIDEO_FORMAT_INFO_SCALE_HEIGHT (in_finfo, i, convert->in_y);
6173 convert->fout_x[i] =
6174 GST_VIDEO_FORMAT_INFO_SCALE_WIDTH (out_finfo, i, convert->out_x);
6175 convert->fout_x[i] *= pstride;
6176 convert->fout_y[i] =
6177 GST_VIDEO_FORMAT_INFO_SCALE_HEIGHT (out_finfo, i, convert->out_y);
6179 GST_DEBUG ("plane %d: pstride %d", i, pstride);
6180 GST_DEBUG ("plane %d: in_x %d, in_y %d", i, convert->fin_x[i],
6182 GST_DEBUG ("plane %d: out_x %d, out_y %d", i, convert->fout_x[i],
6183 convert->fout_y[i]);
/* Plane with no matching source component: fill it with a constant.
 * YUV chroma is filled with 0x80 (neutral), luma/RGB with 0x00, and
 * alpha planes with the configured alpha value. */
6186 convert->fconvert[i] = convert_plane_fill;
6187 if (GST_VIDEO_INFO_IS_YUV (out_info)) {
6189 convert->ffill[i] = convert->alpha_value;
6191 convert->ffill[i] = 0x00;
6193 convert->ffill[i] = 0x80;
6196 convert->ffill[i] = convert->alpha_value;
6198 convert->ffill[i] = 0x00;
6200 GST_DEBUG ("plane %d fill %02x", i, convert->ffill[i]);
/* Remember which source plane feeds this destination plane. */
6203 convert->fsplane[i] = GST_VIDEO_FORMAT_INFO_PLANE (in_finfo, comp);
6204 GST_DEBUG ("plane %d -> %d (comp %d)", i, convert->fsplane[i], comp);
6207 config = gst_structure_copy (convert->config);
/* Luma plane uses the main method, chroma planes the chroma method. */
6209 resample_method = (i == 0 ? method : cr_method);
6211 need_v_scaler = FALSE;
6212 need_h_scaler = FALSE;
/* Pick the cheapest conversion that matches the size ratio: plain
 * copy, specialized halve/double paths (progressive, byte-per-pixel,
 * matching resample method only), or the generic hv scaler. */
6214 if (!interlaced && ih == oh) {
6215 convert->fconvert[i] = convert_plane_hv;
6216 GST_DEBUG ("plane %d: copy", i);
6217 } else if (!interlaced && ih == 2 * oh && pstride == 1
6218 && resample_method == GST_VIDEO_RESAMPLER_METHOD_LINEAR) {
6219 convert->fconvert[i] = convert_plane_v_halve;
6220 GST_DEBUG ("plane %d: vertical halve", i);
6221 } else if (!interlaced && 2 * ih == oh && pstride == 1
6222 && resample_method == GST_VIDEO_RESAMPLER_METHOD_NEAREST) {
6223 convert->fconvert[i] = convert_plane_v_double;
6224 GST_DEBUG ("plane %d: vertical double", i);
6226 convert->fconvert[i] = convert_plane_hv;
6227 GST_DEBUG ("plane %d: vertical scale", i);
6228 need_v_scaler = TRUE;
6230 } else if (ih == oh) {
6231 if (!interlaced && iw == 2 * ow && pstride == 1
6232 && resample_method == GST_VIDEO_RESAMPLER_METHOD_LINEAR) {
6233 convert->fconvert[i] = convert_plane_h_halve;
6234 GST_DEBUG ("plane %d: horizontal halve", i);
6235 } else if (!interlaced && 2 * iw == ow && pstride == 1
6236 && resample_method == GST_VIDEO_RESAMPLER_METHOD_NEAREST) {
6237 convert->fconvert[i] = convert_plane_h_double;
6238 GST_DEBUG ("plane %d: horizontal double", i);
6240 convert->fconvert[i] = convert_plane_hv;
6241 GST_DEBUG ("plane %d: horizontal scale", i);
6242 need_h_scaler = TRUE;
6245 if (!interlaced && iw == 2 * ow && ih == 2 * oh && pstride == 1
6246 && resample_method == GST_VIDEO_RESAMPLER_METHOD_LINEAR) {
6247 convert->fconvert[i] = convert_plane_hv_halve;
6248 GST_DEBUG ("plane %d: horizontal/vertical halve", i);
6249 } else if (!interlaced && 2 * iw == ow && 2 * ih == oh && pstride == 1
6250 && resample_method == GST_VIDEO_RESAMPLER_METHOD_NEAREST) {
6251 convert->fconvert[i] = convert_plane_hv_double;
6252 GST_DEBUG ("plane %d: horizontal/vertical double", i);
6254 convert->fconvert[i] = convert_plane_hv;
6255 GST_DEBUG ("plane %d: horizontal/vertical scale", i);
6256 need_v_scaler = TRUE;
6257 need_h_scaler = TRUE;
/* Create per-thread scaler instances only where actually needed and
 * only for non-degenerate sizes. */
6261 if (need_h_scaler && iw != 0 && ow != 0) {
6262 convert->fh_scaler[i].scaler = g_new (GstVideoScaler *, n_threads);
6264 for (j = 0; j < n_threads; j++) {
6265 convert->fh_scaler[i].scaler[j] =
6266 gst_video_scaler_new (resample_method, GST_VIDEO_SCALER_FLAG_NONE,
6267 taps, iw, ow, config);
6270 convert->fh_scaler[i].scaler = NULL;
6273 if (need_v_scaler && ih != 0 && oh != 0) {
6274 convert->fv_scaler[i].scaler = g_new (GstVideoScaler *, n_threads);
6276 for (j = 0; j < n_threads; j++) {
6277 convert->fv_scaler[i].scaler[j] =
6278 gst_video_scaler_new (resample_method,
6280 GST_VIDEO_SCALER_FLAG_INTERLACED : GST_VIDEO_SCALER_FLAG_NONE,
6281 taps, ih, oh, config);
6284 convert->fv_scaler[i].scaler = NULL;
6287 gst_structure_free (config);
6288 convert->fformat[i] = get_scale_format (in_format, i);
/* VideoTransform: one row of the fast-path conversion table below.  The
 * boolean flags describe which stream properties the fast path preserves
 * or requires; a transform is only usable when all of them are compatible
 * with the requested conversion.
 * NOTE(review): the typedef opener, a few flag fields and the closing
 * "} VideoTransform;" fall outside this visible span. */
6299 GstVideoFormat in_format;
6300 GstVideoFormat out_format;
6301 gboolean keeps_interlaced;
6302 gboolean needs_color_matrix;
6303 gboolean keeps_size;
6306 gboolean alpha_copy;
6308 gboolean alpha_mult;
/* Alignment constraints on frame width/height for this fast path
 * (0 = unconstrained) — confirm exact mask semantics against the table
 * lookup code. */
6309 gint width_align, height_align;
6310 void (*convert) (GstVideoConverter * convert, const GstVideoFrame * src,
6311 GstVideoFrame * dest);
6314 static const VideoTransform transforms[] = {
6315 /* planar -> packed */
6316 {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_YUY2, TRUE, FALSE, TRUE, FALSE,
6317 FALSE, FALSE, FALSE, FALSE, 0, 0, convert_I420_YUY2},
6318 {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_UYVY, TRUE, FALSE, TRUE, FALSE,
6319 FALSE, FALSE, FALSE, FALSE, 0, 0, convert_I420_UYVY},
6320 {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_AYUV, TRUE, FALSE, TRUE, FALSE,
6321 FALSE, FALSE, TRUE, FALSE, 0, 0, convert_I420_AYUV},
6323 {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_YUY2, TRUE, FALSE, TRUE, FALSE,
6324 FALSE, FALSE, FALSE, FALSE, 0, 0, convert_I420_YUY2},
6325 {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_UYVY, TRUE, FALSE, TRUE, FALSE,
6326 FALSE, FALSE, FALSE, FALSE, 0, 0, convert_I420_UYVY},
6327 {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_AYUV, TRUE, FALSE, TRUE, FALSE,
6328 FALSE, FALSE, TRUE, FALSE, 0, 0, convert_I420_AYUV},
6330 {GST_VIDEO_FORMAT_Y42B, GST_VIDEO_FORMAT_YUY2, TRUE, FALSE, TRUE, TRUE,
6331 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_Y42B_YUY2},
6332 {GST_VIDEO_FORMAT_Y42B, GST_VIDEO_FORMAT_UYVY, TRUE, FALSE, TRUE, TRUE,
6333 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_Y42B_UYVY},
6334 {GST_VIDEO_FORMAT_Y42B, GST_VIDEO_FORMAT_AYUV, TRUE, FALSE, TRUE, TRUE,
6335 TRUE, FALSE, TRUE, FALSE, 1, 0, convert_Y42B_AYUV},
6337 {GST_VIDEO_FORMAT_Y444, GST_VIDEO_FORMAT_YUY2, TRUE, FALSE, TRUE, TRUE,
6338 TRUE, FALSE, FALSE, FALSE, 1, 0, convert_Y444_YUY2},
6339 {GST_VIDEO_FORMAT_Y444, GST_VIDEO_FORMAT_UYVY, TRUE, FALSE, TRUE, TRUE,
6340 TRUE, FALSE, FALSE, FALSE, 1, 0, convert_Y444_UYVY},
6341 {GST_VIDEO_FORMAT_Y444, GST_VIDEO_FORMAT_AYUV, TRUE, FALSE, TRUE, TRUE,
6342 TRUE, FALSE, TRUE, FALSE, 0, 0, convert_Y444_AYUV},
6344 /* packed -> packed */
6345 {GST_VIDEO_FORMAT_YUY2, GST_VIDEO_FORMAT_YUY2, TRUE, FALSE, FALSE, TRUE,
6346 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6347 {GST_VIDEO_FORMAT_YUY2, GST_VIDEO_FORMAT_UYVY, TRUE, FALSE, TRUE, TRUE,
6348 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_UYVY_YUY2}, /* alias */
6349 {GST_VIDEO_FORMAT_YUY2, GST_VIDEO_FORMAT_AYUV, TRUE, FALSE, TRUE, TRUE,
6350 TRUE, FALSE, TRUE, FALSE, 1, 0, convert_YUY2_AYUV},
6352 {GST_VIDEO_FORMAT_UYVY, GST_VIDEO_FORMAT_UYVY, TRUE, FALSE, FALSE, TRUE,
6353 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6354 {GST_VIDEO_FORMAT_UYVY, GST_VIDEO_FORMAT_YUY2, TRUE, FALSE, TRUE, TRUE,
6355 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_UYVY_YUY2},
6356 {GST_VIDEO_FORMAT_UYVY, GST_VIDEO_FORMAT_AYUV, TRUE, FALSE, TRUE, TRUE,
6357 TRUE, FALSE, TRUE, FALSE, 0, 0, convert_UYVY_AYUV},
6359 {GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_AYUV, TRUE, FALSE, FALSE, TRUE, TRUE,
6360 TRUE, FALSE, FALSE, 0, 0, convert_scale_planes},
6361 {GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_YUY2, TRUE, FALSE, TRUE, TRUE,
6362 TRUE, FALSE, FALSE, FALSE, 1, 0, convert_AYUV_YUY2},
6363 {GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_UYVY, TRUE, FALSE, TRUE, TRUE,
6364 TRUE, FALSE, FALSE, FALSE, 1, 0, convert_AYUV_UYVY},
6366 /* packed -> planar */
6367 {GST_VIDEO_FORMAT_YUY2, GST_VIDEO_FORMAT_I420, TRUE, FALSE, TRUE, FALSE,
6368 FALSE, FALSE, FALSE, FALSE, 0, 0, convert_YUY2_I420},
6369 {GST_VIDEO_FORMAT_YUY2, GST_VIDEO_FORMAT_YV12, TRUE, FALSE, TRUE, FALSE,
6370 FALSE, FALSE, FALSE, FALSE, 0, 0, convert_YUY2_I420},
6371 {GST_VIDEO_FORMAT_YUY2, GST_VIDEO_FORMAT_Y42B, TRUE, FALSE, TRUE, TRUE,
6372 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_YUY2_Y42B},
6373 {GST_VIDEO_FORMAT_YUY2, GST_VIDEO_FORMAT_Y444, TRUE, FALSE, TRUE, TRUE,
6374 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_YUY2_Y444},
6375 {GST_VIDEO_FORMAT_UYVY, GST_VIDEO_FORMAT_GRAY8, TRUE, TRUE, TRUE, TRUE,
6376 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_UYVY_GRAY8},
6378 {GST_VIDEO_FORMAT_UYVY, GST_VIDEO_FORMAT_I420, TRUE, FALSE, TRUE, FALSE,
6379 FALSE, FALSE, FALSE, FALSE, 0, 0, convert_UYVY_I420},
6380 {GST_VIDEO_FORMAT_UYVY, GST_VIDEO_FORMAT_YV12, TRUE, FALSE, TRUE, FALSE,
6381 FALSE, FALSE, FALSE, FALSE, 0, 0, convert_UYVY_I420},
6382 {GST_VIDEO_FORMAT_UYVY, GST_VIDEO_FORMAT_Y42B, TRUE, FALSE, TRUE, TRUE,
6383 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_UYVY_Y42B},
6384 {GST_VIDEO_FORMAT_UYVY, GST_VIDEO_FORMAT_Y444, TRUE, FALSE, TRUE, TRUE,
6385 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_UYVY_Y444},
6387 {GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_I420, FALSE, FALSE, TRUE, TRUE,
6388 TRUE, FALSE, FALSE, FALSE, 1, 1, convert_AYUV_I420},
6389 {GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_YV12, FALSE, FALSE, TRUE, TRUE,
6390 TRUE, FALSE, FALSE, FALSE, 1, 1, convert_AYUV_I420},
6391 {GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_Y42B, TRUE, FALSE, TRUE, TRUE,
6392 TRUE, FALSE, FALSE, FALSE, 1, 0, convert_AYUV_Y42B},
6393 {GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_Y444, TRUE, FALSE, TRUE, TRUE,
6394 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_AYUV_Y444},
6396 /* planar -> planar */
6397 {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_I420, TRUE, FALSE, FALSE, TRUE,
6398 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6399 {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_YV12, TRUE, FALSE, FALSE, TRUE,
6400 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6401 {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_Y41B, FALSE, FALSE, FALSE, TRUE,
6402 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6403 {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_Y42B, FALSE, FALSE, FALSE, TRUE,
6404 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6405 {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_Y444, FALSE, FALSE, FALSE, TRUE,
6406 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6408 {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_SN12, FALSE, FALSE, FALSE, TRUE,
6409 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_SN12},
6411 {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_GRAY8, FALSE, FALSE, FALSE, TRUE,
6412 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6413 {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_A420, FALSE, FALSE, FALSE, TRUE,
6414 TRUE, FALSE, TRUE, FALSE, 0, 0, convert_scale_planes},
6415 {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_YUV9, FALSE, FALSE, FALSE, TRUE,
6416 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6417 {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_YVU9, FALSE, FALSE, FALSE, TRUE,
6418 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6420 {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_I420, TRUE, FALSE, FALSE, TRUE,
6421 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6422 {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_YV12, TRUE, FALSE, FALSE, TRUE,
6423 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6424 {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_Y41B, FALSE, FALSE, FALSE, TRUE,
6425 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6426 {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_Y42B, FALSE, FALSE, FALSE, TRUE,
6427 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6428 {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_Y444, FALSE, FALSE, FALSE, TRUE,
6429 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6430 {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_GRAY8, FALSE, FALSE, FALSE, TRUE,
6431 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6432 {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_A420, FALSE, FALSE, FALSE, TRUE,
6433 TRUE, FALSE, TRUE, FALSE, 0, 0, convert_scale_planes},
6434 {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_YUV9, FALSE, FALSE, FALSE, TRUE,
6435 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6436 {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_YVU9, FALSE, FALSE, FALSE, TRUE,
6437 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6439 {GST_VIDEO_FORMAT_Y41B, GST_VIDEO_FORMAT_I420, FALSE, FALSE, FALSE, TRUE,
6440 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6441 {GST_VIDEO_FORMAT_Y41B, GST_VIDEO_FORMAT_YV12, FALSE, FALSE, FALSE, TRUE,
6442 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6443 {GST_VIDEO_FORMAT_Y41B, GST_VIDEO_FORMAT_Y41B, TRUE, FALSE, FALSE, TRUE,
6444 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6445 {GST_VIDEO_FORMAT_Y41B, GST_VIDEO_FORMAT_Y42B, FALSE, FALSE, FALSE, TRUE,
6446 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6447 {GST_VIDEO_FORMAT_Y41B, GST_VIDEO_FORMAT_Y444, FALSE, FALSE, FALSE, TRUE,
6448 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6449 {GST_VIDEO_FORMAT_Y41B, GST_VIDEO_FORMAT_GRAY8, FALSE, FALSE, FALSE, TRUE,
6450 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6451 {GST_VIDEO_FORMAT_Y41B, GST_VIDEO_FORMAT_A420, FALSE, FALSE, FALSE, TRUE,
6452 TRUE, FALSE, TRUE, FALSE, 0, 0, convert_scale_planes},
6453 {GST_VIDEO_FORMAT_Y41B, GST_VIDEO_FORMAT_YUV9, FALSE, FALSE, FALSE, TRUE,
6454 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6455 {GST_VIDEO_FORMAT_Y41B, GST_VIDEO_FORMAT_YVU9, FALSE, FALSE, FALSE, TRUE,
6456 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6458 {GST_VIDEO_FORMAT_Y42B, GST_VIDEO_FORMAT_I420, FALSE, FALSE, FALSE, TRUE,
6459 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6460 {GST_VIDEO_FORMAT_Y42B, GST_VIDEO_FORMAT_YV12, FALSE, FALSE, FALSE, TRUE,
6461 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6462 {GST_VIDEO_FORMAT_Y42B, GST_VIDEO_FORMAT_Y41B, FALSE, FALSE, FALSE, TRUE,
6463 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6464 {GST_VIDEO_FORMAT_Y42B, GST_VIDEO_FORMAT_Y42B, TRUE, FALSE, FALSE, TRUE,
6465 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6466 {GST_VIDEO_FORMAT_Y42B, GST_VIDEO_FORMAT_Y444, FALSE, FALSE, FALSE, TRUE,
6467 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6468 {GST_VIDEO_FORMAT_Y42B, GST_VIDEO_FORMAT_GRAY8, FALSE, FALSE, FALSE, TRUE,
6469 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6470 {GST_VIDEO_FORMAT_Y42B, GST_VIDEO_FORMAT_A420, FALSE, FALSE, FALSE, TRUE,
6471 TRUE, FALSE, TRUE, FALSE, 0, 0, convert_scale_planes},
6472 {GST_VIDEO_FORMAT_Y42B, GST_VIDEO_FORMAT_YUV9, FALSE, FALSE, FALSE, TRUE,
6473 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6474 {GST_VIDEO_FORMAT_Y42B, GST_VIDEO_FORMAT_YVU9, FALSE, FALSE, FALSE, TRUE,
6475 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6477 {GST_VIDEO_FORMAT_Y444, GST_VIDEO_FORMAT_I420, FALSE, FALSE, FALSE, TRUE,
6478 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6479 {GST_VIDEO_FORMAT_Y444, GST_VIDEO_FORMAT_YV12, FALSE, FALSE, FALSE, TRUE,
6480 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6481 {GST_VIDEO_FORMAT_Y444, GST_VIDEO_FORMAT_Y41B, FALSE, FALSE, FALSE, TRUE,
6482 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6483 {GST_VIDEO_FORMAT_Y444, GST_VIDEO_FORMAT_Y42B, FALSE, FALSE, FALSE, TRUE,
6484 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6485 {GST_VIDEO_FORMAT_Y444, GST_VIDEO_FORMAT_Y444, TRUE, FALSE, FALSE, TRUE,
6486 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6487 {GST_VIDEO_FORMAT_Y444, GST_VIDEO_FORMAT_GRAY8, FALSE, FALSE, FALSE, TRUE,
6488 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6489 {GST_VIDEO_FORMAT_Y444, GST_VIDEO_FORMAT_A420, FALSE, FALSE, FALSE, TRUE,
6490 TRUE, FALSE, TRUE, FALSE, 0, 0, convert_scale_planes},
6491 {GST_VIDEO_FORMAT_Y444, GST_VIDEO_FORMAT_YUV9, FALSE, FALSE, FALSE, TRUE,
6492 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6493 {GST_VIDEO_FORMAT_Y444, GST_VIDEO_FORMAT_YVU9, FALSE, FALSE, FALSE, TRUE,
6494 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6496 {GST_VIDEO_FORMAT_GRAY8, GST_VIDEO_FORMAT_I420, FALSE, FALSE, FALSE, TRUE,
6497 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6498 {GST_VIDEO_FORMAT_GRAY8, GST_VIDEO_FORMAT_YV12, FALSE, FALSE, FALSE, TRUE,
6499 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6500 {GST_VIDEO_FORMAT_GRAY8, GST_VIDEO_FORMAT_Y41B, FALSE, FALSE, FALSE, TRUE,
6501 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6502 {GST_VIDEO_FORMAT_GRAY8, GST_VIDEO_FORMAT_Y42B, FALSE, FALSE, FALSE, TRUE,
6503 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6504 {GST_VIDEO_FORMAT_GRAY8, GST_VIDEO_FORMAT_Y444, FALSE, FALSE, FALSE, TRUE,
6505 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6506 {GST_VIDEO_FORMAT_GRAY8, GST_VIDEO_FORMAT_GRAY8, TRUE, FALSE, FALSE, TRUE,
6507 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6508 {GST_VIDEO_FORMAT_GRAY8, GST_VIDEO_FORMAT_A420, FALSE, FALSE, FALSE, TRUE,
6509 TRUE, FALSE, TRUE, FALSE, 0, 0, convert_scale_planes},
6510 {GST_VIDEO_FORMAT_GRAY8, GST_VIDEO_FORMAT_YUV9, FALSE, FALSE, FALSE, TRUE,
6511 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6512 {GST_VIDEO_FORMAT_GRAY8, GST_VIDEO_FORMAT_YVU9, FALSE, FALSE, FALSE, TRUE,
6513 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6515 {GST_VIDEO_FORMAT_A420, GST_VIDEO_FORMAT_I420, FALSE, FALSE, FALSE, TRUE,
6516 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6517 {GST_VIDEO_FORMAT_A420, GST_VIDEO_FORMAT_YV12, FALSE, FALSE, FALSE, TRUE,
6518 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6519 {GST_VIDEO_FORMAT_A420, GST_VIDEO_FORMAT_Y41B, FALSE, FALSE, FALSE, TRUE,
6520 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6521 {GST_VIDEO_FORMAT_A420, GST_VIDEO_FORMAT_Y42B, FALSE, FALSE, FALSE, TRUE,
6522 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6523 {GST_VIDEO_FORMAT_A420, GST_VIDEO_FORMAT_Y444, FALSE, FALSE, FALSE, TRUE,
6524 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6525 {GST_VIDEO_FORMAT_A420, GST_VIDEO_FORMAT_GRAY8, FALSE, FALSE, FALSE, TRUE,
6526 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6527 {GST_VIDEO_FORMAT_A420, GST_VIDEO_FORMAT_A420, TRUE, FALSE, FALSE, TRUE,
6528 TRUE, TRUE, FALSE, FALSE, 0, 0, convert_scale_planes},
6529 {GST_VIDEO_FORMAT_A420, GST_VIDEO_FORMAT_YUV9, FALSE, FALSE, FALSE, TRUE,
6530 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6531 {GST_VIDEO_FORMAT_A420, GST_VIDEO_FORMAT_YVU9, FALSE, FALSE, FALSE, TRUE,
6532 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6534 {GST_VIDEO_FORMAT_YUV9, GST_VIDEO_FORMAT_I420, FALSE, FALSE, FALSE, TRUE,
6535 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6536 {GST_VIDEO_FORMAT_YUV9, GST_VIDEO_FORMAT_YV12, FALSE, FALSE, FALSE, TRUE,
6537 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6538 {GST_VIDEO_FORMAT_YUV9, GST_VIDEO_FORMAT_Y41B, FALSE, FALSE, FALSE, TRUE,
6539 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6540 {GST_VIDEO_FORMAT_YUV9, GST_VIDEO_FORMAT_Y42B, FALSE, FALSE, FALSE, TRUE,
6541 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6542 {GST_VIDEO_FORMAT_YUV9, GST_VIDEO_FORMAT_Y444, FALSE, FALSE, FALSE, TRUE,
6543 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6544 {GST_VIDEO_FORMAT_YUV9, GST_VIDEO_FORMAT_GRAY8, FALSE, FALSE, FALSE, TRUE,
6545 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6546 {GST_VIDEO_FORMAT_YUV9, GST_VIDEO_FORMAT_A420, FALSE, FALSE, FALSE, TRUE,
6547 TRUE, FALSE, TRUE, FALSE, 0, 0, convert_scale_planes},
6548 {GST_VIDEO_FORMAT_YUV9, GST_VIDEO_FORMAT_YUV9, TRUE, FALSE, FALSE, TRUE,
6549 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6550 {GST_VIDEO_FORMAT_YUV9, GST_VIDEO_FORMAT_YVU9, TRUE, FALSE, FALSE, TRUE,
6551 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6553 {GST_VIDEO_FORMAT_YVU9, GST_VIDEO_FORMAT_I420, FALSE, FALSE, FALSE, TRUE,
6554 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6555 {GST_VIDEO_FORMAT_YVU9, GST_VIDEO_FORMAT_YV12, FALSE, FALSE, FALSE, TRUE,
6556 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6557 {GST_VIDEO_FORMAT_YVU9, GST_VIDEO_FORMAT_Y41B, FALSE, FALSE, FALSE, TRUE,
6558 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6559 {GST_VIDEO_FORMAT_YVU9, GST_VIDEO_FORMAT_Y42B, FALSE, FALSE, FALSE, TRUE,
6560 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6561 {GST_VIDEO_FORMAT_YVU9, GST_VIDEO_FORMAT_Y444, FALSE, FALSE, FALSE, TRUE,
6562 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6563 {GST_VIDEO_FORMAT_YVU9, GST_VIDEO_FORMAT_GRAY8, FALSE, FALSE, FALSE, TRUE,
6564 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6565 {GST_VIDEO_FORMAT_YVU9, GST_VIDEO_FORMAT_A420, FALSE, FALSE, FALSE, TRUE,
6566 TRUE, FALSE, TRUE, FALSE, 0, 0, convert_scale_planes},
6567 {GST_VIDEO_FORMAT_YVU9, GST_VIDEO_FORMAT_YUV9, TRUE, FALSE, FALSE, TRUE,
6568 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6569 {GST_VIDEO_FORMAT_YVU9, GST_VIDEO_FORMAT_YVU9, TRUE, FALSE, FALSE, TRUE,
6570 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6572 /* semiplanar -> semiplanar */
6573 {GST_VIDEO_FORMAT_NV12, GST_VIDEO_FORMAT_NV12, TRUE, FALSE, FALSE, TRUE,
6574 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6575 {GST_VIDEO_FORMAT_NV12, GST_VIDEO_FORMAT_NV16, TRUE, FALSE, FALSE, TRUE,
6576 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6577 {GST_VIDEO_FORMAT_NV12, GST_VIDEO_FORMAT_NV24, TRUE, FALSE, FALSE, TRUE,
6578 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6580 {GST_VIDEO_FORMAT_NV21, GST_VIDEO_FORMAT_NV21, TRUE, FALSE, FALSE, TRUE,
6581 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6583 {GST_VIDEO_FORMAT_NV16, GST_VIDEO_FORMAT_NV12, TRUE, FALSE, FALSE, TRUE,
6584 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6585 {GST_VIDEO_FORMAT_NV16, GST_VIDEO_FORMAT_NV16, TRUE, FALSE, FALSE, TRUE,
6586 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6587 {GST_VIDEO_FORMAT_NV16, GST_VIDEO_FORMAT_NV24, TRUE, FALSE, FALSE, TRUE,
6588 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6590 {GST_VIDEO_FORMAT_NV61, GST_VIDEO_FORMAT_NV61, TRUE, FALSE, FALSE, TRUE,
6591 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6593 {GST_VIDEO_FORMAT_NV24, GST_VIDEO_FORMAT_NV12, TRUE, FALSE, FALSE, TRUE,
6594 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6595 {GST_VIDEO_FORMAT_NV24, GST_VIDEO_FORMAT_NV16, TRUE, FALSE, FALSE, TRUE,
6596 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6597 {GST_VIDEO_FORMAT_NV24, GST_VIDEO_FORMAT_NV24, TRUE, FALSE, FALSE, TRUE,
6598 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6600 #if G_BYTE_ORDER == G_LITTLE_ENDIAN
6601 {GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_ARGB, TRUE, TRUE, TRUE, TRUE, TRUE,
6602 TRUE, FALSE, FALSE, 0, 0, convert_AYUV_ARGB},
6603 {GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_BGRA, TRUE, TRUE, TRUE, TRUE, TRUE,
6604 TRUE, FALSE, FALSE, 0, 0, convert_AYUV_BGRA},
6605 {GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_xRGB, TRUE, TRUE, TRUE, TRUE, TRUE,
6606 FALSE, FALSE, FALSE, 0, 0, convert_AYUV_ARGB}, /* alias */
6607 {GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_BGRx, TRUE, TRUE, TRUE, TRUE, TRUE,
6608 FALSE, FALSE, FALSE, 0, 0, convert_AYUV_BGRA}, /* alias */
6609 {GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_ABGR, TRUE, TRUE, TRUE, TRUE, TRUE,
6610 TRUE, FALSE, FALSE, 0, 0, convert_AYUV_ABGR},
6611 {GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_RGBA, TRUE, TRUE, TRUE, TRUE, TRUE,
6612 TRUE, FALSE, FALSE, 0, 0, convert_AYUV_RGBA},
6613 {GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_xBGR, TRUE, TRUE, TRUE, TRUE, TRUE,
6614 FALSE, FALSE, FALSE, 0, 0, convert_AYUV_ABGR}, /* alias */
6615 {GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_RGBx, TRUE, TRUE, TRUE, TRUE, TRUE,
6616 FALSE, FALSE, FALSE, 0, 0, convert_AYUV_RGBA}, /* alias */
6619 {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_BGRA, FALSE, TRUE, TRUE, TRUE,
6620 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_BGRA},
6621 {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_BGRx, FALSE, TRUE, TRUE, TRUE,
6622 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_BGRA},
6623 {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_BGRA, FALSE, TRUE, TRUE, TRUE,
6624 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_BGRA},
6625 {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_BGRx, FALSE, TRUE, TRUE, TRUE,
6626 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_BGRA},
6628 {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_ARGB, FALSE, TRUE, TRUE, TRUE,
6629 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_ARGB},
6630 {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_xRGB, FALSE, TRUE, TRUE, TRUE,
6631 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_ARGB},
6632 {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_ARGB, FALSE, TRUE, TRUE, TRUE,
6633 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_ARGB},
6634 {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_xRGB, FALSE, TRUE, TRUE, TRUE,
6635 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_ARGB},
6637 {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_ABGR, FALSE, TRUE, TRUE, TRUE,
6638 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB},
6639 {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_xBGR, FALSE, TRUE, TRUE, TRUE,
6640 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB},
6641 {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_RGBA, FALSE, TRUE, TRUE, TRUE,
6642 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB},
6643 {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_RGBx, FALSE, TRUE, TRUE, TRUE,
6644 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB},
6645 {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_RGB, FALSE, TRUE, TRUE, TRUE,
6646 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB},
6647 {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_BGR, FALSE, TRUE, TRUE, TRUE,
6648 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB},
6649 {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_RGB15, FALSE, TRUE, TRUE, TRUE,
6650 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB},
6651 {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_BGR15, FALSE, TRUE, TRUE, TRUE,
6652 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB},
6653 {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_RGB16, FALSE, TRUE, TRUE, TRUE,
6654 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB},
6655 {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_BGR16, FALSE, TRUE, TRUE, TRUE,
6656 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB},
6658 {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_ABGR, FALSE, TRUE, TRUE, TRUE,
6659 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB},
6660 {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_xBGR, FALSE, TRUE, TRUE, TRUE,
6661 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB},
6662 {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_RGBA, FALSE, TRUE, TRUE, TRUE,
6663 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB},
6664 {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_RGBx, FALSE, TRUE, TRUE, TRUE,
6665 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB},
6666 {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_RGB, FALSE, TRUE, TRUE, TRUE,
6667 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB},
6668 {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_BGR, FALSE, TRUE, TRUE, TRUE,
6669 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB},
6670 {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_RGB15, FALSE, TRUE, TRUE, TRUE,
6671 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB},
6672 {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_BGR15, FALSE, TRUE, TRUE, TRUE,
6673 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB},
6674 {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_RGB16, FALSE, TRUE, TRUE, TRUE,
6675 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB},
6676 {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_BGR16, FALSE, TRUE, TRUE, TRUE,
6677 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB},
6680 {GST_VIDEO_FORMAT_GBR, GST_VIDEO_FORMAT_GBR, TRUE, FALSE, FALSE, TRUE,
6681 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6683 {GST_VIDEO_FORMAT_YVYU, GST_VIDEO_FORMAT_YVYU, TRUE, FALSE, FALSE, TRUE,
6684 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6686 {GST_VIDEO_FORMAT_RGB15, GST_VIDEO_FORMAT_RGB15, TRUE, FALSE, FALSE, TRUE,
6687 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6688 {GST_VIDEO_FORMAT_RGB16, GST_VIDEO_FORMAT_RGB16, TRUE, FALSE, FALSE, TRUE,
6689 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6690 {GST_VIDEO_FORMAT_BGR15, GST_VIDEO_FORMAT_BGR15, TRUE, FALSE, FALSE, TRUE,
6691 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6692 {GST_VIDEO_FORMAT_BGR16, GST_VIDEO_FORMAT_BGR16, TRUE, FALSE, FALSE, TRUE,
6693 TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6695 {GST_VIDEO_FORMAT_RGB, GST_VIDEO_FORMAT_RGB, TRUE, FALSE, FALSE, TRUE, TRUE,
6696 FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6697 {GST_VIDEO_FORMAT_BGR, GST_VIDEO_FORMAT_BGR, TRUE, FALSE, FALSE, TRUE, TRUE,
6698 FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6699 {GST_VIDEO_FORMAT_v308, GST_VIDEO_FORMAT_v308, TRUE, FALSE, FALSE, TRUE, TRUE,
6700 FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6701 {GST_VIDEO_FORMAT_IYU2, GST_VIDEO_FORMAT_IYU2, TRUE, FALSE, FALSE, TRUE, TRUE,
6702 FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6704 {GST_VIDEO_FORMAT_ARGB, GST_VIDEO_FORMAT_ARGB, TRUE, FALSE, FALSE, TRUE, TRUE,
6705 TRUE, FALSE, FALSE, 0, 0, convert_scale_planes},
6706 {GST_VIDEO_FORMAT_xRGB, GST_VIDEO_FORMAT_xRGB, TRUE, FALSE, FALSE, TRUE, TRUE,
6707 FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6708 {GST_VIDEO_FORMAT_ABGR, GST_VIDEO_FORMAT_ABGR, TRUE, FALSE, FALSE, TRUE, TRUE,
6709 TRUE, FALSE, FALSE, 0, 0, convert_scale_planes},
6710 {GST_VIDEO_FORMAT_xBGR, GST_VIDEO_FORMAT_xBGR, TRUE, FALSE, FALSE, TRUE, TRUE,
6711 FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6712 {GST_VIDEO_FORMAT_RGBA, GST_VIDEO_FORMAT_RGBA, TRUE, FALSE, FALSE, TRUE, TRUE,
6713 TRUE, FALSE, FALSE, 0, 0, convert_scale_planes},
6714 {GST_VIDEO_FORMAT_RGBx, GST_VIDEO_FORMAT_RGBx, TRUE, FALSE, FALSE, TRUE, TRUE,
6715 FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6716 {GST_VIDEO_FORMAT_BGRA, GST_VIDEO_FORMAT_BGRA, TRUE, FALSE, FALSE, TRUE, TRUE,
6717 TRUE, FALSE, FALSE, 0, 0, convert_scale_planes},
6718 {GST_VIDEO_FORMAT_BGRx, GST_VIDEO_FORMAT_BGRx, TRUE, FALSE, FALSE, TRUE, TRUE,
6719 FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6721 {GST_VIDEO_FORMAT_ARGB64, GST_VIDEO_FORMAT_ARGB64, TRUE, FALSE, FALSE, TRUE,
6722 TRUE, TRUE, FALSE, FALSE, 0, 0, convert_scale_planes},
6723 {GST_VIDEO_FORMAT_AYUV64, GST_VIDEO_FORMAT_AYUV64, TRUE, FALSE, FALSE, TRUE,
6724 TRUE, TRUE, FALSE, FALSE, 0, 0, convert_scale_planes},
6726 {GST_VIDEO_FORMAT_GRAY16_LE, GST_VIDEO_FORMAT_GRAY16_LE, TRUE, FALSE, FALSE,
6727 TRUE, TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6728 {GST_VIDEO_FORMAT_GRAY16_BE, GST_VIDEO_FORMAT_GRAY16_BE, TRUE, FALSE, FALSE,
6729 TRUE, TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes},
6733 video_converter_lookup_fastpath (GstVideoConverter * convert)
6736 GstVideoFormat in_format, out_format;
6737 GstVideoTransferFunction in_transf, out_transf;
6738 gboolean interlaced, same_matrix, same_primaries, same_size, crop, border;
6739 gboolean need_copy, need_set, need_mult;
6742 width = GST_VIDEO_INFO_WIDTH (&convert->in_info);
6743 height = GST_VIDEO_INFO_HEIGHT (&convert->in_info);
6745 if (GET_OPT_DITHER_QUANTIZATION (convert) != 1)
6748 /* we don't do gamma conversion in fastpath */
6749 in_transf = convert->in_info.colorimetry.transfer;
6750 out_transf = convert->out_info.colorimetry.transfer;
6752 same_size = (width == convert->out_width && height == convert->out_height);
6754 /* fastpaths don't do gamma */
6755 if (CHECK_GAMMA_REMAP (convert) && (!same_size || in_transf != out_transf))
6758 need_copy = (convert->alpha_mode & ALPHA_MODE_COPY) == ALPHA_MODE_COPY;
6759 need_set = (convert->alpha_mode & ALPHA_MODE_SET) == ALPHA_MODE_SET;
6760 need_mult = (convert->alpha_mode & ALPHA_MODE_MULT) == ALPHA_MODE_MULT;
6761 GST_DEBUG ("alpha copy %d, set %d, mult %d", need_copy, need_set, need_mult);
6763 in_format = GST_VIDEO_INFO_FORMAT (&convert->in_info);
6764 out_format = GST_VIDEO_INFO_FORMAT (&convert->out_info);
6766 if (CHECK_MATRIX_NONE (convert)) {
6769 GstVideoColorMatrix in_matrix, out_matrix;
6771 in_matrix = convert->in_info.colorimetry.matrix;
6772 out_matrix = convert->out_info.colorimetry.matrix;
6773 same_matrix = in_matrix == out_matrix;
6776 if (CHECK_PRIMARIES_NONE (convert)) {
6777 same_primaries = TRUE;
6779 GstVideoColorPrimaries in_primaries, out_primaries;
6781 in_primaries = convert->in_info.colorimetry.primaries;
6782 out_primaries = convert->out_info.colorimetry.primaries;
6783 same_primaries = in_primaries == out_primaries;
6786 interlaced = GST_VIDEO_INFO_IS_INTERLACED (&convert->in_info);
6787 interlaced |= GST_VIDEO_INFO_IS_INTERLACED (&convert->out_info);
6789 crop = convert->in_x || convert->in_y
6790 || convert->in_width < convert->in_maxwidth
6791 || convert->in_height < convert->in_maxheight;
6792 border = convert->out_x || convert->out_y
6793 || convert->out_width < convert->out_maxwidth
6794 || convert->out_height < convert->out_maxheight;
6796 for (i = 0; i < G_N_ELEMENTS (transforms); i++) {
6797 if (transforms[i].in_format == in_format &&
6798 transforms[i].out_format == out_format &&
6799 (transforms[i].keeps_interlaced || !interlaced) &&
6800 (transforms[i].needs_color_matrix || (same_matrix && same_primaries))
6801 && (!transforms[i].keeps_size || same_size)
6802 && (transforms[i].width_align & width) == 0
6803 && (transforms[i].height_align & height) == 0
6804 && (transforms[i].do_crop || !crop)
6805 && (transforms[i].do_border || !border)
6806 && (transforms[i].alpha_copy || !need_copy)
6807 && (transforms[i].alpha_set || !need_set)
6808 && (transforms[i].alpha_mult || !need_mult)) {
6811 GST_DEBUG ("using fastpath");
6812 if (transforms[i].needs_color_matrix)
6813 video_converter_compute_matrix (convert);
6814 convert->convert = transforms[i].convert;
6817 g_new (guint16 *, convert->conversion_runner->n_threads);
6818 for (j = 0; j < convert->conversion_runner->n_threads; j++)
6819 convert->tmpline[j] = g_malloc0 (sizeof (guint16) * (width + 8) * 4);
6821 if (!transforms[i].keeps_size)
6822 if (!setup_scale (convert))
6825 setup_borderline (convert);
6829 GST_DEBUG ("no fastpath found");