2 * Copyright (C) 2020 Igalia, S.L.
3 * Author: Víctor Jáquez <vjaquez@igalia.com>
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Library General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Library General Public License for more details.
15 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
17 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
18 * Boston, MA 02110-1301, USA.
22 * SECTION:element-vavp9dec
24 * @short_description: A VA-API based VP9 video decoder
26 * vavp9dec decodes VP9 bitstreams to VA surfaces using the
27 * installed and chosen [VA-API](https://01.org/linuxmedia/vaapi)
30 * The decoding surfaces can be mapped onto main memory as video
33 * ## Example launch line
35 * gst-launch-1.0 filesrc location=sample.webm ! parsebin ! vavp9dec ! autovideosink
46 #include "gstvavp9dec.h"
48 #include "gstvabasedec.h"
/* Per-element debug category; GST_CAT_DEFAULT degrades to NULL when
 * debugging support is compiled out. */
GST_DEBUG_CATEGORY_STATIC (gst_va_vp9dec_debug);
#ifndef GST_DISABLE_GST_DEBUG
#define GST_CAT_DEFAULT gst_va_vp9dec_debug
#define GST_CAT_DEFAULT NULL

/* Plain C cast helpers — no runtime type checking is performed. */
#define GST_VA_VP9_DEC(obj) ((GstVaVp9Dec *) obj)
#define GST_VA_VP9_DEC_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS ((obj), G_TYPE_FROM_INSTANCE (obj), GstVaVp9DecClass))
#define GST_VA_VP9_DEC_CLASS(klass) ((GstVaVp9DecClass *) klass)

typedef struct _GstVaVp9Dec GstVaVp9Dec;
typedef struct _GstVaVp9DecClass GstVaVp9DecClass;

struct _GstVaVp9DecClass
  GstVaBaseDecClass parent_class;

  /* Per-segment dequant and loop-filter state, refreshed each frame by
   * _update_segmentation() and consumed by _fill_slice(). */
  GstVp9Segmentation segmentation[GST_VP9_MAX_SEGMENTS];

  /* Raised when caps must be (re)negotiated with downstream; checked and
   * cleared in gst_va_vp9_dec_negotiate(). */
  gboolean need_negotiation;

/* Source caps: NV12 in VAMemory preferred, system-memory NV12 fallback. */
static const gchar *src_caps_str = GST_VIDEO_CAPS_MAKE_WITH_FEATURES ("memory:VAMemory",
    "{ NV12 }") " ;" GST_VIDEO_CAPS_MAKE ("{ NV12 }");

static const gchar *sink_caps_str = "video/x-vp9";
/* Map a VP9 profile + bit depth + chroma subsampling triplet to the
 * matching VA rtformat (VA_RT_FORMAT_*).  Unsupported combinations fall
 * through to the error log below. */
_get_rtformat (GstVaVp9Dec * self, GstVP9Profile profile,
    GstVp9BitDepth bit_depth, gint subsampling_x, gint subsampling_y)
    case GST_VP9_PROFILE_0:
      /* profile 0: 8-bit 4:2:0 only */
      return VA_RT_FORMAT_YUV420;
    case GST_VP9_PROFILE_1:
      /* profile 1: 8-bit 4:2:2 (x-only subsampling) or 4:4:4 (none) */
      if (subsampling_x == 1 && subsampling_y == 0)
        return VA_RT_FORMAT_YUV422;
      else if (subsampling_x == 0 && subsampling_y == 0)
        return VA_RT_FORMAT_YUV444;
    case GST_VP9_PROFILE_2:
      /* profile 2: 10/12-bit 4:2:0 */
      if (bit_depth == GST_VP9_BIT_DEPTH_10)
        return VA_RT_FORMAT_YUV420_10;
      else if (bit_depth == GST_VP9_BIT_DEPTH_12)
        return VA_RT_FORMAT_YUV420_12;
    case GST_VP9_PROFILE_3:
      /* profile 3: 10/12-bit 4:2:2 or 4:4:4 */
      if (subsampling_x == 1 && subsampling_y == 0) {
        if (bit_depth == GST_VP9_BIT_DEPTH_10)
          return VA_RT_FORMAT_YUV422_10;
        else if (bit_depth == GST_VP9_BIT_DEPTH_12)
          return VA_RT_FORMAT_YUV422_12;
      } else if (subsampling_x == 0 && subsampling_y == 0) {
        if (bit_depth == GST_VP9_BIT_DEPTH_10)
          return VA_RT_FORMAT_YUV444_10;
        else if (bit_depth == GST_VP9_BIT_DEPTH_12)
          return VA_RT_FORMAT_YUV444_12;

  GST_ERROR_OBJECT (self, "Unsupported chroma format");
/* Translate a parsed GstVP9Profile into the corresponding VAProfile.
 * Returns VAProfileNone (with an error log) for anything else. */
_get_profile (GstVaVp9Dec * self, GstVP9Profile profile)
    case GST_VP9_PROFILE_0:
      return VAProfileVP9Profile0;
    case GST_VP9_PROFILE_1:
      return VAProfileVP9Profile1;
    case GST_VP9_PROFILE_2:
      return VAProfileVP9Profile2;
    case GST_VP9_PROFILE_3:
      return VAProfileVP9Profile3;

  GST_ERROR_OBJECT (self, "Unsupported profile");
  return VAProfileNone;
/* GstVp9DecoderClass::new_sequence vfunc.
 *
 * Validates that the stream's profile is supported by the VA driver,
 * stores profile/rt_format/resolution on the base decoder and, when the
 * VA config would change, triggers renegotiation with downstream. */
gst_va_vp9_new_sequence (GstVp9Decoder * decoder,
    const GstVp9FrameHeader * frame_hdr)
  GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
  GstVaVp9Dec *self = GST_VA_VP9_DEC (decoder);
  gboolean negotiation_needed = FALSE;

  profile = _get_profile (self, frame_hdr->profile);
  if (profile == VAProfileNone)

  if (!gst_va_decoder_has_profile (base->decoder, profile)) {
    GST_ERROR_OBJECT (self, "Profile %s is not supported",
        gst_va_profile_name (profile));

  rt_format = _get_rtformat (self, frame_hdr->profile, frame_hdr->bit_depth,
      frame_hdr->subsampling_x, frame_hdr->subsampling_y);

  /* Only renegotiate when the effective VA configuration changes. */
  if (!gst_va_decoder_config_is_equal (base->decoder, profile,
          rt_format, frame_hdr->width, frame_hdr->height)) {
    base->profile = profile;
    base->width = frame_hdr->width;
    base->height = frame_hdr->height;
    base->rt_format = rt_format;
    negotiation_needed = TRUE;

  /* VP9 can reference up to GST_VP9_REF_FRAMES surfaces. */
  base->min_buffers = GST_VP9_REF_FRAMES;

  if (negotiation_needed) {
    self->need_negotiation = TRUE;
    if (!gst_video_decoder_negotiate (GST_VIDEO_DECODER (self))) {
      GST_ERROR_OBJECT (self, "Failed to negotiate with downstream");
/* Detect a per-picture resolution change (legal mid-stream in VP9) and
 * renegotiate output caps if the frame size differs from the current
 * base decoder configuration. */
_check_resolution_change (GstVaVp9Dec * self, GstVp9Picture * picture)
  GstVaBaseDec *base = GST_VA_BASE_DEC (self);
  const GstVp9FrameHeader *frame_hdr = &picture->frame_hdr;

  if ((base->width != frame_hdr->width) || base->height != frame_hdr->height) {
    base->width = frame_hdr->width;
    base->height = frame_hdr->height;

    self->need_negotiation = TRUE;
    if (!gst_video_decoder_negotiate (GST_VIDEO_DECODER (self))) {
      GST_ERROR_OBJECT (self, "Resolution changes, but failed to"
          " negotiate with downstream");
/* GstVp9DecoderClass::new_picture vfunc.
 *
 * Allocates the downstream output buffer for @frame and attaches a new
 * GstVaDecodePicture (owning the VA surface) to @picture; the decode
 * picture is released together with @picture via its destroy notify. */
gst_va_vp9_dec_new_picture (GstVp9Decoder * decoder,
    GstVideoCodecFrame * frame, GstVp9Picture * picture)
  GstVaVp9Dec *self = GST_VA_VP9_DEC (decoder);
  GstVaDecodePicture *pic;
  GstVideoDecoder *vdec = GST_VIDEO_DECODER (decoder);
  GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);

  /* Renegotiate first if this picture changes the resolution. */
  if (!_check_resolution_change (self, picture))

  ret = gst_video_decoder_allocate_output_frame (vdec, frame);
  if (ret != GST_FLOW_OK)

  pic = gst_va_decode_picture_new (base->decoder, frame->output_buffer);

  gst_vp9_picture_set_user_data (picture, pic,
      (GDestroyNotify) gst_va_decode_picture_free);

  GST_LOG_OBJECT (self, "New va decode picture %p - %#x", pic,
      gst_va_decode_picture_get_surface (pic));

  /* Error path. */
  GST_WARNING_OBJECT (self, "Failed to allocated output buffer, return %s",
      gst_flow_get_name (ret));
247 static inline gboolean
248 _fill_param (GstVp9Decoder * decoder, GstVp9Picture * picture, GstVp9Dpb * dpb)
250 GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
251 GstVaDecodePicture *va_pic;
252 const GstVp9FrameHeader *frame_hdr = &picture->frame_hdr;
253 const GstVp9LoopFilterParams *lfp = &frame_hdr->loop_filter_params;
254 const GstVp9SegmentationParams *sp = &frame_hdr->segmentation_params;
255 VADecPictureParameterBufferVP9 pic_param;
259 pic_param = (VADecPictureParameterBufferVP9) {
260 .frame_width = base->width,
261 .frame_height = base->height,
264 .subsampling_x = frame_hdr->subsampling_x,
265 .subsampling_y = frame_hdr->subsampling_x,
266 .frame_type = frame_hdr->frame_type,
267 .show_frame = frame_hdr->show_frame,
268 .error_resilient_mode = frame_hdr->error_resilient_mode,
269 .intra_only = frame_hdr->intra_only,
270 .allow_high_precision_mv = frame_hdr->allow_high_precision_mv,
271 .mcomp_filter_type = frame_hdr->interpolation_filter,
272 .frame_parallel_decoding_mode = frame_hdr->frame_parallel_decoding_mode,
273 .reset_frame_context = frame_hdr->reset_frame_context,
274 .refresh_frame_context = frame_hdr->refresh_frame_context,
275 .frame_context_idx = frame_hdr->frame_context_idx,
277 .segmentation_enabled = sp->segmentation_enabled,
278 .segmentation_temporal_update = sp->segmentation_temporal_update,
279 .segmentation_update_map = sp->segmentation_update_map,
282 frame_hdr->ref_frame_idx[GST_VP9_REF_FRAME_LAST - 1],
283 .last_ref_frame_sign_bias =
284 frame_hdr->ref_frame_sign_bias[GST_VP9_REF_FRAME_LAST],
286 frame_hdr->ref_frame_idx[GST_VP9_REF_FRAME_GOLDEN - 1],
287 .golden_ref_frame_sign_bias =
288 frame_hdr->ref_frame_sign_bias[GST_VP9_REF_FRAME_GOLDEN],
290 frame_hdr->ref_frame_idx[GST_VP9_REF_FRAME_ALTREF - 1],
291 .alt_ref_frame_sign_bias =
292 frame_hdr->ref_frame_sign_bias[GST_VP9_REF_FRAME_ALTREF],
294 .lossless_flag = frame_hdr->lossless_flag,
297 .filter_level = lfp->loop_filter_level,
298 .sharpness_level = lfp->loop_filter_sharpness,
299 .log2_tile_rows = frame_hdr->tile_rows_log2,
300 .log2_tile_columns = frame_hdr->tile_cols_log2,
302 .frame_header_length_in_bytes = frame_hdr->frame_header_length_in_bytes,
303 .first_partition_size = frame_hdr->header_size_in_bytes,
305 .profile = frame_hdr->profile,
306 .bit_depth = frame_hdr->bit_depth
310 memcpy (pic_param.mb_segment_tree_probs, sp->segmentation_tree_probs,
311 sizeof (sp->segmentation_tree_probs));
313 if (sp->segmentation_temporal_update) {
314 memcpy (pic_param.segment_pred_probs, sp->segmentation_pred_prob,
315 sizeof (sp->segmentation_pred_prob));
317 memset (pic_param.segment_pred_probs, 255,
318 sizeof (pic_param.segment_pred_probs));
321 for (i = 0; i < GST_VP9_REF_FRAMES; i++) {
322 if (dpb->pic_list[i]) {
323 GstVaDecodePicture *va_pic =
324 gst_vp9_picture_get_user_data (dpb->pic_list[i]);
326 pic_param.reference_frames[i] =
327 gst_va_decode_picture_get_surface (va_pic);
329 pic_param.reference_frames[i] = VA_INVALID_ID;
333 va_pic = gst_vp9_picture_get_user_data (picture);
335 return gst_va_decoder_add_param_buffer (base->decoder, va_pic,
336 VAPictureParameterBufferType, &pic_param, sizeof (pic_param));
/* Recompute the per-segment state cached in self->segmentation[] from the
 * current frame @header: dequantization scale factors (VP9 spec 8.6.1)
 * and the ref/mode loop-filter level lookup table (VP9 spec 8.8.1).
 * _fill_slice() copies these verbatim into VASegmentParameterVP9. */
_update_segmentation (GstVaVp9Dec * self, GstVp9FrameHeader * header)
  const GstVp9LoopFilterParams *lfp = &header->loop_filter_params;
  const GstVp9QuantizationParams *qp = &header->quantization_params;
  const GstVp9SegmentationParams *sp = &header->segmentation_params;
  /* Delta shift from spec 8.8.1: deltas scale up once the base filter
   * level reaches 32. */
  guint8 n_shift = lfp->loop_filter_level >> 5;

  for (i = 0; i < GST_VP9_MAX_SEGMENTS; i++) {
    gint16 luma_dc_quant_scale;
    gint16 luma_ac_quant_scale;
    gint16 chroma_dc_quant_scale;
    gint16 chroma_ac_quant_scale;

    guint8 lvl_lookup[GST_VP9_MAX_REF_LF_DELTAS][GST_VP9_MAX_MODE_LF_DELTAS];
    gint lvl_seg = lfp->loop_filter_level;

    /* 8.6.1 Dequantization functions */
    qindex = gst_vp9_get_qindex (sp, qp, i);
    luma_dc_quant_scale =
        gst_vp9_get_dc_quant (qindex, qp->delta_q_y_dc, header->bit_depth);
    luma_ac_quant_scale = gst_vp9_get_ac_quant (qindex, 0, header->bit_depth);
    chroma_dc_quant_scale =
        gst_vp9_get_dc_quant (qindex, qp->delta_q_uv_dc, header->bit_depth);
    chroma_ac_quant_scale =
        gst_vp9_get_ac_quant (qindex, qp->delta_q_uv_ac, header->bit_depth);

    if (!lfp->loop_filter_level) {
      /* Loop filter disabled: all levels are zero. */
      memset (lvl_lookup, 0, sizeof (lvl_lookup));
      /* 8.8.1 Loop filter frame init process */
      if (gst_vp9_seg_feature_active (sp, i, GST_VP9_SEG_LVL_ALT_L)) {
        if (sp->segmentation_abs_or_delta_update) {
          /* Absolute update replaces the level... */
          lvl_seg = sp->feature_data[i][GST_VP9_SEG_LVL_ALT_L];
          /* ...delta update adjusts it. */
          lvl_seg += sp->feature_data[i][GST_VP9_SEG_LVL_ALT_L];

        lvl_seg = CLAMP (lvl_seg, 0, GST_VP9_MAX_LOOP_FILTER);

      if (!lfp->loop_filter_delta_enabled) {
        /* No per-ref/mode deltas: every entry gets the segment level. */
        memset (lvl_lookup, lvl_seg, sizeof (lvl_lookup));
        gint intra_lvl = lvl_seg +
            (lfp->loop_filter_ref_deltas[GST_VP9_REF_FRAME_INTRA] << n_shift);

        /* Start from the previous frame's table; only mode 0 of INTRA is
         * redefined by the spec. */
        memcpy (lvl_lookup, self->segmentation[i].filter_level,
            sizeof (lvl_lookup));

        lvl_lookup[GST_VP9_REF_FRAME_INTRA][0] =
            CLAMP (intra_lvl, 0, GST_VP9_MAX_LOOP_FILTER);
        for (ref = GST_VP9_REF_FRAME_LAST; ref < GST_VP9_REF_FRAME_MAX; ref++) {
          for (mode = 0; mode < GST_VP9_MAX_MODE_LF_DELTAS; mode++) {
            intra_lvl = lvl_seg + (lfp->loop_filter_ref_deltas[ref] << n_shift)
                + (lfp->loop_filter_mode_deltas[mode] << n_shift);
            lvl_lookup[ref][mode] =
                CLAMP (intra_lvl, 0, GST_VP9_MAX_LOOP_FILTER);

    /* Publish the computed state for this segment. */
    self->segmentation[i] = (GstVp9Segmentation) {
      .luma_dc_quant_scale = luma_dc_quant_scale,
      .luma_ac_quant_scale = luma_ac_quant_scale,
      .chroma_dc_quant_scale = chroma_dc_quant_scale,
      .chroma_ac_quant_scale = chroma_ac_quant_scale,

      .reference_frame_enabled = sp->feature_enabled[i][GST_VP9_SEG_LVL_REF_FRAME],
      .reference_frame = sp->feature_data[i][GST_VP9_SEG_LVL_REF_FRAME],
      .reference_skip = sp->feature_enabled[i][GST_VP9_SEG_SEG_LVL_SKIP],

    memcpy (self->segmentation[i].filter_level, lvl_lookup,
        sizeof (lvl_lookup));
/* Build the VASliceParameterBufferVP9 — including all per-segment
 * entries — and queue it together with the compressed frame data on the
 * picture's VA buffers. VP9 has a single "slice" per frame. */
static inline gboolean
_fill_slice (GstVp9Decoder * decoder, GstVp9Picture * picture)
  GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
  GstVaVp9Dec *self = GST_VA_VP9_DEC (decoder);
  GstVaDecodePicture *va_pic;
  const GstVp9Segmentation *seg;
  VASliceParameterBufferVP9 slice_param;

  /* Refresh self->segmentation[] from this frame's header first. */
  _update_segmentation (self, &picture->frame_hdr);

  slice_param = (VASliceParameterBufferVP9) {
    .slice_data_size = picture->size,
    .slice_data_offset = 0,
    .slice_data_flag = VA_SLICE_DATA_FLAG_ALL,

  for (i = 0; i < GST_VP9_MAX_SEGMENTS; i++) {
    seg = &self->segmentation[i];

    slice_param.seg_param[i] = (VASegmentParameterVP9) {
      .segment_flags.fields = {
        .segment_reference_enabled = seg->reference_frame_enabled,
        .segment_reference = seg->reference_frame,
        .segment_reference_skipped = seg->reference_skip,
      .luma_dc_quant_scale = seg->luma_dc_quant_scale,
      .luma_ac_quant_scale = seg->luma_ac_quant_scale,
      .chroma_dc_quant_scale = seg->chroma_dc_quant_scale,
      .chroma_ac_quant_scale = seg->chroma_ac_quant_scale,

    memcpy (slice_param.seg_param[i].filter_level, seg->filter_level,
        sizeof (slice_param.seg_param[i].filter_level));

  va_pic = gst_vp9_picture_get_user_data (picture);

  return gst_va_decoder_add_slice_buffer (base->decoder, va_pic, &slice_param,
      sizeof (slice_param), (gpointer) picture->data, picture->size);
/* GstVp9DecoderClass::decode_picture vfunc: queue the picture parameter
 * and slice buffers; the actual decode is issued in end_picture(). */
gst_va_vp9_decode_picture (GstVp9Decoder * decoder, GstVp9Picture * picture,
  return _fill_param (decoder, picture, dpb) && _fill_slice (decoder, picture);
/* GstVp9DecoderClass::end_picture vfunc: submit all queued VA buffers
 * for @picture to the driver for decoding. */
gst_va_vp9_dec_end_picture (GstVp9Decoder * decoder, GstVp9Picture * picture)
  GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
  GstVaDecodePicture *va_pic;

  GST_LOG_OBJECT (base, "end picture %p", picture);

  va_pic = gst_vp9_picture_get_user_data (picture);

  return gst_va_decoder_decode (base->decoder, va_pic);
/* GstVp9DecoderClass::output_picture vfunc: optionally copy the decoded
 * surface into a downstream-mappable buffer, drop our picture ref and
 * push the frame downstream. */
gst_va_vp9_dec_output_picture (GstVp9Decoder * decoder,
    GstVideoCodecFrame * frame, GstVp9Picture * picture)
  GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
  GstVaVp9Dec *self = GST_VA_VP9_DEC (decoder);

  GST_LOG_OBJECT (self, "Outputting picture %p", picture);

  /* Copy VAMemory into system memory when downstream can't map it. */
  if (base->copy_frames)
    gst_va_base_dec_copy_output_buffer (base, frame);

  gst_vp9_picture_unref (picture);

  return gst_video_decoder_finish_frame (GST_VIDEO_DECODER (self), frame);
506 static GstVp9Picture *
507 gst_va_vp9_dec_duplicate_picture (GstVp9Decoder * decoder,
508 GstVideoCodecFrame * frame, GstVp9Picture * picture)
510 GstVaDecodePicture *va_pic, *va_dup;
511 GstVp9Picture *new_picture;
513 if (!_check_resolution_change (GST_VA_VP9_DEC (decoder), picture))
516 va_pic = gst_vp9_picture_get_user_data (picture);
517 va_dup = gst_va_decode_picture_dup (va_pic);
519 new_picture = gst_vp9_picture_new ();
520 new_picture->frame_hdr = picture->frame_hdr;
522 frame->output_buffer = gst_buffer_ref (va_dup->gstbuffer);
524 gst_vp9_picture_set_user_data (picture, va_dup,
525 (GDestroyNotify) gst_va_decode_picture_free);
/* GstVideoDecoder::negotiate vfunc.
 *
 * Only acts when new_sequence()/_check_resolution_change() raised
 * self->need_negotiation; pure downstream renegotiation requests are
 * ignored. Reuses the open VA context across resolution-only changes,
 * reopening it only when profile or rt_format differ. */
gst_va_vp9_dec_negotiate (GstVideoDecoder * decoder)
  GstCapsFeatures *capsfeatures = NULL;
  GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
  GstVaVp9Dec *self = GST_VA_VP9_DEC (decoder);
  GstVideoFormat format = GST_VIDEO_FORMAT_UNKNOWN;
  GstVp9Decoder *vp9dec = GST_VP9_DECODER (decoder);

  /* Ignore downstream renegotiation request. */
  if (!self->need_negotiation)

  self->need_negotiation = FALSE;

  /* The driver for VP9 should have the ability to handle the dynamical
     resolution changes. So if only the resolution changes, we should not
     re-create the config and context. */
  if (gst_va_decoder_is_open (base->decoder)) {
    VAProfile cur_profile;
    gint cur_width, cur_height;

    if (!gst_va_decoder_get_config (base->decoder, &cur_profile,
            &cur_rtformat, &cur_width, &cur_height))

    if (base->profile == cur_profile && base->rt_format == cur_rtformat) {
      /* Same VA config: just update the frame size in place. */
      if (!gst_va_decoder_update_frame_size (base->decoder, base->width,
      GST_INFO_OBJECT (self, "dynamical resolution changes from %dx%d to"
          " %dx%d", cur_width, cur_height, base->width, base->height);

    /* Profile or rt_format changed: close and reopen below. */
    if (!gst_va_decoder_close (base->decoder))

  if (!gst_va_decoder_open (base->decoder, base->profile, base->rt_format))

  if (!gst_va_decoder_set_frame_size (base->decoder, base->width,

  if (base->output_state)
    gst_video_codec_state_unref (base->output_state);

  /* Choose output format and caps features (VAMemory vs system memory)
   * preferred for the negotiated VA configuration. */
  gst_va_base_dec_get_preferred_format_and_caps_features (base, &format,

  gst_video_decoder_set_output_state (decoder, format,
      base->width, base->height, vp9dec->input_state);

  base->output_state->caps = gst_video_info_to_caps (&base->output_state->info);
  gst_caps_set_features_simple (base->output_state->caps, capsfeatures);

  GST_INFO_OBJECT (self, "Negotiated caps %" GST_PTR_FORMAT,
      base->output_state->caps);

  return GST_VIDEO_DECODER_CLASS (GST_VA_BASE_DEC_GET_PARENT_CLASS
      (decoder))->negotiate (decoder);
/* GObject::dispose vfunc: close the VA decoder resources, then chain up
 * to the parent class. */
gst_va_vp9_dec_dispose (GObject * object)
  gst_va_base_dec_close (GST_VIDEO_DECODER (object));
  G_OBJECT_CLASS (GST_VA_BASE_DEC_GET_PARENT_CLASS (object))->dispose (object);
/* GType class-init callback: sets the element metadata, initializes the
 * GstVaBaseDec helper and wires all GstVp9Decoder vfuncs. @class_data is
 * the per-device CData built in gst_va_vp9_dec_register(); its fields
 * are consumed and freed here. */
gst_va_vp9_dec_class_init (gpointer g_class, gpointer class_data)
  GstCaps *src_doc_caps, *sink_doc_caps;
  GObjectClass *gobject_class = G_OBJECT_CLASS (g_class);
  GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);
  GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_CLASS (g_class);
  GstVp9DecoderClass *vp9_class = GST_VP9_DECODER_CLASS (g_class);
  struct CData *cdata = class_data;

  /* Non-first devices get a description to disambiguate the long name. */
  if (cdata->description) {
    long_name = g_strdup_printf ("VA-API VP9 Decoder in %s",
    long_name = g_strdup ("VA-API VP9 Decoder");

  gst_element_class_set_metadata (element_class, long_name,
      "Codec/Decoder/Video/Hardware", "VA-API based VP9 video decoder",
      "Víctor Jáquez <vjaquez@igalia.com>");

  /* Documentation caps are static; runtime caps come from the device. */
  sink_doc_caps = gst_caps_from_string (sink_caps_str);
  src_doc_caps = gst_caps_from_string (src_caps_str);

  gst_va_base_dec_class_init (GST_VA_BASE_DEC_CLASS (g_class), VP9,
      cdata->render_device_path, cdata->sink_caps, cdata->src_caps,
      src_doc_caps, sink_doc_caps);

  gobject_class->dispose = gst_va_vp9_dec_dispose;

  decoder_class->negotiate = GST_DEBUG_FUNCPTR (gst_va_vp9_dec_negotiate);

  vp9_class->new_sequence = GST_DEBUG_FUNCPTR (gst_va_vp9_new_sequence);
  vp9_class->new_picture = GST_DEBUG_FUNCPTR (gst_va_vp9_dec_new_picture);
  vp9_class->decode_picture = GST_DEBUG_FUNCPTR (gst_va_vp9_decode_picture);
  vp9_class->end_picture = GST_DEBUG_FUNCPTR (gst_va_vp9_dec_end_picture);
  vp9_class->output_picture = GST_DEBUG_FUNCPTR (gst_va_vp9_dec_output_picture);
  vp9_class->duplicate_picture =
      GST_DEBUG_FUNCPTR (gst_va_vp9_dec_duplicate_picture);

  /* CData ownership ends here. */
  g_free (cdata->description);
  g_free (cdata->render_device_path);
  gst_caps_unref (cdata->src_caps);
  gst_caps_unref (cdata->sink_caps);
/* GType instance-init callback: delegate setup to the VA base decoder. */
gst_va_vp9_dec_init (GTypeInstance * instance, gpointer g_class)
  gst_va_base_dec_init (GST_VA_BASE_DEC (instance), GST_CAT_DEFAULT);
/* This element doesn't parse superframes. Let's delegate it to the
 * upstream parser by requiring frame-aligned input.
 * Returns a new reference to the (modified in place) @sinkcaps. */
_complete_sink_caps (GstCaps * sinkcaps)
  gst_caps_set_simple (sinkcaps, "alignment", G_TYPE_STRING, "frame", NULL);
  return gst_caps_ref (sinkcaps);
/* GOnce callback: initialize the element's debug category exactly once,
 * even when several per-device types are registered. */
_register_debug_category (gpointer data)
  GST_DEBUG_CATEGORY_INIT (gst_va_vp9dec_debug, "vavp9dec", 0,
/* Register a GstVaVp9Dec element type for @device with the given
 * @sink_caps/@src_caps. The first registered decoder keeps the plain
 * "vavp9dec" name; further devices get names derived from their render
 * node. Returns %TRUE when the element feature was registered. */
gst_va_vp9_dec_register (GstPlugin * plugin, GstVaDevice * device,
    GstCaps * sink_caps, GstCaps * src_caps, guint rank)
  static GOnce debug_once = G_ONCE_INIT;
  GTypeInfo type_info = {
    .class_size = sizeof (GstVaVp9DecClass),
    .class_init = gst_va_vp9_dec_class_init,
    .instance_size = sizeof (GstVaVp9Dec),
    .instance_init = gst_va_vp9_dec_init,

  gchar *type_name, *feature_name;

  g_return_val_if_fail (GST_IS_PLUGIN (plugin), FALSE);
  g_return_val_if_fail (GST_IS_VA_DEVICE (device), FALSE);
  g_return_val_if_fail (GST_IS_CAPS (sink_caps), FALSE);
  g_return_val_if_fail (GST_IS_CAPS (src_caps), FALSE);

  /* Ownership of cdata transfers to class_init via type_info.class_data. */
  cdata = g_new (struct CData, 1);
  cdata->description = NULL;
  cdata->render_device_path = g_strdup (device->render_device_path);
  cdata->sink_caps = _complete_sink_caps (sink_caps);
  cdata->src_caps = gst_caps_ref (src_caps);

  /* class data will be leaked if the element never gets instantiated */
  GST_MINI_OBJECT_FLAG_SET (sink_caps, GST_MINI_OBJECT_FLAG_MAY_BE_LEAKED);
  GST_MINI_OBJECT_FLAG_SET (src_caps, GST_MINI_OBJECT_FLAG_MAY_BE_LEAKED);

  type_info.class_data = cdata;

  type_name = g_strdup ("GstVaVp9Dec");
  feature_name = g_strdup ("vavp9dec");

  /* The first decoder to be registered should use a constant name,
   * like vavp9dec, for any additional decoders, we create unique
   * names, using inserting the render device name. */
  if (g_type_from_name (type_name)) {
    gchar *basename = g_path_get_basename (device->render_device_path);
    g_free (feature_name);
    type_name = g_strdup_printf ("GstVa%sVp9Dec", basename);
    feature_name = g_strdup_printf ("va%svp9dec", basename);
    /* cdata takes ownership of basename as the device description. */
    cdata->description = basename;

    /* lower rank for non-first device */

  g_once (&debug_once, _register_debug_category, NULL);

  type = g_type_register_static (GST_TYPE_VP9_DECODER,
      type_name, &type_info, 0);

  ret = gst_element_register (plugin, feature_name, rank, type);

  g_free (feature_name);