2 * Copyright (C) 2020 Intel Corporation
3 * Author: He Junyan <junyan.he@intel.com>
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Library General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Library General Public License for more details.
15 * You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
17 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
18 * Boston, MA 02110-1301, USA.
22 * SECTION:element-vavp8dec
24 * @short_description: A VA-API based VP8 video decoder
26 * vavp8dec decodes VP8 bitstreams to VA surfaces using the
27 * installed and chosen [VA-API](https://01.org/linuxmedia/vaapi)
30 * The decoding surfaces can be mapped onto main memory as video
33 * ## Example launch line
35 * gst-launch-1.0 filesrc location=sample.webm ! parsebin ! vavp8dec ! autovideosink
46 #include "gstvavp8dec.h"
48 #include "gstvabasedec.h"
50 GST_DEBUG_CATEGORY_STATIC (gst_va_vp8dec_debug);
51 #ifndef GST_DISABLE_GST_DEBUG
52 #define GST_CAT_DEFAULT gst_va_vp8dec_debug
54 #define GST_CAT_DEFAULT NULL
57 #define GST_VA_VP8_DEC(obj) ((GstVaVp8Dec *) obj)
58 #define GST_VA_VP8_DEC_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS ((obj), G_TYPE_FROM_INSTANCE (obj), GstVaVp8DecClass))
59 #define GST_VA_VP8_DEC_CLASS(klass) ((GstVaVp8DecClass *) klass)
61 typedef struct _GstVaVp8Dec GstVaVp8Dec;
62 typedef struct _GstVaVp8DecClass GstVaVp8DecClass;
64 struct _GstVaVp8DecClass
66 GstVaBaseDecClass parent_class;
73 GstFlowReturn last_ret;
75 gboolean need_negotiation;
78 static GstElementClass *parent_class = NULL;
81 static const gchar *src_caps_str =
82 GST_VIDEO_CAPS_MAKE_WITH_FEATURES (GST_CAPS_FEATURE_MEMORY_VA,
84 GST_VIDEO_CAPS_MAKE ("{ NV12 }");
87 static const gchar *sink_caps_str = "video/x-vp8";
90 gst_va_vp8_dec_negotiate (GstVideoDecoder * decoder)
92 GstCapsFeatures *capsfeatures = NULL;
93 GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
94 GstVaVp8Dec *self = GST_VA_VP8_DEC (decoder);
95 GstVideoFormat format = GST_VIDEO_FORMAT_UNKNOWN;
96 GstVp8Decoder *vp8dec = GST_VP8_DECODER (decoder);
98 /* Ignore downstream renegotiation request. */
99 if (!self->need_negotiation)
102 self->need_negotiation = FALSE;
104 if (gst_va_decoder_is_open (base->decoder)
105 && !gst_va_decoder_close (base->decoder))
108 if (!gst_va_decoder_open (base->decoder, base->profile, base->rt_format))
111 if (!gst_va_decoder_set_frame_size (base->decoder, base->width, base->height))
114 if (base->output_state)
115 gst_video_codec_state_unref (base->output_state);
117 gst_va_base_dec_get_preferred_format_and_caps_features (base, &format,
121 gst_video_decoder_set_output_state (decoder, format,
122 base->width, base->height, vp8dec->input_state);
124 base->output_state->caps = gst_video_info_to_caps (&base->output_state->info);
126 gst_caps_set_features_simple (base->output_state->caps, capsfeatures);
128 GST_INFO_OBJECT (self, "Negotiated caps %" GST_PTR_FORMAT,
129 base->output_state->caps);
131 return GST_VIDEO_DECODER_CLASS (parent_class)->negotiate (decoder);
135 _get_profile (GstVaVp8Dec * self, const GstVp8FrameHdr * frame_hdr)
138 if (frame_hdr->version > 3) {
139 GST_ERROR_OBJECT (self, "Unsupported vp8 version: %d", frame_hdr->version);
140 return VAProfileNone;
143 return VAProfileVP8Version0_3;
147 gst_va_vp8_dec_new_sequence (GstVp8Decoder * decoder,
148 const GstVp8FrameHdr * frame_hdr)
150 GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
151 GstVaVp8Dec *self = GST_VA_VP8_DEC (decoder);
154 gboolean negotiation_needed = FALSE;
156 GST_LOG_OBJECT (self, "new sequence");
158 profile = _get_profile (self, frame_hdr);
159 if (profile == VAProfileNone)
160 return GST_FLOW_NOT_NEGOTIATED;
162 if (!gst_va_decoder_has_profile (base->decoder, profile)) {
163 GST_ERROR_OBJECT (self, "Profile %s is not supported",
164 gst_va_profile_name (profile));
165 return GST_FLOW_NOT_NEGOTIATED;
168 /* VP8 always use 8 bits 4:2:0 */
169 rt_format = VA_RT_FORMAT_YUV420;
171 if (!gst_va_decoder_config_is_equal (base->decoder, profile,
172 rt_format, frame_hdr->width, frame_hdr->height)) {
173 base->profile = profile;
174 base->width = frame_hdr->width;
175 base->height = frame_hdr->height;
176 base->rt_format = rt_format;
177 negotiation_needed = TRUE;
180 base->min_buffers = 3 + 4; /* max num pic references + scratch surfaces */
182 if (negotiation_needed) {
183 self->need_negotiation = TRUE;
184 if (!gst_video_decoder_negotiate (GST_VIDEO_DECODER (self))) {
185 GST_ERROR_OBJECT (self, "Failed to negotiate with downstream");
186 return GST_FLOW_NOT_NEGOTIATED;
194 gst_va_vp8_dec_new_picture (GstVp8Decoder * decoder,
195 GstVideoCodecFrame * frame, GstVp8Picture * picture)
197 GstVaVp8Dec *self = GST_VA_VP8_DEC (decoder);
198 GstVaDecodePicture *pic;
199 GstVideoDecoder *vdec = GST_VIDEO_DECODER (decoder);
200 GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
202 self->last_ret = gst_video_decoder_allocate_output_frame (vdec, frame);
203 if (self->last_ret != GST_FLOW_OK)
206 pic = gst_va_decode_picture_new (base->decoder, frame->output_buffer);
208 gst_vp8_picture_set_user_data (picture, pic,
209 (GDestroyNotify) gst_va_decode_picture_free);
211 GST_LOG_OBJECT (self, "New va decode picture %p - %#x", pic,
212 gst_va_decode_picture_get_surface (pic));
218 GST_WARNING_OBJECT (self,
219 "Failed to allocated output buffer, return %s",
220 gst_flow_get_name (self->last_ret));
221 return self->last_ret;
226 _fill_quant_matrix (GstVp8Decoder * decoder, GstVp8Picture * picture,
227 GstVp8Parser * parser)
229 GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
230 GstVp8FrameHdr const *frame_hdr = &picture->frame_hdr;
231 GstVp8Segmentation *const seg = &parser->segmentation;
232 VAIQMatrixBufferVP8 iq_matrix = { };
233 const gint8 QI_MAX = 127;
237 /* Fill in VAIQMatrixBufferVP8 */
238 for (i = 0; i < 4; i++) {
239 if (seg->segmentation_enabled) {
240 qi_base = seg->quantizer_update_value[i];
241 if (!seg->segment_feature_mode) /* 0 means delta update */
242 qi_base += frame_hdr->quant_indices.y_ac_qi;
244 qi_base = frame_hdr->quant_indices.y_ac_qi;
247 iq_matrix.quantization_index[i][0] = CLAMP (qi, 0, QI_MAX);
248 qi = qi_base + frame_hdr->quant_indices.y_dc_delta;
249 iq_matrix.quantization_index[i][1] = CLAMP (qi, 0, QI_MAX);
250 qi = qi_base + frame_hdr->quant_indices.y2_dc_delta;
251 iq_matrix.quantization_index[i][2] = CLAMP (qi, 0, QI_MAX);
252 qi = qi_base + frame_hdr->quant_indices.y2_ac_delta;
253 iq_matrix.quantization_index[i][3] = CLAMP (qi, 0, QI_MAX);
254 qi = qi_base + frame_hdr->quant_indices.uv_dc_delta;
255 iq_matrix.quantization_index[i][4] = CLAMP (qi, 0, QI_MAX);
256 qi = qi_base + frame_hdr->quant_indices.uv_ac_delta;
257 iq_matrix.quantization_index[i][5] = CLAMP (qi, 0, QI_MAX);
260 return gst_va_decoder_add_param_buffer (base->decoder,
261 gst_vp8_picture_get_user_data (picture), VAIQMatrixBufferType, &iq_matrix,
266 _fill_probability_table (GstVp8Decoder * decoder, GstVp8Picture * picture)
268 GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
269 GstVp8FrameHdr const *frame_hdr = &picture->frame_hdr;
270 VAProbabilityDataBufferVP8 prob_table = { };
272 /* Fill in VAProbabilityDataBufferVP8 */
273 memcpy (prob_table.dct_coeff_probs, frame_hdr->token_probs.prob,
274 sizeof (frame_hdr->token_probs.prob));
276 return gst_va_decoder_add_param_buffer (base->decoder,
277 gst_vp8_picture_get_user_data (picture), VAProbabilityBufferType,
278 &prob_table, sizeof (prob_table));
282 _fill_picture (GstVp8Decoder * decoder, GstVp8Picture * picture,
283 GstVp8Parser * parser)
285 GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
286 GstVaDecodePicture *va_pic;
287 VAPictureParameterBufferVP8 pic_param;
288 GstVp8FrameHdr const *frame_hdr = &picture->frame_hdr;
289 GstVp8Segmentation *const seg = &parser->segmentation;
292 if (!_fill_quant_matrix (decoder, picture, parser))
295 if (!_fill_probability_table (decoder, picture))
299 pic_param = (VAPictureParameterBufferVP8) {
300 .frame_width = base->width,
301 .frame_height = base->height,
302 .last_ref_frame = VA_INVALID_SURFACE,
303 .golden_ref_frame = VA_INVALID_SURFACE,
304 .alt_ref_frame = VA_INVALID_SURFACE,
305 .out_of_loop_frame = VA_INVALID_SURFACE, // not used currently
306 .pic_fields.bits.key_frame = !frame_hdr->key_frame,
307 .pic_fields.bits.version = frame_hdr->version,
308 .pic_fields.bits.segmentation_enabled = seg->segmentation_enabled,
309 .pic_fields.bits.update_mb_segmentation_map =
310 seg->update_mb_segmentation_map,
311 .pic_fields.bits.update_segment_feature_data =
312 seg->update_segment_feature_data,
313 .pic_fields.bits.filter_type = frame_hdr->filter_type,
314 .pic_fields.bits.sharpness_level = frame_hdr->sharpness_level,
315 .pic_fields.bits.loop_filter_adj_enable =
316 parser->mb_lf_adjust.loop_filter_adj_enable,
317 .pic_fields.bits.mode_ref_lf_delta_update =
318 parser->mb_lf_adjust.mode_ref_lf_delta_update,
319 .pic_fields.bits.sign_bias_golden = frame_hdr->sign_bias_golden,
320 .pic_fields.bits.sign_bias_alternate = frame_hdr->sign_bias_alternate,
321 .pic_fields.bits.mb_no_coeff_skip = frame_hdr->mb_no_skip_coeff,
322 /* In decoding, the only loop filter settings that matter are those
323 in the frame header (9.1) */
324 .pic_fields.bits.loop_filter_disable = frame_hdr->loop_filter_level == 0,
325 .prob_skip_false = frame_hdr->prob_skip_false,
326 .prob_intra = frame_hdr->prob_intra,
327 .prob_last = frame_hdr->prob_last,
328 .prob_gf = frame_hdr->prob_gf,
329 .bool_coder_ctx.range = frame_hdr->rd_range,
330 .bool_coder_ctx.value = frame_hdr->rd_value,
331 .bool_coder_ctx.count = frame_hdr->rd_count,
335 if (!frame_hdr->key_frame) {
336 if (decoder->last_picture) {
337 va_pic = gst_vp8_picture_get_user_data (decoder->last_picture);
338 pic_param.last_ref_frame = gst_va_decode_picture_get_surface (va_pic);
340 if (decoder->golden_ref_picture) {
341 va_pic = gst_vp8_picture_get_user_data (decoder->golden_ref_picture);
342 pic_param.golden_ref_frame = gst_va_decode_picture_get_surface (va_pic);
344 if (decoder->alt_ref_picture) {
345 va_pic = gst_vp8_picture_get_user_data (decoder->alt_ref_picture);
346 pic_param.alt_ref_frame = gst_va_decode_picture_get_surface (va_pic);
350 for (i = 0; i < 3; i++)
351 pic_param.mb_segment_tree_probs[i] = seg->segment_prob[i];
353 for (i = 0; i < 4; i++) {
355 if (seg->segmentation_enabled) {
356 level = seg->lf_update_value[i];
357 /* 0 means delta update */
358 if (!seg->segment_feature_mode)
359 level += frame_hdr->loop_filter_level;
361 level = frame_hdr->loop_filter_level;
362 pic_param.loop_filter_level[i] = CLAMP (level, 0, 63);
364 pic_param.loop_filter_deltas_ref_frame[i] =
365 parser->mb_lf_adjust.ref_frame_delta[i];
366 pic_param.loop_filter_deltas_mode[i] =
367 parser->mb_lf_adjust.mb_mode_delta[i];
370 memcpy (pic_param.y_mode_probs, frame_hdr->mode_probs.y_prob,
371 sizeof (frame_hdr->mode_probs.y_prob));
372 memcpy (pic_param.uv_mode_probs, frame_hdr->mode_probs.uv_prob,
373 sizeof (frame_hdr->mode_probs.uv_prob));
374 memcpy (pic_param.mv_probs, frame_hdr->mv_probs.prob,
375 sizeof (frame_hdr->mv_probs));
377 va_pic = gst_vp8_picture_get_user_data (picture);
378 return gst_va_decoder_add_param_buffer (base->decoder, va_pic,
379 VAPictureParameterBufferType, &pic_param, sizeof (pic_param));
383 _add_slice (GstVp8Decoder * decoder, GstVp8Picture * picture,
384 GstVp8Parser * parser)
386 GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
387 GstVp8FrameHdr const *frame_hdr = &picture->frame_hdr;
388 VASliceParameterBufferVP8 slice_param;
389 GstVaDecodePicture *va_pic;
393 slice_param = (VASliceParameterBufferVP8) {
394 .slice_data_size = picture->size,
395 .slice_data_offset = frame_hdr->data_chunk_size,
396 .macroblock_offset = frame_hdr->header_size,
397 .num_of_partitions = (1 << frame_hdr->log2_nbr_of_dct_partitions) + 1,
401 slice_param.partition_size[0] =
402 frame_hdr->first_part_size - ((slice_param.macroblock_offset + 7) >> 3);
403 for (i = 1; i < slice_param.num_of_partitions; i++)
404 slice_param.partition_size[i] = frame_hdr->partition_size[i - 1];
405 for (; i < G_N_ELEMENTS (slice_param.partition_size); i++)
406 slice_param.partition_size[i] = 0;
408 va_pic = gst_vp8_picture_get_user_data (picture);
409 return gst_va_decoder_add_slice_buffer (base->decoder, va_pic, &slice_param,
410 sizeof (slice_param), (gpointer) picture->data, picture->size);
414 gst_va_vp8_dec_decode_picture (GstVp8Decoder * decoder, GstVp8Picture * picture,
415 GstVp8Parser * parser)
417 if (_fill_picture (decoder, picture, parser) &&
418 _add_slice (decoder, picture, parser))
421 return GST_FLOW_ERROR;
425 gst_va_vp8_dec_end_picture (GstVp8Decoder * decoder, GstVp8Picture * picture)
427 GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
428 GstVaDecodePicture *va_pic;
430 GST_LOG_OBJECT (base, "end picture %p, (system_frame_number %d)",
431 picture, picture->system_frame_number);
433 va_pic = gst_vp8_picture_get_user_data (picture);
435 if (!gst_va_decoder_decode (base->decoder, va_pic))
436 return GST_FLOW_ERROR;
442 gst_va_vp8_dec_output_picture (GstVp8Decoder * decoder,
443 GstVideoCodecFrame * frame, GstVp8Picture * picture)
445 GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
446 GstVaVp8Dec *self = GST_VA_VP8_DEC (decoder);
448 GST_LOG_OBJECT (self,
449 "Outputting picture %p (system_frame_number %d)",
450 picture, picture->system_frame_number);
452 if (self->last_ret != GST_FLOW_OK) {
453 gst_vp8_picture_unref (picture);
454 gst_video_decoder_drop_frame (GST_VIDEO_DECODER (self), frame);
455 return self->last_ret;
458 if (base->copy_frames)
459 gst_va_base_dec_copy_output_buffer (base, frame);
461 gst_vp8_picture_unref (picture);
463 return gst_video_decoder_finish_frame (GST_VIDEO_DECODER (self), frame);
467 gst_va_vp8_dec_init (GTypeInstance * instance, gpointer g_class)
469 gst_va_base_dec_init (GST_VA_BASE_DEC (instance), GST_CAT_DEFAULT);
473 gst_va_vp8_dec_dispose (GObject * object)
475 gst_va_base_dec_close (GST_VIDEO_DECODER (object));
476 G_OBJECT_CLASS (parent_class)->dispose (object);
480 gst_va_vp8_dec_class_init (gpointer g_class, gpointer class_data)
482 GstCaps *src_doc_caps, *sink_doc_caps;
483 GObjectClass *gobject_class = G_OBJECT_CLASS (g_class);
484 GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);
485 GstVp8DecoderClass *vp8decoder_class = GST_VP8_DECODER_CLASS (g_class);
486 GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_CLASS (g_class);
487 struct CData *cdata = class_data;
490 if (cdata->description) {
491 long_name = g_strdup_printf ("VA-API VP8 Decoder in %s",
494 long_name = g_strdup ("VA-API VP8 Decoder");
497 gst_element_class_set_metadata (element_class, long_name,
498 "Codec/Decoder/Video/Hardware",
499 "VA-API based VP8 video decoder", "He Junyan <junyan.he@intel.com>");
501 sink_doc_caps = gst_caps_from_string (sink_caps_str);
502 src_doc_caps = gst_caps_from_string (src_caps_str);
504 parent_class = g_type_class_peek_parent (g_class);
506 gst_va_base_dec_class_init (GST_VA_BASE_DEC_CLASS (g_class), VP8,
507 cdata->render_device_path, cdata->sink_caps, cdata->src_caps,
508 src_doc_caps, sink_doc_caps);
510 gobject_class->dispose = gst_va_vp8_dec_dispose;
512 decoder_class->negotiate = GST_DEBUG_FUNCPTR (gst_va_vp8_dec_negotiate);
514 vp8decoder_class->new_sequence =
515 GST_DEBUG_FUNCPTR (gst_va_vp8_dec_new_sequence);
516 vp8decoder_class->new_picture =
517 GST_DEBUG_FUNCPTR (gst_va_vp8_dec_new_picture);
518 vp8decoder_class->decode_picture =
519 GST_DEBUG_FUNCPTR (gst_va_vp8_dec_decode_picture);
520 vp8decoder_class->end_picture =
521 GST_DEBUG_FUNCPTR (gst_va_vp8_dec_end_picture);
522 vp8decoder_class->output_picture =
523 GST_DEBUG_FUNCPTR (gst_va_vp8_dec_output_picture);
526 g_free (cdata->description);
527 g_free (cdata->render_device_path);
528 gst_caps_unref (cdata->src_caps);
529 gst_caps_unref (cdata->sink_caps);
534 _register_debug_category (gpointer data)
536 GST_DEBUG_CATEGORY_INIT (gst_va_vp8dec_debug, "vavp8dec", 0,
543 gst_va_vp8_dec_register (GstPlugin * plugin, GstVaDevice * device,
544 GstCaps * sink_caps, GstCaps * src_caps, guint rank)
546 static GOnce debug_once = G_ONCE_INIT;
548 GTypeInfo type_info = {
549 .class_size = sizeof (GstVaVp8DecClass),
550 .class_init = gst_va_vp8_dec_class_init,
551 .instance_size = sizeof (GstVaVp8Dec),
552 .instance_init = gst_va_vp8_dec_init,
556 gchar *type_name, *feature_name;
558 g_return_val_if_fail (GST_IS_PLUGIN (plugin), FALSE);
559 g_return_val_if_fail (GST_IS_VA_DEVICE (device), FALSE);
560 g_return_val_if_fail (GST_IS_CAPS (sink_caps), FALSE);
561 g_return_val_if_fail (GST_IS_CAPS (src_caps), FALSE);
563 cdata = g_new (struct CData, 1);
564 cdata->description = NULL;
565 cdata->render_device_path = g_strdup (device->render_device_path);
566 cdata->sink_caps = gst_caps_ref (sink_caps);
567 cdata->src_caps = gst_caps_ref (src_caps);
569 /* class data will be leaked if the element never gets instantiated */
570 GST_MINI_OBJECT_FLAG_SET (cdata->sink_caps,
571 GST_MINI_OBJECT_FLAG_MAY_BE_LEAKED);
572 GST_MINI_OBJECT_FLAG_SET (src_caps, GST_MINI_OBJECT_FLAG_MAY_BE_LEAKED);
574 type_info.class_data = cdata;
576 type_name = g_strdup ("GstVaVp8dec");
577 feature_name = g_strdup ("vavp8dec");
579 /* The first decoder to be registered should use a constant name,
580 * like vavp8dec, for any additional decoders, we create unique
581 * names, using inserting the render device name. */
582 if (g_type_from_name (type_name)) {
583 gchar *basename = g_path_get_basename (device->render_device_path);
585 g_free (feature_name);
586 type_name = g_strdup_printf ("GstVa%sVP8Dec", basename);
587 feature_name = g_strdup_printf ("va%svp8dec", basename);
588 cdata->description = basename;
590 /* lower rank for non-first device */
595 g_once (&debug_once, _register_debug_category, NULL);
597 type = g_type_register_static (GST_TYPE_VP8_DECODER,
598 type_name, &type_info, 0);
600 ret = gst_element_register (plugin, feature_name, rank, type);
603 g_free (feature_name);