#define GST_PLUGIN_NAME "vaapidecode"
#define GST_PLUGIN_DESC "A VA-API based video decoder"
+#define GST_VAAPI_DECODE_FLOW_PARSE_DATA GST_FLOW_CUSTOM_SUCCESS_2
+
GST_DEBUG_CATEGORY_STATIC(gst_debug_vaapidecode);
#define GST_CAT_DEFAULT gst_debug_vaapidecode
static const char gst_vaapidecode_src_caps_str[] =
#if GST_CHECK_VERSION(1,1,0)
GST_VIDEO_CAPS_MAKE_WITH_FEATURES(
- GST_CAPS_FEATURE_MEMORY_VAAPI_SURFACE, "{ ENCODED, NV12, I420, YV12 }") ";"
+ GST_CAPS_FEATURE_MEMORY_VAAPI_SURFACE, "{ ENCODED, I420, YV12, NV12 }") ";"
GST_VIDEO_CAPS_MAKE_WITH_FEATURES(
GST_CAPS_FEATURE_META_GST_VIDEO_GL_TEXTURE_UPLOAD_META, "RGBA") ";"
- GST_VIDEO_CAPS_MAKE("{ NV12, I420, YV12 }");
+ GST_VIDEO_CAPS_MAKE("{ I420, YV12, NV12 }");
#else
GST_VAAPI_SURFACE_CAPS;
#endif
return TRUE;
}
+#if GST_CHECK_VERSION(1,1,0)
+/* Re-targets @info to @format at @width x @height while preserving the
+ * stream-description fields (interlace mode, flags, views, pixel aspect
+ * ratio, framerate) that gst_video_info_set_format() would otherwise
+ * reinitialize to defaults. Only needed on GStreamer >= 1.1. */
+static void
+gst_vaapidecode_video_info_change_format(GstVideoInfo *info,
+ GstVideoFormat format, guint width, guint height)
+{
+ GstVideoInfo vi = *info;
+
+ gst_video_info_set_format (info, format, width, height);
+
+ /* gst_video_info_set_format() resets the whole structure; restore the
+  * fields that describe the stream rather than the pixel format. */
+ info->interlace_mode = vi.interlace_mode;
+ info->flags = vi.flags;
+ info->views = vi.views;
+ info->par_n = vi.par_n;
+ info->par_d = vi.par_d;
+ info->fps_n = vi.fps_n;
+ info->fps_d = vi.fps_d;
+}
+#endif
+
static gboolean
gst_vaapidecode_update_src_caps(GstVaapiDecode *decode,
const GstVideoCodecState *ref_state)
GstVideoDecoder * const vdec = GST_VIDEO_DECODER(decode);
GstVideoCodecState *state;
GstVideoInfo *vi, vis;
+ GstVideoFormat format, out_format;
#if GST_CHECK_VERSION(1,1,0)
GstCapsFeatures *features = NULL;
GstVaapiCapsFeature feature;
GST_VIDEO_INFO_FORMAT(&ref_state->info));
#endif
- state = gst_video_decoder_set_output_state(vdec,
- GST_VIDEO_INFO_FORMAT(&ref_state->info),
+ format = GST_VIDEO_INFO_FORMAT(&ref_state->info);
+
+ state = gst_video_decoder_set_output_state(vdec, format,
ref_state->info.width, ref_state->info.height,
(GstVideoCodecState *)ref_state);
if (!state)
return FALSE;
vi = &state->info;
- if (GST_VIDEO_INFO_FORMAT(vi) == GST_VIDEO_FORMAT_ENCODED) {
+ out_format = format;
+ if (format == GST_VIDEO_FORMAT_ENCODED) {
+ out_format = GST_VIDEO_FORMAT_I420;
gst_video_info_init(&vis);
- gst_video_info_set_format(&vis, GST_VIDEO_FORMAT_NV12,
+ gst_video_info_set_format(&vis, out_format,
GST_VIDEO_INFO_WIDTH(vi), GST_VIDEO_INFO_HEIGHT(vi));
vi->size = vis.size;
}
vis = *vi;
switch (feature) {
case GST_VAAPI_CAPS_FEATURE_GL_TEXTURE_UPLOAD_META:
- gst_video_info_set_format(&vis, GST_VIDEO_FORMAT_RGBA,
+ gst_vaapidecode_video_info_change_format(&vis, GST_VIDEO_FORMAT_RGBA,
GST_VIDEO_INFO_WIDTH(vi), GST_VIDEO_INFO_HEIGHT(vi));
features = gst_caps_features_new(
GST_CAPS_FEATURE_META_GST_VIDEO_GL_TEXTURE_UPLOAD_META, NULL);
break;
default:
- if (GST_VIDEO_INFO_FORMAT(vi) == GST_VIDEO_FORMAT_ENCODED) {
+ if (format == GST_VIDEO_FORMAT_ENCODED) {
/* XXX: this is a workaround until auto-plugging is fixed when
format=ENCODED + memory:VASurface caps feature are provided.
Meanwhile, providing a random format here works but this is
a terribly wrong thing per se. */
- gst_video_info_set_format(&vis, GST_VIDEO_FORMAT_NV12,
+ gst_vaapidecode_video_info_change_format(&vis, out_format,
GST_VIDEO_INFO_WIDTH(vi), GST_VIDEO_INFO_HEIGHT(vi));
#if GST_CHECK_VERSION(1,3,0)
if (feature == GST_VAAPI_CAPS_FEATURE_VAAPI_SURFACE)
guint size, min, max;
gboolean need_pool, update_pool;
gboolean has_video_meta = FALSE;
+ gboolean has_video_alignment = FALSE;
GstVideoCodecState *state;
#if GST_CHECK_VERSION(1,1,0) && USE_GLX
+ gboolean has_texture_upload_meta = FALSE;
GstCapsFeatures *features, *features2;
#endif
features = gst_caps_get_features(state->caps, 0);
features2 = gst_caps_features_new(GST_CAPS_FEATURE_META_GST_VIDEO_GL_TEXTURE_UPLOAD_META, NULL);
+ has_texture_upload_meta =
+ gst_vaapi_find_preferred_caps_feature(GST_VIDEO_DECODER_SRC_PAD(vdec),
+ GST_VIDEO_FORMAT_ENCODED) ==
+ GST_VAAPI_CAPS_FEATURE_GL_TEXTURE_UPLOAD_META;
+
/* Update src caps if feature is not handled downstream */
if (!decode->has_texture_upload_meta &&
gst_caps_features_is_equal(features, features2))
gst_vaapidecode_update_src_caps (decode, state);
+ else if (has_texture_upload_meta &&
+ !gst_caps_features_is_equal(features, features2)) {
+ gst_video_info_set_format(&state->info, GST_VIDEO_FORMAT_RGBA,
+ state->info.width,
+ state->info.height);
+ gst_vaapidecode_update_src_caps(decode, state);
+ }
gst_caps_features_free(features2);
#endif
gst_video_info_init(&vi);
gst_video_info_from_caps(&vi, caps);
if (GST_VIDEO_INFO_FORMAT(&vi) == GST_VIDEO_FORMAT_ENCODED)
- gst_video_info_set_format(&vi, GST_VIDEO_FORMAT_NV12,
+ gst_video_info_set_format(&vi, GST_VIDEO_FORMAT_I420,
GST_VIDEO_INFO_WIDTH(&vi), GST_VIDEO_INFO_HEIGHT(&vi));
g_return_val_if_fail(GST_VAAPI_PLUGIN_BASE_DISPLAY(decode) != NULL, FALSE);
gst_query_parse_nth_allocation_pool(query, 0, &pool, &size, &min, &max);
size = MAX(size, vi.size);
update_pool = TRUE;
+
+ /* Check whether downstream element proposed a bufferpool but did
+ not provide a correct propose_allocation() implementation */
+ has_video_alignment = gst_buffer_pool_has_option(pool,
+ GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT);
}
else {
pool = NULL;
GST_BUFFER_POOL_OPTION_VAAPI_VIDEO_META)) {
GST_INFO("no pool or doesn't support GstVaapiVideoMeta, "
"making new pool");
+ if (pool)
+ gst_object_unref(pool);
pool = gst_vaapi_video_buffer_pool_new(
GST_VAAPI_PLUGIN_BASE_DISPLAY(decode));
if (!pool)
#endif
gst_buffer_pool_set_config(pool, config);
}
+ else if (has_video_alignment) {
+ config = gst_buffer_pool_get_config(pool);
+ gst_buffer_pool_config_add_option(config,
+ GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT);
+ gst_buffer_pool_set_config(pool, config);
+ }
if (update_pool)
gst_query_set_nth_allocation_pool(query, 0, pool, size, min, max);
break;
case GST_VAAPI_CODEC_H264:
decode->decoder = gst_vaapi_decoder_h264_new(dpy, caps);
+
+ /* Set the stream buffer alignment for better optimizations */
+ if (decode->decoder && caps) {
+ GstStructure * const structure = gst_caps_get_structure(caps, 0);
+ const gchar *str = NULL;
+
+ if ((str = gst_structure_get_string(structure, "alignment"))) {
+ GstVaapiStreamAlignH264 alignment;
+ if (g_strcmp0(str, "au") == 0)
+ alignment = GST_VAAPI_STREAM_ALIGN_H264_AU;
+ else if (g_strcmp0(str, "nal") == 0)
+ alignment = GST_VAAPI_STREAM_ALIGN_H264_NALU;
+ else
+ alignment = GST_VAAPI_STREAM_ALIGN_H264_NONE;
+ gst_vaapi_decoder_h264_set_alignment(
+ GST_VAAPI_DECODER_H264(decode->decoder), alignment);
+ }
+ }
break;
case GST_VAAPI_CODEC_WMV3:
case GST_VAAPI_CODEC_VC1:
decode->decoder = gst_vaapi_decoder_jpeg_new(dpy, caps);
break;
#endif
+#if USE_VP8_DECODER
case GST_VAAPI_CODEC_VP8:
decode->decoder = gst_vaapi_decoder_vp8_new(dpy, caps);
break;
+#endif
default:
decode->decoder = NULL;
break;
}
static GstFlowReturn
-gst_vaapidecode_parse(GstVideoDecoder *vdec,
+gst_vaapidecode_parse_frame(GstVideoDecoder *vdec,
GstVideoCodecFrame *frame, GstAdapter *adapter, gboolean at_eos)
{
GstVaapiDecode * const decode = GST_VAAPIDECODE(vdec);
decode->current_frame_size = 0;
}
else
- ret = GST_FLOW_OK;
+ ret = GST_VAAPI_DECODE_FLOW_PARSE_DATA;
break;
case GST_VAAPI_DECODER_STATUS_ERROR_NO_DATA:
ret = GST_VIDEO_DECODER_FLOW_NEED_DATA;
return ret;
}
+/* GstVideoDecoder::parse() implementation: drives
+ * gst_vaapidecode_parse_frame() in a loop for as long as it reports
+ * GST_VAAPI_DECODE_FLOW_PARSE_DATA (i.e. more parsable data remains in
+ * @adapter), then propagates the final flow return to the base class. */
+static GstFlowReturn
+gst_vaapidecode_parse(GstVideoDecoder *vdec,
+ GstVideoCodecFrame *frame, GstAdapter *adapter, gboolean at_eos)
+{
+ GstFlowReturn ret;
+
+ do {
+ ret = gst_vaapidecode_parse_frame(vdec, frame, adapter, at_eos);
+ } while (ret == GST_VAAPI_DECODE_FLOW_PARSE_DATA);
+ return ret;
+}
+
static GstStateChangeReturn
gst_vaapidecode_change_state (GstElement * element, GstStateChange transition)
{