/*
 * Copyright (c) 2019 Guo Yejun
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * implementing a generic image processing filter using deep learning networks.
 */
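
/*
 * Illustrative invocation (the model file "halve_gray_float.model" and the
 * tensor names "dnn_in"/"dnn_out" are placeholders; they must match the
 * user's own network):
 *
 *   ffmpeg -i input.jpg -vf \
 *     dnn_processing=dnn_backend=tensorflow:model=halve_gray_float.model:input=dnn_in:output=dnn_out \
 *     output.jpg
 */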

#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "filters.h"
#include "dnn_filter_common.h"
#include "formats.h"
#include "internal.h"
#include "libswscale/swscale.h"
#include "libavutil/time.h"

typedef struct DnnProcessingContext {
    const AVClass *class;
    DnnContext dnnctx;
    struct SwsContext *sws_uv_scale; // chroma rescaler, set when the model changes the frame size
    int sws_uv_height;               // number of chroma rows fed to sws_uv_scale per frame
} DnnProcessingContext;

#define OFFSET(x) offsetof(DnnProcessingContext, dnnctx.x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
static const AVOption dnn_processing_options[] = {
    { "dnn_backend", "DNN backend",             OFFSET(backend_type), AV_OPT_TYPE_INT,   { .i64 = DNN_TF }, INT_MIN, INT_MAX, FLAGS, "backend" },
#if (CONFIG_LIBTENSORFLOW == 1)
    { "tensorflow",  "tensorflow backend flag", 0,                    AV_OPT_TYPE_CONST, { .i64 = DNN_TF }, 0, 0, FLAGS, "backend" },
#endif
#if (CONFIG_LIBOPENVINO == 1)
    { "openvino",    "openvino backend flag",   0,                    AV_OPT_TYPE_CONST, { .i64 = DNN_OV }, 0, 0, FLAGS, "backend" },
#endif
    DNN_COMMON_OPTIONS
    { NULL }
};

AVFILTER_DEFINE_CLASS(dnn_processing);
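
/*
 * Note: OFFSET() points into the embedded DnnContext, so the options above
 * (plus the shared ones pulled in by DNN_COMMON_OPTIONS, e.g. the model path
 * and the input/output tensor names) are written by the AVClass machinery
 * straight into ctx->dnnctx, where ff_dnn_init() picks them up.
 */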

static av_cold int init(AVFilterContext *context)
{
    DnnProcessingContext *ctx = context->priv;
    return ff_dnn_init(&ctx->dnnctx, DFT_PROCESS_FRAME, context);
}

static const enum AVPixelFormat pix_fmts[] = {
    AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
    AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAYF32,
    AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
    AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
    AV_PIX_FMT_NV12,
    AV_PIX_FMT_NONE
};

#define LOG_FORMAT_CHANNEL_MISMATCH()                       \
    av_log(ctx, AV_LOG_ERROR,                               \
           "the frame's format %s does not match "          \
           "the model input channel %d\n",                  \
           av_get_pix_fmt_name(fmt),                        \
           model_input->channels);

static int check_modelinput_inlink(const DNNData *model_input, const AVFilterLink *inlink)
{
    AVFilterContext *ctx   = inlink->dst;
    enum AVPixelFormat fmt = inlink->format;

    // by design, an explicit scale filter is expected before this filter
    if (model_input->height != -1 && model_input->height != inlink->h) {
        av_log(ctx, AV_LOG_ERROR, "the model requires frame height %d but got %d\n",
                                   model_input->height, inlink->h);
        return AVERROR(EIO);
    }
    if (model_input->width != -1 && model_input->width != inlink->w) {
        av_log(ctx, AV_LOG_ERROR, "the model requires frame width %d but got %d\n",
                                   model_input->width, inlink->w);
        return AVERROR(EIO);
    }
    if (model_input->dt != DNN_FLOAT) {
        avpriv_report_missing_feature(ctx, "data type rather than DNN_FLOAT");
        return AVERROR(EIO);
    }

    switch (fmt) {
    case AV_PIX_FMT_RGB24:
    case AV_PIX_FMT_BGR24:
        if (model_input->channels != 3) {
            LOG_FORMAT_CHANNEL_MISMATCH();
            return AVERROR(EIO);
        }
        return 0;
    case AV_PIX_FMT_GRAY8:
    case AV_PIX_FMT_GRAYF32:
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUV410P:
    case AV_PIX_FMT_YUV411P:
    case AV_PIX_FMT_NV12:
        if (model_input->channels != 1) {
            LOG_FORMAT_CHANNEL_MISMATCH();
            return AVERROR(EIO);
        }
        return 0;
    default:
        avpriv_report_missing_feature(ctx, "%s", av_get_pix_fmt_name(fmt));
        return AVERROR(EIO);
    }
}
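
/*
 * Channel-count rationale (descriptive note): packed RGB formats feed the
 * whole 3-channel frame to the network, so the model must accept 3 input
 * channels; gray and (semi-)planar YUV formats send only a single plane (Y)
 * through the network, so the model must accept exactly 1 channel.
 */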

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *context  = inlink->dst;
    DnnProcessingContext *ctx = context->priv;
    int result;
    DNNData model_input;
    int check;

    result = ff_dnn_get_input(&ctx->dnnctx, &model_input);
    if (result != 0) {
        av_log(ctx, AV_LOG_ERROR, "could not get input from the model\n");
        return result;
    }

    check = check_modelinput_inlink(&model_input, inlink);
    if (check != 0)
        return check;

    return 0;
}

static av_always_inline int isPlanarYUV(enum AVPixelFormat pix_fmt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    av_assert0(desc);
    return !(desc->flags & AV_PIX_FMT_FLAG_RGB) && desc->nb_components == 3;
}
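
/*
 * Note that semi-planar NV12 also satisfies this predicate: its pixel format
 * descriptor has nb_components == 3 and no AV_PIX_FMT_FLAG_RGB set, which is
 * why the chroma paths below carry a dedicated NV12 branch.
 */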

static int prepare_uv_scale(AVFilterLink *outlink)
{
    AVFilterContext *context = outlink->src;
    DnnProcessingContext *ctx = context->priv;
    AVFilterLink *inlink = context->inputs[0];
    enum AVPixelFormat fmt = inlink->format;

    if (isPlanarYUV(fmt)) {
        if (inlink->w != outlink->w || inlink->h != outlink->h) {
            if (fmt == AV_PIX_FMT_NV12) {
                ctx->sws_uv_scale = sws_getContext(inlink->w >> 1, inlink->h >> 1, AV_PIX_FMT_YA8,
                                                   outlink->w >> 1, outlink->h >> 1, AV_PIX_FMT_YA8,
                                                   SWS_BICUBIC, NULL, NULL, NULL);
                ctx->sws_uv_height = inlink->h >> 1;
            } else {
                const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
                int sws_src_h = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
                int sws_src_w = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
                int sws_dst_h = AV_CEIL_RSHIFT(outlink->h, desc->log2_chroma_h);
                int sws_dst_w = AV_CEIL_RSHIFT(outlink->w, desc->log2_chroma_w);
                ctx->sws_uv_scale = sws_getContext(sws_src_w, sws_src_h, AV_PIX_FMT_GRAY8,
                                                   sws_dst_w, sws_dst_h, AV_PIX_FMT_GRAY8,
                                                   SWS_BICUBIC, NULL, NULL, NULL);
                ctx->sws_uv_height = sws_src_h;
            }
        }
    }

    return 0;
}
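
/*
 * Design note: when the network rescales the frame, the chroma planes must
 * be resampled outside the network as well. A 1-byte chroma plane is scaled
 * as a GRAY8 surface; for NV12 the interleaved UV plane is rescaled in one
 * pass by presenting it to swscale as a 2-channel gray+alpha (YA8) surface
 * at half width and half height.
 */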

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *context = outlink->src;
    DnnProcessingContext *ctx = context->priv;
    AVFilterLink *inlink = context->inputs[0];
    int result;

    // do a trial run in case the dnn model resizes the frame
    result = ff_dnn_get_output(&ctx->dnnctx, inlink->w, inlink->h, &outlink->w, &outlink->h);
    if (result != 0) {
        av_log(ctx, AV_LOG_ERROR, "could not get output from the model\n");
        return result;
    }

    prepare_uv_scale(outlink);

    return 0;
}

static int copy_uv_planes(DnnProcessingContext *ctx, AVFrame *out, const AVFrame *in)
{
    const AVPixFmtDescriptor *desc;
    int uv_height;

    if (!ctx->sws_uv_scale) {
        av_assert0(in->height == out->height && in->width == out->width);
        desc = av_pix_fmt_desc_get(in->format);
        uv_height = AV_CEIL_RSHIFT(in->height, desc->log2_chroma_h);
        for (int i = 1; i < 3; ++i) {
            int bytewidth = av_image_get_linesize(in->format, in->width, i);
            if (bytewidth < 0)
                return AVERROR(EINVAL);
            av_image_copy_plane(out->data[i], out->linesize[i],
                                in->data[i], in->linesize[i],
                                bytewidth, uv_height);
        }
    } else if (in->format == AV_PIX_FMT_NV12) {
        sws_scale(ctx->sws_uv_scale, (const uint8_t **)(in->data + 1), in->linesize + 1,
                  0, ctx->sws_uv_height, out->data + 1, out->linesize + 1);
    } else {
        sws_scale(ctx->sws_uv_scale, (const uint8_t **)(in->data + 1), in->linesize + 1,
                  0, ctx->sws_uv_height, out->data + 1, out->linesize + 1);
        sws_scale(ctx->sws_uv_scale, (const uint8_t **)(in->data + 2), in->linesize + 2,
                  0, ctx->sws_uv_height, out->data + 2, out->linesize + 2);
    }

    return 0;
}
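
/*
 * Only the Y plane (or packed RGB/gray data) travels through the DNN; the
 * U and V planes of the output frame are produced here instead: a straight
 * copy when the network keeps the frame size, otherwise the bicubic rescale
 * prepared in prepare_uv_scale().
 */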

static int flush_frame(AVFilterLink *outlink, int64_t pts, int64_t *out_pts)
{
    DnnProcessingContext *ctx = outlink->src->priv;
    int ret;
    DNNAsyncStatusType async_state;

    ret = ff_dnn_flush(&ctx->dnnctx);
    if (ret != 0)
        return -1;

    do {
        AVFrame *in_frame = NULL;
        AVFrame *out_frame = NULL;
        async_state = ff_dnn_get_result(&ctx->dnnctx, &in_frame, &out_frame);
        if (async_state == DAST_SUCCESS) {
            if (isPlanarYUV(in_frame->format))
                copy_uv_planes(ctx, out_frame, in_frame);
            av_frame_free(&in_frame);
            // capture the timestamp before handing the frame off downstream
            if (out_pts)
                *out_pts = out_frame->pts + pts;
            ret = ff_filter_frame(outlink, out_frame);
            if (ret < 0)
                return ret;
        }
        av_usleep(5000);
    } while (async_state >= DAST_NOT_READY);

    return 0;
}
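
/*
 * Flush contract (descriptive note): ff_dnn_flush() submits whatever is
 * still queued inside the backend, then ff_dnn_get_result() is polled,
 * sleeping 5 ms between attempts, until the backend reports something other
 * than DAST_NOT_READY, i.e. its queue has drained. The timestamp of the last
 * forwarded frame is reported through out_pts so EOF can be signalled with a
 * meaningful pts.
 */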

static int activate(AVFilterContext *filter_ctx)
{
    AVFilterLink *inlink = filter_ctx->inputs[0];
    AVFilterLink *outlink = filter_ctx->outputs[0];
    DnnProcessingContext *ctx = filter_ctx->priv;
    AVFrame *in = NULL, *out = NULL;
    int64_t pts;
    int ret, status;
    int got_frame = 0;
    int async_state;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    do {
        // drain all input frames
        ret = ff_inlink_consume_frame(inlink, &in);
        if (ret < 0)
            return ret;
        if (ret > 0) {
            out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
            if (!out) {
                av_frame_free(&in);
                return AVERROR(ENOMEM);
            }
            av_frame_copy_props(out, in);
            if (ff_dnn_execute_model(&ctx->dnnctx, in, out) != 0) {
                return AVERROR(EIO);
            }
        }
    } while (ret > 0);

    // drain all processed frames
    do {
        AVFrame *in_frame = NULL;
        AVFrame *out_frame = NULL;
        async_state = ff_dnn_get_result(&ctx->dnnctx, &in_frame, &out_frame);
        if (async_state == DAST_SUCCESS) {
            if (isPlanarYUV(in_frame->format))
                copy_uv_planes(ctx, out_frame, in_frame);
            av_frame_free(&in_frame);
            ret = ff_filter_frame(outlink, out_frame);
            if (ret < 0)
                return ret;
            got_frame = 1;
        }
    } while (async_state == DAST_SUCCESS);

    // if a frame was produced, let the framework schedule the next filter
    if (got_frame)
        return 0;

    if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        if (status == AVERROR_EOF) {
            int64_t out_pts = pts;
            ret = flush_frame(outlink, pts, &out_pts);
            ff_outlink_set_status(outlink, status, out_pts);
            return ret;
        }
    }

    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return 0;
}
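
/*
 * activate() flow (descriptive note): forward status changes from the output
 * link back to the input, submit every queued input frame to the backend,
 * then drain every finished result downstream. Only when nothing was
 * produced does it acknowledge EOF (flushing the backend) or ask upstream
 * for more data via FF_FILTER_FORWARD_WANTED().
 */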

static av_cold void uninit(AVFilterContext *ctx)
{
    DnnProcessingContext *context = ctx->priv;

    sws_freeContext(context->sws_uv_scale);
    ff_dnn_uninit(&context->dnnctx);
}

static const AVFilterPad dnn_processing_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
    },
};

static const AVFilterPad dnn_processing_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
};

const AVFilter ff_vf_dnn_processing = {
    .name          = "dnn_processing",
    .description   = NULL_IF_CONFIG_SMALL("Apply DNN processing filter to the input."),
    .priv_size     = sizeof(DnnProcessingContext),
    .init          = init,
    .uninit        = uninit,
    FILTER_INPUTS(dnn_processing_inputs),
    FILTER_OUTPUTS(dnn_processing_outputs),
    FILTER_PIXFMTS_ARRAY(pix_fmts),
    .priv_class    = &dnn_processing_class,
    .activate      = activate,
};