#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavformat/avformat.h"
+#include "libavutil/pixdesc.h"
#include "debug_ch.h"
#define DEFAULT_VIDEO_GOP_SIZE 15
-enum codec_api_type {
- CODEC_INIT = 0,
- CODEC_DECODE_VIDEO,
- CODEC_ENCODE_VIDEO,
- CODEC_DECODE_AUDIO,
- CODEC_ENCODE_AUDIO,
- CODEC_PICTURE_COPY,
- CODEC_DEINIT,
- CODEC_FLUSH_BUFFERS,
- };
+// misc
+#define OFFSET_PICTURE_BUFFER (0x100)
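+// Note: each write-back buffer reserves the first OFFSET_PICTURE_BUFFER bytes
+// for the response header (e.g. video_decode_output / audio_decode_output);
+// the decoded picture or audio payload is copied starting at this offset
+// (see copy_video_decode_data / copy_audio_decode_data below).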
+
+//
+// COMMON
+//
enum codec_type {
CODEC_TYPE_UNKNOWN = -1,
CODEC_TYPE_ENCODE,
};
-struct video_data {
- int32_t width;
- int32_t height;
- int32_t fps_n;
- int32_t fps_d;
- int32_t par_n;
- int32_t par_d;
- int32_t pix_fmt;
- int32_t bpp;
- int32_t ticks_per_frame;
-};
-
-struct audio_data {
- int32_t channels;
- int32_t sample_rate;
- int32_t block_align;
- int32_t depth;
- int32_t sample_fmt;
- int32_t frame_size;
- int32_t bits_per_smp_fmt;
- int32_t reserved;
- int64_t channel_layout;
-};
+typedef struct DataContainer {
+ // common
+ bool is_got;
+ int32_t len;
+ AVCodecContext *avctx;
+ AVFrame *frame;
+
+ union {
+ // for video decoder
+ struct {
+ size_t picture_buffer_offset;
+ };
+
+ // for video/audio encoder
+ struct {
+ AVPacket *avpkt;
+ };
+
+ // for audio decoder
+ struct {
+ bool resampled;
+ int32_t out_sample_fmt;
+ };
+ };
+} DataContainer;
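+// A DataContainer is allocated by the codec function that produces a result
+// and released by the copy_* write-back handler that consumes it (along with
+// any AVPacket it carries).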
DeviceMemEntry *entry[CODEC_CONTEXT_MAX];
QTAILQ_HEAD_INITIALIZER(codec_rq);
// codec functions
-static bool codec_init(MaruBrillCodecState *, int, void *);
-static bool codec_deinit(MaruBrillCodecState *, int, void *);
-static bool codec_decode_video(MaruBrillCodecState *, int, void *);
-static bool codec_encode_video(MaruBrillCodecState *, int, void *);
-static bool codec_decode_audio(MaruBrillCodecState *, int, void *);
-static bool codec_encode_audio(MaruBrillCodecState *, int, void *);
-static bool codec_picture_copy(MaruBrillCodecState *, int, void *);
-static bool codec_flush_buffers(MaruBrillCodecState *, int, void *);
+static bool init(MaruBrillCodecState *, int, void *);
+static bool deinit(MaruBrillCodecState *, int, void *);
+static bool decode_video(MaruBrillCodecState *, int, void *);
+static bool encode_video(MaruBrillCodecState *, int, void *);
+static bool decode_audio(MaruBrillCodecState *, int, void *);
+static bool encode_audio(MaruBrillCodecState *, int, void *);
+static bool picture_copy(MaruBrillCodecState *, int, void *);
+static bool flush_buffers(MaruBrillCodecState *, int, void *);
+static bool decode_video_and_picture_copy(MaruBrillCodecState *, int, void *);
typedef bool (*CodecFuncEntry)(MaruBrillCodecState *, int, void *);
static CodecFuncEntry codec_func_handler[] = {
- codec_init,
- codec_decode_video,
- codec_encode_video,
- codec_decode_audio,
- codec_encode_audio,
- codec_picture_copy,
- codec_deinit,
- codec_flush_buffers,
+ init,
+ decode_video,
+ encode_video,
+ decode_audio,
+ encode_audio,
+ picture_copy,
+ deinit,
+ flush_buffers,
+ decode_video_and_picture_copy,
};
-static AVCodecParserContext *maru_brill_codec_parser_init(AVCodecContext *avctx);
+// default data handler
+static void default_get_data(void *dst, void *src, size_t size)
+{
+ memcpy(dst, src, size);
+
+ g_free(src);
+}
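+// Write-back handlers share this signature; brillcodec_pop_writequeue() calls
+// them with dst pointing into the guest-shared buffer (s->vaddr + mem_offset),
+// src/opaque holding the host-side result, and size giving its length.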
+
+// queue
+static void *brillcodec_store_inbuf(MaruBrillCodecState *s,
+ uint8_t *mem_base, CodecParam *ioparam)
+{
+ DeviceMemEntry *elem = NULL;
+ uint32_t readbuf_size = 0;
+ size_t size = sizeof(uint32_t);
+ uint8_t *readbuf = NULL;
+ uint8_t *device_mem = mem_base + ioparam->mem_offset;
+
+ elem = g_malloc0(sizeof(DeviceMemEntry));
+ if (s->memory_monopolizing & (1 << ioparam->api_index)) {
+ readbuf = device_mem + size;
+ } else {
+ memcpy(&readbuf_size, device_mem, sizeof(readbuf_size));
-static void maru_brill_codec_push_readqueue(MaruBrillCodecState *s, CodecParam *ioparam);
-static void maru_brill_codec_push_writequeue(MaruBrillCodecState *s, void* opaque,
- size_t data_size, int ctx_id,
- DataHandler *handler);
+ TRACE("readbuf size: %d\n", readbuf_size);
+ if (readbuf_size == 0) {
+ TRACE("inbuf size is 0. api_id %d, ctx_id %d, mem_offset %x\n",
+ ioparam->api_index, ioparam->ctx_index, ioparam->mem_offset);
+ } else {
+ readbuf = g_malloc0(readbuf_size);
+ TRACE("copy input buffer from guest. ctx_id: %d, mem_offset: %x\n",
+ ioparam->ctx_index, ioparam->mem_offset);
+ memcpy(readbuf, device_mem + size, readbuf_size);
+ }
+ }
-static void *maru_brill_codec_store_inbuf(uint8_t *mem_base, CodecParam *ioparam);
+ elem->opaque = readbuf;
+ elem->data_size = readbuf_size;
+ elem->ctx_id = ioparam->ctx_index;
-// default handler
-static void default_get_data(void *dst, void *src, size_t size, enum AVPixelFormat pix_fmt) {
- memcpy(dst, src, size);
+ return elem;
+}
+
+static void brillcodec_push_readqueue(MaruBrillCodecState *s,
+ CodecParam *ioparam)
+{
+ CodecDataStg *elem = NULL;
+ DeviceMemEntry *data_buf = NULL;
+
+ elem = g_malloc0(sizeof(CodecDataStg));
+
+ elem->param_buf = ioparam;
+
+ switch(ioparam->api_index) {
+ case INIT:
+ case DECODE_VIDEO:
+ case ENCODE_VIDEO:
+ case DECODE_AUDIO:
+ case ENCODE_AUDIO:
+ case DECODE_VIDEO_AND_PICTURE_COPY:
+ data_buf = brillcodec_store_inbuf(s, (uint8_t *)s->vaddr, ioparam);
+ break;
+ case PICTURE_COPY:
+ case DEINIT:
+ case FLUSH_BUFFERS:
+ default:
+ TRACE("no buffer from guest\n");
+ break;
+ }
+
+ elem->data_buf = data_buf;
+
+ qemu_mutex_lock(&s->ioparam_queue_mutex);
+ QTAILQ_INSERT_TAIL(&codec_rq, elem, node);
+ qemu_mutex_unlock(&s->ioparam_queue_mutex);
}
-static void default_release(void *opaque) {
- g_free(opaque);
+static CodecDataStg *brillcodec_pop_readqueue(MaruBrillCodecState *s)
+{
+ CodecDataStg *elem = NULL;
+
+ qemu_mutex_lock(&s->ioparam_queue_mutex);
+ elem = QTAILQ_FIRST(&codec_rq);
+ if (elem) {
+ QTAILQ_REMOVE(&codec_rq, elem, node);
+ }
+ qemu_mutex_unlock(&s->ioparam_queue_mutex);
+
+ return elem;
}
-static DataHandler default_data_handler = {
- .get_data = default_get_data,
- .release = default_release,
-};
+static void brillcodec_push_writequeue(MaruBrillCodecState *s, void* opaque,
+ size_t data_size, int ctx_id,
+ void (*get_data)(void *, void *, size_t))
+{
+ DeviceMemEntry *elem = NULL;
+ elem = g_malloc0(sizeof(DeviceMemEntry));
+ elem->opaque = opaque;
+ elem->data_size = data_size;
+ elem->ctx_id = ctx_id;
-// default video decode data handler
-static void extract(void *dst, void *src, size_t size, enum AVPixelFormat pix_fmt) {
- AVFrame *frame = (AVFrame *)src;
- avpicture_layout((AVPicture *)src, pix_fmt, frame->width, frame->height, dst, size);
+ if (get_data) {
+ elem->get_data = get_data;
+ } else {
+ elem->get_data = default_get_data;
+ }
+
+ qemu_mutex_lock(&s->context_queue_mutex);
+ QTAILQ_INSERT_TAIL(&codec_wq, elem, node);
+ qemu_mutex_unlock(&s->context_queue_mutex);
}
-static void release(void *buf) {}
+void brillcodec_pop_writequeue(MaruBrillCodecState *s, uint32_t ctx_idx)
+{
+ DeviceMemEntry *elem = NULL;
+ uint32_t mem_offset = 0;
+
+ TRACE("enter: %s\n", __func__);
-static DataHandler default_video_decode_data_handler = {
- .get_data = extract,
- .release = release,
-};
+ if (ctx_idx < 1 || ctx_idx > (CODEC_CONTEXT_MAX - 1)) {
+ ERR("invalid buffer index. %d\n", ctx_idx);
+ return;
+ }
+
+ TRACE("pop_writeqeue. context index: %d\n", ctx_idx);
+ elem = entry[ctx_idx];
+ if (elem) {
+ mem_offset = s->ioparam.mem_offset;
+
+ // check corrupted mem_offset
+ if (mem_offset < CODEC_MEM_SIZE) {
+ elem->get_data(s->vaddr + mem_offset, elem->opaque, elem->data_size);
+ } else {
+ TRACE("mem_offset is corrupted!!\n");
+ }
+
+ TRACE("pop_writequeue. release elem: %p\n", elem);
+ g_free(elem);
+
+ entry[ctx_idx] = NULL;
+ } else {
+ TRACE("there is no buffer to copy data to guest\n");
+ }
+
+ TRACE("leave: %s\n", __func__);
+}
+// threads
static void maru_brill_codec_thread_exit(MaruBrillCodecState *s)
{
int index;
TRACE("leave: %s\n", __func__);
}
-void maru_brill_codec_wakeup_threads(MaruBrillCodecState *s, int api_index)
+void brillcodec_wakeup_threads(MaruBrillCodecState *s, int api_index)
{
CodecParam *ioparam = NULL;
ioparam = g_malloc0(sizeof(CodecParam));
- if (!ioparam) {
- ERR("failed to allocate ioparam\n");
- return;
- }
memcpy(ioparam, &s->ioparam, sizeof(CodecParam));
qemu_mutex_lock(&s->context_mutex);
- if (ioparam->api_index != CODEC_INIT) {
- if (!CONTEXT(s, ioparam->ctx_index).opened_context) {
+ if (ioparam->api_index != INIT) {
+ if (!CONTEXT(s, ioparam->ctx_index)->opened_context) {
INFO("abandon api %d for context %d\n",
ioparam->api_index, ioparam->ctx_index);
qemu_mutex_unlock(&s->context_mutex);
qemu_mutex_unlock(&s->context_mutex);
- maru_brill_codec_push_readqueue(s, ioparam);
+ brillcodec_push_readqueue(s, ioparam);
qemu_mutex_lock(&s->context_mutex);
// W/A for threads starvation.
TRACE("after sending conditional signal\n");
}
-void *maru_brill_codec_threads(void *opaque)
+void *brillcodec_threads(void *opaque)
{
MaruBrillCodecState *s = (MaruBrillCodecState *)opaque;
bool ret = false;
--(s->idle_thread_cnt); // protected under mutex.
qemu_mutex_unlock(&s->context_mutex);
- qemu_mutex_lock(&s->ioparam_queue_mutex);
- elem = QTAILQ_FIRST(&codec_rq);
- if (elem) {
- QTAILQ_REMOVE(&codec_rq, elem, node);
- qemu_mutex_unlock(&s->ioparam_queue_mutex);
- } else {
- qemu_mutex_unlock(&s->ioparam_queue_mutex);
- continue;
- }
-
- if (!elem->param_buf) {
+ elem = brillcodec_pop_readqueue(s);
+ if (!elem || !elem->param_buf) {
continue;
}
TRACE("api_id: %d ctx_id: %d\n", api_id, ctx_id);
qemu_mutex_lock(&s->context_mutex);
- CONTEXT(s, ctx_id).occupied_thread = true;
+ CONTEXT(s, ctx_id)->occupied_thread = true;
qemu_mutex_unlock(&s->context_mutex);
ret = codec_func_handler[api_id](s, ctx_id, indata_buf);
elem->param_buf = NULL;
if (elem->data_buf) {
- if (elem->data_buf->opaque) {
+ if (elem->data_buf->opaque &&
+ !(s->memory_monopolizing & (1 << api_id))) {
TRACE("release inbuf\n");
g_free(elem->data_buf->opaque);
elem->data_buf->opaque = NULL;
g_free(elem);
qemu_mutex_lock(&s->context_mutex);
- if (CONTEXT(s, ctx_id).requested_close) {
+ if (CONTEXT(s, ctx_id)->requested_close) {
INFO("make worker thread to handle deinit\n");
// codec_deinit(s, ctx_id, NULL);
- maru_brill_codec_release_context(s, ctx_id);
- CONTEXT(s, ctx_id).requested_close = false;
+ brillcodec_release_context(s, ctx_id);
+ CONTEXT(s, ctx_id)->requested_close = false;
}
qemu_mutex_unlock(&s->context_mutex);
qemu_bh_schedule(s->codec_bh);
qemu_mutex_lock(&s->context_mutex);
- CONTEXT(s, ctx_id).occupied_thread = false;
+ CONTEXT(s, ctx_id)->occupied_thread = false;
qemu_mutex_unlock(&s->context_mutex);
}
return NULL;
}
-// queue
-static void maru_brill_codec_push_readqueue(MaruBrillCodecState *s,
- CodecParam *ioparam)
+//
+// DEVICE FUNCTIONS
+//
+
+void brillcodec_release_context(MaruBrillCodecState *s, int32_t ctx_id)
{
- CodecDataStg *elem = NULL;
- DeviceMemEntry *data_buf = NULL;
+ DeviceMemEntry *wq_elem = NULL, *wnext = NULL;
+ CodecDataStg *rq_elem = NULL, *rnext = NULL;
- elem = g_malloc0(sizeof(CodecDataStg));
- if (!elem) {
- ERR("failed to allocate ioparam_queue. %d\n", sizeof(CodecDataStg));
- return;
+ TRACE("enter: %s\n", __func__);
+
+ TRACE("release %d of context\n", ctx_id);
+
+ qemu_mutex_lock(&s->threadpool.mutex);
+ if (CONTEXT(s, ctx_id)->opened_context) {
+ // qemu_mutex_unlock(&s->threadpool.mutex);
+ deinit(s, ctx_id, NULL);
+ // qemu_mutex_lock(&s->threadpool.mutex);
}
+ CONTEXT(s, ctx_id)->occupied_context = false;
+ qemu_mutex_unlock(&s->threadpool.mutex);
- elem->param_buf = ioparam;
+    // TODO: check whether the foreach statement needs a lock or not.
+ QTAILQ_FOREACH_SAFE(rq_elem, &codec_rq, node, rnext) {
+ if (rq_elem && rq_elem->data_buf &&
+ (rq_elem->data_buf->ctx_id == ctx_id)) {
- switch(ioparam->api_index) {
- case CODEC_INIT ... CODEC_ENCODE_AUDIO:
- data_buf = maru_brill_codec_store_inbuf((uint8_t *)s->vaddr, ioparam);
- break;
- default:
- TRACE("no buffer from guest\n");
- break;
+ TRACE("remove unused node from codec_rq. ctx_id: %d\n", ctx_id);
+ qemu_mutex_lock(&s->context_queue_mutex);
+ QTAILQ_REMOVE(&codec_rq, rq_elem, node);
+ qemu_mutex_unlock(&s->context_queue_mutex);
+ if (rq_elem && rq_elem->data_buf) {
+ TRACE("release rq_buffer: %p\n", rq_elem->data_buf);
+ g_free(rq_elem->data_buf);
+ }
+
+ TRACE("release rq_elem: %p\n", rq_elem);
+ g_free(rq_elem);
+ } else {
+ TRACE("no elem of %d context in the codec_rq.\n", ctx_id);
+ }
}
- elem->data_buf = data_buf;
+ QTAILQ_FOREACH_SAFE(wq_elem, &codec_wq, node, wnext) {
+ if (wq_elem && wq_elem->ctx_id == ctx_id) {
+ TRACE("remove unused node from codec_wq. ctx_id: %d\n", ctx_id);
+ qemu_mutex_lock(&s->context_queue_mutex);
+ QTAILQ_REMOVE(&codec_wq, wq_elem, node);
+ qemu_mutex_unlock(&s->context_queue_mutex);
- qemu_mutex_lock(&s->ioparam_queue_mutex);
- QTAILQ_INSERT_TAIL(&codec_rq, elem, node);
- qemu_mutex_unlock(&s->ioparam_queue_mutex);
+ if (wq_elem && wq_elem->opaque) {
+ TRACE("release wq_buffer: %p\n", wq_elem->opaque);
+ g_free(wq_elem->opaque);
+ wq_elem->opaque = NULL;
+ }
+
+ TRACE("release wq_elem: %p\n", wq_elem);
+ g_free(wq_elem);
+ } else {
+ TRACE("no elem of %d context in the codec_wq.\n", ctx_id);
+ }
+ }
+
+ TRACE("leave: %s\n", __func__);
}
-static void *maru_brill_codec_store_inbuf(uint8_t *mem_base,
- CodecParam *ioparam)
+struct codec_element {
+ int32_t codec_type;
+ int32_t media_type;
+ gchar name[32];
+ gchar long_name[64];
+ union {
+ int32_t pix_fmts[4];
+ int32_t sample_fmts[4];
+ };
+} __attribute__((packed));
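+// brillcodec_query_list() below serializes one codec_element per available
+// codec into the shared buffer, right after a leading uint32_t that receives
+// the total size in bytes of the serialized list.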
+
+int brillcodec_query_list (MaruBrillCodecState *s)
{
- DeviceMemEntry *elem = NULL;
- int readbuf_size, size = 0;
- uint8_t *readbuf = NULL;
- uint8_t *device_mem = mem_base + ioparam->mem_offset;
+ AVCodec *codec = NULL;
- elem = g_malloc0(sizeof(DeviceMemEntry));
- if (!elem) {
- ERR("failed to allocate readqueue node. size: %d\n",
- sizeof(DeviceMemEntry));
- return NULL;
+ /* register avcodec */
+ TRACE("register avcodec\n");
+ av_register_all();
+
+ codec = av_codec_next(NULL);
+ if (!codec) {
+ ERR("failed to get codec info.\n");
+ return -1;
}
- memcpy(&readbuf_size, device_mem, sizeof(readbuf_size));
- size = sizeof(readbuf_size);
+    // the first uint32_t of the shared buffer holds the total size of the codec list; elements follow it.
+ struct codec_element *element = (struct codec_element *)(s->vaddr + sizeof(uint32_t));
+ while (codec) {
+ int32_t codec_fmts[4], i;
- TRACE("readbuf size: %d\n", readbuf_size);
- if (readbuf_size <= 0) {
- TRACE("inbuf size is 0. api_id %d, ctx_id %d, mem_offset %x\n",
- ioparam->api_index, ioparam->ctx_index, ioparam->mem_offset);
- } else {
- readbuf = g_malloc0(readbuf_size);
- if (!readbuf) {
- ERR("failed to allocate a read buffer. size: %d\n", readbuf_size);
+ memset(codec_fmts, -1, sizeof(codec_fmts));
+ if (codec->type == AVMEDIA_TYPE_VIDEO) {
+ if (codec->pix_fmts) {
+                for (i = 0; i < 4 && codec->pix_fmts[i] != -1; i++) {
+ codec_fmts[i] = codec->pix_fmts[i];
+ }
+ }
+ } else if (codec->type == AVMEDIA_TYPE_AUDIO) {
+ if (codec->sample_fmts) {
+                for (i = 0; i < 4 && codec->sample_fmts[i] != -1; i++) {
+ codec_fmts[i] = codec->sample_fmts[i];
+ }
+ }
} else {
- TRACE("copy input buffer from guest. ctx_id: %d, mem_offset: %x\n",
- ioparam->ctx_index, ioparam->mem_offset);
- memcpy(readbuf, device_mem + size, readbuf_size);
+ ERR("unknown media type: %d\n", codec->type);
}
- }
- // memset(device_mem, 0x00, sizeof(readbuf_size));
-
- elem->opaque = readbuf;
- elem->data_size = readbuf_size;
- elem->ctx_id = ioparam->ctx_index;
- return elem;
-}
+ memset(element, 0x00, sizeof(struct codec_element));
+ element->codec_type = codec->decode ? CODEC_TYPE_DECODE : CODEC_TYPE_ENCODE;
+ element->media_type = codec->type;
+ g_strlcpy(element->name, codec->name, sizeof(element->name));
+ g_strlcpy(element->long_name, codec->long_name, sizeof(element->long_name));
+ memcpy(element->pix_fmts, codec_fmts, sizeof(codec_fmts));
-static void maru_brill_codec_push_writequeue(MaruBrillCodecState *s, void* opaque,
- size_t data_size, int ctx_id,
- DataHandler *handler)
-{
- DeviceMemEntry *elem = NULL;
- elem = g_malloc0(sizeof(DeviceMemEntry));
+ TRACE("register %s %s\n", codec->name, codec->decode ? "decoder" : "encoder");
- elem->opaque = opaque;
- elem->data_size = data_size;
- elem->ctx_id = ctx_id;
+ ++element;
- if (handler) {
- elem->handler = handler;
- } else {
- elem->handler = &default_data_handler;
+ codec = av_codec_next(codec);
}
+ uint32_t size = (intptr_t)element - ((intptr_t)s->vaddr + sizeof(uint32_t));
+ memcpy(s->vaddr, &size, sizeof(uint32_t));
- qemu_mutex_lock(&s->context_queue_mutex);
- QTAILQ_INSERT_TAIL(&codec_wq, elem, node);
- qemu_mutex_unlock(&s->context_queue_mutex);
+ return 0;
}
-void maru_brill_codec_pop_writequeue(MaruBrillCodecState *s, uint32_t ctx_idx)
+int brillcodec_get_context_index(MaruBrillCodecState *s)
{
- DeviceMemEntry *elem = NULL;
- uint32_t mem_offset = 0;
+ int ctx_id;
TRACE("enter: %s\n", __func__);
- if (ctx_idx < 1 || ctx_idx > (CODEC_CONTEXT_MAX - 1)) {
- ERR("invalid buffer index. %d\n", ctx_idx);
- return;
- }
-
- TRACE("pop_writeqeue. context index: %d\n", ctx_idx);
- elem = entry[ctx_idx];
- if (elem) {
- mem_offset = s->ioparam.mem_offset;
-
- // check corrupted mem_offset
- if (mem_offset < CODEC_MEM_SIZE) {
- elem->handler->get_data(s->vaddr + mem_offset, elem->opaque, elem->data_size, s->context[ctx_idx].avctx->pix_fmt);
- elem->handler->release(elem->opaque);
- } else {
- TRACE("mem_offset is corrupted!!\n");
+    // requires mutex_lock? this function is already protected by a critical section.
+ qemu_mutex_lock(&s->threadpool.mutex);
+ for (ctx_id = 1; ctx_id < CODEC_CONTEXT_MAX; ctx_id++) {
+ if (CONTEXT(s, ctx_id)->occupied_context == false) {
+ TRACE("get %d of codec context successfully.\n", ctx_id);
+ CONTEXT(s, ctx_id)->occupied_context = true;
+ break;
}
+ }
+ qemu_mutex_unlock(&s->threadpool.mutex);
- TRACE("pop_writequeue. release elem: %p\n", elem);
- g_free(elem);
-
- entry[ctx_idx] = NULL;
- } else {
- TRACE("there is no buffer to copy data to guest\n");
+ if (ctx_id == CODEC_CONTEXT_MAX) {
+ ERR("failed to get available codec context. ");
+ ERR("try to run codec again.\n");
+ ctx_id = -1;
}
TRACE("leave: %s\n", __func__);
+
+ return ctx_id;
}
-static void serialize_video_data(const struct video_data *video,
- AVCodecContext *avctx)
+//
+// CODEC FUNCTIONS
+// FLUSH BUFFERS
+//
+
+static bool flush_buffers(MaruBrillCodecState *s, int ctx_id, void *data_buf)
{
- if (video->width) {
- avctx->width = video->width;
- }
- if (video->height) {
- avctx->height = video->height;
- }
- if (video->fps_n) {
- avctx->time_base.num = video->fps_n;
- }
- if (video->fps_d) {
- avctx->time_base.den = video->fps_d;
- }
- if (video->pix_fmt > PIX_FMT_NONE) {
- avctx->pix_fmt = video->pix_fmt;
- }
- if (video->par_n) {
- avctx->sample_aspect_ratio.num = video->par_n;
- }
- if (video->par_d) {
- avctx->sample_aspect_ratio.den = video->par_d;
- }
- if (video->bpp) {
- avctx->bits_per_coded_sample = video->bpp;
- }
- if (video->ticks_per_frame) {
- avctx->ticks_per_frame = video->ticks_per_frame;
+ AVCodecContext *avctx = NULL;
+ bool ret = true;
+
+ TRACE("enter: %s\n", __func__);
+
+ avctx = CONTEXT(s, ctx_id)->avctx;
+ if (!avctx) {
+ ERR("%d of AVCodecContext is NULL.\n", ctx_id);
+ ret = false;
+ } else if (!avctx->codec) {
+ ERR("%d of AVCodec is NULL.\n", ctx_id);
+ ret = false;
+ } else {
+ TRACE("flush %d context of buffers.\n", ctx_id);
+ AVCodecParserContext *pctx = NULL;
+ uint8_t *poutbuf = NULL;
+ int poutbuf_size = 0;
+ int res = 0;
+
+ uint8_t p_inbuf[FF_INPUT_BUFFER_PADDING_SIZE];
+ int p_inbuf_size = FF_INPUT_BUFFER_PADDING_SIZE;
+
+ memset(&p_inbuf, 0x00, p_inbuf_size);
+
+ pctx = CONTEXT(s, ctx_id)->parser_ctx;
+ if (pctx) {
+ res = av_parser_parse2(pctx, avctx, &poutbuf, &poutbuf_size,
+ p_inbuf, p_inbuf_size, -1, -1, -1);
+ INFO("before flush buffers, using parser. res: %d\n", res);
+ }
+
+ avcodec_flush_buffers(avctx);
}
- INFO("codec_init. video, resolution: %dx%d, framerate: %d/%d "
- "pixel_fmt: %d sample_aspect_ratio: %d/%d bpp %d\n",
- avctx->width, avctx->height, avctx->time_base.num,
- avctx->time_base.den, avctx->pix_fmt, avctx->sample_aspect_ratio.num,
- avctx->sample_aspect_ratio.den, avctx->bits_per_coded_sample);
+ brillcodec_push_writequeue(s, NULL, 0, ctx_id, NULL);
+
+ TRACE("leave: %s\n", __func__);
+
+ return ret;
}
-static void deserialize_video_data (const AVCodecContext *avctx,
+//
+// CODEC FUNCTIONS
+// VIDEO DECODE / ENCODE
+//
+
+struct video_data {
+ int32_t width;
+ int32_t height;
+ int32_t fps_n;
+ int32_t fps_d;
+ int32_t par_n;
+ int32_t par_d;
+ int32_t pix_fmt;
+ int32_t bpp;
+ int32_t ticks_per_frame;
+} __attribute__((packed));
+
+struct video_decode_input {
+ int32_t inbuf_size;
+ int32_t idx;
+ int64_t in_offset;
+ uint8_t inbuf; // for pointing inbuf address
+} __attribute__((packed));
+
+struct video_decode_output {
+ int32_t len;
+ int32_t got_picture;
+ uint8_t data; // for pointing data address
+} __attribute__((packed));
+
+struct video_encode_input {
+ int32_t inbuf_size;
+ int64_t in_timestamp;
+ uint8_t inbuf; // for pointing inbuf address
+} __attribute__((packed));
+
+struct video_encode_output {
+ int32_t len;
+ int32_t coded_frame;
+ int32_t key_frame;
+ uint8_t data; // for pointing data address
+} __attribute__((packed));
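+// In the decode/encode input and output structs above, the trailing
+// single-byte member only marks where the variable-length payload begins in
+// the shared buffer; its address is taken (e.g. &decode_input->inbuf,
+// &encode_output->data) rather than its value being used.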
+
+static void fill_video_data(const AVCodecContext *avctx,
struct video_data *video)
{
memset(video, 0x00, sizeof(struct video_data));
video->ticks_per_frame = avctx->ticks_per_frame;
}
-static void serialize_audio_data (const struct audio_data *audio,
- AVCodecContext *avctx)
+static void default_get_picture(void *dst, void *src, enum AVPixelFormat pix_fmt)
{
- if (audio->channels) {
- avctx->channels = audio->channels;
+ AVFrame *frame = (AVFrame *)src;
+ int pict_size = avpicture_get_size(pix_fmt, frame->width, frame->height);
+ if (pict_size < 0) {
+        // should never happen...
+ ERR("Invalid picture size\n");
+ return;
}
- if (audio->sample_rate) {
- avctx->sample_rate = audio->sample_rate;
+ avpicture_layout((AVPicture *)frame, pix_fmt,
+ frame->width, frame->height, dst, pict_size);
+}
+
+// video decode data handler
+// FIXME: ignore "size" now...
+static void copy_video_decode_data(void *dst, void *opaque, size_t dummy)
+{
+ DataContainer *dc = (DataContainer *)opaque;
+ CodecContext *context = (CodecContext *)dc->avctx->opaque;
+ CodecPlugin *plugin = context->state->hwaccel_plugin;
+
+ if (dc->picture_buffer_offset) {
+        // if output video data exists...
+ struct video_decode_output *decode_output =
+ (struct video_decode_output *)dst;
+ struct video_data *data = (struct video_data *)&decode_output->data;
+
+ decode_output->len = dc->len;
+ decode_output->got_picture = dc->is_got ? 1 : 0;
+ fill_video_data(dc->avctx, data);
+
+ if (context->is_hwaccel && data->pix_fmt == plugin->pix_fmt) {
+ data->pix_fmt = plugin->output_pix_fmt;
+ }
}
- if (audio->block_align) {
- avctx->block_align = audio->block_align;
+
+ if (dc->frame) {
+        // if a decoded picture exists...
+ if (context->is_hwaccel) {
+ plugin->get_picture(dst + dc->picture_buffer_offset, dc->frame);
+ } else {
+ default_get_picture(dst + dc->picture_buffer_offset, dc->frame, dc->avctx->pix_fmt);
+ }
}
- if (audio->sample_fmt > AV_SAMPLE_FMT_NONE) {
- avctx->sample_fmt = audio->sample_fmt;
+ g_free(dc);
+}
+
+static void copy_video_encode_data(void *dst, void *opaque, size_t dummy)
+{
+ DataContainer *dc = (DataContainer *)opaque;
+ struct video_encode_output *encode_output = (struct video_encode_output *)dst;
+
+ encode_output->len = dc->avpkt->size;
+ if (dc->avpkt->size && dc->is_got) {
+        // inform the gstreamer plugin about the status of the encoded frame;
+        // a flag on the gstreamer output buffer depends on this status.
+ if (dc->avctx->coded_frame) {
+ encode_output->coded_frame = 1;
+ // if key_frame is 0, this frame cannot be decoded independently.
+ encode_output->key_frame = dc->avctx->coded_frame->key_frame;
+ }
+
+ memcpy(&encode_output->data, dc->avpkt->data, dc->avpkt->size);
}
- INFO("codec_init. audio, channel %d sample_rate %d sample_fmt %d ch_layout %lld\n",
- avctx->channels, avctx->sample_rate, avctx->sample_fmt, avctx->channel_layout);
+ g_free(dc->avpkt->data);
+ g_free(dc->avpkt);
+ g_free(dc);
}
-void maru_brill_codec_release_context(MaruBrillCodecState *s, int32_t ctx_id)
+static uint32_t parse_and_decode_video(AVCodecContext *avctx, AVFrame *picture,
+ AVCodecParserContext *pctx, int ctx_id,
+ AVPacket *packet, uint32_t *got_picture,
+ int idx, int64_t in_offset)
{
- DeviceMemEntry *wq_elem = NULL, *wnext = NULL;
- CodecDataStg *rq_elem = NULL, *rnext = NULL;
+ uint8_t *parser_outbuf = NULL;
+ int parser_outbuf_size = 0;
+ uint8_t *parser_buf = packet->data;
+ int parser_buf_size = packet->size;
+ int ret = 0, len = -1;
+ int64_t pts = 0, dts = 0, pos = 0;
- TRACE("enter: %s\n", __func__);
+ pts = dts = idx;
+ pos = in_offset;
- TRACE("release %d of context\n", ctx_id);
+ do {
+ if (pctx) {
+ ret = av_parser_parse2(pctx, avctx, &parser_outbuf,
+ &parser_outbuf_size, parser_buf, parser_buf_size,
+ pts, dts, pos);
- qemu_mutex_lock(&s->threadpool.mutex);
- if (CONTEXT(s, ctx_id).opened_context) {
- // qemu_mutex_unlock(&s->threadpool.mutex);
- codec_deinit(s, ctx_id, NULL);
- // qemu_mutex_lock(&s->threadpool.mutex);
- }
- CONTEXT(s, ctx_id).occupied_context = false;
- qemu_mutex_unlock(&s->threadpool.mutex);
+ if (ret) {
+ parser_buf_size -= ret;
+ parser_buf += ret;
+ }
- // TODO: check if foreach statment needs lock or not.
- QTAILQ_FOREACH_SAFE(rq_elem, &codec_rq, node, rnext) {
- if (rq_elem && rq_elem->data_buf &&
- (rq_elem->data_buf->ctx_id == ctx_id)) {
+ TRACE("after parsing ret: %d parser_outbuf_size %d parser_buf_size %d pts %lld\n",
+ ret, parser_outbuf_size, parser_buf_size, pctx->pts);
- TRACE("remove unused node from codec_rq. ctx_id: %d\n", ctx_id);
- qemu_mutex_lock(&s->context_queue_mutex);
- QTAILQ_REMOVE(&codec_rq, rq_elem, node);
- qemu_mutex_unlock(&s->context_queue_mutex);
- if (rq_elem && rq_elem->data_buf) {
- TRACE("release rq_buffer: %p\n", rq_elem->data_buf);
- g_free(rq_elem->data_buf);
+ /* if there is no output, we must break and wait for more data.
+ * also the timestamp in the context is not updated.
+ */
+ if (parser_outbuf_size == 0) {
+ if (parser_buf_size > 0) {
+ TRACE("parsing data have been left\n");
+ continue;
+ } else {
+ TRACE("finish parsing data\n");
+ break;
+ }
}
- TRACE("release rq_elem: %p\n", rq_elem);
- g_free(rq_elem);
+ packet->data = parser_outbuf;
+ packet->size = parser_outbuf_size;
} else {
- TRACE("no elem of %d context in the codec_rq.\n", ctx_id);
+ TRACE("not using parser %s\n", avctx->codec->name);
}
- }
- QTAILQ_FOREACH_SAFE(wq_elem, &codec_wq, node, wnext) {
- if (wq_elem && wq_elem->ctx_id == ctx_id) {
- TRACE("remove unused node from codec_wq. ctx_id: %d\n", ctx_id);
- qemu_mutex_lock(&s->context_queue_mutex);
- QTAILQ_REMOVE(&codec_wq, wq_elem, node);
- qemu_mutex_unlock(&s->context_queue_mutex);
+ len = avcodec_decode_video2(avctx, picture, (int *)got_picture, packet);
+ TRACE("decode_video. len %d, got_picture %d\n", len, *got_picture);
- if (wq_elem && wq_elem->opaque) {
- TRACE("release wq_buffer: %p\n", wq_elem->opaque);
- g_free(wq_elem->opaque);
- wq_elem->opaque = NULL;
+ if (!pctx) {
+ if (len == 0 && (*got_picture) == 0) {
+ ERR("decoding video didn't return any data! ctx_id %d len %d\n", ctx_id, len);
+ break;
+ } else if (len < 0) {
+ ERR("decoding video error! ctx_id %d len %d\n", ctx_id, len);
+ break;
}
-
- TRACE("release wq_elem: %p\n", wq_elem);
- g_free(wq_elem);
+ parser_buf_size -= len;
+ parser_buf += len;
} else {
- TRACE("no elem of %d context in the codec_wq.\n", ctx_id);
+ if (len == 0) {
+ ERR("decoding video didn't return any data! ctx_id %d len %d\n", ctx_id, len);
+ *got_picture = 0;
+ break;
+ } else if (len < 0) {
+ ERR("decoding video error! trying next ctx_id %d len %d\n", ctx_id, len);
+ break;
+ }
}
- }
+ } while (parser_buf_size > 0);
- TRACE("leave: %s\n", __func__);
+ return len;
}
-int maru_brill_codec_query_list (MaruBrillCodecState *s)
+static bool decode_video_common(MaruBrillCodecState *s, int ctx_id,
+ void *data_buf, bool copy_picture)
{
- AVCodec *codec = NULL;
- uint32_t size = 0, mem_size = 0;
- uint32_t data_len = 0, length = 0;
- int32_t codec_type, media_type;
- int32_t codec_fmts[4], i;
-
- /* register avcodec */
- TRACE("register avcodec\n");
- av_register_all();
-
- codec = av_codec_next(NULL);
- if (!codec) {
- ERR("failed to get codec info.\n");
- return -1;
- }
-
- // a region to store the number of codecs.
- length = 32 + 64 + 6 * sizeof(int32_t);
- mem_size = size = sizeof(uint32_t);
+ AVCodecContext *avctx = NULL;
+ AVFrame *frame = NULL;
+ AVCodecParserContext *pctx = NULL;
+ AVPacket avpkt;
- while (codec) {
- codec_type =
- codec->decode ? CODEC_TYPE_DECODE : CODEC_TYPE_ENCODE;
- media_type = codec->type;
+ DeviceMemEntry *elem = NULL;
+ struct video_decode_input empty_input = { 0, };
+ struct video_decode_input *decode_input = &empty_input;
+ uint32_t got_picture = 0;
+ int32_t len = -1;
- memset(codec_fmts, -1, sizeof(codec_fmts));
- if (media_type == AVMEDIA_TYPE_VIDEO) {
- if (codec->pix_fmts) {
- for (i = 0; codec->pix_fmts[i] != -1 && i < 4; i++) {
- codec_fmts[i] = codec->pix_fmts[i];
- }
- }
- } else if (media_type == AVMEDIA_TYPE_AUDIO) {
- if (codec->sample_fmts) {
- for (i = 0; codec->sample_fmts[i] != -1; i++) {
- codec_fmts[i] = codec->sample_fmts[i];
- }
- }
- } else {
- ERR("unknown media type: %d\n", media_type);
- }
+ TRACE("enter: %s\n", __func__);
- memset(s->vaddr + mem_size, 0x00, length);
- mem_size += length;
+ elem = (DeviceMemEntry *)data_buf;
+ if (!elem || !elem->opaque) {
+ TRACE("decode_video. no input buffer\n");
+ } else {
+ decode_input = elem->opaque;
+ }
- data_len += length;
- memcpy(s->vaddr, &data_len, sizeof(data_len));
+ av_init_packet(&avpkt);
+ avpkt.data = &decode_input->inbuf;
+ avpkt.size = decode_input->inbuf_size;
- memcpy(s->vaddr + size, &codec_type, sizeof(codec_type));
- size += sizeof(codec_type);
- memcpy(s->vaddr + size, &media_type, sizeof(media_type));
- size += sizeof(media_type);
- memcpy(s->vaddr + size, codec->name, strlen(codec->name));
- size += 32;
- memcpy(s->vaddr + size,
- codec->long_name, strlen(codec->long_name));
- size += 64;
- memcpy(s->vaddr + size, codec_fmts, sizeof(codec_fmts));
- size += sizeof(codec_fmts);
+ avctx = CONTEXT(s, ctx_id)->avctx;
+ frame = CONTEXT(s, ctx_id)->frame;
+ pctx = CONTEXT(s, ctx_id)->parser_ctx;
- TRACE("register %s %s\n", codec->name, codec->decode ? "decoder" : "encoder");
- codec = av_codec_next(codec);
+    if (!avctx || !avctx->codec || !frame) {
+ ERR("critical error !!!\n");
+ assert(0);
}
- return 0;
-}
+ TRACE("decode_video. bitrate %d resolution(%dx%d)\n",
+ avctx->bit_rate, avctx->width, avctx->height);
-int maru_brill_codec_get_context_index(MaruBrillCodecState *s)
-{
- int ctx_id;
+ len = parse_and_decode_video(avctx, frame, pctx, ctx_id,
+ &avpkt, &got_picture, decode_input->idx, decode_input->in_offset);
- TRACE("enter: %s\n", __func__);
-
- // requires mutex_lock? its function is protected by critical section.
- qemu_mutex_lock(&s->threadpool.mutex);
- for (ctx_id = 1; ctx_id < CODEC_CONTEXT_MAX; ctx_id++) {
- if (CONTEXT(s, ctx_id).occupied_context == false) {
- TRACE("get %d of codec context successfully.\n", ctx_id);
- CONTEXT(s, ctx_id).occupied_context = true;
- break;
- }
+ DataContainer *dc = g_malloc0(sizeof(DataContainer));
+ dc->picture_buffer_offset = OFFSET_PICTURE_BUFFER; // we have output video data
+ dc->len = len;
+ dc->is_got = got_picture;
+ dc->avctx = avctx;
+ if (got_picture && copy_picture) { // we have output picture
+ dc->frame = frame;
}
- qemu_mutex_unlock(&s->threadpool.mutex);
- if (ctx_id == CODEC_CONTEXT_MAX) {
- ERR("failed to get available codec context. ");
- ERR("try to run codec again.\n");
- ctx_id = -1;
- }
+ brillcodec_push_writequeue(s, dc, 0, ctx_id, copy_video_decode_data);
TRACE("leave: %s\n", __func__);
- return ctx_id;
+ return true;
+}
+
+static bool decode_video(MaruBrillCodecState *s, int ctx_id, void *data_buf)
+{
+ return decode_video_common(s, ctx_id, data_buf, false);
}
+static bool decode_video_and_picture_copy(MaruBrillCodecState *s, int ctx_id, void *data_buf)
+{
+ return decode_video_common(s, ctx_id, data_buf, true);
+}
-// allocate avcontext and avframe struct.
-static AVCodecContext *maru_brill_codec_alloc_context(MaruBrillCodecState *s, int ctx_id)
+static bool picture_copy(MaruBrillCodecState *s, int ctx_id, void *elem)
{
+ DataContainer *dc = g_malloc0(sizeof(DataContainer));
+
TRACE("enter: %s\n", __func__);
- TRACE("allocate %d of context and frame.\n", ctx_id);
- CONTEXT(s, ctx_id).avctx = avcodec_alloc_context3(NULL);
- CONTEXT(s, ctx_id).frame = avcodec_alloc_frame();
- CONTEXT(s, ctx_id).opened_context = false;
+ TRACE("copy decoded image of %d context.\n", ctx_id);
+
+ dc->avctx = CONTEXT(s, ctx_id)->avctx;
+ dc->frame = CONTEXT(s, ctx_id)->frame;
+
+ brillcodec_push_writequeue(s, dc, 0, ctx_id, copy_video_decode_data);
TRACE("leave: %s\n", __func__);
- return CONTEXT(s, ctx_id).avctx;
+ return true;
}
-static AVCodec *maru_brill_codec_find_avcodec(uint8_t *mem_buf)
+static bool encode_video(MaruBrillCodecState *s, int ctx_id, void *data_buf)
{
- AVCodec *codec = NULL;
- int32_t encode, size = 0;
- char codec_name[32] = {0, };
+ AVCodecContext *avctx = NULL;
+ AVFrame *pict = NULL;
+ AVPacket *avpkt = g_malloc0(sizeof(AVPacket));
+ uint8_t *inbuf = NULL, *outbuf = NULL;
+ int outbuf_size = 0;
+ int got_frame = 0, ret = 0;
- memcpy(&encode, mem_buf, sizeof(encode));
- size = sizeof(encode);
- memcpy(codec_name, mem_buf + size, sizeof(codec_name));
- size += sizeof(codec_name);
+ DeviceMemEntry *elem = NULL;
+ struct video_encode_input empty_input = { 0, };
+ struct video_encode_input *encode_input = &empty_input;
- TRACE("type: %d, name: %s\n", encode, codec_name);
+ TRACE("enter: %s\n", __func__);
- if (encode) {
- codec = avcodec_find_encoder_by_name (codec_name);
+ elem = (DeviceMemEntry *)data_buf;
+ if (!elem || !elem->opaque) {
+ TRACE("encode_video. no input buffer\n");
} else {
- codec = avcodec_find_decoder_by_name (codec_name);
+ encode_input = elem->opaque;
}
- INFO("%s!! find %s %s\n", codec ? "success" : "failure",
- codec_name, encode ? "encoder" : "decoder");
-
- return codec;
-}
-
-static void read_codec_init_data(AVCodecContext *avctx, uint8_t *mem_buf)
-{
- struct video_data video = { 0, };
- struct audio_data audio = { 0, };
- int bitrate = 0, size = 0;
- memcpy(&video, mem_buf + size, sizeof(video));
- size = sizeof(video);
- serialize_video_data(&video, avctx);
+ // initialize AVPacket
+ av_init_packet(avpkt);
- memcpy(&audio, mem_buf + size, sizeof(audio));
- size += sizeof(audio);
- serialize_audio_data(&audio, avctx);
+ avctx = CONTEXT(s, ctx_id)->avctx;
+ pict = CONTEXT(s, ctx_id)->frame;
- memcpy(&bitrate, mem_buf + size, sizeof(bitrate));
- size += sizeof(bitrate);
- if (bitrate) {
- avctx->bit_rate = bitrate;
+    if (!avctx || !avctx->codec) {
+ ERR("critical error !!!\n");
+ assert(0);
}
- memcpy(&avctx->codec_tag, mem_buf + size, sizeof(avctx->codec_tag));
- size += sizeof(avctx->codec_tag);
- memcpy(&avctx->extradata_size,
- mem_buf + size, sizeof(avctx->extradata_size));
- size += sizeof(avctx->extradata_size);
- INFO("extradata size: %d.\n", avctx->extradata_size);
+ TRACE("pixel format: %d inbuf: %p, picture data: %p\n",
+ avctx->pix_fmt, inbuf, pict->data[0]);
- if (avctx->extradata_size > 0) {
- avctx->extradata =
- av_mallocz(ROUND_UP_X(avctx->extradata_size +
- FF_INPUT_BUFFER_PADDING_SIZE, 4));
- if (avctx->extradata) {
- memcpy(avctx->extradata, mem_buf + size, avctx->extradata_size);
- }
+ ret = avpicture_fill((AVPicture *)pict, &encode_input->inbuf, avctx->pix_fmt,
+ avctx->width, avctx->height);
+ if (ret < 0) {
+ ERR("after avpicture_fill, ret:%d\n", ret);
} else {
- TRACE("no extra data.\n");
- avctx->extradata =
- av_mallocz(ROUND_UP_X(FF_INPUT_BUFFER_PADDING_SIZE, 4));
- }
-}
+ if (avctx->time_base.num == 0) {
+ pict->pts = AV_NOPTS_VALUE;
+ } else {
+ AVRational bq =
+ {1, (G_USEC_PER_SEC * G_GINT64_CONSTANT(1000))};
+ pict->pts = av_rescale_q(encode_input->in_timestamp, bq, avctx->time_base);
+ }
+ TRACE("encode video. ticks_per_frame:%d, pts:%lld\n",
+ avctx->ticks_per_frame, pict->pts);
-// write the result of codec_init
-static int write_codec_init_data(AVCodecContext *avctx, uint8_t *mem_buf)
-{
- int size = 0;
+ outbuf_size =
+ (avctx->width * avctx->height * 6) + FF_MIN_BUFFER_SIZE;
- if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
- int osize = av_get_bytes_per_sample(avctx->sample_fmt);
+ outbuf = g_malloc0(outbuf_size);
- INFO("avcodec_open. sample_fmt %d, bytes_per_sample %d\n", avctx->sample_fmt, osize);
+ avpkt->data = outbuf;
+ avpkt->size = outbuf_size;
- if ((avctx->codec_id == AV_CODEC_ID_AAC) && avctx->codec->encode2) {
- osize = av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
+ ret = avcodec_encode_video2(avctx, avpkt, pict, &got_frame);
+
+ TRACE("encode video. ret %d got_frame %d outbuf_size %d\n", ret, got_frame, avpkt->size);
+ if (avctx->coded_frame) {
+ TRACE("encode video. keyframe %d\n", avctx->coded_frame->key_frame);
}
- memcpy(mem_buf, &avctx->sample_fmt, sizeof(avctx->sample_fmt));
- size = sizeof(avctx->sample_fmt);
+ }
- // frame_size: samples per packet, initialized when calling 'init'
- memcpy(mem_buf + size, &avctx->frame_size, sizeof(avctx->frame_size));
- size += sizeof(avctx->frame_size);
+ // write encoded video data
+ DataContainer *dc = g_malloc0(sizeof(DataContainer));
+ dc->is_got = got_frame;
+ dc->avctx = avctx;
+ dc->avpkt = avpkt;
- memcpy(mem_buf + size, &osize, sizeof(osize));
- size += sizeof(osize);
- }
+ brillcodec_push_writequeue(s, dc, 0, ctx_id, copy_video_encode_data);
- return size;
+ TRACE("leave: %s\n", __func__);
+ return true;
}
+//
+// CODEC FUNCTIONS
+// AUDIO DECODE / ENCODE
+//
+
+struct audio_data {
+ int32_t channels;
+ int32_t sample_rate;
+ int32_t block_align;
+ int32_t depth;
+ int32_t sample_fmt;
+ int32_t frame_size;
+ int32_t bits_per_smp_fmt;
+ int32_t reserved;
+ int64_t channel_layout;
+} __attribute__((packed));
+
+struct audio_decode_input {
+ int32_t inbuf_size;
+ uint8_t inbuf; // for pointing inbuf address
+} __attribute__((packed));
+
+struct audio_decode_output {
+ int32_t len;
+ int32_t got_frame;
+ uint8_t data; // for pointing data address
+} __attribute__((packed));
+
+struct audio_encode_input {
+ int32_t inbuf_size;
+ uint8_t inbuf; // for pointing inbuf address
+} __attribute__((packed));
+
+struct audio_encode_output {
+ int32_t len;
+ uint8_t data; // for pointing data address
+} __attribute__((packed));
+
static int convert_audio_sample_fmt(const AVCodec *codec, int codec_type, bool encode)
{
int audio_sample_fmt = AV_SAMPLE_FMT_NONE;
return resample_frame;
}
-static int parse_and_decode_video(AVCodecContext *avctx, AVFrame *picture,
- AVCodecParserContext *pctx, int ctx_id,
- AVPacket *packet, int *got_picture,
- int idx, int64_t in_offset)
+// FIXME: ignore "size" now...
+static void copy_audio_decode_data(void *dst, void *opaque, size_t dummy)
{
- uint8_t *parser_outbuf = NULL;
- int parser_outbuf_size = 0;
- uint8_t *parser_buf = packet->data;
- int parser_buf_size = packet->size;
- int ret = 0, len = -1;
- int64_t pts = 0, dts = 0, pos = 0;
-
- pts = dts = idx;
- pos = in_offset;
-
- do {
- if (pctx) {
- ret = av_parser_parse2(pctx, avctx, &parser_outbuf,
- &parser_outbuf_size, parser_buf, parser_buf_size,
- pts, dts, pos);
+ DataContainer *dc = (DataContainer *)opaque;
+ struct audio_decode_output *decode_output =
+ (struct audio_decode_output *)dst;
- if (ret) {
- parser_buf_size -= ret;
- parser_buf += ret;
- }
+ decode_output->len = dc->frame->linesize[0];
+ decode_output->got_frame = dc->is_got ? 1 : 0;
- TRACE("after parsing ret: %d parser_outbuf_size %d parser_buf_size %d pts %lld\n",
- ret, parser_outbuf_size, parser_buf_size, pctx->pts);
+ if (dc->is_got) {
+ struct audio_data *data = (struct audio_data *)&decode_output->data;
+ data->sample_fmt = dc->resampled ? dc->out_sample_fmt : dc->avctx->sample_fmt;
+ data->sample_rate = dc->avctx->sample_rate;
+ data->channels = dc->avctx->channels;
+ data->channel_layout = dc->avctx->channel_layout;
- /* if there is no output, we must break and wait for more data.
- * also the timestamp in the context is not updated.
- */
- if (parser_outbuf_size == 0) {
- if (parser_buf_size > 0) {
- TRACE("parsing data have been left\n");
- continue;
- } else {
- TRACE("finish parsing data\n");
- break;
- }
- }
+ memcpy(dst + OFFSET_PICTURE_BUFFER, dc->frame->data[0], decode_output->len);
- packet->data = parser_outbuf;
- packet->size = parser_outbuf_size;
- } else {
- TRACE("not using parser %s\n", avctx->codec->name);
+ if (dc->resampled) {
+ av_free(dc->frame->data[0]);
+ av_free(dc->frame);
}
+ }
- len = avcodec_decode_video2(avctx, picture, got_picture, packet);
- TRACE("decode_video. len %d, got_picture %d\n", len, *got_picture);
+ g_free(dc);
+}
- if (!pctx) {
- if (len == 0 && (*got_picture) == 0) {
- ERR("decoding video didn't return any data! ctx_id %d len %d\n", ctx_id, len);
- break;
- } else if (len < 0) {
- ERR("decoding video error! ctx_id %d len %d\n", ctx_id, len);
- break;
- }
- parser_buf_size -= len;
- parser_buf += len;
- } else {
- if (len == 0) {
- ERR("decoding video didn't return any data! ctx_id %d len %d\n", ctx_id, len);
- *got_picture = 0;
- break;
- } else if (len < 0) {
- ERR("decoding video error! trying next ctx_id %d len %d\n", ctx_id, len);
- break;
- }
- }
- } while (parser_buf_size > 0);
+static void copy_audio_encode_data(void *dst, void *opaque, size_t dummy)
+{
+ DataContainer *dc = (DataContainer *)opaque;
+ struct audio_encode_output *encode_output = (struct audio_encode_output *)dst;
- return len;
+ encode_output->len = dc->avpkt->size;
+ if (dc->avpkt->size && dc->is_got) {
+ memcpy(&encode_output->data, dc->avpkt->data, dc->avpkt->size);
+ }
+
+ g_free(dc->avpkt->data);
+ g_free(dc->avpkt);
+ g_free(dc);
}
-// codec functions
-static bool codec_init(MaruBrillCodecState *s, int ctx_id, void *data_buf)
+/*
+ * dc->resampled = resample_frame ? true : false;
+ * decode_audio >> raw audio_buffer >> resample
+ *
+ * the audio sink cannot handle planar formats, so the decoded
+ * buffer must be resampled into an interleaved (packed) format.
+ */
+static bool decode_audio(MaruBrillCodecState *s, int ctx_id, void *data_buf)
{
- AVCodecContext *avctx = NULL;
- AVCodec *codec = NULL;
- int size = 0, ret = -1;
+ AVCodecContext *avctx;
+ AVPacket avpkt;
+ AVFrame *frame = NULL;
+
DeviceMemEntry *elem = NULL;
- uint8_t *tempbuf = NULL;
- int tempbuf_size = 0;
+ struct audio_decode_input empty_input = { 0, };
+ struct audio_decode_input *decode_input = &empty_input;
+ int len = -1, got_frame = 0;
TRACE("enter: %s\n", __func__);
elem = (DeviceMemEntry *)data_buf;
-
- // allocate AVCodecContext
- avctx = maru_brill_codec_alloc_context(s, ctx_id);
- if (!avctx) {
- ERR("[%d] failed to allocate context.\n", __LINE__);
- ret = -1;
+ if (!elem || !elem->opaque) {
+ TRACE("decode_audio. no input buffer\n");
} else {
- codec = maru_brill_codec_find_avcodec(elem->opaque);
- if (codec) {
- size = sizeof(int32_t) + 32; // buffer size of codec_name
- read_codec_init_data(avctx, elem->opaque + size);
-
- // in case of aac encoder, sample format is float
- if (!strcmp(codec->name, "aac") && codec->encode2) {
- TRACE("convert sample format into SAMPLE_FMT_FLTP\n");
- avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
-
- avctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
-
- INFO("aac encoder!! channels %d channel_layout %lld\n",
- avctx->channels, avctx->channel_layout);
- avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
- }
-
- TRACE("audio sample format %d\n", avctx->sample_fmt);
- TRACE("strict_std_compliance %d\n", avctx->strict_std_compliance);
-
- ret = avcodec_open2(avctx, codec, NULL);
- INFO("avcodec_open. ret 0x%x ctx_id %d\n", ret, ctx_id);
-
- INFO("channels %d sample_rate %d sample_fmt %d "
- "channel_layout %lld frame_size %d\n",
- avctx->channels, avctx->sample_rate, avctx->sample_fmt,
- avctx->channel_layout, avctx->frame_size);
-
- tempbuf_size = (sizeof(avctx->sample_fmt) + sizeof(avctx->frame_size)
- + sizeof(avctx->extradata_size) + avctx->extradata_size)
- + sizeof(int);
-
- CONTEXT(s, ctx_id).opened_context = true;
- CONTEXT(s, ctx_id).parser_ctx =
- maru_brill_codec_parser_init(avctx);
- } else {
- ERR("failed to find codec. ctx_id: %d\n", ctx_id);
- ret = -1;
- }
+ decode_input = elem->opaque;
}
- tempbuf_size += sizeof(ret);
-
- tempbuf = g_malloc(tempbuf_size);
- if (!tempbuf) {
- ERR("failed to allocate a buffer\n");
- tempbuf_size = 0;
- } else {
- memcpy(tempbuf, &ret, sizeof(ret));
- size = sizeof(ret);
- if (ret < 0) {
- ERR("failed to open codec contex.\n");
- } else {
- size += write_codec_init_data(avctx, tempbuf + size);
- TRACE("codec_init. copyback!! size %d\n", size);
- {
- memcpy(tempbuf + size, &avctx->extradata_size, sizeof(avctx->extradata_size));
- size += sizeof(avctx->extradata_size);
-
- INFO("codec_init. extradata_size: %d\n", avctx->extradata_size);
- if (avctx->extradata) {
- memcpy(tempbuf + size, avctx->extradata, avctx->extradata_size);
- size += avctx->extradata_size;
- }
+ av_init_packet(&avpkt);
+ avpkt.data = &decode_input->inbuf;
+ avpkt.size = decode_input->inbuf_size;
+
+ avctx = CONTEXT(s, ctx_id)->avctx;
+ frame = CONTEXT(s, ctx_id)->frame;
+
+    if (!avctx || !avctx->codec || !frame) {
+ ERR("critical error !!!\n");
+ assert(0);
+ }
+
+ len = avcodec_decode_audio4(avctx, frame, &got_frame, &avpkt);
+ TRACE("decode_audio. len %d, channel_layout %lld, got_frame %d\n",
+ len, avctx->channel_layout, got_frame);
+
+ AVFrame *resample_frame = NULL;
+ int resample_buf_size = 0;
+ uint32_t out_sample_fmt = -1;
+
+ if (got_frame) {
+ if (av_sample_fmt_is_planar(avctx->sample_fmt)) {
+ out_sample_fmt = convert_audio_sample_fmt(avctx->codec, avctx->codec_type, 0);
+
+ if (avctx->channel_layout == 0) {
+ avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
+ TRACE("decode_audio. channel_layout %lld channels %d\n",
+ avctx->channel_layout, avctx->channels);
+ }
+ resample_frame = resample_audio(avctx, frame, frame->linesize[0],
+ avctx->sample_fmt, NULL, &resample_buf_size,
+ out_sample_fmt);
+ if (!resample_frame) {
+ ERR("failed to resample decoded audio buffer\n");
+ len = -1;
+ got_frame = 0;
}
}
}
- maru_brill_codec_push_writequeue(s, tempbuf, tempbuf_size, ctx_id, NULL);
+ DataContainer *dc = g_malloc0(sizeof(DataContainer));
+ dc->len = len;
+ dc->is_got = got_frame;
+ dc->avctx = avctx;
+ dc->resampled = resample_frame ? true : false;
+ dc->out_sample_fmt = out_sample_fmt;
+ dc->frame = resample_frame ? resample_frame : frame;
- TRACE("leave: %s\n", __func__);
+    brillcodec_push_writequeue(s, dc, 0, ctx_id, &copy_audio_decode_data);
+ TRACE("leave: %s\n", __func__);
return true;
}
-static bool codec_deinit(MaruBrillCodecState *s, int ctx_id, void *data_buf)
+static bool encode_audio(MaruBrillCodecState *s, int ctx_id, void *data_buf)
{
AVCodecContext *avctx = NULL;
- AVFrame *frame = NULL;
- AVCodecParserContext *parserctx = NULL;
+ AVPacket *avpkt = g_malloc0(sizeof(AVPacket));
+ int len = 0, got_frame = 0;
+
+ DeviceMemEntry *elem = NULL;
+ AVFrame *in_frame = NULL;
+ AVFrame *resample_frame = NULL;
+
+ struct audio_encode_input empty_input = { 0, };
+ struct audio_encode_input *encode_input = &empty_input;
TRACE("enter: %s\n", __func__);
- avctx = CONTEXT(s, ctx_id).avctx;
- frame = CONTEXT(s, ctx_id).frame;
- parserctx = CONTEXT(s, ctx_id).parser_ctx;
- if (!avctx || !frame) {
- TRACE("%d of AVCodecContext or AVFrame is NULL. "
- " Those resources have been released before.\n", ctx_id);
- return false;
+    /*
+     * copy the raw audio data passed from the gstreamer encoder plugin
+     * encode_input->inbuf_size: size of the raw audio data
+     * encode_input->inbuf     : the raw audio data itself
+     */
+ elem = (DeviceMemEntry *)data_buf;
+ if (!elem || !elem->opaque) {
+ TRACE("encode_audio. no input buffer\n");
+ } else {
+ encode_input = elem->opaque;
}
- INFO("close avcontext of %d\n", ctx_id);
- // qemu_mutex_lock(&s->threadpool.mutex);
- avcodec_close(avctx);
- CONTEXT(s, ctx_id).opened_context = false;
- // qemu_mutex_unlock(&s->threadpool.mutex);
+ av_init_packet(avpkt);
- if (avctx->extradata) {
- TRACE("free context extradata\n");
- av_free(avctx->extradata);
- CONTEXT(s, ctx_id).avctx->extradata = NULL;
- }
+ avctx = CONTEXT(s, ctx_id)->avctx;
+ in_frame = CONTEXT(s, ctx_id)->frame;
- if (frame) {
- TRACE("free frame\n");
- avcodec_free_frame(&frame);
- CONTEXT(s, ctx_id).frame = NULL;
+ if (!avctx || !avctx->codec || !in_frame) {
+ ERR("critical error !!!\n");
+ assert(0);
}
- if (avctx) {
- TRACE("free codec context\n");
- av_free(avctx);
- CONTEXT(s, ctx_id).avctx = NULL;
- }
+ int bytes_per_sample = 0;
+ int nb_samples = 0;
+ int audio_in_sample_fmt = AV_SAMPLE_FMT_S16;
+    // the audio input source delivers samples in signed 16-bit integer format (AV_SAMPLE_FMT_S16).
- if (parserctx) {
- INFO("close parser context\n");
- av_parser_close(parserctx);
- CONTEXT(s, ctx_id).parser_ctx = NULL;
+ int resample_buf_size = 0;
+ int resample_sample_fmt = 0;
+ int ret = 0;
+
+ bytes_per_sample = av_get_bytes_per_sample(audio_in_sample_fmt);
+ TRACE("bytes per sample %d, sample format %d\n", bytes_per_sample, audio_in_sample_fmt);
+
+ nb_samples = encode_input->inbuf_size / (bytes_per_sample * avctx->channels);
+ TRACE("nb_samples %d\n", nb_samples);
+
+ ret = fill_audio_into_frame(avctx, in_frame,
+ &encode_input->inbuf, encode_input->inbuf_size,
+ nb_samples, audio_in_sample_fmt);
+ if (ret < 0) {
+ ERR("failed to fill audio into frame\n");
+ } else {
+ resample_sample_fmt =
+ convert_audio_sample_fmt(avctx->codec, avctx->codec_type, 1);
+
+ resample_frame =
+ resample_audio(avctx, in_frame, encode_input->inbuf_size,
+ audio_in_sample_fmt, NULL, &resample_buf_size,
+ resample_sample_fmt);
+
+ if (resample_frame) {
+ len = avcodec_encode_audio2(avctx, avpkt, (const AVFrame *)resample_frame, &got_frame);
+ TRACE("encode audio. len %d got_frame %d avpkt->size %d frame_number %d\n",
+ len, got_frame, avpkt->size, avctx->frame_number);
+ }
}
- maru_brill_codec_push_writequeue(s, NULL, 0, ctx_id, NULL);
+ DataContainer *dc = g_malloc0(sizeof(DataContainer));
+ dc->is_got = got_frame;
+ dc->avctx = avctx;
+ dc->avpkt = avpkt;
- TRACE("leave: %s\n", __func__);
+    brillcodec_push_writequeue(s, dc, 0, ctx_id, &copy_audio_encode_data);
+
+ TRACE("[%s] leave:\n", __func__);
return true;
}
-static bool codec_flush_buffers(MaruBrillCodecState *s, int ctx_id, void *data_buf)
+//
+// CODEC FUNCTIONS
+// CODEC INIT / DEINIT
+//
+
+static void serialize_video_data(const struct video_data *video,
+ AVCodecContext *avctx)
{
- AVCodecContext *avctx = NULL;
- bool ret = true;
+ if (video->width) {
+ avctx->width = video->width;
+ }
+ if (video->height) {
+ avctx->height = video->height;
+ }
+ if (video->fps_n) {
+ avctx->time_base.num = video->fps_n;
+ }
+ if (video->fps_d) {
+ avctx->time_base.den = video->fps_d;
+ }
+ if (video->pix_fmt > PIX_FMT_NONE) {
+ avctx->pix_fmt = video->pix_fmt;
+ }
+ if (video->par_n) {
+ avctx->sample_aspect_ratio.num = video->par_n;
+ }
+ if (video->par_d) {
+ avctx->sample_aspect_ratio.den = video->par_d;
+ }
+ if (video->bpp) {
+ avctx->bits_per_coded_sample = video->bpp;
+ }
+ if (video->ticks_per_frame) {
+ avctx->ticks_per_frame = video->ticks_per_frame;
+ }
- TRACE("enter: %s\n", __func__);
+ INFO("codec_init. video, resolution: %dx%d, framerate: %d/%d "
+ "pixel_fmt: %d sample_aspect_ratio: %d/%d bpp %d\n",
+ avctx->width, avctx->height, avctx->time_base.num,
+ avctx->time_base.den, avctx->pix_fmt, avctx->sample_aspect_ratio.num,
+ avctx->sample_aspect_ratio.den, avctx->bits_per_coded_sample);
+}
- avctx = CONTEXT(s, ctx_id).avctx;
- if (!avctx) {
- ERR("%d of AVCodecContext is NULL.\n", ctx_id);
- ret = false;
- } else if (!avctx->codec) {
- ERR("%d of AVCodec is NULL.\n", ctx_id);
- ret = false;
- } else {
- TRACE("flush %d context of buffers.\n", ctx_id);
- AVCodecParserContext *pctx = NULL;
- uint8_t *poutbuf = NULL;
- int poutbuf_size = 0;
- int res = 0;
+static void serialize_audio_data(const struct audio_data *audio,
+ AVCodecContext *avctx)
+{
+ if (audio->channels) {
+ avctx->channels = audio->channels;
+ }
+ if (audio->sample_rate) {
+ avctx->sample_rate = audio->sample_rate;
+ }
+ if (audio->block_align) {
+ avctx->block_align = audio->block_align;
+ }
- uint8_t p_inbuf[FF_INPUT_BUFFER_PADDING_SIZE];
- int p_inbuf_size = FF_INPUT_BUFFER_PADDING_SIZE;
+ if (audio->sample_fmt > AV_SAMPLE_FMT_NONE) {
+ avctx->sample_fmt = audio->sample_fmt;
+ }
- memset(&p_inbuf, 0x00, p_inbuf_size);
+ INFO("codec_init. audio, channel %d sample_rate %d sample_fmt %d ch_layout %lld\n",
+ avctx->channels, avctx->sample_rate, avctx->sample_fmt, avctx->channel_layout);
+}
- pctx = CONTEXT(s, ctx_id).parser_ctx;
- if (pctx) {
- res = av_parser_parse2(pctx, avctx, &poutbuf, &poutbuf_size,
- p_inbuf, p_inbuf_size, -1, -1, -1);
- INFO("before flush buffers, using parser. res: %d\n", res);
+static enum PixelFormat get_format(AVCodecContext *avctx,
+ const enum PixelFormat *pi_fmt) {
+ bool can_hwaccel = false;
+ int i;
+
+ CodecContext *context = (CodecContext *)avctx->opaque;
+ MaruBrillCodecState *s = context->state;
+
+ if (!s->hwaccel_plugin) {
+ goto end;
+ }
+
+ for (i = 0; pi_fmt[i] != PIX_FMT_NONE; ++i) {
+ const AVPixFmtDescriptor *dsc = av_pix_fmt_desc_get(pi_fmt[i]);
+ if (dsc == NULL) {
+ continue;
}
+ if ((dsc->flags & PIX_FMT_HWACCEL) != 0) {
+ can_hwaccel = true;
+ }
+ }
- avcodec_flush_buffers(avctx);
+ if (!can_hwaccel) {
+ goto end;
}
- maru_brill_codec_push_writequeue(s, NULL, 0, ctx_id, NULL);
+ void *plugin_context = s->hwaccel_plugin->setup(avctx, avctx->width, avctx->height);
+ if (!plugin_context) {
+ goto end;
+ }
+ set_plugin_context(avctx, plugin_context);
- TRACE("leave: %s\n", __func__);
+ for (i = 0; pi_fmt[i] != PIX_FMT_NONE; ++i) {
+ if (pi_fmt[i] == s->hwaccel_plugin->pix_fmt) {
+ break;
+ }
+ }
- return ret;
-}
+ if (pi_fmt[i] == PIX_FMT_NONE) {
+ goto end;
+ }
-static bool codec_decode_video(MaruBrillCodecState *s, int ctx_id, void *data_buf)
-{
- AVCodecContext *avctx = NULL;
- AVFrame *picture = NULL;
- AVCodecParserContext *pctx = NULL;
- AVPacket avpkt;
+ INFO("HW_ACCEL is enabled with pix_fmt [%s]\n", av_get_pix_fmt_name(pi_fmt[i]));
+ context->is_hwaccel = true;
+ return pi_fmt[i];
- int got_picture = 0, len = -1;
- int idx = 0, size = 0;
- int64_t in_offset = 0;
- uint8_t *inbuf = NULL;
- int inbuf_size = 0;
- DeviceMemEntry *elem = NULL;
- uint8_t *tempbuf = NULL;
- int tempbuf_size = 0;
+end:
+ INFO("HW_ACCEL is disabled\n");
+ context->is_hwaccel = false;
+ return avcodec_default_get_format(avctx, pi_fmt);
+}
- TRACE("enter: %s\n", __func__);
+static int get_buffer(struct AVCodecContext *avctx, AVFrame *frame) {
+ CodecContext *context = (CodecContext *)avctx->opaque;
- elem = (DeviceMemEntry *)data_buf;
- if (elem && elem->opaque) {
- memcpy(&inbuf_size, elem->opaque, sizeof(inbuf_size));
- size += sizeof(inbuf_size);
- memcpy(&idx, elem->opaque + size, sizeof(idx));
- size += sizeof(idx);
- memcpy(&in_offset, elem->opaque + size, sizeof(in_offset));
- size += sizeof(in_offset);
- TRACE("decode_video. inbuf_size %d\n", inbuf_size);
-
- if (inbuf_size > 0) {
- inbuf = elem->opaque + size;
- }
- } else {
- TRACE("decode_video. no input buffer\n");
- // FIXME: improve error handling
- // return false;
+ if (context->is_hwaccel) {
+ return context->state->hwaccel_plugin->get_buffer(avctx, frame);
}
- av_init_packet(&avpkt);
- avpkt.data = inbuf;
- avpkt.size = inbuf_size;
+ return avcodec_default_get_buffer(avctx, frame);
+}
- avctx = CONTEXT(s, ctx_id).avctx;
- picture = CONTEXT(s, ctx_id).frame;
- if (!avctx) {
- ERR("decode_video. %d of AVCodecContext is NULL.\n", ctx_id);
- } else if (!avctx->codec) {
- ERR("decode_video. %d of AVCodec is NULL.\n", ctx_id);
- } else if (!picture) {
- ERR("decode_video. %d of AVFrame is NULL.\n", ctx_id);
- } else {
- pctx = CONTEXT(s, ctx_id).parser_ctx;
+static void release_buffer(struct AVCodecContext *avctx, AVFrame *frame) {
+ CodecContext *context = (CodecContext *)avctx->opaque;
- len = parse_and_decode_video(avctx, picture, pctx, ctx_id,
- &avpkt, &got_picture, idx, in_offset);
+ if (context->is_hwaccel) {
+ return context->state->hwaccel_plugin->release_buffer(avctx, frame);
}
- tempbuf_size = sizeof(len) + sizeof(got_picture) + sizeof(struct video_data);
+ return avcodec_default_release_buffer(avctx, frame);
+}
+
+// allocate avcontext and avframe struct.
+static AVCodecContext *maru_brill_codec_alloc_context(MaruBrillCodecState *s, int ctx_id)
+{
+ TRACE("enter: %s\n", __func__);
- tempbuf = g_malloc(tempbuf_size);
- if (!tempbuf) {
- ERR("failed to allocate decoded audio buffer\n");
- tempbuf_size = 0;
- } else {
- struct video_data video;
-
- memcpy(tempbuf, &len, sizeof(len));
- size = sizeof(len);
- memcpy(tempbuf + size, &got_picture, sizeof(got_picture));
- size += sizeof(got_picture);
- if (avctx) {
- deserialize_video_data(avctx, &video);
- memcpy(tempbuf + size, &video, sizeof(struct video_data));
- }
- }
+ TRACE("allocate %d of context and frame.\n", ctx_id);
- maru_brill_codec_push_writequeue(s, tempbuf, tempbuf_size, ctx_id, NULL);
+ CONTEXT(s, ctx_id)->avctx = avcodec_alloc_context3(NULL);
+
+ AVCodecContext *avctx = CONTEXT(s, ctx_id)->avctx;
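+    /* Hook the libav callbacks so the hwaccel plugin can take part in
+     * pixel-format negotiation (get_format) and frame-buffer management
+     * (get_buffer / release_buffer); opaque lets the callbacks recover this
+     * CodecContext. */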
+ avctx->get_format = get_format;
+ avctx->get_buffer = get_buffer;
+ avctx->reget_buffer = avcodec_default_reget_buffer;
+ avctx->release_buffer = release_buffer;
+ avctx->opaque = CONTEXT(s, ctx_id);
+
+ CONTEXT(s, ctx_id)->frame = avcodec_alloc_frame();
+ CONTEXT(s, ctx_id)->opened_context = false;
+ CONTEXT(s, ctx_id)->state = s;
TRACE("leave: %s\n", __func__);
- return true;
+ return avctx;
}
-static bool codec_picture_copy (MaruBrillCodecState *s, int ctx_id, void *elem)
+static AVCodec *maru_brill_codec_find_avcodec(uint8_t *mem_buf)
{
- AVCodecContext *avctx = NULL;
- AVPicture *src = NULL;
- int pict_size = 0;
- bool ret = true;
+ AVCodec *codec = NULL;
+ int32_t encode, size = 0;
+ char codec_name[32] = {0, };
- TRACE("enter: %s\n", __func__);
+ memcpy(&encode, mem_buf, sizeof(encode));
+ size = sizeof(encode);
+ memcpy(codec_name, mem_buf + size, sizeof(codec_name));
+ size += sizeof(codec_name);
- TRACE("copy decoded image of %d context.\n", ctx_id);
+ TRACE("type: %d, name: %s\n", encode, codec_name);
- avctx = CONTEXT(s, ctx_id).avctx;
- src = (AVPicture *)CONTEXT(s, ctx_id).frame;
- if (!avctx) {
- ERR("picture_copy. %d of AVCodecContext is NULL.\n", ctx_id);
- ret = false;
- } else if (!avctx->codec) {
- ERR("picture_copy. %d of AVCodec is NULL.\n", ctx_id);
- ret = false;
- } else if (!src) {
- ERR("picture_copy. %d of AVFrame is NULL.\n", ctx_id);
- ret = false;
+ if (encode) {
+ codec = avcodec_find_encoder_by_name (codec_name);
} else {
- TRACE("decoded image. pix_fmt: %d width: %d, height: %d\n",
- avctx->pix_fmt, avctx->width, avctx->height);
- pict_size = avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height);
+ codec = avcodec_find_decoder_by_name (codec_name);
+ }
+    INFO("%s finding %s %s\n", codec ? "succeeded" : "failed",
+         codec_name, encode ? "encoder" : "decoder");
- if ((pict_size) < 0) {
- ERR("picture size: %d\n", pict_size);
- ret = false;
- } else {
- TRACE("picture size: %d\n", pict_size);
- maru_brill_codec_push_writequeue(s, src, pict_size, ctx_id, &default_video_decode_data_handler);
+ return codec;
+}
+
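+/*
+ * Layout of the codec-init payload sent by the guest, as parsed by
+ * maru_brill_codec_find_avcodec() above and read_codec_init_data() below.
+ * This is an illustrative sketch only; no such struct exists in the code:
+ *
+ *   int32_t            encode;         // 0: decoder, 1: encoder
+ *   char               codec_name[32];
+ *   struct video_data  video;
+ *   struct audio_data  audio;
+ *   int32_t            bitrate;
+ *   uint32_t           codec_tag;
+ *   int32_t            extradata_size;
+ *   uint8_t            extradata[extradata_size];
+ */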
+static void read_codec_init_data(AVCodecContext *avctx, uint8_t *mem_buf)
+{
+ struct video_data video = { 0, };
+ struct audio_data audio = { 0, };
+ int bitrate = 0, size = 0;
+
+ memcpy(&video, mem_buf + size, sizeof(video));
+ size = sizeof(video);
+ serialize_video_data(&video, avctx);
+
+ memcpy(&audio, mem_buf + size, sizeof(audio));
+ size += sizeof(audio);
+ serialize_audio_data(&audio, avctx);
+
+ memcpy(&bitrate, mem_buf + size, sizeof(bitrate));
+ size += sizeof(bitrate);
+ if (bitrate) {
+ avctx->bit_rate = bitrate;
+ }
+
+ memcpy(&avctx->codec_tag, mem_buf + size, sizeof(avctx->codec_tag));
+ size += sizeof(avctx->codec_tag);
+ memcpy(&avctx->extradata_size,
+ mem_buf + size, sizeof(avctx->extradata_size));
+ size += sizeof(avctx->extradata_size);
+ INFO("extradata size: %d.\n", avctx->extradata_size);
+
+ if (avctx->extradata_size > 0) {
+ avctx->extradata =
+ av_mallocz(ROUND_UP_X(avctx->extradata_size +
+ FF_INPUT_BUFFER_PADDING_SIZE, 4));
+ if (avctx->extradata) {
+ memcpy(avctx->extradata, mem_buf + size, avctx->extradata_size);
}
+ } else {
+ TRACE("no extra data.\n");
+ avctx->extradata =
+ av_mallocz(ROUND_UP_X(FF_INPUT_BUFFER_PADDING_SIZE, 4));
}
-
- TRACE("leave: %s\n", __func__);
-
- return ret;
}
-/*
- * decode_audio >> raw audio_buffer >> resample
- *
- * audios sink cannot handle planar format, so it is required
- * to resample audio buffer into linear format.
- */
-static bool codec_decode_audio(MaruBrillCodecState *s, int ctx_id, void *data_buf)
+// write the result of codec_init
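+// For audio codecs the reply carries the negotiated sample format, the frame
+// size (samples per packet) and the bytes-per-sample value; init() appends
+// avctx->extradata afterwards so the guest can pick it up.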
+static int write_codec_init_data(AVCodecContext *avctx, uint8_t *mem_buf)
{
- AVCodecContext *avctx;
- AVPacket avpkt;
- AVFrame *audio_out = NULL;
- uint8_t *inbuf = NULL;
- int inbuf_size = 0, size = 0;
- int len = -1, got_frame = 0;
-
- DeviceMemEntry *elem = NULL;
- uint8_t *tempbuf = NULL;
- int tempbuf_size = 0;
-
- AVFrame *resample_frame = NULL;
- uint8_t *resample_buf = NULL;
- int resample_buf_size = 0;
- int out_sample_fmt = -1;
+ int size = 0;
- TRACE("enter: %s\n", __func__);
+ if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
+ int osize = av_get_bytes_per_sample(avctx->sample_fmt);
- elem = (DeviceMemEntry *)data_buf;
- if (elem && elem->opaque) {
- memcpy(&inbuf_size, elem->opaque, sizeof(inbuf_size));
- size = sizeof(inbuf_size);
- TRACE("decode_audio. inbuf_size %d\n", inbuf_size);
+ INFO("avcodec_open. sample_fmt %d, bytes_per_sample %d\n", avctx->sample_fmt, osize);
- if (inbuf_size > 0) {
- inbuf = elem->opaque + size;
+ if ((avctx->codec_id == AV_CODEC_ID_AAC) && avctx->codec->encode2) {
+ osize = av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
}
- } else {
- ERR("decode_audio. no input buffer\n");
- // FIXME: improve error handling
- // return false;
- }
+ memcpy(mem_buf, &avctx->sample_fmt, sizeof(avctx->sample_fmt));
+ size = sizeof(avctx->sample_fmt);
- av_init_packet(&avpkt);
- avpkt.data = inbuf;
- avpkt.size = inbuf_size;
+ // frame_size: samples per packet, initialized when calling 'init'
+ memcpy(mem_buf + size, &avctx->frame_size, sizeof(avctx->frame_size));
+ size += sizeof(avctx->frame_size);
- avctx = CONTEXT(s, ctx_id).avctx;
- audio_out = CONTEXT(s, ctx_id).frame;
- if (!avctx) {
- ERR("decode_audio. %d of AVCodecContext is NULL\n", ctx_id);
- } else if (!avctx->codec) {
- ERR("decode_audio. %d of AVCodec is NULL\n", ctx_id);
- } else if (!audio_out) {
- ERR("decode_audio. %d of AVFrame is NULL\n", ctx_id);
- } else {
- len = avcodec_decode_audio4(avctx, audio_out, &got_frame, &avpkt);
- TRACE("decode_audio. len %d, channel_layout %lld got_frame %d\n",
- len, avctx->channel_layout, got_frame);
+ memcpy(mem_buf + size, &osize, sizeof(osize));
+ size += sizeof(osize);
+ }
- if (got_frame) {
- if (av_sample_fmt_is_planar(avctx->sample_fmt)) {
- out_sample_fmt = convert_audio_sample_fmt(avctx->codec, avctx->codec_type, 0);
+ return size;
+}
- if (avctx->channel_layout == 0) {
- avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
- TRACE("decode_audio. channel_layout %lld channels %d\n",
- avctx->channel_layout, avctx->channels);
- }
- resample_frame = resample_audio(avctx, audio_out, audio_out->linesize[0],
- avctx->sample_fmt, NULL, &resample_buf_size,
- out_sample_fmt);
- if (resample_frame) {
- resample_buf = resample_frame->data[0];
- } else {
- ERR("failed to resample decoded audio buffer\n");
- len = -1;
- got_frame = 0;
- }
- } else {
- INFO("decode_audio. linear audio format\n");
- resample_buf = audio_out->data[0];
- resample_buf_size = audio_out->linesize[0];
- }
- }
- }
+static AVCodecParserContext *maru_brill_codec_parser_init(AVCodecContext *avctx)
+{
+ AVCodecParserContext *parser = NULL;
- tempbuf_size = (sizeof(len) + sizeof(got_frame));
- if (len < 0) {
- ERR("failed to decode audio. ctx_id: %d len: %d got_frame: %d\n",
- ctx_id, len, got_frame);
- got_frame = 0;
- } else {
- tempbuf_size += (sizeof(out_sample_fmt) + sizeof(avctx->sample_rate)
- + sizeof(avctx->channels) + sizeof(avctx->channel_layout)
- + sizeof(resample_buf_size) + resample_buf_size);
+ if (!avctx) {
+ ERR("context is NULL\n");
+ return NULL;
}
- tempbuf = g_malloc(tempbuf_size);
- if (!tempbuf) {
- ERR("failed to allocate decoded audio buffer\n");
- } else {
- memcpy(tempbuf, &len, sizeof(len));
- size = sizeof(len);
- memcpy(tempbuf + size, &got_frame, sizeof(got_frame));
- size += sizeof(got_frame);
- if (got_frame) {
- memcpy(tempbuf + size, &out_sample_fmt, sizeof(out_sample_fmt));
- size += sizeof(out_sample_fmt);
- memcpy(tempbuf + size, &avctx->sample_rate, sizeof(avctx->sample_rate));
- size += sizeof(avctx->sample_rate);
- memcpy(tempbuf + size, &avctx->channels, sizeof(avctx->channels));
- size += sizeof(avctx->channels);
- memcpy(tempbuf + size, &avctx->channel_layout, sizeof(avctx->channel_layout));
- size += sizeof(avctx->channel_layout);
-
- memcpy(tempbuf + size, &resample_buf_size, sizeof(resample_buf_size));
- size += sizeof(resample_buf_size);
- if (resample_buf) {
- TRACE("copy resampled audio buffer\n");
- memcpy(tempbuf + size, resample_buf, resample_buf_size);
- }
+ switch (avctx->codec_id) {
+ case CODEC_ID_MPEG4:
+ case CODEC_ID_VC1:
+ TRACE("not using parser\n");
+ break;
+ case CODEC_ID_H264:
+ if (avctx->extradata_size == 0) {
+ TRACE("H.264 with no extradata, creating parser.\n");
+ parser = av_parser_init (avctx->codec_id);
}
+ break;
+ default:
+ parser = av_parser_init(avctx->codec_id);
+ if (parser) {
+ INFO("using parser: %s\n", avctx->codec->name);
+ }
+ break;
}
- maru_brill_codec_push_writequeue(s, tempbuf, tempbuf_size, ctx_id, NULL);
-
- if (resample_frame) {
- TRACE("release decoded frame\n");
- av_free(resample_buf);
- av_free(resample_frame);
- }
-
- TRACE("leave: %s\n", __func__);
- return true;
+ return parser;
}
-static bool codec_encode_video(MaruBrillCodecState *s, int ctx_id, void *data_buf)
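+/*
+ * init: allocate an AVCodecContext/AVFrame pair for this context slot, look
+ * up the requested codec by name from the guest buffer, apply the guest's
+ * init parameters, open the codec and push the result (return code, audio
+ * output parameters and extradata) back through the write queue.
+ */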
+static bool init(MaruBrillCodecState *s, int ctx_id, void *data_buf)
{
AVCodecContext *avctx = NULL;
- AVFrame *pict = NULL;
- AVPacket avpkt;
- uint8_t *inbuf = NULL, *outbuf = NULL;
- int inbuf_size = 0, outbuf_size = 0;
- int got_frame = 0, ret = 0, size = 0;
- int64_t in_timestamp = 0;
- int coded_frame = 0, key_frame = 0;
-
+ AVCodec *codec = NULL;
+ int size = 0, ret = -1;
DeviceMemEntry *elem = NULL;
uint8_t *tempbuf = NULL;
int tempbuf_size = 0;
TRACE("enter: %s\n", __func__);
elem = (DeviceMemEntry *)data_buf;
- if (elem && elem->opaque) {
- memcpy(&inbuf_size, elem->opaque, sizeof(inbuf_size));
- size += sizeof(inbuf_size);
- memcpy(&in_timestamp, elem->opaque + size, sizeof(in_timestamp));
- size += sizeof(in_timestamp);
- TRACE("encode video. inbuf_size %d\n", inbuf_size);
-
- if (inbuf_size > 0) {
- inbuf = elem->opaque + size;
- }
+
+ // allocate AVCodecContext
+ avctx = maru_brill_codec_alloc_context(s, ctx_id);
+ if (!avctx) {
+ ERR("[%d] failed to allocate context.\n", __LINE__);
+ ret = -1;
} else {
- TRACE("encode video. no input buffer.\n");
- // FIXME: improve error handling
- // return false;
- }
+ codec = maru_brill_codec_find_avcodec(elem->opaque);
+ if (codec) {
+ size = sizeof(int32_t) + 32; // buffer size of codec_name
+ read_codec_init_data(avctx, elem->opaque + size);
- // initialize AVPacket
- av_init_packet(&avpkt);
- avpkt.data = NULL;
- avpkt.size = 0;
+            // the AAC encoder path requires planar float (AV_SAMPLE_FMT_FLTP) input samples
+ if (!strcmp(codec->name, "aac") && codec->encode2) {
+ TRACE("convert sample format into SAMPLE_FMT_FLTP\n");
+ avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
- avctx = CONTEXT(s, ctx_id).avctx;
- pict = CONTEXT(s, ctx_id).frame;
- if (!avctx || !pict) {
- ERR("%d of context or frame is NULL\n", ctx_id);
- } else if (!avctx->codec) {
- ERR("%d of AVCodec is NULL.\n", ctx_id);
- } else {
- TRACE("pixel format: %d inbuf: %p, picture data: %p\n",
- avctx->pix_fmt, inbuf, pict->data[0]);
+ avctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
- ret = avpicture_fill((AVPicture *)pict, inbuf, avctx->pix_fmt,
- avctx->width, avctx->height);
- if (ret < 0) {
- ERR("after avpicture_fill, ret:%d\n", ret);
- } else {
- if (avctx->time_base.num == 0) {
- pict->pts = AV_NOPTS_VALUE;
- } else {
- AVRational bq =
- {1, (G_USEC_PER_SEC * G_GINT64_CONSTANT(1000))};
- pict->pts = av_rescale_q(in_timestamp, bq, avctx->time_base);
+ INFO("aac encoder!! channels %d channel_layout %lld\n", avctx->channels, avctx->channel_layout);
+ avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
}
- TRACE("encode video. ticks_per_frame:%d, pts:%lld\n",
- avctx->ticks_per_frame, pict->pts);
- outbuf_size =
- (avctx->width * avctx->height * 6) + FF_MIN_BUFFER_SIZE;
+ TRACE("audio sample format %d\n", avctx->sample_fmt);
+ TRACE("strict_std_compliance %d\n", avctx->strict_std_compliance);
- outbuf = g_malloc0(outbuf_size);
+ ret = avcodec_open2(avctx, codec, NULL);
+ INFO("avcodec_open. ret 0x%x ctx_id %d\n", ret, ctx_id);
- avpkt.data = outbuf;
- avpkt.size = outbuf_size;
+ INFO("channels %d sample_rate %d sample_fmt %d "
+ "channel_layout %lld frame_size %d\n",
+ avctx->channels, avctx->sample_rate, avctx->sample_fmt,
+ avctx->channel_layout, avctx->frame_size);
- if (!outbuf) {
- ERR("failed to allocate a buffer of encoding video.\n");
- } else {
- ret = avcodec_encode_video2(avctx, &avpkt, pict, &got_frame);
+ tempbuf_size = (sizeof(avctx->sample_fmt) + sizeof(avctx->frame_size)
+ + sizeof(avctx->extradata_size) + avctx->extradata_size)
+ + sizeof(int);
- TRACE("encode video. ret %d got_picture %d outbuf_size %d\n", ret, got_frame, avpkt.size);
- if (avctx->coded_frame) {
- TRACE("encode video. keyframe %d\n", avctx->coded_frame->key_frame);
- }
- }
+ CONTEXT(s, ctx_id)->opened_context = true;
+ CONTEXT(s, ctx_id)->parser_ctx =
+ maru_brill_codec_parser_init(avctx);
+ } else {
+ ERR("failed to find codec. ctx_id: %d\n", ctx_id);
+ ret = -1;
}
}
- tempbuf_size = sizeof(ret);
- if (ret < 0) {
- ERR("failed to encode video. ctx_id %d ret %d\n", ctx_id, ret);
- } else {
- tempbuf_size += avpkt.size + sizeof(coded_frame) + sizeof(key_frame);
- }
+ tempbuf_size += sizeof(ret);
- // write encoded video data
- tempbuf = g_malloc0(tempbuf_size);
+ tempbuf = g_malloc(tempbuf_size);
if (!tempbuf) {
- ERR("encode video. failed to allocate encoded out buffer.\n");
+ ERR("failed to allocate a buffer\n");
+ tempbuf_size = 0;
} else {
- memcpy(tempbuf, &avpkt.size, sizeof(avpkt.size));
- size = sizeof(avpkt.size);
-
- if ((got_frame) && outbuf) {
- // inform gstreamer plugin about the status of encoded frames
- // A flag for output buffer in gstreamer is depending on the status.
- if (avctx->coded_frame) {
- coded_frame = 1;
- // if key_frame is 0, this frame cannot be decoded independently.
- key_frame = avctx->coded_frame->key_frame;
+ memcpy(tempbuf, &ret, sizeof(ret));
+ size = sizeof(ret);
+ if (ret < 0) {
+            ERR("failed to open the codec context.\n");
+ } else {
+ size += write_codec_init_data(avctx, tempbuf + size);
+ TRACE("codec_init. copyback!! size %d\n", size);
+ {
+ memcpy(tempbuf + size, &avctx->extradata_size, sizeof(avctx->extradata_size));
+ size += sizeof(avctx->extradata_size);
+
+ INFO("codec_init. extradata_size: %d\n", avctx->extradata_size);
+ if (avctx->extradata) {
+ memcpy(tempbuf + size, avctx->extradata, avctx->extradata_size);
+ size += avctx->extradata_size;
+ }
}
- memcpy(tempbuf + size, &coded_frame, sizeof(coded_frame));
- size += sizeof(coded_frame);
- memcpy(tempbuf + size, &key_frame, sizeof(key_frame));
- size += sizeof(key_frame);
- memcpy(tempbuf + size, outbuf, avpkt.size);
}
}
- if (outbuf) {
- TRACE("release encoded output buffer. %p\n", outbuf);
- g_free(outbuf);
- }
-
- maru_brill_codec_push_writequeue(s, tempbuf, tempbuf_size, ctx_id, NULL);
+ brillcodec_push_writequeue(s, tempbuf, tempbuf_size, ctx_id, NULL);
TRACE("leave: %s\n", __func__);
+
return true;
}
-static bool codec_encode_audio(MaruBrillCodecState *s, int ctx_id, void *data_buf)
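+/*
+ * deinit: close the codec, let the hwaccel plugin release its per-context
+ * resources, free extradata, frame, context and parser, then clear the
+ * context slot and push an empty reply so the guest request completes.
+ */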
+static bool deinit(MaruBrillCodecState *s, int ctx_id, void *data_buf)
{
AVCodecContext *avctx = NULL;
- AVPacket avpkt;
- uint8_t *audio_in = NULL;
- int32_t audio_in_size = 0;
- int ret = -1, got_pkt = 0, size = 0;
-
- DeviceMemEntry *elem = NULL;
- uint8_t *tempbuf = NULL;
- int tempbuf_size = 0;
-
- AVFrame *in_frame = NULL;
- AVFrame *resample_frame = NULL;
- int64_t in_timestamp = 0;
+ AVFrame *frame = NULL;
+ AVCodecParserContext *parserctx = NULL;
TRACE("enter: %s\n", __func__);
- /*
- * copy raw audio data from gstreamer encoder plugin
- * audio_in_size: size of raw audio data
- * audio_in : raw audio data
- */
- elem = (DeviceMemEntry *)data_buf;
- if (elem && elem->opaque) {
- memcpy(&audio_in_size, elem->opaque, sizeof(audio_in_size));
- size += sizeof(audio_in_size);
-
- memcpy(&in_timestamp, elem->opaque + size, sizeof(in_timestamp));
- size += sizeof(in_timestamp);
-
- TRACE("encode_audio. audio_in_size %d\n", audio_in_size);
- if (audio_in_size > 0) {
- // audio_in = g_malloc0(audio_in_size);
- // memcpy(audio_in, elem->buf + size, audio_in_size);
- audio_in = elem->opaque + size;
- }
- } else {
- TRACE("encode_audio. no input buffer\n");
- // FIXME: improve error handling
- // return false;
+ avctx = CONTEXT(s, ctx_id)->avctx;
+ frame = CONTEXT(s, ctx_id)->frame;
+ parserctx = CONTEXT(s, ctx_id)->parser_ctx;
+ if (!avctx || !frame) {
+        TRACE("%d of AVCodecContext or AVFrame is NULL. "
+              "Those resources may have already been released.\n", ctx_id);
+ return false;
}
- av_init_packet(&avpkt);
- avpkt.data = NULL;
- avpkt.size = 0;
-
- avctx = CONTEXT(s, ctx_id).avctx;
- if (!avctx) {
- ERR("encode_audio. %d of Context is NULL\n", ctx_id);
- } else if (!avctx->codec) {
- ERR("encode_audio. %d of AVCodec is NULL\n", ctx_id);
- } else {
- int bytes_per_sample = 0;
- int nb_samples = 0;
- int audio_in_sample_fmt = AV_SAMPLE_FMT_S16;
- // audio input src can generate a buffer as an int format.
-
- int resample_buf_size = 0;
- int resample_sample_fmt = 0;
-
- bytes_per_sample = av_get_bytes_per_sample(audio_in_sample_fmt);
- TRACE("bytes per sample %d, sample format %d\n", bytes_per_sample, audio_in_sample_fmt);
-
- nb_samples = audio_in_size / (bytes_per_sample * avctx->channels);
- TRACE("nb_samples %d\n", nb_samples);
+ INFO("close avcontext of %d\n", ctx_id);
+ avcodec_close(avctx);
- in_frame = avcodec_alloc_frame();
- if (!in_frame) {
- ERR("encode_audio. failed to allocate in_frame\n");
- } else {
- // prepare audio_in frame
- ret = fill_audio_into_frame(avctx, in_frame, audio_in, audio_in_size, nb_samples, audio_in_sample_fmt);
- if (ret < 0) {
- ERR("failed to fill audio into frame\n");
- } else {
- resample_sample_fmt =
- convert_audio_sample_fmt(avctx->codec, avctx->codec_type, 1);
- resample_frame = resample_audio(avctx, in_frame, audio_in_size,
- audio_in_sample_fmt, NULL, &resample_buf_size,
- resample_sample_fmt);
-
- if (resample_frame) {
- ret = avcodec_encode_audio2(avctx, &avpkt, (const AVFrame *)resample_frame, &got_pkt);
- TRACE("encode audio. ret %d got_pkt %d avpkt.size %d frame_number %d\n",
- ret, got_pkt, avpkt.size, avctx->frame_number);
- }
- }
- }
+ if (CONTEXT(s, ctx_id)->is_hwaccel) {
+ CodecPlugin *plugin = CONTEXT(s, ctx_id)->state->hwaccel_plugin;
+ plugin->cleanup(get_plugin_context(avctx));
}
- tempbuf_size = sizeof(ret);
- if (ret < 0) {
- ERR("failed to encode audio. ctx_id %d ret %d\n", ctx_id, ret);
- } else {
- tempbuf_size += (sizeof(avpkt.size) + avpkt.size);
+ if (avctx->extradata) {
+ TRACE("free context extradata\n");
+ av_free(avctx->extradata);
}
- TRACE("encode_audio. writequeue elem buffer size %d\n", tempbuf_size);
-
- // write encoded audio data
- tempbuf = g_malloc0(tempbuf_size);
- if (!tempbuf) {
- ERR("encode audio. failed to allocate encoded out buffer.\n");
- } else {
- memcpy(tempbuf, &ret, sizeof(ret));
- size = sizeof(ret);
- if (ret == 0) {
- memcpy(tempbuf + size, &avpkt.size, sizeof(avpkt.size));
- size += sizeof(avpkt.size);
- if (got_pkt) {
- memcpy(tempbuf + size, avpkt.data, avpkt.size);
- av_free_packet(&avpkt);
- }
- }
+ if (frame) {
+ TRACE("free frame\n");
+ avcodec_free_frame(&frame);
}
- maru_brill_codec_push_writequeue(s, tempbuf, tempbuf_size, ctx_id, NULL);
-
- if (in_frame) {
- av_free(in_frame);
+ if (avctx) {
+ TRACE("free codec context\n");
+ av_free(avctx);
}
- if (resample_frame) {
- av_free(resample_frame->data[0]);
- av_free(resample_frame);
+ if (parserctx) {
+ INFO("close parser context\n");
+ av_parser_close(parserctx);
}
- TRACE("[%s] leave:\n", __func__);
+ memset(CONTEXT(s, ctx_id), 0x00, sizeof(CodecContext));
+
+ brillcodec_push_writequeue(s, NULL, 0, ctx_id, NULL);
+
+ TRACE("leave: %s\n", __func__);
return true;
}
-static AVCodecParserContext *maru_brill_codec_parser_init(AVCodecContext *avctx)
+inline void set_plugin_context(AVCodecContext *avctx, void *plugin_context)
{
- AVCodecParserContext *parser = NULL;
-
- if (!avctx) {
- ERR("context is NULL\n");
- return NULL;
- }
-
- switch (avctx->codec_id) {
- case CODEC_ID_MPEG4:
- case CODEC_ID_VC1:
- TRACE("not using parser\n");
- break;
- case CODEC_ID_H264:
- if (avctx->extradata_size == 0) {
- TRACE("H.264 with no extradata, creating parser.\n");
- parser = av_parser_init (avctx->codec_id);
- }
- break;
- default:
- parser = av_parser_init(avctx->codec_id);
- if (parser) {
- INFO("using parser: %s\n", avctx->codec->name);
- }
- break;
- }
+ ((CodecContext *)avctx->opaque)->plugin_context = plugin_context;
+}
- return parser;
+inline void *get_plugin_context(AVCodecContext *avctx)
+{
+ return ((CodecContext *)avctx->opaque)->plugin_context;
}
--- /dev/null
+/*
+ * DXVA2 Module for Decoder Device
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Contact:
+ * GunSoo Kim <gunsoo83.kim@samsung.com>
+ * SangHo Park <sangho1206.park@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ *
+ * Contributors:
+ * - S-Core Co., Ltd
+ *
+ */
+
+#ifndef _WIN32_WINNT
+#define _WIN32_WINNT 0x600
+#else
+# if _WIN32_WINNT < 0x600
+/* dxva2 needs Vista support */
+# undef _WIN32_WINNT
+# define _WIN32_WINNT 0x600
+# endif
+#endif
+
+#define DXVA2API_USE_BITFIELDS
+#define COBJMACROS
+
+#ifndef INITGUID
+#define INITGUID
+#endif
+
+#include "glib.h"
+#include "libavcodec/avcodec.h"
+#include "libavcodec/dxva2.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/imgutils.h"
+
+#include <assert.h>
+#include <windows.h>
+#include <windowsx.h>
+#include <ole2.h>
+#include <commctrl.h>
+#include <shlwapi.h>
+#include <d3d9.h>
+#include <dxva2api.h>
+
+#include <initguid.h> /* must be last included to not redefine existing GUIDs */
+
+/* dxva2api.h GUIDs: http://msdn.microsoft.com/en-us/library/windows/desktop/ms697067(v=vs100).aspx
+ * assume that they are declared in dxva2api.h */
+#define MS_GUID(name, l, w1, w2, b1, b2, b3, b4, b5, b6, b7, b8)
+
+#ifdef __MINGW32__
+# include <_mingw.h>
+
+# if !defined(__MINGW64_VERSION_MAJOR)
+# undef MS_GUID
+# define MS_GUID DEFINE_GUID /* dxva2api.h fails to declare those, redefine as static */
+# define DXVA2_E_NEW_VIDEO_DEVICE MAKE_HRESULT(1, 4, 4097)
+# else
+# include <dxva.h>
+# endif
+
+#endif /* __MINGW32__ */
+
+#include "maru_brillcodec_plugin.h"
+#include "debug_ch.h"
+
+/* define debug channel */
+MULTI_DEBUG_CHANNEL(qemu, dxva2_plugin);
+
+
+MS_GUID(IID_IDirectXVideoDecoderService, 0xfc51a551, 0xd5e7, 0x11d9, 0xaf,0x55,0x00,0x05,0x4e,0x43,0xff,0x02);
+MS_GUID(IID_IDirectXVideoAccelerationService, 0xfc51a550, 0xd5e7, 0x11d9, 0xaf,0x55,0x00,0x05,0x4e,0x43,0xff,0x02);
+
+MS_GUID (DXVA_NoEncrypt, 0x1b81bed0, 0xa0c7, 0x11d3, 0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5);
+
+/* Codec capabilities GUID, sorted by codec */
+MS_GUID (DXVA2_ModeMPEG2_MoComp, 0xe6a9f44b, 0x61b0, 0x4563, 0x9e, 0xa4, 0x63, 0xd2, 0xa3, 0xc6, 0xfe, 0x66);
+MS_GUID (DXVA2_ModeMPEG2_IDCT, 0xbf22ad00, 0x03ea, 0x4690, 0x80, 0x77, 0x47, 0x33, 0x46, 0x20, 0x9b, 0x7e);
+MS_GUID (DXVA2_ModeMPEG2_VLD, 0xee27417f, 0x5e28, 0x4e65, 0xbe, 0xea, 0x1d, 0x26, 0xb5, 0x08, 0xad, 0xc9);
+DEFINE_GUID(DXVA2_ModeMPEG2and1_VLD, 0x86695f12, 0x340e, 0x4f04, 0x9f, 0xd3, 0x92, 0x53, 0xdd, 0x32, 0x74, 0x60);
+DEFINE_GUID(DXVA2_ModeMPEG1_VLD, 0x6f3ec719, 0x3735, 0x42cc, 0x80, 0x63, 0x65, 0xcc, 0x3c, 0xb3, 0x66, 0x16);
+
+MS_GUID (DXVA2_ModeH264_A, 0x1b81be64, 0xa0c7, 0x11d3, 0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5);
+MS_GUID (DXVA2_ModeH264_B, 0x1b81be65, 0xa0c7, 0x11d3, 0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5);
+MS_GUID (DXVA2_ModeH264_C, 0x1b81be66, 0xa0c7, 0x11d3, 0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5);
+MS_GUID (DXVA2_ModeH264_D, 0x1b81be67, 0xa0c7, 0x11d3, 0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5);
+MS_GUID (DXVA2_ModeH264_E, 0x1b81be68, 0xa0c7, 0x11d3, 0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5);
+MS_GUID (DXVA2_ModeH264_F, 0x1b81be69, 0xa0c7, 0x11d3, 0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5);
+DEFINE_GUID(DXVA_ModeH264_VLD_Multiview, 0x9901CCD3, 0xca12, 0x4b7e, 0x86, 0x7a, 0xe2, 0x22, 0x3d, 0x92, 0x55, 0xc3); // MVC
+DEFINE_GUID(DXVA_ModeH264_VLD_WithFMOASO_NoFGT, 0xd5f04ff9, 0x3418, 0x45d8, 0x95, 0x61, 0x32, 0xa7, 0x6a, 0xae, 0x2d, 0xdd);
+DEFINE_GUID(DXVADDI_Intel_ModeH264_A, 0x604F8E64, 0x4951, 0x4c54, 0x88, 0xFE, 0xAB, 0xD2, 0x5C, 0x15, 0xB3, 0xD6);
+DEFINE_GUID(DXVADDI_Intel_ModeH264_C, 0x604F8E66, 0x4951, 0x4c54, 0x88, 0xFE, 0xAB, 0xD2, 0x5C, 0x15, 0xB3, 0xD6);
+DEFINE_GUID(DXVADDI_Intel_ModeH264_E, 0x604F8E68, 0x4951, 0x4c54, 0x88, 0xFE, 0xAB, 0xD2, 0x5C, 0x15, 0xB3, 0xD6); // DXVA_Intel_H264_NoFGT_ClearVideo
+DEFINE_GUID(DXVA_ModeH264_VLD_NoFGT_Flash, 0x4245F676, 0x2BBC, 0x4166, 0xa0, 0xBB, 0x54, 0xE7, 0xB8, 0x49, 0xC3, 0x80);
+
+MS_GUID (DXVA2_ModeWMV8_A, 0x1b81be80, 0xa0c7, 0x11d3, 0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5);
+MS_GUID (DXVA2_ModeWMV8_B, 0x1b81be81, 0xa0c7, 0x11d3, 0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5);
+
+MS_GUID (DXVA2_ModeWMV9_A, 0x1b81be90, 0xa0c7, 0x11d3, 0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5);
+MS_GUID (DXVA2_ModeWMV9_B, 0x1b81be91, 0xa0c7, 0x11d3, 0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5);
+MS_GUID (DXVA2_ModeWMV9_C, 0x1b81be94, 0xa0c7, 0x11d3, 0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5);
+
+MS_GUID (DXVA2_ModeVC1_A, 0x1b81beA0, 0xa0c7, 0x11d3, 0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5);
+MS_GUID (DXVA2_ModeVC1_B, 0x1b81beA1, 0xa0c7, 0x11d3, 0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5);
+MS_GUID (DXVA2_ModeVC1_C, 0x1b81beA2, 0xa0c7, 0x11d3, 0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5);
+MS_GUID (DXVA2_ModeVC1_D, 0x1b81beA3, 0xa0c7, 0x11d3, 0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5);
+DEFINE_GUID(DXVA2_ModeVC1_D2010, 0x1b81beA4, 0xa0c7, 0x11d3, 0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5); // August 2010 update
+DEFINE_GUID(DXVA_Intel_VC1_ClearVideo, 0xBCC5DB6D, 0xA2B6, 0x4AF0, 0xAC, 0xE4, 0xAD, 0xB1, 0xF7, 0x87, 0xBC, 0x89);
+DEFINE_GUID(DXVA_Intel_VC1_ClearVideo_2, 0xE07EC519, 0xE651, 0x4CD6, 0xAC, 0x84, 0x13, 0x70, 0xCC, 0xEE, 0xC8, 0x51);
+
+DEFINE_GUID(DXVA_nVidia_MPEG4_ASP, 0x9947EC6F, 0x689B, 0x11DC, 0xA3, 0x20, 0x00, 0x19, 0xDB, 0xBC, 0x41, 0x84);
+DEFINE_GUID(DXVA_ModeMPEG4pt2_VLD_Simple, 0xefd64d74, 0xc9e8, 0x41d7, 0xa5, 0xe9, 0xe9, 0xb0, 0xe3, 0x9f, 0xa3, 0x19);
+DEFINE_GUID(DXVA_ModeMPEG4pt2_VLD_AdvSimple_NoGMC, 0xed418a9f, 0x010d, 0x4eda, 0x9a, 0xe3, 0x9a, 0x65, 0x35, 0x8d, 0x8d, 0x2e);
+DEFINE_GUID(DXVA_ModeMPEG4pt2_VLD_AdvSimple_GMC, 0xab998b5b, 0x4258, 0x44a9, 0x9f, 0xeb, 0x94, 0xe5, 0x97, 0xa6, 0xba, 0xae);
+DEFINE_GUID(DXVA_ModeMPEG4pt2_VLD_AdvSimple_Avivo, 0x7C74ADC6, 0xe2ba, 0x4ade, 0x86, 0xde, 0x30, 0xbe, 0xab, 0xb4, 0x0c, 0xc1);
+
+/* */
+typedef struct {
+ const char *name;
+ const GUID *guid;
+ int codec;
+} dxva2_mode_t;
+/* XXX Preferred modes must come first */
+static const dxva2_mode_t dxva2_modes[] = {
+ /* MPEG-1/2 */
+ { "MPEG-2 variable-length decoder", &DXVA2_ModeMPEG2_VLD, AV_CODEC_ID_MPEG2VIDEO },
+ { "MPEG-2 & MPEG-1 variable-length decoder", &DXVA2_ModeMPEG2and1_VLD, AV_CODEC_ID_MPEG2VIDEO },
+ { "MPEG-2 motion compensation", &DXVA2_ModeMPEG2_MoComp, 0 },
+ { "MPEG-2 inverse discrete cosine transform", &DXVA2_ModeMPEG2_IDCT, 0 },
+
+ { "MPEG-1 variable-length decoder", &DXVA2_ModeMPEG1_VLD, 0 },
+
+ /* H.264 */
+ { "H.264 variable-length decoder, film grain technology", &DXVA2_ModeH264_F, AV_CODEC_ID_H264 },
+ { "H.264 variable-length decoder, no film grain technology (Intel ClearVideo)", &DXVADDI_Intel_ModeH264_E, AV_CODEC_ID_H264 },
+ { "H.264 variable-length decoder, no film grain technology", &DXVA2_ModeH264_E, AV_CODEC_ID_H264 },
+ { "H.264 variable-length decoder, no film grain technology, FMO/ASO", &DXVA_ModeH264_VLD_WithFMOASO_NoFGT, AV_CODEC_ID_H264 },
+ { "H.264 variable-length decoder, no film grain technology, Flash", &DXVA_ModeH264_VLD_NoFGT_Flash, AV_CODEC_ID_H264 },
+
+ { "H.264 inverse discrete cosine transform, film grain technology", &DXVA2_ModeH264_D, 0 },
+ { "H.264 inverse discrete cosine transform, no film grain technology", &DXVA2_ModeH264_C, 0 },
+ { "H.264 inverse discrete cosine transform, no film grain technology (Intel)", &DXVADDI_Intel_ModeH264_C, 0 },
+
+ { "H.264 motion compensation, film grain technology", &DXVA2_ModeH264_B, 0 },
+ { "H.264 motion compensation, no film grain technology", &DXVA2_ModeH264_A, 0 },
+ { "H.264 motion compensation, no film grain technology (Intel)", &DXVADDI_Intel_ModeH264_A, 0 },
+
+ /* WMV */
+ { "Windows Media Video 8 motion compensation", &DXVA2_ModeWMV8_B, 0 },
+ { "Windows Media Video 8 post processing", &DXVA2_ModeWMV8_A, 0 },
+
+ { "Windows Media Video 9 IDCT", &DXVA2_ModeWMV9_C, 0 },
+ { "Windows Media Video 9 motion compensation", &DXVA2_ModeWMV9_B, 0 },
+ { "Windows Media Video 9 post processing", &DXVA2_ModeWMV9_A, 0 },
+
+ /* VC-1 */
+ { "VC-1 variable-length decoder", &DXVA2_ModeVC1_D, AV_CODEC_ID_VC1 },
+ { "VC-1 variable-length decoder", &DXVA2_ModeVC1_D, AV_CODEC_ID_WMV3 },
+ { "VC-1 variable-length decoder", &DXVA2_ModeVC1_D2010, AV_CODEC_ID_VC1 },
+ { "VC-1 variable-length decoder", &DXVA2_ModeVC1_D2010, AV_CODEC_ID_WMV3 },
+ { "VC-1 variable-length decoder 2 (Intel)", &DXVA_Intel_VC1_ClearVideo_2, 0 },
+ { "VC-1 variable-length decoder (Intel)", &DXVA_Intel_VC1_ClearVideo, 0 },
+
+ { "VC-1 inverse discrete cosine transform", &DXVA2_ModeVC1_C, 0 },
+ { "VC-1 motion compensation", &DXVA2_ModeVC1_B, 0 },
+ { "VC-1 post processing", &DXVA2_ModeVC1_A, 0 },
+
+ /* Xvid/Divx: TODO */
+ { "MPEG-4 Part 2 nVidia bitstream decoder", &DXVA_nVidia_MPEG4_ASP, 0 },
+ { "MPEG-4 Part 2 variable-length decoder, Simple Profile", &DXVA_ModeMPEG4pt2_VLD_Simple, 0 },
+ { "MPEG-4 Part 2 variable-length decoder, Simple&Advanced Profile, no GMC", &DXVA_ModeMPEG4pt2_VLD_AdvSimple_NoGMC, 0 },
+ { "MPEG-4 Part 2 variable-length decoder, Simple&Advanced Profile, GMC", &DXVA_ModeMPEG4pt2_VLD_AdvSimple_GMC, 0 },
+ { "MPEG-4 Part 2 variable-length decoder, Simple&Advanced Profile, Avivo", &DXVA_ModeMPEG4pt2_VLD_AdvSimple_Avivo, 0 },
+
+ { NULL, NULL, 0 }
+};
+
+/* */
+typedef struct {
+ const char *name;
+ D3DFORMAT format;
+} d3d_format_t;
+/* XXX Preferred format must come first */
+static const d3d_format_t d3d_formats[] = {
+ { "YV12", MAKEFOURCC('Y','V','1','2')},
+ { "NV12", MAKEFOURCC('N','V','1','2')},
+ { "IMC3", MAKEFOURCC('I','M','C','3')},
+ { NULL, 0 }
+};
+
+struct DXVAPluginSurface;
+typedef struct DXVAPluginSurface DXVAPluginSurface;
+
+#define VA_DXVA2_MAX_SURFACE_COUNT (64)
+typedef struct DXVAPluginContext
+{
+ /* Video service */
+ GUID guid_decdev;
+ D3DFORMAT render_fmt;
+
+ /* Video decoder */
+ DXVA2_ConfigPictureDecode cfg;
+ IDirectXVideoDecoder *decoder;
+
+ /* Option conversion */
+ D3DFORMAT output;
+
+ /* Surfaces */
+ unsigned surface_count;
+ int surface_width;
+ int surface_height;
+ DXVAPluginSurface *surface;
+ LPDIRECT3DSURFACE9 hw_surface[VA_DXVA2_MAX_SURFACE_COUNT];
+
+ int thread_count;
+
+ struct dxva_context *hw_context;
+} DXVAPluginContext;
+
+struct DXVAPluginSurface {
+ LPDIRECT3DSURFACE9 d3d;
+ bool is_occupied;
+ DXVAPluginContext *dxva_ctx;
+};
+
+typedef struct DXVADeviceHandle
+{
+ /* DLL */
+ HINSTANCE hd3d9_dll;
+ HINSTANCE hdxva2_dll;
+
+ /* Direct3D */
+ LPDIRECT3D9 d3dobj;
+ LPDIRECT3DDEVICE9 d3ddev;
+
+ /* Device manager */
+ IDirect3DDeviceManager9 *devmng;
+ HANDLE hd3ddev;
+
+ /* Video service */
+ IDirectXVideoDecoderService *vs;
+} DXVADeviceHandle;
+static DXVADeviceHandle *dxva_dev = &(DXVADeviceHandle) {};
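+/* The Direct3D9 device, device manager and video decoder service are kept in
+ * a single process-wide handle: probe() creates them lazily on first use and
+ * maru_dxva2_close() tears them down. */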
+
+static const dxva2_mode_t *Dxva2FindMode(const GUID *guid)
+{
+ unsigned i = 0;
+
+ for (i = 0; dxva2_modes[i].name; i++) {
+ if (IsEqualGUID(dxva2_modes[i].guid, guid))
+ return &dxva2_modes[i];
+ }
+ return NULL;
+}
+
+static const d3d_format_t *D3dFindFormat(D3DFORMAT format)
+{
+ unsigned i = 0;
+
+ for (i = 0; d3d_formats[i].name; i++) {
+ if (d3d_formats[i].format == format)
+ return &d3d_formats[i];
+ }
+ return NULL;
+}
+
+/**
+ * It creates a Direct3D device usable for DXVA 2
+ */
+static int D3dCreateDevice(void)
+{
+ /* */
+ LPDIRECT3D9 (WINAPI *Create9)(UINT SDKVersion);
+ Create9 = (void *)GetProcAddress(dxva_dev->hd3d9_dll, "Direct3DCreate9");
+ if (!Create9) {
+ ERR("Cannot locate reference to Direct3DCreate9 ABI in DLL\n");
+ return -1;
+ }
+
+ /* */
+ LPDIRECT3D9 d3dobj;
+ d3dobj = Create9(D3D_SDK_VERSION);
+ if (!d3dobj) {
+ ERR("Direct3DCreate9 failed\n");
+ return -1;
+ }
+ dxva_dev->d3dobj = d3dobj;
+
+ /* */
+ D3DPRESENT_PARAMETERS d3dpp;
+ ZeroMemory(&d3dpp, sizeof(d3dpp));
+ d3dpp.Flags = D3DPRESENTFLAG_VIDEO;
+ d3dpp.Windowed = TRUE;
+ d3dpp.hDeviceWindow = NULL;
+ d3dpp.SwapEffect = D3DSWAPEFFECT_DISCARD;
+ d3dpp.MultiSampleType = D3DMULTISAMPLE_NONE;
+ d3dpp.PresentationInterval = D3DPRESENT_INTERVAL_DEFAULT;
+ d3dpp.BackBufferCount = 0; /* FIXME what to put here */
+ d3dpp.BackBufferFormat = D3DFMT_X8R8G8B8; /* FIXME what to put here */
+ d3dpp.BackBufferWidth = 0;
+ d3dpp.BackBufferHeight = 0;
+ d3dpp.EnableAutoDepthStencil = FALSE;
+
+    /* Direct3D needs an HWND to create a device even when ::Present is not
+       used; the HWND is how Direct3D is notified about focus-window changes.
+       For now use GetDesktopWindow(), as it looks harmless. */
+ LPDIRECT3DDEVICE9 d3ddev;
+ if (FAILED(IDirect3D9_CreateDevice(d3dobj, D3DADAPTER_DEFAULT,
+ D3DDEVTYPE_HAL, GetDesktopWindow(),
+ D3DCREATE_SOFTWARE_VERTEXPROCESSING |
+ D3DCREATE_MULTITHREADED,
+ &d3dpp, &d3ddev))) {
+ ERR("IDirect3D9_CreateDevice failed\n");
+ return -1;
+ }
+ dxva_dev->d3ddev = d3ddev;
+
+ return 0;
+}
+
+/**
+ * It creates a Direct3D device manager
+ */
+static int D3dCreateDeviceManager(void)
+{
+ HRESULT (WINAPI *CreateDeviceManager9)(UINT *pResetToken,
+ IDirect3DDeviceManager9 **);
+ CreateDeviceManager9 =
+ (void *)GetProcAddress(dxva_dev->hdxva2_dll,
+ "DXVA2CreateDirect3DDeviceManager9");
+
+ if (!CreateDeviceManager9) {
+ ERR("cannot load function\n");
+ return -1;
+ }
+
+ UINT token;
+ IDirect3DDeviceManager9 *devmng;
+ if (FAILED(CreateDeviceManager9(&token, &devmng))) {
+        ERR("DXVA2CreateDirect3DDeviceManager9 failed\n");
+        return -1;
+    }
+    TRACE("DXVA2CreateDirect3DDeviceManager9 succeeded\n");
+
+ dxva_dev->devmng = devmng;
+ TRACE("obtained IDirect3DDeviceManager9\n");
+
+ HRESULT hr = IDirect3DDeviceManager9_ResetDevice(devmng, dxva_dev->d3ddev, token);
+ if (FAILED(hr)) {
+ ERR("IDirect3DDeviceManager9_ResetDevice failed: %08x\n", (unsigned)hr);
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * It creates a DirectX video service
+ */
+static int DxCreateVideoService(void)
+{
+ HRESULT (WINAPI *CreateVideoService)(IDirect3DDevice9 *,
+ REFIID riid,
+ void **ppService);
+ CreateVideoService =
+ (void *)GetProcAddress(dxva_dev->hdxva2_dll, "DXVA2CreateVideoService");
+
+ if (!CreateVideoService) {
+ ERR("cannot load function\n");
+ return -1;
+ }
+ TRACE("DXVA2CreateVideoService Success!\n");
+
+ HRESULT hr;
+
+ HANDLE hd3ddev;
+ hr = IDirect3DDeviceManager9_OpenDeviceHandle(dxva_dev->devmng, &hd3ddev);
+ if (FAILED(hr)) {
+ ERR("OpenDeviceHandle failed\n");
+ return -1;
+ }
+ dxva_dev->hd3ddev = hd3ddev;
+
+ void *pv;
+ hr = IDirect3DDeviceManager9_GetVideoService(dxva_dev->devmng, hd3ddev,
+ &IID_IDirectXVideoDecoderService, &pv);
+ if (FAILED(hr)) {
+ ERR("GetVideoService failed\n");
+ return -1;
+ }
+ dxva_dev->vs = pv;
+
+ return 0;
+}
+
+/**
+ * Find the best suited decoder mode GUID and render format.
+ */
+static int DxFindVideoServiceConversion(DXVAPluginContext *dxva_ctx, int codec_id)
+{
+ unsigned i = 0;
+ GUID *guid_decdev = &dxva_ctx->guid_decdev;
+ D3DFORMAT *render_fmt = &dxva_ctx->render_fmt;
+
+    /* Retrieve supported modes from the decoder service */
+ UINT decdev_count = 0;
+ GUID *guid_decdev_list = NULL;
+ if (FAILED(IDirectXVideoDecoderService_GetDecoderDeviceGuids(dxva_dev->vs,
+ &decdev_count,
+ &guid_decdev_list))) {
+ ERR("IDirectXVideoDecoderService_GetDecoderDeviceGuids failed\n");
+ return -1;
+ }
+
+ INFO("IDirectXVideoDecoderService_GetDecoderDeviceGuids success. count=%d\n", decdev_count);
+
+ for (i = 0; i < decdev_count; i++) {
+ const GUID *g = &guid_decdev_list[i];
+ const dxva2_mode_t *mode = Dxva2FindMode(g);
+ if (mode) {
+ INFO("- '%s' is supported by hardware\n", mode->name);
+ } else {
+ WARN("- Unknown GUID = %08X-%04x-%04x-XXXX\n",
+ (unsigned)g->Data1, g->Data2, g->Data3);
+ }
+ }
+
+    /* Try all supported modes in our priority order */
+ for (i = 0; dxva2_modes[i].name; i++) {
+ const dxva2_mode_t *mode = &dxva2_modes[i];
+ unsigned j = 0;
+ if (!mode->codec || mode->codec != codec_id)
+ continue;
+
+ /* */
+ bool is_supported = false;
+ const GUID *g = &guid_decdev_list[0];
+ for (; !is_supported && g < &guid_decdev_list[decdev_count]; g++) {
+ is_supported = IsEqualGUID(mode->guid, g);
+ }
+ if (!is_supported)
+ continue;
+
+ /* */
+ INFO("Trying to use '%s' as input\n", mode->name);
+ UINT render_fmt_count = 0;
+ D3DFORMAT *render_fmt_list = NULL;
+ if (FAILED(IDirectXVideoDecoderService_GetDecoderRenderTargets(dxva_dev->vs, mode->guid,
+ &render_fmt_count,
+ &render_fmt_list))) {
+ ERR("IDirectXVideoDecoderService_GetDecoderRenderTargets failed\n");
+ continue;
+ }
+ for (j = 0; j < render_fmt_count; j++) {
+ const D3DFORMAT f = render_fmt_list[j];
+ const d3d_format_t *format = D3dFindFormat(f);
+
+ INFO("HOST supported format %d (%4.4s)\n", f, (const char*)&f);
+ if (format) {
+ INFO("%s is supported for output\n", format->name);
+ } else {
+ INFO("%d is supported for output (%4.4s)\n", f, (const char*)&f);
+ }
+ }
+
+ /* */
+ for (j = 0; d3d_formats[j].name; j++) {
+ const d3d_format_t *format = &d3d_formats[j];
+ unsigned k = 0;
+
+ /* */
+ bool is_supported = false;
+ for (k = 0; !is_supported && k < render_fmt_count; k++) {
+ is_supported = format->format == render_fmt_list[k];
+ }
+ if (!is_supported)
+ continue;
+
+ /* We have our solution */
+ INFO("Using '%s' to decode to '%s'\n", mode->name, format->name);
+ *guid_decdev = *mode->guid;
+ *render_fmt = format->format;
+ CoTaskMemFree(render_fmt_list);
+ CoTaskMemFree(guid_decdev_list);
+ return 0;
+ }
+ CoTaskMemFree(render_fmt_list);
+ }
+ CoTaskMemFree(guid_decdev_list);
+ return -1;
+}
+
+static void DxDestroyVideoDecoder(DXVAPluginContext *dxva_ctx)
+{
+ unsigned i = 0;
+
+ if (dxva_ctx->decoder) {
+ IDirectXVideoDecoder_Release(dxva_ctx->decoder);
+ }
+ dxva_ctx->decoder = NULL;
+
+ for (i = 0; i < dxva_ctx->surface_count; i++) {
+ IDirect3DSurface9_Release(dxva_ctx->surface[i].d3d);
+ }
+ dxva_ctx->surface_count = 0;
+}
+
+/**
+ * It creates a DXVA2 decoder using the given video format
+ */
+static int DxCreateVideoDecoder(DXVAPluginContext *dxva_ctx, AVCodecContext *dec_ctx)
+{
+ int surface_count = 0;
+ unsigned i = 0;
+ int width = 0;
+ int height = 0;
+ int codec_id = 0;
+
+ width = dec_ctx->width;
+ height = dec_ctx->height;
+ codec_id = dec_ctx->codec_id;
+
+
+ TRACE("DxCreateVideoDecoder id %d %dx%d\n",
+ codec_id, width, height);
+
+ /* Allocates all surfaces needed for the decoder */
+ dxva_ctx->surface_width = (width + 15) & ~15;
+ dxva_ctx->surface_height = (height + 15) & ~15;
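+    /* H.264 may keep up to 16 reference frames in its DPB, so reserve 16
+     * surfaces plus one per decoding thread plus a small margin; MPEG-1/2 and
+     * the remaining codecs get by with far fewer. */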
+ switch (codec_id) {
+ case AV_CODEC_ID_H264:
+ surface_count = 16 + dxva_ctx->thread_count + 2;
+ break;
+ case AV_CODEC_ID_MPEG1VIDEO:
+ case AV_CODEC_ID_MPEG2VIDEO:
+ surface_count = 2 + 2;
+ break;
+ default:
+ surface_count = 2 + 1;
+ break;
+ }
+ if (surface_count > VA_DXVA2_MAX_SURFACE_COUNT)
+ return -1;
+ dxva_ctx->surface_count = surface_count;
+ if (FAILED(IDirectXVideoDecoderService_CreateSurface(dxva_dev->vs,
+ dxva_ctx->surface_width,
+ dxva_ctx->surface_height,
+ dxva_ctx->surface_count - 1,
+ dxva_ctx->render_fmt,
+ D3DPOOL_DEFAULT,
+ 0,
+ DXVA2_VideoDecoderRenderTarget,
+ dxva_ctx->hw_surface,
+ NULL))) {
+        ERR("IDirectXVideoDecoderService_CreateSurface failed\n");
+ dxva_ctx->surface_count = 0;
+ return -1;
+ }
+
+ dxva_ctx->surface = g_malloc_n(dxva_ctx->surface_count, sizeof(DXVAPluginSurface));
+ for (i = 0; i < dxva_ctx->surface_count; i++) {
+ dxva_ctx->surface[i].d3d = dxva_ctx->hw_surface[i];
+ dxva_ctx->surface[i].is_occupied = false;
+ dxva_ctx->surface[i].dxva_ctx = dxva_ctx;
+ }
+    TRACE("IDirectXVideoDecoderService_CreateSurface succeeded with %d surfaces (%dx%d)\n",
+          dxva_ctx->surface_count, width, height);
+
+ /* */
+ DXVA2_VideoDesc dsc;
+ ZeroMemory(&dsc, sizeof(dsc));
+ dsc.SampleWidth = width;
+ dsc.SampleHeight = height;
+ dsc.Format = dxva_ctx->render_fmt;
+ dsc.InputSampleFreq.Numerator = 0;
+ dsc.InputSampleFreq.Denominator = 0;
+ dsc.OutputFrameFreq = dsc.InputSampleFreq;
+ dsc.UABProtectionLevel = FALSE;
+ dsc.Reserved = 0;
+
+    /* FIXME: unsure whether leaving everything set to 'unknown' is safe */
+ DXVA2_ExtendedFormat *ext = &dsc.SampleFormat;
+ ext->SampleFormat = 0;//DXVA2_SampleUnknown;
+ ext->VideoChromaSubsampling = 0;//DXVA2_VideoChromaSubsampling_Unknown;
+ ext->NominalRange = 0;//DXVA2_NominalRange_Unknown;
+ ext->VideoTransferMatrix = 0;//DXVA2_VideoTransferMatrix_Unknown;
+ ext->VideoLighting = 0;//DXVA2_VideoLighting_Unknown;
+ ext->VideoPrimaries = 0;//DXVA2_VideoPrimaries_Unknown;
+ ext->VideoTransferFunction = 0;//DXVA2_VideoTransFunc_Unknown;
+
+ /* List all configurations available for the decoder */
+ UINT cfg_count = 0;
+ DXVA2_ConfigPictureDecode *cfg_list = NULL;
+ if (FAILED(IDirectXVideoDecoderService_GetDecoderConfigurations(dxva_dev->vs,
+ &dxva_ctx->guid_decdev,
+ &dsc,
+ NULL,
+ &cfg_count,
+ &cfg_list))) {
+ ERR("IDirectXVideoDecoderService_GetDecoderConfigurations failed\n");
+ return -1;
+ }
+ TRACE("we got %d decoder configurations\n", cfg_count);
+
+ /* Select the best decoder configuration */
+ int cfg_score = 0;
+ for (i = 0; i < cfg_count; i++) {
+ const DXVA2_ConfigPictureDecode *cfg = &cfg_list[i];
+
+ /* */
+ TRACE("configuration[%d] ConfigBitstreamRaw %d\n",
+ i, cfg->ConfigBitstreamRaw);
+
+ /* */
+ int score;
+ if (cfg->ConfigBitstreamRaw == 1)
+ score = 1;
+ else if (codec_id == AV_CODEC_ID_H264 && cfg->ConfigBitstreamRaw == 2)
+ score = 2;
+ else
+ continue;
+ if (IsEqualGUID(&cfg->guidConfigBitstreamEncryption, &DXVA_NoEncrypt))
+ score += 16;
+
+ if (cfg_score < score) {
+ dxva_ctx->cfg = *cfg;
+ cfg_score = score;
+ }
+ }
+ CoTaskMemFree(cfg_list);
+ if (cfg_score <= 0) {
+ ERR("Failed to find a supported decoder configuration\n");
+ return -1;
+ }
+
+ /* Create the decoder */
+ IDirectXVideoDecoder *decoder;
+ if (FAILED(IDirectXVideoDecoderService_CreateVideoDecoder(dxva_dev->vs,
+ &dxva_ctx->guid_decdev,
+ &dsc,
+ &dxva_ctx->cfg,
+ dxva_ctx->hw_surface,
+ dxva_ctx->surface_count,
+ &decoder))) {
+ ERR("IDirectXVideoDecoderService_CreateVideoDecoder failed\n");
+ return -1;
+ }
+ dxva_ctx->decoder = decoder;
+    TRACE("IDirectXVideoDecoderService_CreateVideoDecoder succeeded\n");
+ return 0;
+}
+
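+/* extract() below only emits planar YUV 4:2:0, so NV12 and IMC3 render
+ * targets are converted to YV12 while a YV12 target is copied as-is; this
+ * matches the plugin's declared output_pix_fmt (PIX_FMT_YUV420P). */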
+static void DxCreateVideoConversion(DXVAPluginContext *dxva_ctx)
+{
+ unsigned int output = dxva_ctx->render_fmt;
+
+ switch (output) {
+ case MAKEFOURCC('N','V','1','2'):
+ case MAKEFOURCC('I','M','C','3'):
+ dxva_ctx->output = MAKEFOURCC('Y','V','1','2');
+ break;
+ default:
+ dxva_ctx->output = dxva_ctx->render_fmt;
+ break;
+ }
+}
+
+/**
+ * It destroys a DirectX video service
+ */
+static void DxDestroyVideoService(void)
+{
+ if (dxva_dev->hd3ddev)
+ IDirect3DDeviceManager9_CloseDeviceHandle(dxva_dev->devmng, dxva_dev->hd3ddev);
+ if (dxva_dev->vs)
+ IDirectXVideoDecoderService_Release(dxva_dev->vs);
+}
+
+/**
+ * It destroys a Direct3D device manager
+ */
+static void D3dDestroyDeviceManager(void)
+{
+ if (dxva_dev->devmng)
+ IDirect3DDeviceManager9_Release(dxva_dev->devmng);
+}
+
+/**
+ * It releases a Direct3D device and its resources.
+ */
+static void D3dDestroyDevice(void)
+{
+ if (dxva_dev->d3ddev)
+ IDirect3DDevice9_Release(dxva_dev->d3ddev);
+ if (dxva_dev->d3dobj)
+ IDirect3D9_Release(dxva_dev->d3dobj);
+}
+
+#if 0
+static int DxResetVideoDecoder(void)
+{
+ ERR("DxResetVideoDecoder unimplemented\n");
+ return -1;
+}
+#endif
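+/* De-interleave an NV12-style interleaved UV plane (UVUVUV...) into separate
+ * U and V planes. */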
+
+static void split_planes(uint8_t *dstu, size_t dstu_pitch,
+ uint8_t *dstv, size_t dstv_pitch,
+ const uint8_t *src, size_t src_pitch,
+ unsigned width, unsigned height)
+{
+ unsigned int y = 0;
+ unsigned int x = 0;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ dstu[x] = src[2*x+0];
+ dstv[x] = src[2*x+1];
+ }
+ src += src_pitch;
+ dstu += dstu_pitch;
+ dstv += dstv_pitch;
+ }
+}
+
+static void copy_plane(uint8_t *dst, size_t dst_pitch,
+ const uint8_t *src, size_t src_pitch,
+ unsigned width, unsigned height)
+{
+ unsigned int y;
+#ifndef TESTING1
+ for (y = 0; y < height; y++) {
+ memcpy(dst, src, width);
+ src += src_pitch;
+ dst += dst_pitch;
+ }
+#else
+ memcpy(dst, src, width * height);
+#endif
+}
+
+static void copy_yv12(uint8_t *dst[3], size_t dst_pitch[3],
+ uint8_t *src[3], size_t src_pitch[3],
+ unsigned width, unsigned height)
+{
+ copy_plane(dst[0], dst_pitch[0],
+ src[0], src_pitch[0], width, height);
+ copy_plane(dst[1], dst_pitch[1],
+ src[2], src_pitch[2], width / 2, height / 2);
+ copy_plane(dst[2], dst_pitch[2],
+ src[1], src_pitch[1], width / 2, height / 2);
+}
+
+static void copy_nv12(uint8_t *dst[3], int linesizes[4],
+ uint8_t *src[2], size_t src_pitch[2],
+ unsigned width, unsigned height)
+{
+ copy_plane(dst[0], linesizes[0],
+ src[0], src_pitch[0], width, height);
+ split_planes(dst[1], linesizes[1], dst[2], linesizes[2],
+ src[1], src_pitch[1], width / 2, height / 2);
+}
+
+
+static void maru_dxva2_close(void)
+{
+ DxDestroyVideoService();
+ D3dDestroyDeviceManager();
+ D3dDestroyDevice();
+
+ if (dxva_dev->hdxva2_dll) {
+ FreeLibrary(dxva_dev->hdxva2_dll);
+ dxva_dev->hdxva2_dll = NULL;
+ }
+ if (dxva_dev->hd3d9_dll) {
+ FreeLibrary(dxva_dev->hd3d9_dll);
+ dxva_dev->hd3d9_dll = NULL;
+ }
+}
+
+static bool probe(void)
+{
+ if (dxva_dev->hd3d9_dll == NULL || dxva_dev->hdxva2_dll == NULL) {
+ dxva_dev->hd3d9_dll = LoadLibrary(TEXT("D3D9.DLL"));
+ if (!dxva_dev->hd3d9_dll) {
+ ERR("cannot load d3d9.dll\n");
+ goto error;
+ }
+ dxva_dev->hdxva2_dll = LoadLibrary(TEXT("DXVA2.DLL"));
+ if (!dxva_dev->hdxva2_dll) {
+ ERR("cannot load dxva2.dll\n");
+ goto error;
+ }
+ TRACE("DLLs loaded\n");
+
+ if (D3dCreateDevice() < 0) {
+ ERR("Failed to create Direct3D device\n");
+ goto error;
+ }
+        TRACE("D3dCreateDevice succeeded\n");
+
+ if (D3dCreateDeviceManager() < 0) {
+ ERR("D3dCreateDeviceManager failed\n");
+ goto error;
+ }
+
+ if (DxCreateVideoService() < 0) {
+ ERR("DxCreateVideoService failed\n");
+ goto error;
+ }
+ }
+
+ return true;
+
+error:
+ maru_dxva2_close();
+ return false;
+}
+
+static void cleanup(void *opaque)
+{
+ DXVAPluginContext *dxva_ctx = (DXVAPluginContext *)opaque;
+
+ DxDestroyVideoDecoder(dxva_ctx);
+
+ if (dxva_ctx->hw_context) {
+ g_free(dxva_ctx->hw_context);
+ }
+
+ g_free(dxva_ctx);
+
+}
+
+static void *dxva_setup(AVCodecContext *dec_ctx, int width, int height)
+{
+ DXVAPluginContext *dxva_ctx = g_malloc0(sizeof(DXVAPluginContext));
+
+    if (DxFindVideoServiceConversion(dxva_ctx, dec_ctx->codec_id) < 0) {
+        ERR("DxFindVideoServiceConversion failed\n");
+        g_free(dxva_ctx);
+        return NULL;
+    }
+
+ dxva_ctx->thread_count = dec_ctx->thread_count;
+
+    if (DxCreateVideoDecoder(dxva_ctx, dec_ctx) < 0) {
+        ERR("DxCreateVideoDecoder failed\n");
+        /* release any surfaces created so far and free the plugin context */
+        cleanup(dxva_ctx);
+        return NULL;
+    }
+
+ // prepare the libav hardware context
+ if (dxva_ctx->hw_context == NULL) {
+ dxva_ctx->hw_context = g_malloc0(sizeof(struct dxva_context));
+ }
+ dec_ctx->hwaccel_context = dxva_ctx->hw_context;
+
+ dxva_ctx->hw_context->decoder = dxva_ctx->decoder;
+ dxva_ctx->hw_context->cfg = &dxva_ctx->cfg;
+ dxva_ctx->hw_context->surface_count = dxva_ctx->surface_count;
+ dxva_ctx->hw_context->surface = dxva_ctx->hw_surface;
+
+ DxCreateVideoConversion(dxva_ctx);
+
+ return dxva_ctx;
+}
+
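+/* get_buffer replacement for hwaccel decoding: instead of allocating pixel
+ * memory, hand the decoder a free D3D9 surface from the pool. libav's DXVA2
+ * hwaccel picks the surface pointer up from frame->data[3]. */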
+static int dxva_get_surface(AVCodecContext *dec_ctx, AVFrame *frame)
+{
+ unsigned i = 0;
+ DXVAPluginSurface *surface = NULL;
+ DXVAPluginContext *dxva_ctx = (DXVAPluginContext *)get_plugin_context(dec_ctx);
+
+ /* Check the device */
+#if 0
+ HRESULT hr = IDirect3DDeviceManager9_TestDevice(dxva_dev->devmng, dxva_dev->hd3ddev);
+ if (hr == DXVA2_E_NEW_VIDEO_DEVICE) {
+ if (DxResetVideoDecoder())
+ return -1;
+ } else if (FAILED(hr)) {
+ ERR("IDirect3DDeviceManager9_TestDevice %u\n", (unsigned)hr);
+ return -1;
+ }
+#endif
+
+    for (i = 0; i < dxva_ctx->surface_count; ++i) {
+        surface = &dxva_ctx->surface[i];
+        if (!surface->is_occupied) {
+            break;
+        }
+    }
+
+    if (i == dxva_ctx->surface_count) {
+        /* every surface is still referenced by the decoder */
+        ERR("no free DXVA2 surface available\n");
+        return -1;
+    }
+
+    surface->is_occupied = true;
+
+ frame->data[0] = (void *)surface->d3d;
+ frame->data[3] = frame->data[0];
+ frame->type = FF_BUFFER_TYPE_USER;
+ frame->opaque = surface;
+
+ return 0;
+}
+
+static void dxva_release_surface(AVCodecContext *dec_ctx, AVFrame *frame)
+{
+ DXVAPluginSurface *surface = NULL;
+ unsigned int i;
+
+ for (i = 0; i < 4; ++i) {
+ frame->data[i] = NULL;
+ }
+
+ surface = frame->opaque;
+ surface->is_occupied = false;
+}
+
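+/* Copy a decoded picture out of its D3D9 surface into the caller-provided
+ * destination buffer, converting NV12/IMC3 to planar YUV 4:2:0 on the way. */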
+static void extract(void *dst, void *src)
+{
+ AVFrame *frame = (AVFrame *)src;
+ LPDIRECT3DSURFACE9 d3d = (LPDIRECT3DSURFACE9)(uintptr_t)frame->data[3];
+ DXVAPluginContext *dxva_ctx = (DXVAPluginContext *)((DXVAPluginSurface *)frame->opaque)->dxva_ctx;
+
+ /* */
+ assert(dxva_ctx->output == MAKEFOURCC('Y','V','1','2'));
+
+ /* */
+ D3DLOCKED_RECT lock;
+ if (FAILED(IDirect3DSurface9_LockRect(d3d, &lock, NULL, D3DLOCK_READONLY))) {
+ ERR("Failed to lock surface\n");
+ return;
+ }
+
+ if (dxva_ctx->render_fmt == MAKEFOURCC('Y','V','1','2') ||
+ dxva_ctx->render_fmt == MAKEFOURCC('I','M','C','3')) {
+ bool imc3 = dxva_ctx->render_fmt == MAKEFOURCC('I','M','C','3');
+ size_t chroma_pitch = imc3 ? lock.Pitch : (lock.Pitch / 2);
+
+ size_t pitch[3] = {
+ lock.Pitch,
+ chroma_pitch,
+ chroma_pitch,
+ };
+
+ uint8_t *plane[3] = {
+ (uint8_t*)lock.pBits,
+ (uint8_t*)lock.pBits + pitch[0] * dxva_ctx->surface_height,
+ (uint8_t*)lock.pBits + pitch[0] * dxva_ctx->surface_height
+ + pitch[1] * dxva_ctx->surface_height / 2,
+ };
+
+ if (imc3) {
+ uint8_t *V = plane[1];
+ plane[1] = plane[2];
+ plane[2] = V;
+ }
+
+ uint8_t *data[4];
+ av_image_fill_pointers(data, PIX_FMT_YUV420P, frame->height, dst, (const int *)pitch);
+ copy_yv12(data, pitch, plane, pitch, frame->width, frame->height);
+ } else if (dxva_ctx->render_fmt == MAKEFOURCC('N','V','1','2')) {
+ uint8_t *plane[2] = {
+ lock.pBits,
+ (uint8_t*)lock.pBits + lock.Pitch * dxva_ctx->surface_height
+ };
+ size_t pitch[2] = {
+ lock.Pitch,
+ lock.Pitch,
+ };
+
+ uint8_t *data[4];
+ int linesizes[4];
+ av_image_fill_linesizes(linesizes, AV_PIX_FMT_YUV420P, frame->width);
+ av_image_fill_pointers(data, PIX_FMT_YUV420P, frame->height, dst, linesizes);
+ copy_nv12(data, linesizes, plane, pitch, frame->width, frame->height);
+ } else {
+        ERR("unsupported render format (%x)\n", dxva_ctx->render_fmt);
+ IDirect3DSurface9_UnlockRect(d3d);
+ return;
+ }
+
+ /* */
+ IDirect3DSurface9_UnlockRect(d3d);
+}
+
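+/*
+ * Expected call sequence from the codec device side (a reader's summary, not
+ * an additional API):
+ *   probe()                        - once, when hardware acceleration support
+ *                                    is probed
+ *   setup()                        - from the get_format callback, once a
+ *                                    hardware pixel format can be negotiated
+ *   get_buffer()/release_buffer()  - per decoded frame, through the
+ *                                    AVCodecContext callbacks
+ *   get_picture()                  - when a decoded surface is read back into
+ *                                    guest-visible memory
+ *   cleanup()                      - from deinit, when the context is closed
+ */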
+CodecPlugin dxva_plugin = {
+ .name = "DXVA2",
+ .pix_fmt = PIX_FMT_DXVA2_VLD,
+ .output_pix_fmt = PIX_FMT_YUV420P,
+ .probe = probe,
+ .setup = dxva_setup,
+ .cleanup = cleanup,
+ .get_buffer = dxva_get_surface,
+ .release_buffer = dxva_release_surface,
+ .get_picture = extract,
+};