#define DEFAULT_VIDEO_GOP_SIZE 15
+//
+// COMMON
+//
+
enum codec_api_type {
CODEC_INIT = 0,
CODEC_DECODE_VIDEO,
CODEC_TYPE_ENCODE,
};
-struct audio_data {
- int32_t channels;
- int32_t sample_rate;
- int32_t block_align;
- int32_t depth;
- int32_t sample_fmt;
- int32_t frame_size;
- int32_t bits_per_smp_fmt;
- int32_t reserved;
- int64_t channel_layout;
-};
-
-struct video_data {
- int32_t width;
- int32_t height;
- int32_t fps_n;
- int32_t fps_d;
- int32_t par_n;
- int32_t par_d;
- int32_t pix_fmt;
- int32_t bpp;
- int32_t ticks_per_frame;
-} __attribute__((packed));
-
-struct video_decode_input {
- int32_t inbuf_size;
- int32_t idx;
- int64_t in_offset;
- uint8_t inbuf; // for pointing inbuf address
-} __attribute__((packed));
-
-struct video_decode_output {
- int32_t len;
- int32_t got_picture;
- struct video_data data;
-} __attribute__((packed));
-
-struct video_encode_input {
- int32_t inbuf_size;
- int64_t in_timestamp;
- uint8_t inbuf; // for pointing inbuf address
-} __attribute__((packed));
-
-struct video_encode_output {
- int32_t len;
- int32_t coded_frame;
- int32_t key_frame;
- uint8_t data; // for pointing outbuf address
-} __attribute__((packed));
-
typedef struct DataContainer {
// common
bool is_got;
int32_t len;
AVCodecContext *avctx;
- // for decoder
+ // for video decoder
size_t picture_buffer_offset;
AVFrame *frame;
- // for encoder
+ // for video encoder
AVPacket *avpkt;
} DataContainer;
-static void fill_video_data(const AVCodecContext *avctx,
- struct video_data *video)
-{
- memset(video, 0x00, sizeof(struct video_data));
-
- video->width = avctx->width;
- video->height = avctx->height;
- video->fps_n = avctx->time_base.num;
- video->fps_d = avctx->time_base.den;
- video->pix_fmt = avctx->pix_fmt;
- video->par_n = avctx->sample_aspect_ratio.num;
- video->par_d = avctx->sample_aspect_ratio.den;
- video->bpp = avctx->bits_per_coded_sample;
- video->ticks_per_frame = avctx->ticks_per_frame;
-}
-
-
DeviceMemEntry *entry[CODEC_CONTEXT_MAX];
// define a queue to manage ioparam, context data
codec_decode_video2,
};
-static AVCodecParserContext *maru_brill_codec_parser_init(AVCodecContext *avctx);
-
-static void brillcodec_push_readqueue(MaruBrillCodecState *s, CodecParam *ioparam);
-static void brillcodec_push_write_queue(MaruBrillCodecState *s, void* opaque,
- size_t data_size, int ctx_id,
- DataHandler *handler);
-
-static void *brillcodec_store_inbuf(uint8_t *mem_base, CodecParam *ioparam);
-
-// default handler
+// default data handler
static void default_get_data(void *dst, void *src, size_t size)
{
memcpy(dst, src, size);
.release = default_release,
};
-static void default_get_picture(void *dst, void *src, enum AVPixelFormat pix_fmt)
+// queue
+static void *brillcodec_store_inbuf(uint8_t *mem_base,
+ CodecParam *ioparam)
{
- AVFrame *frame = (AVFrame *)src;
- int pict_size = avpicture_get_size(pix_fmt, frame->width, frame->height);
- if (pict_size < 0) {
- // cannot enter here...
- ERR("Invalid picture size\n");
- return;
+ DeviceMemEntry *elem = NULL;
+ int readbuf_size, size = 0;
+ uint8_t *readbuf = NULL;
+ uint8_t *device_mem = mem_base + ioparam->mem_offset;
+
+ elem = g_malloc0(sizeof(DeviceMemEntry));
+
+ memcpy(&readbuf_size, device_mem, sizeof(readbuf_size));
+ size = sizeof(readbuf_size);
+
+ TRACE("readbuf size: %d\n", readbuf_size);
+ if (readbuf_size <= 0) {
+ TRACE("inbuf size is 0. api_id %d, ctx_id %d, mem_offset %x\n",
+ ioparam->api_index, ioparam->ctx_index, ioparam->mem_offset);
+ } else {
+ readbuf = g_malloc0(readbuf_size);
+ TRACE("copy input buffer from guest. ctx_id: %d, mem_offset: %x\n",
+ ioparam->ctx_index, ioparam->mem_offset);
+ memcpy(readbuf, device_mem + size, readbuf_size);
}
- avpicture_layout((AVPicture *)frame, pix_fmt,
- frame->width, frame->height, dst, pict_size);
+ // memset(device_mem, 0x00, sizeof(readbuf_size));
+
+ elem->opaque = readbuf;
+ elem->data_size = readbuf_size;
+ elem->ctx_id = ioparam->ctx_index;
+
+ return elem;
}
-// video decode data handler
-// FIXME: ignore "size" now...
-static void copy_decode_data(void *dst, void *opaque, size_t dummy)
+static void brillcodec_push_readqueue(MaruBrillCodecState *s,
+ CodecParam *ioparam)
{
- DataContainer *dc = (DataContainer *)opaque;
- CodecContext *context = (CodecContext *)dc->avctx->opaque;
+ CodecDataStg *elem = NULL;
+ DeviceMemEntry *data_buf = NULL;
- if (dc->picture_buffer_offset) {
- // if output video data is exist...
- struct video_decode_output *decode_output =
- (struct video_decode_output *)dst;
- decode_output->len = dc->len;
- decode_output->got_picture = dc->is_got ? 1 : 0;
- fill_video_data(dc->avctx, &decode_output->data);
+ elem = g_malloc0(sizeof(CodecDataStg));
- if (context->is_hwaccel) {
- decode_output->data.pix_fmt = context->state->hwaccel_plugin->output_pix_fmt;
- }
- }
+ elem->param_buf = ioparam;
- if (dc->frame) {
- // if picture is exist...
- if (context->is_hwaccel) {
- context->state->hwaccel_plugin->get_picture(dst + dc->picture_buffer_offset, dc->frame);
- } else {
- default_get_picture(dst + dc->picture_buffer_offset, dc->frame, dc->avctx->pix_fmt);
- }
+ switch(ioparam->api_index) {
+ case CODEC_INIT:
+ case CODEC_DECODE_VIDEO:
+ case CODEC_ENCODE_VIDEO:
+ case CODEC_DECODE_AUDIO:
+ case CODEC_ENCODE_AUDIO:
+ case CODEC_PICTURE_COPY:
+ case CODEC_DECODE_VIDEO2:
+ data_buf = brillcodec_store_inbuf((uint8_t *)s->vaddr, ioparam);
+ break;
+ default:
+ TRACE("no buffer from guest\n");
+ break;
}
+
+ elem->data_buf = data_buf;
+
+ qemu_mutex_lock(&s->ioparam_queue_mutex);
+ QTAILQ_INSERT_TAIL(&codec_rq, elem, node);
+ qemu_mutex_unlock(&s->ioparam_queue_mutex);
}
-static void release(void *opaque) {
- DataContainer *dc = (DataContainer *)opaque;
- if (dc->avpkt) {
- g_free(dc->avpkt->data);
- g_free(dc->avpkt);
+static CodecDataStg *brillcodec_pop_readqueue(MaruBrillCodecState *s)
+{
+ CodecDataStg *elem = NULL;
+
+ qemu_mutex_lock(&s->ioparam_queue_mutex);
+ elem = QTAILQ_FIRST(&codec_rq);
+ if (elem) {
+ QTAILQ_REMOVE(&codec_rq, elem, node);
}
- g_free(dc);
+ qemu_mutex_unlock(&s->ioparam_queue_mutex);
+
+ return elem;
}
-static DataHandler video_decode_data_handler = {
- .get_data = copy_decode_data,
- .release = release,
-};
+static void brillcodec_push_writequeue(MaruBrillCodecState *s, void* opaque,
+ size_t data_size, int ctx_id,
+ DataHandler *handler)
+{
+ DeviceMemEntry *elem = NULL;
+ elem = g_malloc0(sizeof(DeviceMemEntry));
-static void copy_encode_data(void *dst, void *opaque, size_t dummy)
+ elem->opaque = opaque;
+ elem->data_size = data_size;
+ elem->ctx_id = ctx_id;
+
+ if (handler) {
+ elem->handler = handler;
+ } else {
+ elem->handler = &default_data_handler;
+ }
+
+ qemu_mutex_lock(&s->context_queue_mutex);
+ QTAILQ_INSERT_TAIL(&codec_wq, elem, node);
+ qemu_mutex_unlock(&s->context_queue_mutex);
+}
+
+void brillcodec_pop_writequeue(MaruBrillCodecState *s, uint32_t ctx_idx)
{
- DataContainer *dc = (DataContainer *)opaque;
- struct video_encode_output *encode_output =
- (struct video_encode_output *)dst;
+ DeviceMemEntry *elem = NULL;
+ uint32_t mem_offset = 0;
- encode_output->len = dc->len;
- if (dc->len && dc->is_got) {
- // inform gstreamer plugin about the status of encoded frames
- // A flag for output buffer in gstreamer is depending on the status.
- if (dc->avctx->coded_frame) {
- encode_output->coded_frame = 1;
- // if key_frame is 0, this frame cannot be decoded independently.
- encode_output->key_frame = dc->avctx->coded_frame->key_frame;
+ TRACE("enter: %s\n", __func__);
+
+ if (ctx_idx < 1 || ctx_idx > (CODEC_CONTEXT_MAX - 1)) {
+ ERR("invalid buffer index. %d\n", ctx_idx);
+ return;
+ }
+
+    TRACE("pop_writequeue. context index: %d\n", ctx_idx);
+ elem = entry[ctx_idx];
+ if (elem) {
+ mem_offset = s->ioparam.mem_offset;
+
+ // check corrupted mem_offset
+ if (mem_offset < CODEC_MEM_SIZE) {
+ elem->handler->get_data(s->vaddr + mem_offset, elem->opaque, elem->data_size);
+ elem->handler->release(elem->opaque);
+ } else {
+ TRACE("mem_offset is corrupted!!\n");
}
- memcpy(&encode_output->data, dc->avpkt->data, dc->avpkt->size);
+ TRACE("pop_writequeue. release elem: %p\n", elem);
+ g_free(elem);
+
+ entry[ctx_idx] = NULL;
+ } else {
+ TRACE("there is no buffer to copy data to guest\n");
}
-}
-static DataHandler video_encode_data_handler = {
- .get_data = copy_encode_data,
- .release = release,
-};
+ TRACE("leave: %s\n", __func__);
+}
+// threads
static void maru_brill_codec_thread_exit(MaruBrillCodecState *s)
{
int index;
--(s->idle_thread_cnt); // protected under mutex.
qemu_mutex_unlock(&s->context_mutex);
- qemu_mutex_lock(&s->ioparam_queue_mutex);
- elem = QTAILQ_FIRST(&codec_rq);
- if (elem) {
- QTAILQ_REMOVE(&codec_rq, elem, node);
- qemu_mutex_unlock(&s->ioparam_queue_mutex);
- } else {
- qemu_mutex_unlock(&s->ioparam_queue_mutex);
- continue;
- }
-
- if (!elem->param_buf) {
+ elem = brillcodec_pop_readqueue(s);
+ if (!elem || !elem->param_buf) {
continue;
}
return NULL;
}
-// queue
-static void brillcodec_push_readqueue(MaruBrillCodecState *s,
- CodecParam *ioparam)
-{
- CodecDataStg *elem = NULL;
- DeviceMemEntry *data_buf = NULL;
+//
+// DEVICE FUNCTIONS
+//
- elem = g_malloc0(sizeof(CodecDataStg));
+void brillcodec_release_context(MaruBrillCodecState *s, int32_t ctx_id)
+{
+ DeviceMemEntry *wq_elem = NULL, *wnext = NULL;
+ CodecDataStg *rq_elem = NULL, *rnext = NULL;
- elem->param_buf = ioparam;
+ TRACE("enter: %s\n", __func__);
- switch(ioparam->api_index) {
- case CODEC_INIT ... CODEC_ENCODE_AUDIO:
- case CODEC_DECODE_VIDEO2:
- data_buf = brillcodec_store_inbuf((uint8_t *)s->vaddr, ioparam);
- break;
- default:
- TRACE("no buffer from guest\n");
- break;
- }
-
- elem->data_buf = data_buf;
-
- qemu_mutex_lock(&s->ioparam_queue_mutex);
- QTAILQ_INSERT_TAIL(&codec_rq, elem, node);
- qemu_mutex_unlock(&s->ioparam_queue_mutex);
-}
-
-static void *brillcodec_store_inbuf(uint8_t *mem_base,
- CodecParam *ioparam)
-{
- DeviceMemEntry *elem = NULL;
- int readbuf_size, size = 0;
- uint8_t *readbuf = NULL;
- uint8_t *device_mem = mem_base + ioparam->mem_offset;
-
- elem = g_malloc0(sizeof(DeviceMemEntry));
-
- memcpy(&readbuf_size, device_mem, sizeof(readbuf_size));
- size = sizeof(readbuf_size);
-
- TRACE("readbuf size: %d\n", readbuf_size);
- if (readbuf_size <= 0) {
- TRACE("inbuf size is 0. api_id %d, ctx_id %d, mem_offset %x\n",
- ioparam->api_index, ioparam->ctx_index, ioparam->mem_offset);
- } else {
- readbuf = g_malloc0(readbuf_size);
- TRACE("copy input buffer from guest. ctx_id: %d, mem_offset: %x\n",
- ioparam->ctx_index, ioparam->mem_offset);
- memcpy(readbuf, device_mem + size, readbuf_size);
- }
- // memset(device_mem, 0x00, sizeof(readbuf_size));
-
- elem->opaque = readbuf;
- elem->data_size = readbuf_size;
- elem->ctx_id = ioparam->ctx_index;
-
- return elem;
-}
-
-static void brillcodec_push_write_queue(MaruBrillCodecState *s, void* opaque,
- size_t data_size, int ctx_id,
- DataHandler *handler)
-{
- DeviceMemEntry *elem = NULL;
- elem = g_malloc0(sizeof(DeviceMemEntry));
-
- elem->opaque = opaque;
- elem->data_size = data_size;
- elem->ctx_id = ctx_id;
-
- if (handler) {
- elem->handler = handler;
- } else {
- elem->handler = &default_data_handler;
- }
-
- qemu_mutex_lock(&s->context_queue_mutex);
- QTAILQ_INSERT_TAIL(&codec_wq, elem, node);
- qemu_mutex_unlock(&s->context_queue_mutex);
-}
-
-void brillcodec_pop_writequeue(MaruBrillCodecState *s, uint32_t ctx_idx)
-{
- DeviceMemEntry *elem = NULL;
- uint32_t mem_offset = 0;
-
- TRACE("enter: %s\n", __func__);
-
- if (ctx_idx < 1 || ctx_idx > (CODEC_CONTEXT_MAX - 1)) {
- ERR("invalid buffer index. %d\n", ctx_idx);
- return;
- }
-
- TRACE("pop_writeqeue. context index: %d\n", ctx_idx);
- elem = entry[ctx_idx];
- if (elem) {
- mem_offset = s->ioparam.mem_offset;
-
- // check corrupted mem_offset
- if (mem_offset < CODEC_MEM_SIZE) {
- elem->handler->get_data(s->vaddr + mem_offset, elem->opaque, elem->data_size);
- elem->handler->release(elem->opaque);
- } else {
- TRACE("mem_offset is corrupted!!\n");
- }
-
- TRACE("pop_writequeue. release elem: %p\n", elem);
- g_free(elem);
-
- entry[ctx_idx] = NULL;
- } else {
- TRACE("there is no buffer to copy data to guest\n");
- }
-
- TRACE("leave: %s\n", __func__);
-}
-
-static void serialize_video_data(const struct video_data *video,
- AVCodecContext *avctx)
-{
- if (video->width) {
- avctx->width = video->width;
- }
- if (video->height) {
- avctx->height = video->height;
- }
- if (video->fps_n) {
- avctx->time_base.num = video->fps_n;
- }
- if (video->fps_d) {
- avctx->time_base.den = video->fps_d;
- }
- if (video->pix_fmt > PIX_FMT_NONE) {
- avctx->pix_fmt = video->pix_fmt;
- }
- if (video->par_n) {
- avctx->sample_aspect_ratio.num = video->par_n;
- }
- if (video->par_d) {
- avctx->sample_aspect_ratio.den = video->par_d;
- }
- if (video->bpp) {
- avctx->bits_per_coded_sample = video->bpp;
- }
- if (video->ticks_per_frame) {
- avctx->ticks_per_frame = video->ticks_per_frame;
- }
-
- INFO("codec_init. video, resolution: %dx%d, framerate: %d/%d "
- "pixel_fmt: %d sample_aspect_ratio: %d/%d bpp %d\n",
- avctx->width, avctx->height, avctx->time_base.num,
- avctx->time_base.den, avctx->pix_fmt, avctx->sample_aspect_ratio.num,
- avctx->sample_aspect_ratio.den, avctx->bits_per_coded_sample);
-}
-
-static void serialize_audio_data (const struct audio_data *audio,
- AVCodecContext *avctx)
-{
- if (audio->channels) {
- avctx->channels = audio->channels;
- }
- if (audio->sample_rate) {
- avctx->sample_rate = audio->sample_rate;
- }
- if (audio->block_align) {
- avctx->block_align = audio->block_align;
- }
-
- if (audio->sample_fmt > AV_SAMPLE_FMT_NONE) {
- avctx->sample_fmt = audio->sample_fmt;
- }
-
- INFO("codec_init. audio, channel %d sample_rate %d sample_fmt %d ch_layout %lld\n",
- avctx->channels, avctx->sample_rate, avctx->sample_fmt, avctx->channel_layout);
-}
-
-void brillcodec_release_context(MaruBrillCodecState *s, int32_t ctx_id)
-{
- DeviceMemEntry *wq_elem = NULL, *wnext = NULL;
- CodecDataStg *rq_elem = NULL, *rnext = NULL;
-
- TRACE("enter: %s\n", __func__);
-
- TRACE("release %d of context\n", ctx_id);
+ TRACE("release %d of context\n", ctx_id);
qemu_mutex_lock(&s->threadpool.mutex);
if (CONTEXT(s, ctx_id)->opened_context) {
return ctx_id;
}
-static enum PixelFormat get_format(AVCodecContext *avctx,
- const enum PixelFormat *pi_fmt) {
- bool can_hwaccel = false;
- int i;
-
- CodecContext *context = (CodecContext *)avctx->opaque;
- MaruBrillCodecState *s = context->state;
+//
+// CODEC FUNCTIONS
+// FLUSH BUFFERS
+//
- if (!s->hwaccel_plugin) {
- goto end;
- }
+static bool codec_flush_buffers(MaruBrillCodecState *s, int ctx_id, void *data_buf)
+{
+ AVCodecContext *avctx = NULL;
+ bool ret = true;
- for (i = 0; pi_fmt[i] != PIX_FMT_NONE; ++i) {
- const AVPixFmtDescriptor *dsc = av_pix_fmt_desc_get(pi_fmt[i]);
- if (dsc == NULL) {
- continue;
- }
- bool hwaccel = (dsc->flags & PIX_FMT_HWACCEL) != 0;
+ TRACE("enter: %s\n", __func__);
- if (hwaccel) {
- can_hwaccel = true;
- }
- }
+ avctx = CONTEXT(s, ctx_id)->avctx;
+ if (!avctx) {
+ ERR("%d of AVCodecContext is NULL.\n", ctx_id);
+ ret = false;
+ } else if (!avctx->codec) {
+ ERR("%d of AVCodec is NULL.\n", ctx_id);
+ ret = false;
+ } else {
+ TRACE("flush %d context of buffers.\n", ctx_id);
+ AVCodecParserContext *pctx = NULL;
+ uint8_t *poutbuf = NULL;
+ int poutbuf_size = 0;
+ int res = 0;
- if (!can_hwaccel) {
- goto end;
- }
+ uint8_t p_inbuf[FF_INPUT_BUFFER_PADDING_SIZE];
+ int p_inbuf_size = FF_INPUT_BUFFER_PADDING_SIZE;
- if (!s->hwaccel_plugin->setup(avctx, avctx->width, avctx->height)) {
- goto end;
- }
+ memset(&p_inbuf, 0x00, p_inbuf_size);
- for (i = 0; pi_fmt[i] != PIX_FMT_NONE; ++i) {
- if (pi_fmt[i] == s->hwaccel_plugin->pix_fmt) {
- break;
+ pctx = CONTEXT(s, ctx_id)->parser_ctx;
+ if (pctx) {
+ res = av_parser_parse2(pctx, avctx, &poutbuf, &poutbuf_size,
+ p_inbuf, p_inbuf_size, -1, -1, -1);
+ INFO("before flush buffers, using parser. res: %d\n", res);
}
- }
- if (pi_fmt[i] == PIX_FMT_NONE) {
- goto end;
+ avcodec_flush_buffers(avctx);
}
- INFO("HW_ACCEL is enabled with pix_fmt [%s]\n", av_get_pix_fmt_name(pi_fmt[i]));
- context->is_hwaccel = true;
- return pi_fmt[i];
-
-end:
- INFO("HW_ACCEL is disabled\n");
- context->is_hwaccel = false;
- return avcodec_default_get_format(avctx, pi_fmt);
-}
-
-static int get_buffer(struct AVCodecContext *avctx, AVFrame *frame) {
- CodecContext *context = (CodecContext *)avctx->opaque;
+ brillcodec_push_writequeue(s, NULL, 0, ctx_id, NULL);
- if (context->is_hwaccel) {
- return context->state->hwaccel_plugin->get_buffer(avctx, frame);
- }
+ TRACE("leave: %s\n", __func__);
- return avcodec_default_get_buffer(avctx, frame);
+ return ret;
}
-static void release_buffer(struct AVCodecContext *avctx, AVFrame *frame) {
- CodecContext *context = (CodecContext *)avctx->opaque;
+//
+// CODEC FUNCTIONS
+// VIDEO DECODE / ENCODE
+//
- if (context->is_hwaccel) {
- return context->state->hwaccel_plugin->release_buffer(avctx, frame);
- }
+struct video_data {
+ int32_t width;
+ int32_t height;
+ int32_t fps_n;
+ int32_t fps_d;
+ int32_t par_n;
+ int32_t par_d;
+ int32_t pix_fmt;
+ int32_t bpp;
+ int32_t ticks_per_frame;
+} __attribute__((packed));
- return avcodec_default_release_buffer(avctx, frame);
-}
-
-// allocate avcontext and avframe struct.
-static AVCodecContext *maru_brill_codec_alloc_context(MaruBrillCodecState *s, int ctx_id)
-{
- TRACE("enter: %s\n", __func__);
-
- TRACE("allocate %d of context and frame.\n", ctx_id);
+struct video_decode_input {
+ int32_t inbuf_size;
+ int32_t idx;
+ int64_t in_offset;
+    uint8_t inbuf; // placeholder marking the inbuf address
+} __attribute__((packed));
- CONTEXT(s, ctx_id)->avctx = avcodec_alloc_context3(NULL);
+struct video_decode_output {
+ int32_t len;
+ int32_t got_picture;
+    uint8_t data; // placeholder marking the output data address
+} __attribute__((packed));
- AVCodecContext *avctx = CONTEXT(s, ctx_id)->avctx;
- avctx->get_format = get_format;
- avctx->get_buffer = get_buffer;
- avctx->reget_buffer = avcodec_default_reget_buffer;
- avctx->release_buffer = release_buffer;
- avctx->opaque = CONTEXT(s, ctx_id);
+struct video_encode_input {
+ int32_t inbuf_size;
+ int64_t in_timestamp;
+    uint8_t inbuf; // placeholder marking the inbuf address
+} __attribute__((packed));
- CONTEXT(s, ctx_id)->frame = avcodec_alloc_frame();
- CONTEXT(s, ctx_id)->opened_context = false;
- CONTEXT(s, ctx_id)->state = s;
+struct video_encode_output {
+ int32_t len;
+ int32_t coded_frame;
+ int32_t key_frame;
+    uint8_t data; // placeholder marking the output data address
+} __attribute__((packed));
- TRACE("leave: %s\n", __func__);
+static void fill_video_data(const AVCodecContext *avctx,
+ struct video_data *video)
+{
+ memset(video, 0x00, sizeof(struct video_data));
- return avctx;
+ video->width = avctx->width;
+ video->height = avctx->height;
+ video->fps_n = avctx->time_base.num;
+ video->fps_d = avctx->time_base.den;
+ video->pix_fmt = avctx->pix_fmt;
+ video->par_n = avctx->sample_aspect_ratio.num;
+ video->par_d = avctx->sample_aspect_ratio.den;
+ video->bpp = avctx->bits_per_coded_sample;
+ video->ticks_per_frame = avctx->ticks_per_frame;
}
-static AVCodec *maru_brill_codec_find_avcodec(uint8_t *mem_buf)
+static void default_get_picture(void *dst, void *src, enum AVPixelFormat pix_fmt)
{
- AVCodec *codec = NULL;
- int32_t encode, size = 0;
- char codec_name[32] = {0, };
-
- memcpy(&encode, mem_buf, sizeof(encode));
- size = sizeof(encode);
- memcpy(codec_name, mem_buf + size, sizeof(codec_name));
- size += sizeof(codec_name);
-
- TRACE("type: %d, name: %s\n", encode, codec_name);
-
- if (encode) {
- codec = avcodec_find_encoder_by_name (codec_name);
- } else {
- codec = avcodec_find_decoder_by_name (codec_name);
+ AVFrame *frame = (AVFrame *)src;
+ int pict_size = avpicture_get_size(pix_fmt, frame->width, frame->height);
+ if (pict_size < 0) {
+ // cannot enter here...
+ ERR("Invalid picture size\n");
+ return;
}
- INFO("%s!! find %s %s\n", codec ? "success" : "failure",
- codec_name, encode ? "encoder" : "decoder");
-
- return codec;
+ avpicture_layout((AVPicture *)frame, pix_fmt,
+ frame->width, frame->height, dst, pict_size);
}
-static void read_codec_init_data(AVCodecContext *avctx, uint8_t *mem_buf)
+// video decode data handler
+// FIXME: the "size" parameter is currently ignored...
+static void copy_decode_data(void *dst, void *opaque, size_t dummy)
{
- struct video_data video = { 0, };
- struct audio_data audio = { 0, };
- int bitrate = 0, size = 0;
-
- memcpy(&video, mem_buf + size, sizeof(video));
- size = sizeof(video);
- serialize_video_data(&video, avctx);
-
- memcpy(&audio, mem_buf + size, sizeof(audio));
- size += sizeof(audio);
- serialize_audio_data(&audio, avctx);
+ DataContainer *dc = (DataContainer *)opaque;
+ CodecContext *context = (CodecContext *)dc->avctx->opaque;
+ CodecPlugin *plugin = context->state->hwaccel_plugin;
- memcpy(&bitrate, mem_buf + size, sizeof(bitrate));
- size += sizeof(bitrate);
- if (bitrate) {
- avctx->bit_rate = bitrate;
- }
+ if (dc->picture_buffer_offset) {
+        // if output video data exists...
+ struct video_decode_output *decode_output =
+ (struct video_decode_output *)dst;
+ struct video_data *data = (struct video_data *)&decode_output->data;
- memcpy(&avctx->codec_tag, mem_buf + size, sizeof(avctx->codec_tag));
- size += sizeof(avctx->codec_tag);
- memcpy(&avctx->extradata_size,
- mem_buf + size, sizeof(avctx->extradata_size));
- size += sizeof(avctx->extradata_size);
- INFO("extradata size: %d.\n", avctx->extradata_size);
+ decode_output->len = dc->len;
+ decode_output->got_picture = dc->is_got ? 1 : 0;
+ fill_video_data(dc->avctx, data);
- if (avctx->extradata_size > 0) {
- avctx->extradata =
- av_mallocz(ROUND_UP_X(avctx->extradata_size +
- FF_INPUT_BUFFER_PADDING_SIZE, 4));
- if (avctx->extradata) {
- memcpy(avctx->extradata, mem_buf + size, avctx->extradata_size);
+ if (context->is_hwaccel && data->pix_fmt == plugin->pix_fmt) {
+ data->pix_fmt = plugin->output_pix_fmt;
}
- } else {
- TRACE("no extra data.\n");
- avctx->extradata =
- av_mallocz(ROUND_UP_X(FF_INPUT_BUFFER_PADDING_SIZE, 4));
}
-}
-
-// write the result of codec_init
-static int write_codec_init_data(AVCodecContext *avctx, uint8_t *mem_buf)
-{
- int size = 0;
-
- if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
- int osize = av_get_bytes_per_sample(avctx->sample_fmt);
-
- INFO("avcodec_open. sample_fmt %d, bytes_per_sample %d\n", avctx->sample_fmt, osize);
- if ((avctx->codec_id == AV_CODEC_ID_AAC) && avctx->codec->encode2) {
- osize = av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
+ if (dc->frame) {
+        // if a picture exists...
+ if (context->is_hwaccel) {
+ plugin->get_picture(dst + dc->picture_buffer_offset, dc->frame);
+ } else {
+ default_get_picture(dst + dc->picture_buffer_offset, dc->frame, dc->avctx->pix_fmt);
}
- memcpy(mem_buf, &avctx->sample_fmt, sizeof(avctx->sample_fmt));
- size = sizeof(avctx->sample_fmt);
-
- // frame_size: samples per packet, initialized when calling 'init'
- memcpy(mem_buf + size, &avctx->frame_size, sizeof(avctx->frame_size));
- size += sizeof(avctx->frame_size);
-
- memcpy(mem_buf + size, &osize, sizeof(osize));
- size += sizeof(osize);
}
-
- return size;
}
-static uint8_t *resample_audio_buffer(AVCodecContext * avctx, AVFrame *samples,
- int *out_size, int out_sample_fmt)
-{
- AVAudioResampleContext *avr = NULL;
- uint8_t *resampled_audio = NULL;
- int buffer_size = 0, out_linesize = 0;
- int nb_samples = samples->nb_samples;
- // int out_sample_fmt = avctx->sample_fmt - 5;
-
- avr = avresample_alloc_context();
- if (!avr) {
- ERR("failed to allocate avresample context\n");
- return NULL;
- }
-
- TRACE("resample audio. channel_layout %lld sample_rate %d "
- "in_sample_fmt %d out_sample_fmt %d\n",
- avctx->channel_layout, avctx->sample_rate,
- avctx->sample_fmt, out_sample_fmt);
-
- av_opt_set_int(avr, "in_channel_layout", avctx->channel_layout, 0);
- av_opt_set_int(avr, "in_sample_fmt", avctx->sample_fmt, 0);
- av_opt_set_int(avr, "in_sample_rate", avctx->sample_rate, 0);
- av_opt_set_int(avr, "out_channel_layout", avctx->channel_layout, 0);
- av_opt_set_int(avr, "out_sample_fmt", out_sample_fmt, 0);
- av_opt_set_int(avr, "out_sample_rate", avctx->sample_rate, 0);
-
- TRACE("open avresample context\n");
- if (avresample_open(avr) < 0) {
- ERR("failed to open avresample context\n");
- avresample_free(&avr);
- return NULL;
+static void release(void *opaque) {
+ DataContainer *dc = (DataContainer *)opaque;
+ if (dc->avpkt) {
+ g_free(dc->avpkt->data);
+ g_free(dc->avpkt);
}
+ g_free(dc);
+}
- *out_size =
- av_samples_get_buffer_size(&out_linesize, avctx->channels,
- nb_samples, out_sample_fmt, 0);
+static DataHandler video_decode_data_handler = {
+ .get_data = copy_decode_data,
+ .release = release,
+};
- TRACE("resample audio. out_linesize %d nb_samples %d\n", out_linesize, nb_samples);
+static void copy_encode_data(void *dst, void *opaque, size_t dummy)
+{
+ DataContainer *dc = (DataContainer *)opaque;
+ struct video_encode_output *encode_output =
+ (struct video_encode_output *)dst;
- if (*out_size < 0) {
- ERR("failed to get size of sample buffer %d\n", *out_size);
- avresample_close(avr);
- avresample_free(&avr);
- return NULL;
- }
+ encode_output->len = dc->avpkt->size;
+ if (dc->avpkt->size && dc->is_got) {
+ // inform gstreamer plugin about the status of encoded frames
+        // A flag for the output buffer in gstreamer depends on this status.
+ if (dc->avctx->coded_frame) {
+ encode_output->coded_frame = 1;
+ // if key_frame is 0, this frame cannot be decoded independently.
+ encode_output->key_frame = dc->avctx->coded_frame->key_frame;
+ }
- resampled_audio = av_mallocz(*out_size);
- if (!resampled_audio) {
- ERR("failed to allocate resample buffer\n");
- avresample_close(avr);
- avresample_free(&avr);
- return NULL;
+ memcpy(&encode_output->data, dc->avpkt->data, dc->avpkt->size);
}
+}
- buffer_size = avresample_convert(avr, &resampled_audio,
- out_linesize, nb_samples,
- samples->data, samples->linesize[0],
- samples->nb_samples);
- TRACE("resample_audio out_size %d buffer_size %d\n", *out_size, buffer_size);
-
- avresample_close(avr);
- avresample_free(&avr);
+static DataHandler video_encode_data_handler = {
+ .get_data = copy_encode_data,
+ .release = release,
+};
- return resampled_audio;
-}
static uint32_t parse_and_decode_video(AVCodecContext *avctx, AVFrame *picture,
AVCodecParserContext *pctx, int ctx_id,
return len;
}
-// codec functions
-static bool codec_init(MaruBrillCodecState *s, int ctx_id, void *data_buf)
+static bool codec_decode_video_common(MaruBrillCodecState *s, int ctx_id,
+ void *data_buf, bool copy_picture)
{
AVCodecContext *avctx = NULL;
- AVCodec *codec = NULL;
- int size = 0, ret = -1;
- DeviceMemEntry *elem = NULL;
- uint8_t *tempbuf = NULL;
- int tempbuf_size = 0;
-
- TRACE("enter: %s\n", __func__);
-
- elem = (DeviceMemEntry *)data_buf;
-
- // allocate AVCodecContext
- avctx = maru_brill_codec_alloc_context(s, ctx_id);
- if (!avctx) {
- ERR("[%d] failed to allocate context.\n", __LINE__);
- ret = -1;
- } else {
- codec = maru_brill_codec_find_avcodec(elem->opaque);
- if (codec) {
- size = sizeof(int32_t) + 32; // buffer size of codec_name
- read_codec_init_data(avctx, elem->opaque + size);
-
- // in case of aac encoder, sample format is float
- if (!strcmp(codec->name, "aac") && codec->encode2) {
- TRACE("convert sample format into SAMPLE_FMT_FLTP\n");
- avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
-
- avctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
-
- INFO("aac encoder!! channels %d channel_layout %lld\n", avctx->channels, avctx->channel_layout);
- avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
- }
-
- TRACE("audio sample format %d\n", avctx->sample_fmt);
- TRACE("strict_std_compliance %d\n", avctx->strict_std_compliance);
-
- ret = avcodec_open2(avctx, codec, NULL);
- INFO("avcodec_open. ret 0x%x ctx_id %d\n", ret, ctx_id);
-
- TRACE("channels %d sample_rate %d sample_fmt %d "
- "channel_layout %lld frame_size %d\n",
- avctx->channels, avctx->sample_rate, avctx->sample_fmt,
- avctx->channel_layout, avctx->frame_size);
-
- tempbuf_size = (sizeof(avctx->sample_fmt) + sizeof(avctx->frame_size)
- + sizeof(avctx->extradata_size) + avctx->extradata_size)
- + sizeof(int);
-
- CONTEXT(s, ctx_id)->opened_context = true;
- CONTEXT(s, ctx_id)->parser_ctx =
- maru_brill_codec_parser_init(avctx);
- } else {
- ERR("failed to find codec. ctx_id: %d\n", ctx_id);
- ret = -1;
- }
- }
-
- tempbuf_size += sizeof(ret);
-
- tempbuf = g_malloc(tempbuf_size);
- if (!tempbuf) {
- ERR("failed to allocate a buffer\n");
- tempbuf_size = 0;
- } else {
- memcpy(tempbuf, &ret, sizeof(ret));
- size = sizeof(ret);
- if (ret < 0) {
- ERR("failed to open codec contex.\n");
- } else {
- size += write_codec_init_data(avctx, tempbuf + size);
- TRACE("codec_init. copyback!! size %d\n", size);
- {
- memcpy(tempbuf + size, &avctx->extradata_size, sizeof(avctx->extradata_size));
- size += sizeof(avctx->extradata_size);
-
- INFO("codec_init. extradata_size: %d\n", avctx->extradata_size);
- if (avctx->extradata) {
- memcpy(tempbuf + size, avctx->extradata, avctx->extradata_size);
- size += avctx->extradata_size;
- }
- }
- }
- }
-
- brillcodec_push_write_queue(s, tempbuf, tempbuf_size, ctx_id, NULL);
-
- TRACE("leave: %s\n", __func__);
-
- return true;
-}
-
-static bool codec_deinit(MaruBrillCodecState *s, int ctx_id, void *data_buf)
-{
- AVCodecContext *avctx = NULL;
- AVFrame *frame = NULL;
- AVCodecParserContext *parserctx = NULL;
-
- TRACE("enter: %s\n", __func__);
-
- avctx = CONTEXT(s, ctx_id)->avctx;
- frame = CONTEXT(s, ctx_id)->frame;
- parserctx = CONTEXT(s, ctx_id)->parser_ctx;
- if (!avctx || !frame) {
- TRACE("%d of AVCodecContext or AVFrame is NULL. "
- " Those resources have been released before.\n", ctx_id);
- return false;
- }
-
- INFO("close avcontext of %d\n", ctx_id);
- // qemu_mutex_lock(&s->threadpool.mutex);
- avcodec_close(avctx);
- CONTEXT(s, ctx_id)->opened_context = false;
- // qemu_mutex_unlock(&s->threadpool.mutex);
-
- if (avctx->extradata) {
- TRACE("free context extradata\n");
- av_free(avctx->extradata);
- CONTEXT(s, ctx_id)->avctx->extradata = NULL;
- }
-
- if (frame) {
- TRACE("free frame\n");
- // av_free(frame);
- avcodec_free_frame(&frame);
- CONTEXT(s, ctx_id)->frame = NULL;
- }
-
- if (avctx) {
- TRACE("free codec context\n");
- av_free(avctx);
- CONTEXT(s, ctx_id)->avctx = NULL;
- }
-
- if (parserctx) {
- INFO("close parser context\n");
- av_parser_close(parserctx);
- CONTEXT(s, ctx_id)->parser_ctx = NULL;
- }
-
- brillcodec_push_write_queue(s, NULL, 0, ctx_id, NULL);
-
- TRACE("leave: %s\n", __func__);
-
- return true;
-}
-
-static bool codec_flush_buffers(MaruBrillCodecState *s, int ctx_id, void *data_buf)
-{
- AVCodecContext *avctx = NULL;
- bool ret = true;
-
- TRACE("enter: %s\n", __func__);
-
- avctx = CONTEXT(s, ctx_id)->avctx;
- if (!avctx) {
- ERR("%d of AVCodecContext is NULL.\n", ctx_id);
- ret = false;
- } else if (!avctx->codec) {
- ERR("%d of AVCodec is NULL.\n", ctx_id);
- ret = false;
- } else {
- TRACE("flush %d context of buffers.\n", ctx_id);
- AVCodecParserContext *pctx = NULL;
- uint8_t *poutbuf = NULL;
- int poutbuf_size = 0;
- int res = 0;
-
- uint8_t p_inbuf[FF_INPUT_BUFFER_PADDING_SIZE];
- int p_inbuf_size = FF_INPUT_BUFFER_PADDING_SIZE;
-
- memset(&p_inbuf, 0x00, p_inbuf_size);
-
- pctx = CONTEXT(s, ctx_id)->parser_ctx;
- if (pctx) {
- res = av_parser_parse2(pctx, avctx, &poutbuf, &poutbuf_size,
- p_inbuf, p_inbuf_size, -1, -1, -1);
- INFO("before flush buffers, using parser. res: %d\n", res);
- }
-
- avcodec_flush_buffers(avctx);
- }
-
- brillcodec_push_write_queue(s, NULL, 0, ctx_id, NULL);
-
- TRACE("leave: %s\n", __func__);
-
- return ret;
-}
-
-static bool codec_decode_video_common(MaruBrillCodecState *s, int ctx_id,
- void *data_buf, bool copy_picture)
-{
- AVCodecContext *avctx = NULL;
- AVFrame *frame = NULL;
- AVCodecParserContext *pctx = NULL;
- AVPacket avpkt;
+ AVFrame *frame = NULL;
+ AVCodecParserContext *pctx = NULL;
+ AVPacket avpkt;
DeviceMemEntry *elem = NULL;
struct video_decode_input empty_input = { 0, };
elem = (DeviceMemEntry *)data_buf;
if (!elem || !elem->opaque) {
TRACE("decode_video. no input buffer\n");
- }
- else {
+ } else {
decode_input = elem->opaque;
}
dc->frame = frame;
}
- brillcodec_push_write_queue(s, dc, 0, ctx_id, &video_decode_data_handler);
+ brillcodec_push_writequeue(s, dc, 0, ctx_id, &video_decode_data_handler);
TRACE("leave: %s\n", __func__);
dc->avctx = CONTEXT(s, ctx_id)->avctx;
dc->frame = CONTEXT(s, ctx_id)->frame;
- brillcodec_push_write_queue(s, dc, 0, ctx_id, &video_decode_data_handler);
+ brillcodec_push_writequeue(s, dc, 0, ctx_id, &video_decode_data_handler);
TRACE("leave: %s\n", __func__);
return true;
}
-static bool codec_decode_audio(MaruBrillCodecState *s, int ctx_id, void *data_buf)
+static bool codec_encode_video(MaruBrillCodecState *s, int ctx_id, void *data_buf)
{
- AVCodecContext *avctx;
- AVPacket avpkt;
- AVFrame *audio_out = NULL;
- uint8_t *inbuf = NULL;
- int inbuf_size = 0, size = 0;
- int len = -1, got_frame = 0;
+ AVCodecContext *avctx = NULL;
+ AVFrame *pict = NULL;
+ AVPacket *avpkt = g_malloc0(sizeof(AVPacket));
+ uint8_t *inbuf = NULL, *outbuf = NULL;
+ int outbuf_size = 0;
+ int got_frame = 0, ret = 0;
DeviceMemEntry *elem = NULL;
- uint8_t *tempbuf = NULL;
- int tempbuf_size = 0;
-
- uint8_t *out_buf = NULL;
- int out_buf_size = 0;
- int out_sample_fmt = -1;
+ struct video_encode_input empty_input = { 0, };
+ struct video_encode_input *encode_input = &empty_input;
TRACE("enter: %s\n", __func__);
elem = (DeviceMemEntry *)data_buf;
- if (elem && elem->opaque) {
- memcpy(&inbuf_size, elem->opaque, sizeof(inbuf_size));
- size = sizeof(inbuf_size);
- TRACE("decode_audio. inbuf_size %d\n", inbuf_size);
-
- if (inbuf_size > 0) {
- inbuf = elem->opaque + size;
- }
+ if (!elem || !elem->opaque) {
+ TRACE("encode_video. no input buffer\n");
} else {
- ERR("decode_audio. no input buffer\n");
- // FIXME: improve error handling
- // return false;
+ encode_input = elem->opaque;
}
- av_init_packet(&avpkt);
- avpkt.data = inbuf;
- avpkt.size = inbuf_size;
+ // initialize AVPacket
+ av_init_packet(avpkt);
avctx = CONTEXT(s, ctx_id)->avctx;
- // audio_out = CONTEXT(s, ctx_id)->frame;
- audio_out = avcodec_alloc_frame();
- if (!avctx) {
- ERR("decode_audio. %d of AVCodecContext is NULL\n", ctx_id);
- } else if (!avctx->codec) {
- ERR("decode_audio. %d of AVCodec is NULL\n", ctx_id);
- } else if (!audio_out) {
- ERR("decode_audio. %d of AVFrame is NULL\n", ctx_id);
- } else {
- // avcodec_get_frame_defaults(audio_out);
-
- len = avcodec_decode_audio4(avctx, audio_out, &got_frame, &avpkt);
- TRACE("decode_audio. len %d, channel_layout %lld, got_frame %d\n",
- len, avctx->channel_layout, got_frame);
- if (got_frame) {
- if (av_sample_fmt_is_planar(avctx->sample_fmt)) {
- // convert PLANAR to LINEAR format
- out_sample_fmt = avctx->sample_fmt - 5;
+ pict = CONTEXT(s, ctx_id)->frame;
- out_buf = resample_audio_buffer(avctx, audio_out, &out_buf_size, out_sample_fmt);
- } else {
- // TODO: not planar format
- INFO("decode_audio. cannot handle planar audio format\n");
- len = -1;
- }
- }
- }
-
- tempbuf_size = (sizeof(len) + sizeof(got_frame));
- if (len < 0) {
- ERR("failed to decode audio. ctx_id: %d len: %d got_frame: %d\n",
- ctx_id, len, got_frame);
- got_frame = 0;
- } else {
- tempbuf_size += (sizeof(out_sample_fmt) + sizeof(avctx->sample_rate)
- + sizeof(avctx->channels) + sizeof(avctx->channel_layout)
- + sizeof(out_buf_size) + out_buf_size);
- }
-
- tempbuf = g_malloc(tempbuf_size);
- if (!tempbuf) {
- ERR("failed to allocate decoded audio buffer\n");
- } else {
- memcpy(tempbuf, &len, sizeof(len));
- size = sizeof(len);
- memcpy(tempbuf + size, &got_frame, sizeof(got_frame));
- size += sizeof(got_frame);
- if (got_frame) {
- memcpy(tempbuf + size, &out_sample_fmt, sizeof(out_sample_fmt));
- size += sizeof(out_sample_fmt);
- memcpy(tempbuf + size, &avctx->sample_rate, sizeof(avctx->sample_rate));
- size += sizeof(avctx->sample_rate);
- memcpy(tempbuf + size, &avctx->channels, sizeof(avctx->channels));
- size += sizeof(avctx->channels);
- memcpy(tempbuf + size, &avctx->channel_layout, sizeof(avctx->channel_layout));
- size += sizeof(avctx->channel_layout);
-
- memcpy(tempbuf + size, &out_buf_size, sizeof(out_buf_size));
- size += sizeof(out_buf_size);
- if (out_buf) {
- TRACE("copy resampled audio buffer\n");
- memcpy(tempbuf + size, out_buf, out_buf_size);
- }
- }
- }
-
- brillcodec_push_write_queue(s, tempbuf, tempbuf_size, ctx_id, NULL);
-
- if (audio_out) {
- avcodec_free_frame(&audio_out);
- }
-
- if (out_buf) {
- TRACE("and release decoded_audio buffer\n");
- av_free(out_buf);
- }
-
- TRACE("leave: %s\n", __func__);
- return true;
-}
-
-static bool codec_encode_video(MaruBrillCodecState *s, int ctx_id, void *data_buf)
-{
- AVCodecContext *avctx = NULL;
- AVFrame *pict = NULL;
- AVPacket *avpkt = g_malloc0(sizeof(AVPacket));
- uint8_t *inbuf = NULL, *outbuf = NULL;
- int outbuf_size = 0;
- int got_frame = 0, ret = 0;
-
- DeviceMemEntry *elem = NULL;
- struct video_encode_input empty_input = { 0, };
- struct video_encode_input *encode_input = &empty_input;
-
- TRACE("enter: %s\n", __func__);
-
- elem = (DeviceMemEntry *)data_buf;
- if (!elem || !elem->opaque) {
- TRACE("encode_video. no input buffer\n");
- }
- else {
- encode_input = elem->opaque;
- }
-
- // initialize AVPacket
- av_init_packet(avpkt);
-
- avctx = CONTEXT(s, ctx_id)->avctx;
- pict = CONTEXT(s, ctx_id)->frame;
-
- if(!avctx || !avctx->codec) {
- ERR("critical error !!!\n");
- assert(0);
+ if(!avctx || !avctx->codec) {
+ ERR("critical error !!!\n");
+ assert(0);
}
TRACE("pixel format: %d inbuf: %p, picture data: %p\n",
// write encoded video data
DataContainer *dc = g_malloc0(sizeof(DataContainer));
- dc->len = ret;
dc->is_got = got_frame;
dc->avctx = avctx;
dc->avpkt = avpkt;
- brillcodec_push_write_queue(s, dc, 0, ctx_id, &video_encode_data_handler);
+ brillcodec_push_writequeue(s, dc, 0, ctx_id, &video_encode_data_handler);
TRACE("leave: %s\n", __func__);
return true;
}
-static bool codec_encode_audio(MaruBrillCodecState *s, int ctx_id, void *data_buf)
+//
+// CODEC FUNCTIONS
+// AUDIO DECODE / ENCODE
+//
+
+// Wire-format audio parameters exchanged with the guest-side codec plugin.
+// Packed so the layout matches the guest driver byte-for-byte; keep field
+// order and sizes in sync with the guest definition.
+struct audio_data {
+    int32_t channels;           // channel count
+    int32_t sample_rate;        // samples per second
+    int32_t block_align;        // bytes per coded block (codec-specific)
+    int32_t depth;              // sample depth in bits (presumably; set by guest — TODO confirm)
+    int32_t sample_fmt;         // AVSampleFormat value
+    int32_t frame_size;         // samples per packet
+    int32_t bits_per_smp_fmt;   // bits per sample of sample_fmt (guest-filled)
+    int32_t reserved;           // reserved/padding for future use
+    int64_t channel_layout;     // AV channel layout bitmask
+} __attribute__((packed));
+
+// Convert one decoded audio frame to out_sample_fmt with libavresample.
+// Channel layout and sample rate are kept identical on both sides, so only
+// the sample format changes (used to turn planar output into interleaved).
+// On success returns a newly av_mallocz'ed buffer — owned by the caller,
+// release with av_free() — and stores its byte size in *out_size.
+// Returns NULL on any allocation or setup failure.
+static uint8_t *resample_audio_buffer(AVCodecContext * avctx, AVFrame *samples,
+                                    int *out_size, int out_sample_fmt)
+{
+    AVAudioResampleContext *avr = NULL;
+    uint8_t *resampled_audio = NULL;
+    int buffer_size = 0, out_linesize = 0;
+    int nb_samples = samples->nb_samples;
+    // int out_sample_fmt = avctx->sample_fmt - 5;
+
+    avr = avresample_alloc_context();
+    if (!avr) {
+        ERR("failed to allocate avresample context\n");
+        return NULL;
+    }
+
+    TRACE("resample audio. channel_layout %lld sample_rate %d "
+        "in_sample_fmt %d out_sample_fmt %d\n",
+        avctx->channel_layout, avctx->sample_rate,
+        avctx->sample_fmt, out_sample_fmt);
+
+    // same layout/rate in and out: this is a pure sample-format conversion
+    av_opt_set_int(avr, "in_channel_layout", avctx->channel_layout, 0);
+    av_opt_set_int(avr, "in_sample_fmt", avctx->sample_fmt, 0);
+    av_opt_set_int(avr, "in_sample_rate", avctx->sample_rate, 0);
+    av_opt_set_int(avr, "out_channel_layout", avctx->channel_layout, 0);
+    av_opt_set_int(avr, "out_sample_fmt", out_sample_fmt, 0);
+    av_opt_set_int(avr, "out_sample_rate", avctx->sample_rate, 0);
+
+    TRACE("open avresample context\n");
+    if (avresample_open(avr) < 0) {
+        ERR("failed to open avresample context\n");
+        avresample_free(&avr);
+        return NULL;
+    }
+
+    // required size of the interleaved output buffer
+    *out_size =
+        av_samples_get_buffer_size(&out_linesize, avctx->channels,
+                                    nb_samples, out_sample_fmt, 0);
+
+    TRACE("resample audio. out_linesize %d nb_samples %d\n", out_linesize, nb_samples);
+
+    if (*out_size < 0) {
+        ERR("failed to get size of sample buffer %d\n", *out_size);
+        avresample_close(avr);
+        avresample_free(&avr);
+        return NULL;
+    }
+
+    resampled_audio = av_mallocz(*out_size);
+    if (!resampled_audio) {
+        ERR("failed to allocate resample buffer\n");
+        avresample_close(avr);
+        avresample_free(&avr);
+        return NULL;
+    }
+
+    // NOTE(review): samples->linesize[0] is passed as the input plane size;
+    // confirm this matches the planar source frame's layout. buffer_size is
+    // only traced, not checked for error/short conversion.
+    buffer_size = avresample_convert(avr, &resampled_audio,
+                                    out_linesize, nb_samples,
+                                    samples->data, samples->linesize[0],
+                                    samples->nb_samples);
+    TRACE("resample_audio out_size %d buffer_size %d\n", *out_size, buffer_size);
+
+    avresample_close(avr);
+    avresample_free(&avr);
+
+    return resampled_audio;
+}
+
+static bool codec_decode_audio(MaruBrillCodecState *s, int ctx_id, void *data_buf)
+{
+ AVCodecContext *avctx;
AVPacket avpkt;
- uint8_t *audio_in = NULL;
- int32_t audio_in_size = 0;
- int ret = 0, got_pkt = 0, size = 0;
+ AVFrame *audio_out = NULL;
+ uint8_t *inbuf = NULL;
+ int inbuf_size = 0, size = 0;
+ int len = -1, got_frame = 0;
DeviceMemEntry *elem = NULL;
uint8_t *tempbuf = NULL;
int tempbuf_size = 0;
- AVFrame *in_frame = NULL;
- AVFrame *resampled_frame = NULL;
- uint8_t *samples = NULL;
- int64_t in_timestamp = 0;
+ uint8_t *out_buf = NULL;
+ int out_buf_size = 0;
+ int out_sample_fmt = -1;
TRACE("enter: %s\n", __func__);
- /*
- * copy raw audio data from gstreamer encoder plugin
- * audio_in_size: size of raw audio data
- * audio_in : raw audio data
- */
elem = (DeviceMemEntry *)data_buf;
if (elem && elem->opaque) {
- memcpy(&audio_in_size, elem->opaque, sizeof(audio_in_size));
- size += sizeof(audio_in_size);
-
- memcpy(&in_timestamp, elem->opaque + size, sizeof(in_timestamp));
- size += sizeof(in_timestamp);
+ memcpy(&inbuf_size, elem->opaque, sizeof(inbuf_size));
+ size = sizeof(inbuf_size);
+ TRACE("decode_audio. inbuf_size %d\n", inbuf_size);
- TRACE("encode_audio. audio_in_size %d\n", audio_in_size);
- if (audio_in_size > 0) {
- // audio_in = g_malloc0(audio_in_size);
- // memcpy(audio_in, elem->buf + size, audio_in_size);
- audio_in = elem->opaque + size;
+ if (inbuf_size > 0) {
+ inbuf = elem->opaque + size;
}
} else {
- TRACE("encode_audio. no input buffer\n");
+ ERR("decode_audio. no input buffer\n");
// FIXME: improve error handling
// return false;
}
+ av_init_packet(&avpkt);
+ avpkt.data = inbuf;
+ avpkt.size = inbuf_size;
+
avctx = CONTEXT(s, ctx_id)->avctx;
+ // audio_out = CONTEXT(s, ctx_id)->frame;
+ audio_out = avcodec_alloc_frame();
if (!avctx) {
- ERR("[%s] %d of Context is NULL!\n", __func__, ctx_id);
+ ERR("decode_audio. %d of AVCodecContext is NULL\n", ctx_id);
} else if (!avctx->codec) {
- ERR("%d of AVCodec is NULL.\n", ctx_id);
+ ERR("decode_audio. %d of AVCodec is NULL\n", ctx_id);
+ } else if (!audio_out) {
+ ERR("decode_audio. %d of AVFrame is NULL\n", ctx_id);
} else {
- int bytes_per_sample = 0;
- int audio_in_buffer_size = 0;
- int audio_in_sample_fmt = AV_SAMPLE_FMT_S16;
+ // avcodec_get_frame_defaults(audio_out);
- in_frame = avcodec_alloc_frame();
- if (!in_frame) {
- // FIXME: error handling
- ERR("encode_audio. failed to allocate in_frame\n");
- ret = -1;
- }
+ len = avcodec_decode_audio4(avctx, audio_out, &got_frame, &avpkt);
+ TRACE("decode_audio. len %d, channel_layout %lld, got_frame %d\n",
+ len, avctx->channel_layout, got_frame);
+ if (got_frame) {
+ if (av_sample_fmt_is_planar(avctx->sample_fmt)) {
+ // convert PLANAR to LINEAR format
+ out_sample_fmt = avctx->sample_fmt - 5;
- bytes_per_sample = av_get_bytes_per_sample(audio_in_sample_fmt);
- TRACE("bytes per sample %d, sample format %d\n", bytes_per_sample, audio_in_sample_fmt);
+ out_buf = resample_audio_buffer(avctx, audio_out, &out_buf_size, out_sample_fmt);
+ } else {
+ // TODO: not planar format
+ INFO("decode_audio. cannot handle planar audio format\n");
+ len = -1;
+ }
+ }
+ }
- in_frame->nb_samples = audio_in_size / (bytes_per_sample * avctx->channels);
- TRACE("in_frame->nb_samples %d\n", in_frame->nb_samples);
+ tempbuf_size = (sizeof(len) + sizeof(got_frame));
+ if (len < 0) {
+ ERR("failed to decode audio. ctx_id: %d len: %d got_frame: %d\n",
+ ctx_id, len, got_frame);
+ got_frame = 0;
+ } else {
+ tempbuf_size += (sizeof(out_sample_fmt) + sizeof(avctx->sample_rate)
+ + sizeof(avctx->channels) + sizeof(avctx->channel_layout)
+ + sizeof(out_buf_size) + out_buf_size);
+ }
- in_frame->format = audio_in_sample_fmt;
- in_frame->channel_layout = avctx->channel_layout;
+ tempbuf = g_malloc(tempbuf_size);
+ if (!tempbuf) {
+ ERR("failed to allocate decoded audio buffer\n");
+ } else {
+ memcpy(tempbuf, &len, sizeof(len));
+ size = sizeof(len);
+ memcpy(tempbuf + size, &got_frame, sizeof(got_frame));
+ size += sizeof(got_frame);
+ if (got_frame) {
+ memcpy(tempbuf + size, &out_sample_fmt, sizeof(out_sample_fmt));
+ size += sizeof(out_sample_fmt);
+ memcpy(tempbuf + size, &avctx->sample_rate, sizeof(avctx->sample_rate));
+ size += sizeof(avctx->sample_rate);
+ memcpy(tempbuf + size, &avctx->channels, sizeof(avctx->channels));
+ size += sizeof(avctx->channels);
+ memcpy(tempbuf + size, &avctx->channel_layout, sizeof(avctx->channel_layout));
+ size += sizeof(avctx->channel_layout);
+
+ memcpy(tempbuf + size, &out_buf_size, sizeof(out_buf_size));
+ size += sizeof(out_buf_size);
+ if (out_buf) {
+ TRACE("copy resampled audio buffer\n");
+ memcpy(tempbuf + size, out_buf, out_buf_size);
+ }
+ }
+ }
+
+ brillcodec_push_writequeue(s, tempbuf, tempbuf_size, ctx_id, NULL);
+
+ if (audio_out) {
+ avcodec_free_frame(&audio_out);
+ }
+
+ if (out_buf) {
+ TRACE("and release decoded_audio buffer\n");
+ av_free(out_buf);
+ }
+
+ TRACE("leave: %s\n", __func__);
+ return true;
+}
+
+static bool codec_encode_audio(MaruBrillCodecState *s, int ctx_id, void *data_buf)
+{
+ AVCodecContext *avctx = NULL;
+ AVPacket avpkt;
+ uint8_t *audio_in = NULL;
+ int32_t audio_in_size = 0;
+ int ret = 0, got_pkt = 0, size = 0;
+
+ DeviceMemEntry *elem = NULL;
+ uint8_t *tempbuf = NULL;
+ int tempbuf_size = 0;
+
+ AVFrame *in_frame = NULL;
+ AVFrame *resampled_frame = NULL;
+ uint8_t *samples = NULL;
+ int64_t in_timestamp = 0;
+
+ TRACE("enter: %s\n", __func__);
+
+ /*
+ * copy raw audio data from gstreamer encoder plugin
+ * audio_in_size: size of raw audio data
+ * audio_in : raw audio data
+ */
+ elem = (DeviceMemEntry *)data_buf;
+ if (elem && elem->opaque) {
+ memcpy(&audio_in_size, elem->opaque, sizeof(audio_in_size));
+ size += sizeof(audio_in_size);
+
+ memcpy(&in_timestamp, elem->opaque + size, sizeof(in_timestamp));
+ size += sizeof(in_timestamp);
+
+ TRACE("encode_audio. audio_in_size %d\n", audio_in_size);
+ if (audio_in_size > 0) {
+ // audio_in = g_malloc0(audio_in_size);
+ // memcpy(audio_in, elem->buf + size, audio_in_size);
+ audio_in = elem->opaque + size;
+ }
+ } else {
+ TRACE("encode_audio. no input buffer\n");
+ // FIXME: improve error handling
+ // return false;
+ }
+
+ avctx = CONTEXT(s, ctx_id)->avctx;
+ if (!avctx) {
+ ERR("[%s] %d of Context is NULL!\n", __func__, ctx_id);
+ } else if (!avctx->codec) {
+ ERR("%d of AVCodec is NULL.\n", ctx_id);
+ } else {
+ int bytes_per_sample = 0;
+ int audio_in_buffer_size = 0;
+ int audio_in_sample_fmt = AV_SAMPLE_FMT_S16;
+
+ in_frame = avcodec_alloc_frame();
+ if (!in_frame) {
+ // FIXME: error handling
+ ERR("encode_audio. failed to allocate in_frame\n");
+ ret = -1;
+ }
+
+ bytes_per_sample = av_get_bytes_per_sample(audio_in_sample_fmt);
+ TRACE("bytes per sample %d, sample format %d\n", bytes_per_sample, audio_in_sample_fmt);
+
+ in_frame->nb_samples = audio_in_size / (bytes_per_sample * avctx->channels);
+ TRACE("in_frame->nb_samples %d\n", in_frame->nb_samples);
+
+ in_frame->format = audio_in_sample_fmt;
+ in_frame->channel_layout = avctx->channel_layout;
audio_in_buffer_size = av_samples_get_buffer_size(NULL, avctx->channels, avctx->frame_size, audio_in_sample_fmt, 0);
TRACE("audio_in_buffer_size: %d, audio_in_size %d\n", audio_in_buffer_size, audio_in_size);
}
}
- brillcodec_push_write_queue(s, tempbuf, tempbuf_size, ctx_id, NULL);
+ brillcodec_push_writequeue(s, tempbuf, tempbuf_size, ctx_id, NULL);
if (in_frame) {
avcodec_free_frame(&in_frame);
}
return true;
}
+//
+// CODEC FUNCTIONS
+// CODEC INIT / DEINIT
+//
+
+// Apply guest-supplied video parameters to the codec context.
+// Only nonzero fields (or a valid pix_fmt) override the context, so the
+// guest may leave fields it does not care about zeroed.
+// (Naming note: despite "serialize", this copies FROM struct video_data
+// INTO avctx.)
+static void serialize_video_data(const struct video_data *video,
+                                 AVCodecContext *avctx)
+{
+    if (video->width) {
+        avctx->width = video->width;
+    }
+    if (video->height) {
+        avctx->height = video->height;
+    }
+    if (video->fps_n) {
+        avctx->time_base.num = video->fps_n;
+    }
+    if (video->fps_d) {
+        avctx->time_base.den = video->fps_d;
+    }
+    if (video->pix_fmt > PIX_FMT_NONE) {
+        avctx->pix_fmt = video->pix_fmt;
+    }
+    if (video->par_n) {
+        avctx->sample_aspect_ratio.num = video->par_n;
+    }
+    if (video->par_d) {
+        avctx->sample_aspect_ratio.den = video->par_d;
+    }
+    if (video->bpp) {
+        avctx->bits_per_coded_sample = video->bpp;
+    }
+    if (video->ticks_per_frame) {
+        avctx->ticks_per_frame = video->ticks_per_frame;
+    }
+
+    INFO("codec_init. video, resolution: %dx%d, framerate: %d/%d "
+        "pixel_fmt: %d sample_aspect_ratio: %d/%d bpp %d\n",
+        avctx->width, avctx->height, avctx->time_base.num,
+        avctx->time_base.den, avctx->pix_fmt, avctx->sample_aspect_ratio.num,
+        avctx->sample_aspect_ratio.den, avctx->bits_per_coded_sample);
+}
+
+// Apply guest-supplied audio parameters to the codec context.
+// As with serialize_video_data(), only nonzero/valid fields override the
+// context; the remaining audio_data fields (depth, frame_size, layout, ...)
+// are not consumed here.
+static void serialize_audio_data(const struct audio_data *audio,
+                                 AVCodecContext *avctx)
+{
+    if (audio->channels) {
+        avctx->channels = audio->channels;
+    }
+    if (audio->sample_rate) {
+        avctx->sample_rate = audio->sample_rate;
+    }
+    if (audio->block_align) {
+        avctx->block_align = audio->block_align;
+    }
+
+    if (audio->sample_fmt > AV_SAMPLE_FMT_NONE) {
+        avctx->sample_fmt = audio->sample_fmt;
+    }
+
+    INFO("codec_init. audio, channel %d sample_rate %d sample_fmt %d ch_layout %lld\n",
+        avctx->channels, avctx->sample_rate, avctx->sample_fmt, avctx->channel_layout);
+}
+
+// AVCodecContext.get_format callback: choose an output pixel format from the
+// codec's candidate list. If a hwaccel plugin is configured, some candidate
+// is a HW-accelerated format, the plugin can set up for the current
+// resolution, and the plugin's own pix_fmt is among the candidates, that
+// format is selected and the context is flagged is_hwaccel. Any other case
+// falls back to avcodec's default selection with is_hwaccel cleared.
+static enum PixelFormat get_format(AVCodecContext *avctx,
+                                   const enum PixelFormat *pi_fmt) {
+    bool can_hwaccel = false;
+    int i;
+
+    CodecContext *context = (CodecContext *)avctx->opaque;
+    MaruBrillCodecState *s = context->state;
+
+    if (!s->hwaccel_plugin) {
+        goto end;
+    }
+
+    // is any HW-accelerated format offered at all?
+    for (i = 0; pi_fmt[i] != PIX_FMT_NONE; ++i) {
+        const AVPixFmtDescriptor *dsc = av_pix_fmt_desc_get(pi_fmt[i]);
+        if (dsc == NULL) {
+            continue;
+        }
+        if ((dsc->flags & PIX_FMT_HWACCEL) != 0) {
+            can_hwaccel = true;
+        }
+    }
+
+    if (!can_hwaccel) {
+        goto end;
+    }
+
+    if (!s->hwaccel_plugin->setup(avctx, avctx->width, avctx->height)) {
+        goto end;
+    }
+
+    // pick the plugin's preferred format if the codec offers it
+    for (i = 0; pi_fmt[i] != PIX_FMT_NONE; ++i) {
+        if (pi_fmt[i] == s->hwaccel_plugin->pix_fmt) {
+            break;
+        }
+    }
+
+    if (pi_fmt[i] == PIX_FMT_NONE) {
+        goto end;
+    }
+
+    INFO("HW_ACCEL is enabled with pix_fmt [%s]\n", av_get_pix_fmt_name(pi_fmt[i]));
+    context->is_hwaccel = true;
+    return pi_fmt[i];
+
+end:
+    INFO("HW_ACCEL is disabled\n");
+    context->is_hwaccel = false;
+    return avcodec_default_get_format(avctx, pi_fmt);
+}
+
+// AVCodecContext.get_buffer callback: delegate frame allocation to the
+// hwaccel plugin when HW acceleration was negotiated in get_format(),
+// otherwise use avcodec's default allocator.
+static int get_buffer(struct AVCodecContext *avctx, AVFrame *frame) {
+    CodecContext *context = (CodecContext *)avctx->opaque;
+
+    if (context->is_hwaccel) {
+        return context->state->hwaccel_plugin->get_buffer(avctx, frame);
+    }
+
+    return avcodec_default_get_buffer(avctx, frame);
+}
+
+// AVCodecContext.release_buffer callback: mirror of get_buffer() — return
+// the frame to the hwaccel plugin when HW acceleration is active, else to
+// avcodec's default release path.
+static void release_buffer(struct AVCodecContext *avctx, AVFrame *frame) {
+    CodecContext *context = (CodecContext *)avctx->opaque;
+
+    if (context->is_hwaccel) {
+        return context->state->hwaccel_plugin->release_buffer(avctx, frame);
+    }
+
+    return avcodec_default_release_buffer(avctx, frame);
+}
+
+// Allocate the AVCodecContext and AVFrame for context slot ctx_id and wire
+// in the hwaccel-aware callbacks above. Returns the new context.
+// NOTE(review): the avcodec_alloc_context3() result is dereferenced without
+// a NULL check — confirm allocation failure is impossible or handled upstream
+// (codec_init() does check the returned pointer, but only after the
+// assignments below would already have run).
+static AVCodecContext *maru_brill_codec_alloc_context(MaruBrillCodecState *s, int ctx_id)
+{
+    TRACE("enter: %s\n", __func__);
+
+    TRACE("allocate %d of context and frame.\n", ctx_id);
+
+    CONTEXT(s, ctx_id)->avctx = avcodec_alloc_context3(NULL);
+
+    AVCodecContext *avctx = CONTEXT(s, ctx_id)->avctx;
+    avctx->get_format = get_format;
+    avctx->get_buffer = get_buffer;
+    avctx->reget_buffer = avcodec_default_reget_buffer;
+    avctx->release_buffer = release_buffer;
+    avctx->opaque = CONTEXT(s, ctx_id);
+
+    CONTEXT(s, ctx_id)->frame = avcodec_alloc_frame();
+    CONTEXT(s, ctx_id)->opened_context = false;
+    CONTEXT(s, ctx_id)->state = s;
+
+    TRACE("leave: %s\n", __func__);
+
+    return avctx;
+}
+
+// Parse the codec request header from guest memory — a 32-bit encode flag
+// followed by a 32-byte codec name — and look up the matching FFmpeg
+// encoder or decoder by name. Returns NULL when no such codec exists.
+// NOTE(review): codec_name is only NUL-terminated if the guest's 32-byte
+// field contains a terminator — confirm the guest always pads with zeros.
+static AVCodec *maru_brill_codec_find_avcodec(uint8_t *mem_buf)
+{
+    AVCodec *codec = NULL;
+    int32_t encode, size = 0;
+    char codec_name[32] = {0, };
+
+    memcpy(&encode, mem_buf, sizeof(encode));
+    size = sizeof(encode);
+    memcpy(codec_name, mem_buf + size, sizeof(codec_name));
+    size += sizeof(codec_name);
+
+    TRACE("type: %d, name: %s\n", encode, codec_name);
+
+    if (encode) {
+        codec = avcodec_find_encoder_by_name (codec_name);
+    } else {
+        codec = avcodec_find_decoder_by_name (codec_name);
+    }
+    INFO("%s!! find %s %s\n", codec ? "success" : "failure",
+        codec_name, encode ? "encoder" : "decoder");
+
+    return codec;
+}
+
+// Deserialize codec-open parameters written by the guest, in wire order:
+// struct video_data, struct audio_data, bitrate, codec_tag, then extradata
+// (size followed by payload). Extradata is allocated with
+// FF_INPUT_BUFFER_PADDING_SIZE of zeroed padding rounded up to a 4-byte
+// multiple, as FFmpeg decoders/parsers require readable padding.
+// NOTE(review): if av_mallocz() fails, avctx->extradata stays NULL while
+// extradata_size remains nonzero — confirm downstream users tolerate that.
+static void read_codec_init_data(AVCodecContext *avctx, uint8_t *mem_buf)
+{
+    struct video_data video = { 0, };
+    struct audio_data audio = { 0, };
+    int bitrate = 0, size = 0;
+
+    memcpy(&video, mem_buf + size, sizeof(video));
+    size = sizeof(video);
+    serialize_video_data(&video, avctx);
+
+    memcpy(&audio, mem_buf + size, sizeof(audio));
+    size += sizeof(audio);
+    serialize_audio_data(&audio, avctx);
+
+    memcpy(&bitrate, mem_buf + size, sizeof(bitrate));
+    size += sizeof(bitrate);
+    if (bitrate) {
+        avctx->bit_rate = bitrate;
+    }
+
+    memcpy(&avctx->codec_tag, mem_buf + size, sizeof(avctx->codec_tag));
+    size += sizeof(avctx->codec_tag);
+    memcpy(&avctx->extradata_size,
+            mem_buf + size, sizeof(avctx->extradata_size));
+    size += sizeof(avctx->extradata_size);
+    INFO("extradata size: %d.\n", avctx->extradata_size);
+
+    if (avctx->extradata_size > 0) {
+        avctx->extradata =
+            av_mallocz(ROUND_UP_X(avctx->extradata_size +
+                        FF_INPUT_BUFFER_PADDING_SIZE, 4));
+        if (avctx->extradata) {
+            memcpy(avctx->extradata, mem_buf + size, avctx->extradata_size);
+        }
+    } else {
+        TRACE("no extra data.\n");
+        // allocate padding-only extradata so consumers always see a buffer
+        avctx->extradata =
+            av_mallocz(ROUND_UP_X(FF_INPUT_BUFFER_PADDING_SIZE, 4));
+    }
+}
+
+// Write the codec-open results back for the guest. Only audio codecs have
+// payload here: sample_fmt, frame_size (samples per packet, set by
+// avcodec_open2), and bytes-per-sample. AAC encoders report the S16 size
+// because their raw input is supplied as S16 and converted. Returns the
+// number of bytes written (0 for video codecs).
+static int write_codec_init_data(AVCodecContext *avctx, uint8_t *mem_buf)
+{
+    int size = 0;
+
+    if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
+        int osize = av_get_bytes_per_sample(avctx->sample_fmt);
+
+        INFO("avcodec_open. sample_fmt %d, bytes_per_sample %d\n", avctx->sample_fmt, osize);
+
+        if ((avctx->codec_id == AV_CODEC_ID_AAC) && avctx->codec->encode2) {
+            osize = av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
+        }
+        memcpy(mem_buf, &avctx->sample_fmt, sizeof(avctx->sample_fmt));
+        size = sizeof(avctx->sample_fmt);
+
+        // frame_size: samples per packet, initialized when calling 'init'
+        memcpy(mem_buf + size, &avctx->frame_size, sizeof(avctx->frame_size));
+        size += sizeof(avctx->frame_size);
+
+        memcpy(mem_buf + size, &osize, sizeof(osize));
+        size += sizeof(osize);
+    }
+
+    return size;
+}
+
static AVCodecParserContext *maru_brill_codec_parser_init(AVCodecContext *avctx)
{
AVCodecParserContext *parser = NULL;
return parser;
}
+
+// CODEC_INIT handler: allocate an AVCodecContext for ctx_id, look up the
+// codec requested in guest memory, apply the guest-supplied parameters and
+// open the codec. The reply pushed to the write queue is: ret, then (on
+// success) the write_codec_init_data() payload, extradata_size and the
+// extradata bytes.
+// NOTE(review): elem->opaque is dereferenced without checking elem — the
+// caller presumably guarantees an input buffer for CODEC_INIT; confirm.
+static bool codec_init(MaruBrillCodecState *s, int ctx_id, void *data_buf)
+{
+    AVCodecContext *avctx = NULL;
+    AVCodec *codec = NULL;
+    int size = 0, ret = -1;
+    DeviceMemEntry *elem = NULL;
+    uint8_t *tempbuf = NULL;
+    int tempbuf_size = 0;
+
+    TRACE("enter: %s\n", __func__);
+
+    elem = (DeviceMemEntry *)data_buf;
+
+    // allocate AVCodecContext
+    avctx = maru_brill_codec_alloc_context(s, ctx_id);
+    if (!avctx) {
+        ERR("[%d] failed to allocate context.\n", __LINE__);
+        ret = -1;
+    } else {
+        codec = maru_brill_codec_find_avcodec(elem->opaque);
+        if (codec) {
+            size = sizeof(int32_t) + 32; // buffer size of codec_name
+            read_codec_init_data(avctx, elem->opaque + size);
+
+            // in case of aac encoder, sample format is float
+            if (!strcmp(codec->name, "aac") && codec->encode2) {
+                TRACE("convert sample format into SAMPLE_FMT_FLTP\n");
+                avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
+
+                // the native aac encoder is experimental in this FFmpeg
+                avctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
+
+                INFO("aac encoder!! channels %d channel_layout %lld\n", avctx->channels, avctx->channel_layout);
+                avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
+            }
+
+            TRACE("audio sample format %d\n", avctx->sample_fmt);
+            TRACE("strict_std_compliance %d\n", avctx->strict_std_compliance);
+
+            ret = avcodec_open2(avctx, codec, NULL);
+            INFO("avcodec_open. ret 0x%x ctx_id %d\n", ret, ctx_id);
+
+            TRACE("channels %d sample_rate %d sample_fmt %d "
+                "channel_layout %lld frame_size %d\n",
+                avctx->channels, avctx->sample_rate, avctx->sample_fmt,
+                avctx->channel_layout, avctx->frame_size);
+
+            // reply sizing: audio init fields + extradata; video codecs
+            // write less than this (write_codec_init_data returns 0), so the
+            // buffer is over-reserved rather than undersized
+            tempbuf_size = (sizeof(avctx->sample_fmt) + sizeof(avctx->frame_size)
+                           + sizeof(avctx->extradata_size) + avctx->extradata_size)
+                           + sizeof(int);
+
+            CONTEXT(s, ctx_id)->opened_context = true;
+            CONTEXT(s, ctx_id)->parser_ctx =
+                maru_brill_codec_parser_init(avctx);
+        } else {
+            ERR("failed to find codec. ctx_id: %d\n", ctx_id);
+            ret = -1;
+        }
+    }
+
+    tempbuf_size += sizeof(ret);
+
+    tempbuf = g_malloc(tempbuf_size);
+    if (!tempbuf) {
+        ERR("failed to allocate a buffer\n");
+        tempbuf_size = 0;
+    } else {
+        memcpy(tempbuf, &ret, sizeof(ret));
+        size = sizeof(ret);
+        if (ret < 0) {
+            ERR("failed to open codec contex.\n");
+        } else {
+            size += write_codec_init_data(avctx, tempbuf + size);
+            TRACE("codec_init. copyback!! size %d\n", size);
+            {
+                memcpy(tempbuf + size, &avctx->extradata_size, sizeof(avctx->extradata_size));
+                size += sizeof(avctx->extradata_size);
+
+                INFO("codec_init. extradata_size: %d\n", avctx->extradata_size);
+                if (avctx->extradata) {
+                    memcpy(tempbuf + size, avctx->extradata, avctx->extradata_size);
+                    size += avctx->extradata_size;
+                }
+            }
+        }
+    }
+
+    brillcodec_push_writequeue(s, tempbuf, tempbuf_size, ctx_id, NULL);
+
+    TRACE("leave: %s\n", __func__);
+
+    return true;
+}
+
+// CODEC_DEINIT handler: close and free the codec context, decode frame and
+// parser context for ctx_id, then push an empty reply to the write queue.
+// Returns false (without freeing anything) when the context was already
+// released.
+// NOTE(review): the early `return false` path skips
+// brillcodec_push_writequeue() — confirm the guest does not block waiting
+// for a reply in that case.
+static bool codec_deinit(MaruBrillCodecState *s, int ctx_id, void *data_buf)
+{
+    AVCodecContext *avctx = NULL;
+    AVFrame *frame = NULL;
+    AVCodecParserContext *parserctx = NULL;
+
+    TRACE("enter: %s\n", __func__);
+
+    avctx = CONTEXT(s, ctx_id)->avctx;
+    frame = CONTEXT(s, ctx_id)->frame;
+    parserctx = CONTEXT(s, ctx_id)->parser_ctx;
+    if (!avctx || !frame) {
+        TRACE("%d of AVCodecContext or AVFrame is NULL. "
+            " Those resources have been released before.\n", ctx_id);
+        return false;
+    }
+
+    INFO("close avcontext of %d\n", ctx_id);
+    avcodec_close(avctx);
+    CONTEXT(s, ctx_id)->opened_context = false;
+
+    if (avctx->extradata) {
+        TRACE("free context extradata\n");
+        av_free(avctx->extradata);
+        CONTEXT(s, ctx_id)->avctx->extradata = NULL;
+    }
+
+    // frame and avctx are known non-NULL here (checked above); the guards
+    // below are kept for symmetry with the other teardown steps
+    if (frame) {
+        TRACE("free frame\n");
+        // av_free(frame);
+        avcodec_free_frame(&frame);
+        CONTEXT(s, ctx_id)->frame = NULL;
+    }
+
+    if (avctx) {
+        TRACE("free codec context\n");
+        av_free(avctx);
+        CONTEXT(s, ctx_id)->avctx = NULL;
+    }
+
+    if (parserctx) {
+        INFO("close parser context\n");
+        av_parser_close(parserctx);
+        CONTEXT(s, ctx_id)->parser_ctx = NULL;
+    }
+
+    // empty reply: just signal completion to the guest
+    brillcodec_push_writequeue(s, NULL, 0, ctx_id, NULL);
+
+    TRACE("leave: %s\n", __func__);
+
+    return true;
+}