int64_t channel_layout;
};
/* Video stream parameters reported back to the guest-side codec driver.
 * Filled from an AVCodecContext by fill_video_data(); embedded in
 * struct video_decode_output. Packed: the layout must match the guest
 * definition byte-for-byte. */
struct video_data {
    int32_t width;            // AVCodecContext.width
    int32_t height;           // AVCodecContext.height
    int32_t fps_n;            // time_base numerator
    int32_t fps_d;            // time_base denominator
    int32_t par_n;            // sample (pixel) aspect ratio numerator
    int32_t par_d;            // sample (pixel) aspect ratio denominator
    int32_t pix_fmt;          // AVPixelFormat; overridden by the hwaccel
                              // plugin's output format when is_hwaccel is set
    int32_t bpp;              // bits_per_coded_sample
    int32_t ticks_per_frame;  // AVCodecContext.ticks_per_frame
} __attribute__((packed));
+
/* Guest -> host payload for a video decode request.
 * Packed: layout must match the guest-side driver byte-for-byte.
 * The compressed bitstream follows immediately after the fixed fields;
 * `inbuf` is only an address marker for that trailing data (the host
 * takes &input->inbuf), not a real one-byte field.
 * NOTE(review): a C99 flexible array member (uint8_t inbuf[]) would
 * express this more safely, but it changes sizeof() -- confirm against
 * the guest driver before switching. */
struct video_decode_input {
    int32_t inbuf_size;   // byte length of the trailing bitstream
    int32_t idx;          // forwarded to parse_and_decode_video() --
                          // presumably a picture/buffer index; confirm with caller
    int64_t in_offset;    // forwarded to parse_and_decode_video() -- confirm semantics
    uint8_t inbuf; // for pointing inbuf address
} __attribute__((packed));
+
/* Host -> guest reply for a decode request, written at the start of the
 * guest buffer by copy_decode_data(). Packed: must match the guest
 * definition byte-for-byte. */
struct video_decode_output {
    int32_t len;              // return value of parse_and_decode_video()
    int32_t got_picture;      // 1 if a frame was produced, else 0
    struct video_data data;   // current stream parameters
} __attribute__((packed));
+
/* Guest -> host payload for a video encode request.
 * Packed: layout must match the guest-side driver byte-for-byte.
 * Raw picture bytes follow the fixed fields; `inbuf` is only an
 * address marker for that trailing data (&input->inbuf), not a real
 * one-byte field. */
struct video_encode_input {
    int32_t inbuf_size;     // byte length of the trailing raw picture
    int64_t in_timestamp;   // presentation timestamp; rescaled with
                            // 1/(G_USEC_PER_SEC*1000) -- presumably
                            // nanoseconds, confirm with guest driver
    uint8_t inbuf; // for pointing inbuf address
} __attribute__((packed));
+
/* Host -> guest reply for an encode request, written by
 * copy_encode_data(). Packed: must match the guest definition
 * byte-for-byte. Encoded bytes follow the fixed fields; `data` is only
 * an address marker (&output->data). */
struct video_encode_output {
    int32_t len;           // encode result forwarded to the guest
    int32_t coded_frame;   // 1 if avctx->coded_frame was available
    int32_t key_frame;     // coded_frame->key_frame; 0 means the frame
                           // cannot be decoded independently
    uint8_t data; // for pointing outbuf address
} __attribute__((packed));
+
/* Per-request result holder passed through the write queue to a
 * DataHandler. Allocated with g_malloc0() by the codec entry points and
 * freed by release(). */
typedef struct DataContainer {
    // common
    bool is_got;          // got_picture / got_frame flag from the codec
    int32_t len;          // codec return value forwarded to the guest
    AVCodecContext *avctx;

    // for decoder
    size_t picture_buffer_offset;   // guest-buffer offset where raw picture
                                    // bytes go; non-zero also means "write a
                                    // video_decode_output header first"
    AVFrame *frame;       // decoded picture to copy back, or NULL

    // for encoder
    AVPacket *avpkt;      // encoded packet; owned here -- release() frees
                          // both the packet and its data buffer
} DataContainer;
+
+static void fill_video_data(const AVCodecContext *avctx,
+ struct video_data *video)
+{
+ memset(video, 0x00, sizeof(struct video_data));
+
+ video->width = avctx->width;
+ video->height = avctx->height;
+ video->fps_n = avctx->time_base.num;
+ video->fps_d = avctx->time_base.den;
+ video->pix_fmt = avctx->pix_fmt;
+ video->par_n = avctx->sample_aspect_ratio.num;
+ video->par_d = avctx->sample_aspect_ratio.den;
+ video->bpp = avctx->bits_per_coded_sample;
+ video->ticks_per_frame = avctx->ticks_per_frame;
+}
+
+
DeviceMemEntry *entry[CODEC_CONTEXT_MAX];
// define a queue to manage ioparam, context data
frame->width, frame->height, dst, pict_size);
}
-// default video decode data handler
+// video decode data handler
// FIXME: ignore "size" now...
-static void copy_picture(void *dst, void *opaque, size_t dummy)
+static void copy_decode_data(void *dst, void *opaque, size_t dummy)
{
- size_t size = sizeof(int32_t), offset = 0;
DataContainer *dc = (DataContainer *)opaque;
CodecContext *context = (CodecContext *)dc->avctx->opaque;
if (dc->picture_buffer_offset) {
- // FIXME: if video data is exist...
- *((int32_t *)dst) = dc->len;
- offset += size;
- *((int32_t *)(dst + offset)) = dc->got_picture;
- offset += size;
-
- struct video_data *data = (struct video_data *)(dst + offset);
- fill_video_data(dc->avctx, data);
+ // if output video data is exist...
+ struct video_decode_output *decode_output =
+ (struct video_decode_output *)dst;
+ decode_output->len = dc->len;
+ decode_output->got_picture = dc->is_got ? 1 : 0;
+ fill_video_data(dc->avctx, &decode_output->data);
if (context->is_hwaccel) {
- data->pix_fmt = context->state->hwaccel_plugin->output_pix_fmt;
+ decode_output->data.pix_fmt = context->state->hwaccel_plugin->output_pix_fmt;
}
}
if (dc->frame) {
- // FIXME: if picture is exist...
+ // if picture is exist...
if (context->is_hwaccel) {
context->state->hwaccel_plugin->get_picture(dst + dc->picture_buffer_offset, dc->frame);
} else {
static void release(void *opaque) {
DataContainer *dc = (DataContainer *)opaque;
+ if (dc->avpkt) {
+ g_free(dc->avpkt->data);
+ g_free(dc->avpkt);
+ }
g_free(dc);
}
// Write-queue handler for decoded video: copy_decode_data() serializes
// the result into the guest buffer; release() frees the DataContainer
// (and, when set, its AVPacket).
static DataHandler video_decode_data_handler = {
    .get_data = copy_decode_data,
    .release = release,
};
+
+static void copy_encode_data(void *dst, void *opaque, size_t dummy)
+{
+ DataContainer *dc = (DataContainer *)opaque;
+ struct video_encode_output *encode_output =
+ (struct video_encode_output *)dst;
+
+ encode_output->len = dc->len;
+ if (dc->len && dc->is_got) {
+ // inform gstreamer plugin about the status of encoded frames
+ // A flag for output buffer in gstreamer is depending on the status.
+ if (dc->avctx->coded_frame) {
+ encode_output->coded_frame = 1;
+ // if key_frame is 0, this frame cannot be decoded independently.
+ encode_output->key_frame = dc->avctx->coded_frame->key_frame;
+ }
+
+ memcpy(&encode_output->data, dc->avpkt->data, dc->avpkt->size);
+ }
+}
+
// Write-queue handler for encoded video: copy_encode_data() serializes
// the packet into the guest buffer; release() frees the DataContainer
// together with its AVPacket and data buffer.
static DataHandler video_encode_data_handler = {
    .get_data = copy_encode_data,
    .release = release,
};
CodecParam *ioparam = NULL;
ioparam = g_malloc0(sizeof(CodecParam));
- if (!ioparam) {
- ERR("failed to allocate ioparam\n");
- return;
- }
memcpy(ioparam, &s->ioparam, sizeof(CodecParam));
DeviceMemEntry *data_buf = NULL;
elem = g_malloc0(sizeof(CodecDataStg));
- if (!elem) {
- ERR("failed to allocate ioparam_queue. %d\n", sizeof(CodecDataStg));
- return;
- }
elem->param_buf = ioparam;
uint8_t *device_mem = mem_base + ioparam->mem_offset;
elem = g_malloc0(sizeof(DeviceMemEntry));
- if (!elem) {
- ERR("failed to allocate readqueue node. size: %d\n",
- sizeof(DeviceMemEntry));
- return NULL;
- }
memcpy(&readbuf_size, device_mem, sizeof(readbuf_size));
size = sizeof(readbuf_size);
ioparam->api_index, ioparam->ctx_index, ioparam->mem_offset);
} else {
readbuf = g_malloc0(readbuf_size);
- if (!readbuf) {
- ERR("failed to allocate a read buffer. size: %d\n", readbuf_size);
- } else {
- TRACE("copy input buffer from guest. ctx_id: %d, mem_offset: %x\n",
+ TRACE("copy input buffer from guest. ctx_id: %d, mem_offset: %x\n",
ioparam->ctx_index, ioparam->mem_offset);
- memcpy(readbuf, device_mem + size, readbuf_size);
- }
+ memcpy(readbuf, device_mem + size, readbuf_size);
}
// memset(device_mem, 0x00, sizeof(readbuf_size));
return ret;
}
-static bool codec_decode_video2(MaruBrillCodecState *s, int ctx_id, void *data_buf)
+static bool codec_decode_video_common(MaruBrillCodecState *s, int ctx_id,
+ void *data_buf, bool copy_picture)
{
AVCodecContext *avctx = NULL;
- AVFrame *picture = NULL;
+ AVFrame *frame = NULL;
AVCodecParserContext *pctx = NULL;
AVPacket avpkt;
- uint32_t got_picture = 0, len = -1;
- uint8_t *inbuf = NULL;
- int inbuf_size = 0, idx = 0, size = 0;
- int64_t in_offset = 0;
DeviceMemEntry *elem = NULL;
+ struct video_decode_input empty_input = { 0, };
+ struct video_decode_input *decode_input = &empty_input;
+ uint32_t got_picture = 0;
+ int32_t len = -1;
TRACE("enter: %s\n", __func__);
elem = (DeviceMemEntry *)data_buf;
- if (elem && elem->opaque) {
- memcpy(&inbuf_size, elem->opaque, sizeof(inbuf_size));
- size += sizeof(inbuf_size);
- memcpy(&idx, elem->opaque + size, sizeof(idx));
- size += sizeof(idx);
- memcpy(&in_offset, elem->opaque + size, sizeof(in_offset));
- size += sizeof(in_offset);
- TRACE("decode_video. inbuf_size %d\n", inbuf_size);
-
- if (inbuf_size > 0) {
- inbuf = elem->opaque + size;
- }
- } else {
+ if (!elem || !elem->opaque) {
TRACE("decode_video. no input buffer\n");
- // FIXME: improve error handling
- // return false;
+ }
+ else {
+ decode_input = elem->opaque;
}
av_init_packet(&avpkt);
- avpkt.data = inbuf;
- avpkt.size = inbuf_size;
+ avpkt.data = &decode_input->inbuf;
+ avpkt.size = decode_input->inbuf_size;
avctx = CONTEXT(s, ctx_id)->avctx;
- picture = CONTEXT(s, ctx_id)->frame;
- if (!avctx) {
- ERR("decode_video. %d of AVCodecContext is NULL.\n", ctx_id);
- } else if (!avctx->codec) {
- ERR("decode_video. %d of AVCodec is NULL.\n", ctx_id);
- } else if (!picture) {
- ERR("decode_video. %d of AVFrame is NULL.\n", ctx_id);
- } else {
- TRACE("decode_video. bitrate %d resolution(%dx%d)\n",
- avctx->bit_rate, avctx->width, avctx->height);
-
- pctx = CONTEXT(s, ctx_id)->parser_ctx;
+ frame = CONTEXT(s, ctx_id)->frame;
+ pctx = CONTEXT(s, ctx_id)->parser_ctx;
- len = parse_and_decode_video(avctx, picture, pctx, ctx_id,
- &avpkt, &got_picture, idx, in_offset);
+ if(!avctx || !avctx->codec || !frame) {
+ ERR("critical error !!!\n");
+ assert(0);
}
+ TRACE("decode_video. bitrate %d resolution(%dx%d)\n",
+ avctx->bit_rate, avctx->width, avctx->height);
+
+ len = parse_and_decode_video(avctx, frame, pctx, ctx_id,
+ &avpkt, &got_picture, decode_input->idx, decode_input->in_offset);
+
DataContainer *dc = g_malloc0(sizeof(DataContainer));
- dc->picture_buffer_offset = OFFSET_PICTURE_BUFFER;
+ dc->picture_buffer_offset = OFFSET_PICTURE_BUFFER; // we have output video data
dc->len = len;
- dc->got_picture = got_picture;
+ dc->is_got = got_picture;
dc->avctx = avctx;
- if(got_picture) {
- dc->frame = picture;
+ if(got_picture && copy_picture) { // we have output picture
+ dc->frame = frame;
}
brillcodec_push_write_queue(s, dc, 0, ctx_id, &video_decode_data_handler);
// Legacy decode entry point: decodes one frame but does NOT copy the
// picture back in the same response (copy_picture == false); the guest
// fetches the pixels separately via codec_picture_copy().
static bool codec_decode_video(MaruBrillCodecState *s, int ctx_id, void *data_buf)
{
    return codec_decode_video_common(s, ctx_id, data_buf, false);
}
// decode_video2 entry point: decodes one frame and copies the decoded
// picture back to the guest in the same response (copy_picture == true).
static bool codec_decode_video2(MaruBrillCodecState *s, int ctx_id, void *data_buf)
{
    return codec_decode_video_common(s, ctx_id, data_buf, true);
}
-// for old decode API
-static bool codec_picture_copy (MaruBrillCodecState *s, int ctx_id, void *elem)
+static bool codec_picture_copy(MaruBrillCodecState *s, int ctx_id, void *elem)
{
- AVCodecContext *avctx = NULL;
- AVFrame *frame = NULL;
- bool ret = true;
+ DataContainer *dc = g_malloc0(sizeof(DataContainer));
TRACE("enter: %s\n", __func__);
TRACE("copy decoded image of %d context.\n", ctx_id);
- avctx = CONTEXT(s, ctx_id)->avctx;
- frame = CONTEXT(s, ctx_id)->frame;
-
- DataContainer *dc = g_malloc0(sizeof(DataContainer));
-
- dc->frame = frame;
- dc->avctx = avctx;
+ dc->avctx = CONTEXT(s, ctx_id)->avctx;
+ dc->frame = CONTEXT(s, ctx_id)->frame;
brillcodec_push_write_queue(s, dc, 0, ctx_id, &video_decode_data_handler);
TRACE("leave: %s\n", __func__);
- return ret;
+ return true;
}
static bool codec_decode_audio(MaruBrillCodecState *s, int ctx_id, void *data_buf)
av_free(out_buf);
}
-
TRACE("leave: %s\n", __func__);
return true;
}
{
AVCodecContext *avctx = NULL;
AVFrame *pict = NULL;
- AVPacket avpkt;
+ AVPacket *avpkt = g_malloc0(sizeof(AVPacket));
uint8_t *inbuf = NULL, *outbuf = NULL;
- int inbuf_size = 0, outbuf_size = 0;
- int got_frame = 0, ret = 0, size = 0;
- int64_t in_timestamp = 0;
- int coded_frame = 0, key_frame = 0;
+ int outbuf_size = 0;
+ int got_frame = 0, ret = 0;
DeviceMemEntry *elem = NULL;
- uint8_t *tempbuf = NULL;
- int tempbuf_size = 0;
+ struct video_encode_input empty_input = { 0, };
+ struct video_encode_input *encode_input = &empty_input;
TRACE("enter: %s\n", __func__);
elem = (DeviceMemEntry *)data_buf;
- if (elem && elem->opaque) {
- memcpy(&inbuf_size, elem->opaque, sizeof(inbuf_size));
- size += sizeof(inbuf_size);
- memcpy(&in_timestamp, elem->opaque + size, sizeof(in_timestamp));
- size += sizeof(in_timestamp);
- TRACE("encode video. inbuf_size %d\n", inbuf_size);
-
- if (inbuf_size > 0) {
- inbuf = elem->opaque + size;
- }
- } else {
- TRACE("encode video. no input buffer.\n");
- // FIXME: improve error handling
- // return false;
+ if (!elem || !elem->opaque) {
+ TRACE("encode_video. no input buffer\n");
+ }
+ else {
+ encode_input = elem->opaque;
}
// initialize AVPacket
- av_init_packet(&avpkt);
- avpkt.data = NULL;
- avpkt.size = 0;
+ av_init_packet(avpkt);
avctx = CONTEXT(s, ctx_id)->avctx;
pict = CONTEXT(s, ctx_id)->frame;
- if (!avctx || !pict) {
- ERR("%d of context or frame is NULL\n", ctx_id);
- } else if (!avctx->codec) {
- ERR("%d of AVCodec is NULL.\n", ctx_id);
- } else {
- TRACE("pixel format: %d inbuf: %p, picture data: %p\n",
+
+ if(!avctx || !avctx->codec) {
+ ERR("critical error !!!\n");
+ assert(0);
+ }
+
+ TRACE("pixel format: %d inbuf: %p, picture data: %p\n",
avctx->pix_fmt, inbuf, pict->data[0]);
- ret = avpicture_fill((AVPicture *)pict, inbuf, avctx->pix_fmt,
- avctx->width, avctx->height);
- if (ret < 0) {
- ERR("after avpicture_fill, ret:%d\n", ret);
+ ret = avpicture_fill((AVPicture *)pict, &encode_input->inbuf, avctx->pix_fmt,
+ avctx->width, avctx->height);
+ if (ret < 0) {
+ ERR("after avpicture_fill, ret:%d\n", ret);
+ } else {
+ if (avctx->time_base.num == 0) {
+ pict->pts = AV_NOPTS_VALUE;
} else {
- if (avctx->time_base.num == 0) {
- pict->pts = AV_NOPTS_VALUE;
- } else {
- AVRational bq =
- {1, (G_USEC_PER_SEC * G_GINT64_CONSTANT(1000))};
- pict->pts = av_rescale_q(in_timestamp, bq, avctx->time_base);
- }
- TRACE("encode video. ticks_per_frame:%d, pts:%lld\n",
+ AVRational bq =
+ {1, (G_USEC_PER_SEC * G_GINT64_CONSTANT(1000))};
+ pict->pts = av_rescale_q(encode_input->in_timestamp, bq, avctx->time_base);
+ }
+ TRACE("encode video. ticks_per_frame:%d, pts:%lld\n",
avctx->ticks_per_frame, pict->pts);
- outbuf_size =
- (avctx->width * avctx->height * 6) + FF_MIN_BUFFER_SIZE;
+ outbuf_size =
+ (avctx->width * avctx->height * 6) + FF_MIN_BUFFER_SIZE;
- outbuf = g_malloc0(outbuf_size);
+ outbuf = g_malloc0(outbuf_size);
- avpkt.data = outbuf;
- avpkt.size = outbuf_size;
+ avpkt->data = outbuf;
+ avpkt->size = outbuf_size;
- if (!outbuf) {
- ERR("failed to allocate a buffer of encoding video.\n");
- } else {
- ret = avcodec_encode_video2(avctx, &avpkt, pict, &got_frame);
+ ret = avcodec_encode_video2(avctx, avpkt, pict, &got_frame);
- TRACE("encode video. ret %d got_picture %d outbuf_size %d\n", ret, got_frame, avpkt.size);
- if (avctx->coded_frame) {
- TRACE("encode video. keyframe %d\n", avctx->coded_frame->key_frame);
- }
- }
+ TRACE("encode video. ret %d got_frame %d outbuf_size %d\n", ret, got_frame, avpkt->size);
+ if (avctx->coded_frame) {
+ TRACE("encode video. keyframe %d\n", avctx->coded_frame->key_frame);
}
}
- tempbuf_size = sizeof(ret);
- if (ret < 0) {
- ERR("failed to encode video. ctx_id %d ret %d\n", ctx_id, ret);
- } else {
- tempbuf_size += avpkt.size + sizeof(coded_frame) + sizeof(key_frame);
- }
-
// write encoded video data
- tempbuf = g_malloc0(tempbuf_size);
- if (!tempbuf) {
- ERR("encode video. failed to allocate encoded out buffer.\n");
- } else {
- memcpy(tempbuf, &avpkt.size, sizeof(avpkt.size));
- size = sizeof(avpkt.size);
-
- if ((got_frame) && outbuf) {
- // inform gstreamer plugin about the status of encoded frames
- // A flag for output buffer in gstreamer is depending on the status.
- if (avctx->coded_frame) {
- coded_frame = 1;
- // if key_frame is 0, this frame cannot be decoded independently.
- key_frame = avctx->coded_frame->key_frame;
- }
- memcpy(tempbuf + size, &coded_frame, sizeof(coded_frame));
- size += sizeof(coded_frame);
- memcpy(tempbuf + size, &key_frame, sizeof(key_frame));
- size += sizeof(key_frame);
- memcpy(tempbuf + size, outbuf, avpkt.size);
- }
- }
-
- if (outbuf) {
- TRACE("release encoded output buffer. %p\n", outbuf);
- g_free(outbuf);
- }
+ DataContainer *dc = g_malloc0(sizeof(DataContainer));
+ dc->len = ret;
+ dc->is_got = got_frame;
+ dc->avctx = avctx;
+ dc->avpkt = avpkt;
- brillcodec_push_write_queue(s, tempbuf, tempbuf_size, ctx_id, NULL);
+ brillcodec_push_write_queue(s, dc, 0, ctx_id, &video_encode_data_handler);
TRACE("leave: %s\n", __func__);
return true;