#include "maru_brillcodec.h"
-#include "libavresample/avresample.h"
-#include "libavutil/mathematics.h"
-#include "libavutil/opt.h"
-#include "libavformat/avformat.h"
-
-#include "debug_ch.h"
-
/* define debug channel */
MULTI_DEBUG_CHANNEL(qemu, brillcodec);
+// device
+#define CODEC_DEVICE_NAME "codec-pci"
+#define CODEC_DEVICE_THREAD "codec-workthread"
+#define CODEC_VERSION 2
+
// device memory
#define CODEC_META_DATA_SIZE (256)
+#define CODEC_MEM_SIZE (32 * 1024 * 1024)
+#define CODEC_REG_SIZE (256)
+
// libav
#define GEN_MASK(x) ((1 << (x)) - 1)
#define ROUND_UP_X(v, x) (((v) + GEN_MASK(x)) & ~GEN_MASK(x))
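// e.g. ROUND_UP_X(v, 2) rounds v up to the next multiple of 4: GEN_MASK(2) == 0x3, so the result is (v + 3) & ~3.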
#define DEFAULT_VIDEO_GOP_SIZE 15
-enum codec_api_type {
- CODEC_INIT = 0,
- CODEC_DECODE_VIDEO,
- CODEC_ENCODE_VIDEO,
- CODEC_DECODE_AUDIO,
- CODEC_ENCODE_AUDIO,
- CODEC_PICTURE_COPY,
- CODEC_DEINIT,
- CODEC_FLUSH_BUFFERS,
- };
-
-enum codec_type {
- CODEC_TYPE_UNKNOWN = -1,
- CODEC_TYPE_DECODE,
- CODEC_TYPE_ENCODE,
-};
-
-struct video_data {
- int32_t width;
- int32_t height;
- int32_t fps_n;
- int32_t fps_d;
- int32_t par_n;
- int32_t par_d;
- int32_t pix_fmt;
- int32_t bpp;
- int32_t ticks_per_frame;
-};
-struct audio_data {
- int32_t channels;
- int32_t sample_rate;
- int32_t block_align;
- int32_t depth;
- int32_t sample_fmt;
- int32_t frame_size;
- int32_t bits_per_smp_fmt;
- int32_t reserved;
- int64_t channel_layout;
-};
-DeviceMemEntry *entry[CODEC_CONTEXT_MAX];
-// define a queue to manage ioparam, context data
+// define a queue to manage ioparam, context data
+typedef struct DeviceMemEntry {
+    uint8_t *buf;
+    uint32_t buf_size;
+    uint32_t ctx_id;
+    QTAILQ_ENTRY(DeviceMemEntry) node;
+} DeviceMemEntry;
typedef struct CodecDataStg {
CodecParam *param_buf;
DeviceMemEntry *data_buf;
} CodecDataStg;
// define two queue to store input and output buffers.
-struct codec_wq codec_wq = QTAILQ_HEAD_INITIALIZER(codec_wq);
+static QTAILQ_HEAD(codec_wq, DeviceMemEntry) codec_wq =
+ QTAILQ_HEAD_INITIALIZER(codec_wq);
+
static QTAILQ_HEAD(codec_rq, CodecDataStg) codec_rq =
QTAILQ_HEAD_INITIALIZER(codec_rq);
+static DeviceMemEntry *entry[CODEC_CONTEXT_MAX];
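+// staging slot per context: holds the element popped from codec_wq until the guest fetches its payload.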
+
+// pixel info
+typedef struct PixFmtInfo {
+ uint8_t x_chroma_shift;
+ uint8_t y_chroma_shift;
+} PixFmtInfo;
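+// x/y_chroma_shift are log2 subsampling factors for the chroma planes,
+// e.g. YUV420P (shift 1/1) stores chroma at half width and half height.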
+
+static PixFmtInfo pix_fmt_info[PIX_FMT_NB];
+
+// thread
+#define DEFAULT_WORKER_THREAD_CNT 8
+
+static void *maru_brill_codec_threads(void *opaque);
+
+// static void maru_brill_codec_reset_parser_info(MaruBrillCodecState *s, int32_t ctx_index);
+static int maru_brill_codec_query_list(MaruBrillCodecState *s);
+static void maru_brill_codec_release_context(MaruBrillCodecState *s, int32_t value);
+
// codec functions
static bool codec_init(MaruBrillCodecState *, int, void *);
static bool codec_deinit(MaruBrillCodecState *, int, void *);
static AVCodecParserContext *maru_brill_codec_parser_init(AVCodecContext *avctx);
+static void maru_brill_codec_pop_writequeue(MaruBrillCodecState *s, uint32_t ctx_idx);
static void maru_brill_codec_push_readqueue(MaruBrillCodecState *s, CodecParam *ioparam);
-static void maru_brill_codec_push_writequeue(MaruBrillCodecState *s, void* opaque,
- size_t data_size, int ctx_id,
- DataHandler *handler);
+static void maru_brill_codec_push_writequeue(MaruBrillCodecState *s, void* buf,
+ uint32_t buf_size, int ctx_id);
static void *maru_brill_codec_store_inbuf(uint8_t *mem_base, CodecParam *ioparam);
-// default handler
-static void default_get_data(void *dst, void *src, size_t size, enum AVPixelFormat pix_fmt) {
- memcpy(dst, src, size);
-}
+static void maru_brill_codec_reset(DeviceState *s);
+
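+// size the worker pool to the host core count, but never below DEFAULT_WORKER_THREAD_CNT.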
+static void maru_brill_codec_get_cpu_cores(MaruBrillCodecState *s)
+{
+ s->worker_thread_cnt = get_number_of_processors();
+ if (s->worker_thread_cnt < DEFAULT_WORKER_THREAD_CNT) {
+ s->worker_thread_cnt = DEFAULT_WORKER_THREAD_CNT;
+ }
-static void default_release(void *opaque) {
- g_free(opaque);
+ TRACE("number of threads: %d\n", s->worker_thread_cnt);
}
-static DataHandler default_data_handler = {
- .get_data = default_get_data,
- .release = default_release,
-};
+static void maru_brill_codec_threads_create(MaruBrillCodecState *s)
+{
+ int index;
+ QemuThread *pthread = NULL;
+
+ TRACE("enter: %s\n", __func__);
+ pthread = g_malloc(sizeof(QemuThread) * s->worker_thread_cnt);
+ if (!pthread) {
+ ERR("failed to allocate threadpool memory.\n");
+ return;
+ }
-// default video decode data handler
-static void extract(void *dst, void *src, size_t size, enum AVPixelFormat pix_fmt) {
- AVFrame *frame = (AVFrame *)src;
- avpicture_layout((AVPicture *)src, pix_fmt, frame->width, frame->height, dst, size);
-}
+ qemu_cond_init(&s->threadpool.cond);
+ qemu_mutex_init(&s->threadpool.mutex);
-static void release(void *buf) {}
+ s->is_thread_running = true;
-static DataHandler default_video_decode_data_handler = {
- .get_data = extract,
- .release = release,
-};
+ qemu_mutex_lock(&s->context_mutex);
+ s->idle_thread_cnt = 0;
+ qemu_mutex_unlock(&s->context_mutex);
+
+ for (index = 0; index < s->worker_thread_cnt; index++) {
+ qemu_thread_create(&pthread[index], CODEC_DEVICE_THREAD,
+ maru_brill_codec_threads, (void *)s, QEMU_THREAD_JOINABLE);
+ }
+
+ s->threadpool.threads = pthread;
+
+ TRACE("leave: %s\n", __func__);
+}
static void maru_brill_codec_thread_exit(MaruBrillCodecState *s)
{
TRACE("leave: %s\n", __func__);
}
-void maru_brill_codec_wakeup_threads(MaruBrillCodecState *s, int api_index)
+static void maru_brill_codec_wakeup_threads(MaruBrillCodecState *s, int api_index)
{
CodecParam *ioparam = NULL;
qemu_mutex_lock(&s->context_mutex);
if (ioparam->api_index != CODEC_INIT) {
- if (!CONTEXT(s, ioparam->ctx_index).opened_context) {
+ if (!s->context[ioparam->ctx_index].opened_context) {
INFO("abandon api %d for context %d\n",
ioparam->api_index, ioparam->ctx_index);
qemu_mutex_unlock(&s->context_mutex);
TRACE("after sending conditional signal\n");
}
-void *maru_brill_codec_threads(void *opaque)
+static void *maru_brill_codec_threads(void *opaque)
{
MaruBrillCodecState *s = (MaruBrillCodecState *)opaque;
bool ret = false;
TRACE("api_id: %d ctx_id: %d\n", api_id, ctx_id);
qemu_mutex_lock(&s->context_mutex);
- CONTEXT(s, ctx_id).occupied_thread = true;
+ s->context[ctx_id].occupied_thread = true;
qemu_mutex_unlock(&s->context_mutex);
ret = codec_func_handler[api_id](s, ctx_id, indata_buf);
elem->param_buf = NULL;
if (elem->data_buf) {
- if (elem->data_buf->opaque) {
+ if (elem->data_buf->buf) {
TRACE("release inbuf\n");
- g_free(elem->data_buf->opaque);
- elem->data_buf->opaque = NULL;
+ g_free(elem->data_buf->buf);
+ elem->data_buf->buf = NULL;
}
TRACE("release a buffer indata_buf\n");
g_free(elem);
qemu_mutex_lock(&s->context_mutex);
- if (CONTEXT(s, ctx_id).requested_close) {
+ if (s->context[ctx_id].requested_close) {
INFO("make worker thread to handle deinit\n");
// codec_deinit(s, ctx_id, NULL);
maru_brill_codec_release_context(s, ctx_id);
- CONTEXT(s, ctx_id).requested_close = false;
+ s->context[ctx_id].requested_close = false;
}
qemu_mutex_unlock(&s->context_mutex);
qemu_bh_schedule(s->codec_bh);
qemu_mutex_lock(&s->context_mutex);
- CONTEXT(s, ctx_id).occupied_thread = false;
+ s->context[ctx_id].occupied_thread = false;
qemu_mutex_unlock(&s->context_mutex);
}
}
// memset(device_mem, 0x00, sizeof(readbuf_size));
- elem->opaque = readbuf;
- elem->data_size = readbuf_size;
+ elem->buf = readbuf;
+ elem->buf_size = readbuf_size;
elem->ctx_id = ioparam->ctx_index;
return elem;
}
-static void maru_brill_codec_push_writequeue(MaruBrillCodecState *s, void* opaque,
- size_t data_size, int ctx_id,
- DataHandler *handler)
+static void maru_brill_codec_push_writequeue(MaruBrillCodecState *s, void* buf,
+ uint32_t buf_size, int ctx_id)
{
DeviceMemEntry *elem = NULL;
elem = g_malloc0(sizeof(DeviceMemEntry));
- elem->opaque = opaque;
- elem->data_size = data_size;
+ elem->buf = buf;
+ elem->buf_size = buf_size;
elem->ctx_id = ctx_id;
- if (handler) {
- elem->handler = handler;
- } else {
- elem->handler = &default_data_handler;
- }
-
qemu_mutex_lock(&s->context_queue_mutex);
QTAILQ_INSERT_TAIL(&codec_wq, elem, node);
qemu_mutex_unlock(&s->context_queue_mutex);
}
-void maru_brill_codec_pop_writequeue(MaruBrillCodecState *s, uint32_t ctx_idx)
+static void maru_brill_codec_pop_writequeue(MaruBrillCodecState *s, uint32_t ctx_idx)
{
DeviceMemEntry *elem = NULL;
uint32_t mem_offset = 0;
// check corrupted mem_offset
if (mem_offset < CODEC_MEM_SIZE) {
- elem->handler->get_data(s->vaddr + mem_offset, elem->opaque, elem->data_size, s->context[ctx_idx].avctx->pix_fmt);
- elem->handler->release(elem->opaque);
+ if (elem->buf) {
+ TRACE("write data %d to guest. mem_offset: 0x%x\n",
+ elem->buf_size, mem_offset);
+ memcpy(s->vaddr + mem_offset, elem->buf, elem->buf_size);
+
+ TRACE("release output buffer: %p\n", elem->buf);
+ g_free(elem->buf);
+ }
} else {
TRACE("mem_offset is corrupted!!\n");
}
avctx->channels, avctx->sample_rate, avctx->sample_fmt, avctx->channel_layout);
}
-void maru_brill_codec_release_context(MaruBrillCodecState *s, int32_t ctx_id)
+#if 0
+static void maru_brill_codec_reset_parser_info(MaruBrillCodecState *s, int32_t ctx_index)
+{
+ s->context[ctx_index].parser_buf = NULL;
+ s->context[ctx_index].parser_use = false;
+}
+#endif
+
+static void maru_brill_codec_release_context(MaruBrillCodecState *s, int32_t context_id)
{
DeviceMemEntry *wq_elem = NULL, *wnext = NULL;
CodecDataStg *rq_elem = NULL, *rnext = NULL;
TRACE("enter: %s\n", __func__);
- TRACE("release %d of context\n", ctx_id);
+ TRACE("release %d of context\n", context_id);
qemu_mutex_lock(&s->threadpool.mutex);
- if (CONTEXT(s, ctx_id).opened_context) {
+ if (s->context[context_id].opened_context) {
// qemu_mutex_unlock(&s->threadpool.mutex);
- codec_deinit(s, ctx_id, NULL);
+ codec_deinit(s, context_id, NULL);
// qemu_mutex_lock(&s->threadpool.mutex);
}
- CONTEXT(s, ctx_id).occupied_context = false;
+ s->context[context_id].occupied_context = false;
qemu_mutex_unlock(&s->threadpool.mutex);
// TODO: check if the foreach statement needs a lock or not.
QTAILQ_FOREACH_SAFE(rq_elem, &codec_rq, node, rnext) {
if (rq_elem && rq_elem->data_buf &&
- (rq_elem->data_buf->ctx_id == ctx_id)) {
+ (rq_elem->data_buf->ctx_id == context_id)) {
- TRACE("remove unused node from codec_rq. ctx_id: %d\n", ctx_id);
+ TRACE("remove unused node from codec_rq. ctx_id: %d\n", context_id);
qemu_mutex_lock(&s->context_queue_mutex);
QTAILQ_REMOVE(&codec_rq, rq_elem, node);
qemu_mutex_unlock(&s->context_queue_mutex);
TRACE("release rq_elem: %p\n", rq_elem);
g_free(rq_elem);
} else {
- TRACE("no elem of %d context in the codec_rq.\n", ctx_id);
+ TRACE("no elem of %d context in the codec_rq.\n", context_id);
}
}
QTAILQ_FOREACH_SAFE(wq_elem, &codec_wq, node, wnext) {
- if (wq_elem && wq_elem->ctx_id == ctx_id) {
- TRACE("remove unused node from codec_wq. ctx_id: %d\n", ctx_id);
+ if (wq_elem && wq_elem->ctx_id == context_id) {
+ TRACE("remove unused node from codec_wq. ctx_id: %d\n", context_id);
qemu_mutex_lock(&s->context_queue_mutex);
QTAILQ_REMOVE(&codec_wq, wq_elem, node);
qemu_mutex_unlock(&s->context_queue_mutex);
- if (wq_elem && wq_elem->opaque) {
- TRACE("release wq_buffer: %p\n", wq_elem->opaque);
- g_free(wq_elem->opaque);
- wq_elem->opaque = NULL;
+ if (wq_elem && wq_elem->buf) {
+ TRACE("release wq_buffer: %p\n", wq_elem->buf);
+ g_free(wq_elem->buf);
+ wq_elem->buf = NULL;
}
TRACE("release wq_elem: %p\n", wq_elem);
g_free(wq_elem);
} else {
- TRACE("no elem of %d context in the codec_wq.\n", ctx_id);
+ TRACE("no elem of %d context in the codec_wq.\n", context_id);
}
}
TRACE("leave: %s\n", __func__);
}
-int maru_brill_codec_query_list (MaruBrillCodecState *s)
+
+// initialize each pixel format.
+static void maru_brill_codec_pixfmt_info_init(void)
+{
+ /* YUV formats */
+ pix_fmt_info[PIX_FMT_YUV420P].x_chroma_shift = 1;
+ pix_fmt_info[PIX_FMT_YUV420P].y_chroma_shift = 1;
+
+ pix_fmt_info[PIX_FMT_YUV422P].x_chroma_shift = 1;
+ pix_fmt_info[PIX_FMT_YUV422P].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_YUV444P].x_chroma_shift = 0;
+ pix_fmt_info[PIX_FMT_YUV444P].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_YUYV422].x_chroma_shift = 1;
+ pix_fmt_info[PIX_FMT_YUYV422].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_YUV410P].x_chroma_shift = 2;
+ pix_fmt_info[PIX_FMT_YUV410P].y_chroma_shift = 2;
+
+ pix_fmt_info[PIX_FMT_YUV411P].x_chroma_shift = 2;
+ pix_fmt_info[PIX_FMT_YUV411P].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_YUVJ420P].x_chroma_shift = 1;
+ pix_fmt_info[PIX_FMT_YUVJ420P].y_chroma_shift = 1;
+
+ pix_fmt_info[PIX_FMT_YUVJ422P].x_chroma_shift = 1;
+ pix_fmt_info[PIX_FMT_YUVJ422P].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_YUVJ444P].x_chroma_shift = 0;
+ pix_fmt_info[PIX_FMT_YUVJ444P].y_chroma_shift = 0;
+
+ /* RGB formats */
+ pix_fmt_info[PIX_FMT_RGB24].x_chroma_shift = 0;
+ pix_fmt_info[PIX_FMT_RGB24].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_BGR24].x_chroma_shift = 0;
+ pix_fmt_info[PIX_FMT_BGR24].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_RGB32].x_chroma_shift = 0;
+ pix_fmt_info[PIX_FMT_RGB32].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_RGB565].x_chroma_shift = 0;
+ pix_fmt_info[PIX_FMT_RGB565].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_RGB555].x_chroma_shift = 0;
+ pix_fmt_info[PIX_FMT_RGB555].y_chroma_shift = 0;
+
+ pix_fmt_info[PIX_FMT_YUVA420P].x_chroma_shift = 1;
+ pix_fmt_info[PIX_FMT_YUVA420P].y_chroma_shift = 1;
+}
+
+static int maru_brill_codec_get_picture_size(AVPicture *picture, uint8_t *ptr,
+ int pix_fmt, int width,
+ int height, bool encode)
+{
+ int size, w2, h2, size2;
+ int stride, stride2;
+ int fsize;
+ PixFmtInfo *pinfo;
+
+ pinfo = &pix_fmt_info[pix_fmt];
+
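+    // luma stride is rounded up to a 4-byte boundary; chroma plane width/height
+    // are scaled down by the per-format chroma shifts initialized in
+    // maru_brill_codec_pixfmt_info_init().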
+ switch (pix_fmt) {
+ case PIX_FMT_YUV420P:
+ case PIX_FMT_YUV422P:
+ case PIX_FMT_YUV444P:
+ case PIX_FMT_YUV410P:
+ case PIX_FMT_YUV411P:
+ case PIX_FMT_YUVJ420P:
+ case PIX_FMT_YUVJ422P:
+ case PIX_FMT_YUVJ444P:
+ stride = ROUND_UP_4(width);
+ h2 = ROUND_UP_X(height, pinfo->y_chroma_shift);
+ size = stride * h2;
+ w2 = DIV_ROUND_UP_X(width, pinfo->x_chroma_shift);
+ stride2 = ROUND_UP_4(w2);
+ h2 = DIV_ROUND_UP_X(height, pinfo->y_chroma_shift);
+ size2 = stride2 * h2;
+ fsize = size + 2 * size2;
+ TRACE("stride: %d, stride2: %d, size: %d, size2: %d, fsize: %d\n",
+ stride, stride2, size, size2, fsize);
+
+ if (!encode && !ptr) {
+ TRACE("allocate a buffer for a decoded picture.\n");
+ ptr = g_malloc(fsize);
+ if (!ptr) {
+ ERR("[%d] failed to allocate memory.\n", __LINE__);
+ return -1;
+ }
+ }
+ picture->data[0] = ptr;
+ picture->data[1] = picture->data[0] + size;
+ picture->data[2] = picture->data[1] + size2;
+ picture->data[3] = NULL;
+ picture->linesize[0] = stride;
+ picture->linesize[1] = stride2;
+ picture->linesize[2] = stride2;
+ picture->linesize[3] = 0;
+ TRACE("planes %d %d %d\n", 0, size, size + size2);
+ TRACE("strides %d %d %d\n", 0, stride, stride2, stride2);
+ break;
+ case PIX_FMT_YUVA420P:
+ stride = ROUND_UP_4(width);
+ h2 = ROUND_UP_X(height, pinfo->y_chroma_shift);
+ size = stride * h2;
+ w2 = DIV_ROUND_UP_X(width, pinfo->x_chroma_shift);
+ stride2 = ROUND_UP_4(w2);
+ h2 = DIV_ROUND_UP_X(height, pinfo->y_chroma_shift);
+ size2 = stride2 * h2;
+ fsize = 2 * size + 2 * size2;
+ if (!encode && !ptr) {
+ TRACE("allocate a buffer for a decoded picture.\n");
+ ptr = av_mallocz(fsize);
+ if (!ptr) {
+ ERR("[%d] failed to allocate memory.\n", __LINE__);
+ return -1;
+ }
+ }
+ picture->data[0] = ptr;
+ picture->data[1] = picture->data[0] + size;
+ picture->data[2] = picture->data[1] + size2;
+ picture->data[3] = picture->data[2] + size2;
+ picture->linesize[0] = stride;
+ picture->linesize[1] = stride2;
+ picture->linesize[2] = stride2;
+ picture->linesize[3] = stride;
+ TRACE("planes %d %d %d\n", 0, size, size + size2);
+ TRACE("strides %d %d %d\n", 0, stride, stride2, stride2);
+ break;
+ case PIX_FMT_RGB24:
+ case PIX_FMT_BGR24:
+ stride = ROUND_UP_4 (width * 3);
+ fsize = stride * height;
+ TRACE("stride: %d, size: %d\n", stride, fsize);
+
+ if (!encode && !ptr) {
+ TRACE("allocate a buffer for a decoded picture.\n");
+ ptr = av_mallocz(fsize);
+ if (!ptr) {
+ ERR("[%d] failed to allocate memory.\n", __LINE__);
+ return -1;
+ }
+ }
+ picture->data[0] = ptr;
+ picture->data[1] = NULL;
+ picture->data[2] = NULL;
+ picture->data[3] = NULL;
+ picture->linesize[0] = stride;
+ picture->linesize[1] = 0;
+ picture->linesize[2] = 0;
+ picture->linesize[3] = 0;
+ break;
+ case PIX_FMT_RGB32:
+ stride = width * 4;
+ fsize = stride * height;
+ TRACE("stride: %d, size: %d\n", stride, fsize);
+
+ if (!encode && !ptr) {
+ TRACE("allocate a buffer for a decoded picture.\n");
+ ptr = av_mallocz(fsize);
+ if (!ptr) {
+ ERR("[%d] failed to allocate memory.\n", __LINE__);
+ return -1;
+ }
+ }
+ picture->data[0] = ptr;
+ picture->data[1] = NULL;
+ picture->data[2] = NULL;
+ picture->data[3] = NULL;
+ picture->linesize[0] = stride;
+ picture->linesize[1] = 0;
+ picture->linesize[2] = 0;
+ picture->linesize[3] = 0;
+ break;
+ case PIX_FMT_RGB555:
+ case PIX_FMT_RGB565:
+ stride = ROUND_UP_4 (width * 2);
+ fsize = stride * height;
+ TRACE("stride: %d, size: %d\n", stride, fsize);
+
+ if (!encode && !ptr) {
+ TRACE("allocate a buffer for a decoded picture.\n");
+ ptr = av_mallocz(fsize);
+ if (!ptr) {
+ ERR("[%d] failed to allocate memory.\n", __LINE__);
+ return -1;
+ }
+ }
+ picture->data[0] = ptr;
+ picture->data[1] = NULL;
+ picture->data[2] = NULL;
+ picture->data[3] = NULL;
+ picture->linesize[0] = stride;
+ picture->linesize[1] = 0;
+ picture->linesize[2] = 0;
+ picture->linesize[3] = 0;
+ break;
+ case PIX_FMT_PAL8:
+ stride = ROUND_UP_4(width);
+ size = stride * height;
+ fsize = size + 256 * 4;
+ TRACE("stride: %d, size: %d\n", stride, fsize);
+
+ if (!encode && !ptr) {
+ TRACE("allocate a buffer for a decoded picture.\n");
+ ptr = av_mallocz(fsize);
+ if (!ptr) {
+ ERR("[%d] failed to allocate memory.\n", __LINE__);
+ return -1;
+ }
+ }
+ picture->data[0] = ptr;
+ picture->data[1] = ptr + size;
+ picture->data[2] = NULL;
+ picture->data[3] = NULL;
+ picture->linesize[0] = stride;
+ picture->linesize[1] = 4;
+ picture->linesize[2] = 0;
+ picture->linesize[3] = 0;
+ break;
+ default:
+ picture->data[0] = NULL;
+ picture->data[1] = NULL;
+ picture->data[2] = NULL;
+ picture->data[3] = NULL;
+ fsize = -1;
+ ERR("pixel format: %d was wrong.\n", pix_fmt);
+ break;
+ }
+
+ return fsize;
+}
+
+static int maru_brill_codec_query_list (MaruBrillCodecState *s)
{
AVCodec *codec = NULL;
uint32_t size = 0, mem_size = 0;
memset(codec_fmts, -1, sizeof(codec_fmts));
if (media_type == AVMEDIA_TYPE_VIDEO) {
if (codec->pix_fmts) {
- for (i = 0; codec->pix_fmts[i] != -1 && i < 4; i++) {
+ for (i = 0; codec->pix_fmts[i] != -1; i++) {
codec_fmts[i] = codec->pix_fmts[i];
}
}
memcpy(s->vaddr + size, codec_fmts, sizeof(codec_fmts));
size += sizeof(codec_fmts);
- TRACE("register %s %s\n", codec->name, codec->decode ? "decoder" : "encoder");
codec = av_codec_next(codec);
}
return 0;
}
-int maru_brill_codec_get_context_index(MaruBrillCodecState *s)
+static int maru_brill_codec_get_context_index(MaruBrillCodecState *s)
{
- int ctx_id;
+ int index;
TRACE("enter: %s\n", __func__);
// requires mutex_lock? this function is already protected by a critical section.
qemu_mutex_lock(&s->threadpool.mutex);
- for (ctx_id = 1; ctx_id < CODEC_CONTEXT_MAX; ctx_id++) {
- if (CONTEXT(s, ctx_id).occupied_context == false) {
- TRACE("get %d of codec context successfully.\n", ctx_id);
- CONTEXT(s, ctx_id).occupied_context = true;
+ for (index = 1; index < CODEC_CONTEXT_MAX; index++) {
+ if (s->context[index].occupied_context == false) {
+ TRACE("get %d of codec context successfully.\n", index);
+ s->context[index].occupied_context = true;
break;
}
}
qemu_mutex_unlock(&s->threadpool.mutex);
- if (ctx_id == CODEC_CONTEXT_MAX) {
+ if (index == CODEC_CONTEXT_MAX) {
ERR("failed to get available codec context. ");
ERR("try to run codec again.\n");
- ctx_id = -1;
+ index = -1;
}
TRACE("leave: %s\n", __func__);
- return ctx_id;
+ return index;
}
-
// allocate avcontext and avframe struct.
-static AVCodecContext *maru_brill_codec_alloc_context(MaruBrillCodecState *s, int ctx_id)
+static AVCodecContext *maru_brill_codec_alloc_context(MaruBrillCodecState *s, int index)
{
TRACE("enter: %s\n", __func__);
- TRACE("allocate %d of context and frame.\n", ctx_id);
- CONTEXT(s, ctx_id).avctx = avcodec_alloc_context3(NULL);
- CONTEXT(s, ctx_id).frame = avcodec_alloc_frame();
- CONTEXT(s, ctx_id).opened_context = false;
+ TRACE("allocate %d of context and frame.\n", index);
+ s->context[index].avctx = avcodec_alloc_context3(NULL);
+ s->context[index].frame = avcodec_alloc_frame();
+ s->context[index].opened_context = false;
+#if 0
+ s->context[index].parser_buf = NULL;
+ s->context[index].parser_use = false;
+#endif
TRACE("leave: %s\n", __func__);
- return CONTEXT(s, ctx_id).avctx;
+ return s->context[index].avctx;
}
static AVCodec *maru_brill_codec_find_avcodec(uint8_t *mem_buf)
return size;
}
-static int convert_audio_sample_fmt(const AVCodec *codec, int codec_type, bool encode)
-{
- int audio_sample_fmt = AV_SAMPLE_FMT_NONE;
-
- if (!codec) {
- return audio_sample_fmt;
- }
-
- if (codec_type != AVMEDIA_TYPE_AUDIO) {
- ERR("this codec_type is invalid %d\n", codec_type);
- return audio_sample_fmt;
- }
-
- if (!strcmp(codec->name, "aac")) {
- // FLOAT format
- if (encode) {
- audio_sample_fmt = AV_SAMPLE_FMT_FLTP;
- } else {
- audio_sample_fmt = AV_SAMPLE_FMT_FLT;
- }
- } else if (!strcmp(codec->name, "mp3") || !strcmp(codec->name, "mp3adu")) {
- // S16 format
- if (encode) {
- audio_sample_fmt = AV_SAMPLE_FMT_S16P;
- } else {
- audio_sample_fmt = AV_SAMPLE_FMT_S16;
- }
- } else if (!strcmp(codec->name, "wmav1") || !strcmp(codec->name, "wmav2")) {
- if (encode) {
- audio_sample_fmt = AV_SAMPLE_FMT_FLTP;
- } else {
- audio_sample_fmt = AV_SAMPLE_FMT_FLT;
- }
- } else {
- INFO("cannot handle %s codec\n", codec->name);
- }
-
- TRACE("convert audio sample_fmt %d\n", audio_sample_fmt);
- return audio_sample_fmt;
-}
-
-static int fill_audio_into_frame(AVCodecContext *avctx, AVFrame *frame,
- uint8_t *samples, int samples_size,
- int nb_samples, int audio_sample_fmt)
-{
- int result = 0;
-
- if (!avctx) {
- ERR("fill_audio. AVCodecContext is NULL!!\n");
- return -1;
- }
-
- if (!frame) {
- ERR("fill_audio. AVFrame is NULL!!\n");
- return -1;
- }
-
- frame->nb_samples = nb_samples;
- frame->format = audio_sample_fmt;
- frame->channel_layout = avctx->channel_layout;
-
- result =
- avcodec_fill_audio_frame(frame, avctx->channels, audio_sample_fmt, (const uint8_t *)samples, samples_size, 0);
- TRACE("fill audio in_frame. ret: %d in_frame->ch_layout %lld\n", result, frame->channel_layout);
-
- return result;
-}
-
-static AVFrame *resample_audio(AVCodecContext *avctx, AVFrame *sample_frame,
- int sample_buffer_size, int in_sample_fmt,
- AVFrame *resample_frame, int *resample_buffer_size,
- int resample_sample_fmt)
+static uint8_t *resample_audio_buffer(AVCodecContext * avctx, AVFrame *samples,
+ int *out_size, int out_sample_fmt)
{
AVAudioResampleContext *avr = NULL;
- uint8_t *resample_buffer = NULL;
- int buffer_size = 0;
- int resample_nb_samples = sample_frame->nb_samples;
+ uint8_t *resampled_audio = NULL;
+ int buffer_size = 0, out_linesize = 0;
+ int nb_samples = samples->nb_samples;
+ // int out_sample_fmt = avctx->sample_fmt - 5;
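+    // open an AVAudioResampleContext with identical rate/layout on both sides
+    // (only the sample format changes), convert the planar frame into a packed
+    // buffer, and return it; the caller owns the buffer and must av_free() it.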
avr = avresample_alloc_context();
if (!avr) {
return NULL;
}
- TRACE("channel_layout %lld sample_rate %d in_sample_fmt %d resample_sample_fmt %d\n",
- avctx->channel_layout, avctx->sample_rate, avctx->sample_fmt, resample_sample_fmt);
+ TRACE("resample audio. channel_layout %lld sample_rate %d "
+ "in_sample_fmt %d out_sample_fmt %d\n",
+ avctx->channel_layout, avctx->sample_rate,
+ avctx->sample_fmt, out_sample_fmt);
av_opt_set_int(avr, "in_channel_layout", avctx->channel_layout, 0);
- av_opt_set_int(avr, "in_sample_fmt", in_sample_fmt, 0);
+ av_opt_set_int(avr, "in_sample_fmt", avctx->sample_fmt, 0);
av_opt_set_int(avr, "in_sample_rate", avctx->sample_rate, 0);
-
av_opt_set_int(avr, "out_channel_layout", avctx->channel_layout, 0);
- av_opt_set_int(avr, "out_sample_fmt", resample_sample_fmt, 0);
+ av_opt_set_int(avr, "out_sample_fmt", out_sample_fmt, 0);
av_opt_set_int(avr, "out_sample_rate", avctx->sample_rate, 0);
TRACE("open avresample context\n");
return NULL;
}
- resample_frame = avcodec_alloc_frame();
- TRACE("resample audio. nb_samples %d sample_fmt %d\n", resample_nb_samples, resample_sample_fmt);
+ *out_size =
+ av_samples_get_buffer_size(&out_linesize, avctx->channels,
+ nb_samples, out_sample_fmt, 0);
+
+ TRACE("resample audio. out_linesize %d nb_samples %d\n", out_linesize, nb_samples);
- *resample_buffer_size = av_samples_get_buffer_size(NULL, avctx->channels, resample_nb_samples, resample_sample_fmt, 0);
- if (*resample_buffer_size < 0) {
- ERR("failed to get size of resample buffer %d\n", *resample_buffer_size);
+ if (*out_size < 0) {
+ ERR("failed to get size of sample buffer %d\n", *out_size);
avresample_close(avr);
avresample_free(&avr);
return NULL;
}
- resample_buffer = av_mallocz(*resample_buffer_size);
- if (!resample_buffer) {
+ resampled_audio = av_mallocz(*out_size);
+ if (!resampled_audio) {
ERR("failed to allocate resample buffer\n");
avresample_close(avr);
avresample_free(&avr);
return NULL;
}
- fill_audio_into_frame(avctx, resample_frame, resample_buffer,
- *resample_buffer_size, resample_nb_samples, resample_sample_fmt);
-
- buffer_size = avresample_convert(avr, resample_frame->data,
- *resample_buffer_size, resample_nb_samples,
- sample_frame->data, sample_buffer_size,
- sample_frame->nb_samples);
-
- TRACE("resample_audio buffer_size %d\n", buffer_size);
+ buffer_size = avresample_convert(avr, &resampled_audio,
+ out_linesize, nb_samples,
+ samples->data, samples->linesize[0],
+ samples->nb_samples);
+ TRACE("resample_audio out_size %d buffer_size %d\n", *out_size, buffer_size);
avresample_close(avr);
avresample_free(&avr);
- return resample_frame;
-}
-
-static int parse_and_decode_video(AVCodecContext *avctx, AVFrame *picture,
- AVCodecParserContext *pctx, int ctx_id,
- AVPacket *packet, int *got_picture,
- int idx, int64_t in_offset)
-{
- uint8_t *parser_outbuf = NULL;
- int parser_outbuf_size = 0;
- uint8_t *parser_buf = packet->data;
- int parser_buf_size = packet->size;
- int ret = 0, len = -1;
- int64_t pts = 0, dts = 0, pos = 0;
-
- pts = dts = idx;
- pos = in_offset;
-
- do {
- if (pctx) {
- ret = av_parser_parse2(pctx, avctx, &parser_outbuf,
- &parser_outbuf_size, parser_buf, parser_buf_size,
- pts, dts, pos);
-
- if (ret) {
- parser_buf_size -= ret;
- parser_buf += ret;
- }
-
- TRACE("after parsing ret: %d parser_outbuf_size %d parser_buf_size %d pts %lld\n",
- ret, parser_outbuf_size, parser_buf_size, pctx->pts);
-
- /* if there is no output, we must break and wait for more data.
- * also the timestamp in the context is not updated.
- */
- if (parser_outbuf_size == 0) {
- if (parser_buf_size > 0) {
- TRACE("parsing data have been left\n");
- continue;
- } else {
- TRACE("finish parsing data\n");
- break;
- }
- }
-
- packet->data = parser_outbuf;
- packet->size = parser_outbuf_size;
- } else {
- TRACE("not using parser %s\n", avctx->codec->name);
- }
-
- len = avcodec_decode_video2(avctx, picture, got_picture, packet);
- TRACE("decode_video. len %d, got_picture %d\n", len, *got_picture);
-
- if (!pctx) {
- if (len == 0 && (*got_picture) == 0) {
- ERR("decoding video didn't return any data! ctx_id %d len %d\n", ctx_id, len);
- break;
- } else if (len < 0) {
- ERR("decoding video error! ctx_id %d len %d\n", ctx_id, len);
- break;
- }
- parser_buf_size -= len;
- parser_buf += len;
- } else {
- if (len == 0) {
- ERR("decoding video didn't return any data! ctx_id %d len %d\n", ctx_id, len);
- *got_picture = 0;
- break;
- } else if (len < 0) {
- ERR("decoding video error! trying next ctx_id %d len %d\n", ctx_id, len);
- break;
- }
- }
- } while (parser_buf_size > 0);
-
- return len;
+ return resampled_audio;
}
// codec functions
ERR("[%d] failed to allocate context.\n", __LINE__);
ret = -1;
} else {
-        codec = maru_brill_codec_find_avcodec(elem->opaque);
-        if (codec) {
-            size = sizeof(int32_t) + 32; // buffer size of codec_name
-            read_codec_init_data(avctx, elem->opaque + size);
-
-            // in case of aac encoder, sample format is float
-            if (!strcmp(codec->name, "aac") && codec->encode2) {
-                TRACE("convert sample format into SAMPLE_FMT_FLTP\n");
-                avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
-                avctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
-                INFO("aac encoder!! channels %d channel_layout %lld\n",
-                    avctx->channels, avctx->channel_layout);
-                avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
-            }
-            TRACE("audio sample format %d\n", avctx->sample_fmt);
-            TRACE("strict_std_compliance %d\n", avctx->strict_std_compliance);
+#if 0
+        avcodec_get_context_defaults3(avctx, NULL);
+        avctx->rc_strategy = 2;
+        avctx->b_frame_strategy = 0;
+        avctx->coder_type = 0;
+        avctx->context_model = 0;
+        avctx->scenechange_threshold = 0;
+        avctx->gop_size = DEFAULT_VIDEO_GOP_SIZE;
+        avctx->lmin = (2 * FF_QP2LAMBDA + 0.5);
+        avctx->lmax = (31 * FF_QP2LAMBDA + 0.5);
+#endif
+        codec = maru_brill_codec_find_avcodec(elem->buf);
+        if (codec) {
+            size = sizeof(int32_t) + 32; // buffer size of codec_name
+            read_codec_init_data(avctx, elem->buf + size);
// in case of aac encoder, sample format is float
if (!strcmp(codec->name, "aac") && codec->encode2) {
ret = avcodec_open2(avctx, codec, NULL);
INFO("avcodec_open. ret 0x%x ctx_id %d\n", ret, ctx_id);
- INFO("channels %d sample_rate %d sample_fmt %d "
+ TRACE("channels %d sample_rate %d sample_fmt %d "
"channel_layout %lld frame_size %d\n",
avctx->channels, avctx->sample_rate, avctx->sample_fmt,
avctx->channel_layout, avctx->frame_size);
+ sizeof(avctx->extradata_size) + avctx->extradata_size)
+ sizeof(int);
- CONTEXT(s, ctx_id).opened_context = true;
- CONTEXT(s, ctx_id).parser_ctx =
+ s->context[ctx_id].opened_context = true;
+ s->context[ctx_id].parser_ctx =
maru_brill_codec_parser_init(avctx);
} else {
ERR("failed to find codec. ctx_id: %d\n", ctx_id);
}
}
- maru_brill_codec_push_writequeue(s, tempbuf, tempbuf_size, ctx_id, NULL);
+ maru_brill_codec_push_writequeue(s, tempbuf, tempbuf_size, ctx_id);
TRACE("leave: %s\n", __func__);
TRACE("enter: %s\n", __func__);
- avctx = CONTEXT(s, ctx_id).avctx;
- frame = CONTEXT(s, ctx_id).frame;
- parserctx = CONTEXT(s, ctx_id).parser_ctx;
+ avctx = s->context[ctx_id].avctx;
+ frame = s->context[ctx_id].frame;
+ parserctx = s->context[ctx_id].parser_ctx;
if (!avctx || !frame) {
TRACE("%d of AVCodecContext or AVFrame is NULL. "
" Those resources have been released before.\n", ctx_id);
INFO("close avcontext of %d\n", ctx_id);
// qemu_mutex_lock(&s->threadpool.mutex);
avcodec_close(avctx);
- CONTEXT(s, ctx_id).opened_context = false;
+ s->context[ctx_id].opened_context = false;
// qemu_mutex_unlock(&s->threadpool.mutex);
if (avctx->extradata) {
TRACE("free context extradata\n");
av_free(avctx->extradata);
- CONTEXT(s, ctx_id).avctx->extradata = NULL;
+ s->context[ctx_id].avctx->extradata = NULL;
}
if (frame) {
TRACE("free frame\n");
+ // av_free(frame);
avcodec_free_frame(&frame);
- CONTEXT(s, ctx_id).frame = NULL;
+ s->context[ctx_id].frame = NULL;
}
if (avctx) {
TRACE("free codec context\n");
av_free(avctx);
- CONTEXT(s, ctx_id).avctx = NULL;
+ s->context[ctx_id].avctx = NULL;
}
if (parserctx) {
- INFO("close parser context\n");
av_parser_close(parserctx);
- CONTEXT(s, ctx_id).parser_ctx = NULL;
+ s->context[ctx_id].parser_ctx = NULL;
}
- maru_brill_codec_push_writequeue(s, NULL, 0, ctx_id, NULL);
+ maru_brill_codec_push_writequeue(s, NULL, 0, ctx_id);
TRACE("leave: %s\n", __func__);
TRACE("enter: %s\n", __func__);
- avctx = CONTEXT(s, ctx_id).avctx;
+ avctx = s->context[ctx_id].avctx;
if (!avctx) {
ERR("%d of AVCodecContext is NULL.\n", ctx_id);
ret = false;
ret = false;
} else {
TRACE("flush %d context of buffers.\n", ctx_id);
- AVCodecParserContext *pctx = NULL;
- uint8_t *poutbuf = NULL;
- int poutbuf_size = 0;
- int res = 0;
-
- uint8_t p_inbuf[FF_INPUT_BUFFER_PADDING_SIZE];
- int p_inbuf_size = FF_INPUT_BUFFER_PADDING_SIZE;
-
- memset(&p_inbuf, 0x00, p_inbuf_size);
-
- pctx = CONTEXT(s, ctx_id).parser_ctx;
- if (pctx) {
- res = av_parser_parse2(pctx, avctx, &poutbuf, &poutbuf_size,
- p_inbuf, p_inbuf_size, -1, -1, -1);
- INFO("before flush buffers, using parser. res: %d\n", res);
- }
-
avcodec_flush_buffers(avctx);
}
- maru_brill_codec_push_writequeue(s, NULL, 0, ctx_id, NULL);
+ maru_brill_codec_push_writequeue(s, NULL, 0, ctx_id);
TRACE("leave: %s\n", __func__);
{
AVCodecContext *avctx = NULL;
AVFrame *picture = NULL;
- AVCodecParserContext *pctx = NULL;
AVPacket avpkt;
-
int got_picture = 0, len = -1;
- int idx = 0, size = 0;
- int64_t in_offset = 0;
uint8_t *inbuf = NULL;
- int inbuf_size = 0;
+ int inbuf_size = 0, idx, size = 0;
+ int64_t in_offset;
DeviceMemEntry *elem = NULL;
uint8_t *tempbuf = NULL;
int tempbuf_size = 0;
TRACE("enter: %s\n", __func__);
elem = (DeviceMemEntry *)data_buf;
- if (elem && elem->opaque) {
- memcpy(&inbuf_size, elem->opaque, sizeof(inbuf_size));
+ if (elem && elem->buf) {
+ memcpy(&inbuf_size, elem->buf, sizeof(inbuf_size));
size += sizeof(inbuf_size);
- memcpy(&idx, elem->opaque + size, sizeof(idx));
+ memcpy(&idx, elem->buf + size, sizeof(idx));
size += sizeof(idx);
- memcpy(&in_offset, elem->opaque + size, sizeof(in_offset));
+ memcpy(&in_offset, elem->buf + size, sizeof(in_offset));
size += sizeof(in_offset);
TRACE("decode_video. inbuf_size %d\n", inbuf_size);
if (inbuf_size > 0) {
- inbuf = elem->opaque + size;
+ inbuf = elem->buf + size;
}
} else {
TRACE("decode_video. no input buffer\n");
avpkt.data = inbuf;
avpkt.size = inbuf_size;
-
- avctx = CONTEXT(s, ctx_id).avctx;
- picture = CONTEXT(s, ctx_id).frame;
+ avctx = s->context[ctx_id].avctx;
+ picture = s->context[ctx_id].frame;
if (!avctx) {
ERR("decode_video. %d of AVCodecContext is NULL.\n", ctx_id);
} else if (!avctx->codec) {
} else if (!picture) {
ERR("decode_video. %d of AVFrame is NULL.\n", ctx_id);
} else {
- pctx = CONTEXT(s, ctx_id).parser_ctx;
+ // in case of skipping frames
+ // picture->pict_type = -1;
+
+ TRACE("decode_video. bitrate %d\n", avctx->bit_rate);
+ // avctx->reordered_opaque = idx;
+ // picture->reordered_opaque = idx;
- len = parse_and_decode_video(avctx, picture, pctx, ctx_id,
- &avpkt, &got_picture, idx, in_offset);
+ len =
+ avcodec_decode_video2(avctx, picture, &got_picture, &avpkt);
+ TRACE("decode_video. in_size %d len %d, frame_size %d\n", avpkt.size, len, got_picture);
}
- tempbuf_size = sizeof(len) + sizeof(got_picture) + sizeof(struct video_data);
+ tempbuf_size =
+ sizeof(len) + sizeof(got_picture) + sizeof(struct video_data);
+
+ if (len < 0) {
+ ERR("failed to decode video. ctx_id: %d, len: %d\n", ctx_id, len);
+ got_picture = 0;
+ }
tempbuf = g_malloc(tempbuf_size);
if (!tempbuf) {
}
}
- maru_brill_codec_push_writequeue(s, tempbuf, tempbuf_size, ctx_id, NULL);
+ maru_brill_codec_push_writequeue(s, tempbuf, tempbuf_size, ctx_id);
TRACE("leave: %s\n", __func__);
{
AVCodecContext *avctx = NULL;
AVPicture *src = NULL;
+ AVPicture dst;
+ uint8_t *tempbuf = NULL;
int pict_size = 0;
bool ret = true;
TRACE("copy decoded image of %d context.\n", ctx_id);
- avctx = CONTEXT(s, ctx_id).avctx;
- src = (AVPicture *)CONTEXT(s, ctx_id).frame;
+ avctx = s->context[ctx_id].avctx;
+ src = (AVPicture *)s->context[ctx_id].frame;
if (!avctx) {
ERR("picture_copy. %d of AVCodecContext is NULL.\n", ctx_id);
ret = false;
} else {
TRACE("decoded image. pix_fmt: %d width: %d, height: %d\n",
avctx->pix_fmt, avctx->width, avctx->height);
- pict_size = avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height);
+ pict_size =
+ maru_brill_codec_get_picture_size(&dst, NULL, avctx->pix_fmt,
+ avctx->width, avctx->height, false);
if ((pict_size) < 0) {
ERR("picture size: %d\n", pict_size);
ret = false;
} else {
TRACE("picture size: %d\n", pict_size);
- maru_brill_codec_push_writequeue(s, src, pict_size, ctx_id, &default_video_decode_data_handler);
+ av_picture_copy(&dst, src, avctx->pix_fmt,
+ avctx->width, avctx->height);
+
+ tempbuf = dst.data[0];
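+            // dst.data[0] holds the freshly allocated copy of the picture; it
+            // is queued below and released once the guest has read it.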
}
}
+ maru_brill_codec_push_writequeue(s, tempbuf, pict_size, ctx_id);
+
TRACE("leave: %s\n", __func__);
return ret;
}
-/*
- * decode_audio >> raw audio_buffer >> resample
- *
- * audios sink cannot handle planar format, so it is required
- * to resample audio buffer into linear format.
- */
static bool codec_decode_audio(MaruBrillCodecState *s, int ctx_id, void *data_buf)
{
AVCodecContext *avctx;
uint8_t *tempbuf = NULL;
int tempbuf_size = 0;
- AVFrame *resample_frame = NULL;
- uint8_t *resample_buf = NULL;
- int resample_buf_size = 0;
+ uint8_t *out_buf = NULL;
+ int out_buf_size = 0;
int out_sample_fmt = -1;
TRACE("enter: %s\n", __func__);
elem = (DeviceMemEntry *)data_buf;
- if (elem && elem->opaque) {
- memcpy(&inbuf_size, elem->opaque, sizeof(inbuf_size));
+ if (elem && elem->buf) {
+ memcpy(&inbuf_size, elem->buf, sizeof(inbuf_size));
size = sizeof(inbuf_size);
TRACE("decode_audio. inbuf_size %d\n", inbuf_size);
if (inbuf_size > 0) {
- inbuf = elem->opaque + size;
+ inbuf = elem->buf + size;
}
} else {
ERR("decode_audio. no input buffer\n");
avpkt.data = inbuf;
avpkt.size = inbuf_size;
- avctx = CONTEXT(s, ctx_id).avctx;
- audio_out = CONTEXT(s, ctx_id).frame;
+ avctx = s->context[ctx_id].avctx;
+ // audio_out = s->context[ctx_id].frame;
+ audio_out = avcodec_alloc_frame();
if (!avctx) {
ERR("decode_audio. %d of AVCodecContext is NULL\n", ctx_id);
} else if (!avctx->codec) {
} else if (!audio_out) {
ERR("decode_audio. %d of AVFrame is NULL\n", ctx_id);
} else {
+ // avcodec_get_frame_defaults(audio_out);
+
len = avcodec_decode_audio4(avctx, audio_out, &got_frame, &avpkt);
- TRACE("decode_audio. len %d, channel_layout %lld got_frame %d\n",
+ TRACE("decode_audio. len %d, channel_layout %lld, got_frame %d\n",
len, avctx->channel_layout, got_frame);
-
if (got_frame) {
if (av_sample_fmt_is_planar(avctx->sample_fmt)) {
- out_sample_fmt = convert_audio_sample_fmt(avctx->codec, avctx->codec_type, 0);
+ // convert PLANAR to LINEAR format
+ out_sample_fmt = avctx->sample_fmt - 5;
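+                // in libav's enum, planar sample formats sit 5 entries after their packed
+                // counterparts, so subtracting 5 maps e.g. AV_SAMPLE_FMT_FLTP to AV_SAMPLE_FMT_FLT.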
- if (avctx->channel_layout == 0) {
- avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
- TRACE("decode_audio. channel_layout %lld channels %d\n",
- avctx->channel_layout, avctx->channels);
- }
- resample_frame = resample_audio(avctx, audio_out, audio_out->linesize[0],
- avctx->sample_fmt, NULL, &resample_buf_size,
- out_sample_fmt);
- if (resample_frame) {
- resample_buf = resample_frame->data[0];
- } else {
- ERR("failed to resample decoded audio buffer\n");
- len = -1;
- got_frame = 0;
- }
+ out_buf = resample_audio_buffer(avctx, audio_out, &out_buf_size, out_sample_fmt);
} else {
- INFO("decode_audio. linear audio format\n");
- resample_buf = audio_out->data[0];
- resample_buf_size = audio_out->linesize[0];
+                // TODO: handle non-planar (packed) formats
+                INFO("decode_audio. cannot handle non-planar audio format\n");
+ len = -1;
}
}
}
got_frame = 0;
} else {
tempbuf_size += (sizeof(out_sample_fmt) + sizeof(avctx->sample_rate)
- + sizeof(avctx->channels) + sizeof(avctx->channel_layout)
- + sizeof(resample_buf_size) + resample_buf_size);
+ + sizeof(avctx->channels) + sizeof(avctx->channel_layout)
+ + sizeof(out_buf_size) + out_buf_size);
}
tempbuf = g_malloc(tempbuf_size);
memcpy(tempbuf + size, &avctx->channel_layout, sizeof(avctx->channel_layout));
size += sizeof(avctx->channel_layout);
- memcpy(tempbuf + size, &resample_buf_size, sizeof(resample_buf_size));
- size += sizeof(resample_buf_size);
- if (resample_buf) {
+ memcpy(tempbuf + size, &out_buf_size, sizeof(out_buf_size));
+ size += sizeof(out_buf_size);
+ if (out_buf) {
TRACE("copy resampled audio buffer\n");
- memcpy(tempbuf + size, resample_buf, resample_buf_size);
+ memcpy(tempbuf + size, out_buf, out_buf_size);
}
}
}
- maru_brill_codec_push_writequeue(s, tempbuf, tempbuf_size, ctx_id, NULL);
-
- if (resample_frame) {
- TRACE("release decoded frame\n");
- av_free(resample_buf);
- av_free(resample_frame);
- }
+ maru_brill_codec_push_writequeue(s, tempbuf, tempbuf_size, ctx_id);
if (audio_out) {
avcodec_free_frame(&audio_out);
}
+ if (out_buf) {
+ TRACE("and release decoded_audio buffer\n");
+ av_free(out_buf);
+ }
+
+
TRACE("leave: %s\n", __func__);
return true;
}
TRACE("enter: %s\n", __func__);
elem = (DeviceMemEntry *)data_buf;
- if (elem && elem->opaque) {
- memcpy(&inbuf_size, elem->opaque, sizeof(inbuf_size));
+ if (elem && elem->buf) {
+ memcpy(&inbuf_size, elem->buf, sizeof(inbuf_size));
size += sizeof(inbuf_size);
- memcpy(&in_timestamp, elem->opaque + size, sizeof(in_timestamp));
+ memcpy(&in_timestamp, elem->buf + size, sizeof(in_timestamp));
size += sizeof(in_timestamp);
TRACE("encode video. inbuf_size %d\n", inbuf_size);
if (inbuf_size > 0) {
- inbuf = elem->opaque + size;
+ inbuf = elem->buf + size;
}
} else {
TRACE("encode video. no input buffer.\n");
avpkt.data = NULL;
avpkt.size = 0;
- avctx = CONTEXT(s, ctx_id).avctx;
- pict = CONTEXT(s, ctx_id).frame;
+ avctx = s->context[ctx_id].avctx;
+ pict = s->context[ctx_id].frame;
if (!avctx || !pict) {
ERR("%d of context or frame is NULL\n", ctx_id);
} else if (!avctx->codec) {
TRACE("pixel format: %d inbuf: %p, picture data: %p\n",
avctx->pix_fmt, inbuf, pict->data[0]);
- ret = avpicture_fill((AVPicture *)pict, inbuf, avctx->pix_fmt,
- avctx->width, avctx->height);
+ ret =
+ maru_brill_codec_get_picture_size((AVPicture *)pict, inbuf,
+ avctx->pix_fmt, avctx->width,
+ avctx->height, true);
if (ret < 0) {
ERR("after avpicture_fill, ret:%d\n", ret);
} else {
g_free(outbuf);
}
- maru_brill_codec_push_writequeue(s, tempbuf, tempbuf_size, ctx_id, NULL);
+ maru_brill_codec_push_writequeue(s, tempbuf, tempbuf_size, ctx_id);
TRACE("leave: %s\n", __func__);
return true;
}
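+// helper for codec_encode_audio: allocates a zeroed buffer large enough for
+// frame_size samples, copies raw input into it when audio_buffer is given,
+// and attaches it to frame via avcodec_fill_audio_frame().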
+static int codec_fill_audio_frame(AVFrame *frame, uint8_t *audio_buffer,
+ int audio_buffer_size, int audio_sample_fmt,
+ int channels, int frame_size,
+ int64_t channel_layout)
+{
+ uint8_t *samples = NULL;
+ int audio_sample_buffer_size = 0;
+ int ret = 0;
+
+ audio_sample_buffer_size = av_samples_get_buffer_size(NULL, channels, frame_size, audio_sample_fmt, 0);
+
+ samples = av_mallocz(audio_sample_buffer_size);
+ if (!samples) {
+ return -1;
+ }
+
+ if (audio_buffer) {
+ memcpy(samples, audio_buffer, audio_buffer_size);
+ }
+
+ ret = avcodec_fill_audio_frame(frame, channels, audio_sample_fmt,
+ (const uint8_t *)samples, audio_sample_buffer_size, 0);
+
+ TRACE("fill audio_frame. ret: %d channel_layout %lld\n", ret, frame->channel_layout);
+
+ return ret;
+}
+
static bool codec_encode_audio(MaruBrillCodecState *s, int ctx_id, void *data_buf)
{
AVCodecContext *avctx = NULL;
AVPacket avpkt;
uint8_t *audio_in = NULL;
int32_t audio_in_size = 0;
- int ret = -1, got_pkt = 0, size = 0;
+ int ret = 0, got_pkt = 0, size = 0;
DeviceMemEntry *elem = NULL;
uint8_t *tempbuf = NULL;
int tempbuf_size = 0;
AVFrame *in_frame = NULL;
- AVFrame *resample_frame = NULL;
+ AVFrame *resampled_frame = NULL;
int64_t in_timestamp = 0;
TRACE("enter: %s\n", __func__);
* audio_in : raw audio data
*/
elem = (DeviceMemEntry *)data_buf;
- if (elem && elem->opaque) {
- memcpy(&audio_in_size, elem->opaque, sizeof(audio_in_size));
+ if (elem && elem->buf) {
+ memcpy(&audio_in_size, elem->buf, sizeof(audio_in_size));
size += sizeof(audio_in_size);
- memcpy(&in_timestamp, elem->opaque + size, sizeof(in_timestamp));
+ memcpy(&in_timestamp, elem->buf + size, sizeof(in_timestamp));
size += sizeof(in_timestamp);
TRACE("encode_audio. audio_in_size %d\n", audio_in_size);
if (audio_in_size > 0) {
// audio_in = g_malloc0(audio_in_size);
// memcpy(audio_in, elem->buf + size, audio_in_size);
- audio_in = elem->opaque + size;
+ audio_in = elem->buf + size;
}
} else {
TRACE("encode_audio. no input buffer\n");
}
- avctx = CONTEXT(s, ctx_id).avctx;
+ av_init_packet(&avpkt);
+ // packet data will be allocated by encoder
+ avpkt.data = NULL;
+ avpkt.size = 0;
+
+ avctx = s->context[ctx_id].avctx;
if (!avctx) {
- ERR("encode_audio. %d of Context is NULL\n", ctx_id);
+ ERR("encode_audio. %d of context is NULL\n", ctx_id);
+ ret = -1;
} else if (!avctx->codec) {
- ERR("encode_audio. %d of AVCodec is NULL\n", ctx_id);
+ ERR("encode_audio. %d of codec is NULL\n", ctx_id);
+ ret = -1;
} else {
- int bytes_per_sample = 0;
- int nb_samples = 0;
- int audio_in_sample_fmt = AV_SAMPLE_FMT_S16;
- // audio input src can generate a buffer as an int format.
-
- int resample_buf_size = 0;
- int resample_sample_fmt = 0;
-
- bytes_per_sample = av_get_bytes_per_sample(audio_in_sample_fmt);
- TRACE("bytes per sample %d, sample format %d\n", bytes_per_sample, audio_in_sample_fmt);
-
- nb_samples = audio_in_size / (bytes_per_sample * avctx->channels);
- TRACE("nb_samples %d\n", nb_samples);
-
in_frame = avcodec_alloc_frame();
if (!in_frame) {
ERR("encode_audio. failed to allocate in_frame\n");
+ ret = -1;
} else {
- // prepare audio_in frame
- ret = fill_audio_into_frame(avctx, in_frame, audio_in, audio_in_size, nb_samples, audio_in_sample_fmt);
- if (ret < 0) {
- ERR("failed to fill audio into frame\n");
- } else {
- resample_sample_fmt =
- convert_audio_sample_fmt(avctx->codec, avctx->codec_type, 1);
- resample_frame = resample_audio(avctx, in_frame, audio_in_size,
- audio_in_sample_fmt, NULL, &resample_buf_size,
- resample_sample_fmt);
-
- if (resample_frame) {
- av_init_packet(&avpkt);
- avpkt.data = NULL;
- avpkt.size = 0;
-
- ret = avcodec_encode_audio2(avctx, &avpkt, (const AVFrame *)resample_frame, &got_pkt);
- TRACE("encode audio. ret %d got_pkt %d avpkt.size %d frame_number %d\n",
- ret, got_pkt, avpkt.size, avctx->frame_number);
+ AVAudioResampleContext *avr = NULL;
+ int resampled_buffer_size = 0;
+ int resampled_sample_fmt = AV_SAMPLE_FMT_FLTP;
+ int convert_size = 0;
+ int bytes_per_sample = 0;
+ int audio_in_sample_fmt = AV_SAMPLE_FMT_S16;
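+            // the raw guest audio is assumed to be packed signed 16-bit, while
+            // encoders such as aac expect planar float, hence the S16 -> FLTP
+            // resample below.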
+
+ bytes_per_sample = av_get_bytes_per_sample(audio_in_sample_fmt);
+ TRACE("bytes per sample %d, AV_SAMPLE_FMT_S16\n", bytes_per_sample);
+
+ in_frame->nb_samples = audio_in_size / (bytes_per_sample * avctx->channels);
+ TRACE("audio frame. nb_samples %d\n", in_frame->nb_samples);
+
+ in_frame->format = audio_in_sample_fmt;
+ in_frame->channel_layout = avctx->channel_layout;
+
+ // audio_in_frame
+ ret = codec_fill_audio_frame(in_frame, audio_in, audio_in_size,
+ audio_in_sample_fmt, avctx->channels, avctx->frame_size,
+ avctx->channel_layout);
+
+ if (ret == 0) {
+ resampled_frame = avcodec_alloc_frame();
+ if (!resampled_frame) {
+ ERR("encode_audio. failed to allocate resampled_frame\n");
+ ret = -1;
+                } else {
+
+ resampled_frame->nb_samples = in_frame->nb_samples;
+ resampled_frame->format = resampled_sample_fmt;
+ resampled_frame->channel_layout = avctx->channel_layout;
+
+ ret = codec_fill_audio_frame(resampled_frame, NULL,
+ 0, resampled_sample_fmt,
+ avctx->channels, avctx->frame_size,
+ avctx->channel_layout);
+ }
+ }
+
+ if (ret == 0) {
+ avr = avresample_alloc_context();
+ if (avr) {
+ av_opt_set_int(avr, "in_channel_layout", avctx->channel_layout, 0);
+ av_opt_set_int(avr, "in_sample_fmt", AV_SAMPLE_FMT_S16 , 0);
+ av_opt_set_int(avr, "in_sample_rate", avctx->sample_rate, 0);
+ av_opt_set_int(avr, "out_channel_layout", avctx->channel_layout, 0);
+ av_opt_set_int(avr, "out_sample_fmt", resampled_sample_fmt, 0);
+ av_opt_set_int(avr, "out_sample_rate", avctx->sample_rate, 0);
+
+ ret = avresample_open(avr);
+ if (ret == 0) {
+ convert_size =
+ avresample_convert(avr, resampled_frame->data,
+ resampled_buffer_size, resampled_frame->nb_samples,
+ in_frame->data, audio_in_size,
+ in_frame->nb_samples);
+
+ TRACE("resample_audio convert_size %d\n", convert_size);
+ avresample_close(avr);
+ }
+ avresample_free(&avr);
+ } else {
+ ERR("failed to allocate AVAudioResampleContext\n");
+ ret = -1;
}
}
+
+ if (ret == 0) {
+ ret = avcodec_encode_audio2(avctx, &avpkt, (const AVFrame *)resampled_frame, &got_pkt);
+ TRACE("encode audio. ret %d got_pkt %d avpkt.size %d "
+ "frame_number %d coded_frame %p\n", ret, got_pkt,
+ avpkt.size, avctx->frame_number, avctx->coded_frame);
+ }
}
}
}
}
- maru_brill_codec_push_writequeue(s, tempbuf, tempbuf_size, ctx_id, NULL);
-
+ maru_brill_codec_push_writequeue(s, tempbuf, tempbuf_size, ctx_id);
if (in_frame) {
- av_free(in_frame);
+ avcodec_free_frame(&in_frame);
}
- if (resample_frame) {
- av_free(resample_frame->data[0]);
- av_free(resample_frame);
+ if (resampled_frame) {
+ avcodec_free_frame(&resampled_frame);
}
TRACE("[%s] leave:\n", __func__);
switch (avctx->codec_id) {
case CODEC_ID_MPEG4:
case CODEC_ID_VC1:
- TRACE("not using parser\n");
+ TRACE("not using parser.\n");
break;
case CODEC_ID_H264:
if (avctx->extradata_size == 0) {
}
break;
default:
- parser = av_parser_init(avctx->codec_id);
+ parser = av_parser_init (avctx->codec_id);
if (parser) {
- INFO("using parser: %s\n", avctx->codec->name);
+ INFO("using parser. %d\n", avctx->codec_id);
}
break;
}
return parser;
}
+
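+// bottom half scheduled by the worker threads (qemu_bh_schedule in
+// maru_brill_codec_threads); runs in the main loop and raises the PCI
+// interrupt when a finished result is waiting in codec_wq.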
+static void maru_brill_codec_bh_callback(void *opaque)
+{
+ MaruBrillCodecState *s = (MaruBrillCodecState *)opaque;
+
+ TRACE("enter: %s\n", __func__);
+
+ qemu_mutex_lock(&s->context_queue_mutex);
+ if (!QTAILQ_EMPTY(&codec_wq)) {
+ qemu_mutex_unlock(&s->context_queue_mutex);
+
+ TRACE("raise irq\n");
+ pci_set_irq(&s->dev, 1);
+ s->irq_raised = 1;
+ } else {
+ qemu_mutex_unlock(&s->context_queue_mutex);
+ TRACE("codec_wq is empty!!\n");
+ }
+
+ TRACE("leave: %s\n", __func__);
+}
+
+/*
+ * Codec Device APIs
+ */
+static uint64_t maru_brill_codec_read(void *opaque,
+ hwaddr addr,
+ unsigned size)
+{
+ MaruBrillCodecState *s = (MaruBrillCodecState *)opaque;
+ uint64_t ret = 0;
+
+ switch (addr) {
+ case CODEC_CMD_GET_THREAD_STATE:
+ qemu_mutex_lock(&s->context_queue_mutex);
+ if (s->irq_raised) {
+ ret = CODEC_TASK_END;
+ pci_set_irq(&s->dev, 0);
+ s->irq_raised = 0;
+ }
+ qemu_mutex_unlock(&s->context_queue_mutex);
+
+ TRACE("get thread_state. ret: %d\n", ret);
+ break;
+
+ case CODEC_CMD_GET_CTX_FROM_QUEUE:
+ {
+ DeviceMemEntry *head = NULL;
+
+ qemu_mutex_lock(&s->context_queue_mutex);
+ head = QTAILQ_FIRST(&codec_wq);
+ if (head) {
+ ret = head->ctx_id;
+ QTAILQ_REMOVE(&codec_wq, head, node);
+ entry[ret] = head;
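+            // park the element in the per-context slot; the guest then issues
+            // CODEC_CMD_GET_DATA_FROM_QUEUE, which copies it out via
+            // maru_brill_codec_pop_writequeue().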
+ TRACE("get a elem from codec_wq. 0x%x\n", head);
+ } else {
+ ret = 0;
+ }
+ qemu_mutex_unlock(&s->context_queue_mutex);
+
+ TRACE("get a head from a writequeue. head: %x\n", ret);
+ }
+ break;
+
+    case CODEC_CMD_GET_VERSION:
+        ret = CODEC_VERSION;
+        TRACE("codec version: %d\n", (int)ret);
+        break;
+
+    case CODEC_CMD_GET_ELEMENT:
+        ret = maru_brill_codec_query_list(s);
+        break;
+
+    case CODEC_CMD_GET_CONTEXT_INDEX:
+        ret = maru_brill_codec_get_context_index(s);
+        TRACE("get context index: %d\n", (int)ret);
+        break;
+
+    default:
+        ERR("no available command for read. %d\n", (int)addr);
+    }
+
+ return ret;
+}
+
+static void maru_brill_codec_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
+{
+ MaruBrillCodecState *s = (MaruBrillCodecState *)opaque;
+
+ switch (addr) {
+    case CODEC_CMD_API_INDEX:
+        TRACE("set codec_cmd value: %d\n", (int)value);
+        s->ioparam.api_index = value;
+        maru_brill_codec_wakeup_threads(s, value);
+        break;
+
+    case CODEC_CMD_CONTEXT_INDEX:
+        TRACE("set context_index value: %d\n", (int)value);
+        s->ioparam.ctx_index = value;
+        break;
+
+    case CODEC_CMD_DEVICE_MEM_OFFSET:
+        TRACE("set mem_offset value: 0x%x\n", (uint32_t)value);
+        s->ioparam.mem_offset = value;
+        break;
+
+ case CODEC_CMD_RELEASE_CONTEXT:
+ {
+ int ctx_index = (int32_t)value;
+
+ if (s->context[ctx_index].occupied_thread) {
+ s->context[ctx_index].requested_close = true;
+ INFO("make running thread to handle deinit\n");
+ } else {
+ maru_brill_codec_release_context(s, ctx_index);
+ }
+ }
+ break;
+
+ case CODEC_CMD_GET_DATA_FROM_QUEUE:
+ maru_brill_codec_pop_writequeue(s, (uint32_t)value);
+ break;
+
+ default:
+ ERR("no available command for write. %d\n", addr);
+ }
+}
+
+static const MemoryRegionOps maru_brill_codec_mmio_ops = {
+ .read = maru_brill_codec_read,
+ .write = maru_brill_codec_write,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ .unaligned = false
+ },
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static int maru_brill_codec_initfn(PCIDevice *dev)
+{
+ MaruBrillCodecState *s = DO_UPCAST(MaruBrillCodecState, dev, dev);
+ uint8_t *pci_conf = s->dev.config;
+
+ INFO("device initialization.\n");
+ pci_config_set_interrupt_pin(pci_conf, 1);
+
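+    // BAR 0: 32MB of prefetchable RAM shared with the guest for codec payloads;
+    // BAR 1: the 256-byte MMIO register window.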
+ memory_region_init_ram(&s->vram, OBJECT(s), "maru_brill_codec.vram", CODEC_MEM_SIZE);
+ s->vaddr = (uint8_t *)memory_region_get_ram_ptr(&s->vram);
+
+ memory_region_init_io(&s->mmio, OBJECT(s), &maru_brill_codec_mmio_ops, s,
+ "maru_brill_codec.mmio", CODEC_REG_SIZE);
+
+ pci_register_bar(&s->dev, 0, PCI_BASE_ADDRESS_MEM_PREFETCH, &s->vram);
+ pci_register_bar(&s->dev, 1, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->mmio);
+
+ qemu_mutex_init(&s->context_mutex);
+ qemu_mutex_init(&s->context_queue_mutex);
+ qemu_mutex_init(&s->ioparam_queue_mutex);
+
+ maru_brill_codec_get_cpu_cores(s);
+ maru_brill_codec_threads_create(s);
+
+ // register a function to qemu bottom-halves to switch context.
+ s->codec_bh = qemu_bh_new(maru_brill_codec_bh_callback, s);
+
+ return 0;
+}
+
+static void maru_brill_codec_exitfn(PCIDevice *dev)
+{
+ MaruBrillCodecState *s = DO_UPCAST(MaruBrillCodecState, dev, dev);
+ INFO("device exit\n");
+
+ qemu_mutex_destroy(&s->context_mutex);
+ qemu_mutex_destroy(&s->context_queue_mutex);
+ qemu_mutex_destroy(&s->ioparam_queue_mutex);
+
+ qemu_bh_delete(s->codec_bh);
+
+ memory_region_destroy(&s->vram);
+ memory_region_destroy(&s->mmio);
+}
+
+static void maru_brill_codec_reset(DeviceState *d)
+{
+ MaruBrillCodecState *s = (MaruBrillCodecState *)d;
+ INFO("device reset\n");
+
+ s->irq_raised = 0;
+
+ memset(&s->context, 0, sizeof(CodecContext) * CODEC_CONTEXT_MAX);
+ memset(&s->ioparam, 0, sizeof(CodecParam));
+
+ maru_brill_codec_pixfmt_info_init();
+}
+
+static void maru_brill_codec_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+ k->init = maru_brill_codec_initfn;
+ k->exit = maru_brill_codec_exitfn;
+ k->vendor_id = PCI_VENDOR_ID_TIZEN;
+ k->device_id = PCI_DEVICE_ID_VIRTUAL_BRILL_CODEC;
+ k->class_id = PCI_CLASS_OTHERS;
+ dc->reset = maru_brill_codec_reset;
+ dc->desc = "Virtual new codec device for Tizen emulator";
+}
+
+static TypeInfo codec_device_info = {
+ .name = CODEC_DEVICE_NAME,
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(MaruBrillCodecState),
+ .class_init = maru_brill_codec_class_init,
+};
+
+static void codec_register_types(void)
+{
+ type_register_static(&codec_device_info);
+}
+
+type_init(codec_register_types)