*
*/
-#include "qemu-common.h"
-
#include "maru_brill_codec.h"
-#include "osutil.h"
/* define debug channel */
-// brill: brilliant abbreviation
MULTI_DEBUG_CHANNEL(qemu, brillcodec);
// device
#define CODEC_DEVICE_NAME "brilcodec"
#define CODEC_VERSION 1
+// device memory
+#define CODEC_META_DATA_SIZE (256)
+
#define CODEC_MEM_SIZE (32 * 1024 * 1024)
#define CODEC_REG_SIZE (256)
-// FFmpeg
+// libav
#define GEN_MASK(x) ((1 << (x)) - 1)
#define ROUND_UP_X(v, x) (((v) + GEN_MASK(x)) & ~GEN_MASK(x))
#define ROUND_UP_2(x) ROUND_UP_X(x, 1)
#define DEFAULT_VIDEO_GOP_SIZE 15
-// debug
-#ifdef CODEC_DEBUG
-#include <sys/time.h>
-#define CODEC_CURRENT_TIME \
-{ \
- struct timeval now; \
- gettimeofday(&now, NULL); \
- INFO("[%s][%d] current time %ld:%ld\n", __func__, __LINE__, now.tv_sec, now.tv_usec); \
-}
-#else
-#define CODEC_CURRENT_TIME
-#endif
-
-// device memory
-#define CODEC_META_DATA_SIZE (256)
+// define a queue to manage ioparam, context data
typedef struct DeviceMemEntry {
uint8_t *buf;
uint32_t buf_size;
QTAILQ_ENTRY(DeviceMemEntry) node;
} DeviceMemEntry;
-static DeviceMemEntry *entry[CODEC_CONTEXT_MAX];
-
typedef struct CodecParamStg {
void *buf;
QTAILQ_ENTRY(CodecParamStg) node;
static QTAILQ_HEAD(ioparam_queue, CodecParamStg) ioparam_queue =
QTAILQ_HEAD_INITIALIZER(ioparam_queue);
+static DeviceMemEntry *entry[CODEC_CONTEXT_MAX];
+
+// pixel info
typedef struct PixFmtInfo {
uint8_t x_chroma_shift;
uint8_t y_chroma_shift;
static PixFmtInfo pix_fmt_info[PIX_FMT_NB];
-// static void maru_brill_codec_reset_parser_info(MaruBrillCodecState *s, int32_t ctx_index);
+// thread
+static int worker_thread_cnt = 0;
+#define DEFAULT_WORKER_THREAD_CNT 4
+
+static void *maru_brill_codec_threads(void *opaque);
+// static void maru_brill_codec_reset_parser_info(MaruBrillCodecState *s, int32_t ctx_index);
static int maru_brill_codec_query_list(MaruBrillCodecState *s);
-static void maru_brill_codec_alloc_context(MaruBrillCodecState *s, int index);
static void maru_brill_codec_release_context(MaruBrillCodecState *s, int32_t value);
// codec functions
-static void codec_init(MaruBrillCodecState *s, int ctx_id, int f_id);
-static void codec_deinit(MaruBrillCodecState *s, int ctx_id, int f_id);
-static void codec_decode_video(MaruBrillCodecState *s, int ctx_id, int f_id);
-static void codec_encode_video(MaruBrillCodecState *s, int ctx_id, int f_id);
-static void codec_decode_audio(MaruBrillCodecState *s, int ctx_id, int f_id);
-static void codec_encode_audio(MaruBrillCodecState *s, int ctx_id, int f_id);
-static void codec_picture_copy(MaruBrillCodecState *s, int ctx_id, int f_id);
-
-typedef void (*CodecFuncEntry)(MaruBrillCodecState *, int, int);
+static bool codec_init(MaruBrillCodecState *s, int ctx_id, int f_id);
+static bool codec_deinit(MaruBrillCodecState *s, int ctx_id, int f_id);
+static bool codec_decode_video(MaruBrillCodecState *s, int ctx_id, int f_id);
+static bool codec_encode_video(MaruBrillCodecState *s, int ctx_id, int f_id);
+static bool codec_decode_audio(MaruBrillCodecState *s, int ctx_id, int f_id);
+static bool codec_encode_audio(MaruBrillCodecState *s, int ctx_id, int f_id);
+static bool codec_picture_copy(MaruBrillCodecState *s, int ctx_id, int f_id);
+
+typedef bool (*CodecFuncEntry)(MaruBrillCodecState *, int, int);
static CodecFuncEntry codec_func_handler[] = {
codec_init,
codec_decode_video,
int64_t pts, int64_t dts, int64_t pos);
#endif
-static int worker_thread_cnt = 0;
-static void *maru_brill_codec_threads(void *opaque);
-
static void *maru_brill_codec_pop_readqueue(MaruBrillCodecState *s, int32_t file_index);
static void maru_brill_codec_pop_writequeue(MaruBrillCodecState *s, int32_t file_index);
static void maru_brill_codec_push_readqueue(MaruBrillCodecState *s, CodecParam *ioparam);
static void maru_brill_codec_add_ioparam_queue(MaruBrillCodecState *s, void *ioparam);
-// threads
static void maru_brill_codec_get_cpu_cores(void)
{
worker_thread_cnt = get_number_of_processors();
+ if (worker_thread_cnt == 1) {
+ worker_thread_cnt = DEFAULT_WORKER_THREAD_CNT;
+ }
+
TRACE("number of threads: %d\n", worker_thread_cnt);
}
TRACE("enter: %s\n", __func__);
- pthread = g_malloc0(sizeof(QemuThread) * worker_thread_cnt);
+ pthread = g_malloc(sizeof(QemuThread) * worker_thread_cnt);
if (!pthread) {
- ERR("Failed to allocate threadpool memory.\n");
+ ERR("failed to allocate threadpool memory.\n");
return;
}
+
qemu_cond_init(&s->threadpool.cond);
qemu_mutex_init(&s->threadpool.mutex);
- s->threadpool.is_running = 1;
+
+ s->is_thread_running = 1;
for (index = 0; index < worker_thread_cnt; index++) {
qemu_thread_create(&pthread[index],
}
s->threadpool.threads = pthread;
+
TRACE("leave: %s\n", __func__);
}
TRACE("enter: %s\n", __func__);
/* stop to run dedicated threads. */
- s->threadpool.is_running = 0;
+ s->is_thread_running = 0;
for (index = 0; index < worker_thread_cnt; index++) {
qemu_thread_join(&s->threadpool.threads[index]);
qemu_cond_destroy(&s->threadpool.cond);
if (s->threadpool.threads) {
- g_free (s->threadpool.threads);
+ g_free(s->threadpool.threads);
s->threadpool.threads = NULL;
}
TRACE("no buffer from guest\n");
break;
}
+
maru_brill_codec_add_ioparam_queue(s, (void *)ioparam);
qemu_cond_signal(&s->threadpool.cond);
}
TRACE("enter: %s\n", __func__);
qemu_mutex_lock(&s->context_mutex);
- while (s->threadpool.is_running) {
+ while (s->is_thread_running) {
int ctx_id, f_id, api_id;
CodecParamStg *elem = NULL;
qemu_mutex_lock(&s->ioparam_queue_mutex);
elem = QTAILQ_FIRST(&ioparam_queue);
- qemu_mutex_unlock(&s->ioparam_queue_mutex);
- if (!elem) {
+ if (elem) {
+ QTAILQ_REMOVE(&ioparam_queue, elem, node);
+ qemu_mutex_unlock(&s->ioparam_queue_mutex);
+ } else {
+ qemu_mutex_unlock(&s->ioparam_queue_mutex);
continue;
}
- qemu_mutex_lock(&s->ioparam_queue_mutex);
- QTAILQ_REMOVE(&ioparam_queue, elem, node);
- qemu_mutex_unlock(&s->ioparam_queue_mutex);
-
api_id = ((CodecParam *)elem->buf)->api_index;
ctx_id = ((CodecParam *)elem->buf)->ctx_index;
f_id = ((CodecParam *)elem->buf)->file_index;
TRACE("api_id: %d ctx_id: %d f_id: %x\n", api_id, ctx_id, f_id);
+
if (elem->buf) {
+ /* free ioparam pointer from maru_brill_codec_wakeup_threads */
TRACE("release a buffer of CodecParam\n");
g_free(elem->buf);
}
g_free(elem);
}
- codec_func_handler[api_id](s, ctx_id, f_id);
-
- qemu_mutex_lock(&s->threadpool.mutex);
- s->threadpool.state = CODEC_TASK_END;
- qemu_mutex_unlock(&s->threadpool.mutex);
+ if (!codec_func_handler[api_id](s, ctx_id, f_id)) {
+ ERR("codec_func failure or double access.\n");
+ continue;
+ }
if (api_id == CODEC_DEINIT) {
TRACE("deinit func does not need to raise interrupt.\n");
} else {
TRACE("switch context to raise interrupt.\n");
+// qemu_mutex_lock(&s->threadpool.mutex);
+// s->thread_state = CODEC_TASK_END;
qemu_bh_schedule(s->codec_bh);
+// qemu_mutex_unlock(&s->threadpool.mutex);
}
}
qemu_mutex_unlock(&s->context_mutex);
+
maru_brill_codec_thread_exit(s);
TRACE("leave: %s\n", __func__);
return NULL;
}
- qemu_mutex_lock(&s->context_queue_mutex);
- QTAILQ_REMOVE(&codec_rq, elem, node);
- qemu_mutex_unlock(&s->context_queue_mutex);
-
return elem;
}
qemu_mutex_lock(&s->context_queue_mutex);
elem = QTAILQ_FIRST(&codec_rq);
+ if (elem) {
+ QTAILQ_REMOVE(&codec_rq, elem, node);
+ }
qemu_mutex_unlock(&s->context_queue_mutex);
+
if (!elem) {
ERR("codec_rq is empty.\n");
return NULL;
ERR("failed to find a proper entry via file_index. %x\n", file_index);
return;
}
+
TRACE("pop_writeqeue. context index: %d\n", ctx_idx);
elem = entry[ctx_idx];
if (elem) {
mem_offset = s->ioparam.mem_offset;
- TRACE("write data as many as %d to guest, mem_offset: 0x%x\n",
- elem->buf_size, mem_offset);
- memcpy(s->vaddr + mem_offset, elem->buf, elem->buf_size);
+ // check corrupted mem_offset
+ if (mem_offset < CODEC_MEM_SIZE) {
+ if (elem->buf) {
+ TRACE("write data as many as %d to guest, mem_offset: 0x%x\n",
+ elem->buf_size, mem_offset);
+ memcpy(s->vaddr + mem_offset, elem->buf, elem->buf_size);
- if (elem->buf) {
- TRACE("pop_writequeue. release buffer: %p\n", elem->buf);
- g_free(elem->buf);
+ TRACE("pop_writequeue. release buffer: %p\n", elem->buf);
+ g_free(elem->buf);
+ }
+ } else {
+ TRACE("mem_offset is corrupted!!\n");
}
TRACE("pop_writequeue. release elem: %p\n", elem);
g_free(elem);
+
entry[ctx_idx] = NULL;
} else {
- ERR("failed to get ioparam from file_index %x\n", file_index);
+ TRACE("there is no buffer to copy data to guest\n");
+// ERR("failed to get ioparam from file_index %x\n", file_index);
}
TRACE("leave: %s\n", __func__);
TRACE("enter: %s\n", __func__);
qemu_mutex_lock(&s->threadpool.mutex);
-
for (ctx_id = 1; ctx_id < CODEC_CONTEXT_MAX; ctx_id++) {
if (s->context[ctx_id].file_index == file_index) {
TRACE("reset %d context\n", ctx_id);
qemu_mutex_unlock(&s->threadpool.mutex);
if (ctx_id == CODEC_CONTEXT_MAX) {
- ERR("cannot find a context for 0x%x\n", file_index);
+ WARN("cannot find a context for 0x%x\n", file_index);
} else {
codec_deinit(s, ctx_id, file_index);
- // TODO: check how long takes time to iterate.
QTAILQ_FOREACH_SAFE(rq_elem, &codec_rq, node, next) {
if (rq_elem && rq_elem->buf_id == file_index) {
TRACE("remove unused node from codec_rq. file: %p\n", file_index);
// maru_brill_codec_reset_parser_info(s, ctx_id);
}
-// qemu_mutex_unlock(&s->threadpool.mutex);
TRACE("leave: %s\n", __func__);
}
}
// allocate avcontext and avframe struct.
-static void maru_brill_codec_alloc_context(MaruBrillCodecState *s, int index)
+static AVCodecContext *maru_brill_codec_alloc_context(MaruBrillCodecState *s, int index)
{
TRACE("enter: %s\n", __func__);
s->context[index].parser_use = false;
TRACE("leave: %s\n", __func__);
+
+ return s->context[index].avctx;
}
static AVCodec *maru_brill_codec_find_avcodec(uint8_t *mem_buf)
struct video_data video;
int size = 0;
+ // TODO: if len is negative, copy len to guest.
memcpy(mem_buf, &len, sizeof(len));
size = sizeof(len);
memcpy(mem_buf + size, &got_pic_ptr, sizeof(got_pic_ptr));
size += sizeof(got_pic_ptr);
- deserialize_video_data(avctx, &video);
- memcpy(mem_buf + size, &video, sizeof(struct video_data));
+ if (avctx) {
+ deserialize_video_data(avctx, &video);
+ memcpy(mem_buf + size, &video, sizeof(struct video_data));
+ }
}
// write the result of codec_decode_audio
}
// codec functions
-static void codec_init(MaruBrillCodecState *s, int ctx_id, int f_id)
+static bool codec_init(MaruBrillCodecState *s, int ctx_id, int f_id)
{
AVCodecContext *avctx = NULL;
- AVCodecParserContext *parser = NULL;
AVCodec *codec = NULL;
int size = 0, ret = -1;
uint8_t *meta_buf = NULL;
TRACE("enter: %s\n", __func__);
+ // assign meta_buf
+ meta_buf = s->vaddr + ((ctx_id - 1) * CODEC_META_DATA_SIZE);
+ meta_buf += 8; // skipped header.
- maru_brill_codec_alloc_context(s, ctx_id);
- avctx = s->context[ctx_id].avctx;
+ // allocate AVCodecContext
+ avctx = maru_brill_codec_alloc_context(s, ctx_id);
if (!avctx) {
ERR("[%d] failed to allocate context.\n", __LINE__);
- return;
- }
-
- s->context[ctx_id].file_index = f_id;
-
- meta_buf = s->vaddr + ((ctx_id - 1) * CODEC_META_DATA_SIZE);
- meta_buf += 8; // skipped header.
+ ret = -1;
+ } else {
+ s->context[ctx_id].file_index = f_id;
- codec = maru_brill_codec_find_avcodec(meta_buf);
- if (codec) {
- size = sizeof(int32_t) + 32; // buffer size of codec_name
- read_codec_init_data(avctx, meta_buf + size);
+ codec = maru_brill_codec_find_avcodec(meta_buf);
+ if (codec) {
+ size = sizeof(int32_t) + 32; // buffer size of codec_name
+ read_codec_init_data(avctx, meta_buf + size);
- ret = avcodec_open(avctx, codec);
- INFO("avcodec_open done: %d\n", ret);
+ ret = avcodec_open(avctx, codec);
+ INFO("avcodec_open done: %d\n", ret);
- s->context[ctx_id].opened = true;
- } else {
- ERR("failed to find codec.\n");
+ s->context[ctx_id].opened = true;
+ s->context[ctx_id].parser_ctx = maru_brill_codec_parser_init(avctx);
+ } else {
+ ERR("failed to find codec.\n");
+ }
}
+ // return the result of avcodec_open
memcpy(meta_buf, &ret, sizeof(ret));
size = sizeof(ret);
if (ret < 0) {
maru_brill_codec_push_writequeue(s, NULL, 0, ctx_id, f_id);
- parser = maru_brill_codec_parser_init(avctx);
- s->context[ctx_id].parser_ctx = parser;
-
TRACE("leave: %s\n", __func__);
+
+ return true;
}
-static void codec_deinit(MaruBrillCodecState *s, int ctx_id, int f_id)
+static bool codec_deinit(MaruBrillCodecState *s, int ctx_id, int f_id)
{
- AVCodecContext *avctx;
- AVFrame *frame;
- AVCodecParserContext *parserctx;
+ AVCodecContext *avctx = NULL;
+ AVFrame *frame = NULL;
+ AVCodecParserContext *parserctx = NULL;
TRACE("enter: %s\n", __func__);
- qemu_mutex_lock(&s->threadpool.mutex);
- if (!s->context[ctx_id].opened) {
- qemu_mutex_unlock(&s->threadpool.mutex);
- INFO("%d of context has already been closed.\n", ctx_id);
- return;
- } else {
- qemu_mutex_unlock(&s->threadpool.mutex);
- INFO("%d of context has not been closed yet.\n", ctx_id);
- }
-
avctx = s->context[ctx_id].avctx;
frame = s->context[ctx_id].frame;
parserctx = s->context[ctx_id].parser_ctx;
if (!avctx || !frame) {
- ERR("%d of context or frame is NULL.\n", ctx_id);
+ INFO("%d of context has already been closed\n", ctx_id);
+ return false;
} else {
+ qemu_mutex_lock(&s->threadpool.mutex);
+ if (!s->context[ctx_id].opened) {
+ INFO("%d of context has already been closed\n", ctx_id);
+ qemu_mutex_unlock(&s->threadpool.mutex);
+ return false;
+ }
avcodec_close(avctx);
INFO("close avcontext of %d\n", ctx_id);
s->context[ctx_id].opened = false;
+ qemu_mutex_unlock(&s->threadpool.mutex);
if (avctx->extradata) {
TRACE("free context extradata\n");
}
TRACE("leave: %s\n", __func__);
+
+ return true;
}
-static void codec_decode_video(MaruBrillCodecState *s, int ctx_id, int f_id)
+static bool codec_decode_video(MaruBrillCodecState *s, int ctx_id, int f_id)
{
AVCodecContext *avctx = NULL;
AVFrame *picture = NULL;
uint8_t *inbuf = NULL;
int inbuf_size, idx, size = 0;
int64_t in_offset;
-#if 0
- AVCodecParserContext *pctx = NULL;
- int parser_ret, bsize;
- uint8_t *bdata;
-#endif
+
DeviceMemEntry *elem = NULL;
uint8_t *meta_buf = NULL;
TRACE("enter: %s\n", __func__);
- avctx = s->context[ctx_id].avctx;
- picture = s->context[ctx_id].frame;
-// pctx = s->context[ctx_id].parser_ctx;
- if (!avctx || !picture) {
- ERR("%d of AVCodecContext or AVFrame is NULL.\n", ctx_id);
- return;
- }
-
- if (!avctx->codec) {
- ERR("%d of AVCodec is NULL.\n", ctx_id);
- return;
- }
-
meta_buf = s->vaddr + ((ctx_id - 1) * CODEC_META_DATA_SIZE);
meta_buf += 8; // skipped header.
// inbuf = elem->buf;
inbuf = g_malloc(inbuf_size);
memcpy(inbuf, elem->buf, inbuf_size);
- } else if (elem && inbuf_size > 0) {
+ } else if (elem) {
inbuf_size = 0;
- } else {
TRACE("decode_video. no input buffer.\n");
+ } else {
+ ERR("wrong input data\n");
+ return false;
}
-#if 0
- // TODO: not sure that it needs to parser a packet or not.
- if (pctx) {
- parser_ret =
- maru_brill_codec_parser_parse (pctx, avctx, inbuf, inbuf_size,
- idx, idx, in_offset);
- &bdata, &bsize, idx, idx, in_offset);
- INFO("returned parser_ret: %d.\n", parser_ret);
- }
-#endif
-
- memset(&avpkt, 0x00, sizeof(AVPacket));
+ av_init_packet(&avpkt);
avpkt.data = inbuf;
avpkt.size = inbuf_size;
- len = avcodec_decode_video2(avctx, picture, &got_pic_ptr, &avpkt);
- if (len < 0) {
- ERR("failed to decode video.\n");
+ avctx = s->context[ctx_id].avctx;
+ picture = s->context[ctx_id].frame;
+ if (!avctx || !picture) {
+ ERR("%d of AVCodecContext or AVFrame is NULL.\n", ctx_id);
+ len = -1;
+ } else if (!avctx->codec) {
+ ERR("%d of AVCodec is NULL.\n", ctx_id);
+ len = -1;
+ } else {
+ len = avcodec_decode_video2(avctx, picture, &got_pic_ptr, &avpkt);
+ if (len < 0) {
+ ERR("failed to decode video.\n");
+ }
}
+
TRACE("after decoding video. len: %d, have_data: %d\n", len);
if (inbuf) {
TRACE("decode_video input bufffer.\n");
maru_brill_codec_push_writequeue(s, NULL, 0, ctx_id, f_id);
TRACE("leave: %s\n", __func__);
+
+ return true;
}
-static void codec_picture_copy (MaruBrillCodecState *s, int ctx_id, int f_id)
+static bool codec_picture_copy (MaruBrillCodecState *s, int ctx_id, int f_id)
{
AVCodecContext *avctx;
AVPicture *src;
AVPicture dst;
- uint8_t *out_buffer, *tempbuf = NULL;
-;
- int pict_size;
+ uint8_t *out_buffer = NULL, *tempbuf = NULL;
+ int pict_size = 0;
TRACE("enter: %s\n", __func__);
src = (AVPicture *)s->context[ctx_id].frame;
if (!avctx || !src) {
ERR("%d of AVCodecContext or AVFrame is NULL.\n", ctx_id);
- return;
- }
-
- if (!avctx->codec) {
+ } else if (!avctx->codec) {
ERR("%d of AVCodec is NULL.\n", ctx_id);
- return;
- }
-
- TRACE("decoded image. pix_fmt: %d width: %d, height: %d\n",
- avctx->pix_fmt, avctx->width, avctx->height);
-
- pict_size =
- maru_brill_codec_get_picture_size(&dst, NULL, avctx->pix_fmt,
- avctx->width, avctx->height, false);
- if ((pict_size) < 0) {
- ERR("picture size: %d\n", pict_size);
- return;
- }
- TRACE("picture size: %d\n", pict_size);
-
- av_picture_copy(&dst, src, avctx->pix_fmt, avctx->width, avctx->height);
-
- tempbuf = g_malloc0(pict_size);
- if (!tempbuf) {
- ERR("failed to allocate a picture buffer. size: %d\n", pict_size);
- pict_size = 0;
} else {
- out_buffer = dst.data[0];
- memcpy(tempbuf, out_buffer, pict_size);
- av_free(out_buffer);
- }
+ TRACE("decoded image. pix_fmt: %d width: %d, height: %d\n",
+ avctx->pix_fmt, avctx->width, avctx->height);
+
+ pict_size =
+ maru_brill_codec_get_picture_size(&dst, NULL, avctx->pix_fmt,
+ avctx->width, avctx->height, false);
+ if ((pict_size) < 0) {
+ ERR("picture size: %d\n", pict_size);
+ } else {
+ TRACE("picture size: %d\n", pict_size);
+ av_picture_copy(&dst, src, avctx->pix_fmt,
+ avctx->width, avctx->height);
+ tempbuf = g_malloc0(pict_size);
+ if (!tempbuf) {
+ ERR("failed to allocate a picture buffer. size: %d\n", pict_size);
+ } else {
+ out_buffer = dst.data[0];
+ memcpy(tempbuf, out_buffer, pict_size);
+ }
+ av_free(out_buffer);
+ }
+ }
maru_brill_codec_push_writequeue(s, tempbuf, pict_size, ctx_id, f_id);
TRACE("leave: %s\n", __func__);
+
+ return true;
}
-static void codec_decode_audio(MaruBrillCodecState *s, int ctx_id, int f_id)
+static bool codec_decode_audio(MaruBrillCodecState *s, int ctx_id, int f_id)
{
AVCodecContext *avctx;
AVPacket avpkt;
- int16_t *samples;
+ int16_t *samples = NULL;
uint8_t *inbuf = NULL;
- int inbuf_size;
- int len, frame_size_ptr = 0;
-#if 0
- uint8_t *parser_buf;
- bool parser_use;
-#endif
+ int inbuf_size = 0;
+ int len = 0, frame_size_ptr = 0;
uint8_t *meta_buf = NULL;
DeviceMemEntry *elem = NULL;
uint8_t *tempbuf = NULL;
TRACE("enter: %s\n", __func__);
- avctx = s->context[ctx_id].avctx;
- if (!avctx) {
- ERR("[%s] %d of AVCodecContext is NULL!\n", __func__, ctx_id);
- return;
- }
-
- if (!avctx->codec) {
- ERR("%d of AVCodec is NULL.\n", ctx_id);
- return;
- }
-
-#if 0
- if (s->context[ctx_id].parser_ctx) {
- parser_buf = s->context[ctx_id].parser_buf;
- parser_use = s->context[ctx_id].parser_use;
- }
-#endif
-
meta_buf = s->vaddr + ((ctx_id - 1) * CODEC_META_DATA_SIZE);
meta_buf += 8; // skipped header.
memcpy(&inbuf_size, meta_buf, sizeof(inbuf_size));
TRACE("before decoding audio. inbuf_size: %d\n", inbuf_size);
-#if 0
- if (parser_buf && parser_use) {
- TRACE("[%s] use parser, buf:%p codec_id:%x\n",
- __func__, parser_buf, avctx->codec_id);
- buf = parser_buf;
- } else if (buf_size > 0) {
-#endif
elem = get_device_mem_ptr(s, f_id);
if (elem && elem->buf) {
inbuf = elem->buf;
- } else if (elem && inbuf_size > 0 ) {
+ } else if (elem) {
inbuf_size = 0;
- } else {
TRACE("decode_audio. no input buffer.\n");
+ } else {
+ ERR("wrong input data\n");
+ return false;
}
av_init_packet(&avpkt);
avpkt.data = inbuf;
avpkt.size = inbuf_size;
- frame_size_ptr = AVCODEC_MAX_AUDIO_FRAME_SIZE;
- samples = av_mallocz(frame_size_ptr);
- if (!samples) {
- ERR("failed to allocate an outbuf of audio.\n");
- len = -1;
+ avctx = s->context[ctx_id].avctx;
+ if (!avctx) {
+ ERR("[%s] %d of AVCodecContext is NULL!\n", __func__, ctx_id);
+ write_codec_decode_audio_data(0, 0, 0, -1, 0, meta_buf);
+ } else if (!avctx->codec) {
+ ERR("%d of AVCodec is NULL.\n", ctx_id);
+ write_codec_decode_audio_data(0, 0, 0, -1, 0, meta_buf);
} else {
- len =
- avcodec_decode_audio3(avctx, samples, &frame_size_ptr, &avpkt);
- if (len < 0) {
- ERR("failed to decode audio\n", len);
- }
- TRACE("decoding audio. len %d, channel_layout %lld, frame_size %d\n",
- len, avctx->channel_layout, frame_size_ptr);
+ frame_size_ptr = AVCODEC_MAX_AUDIO_FRAME_SIZE;
+ samples = av_mallocz(frame_size_ptr);
+ if (!samples) {
+ ERR("failed to allocate an outbuf of audio.\n");
+ len = -1;
+ } else {
+ len =
+ avcodec_decode_audio3(avctx, samples, &frame_size_ptr, &avpkt);
- if (inbuf) {
- TRACE("release decode_audio inbuf\n");
- g_free(inbuf);
- g_free(elem);
+ TRACE("audio. len %d, channel_layout %lld, frame_size %d\n",
+ len, avctx->channel_layout, frame_size_ptr);
}
+
+ write_codec_decode_audio_data(avctx->sample_rate, avctx->channels,
+ avctx->channel_layout, len,
+ frame_size_ptr, meta_buf);
}
- write_codec_decode_audio_data(avctx->sample_rate, avctx->channels,
- avctx->channel_layout, len,
- frame_size_ptr, meta_buf);
+ if (inbuf) {
+ TRACE("release decode_audio inbuf\n");
+ g_free(inbuf);
+ g_free(elem);
+ }
- if (len > 0) {
- tempbuf = g_malloc0(frame_size_ptr);
- if (!tempbuf) {
- ERR("decode_audio. failed to allocate memory "
- "len %d, have_data: %d\n", len, frame_size_ptr);
- } else {
- memcpy(tempbuf, samples, frame_size_ptr);
- }
- av_free(samples);
- } else {
+ if (len < 0) {
+ ERR("failed to decode audio\n", len);
frame_size_ptr = 0;
+ } else {
+ if (frame_size_ptr > 0) {
+ tempbuf = g_malloc0(frame_size_ptr);
+ if (!tempbuf) {
+ ERR("decode_audio. failed to allocate memory "
+ "len %d, have_data: %d\n", len, frame_size_ptr);
+ } else {
+ memcpy(tempbuf, samples, frame_size_ptr);
+ }
+ av_free(samples);
+ }
}
+
maru_brill_codec_push_writequeue(s, tempbuf, frame_size_ptr, ctx_id, f_id);
-#if 0
- if (parser_buf && parser_use) {
- TRACE("[%s] free parser buf\n", __func__);
- av_free(avpkt.data);
- s->context[ctx_id].parser_buf = NULL;
- }
-#endif
TRACE("leave: %s\n", __func__);
+
+ return true;
}
-static void codec_encode_video(MaruBrillCodecState *s, int ctx_id, int f_id)
+static bool codec_encode_video(MaruBrillCodecState *s, int ctx_id, int f_id)
{
AVCodecContext *avctx = NULL;
AVFrame *pict = NULL;
TRACE("enter: %s\n", __func__);
- avctx = s->context[ctx_id].avctx;
- pict = s->context[ctx_id].frame;
- if (!avctx || !pict) {
- ERR("%d of context or frame is NULL\n", ctx_id);
- return;
- }
-
- if (!avctx->codec) {
- ERR("%d of AVCodec is NULL.\n", ctx_id);
- return;
- }
-
meta_buf = s->vaddr + ((ctx_id - 1) * CODEC_META_DATA_SIZE);
meta_buf += 8; // skipped header.
elem = get_device_mem_ptr(s, f_id);
if (elem && elem->buf) {
inbuf = elem->buf;
- } else if (elem && inbuf_size > 0) {
+ } else if (elem) {
+ TRACE("encode_video. no input buffer.\n");
inbuf_size = 0;
} else {
- TRACE("encode_video. no input buffer.\n");
+ ERR("wrong input data\n");
+ return false;
}
- TRACE("pixel format: %d inbuf: %p, picture data: %p\n",
- avctx->pix_fmt, inbuf, pict->data[0]);
-
- ret =
- maru_brill_codec_get_picture_size((AVPicture *)pict, inbuf, avctx->pix_fmt,
- avctx->width, avctx->height, true);
- if (ret < 0) {
- ERR("after avpicture_fill, ret:%d\n", ret);
+ avctx = s->context[ctx_id].avctx;
+ pict = s->context[ctx_id].frame;
+ if (!avctx || !pict) {
+ ERR("%d of context or frame is NULL\n", ctx_id);
+ len = -1;
+ } else if (!avctx->codec) {
+ ERR("%d of AVCodec is NULL.\n", ctx_id);
len = -1;
} else {
- if (avctx->time_base.num == 0) {
- pict->pts = AV_NOPTS_VALUE;
- } else {
- AVRational bq = {1, (G_USEC_PER_SEC * G_GINT64_CONSTANT(1000))};
- pict->pts = av_rescale_q(in_timestamp, bq, avctx->time_base);
- }
- TRACE("before encode video, ticks_per_frame:%d, pts:%lld\n",
- avctx->ticks_per_frame, pict->pts);
-
- outbuf_size = (avctx->width * avctx->height * 6) + FF_MIN_BUFFER_SIZE;
- outbuf = g_malloc0(outbuf_size);
- if (!outbuf) {
- ERR("failed to allocate a buffer of encoding video.\n");
+ TRACE("pixel format: %d inbuf: %p, picture data: %p\n",
+ avctx->pix_fmt, inbuf, pict->data[0]);
+
+ ret =
+ maru_brill_codec_get_picture_size((AVPicture *)pict, inbuf,
+ avctx->pix_fmt, avctx->width,
+ avctx->height, true);
+ if (ret < 0) {
+ ERR("after avpicture_fill, ret:%d\n", ret);
len = -1;
} else {
- len = avcodec_encode_video(avctx, outbuf, outbuf_size, pict);
- if (len < 0) {
- ERR("failed to encode video. len: %d\n", len);
+ if (avctx->time_base.num == 0) {
+ pict->pts = AV_NOPTS_VALUE;
+ } else {
+ AVRational bq =
+ {1, (G_USEC_PER_SEC * G_GINT64_CONSTANT(1000))};
+ pict->pts = av_rescale_q(in_timestamp, bq, avctx->time_base);
}
+ TRACE("before encode video, ticks_per_frame:%d, pts:%lld\n",
+ avctx->ticks_per_frame, pict->pts);
+
+ outbuf_size =
+ (avctx->width * avctx->height * 6) + FF_MIN_BUFFER_SIZE;
+ outbuf = g_malloc0(outbuf_size);
+ if (!outbuf) {
+ ERR("failed to allocate a buffer of encoding video.\n");
+ len = -1;
+ } else {
+ len = avcodec_encode_video(avctx, outbuf, outbuf_size, pict);
- TRACE("encode video, len:%d, pts:%lld, outbuf size: %d\n",
- len, pict->pts, outbuf_size);
- if (inbuf) {
- TRACE("[%d] release used read bufffer.\n", __LINE__);
- g_free(inbuf);
- g_free(elem);
- }
+ TRACE("encode video! len:%d pts:%lld outbuf:%p outbuf size: %d\n",
+ len, pict->pts, outbuf, outbuf_size);
+ }
}
}
+ if (inbuf) {
+ TRACE("[%d] release used read buffer.\n", __LINE__);
+ g_free(inbuf);
+ g_free(elem);
+ }
+
// write encoded video data
memcpy(meta_buf, &len, sizeof(len));
- if (len > 0) {
+ if (len < 0) {
+ ERR("failed to encode video. len: %d\n", len);
+ } else {
tempbuf = g_malloc0(len);
if (!tempbuf) {
ERR("failed to allocate an element of writequeue.\n");
} else {
memcpy(tempbuf, outbuf, len);
}
+ }
+
+ if (outbuf) {
+ TRACE("release encoded output buffer. %p\n", outbuf);
g_free(outbuf);
- } else {
- len = 0;
}
+
maru_brill_codec_push_writequeue(s, tempbuf, len, ctx_id, f_id);
TRACE("leave: %s\n", __func__);
+ return true;
}
-static void codec_encode_audio(MaruBrillCodecState *s, int ctx_id, int f_id)
+static bool codec_encode_audio(MaruBrillCodecState *s, int ctx_id, int f_id)
{
AVCodecContext *avctx;
uint8_t *inbuf = NULL, *outbuf = NULL;
TRACE("enter: %s\n", __func__);
- avctx = s->context[ctx_id].avctx;
- if (!avctx) {
- ERR("[%s] %d of Context is NULL!\n", __func__, ctx_id);
- return;
- }
- if (!avctx->codec) {
- ERR("%d of AVCodec is NULL.\n", ctx_id);
- return;
- }
-
meta_buf = s->vaddr + ((ctx_id - 1) * CODEC_META_DATA_SIZE);
meta_buf += 8; // skipped header.
elem = get_device_mem_ptr(s, f_id);
if (elem && elem->buf) {
inbuf = elem->buf;
- } else if (elem && inbuf_size > 0) {
+ } else if (elem) {
+ TRACE("encode_audio. no input buffer.\n");
inbuf_size = 0;
} else {
- TRACE("encode_audio. no input buffer.\n");
+ ERR("wrong input data\n");
+ return false;
}
- outbuf = g_malloc0(max_size + FF_MIN_BUFFER_SIZE);
- if (!outbuf) {
- ERR("failed to allocate a buffer of encoding audio.\n");
- len = -1;
+ avctx = s->context[ctx_id].avctx;
+ if (!avctx) {
+ ERR("[%s] %d of Context is NULL!\n", __func__, ctx_id);
+ } else if (!avctx->codec) {
+ ERR("%d of AVCodec is NULL.\n", ctx_id);
} else {
- len =
- avcodec_encode_audio(avctx, outbuf, max_size, (short *)inbuf);
- if (len < 0) {
- ERR("failed to encode audio.\n");
- }
- TRACE("after encoding audio. len: %d\n", len);
- if (inbuf) {
- TRACE("[%d] release used read bufffer.\n", __LINE__);
- g_free(inbuf);
- g_free(elem);
+ outbuf = g_malloc0(max_size + FF_MIN_BUFFER_SIZE);
+ if (!outbuf) {
+ ERR("failed to allocate a buffer of encoding audio.\n");
+ len = -1;
+ } else {
+ len =
+ avcodec_encode_audio(avctx, outbuf, max_size, (short *)inbuf);
+ TRACE("after encoding audio. len: %d\n", len);
}
}
+ if (inbuf) {
+ TRACE("[%d] release used read buffer.\n", __LINE__);
+ g_free(inbuf);
+ g_free(elem);
+ }
+
// write encoded audio data
memcpy(meta_buf, &len, sizeof(len));
- if (len > 0) {
+ if (len < 0) {
+ ERR("failed to encode audio. len: %d\n", len);
+ } else {
tempbuf = g_malloc0(len);
if (!tempbuf) {
ERR("encode_audio. failed to allocate temporary buffer.\n");
} else {
memcpy(tempbuf, outbuf, len);
}
+ }
+
+ if (outbuf) {
av_free(outbuf);
- } else {
- len = 0;
}
- maru_brill_codec_push_writequeue(s, tempbuf, len, ctx_id, f_id);
+ maru_brill_codec_push_writequeue(s, tempbuf, len, ctx_id, f_id);
TRACE("[%s] leave:\n", __func__);
+ return true;
}
static AVCodecParserContext *maru_brill_codec_parser_init(AVCodecContext *avctx)
return parser;
}
-#if 0
-static int maru_brill_codec_parser_parse(AVCodecParserContext *pctx, AVCodecContext *avctx,
- uint8_t *inbuf, int inbuf_size,
- int64_t pts, int64_t dts, int64_t pos)
-{
- int ret = 0;
- uint8_t *outbuf;
- int outbuf_size;
-
- if (!avctx || !pctx) {
- ERR("Codec or Parser Context is empty\n");
- return -1;
- }
-
- ret = av_parser_parse2(pctx, avctx, &outbuf, &outbuf_size,
- inbuf, inbuf_size, pts, dts, pos);
-
- INFO("after parsing, idx: %d, outbuf size: %d, inbuf_size: %d, ret: %d\n",
- pts, outbuf_size, inbuf_size, ret);
-
- return ret;
-}
-#endif
-
//
static void maru_brill_codec_bh_callback(void *opaque)
{
if (!QTAILQ_EMPTY(&codec_wq)) {
qemu_mutex_unlock(&s->context_queue_mutex);
- TRACE("raise irq for shared task.\n");
+ TRACE("raise irq\n");
qemu_irq_raise(s->dev.irq[0]);
} else {
qemu_mutex_unlock(&s->context_queue_mutex);
switch (addr) {
case CODEC_CMD_GET_THREAD_STATE:
- if (s->threadpool.state) {
- s->threadpool.state = CODEC_TASK_START;
+#if 0
+ if (s->thread_state) {
+ s->thread_state = CODEC_TASK_START;
qemu_mutex_lock(&s->context_queue_mutex);
if (!QTAILQ_EMPTY(&codec_wq)) {
}
qemu_mutex_unlock(&s->context_queue_mutex);
}
+#endif
+ qemu_mutex_lock(&s->context_queue_mutex);
+ if (!QTAILQ_EMPTY(&codec_wq)) {
+ ret = CODEC_TASK_END;
+ }
+ qemu_mutex_unlock(&s->context_queue_mutex);
+
TRACE("get thread_state. ret: %d\n", ret);
qemu_irq_lower(s->dev.irq[0]);
break;
- case CODEC_CMD_GET_QUEUE:
+ case CODEC_CMD_GET_CTX_FROM_QUEUE:
{
DeviceMemEntry *head = NULL;
qemu_mutex_lock(&s->context_queue_mutex);
head = QTAILQ_FIRST(&codec_wq);
- qemu_mutex_unlock(&s->context_queue_mutex);
if (head) {
ret = head->ctx_id;
- qemu_mutex_lock(&s->context_queue_mutex);
QTAILQ_REMOVE(&codec_wq, head, node);
- qemu_mutex_unlock(&s->context_queue_mutex);
entry[ret] = head;
TRACE("get a elem from codec_wq. 0x%x\n", head);
} else {
ret = 0;
}
+ qemu_mutex_unlock(&s->context_queue_mutex);
+
TRACE("get a head from a writequeue. head: %x\n", ret);
}
break;
maru_brill_codec_release_context(s, (int32_t)value);
break;
- case CODEC_CMD_POP_WRITE_QUEUE:
+ case CODEC_CMD_GET_DATA_FROM_QUEUE:
maru_brill_codec_pop_writequeue(s, (uint32_t)value);
break;
+
default:
ERR("no available command for write. %d\n", addr);
}