From e8aa4769f6f4c2dc0683b4a8b3d4360635ea25ee Mon Sep 17 00:00:00 2001
From: Kitae Kim
Date: Thu, 20 Nov 2014 17:12:55 +0900
Subject: [PATCH] brillcodec: apply struct-based memory access for audio encoding

Apply struct-based memory access to the audio encoder, and use a
DataHandler to copy the audio encoder's output.

Change-Id: I66bc5cc3b464adaf10de389fd2c1a71b3a47d27f
Signed-off-by: Kitae Kim
---
 tizen/src/hw/pci/maru_brillcodec.c | 181 ++++++++++++++++---------------------
 1 file changed, 77 insertions(+), 104 deletions(-)

diff --git a/tizen/src/hw/pci/maru_brillcodec.c b/tizen/src/hw/pci/maru_brillcodec.c
index f4784cb..db7a6e8 100644
--- a/tizen/src/hw/pci/maru_brillcodec.c
+++ b/tizen/src/hw/pci/maru_brillcodec.c
@@ -80,7 +80,7 @@ typedef struct DataContainer {
         size_t picture_buffer_offset;
     };
 
-    // for video encoder
+    // for video/audio encoder
     struct {
        AVPacket *avpkt;
    };
@@ -748,8 +748,7 @@ static void copy_video_decode_data(void *dst, void *opaque, size_t dummy)
 static void copy_video_encode_data(void *dst, void *opaque, size_t dummy)
 {
     DataContainer *dc = (DataContainer *)opaque;
-    struct video_encode_output *encode_output =
-        (struct video_encode_output *)dst;
+    struct video_encode_output *encode_output = (struct video_encode_output *)dst;
 
     encode_output->len = dc->avpkt->size;
     if (dc->avpkt->size && dc->is_got) {
@@ -892,7 +891,7 @@ static bool decode_video_common(MaruBrillCodecState *s, int ctx_id,
     dc->len = len;
     dc->is_got = got_picture;
     dc->avctx = avctx;
-    if(got_picture && copy_picture) { // we have output picture
+    if (got_picture && copy_picture) { // we have output picture
         dc->frame = frame;
     }
 
@@ -1038,6 +1037,16 @@ struct audio_decode_output {
     uint8_t data; // for pointing data address
 } __attribute__((packed));
 
+struct audio_encode_input {
+    int32_t inbuf_size;
+    uint8_t inbuf; // for pointing inbuf address
+} __attribute__((packed));
+
+struct audio_encode_output {
+    int32_t len;
+    uint8_t data; // for pointing data address
+} __attribute__((packed));
+
 static int convert_audio_sample_fmt(const AVCodec *codec, int codec_type, bool encode)
 {
     int audio_sample_fmt = AV_SAMPLE_FMT_NONE;
@@ -1179,12 +1188,9 @@ static AVFrame *resample_audio(AVCodecContext *avctx, AVFrame *sample_frame,
 static void copy_audio_decode_data(void *dst, void *opaque, size_t dummy)
 {
     DataContainer *dc = (DataContainer *)opaque;
-
     struct audio_decode_output *decode_output = (struct audio_decode_output *)dst;
 
-    // FIXME
-    //decode_output->len = dc->len;
     decode_output->len = dc->frame->linesize[0];
     decode_output->got_frame = dc->is_got ? 1 : 0;
@@ -1206,6 +1212,21 @@ static void copy_audio_decode_data(void *dst, void *opaque, size_t dummy)
     g_free(dc);
 }
 
+static void copy_audio_encode_data(void *dst, void *opaque, size_t dummy)
+{
+    DataContainer *dc = (DataContainer *)opaque;
+    struct audio_encode_output *encode_output = (struct audio_encode_output *)dst;
+
+    encode_output->len = dc->avpkt->size;
+    if (dc->avpkt->size && dc->is_got) {
+        memcpy(&encode_output->data, dc->avpkt->data, dc->avpkt->size);
+    }
+
+    g_free(dc->avpkt->data);
+    g_free(dc->avpkt);
+    g_free(dc);
+}
+
 /*
  * dc->resampled = resample_frame ? true : false;
  * decode_audio >> raw audio_buffer >> resample
@@ -1228,7 +1249,7 @@ static bool decode_audio(MaruBrillCodecState *s, int ctx_id, void *data_buf)
 
     elem = (DeviceMemEntry *)data_buf;
     if (!elem || !elem->opaque) {
-        TRACE("decode_video. no input buffer\n");
+        TRACE("decode_audio. no input buffer\n");
     } else {
         decode_input = elem->opaque;
     }
@@ -1290,18 +1311,15 @@ static bool decode_audio(MaruBrillCodecState *s, int ctx_id, void *data_buf)
 static bool encode_audio(MaruBrillCodecState *s, int ctx_id, void *data_buf)
 {
     AVCodecContext *avctx = NULL;
-    AVPacket avpkt;
-    uint8_t *audio_in = NULL;
-    int32_t audio_in_size = 0;
-    int ret = -1, got_pkt = 0, size = 0;
+    AVPacket *avpkt = g_malloc0(sizeof(AVPacket));
+    int len = 0, got_frame = 0;
 
     DeviceMemEntry *elem = NULL;
-    uint8_t *tempbuf = NULL;
-    int tempbuf_size = 0;
-
     AVFrame *in_frame = NULL;
     AVFrame *resample_frame = NULL;
-    int64_t in_timestamp = 0;
+
+    struct audio_encode_input empty_input = { 0, };
+    struct audio_encode_input *encode_input = &empty_input;
 
     TRACE("enter: %s\n", __func__);
 
@@ -1311,109 +1329,64 @@ static bool encode_audio(MaruBrillCodecState *s, int ctx_id, void *data_buf)
      * audio_in : raw audio data
      */
     elem = (DeviceMemEntry *)data_buf;
-    if (elem && elem->opaque) {
-        memcpy(&audio_in_size, elem->opaque, sizeof(audio_in_size));
-        size += sizeof(audio_in_size);
-
-        memcpy(&in_timestamp, elem->opaque + size, sizeof(in_timestamp));
-        size += sizeof(in_timestamp);
-
-        TRACE("encode_audio. audio_in_size %d\n", audio_in_size);
-        if (audio_in_size > 0) {
-            // audio_in = g_malloc0(audio_in_size);
-            // memcpy(audio_in, elem->buf + size, audio_in_size);
-            audio_in = elem->opaque + size;
-        }
-    } else {
+    if (!elem || !elem->opaque) {
         TRACE("encode_audio. no input buffer\n");
-        // FIXME: improve error handling
-        // return false;
+    } else {
+        encode_input = elem->opaque;
     }
 
+    av_init_packet(avpkt);
+
     avctx = CONTEXT(s, ctx_id)->avctx;
-    if (!avctx) {
-        ERR("[%s] %d of Context is NULL!\n", __func__, ctx_id);
-    } else if (!avctx->codec) {
-        ERR("%d of AVCodec is NULL.\n", ctx_id);
-    } else {
-        int bytes_per_sample = 0;
-        int nb_samples = 0;
-        int audio_in_sample_fmt = AV_SAMPLE_FMT_S16;
-        // audio input src can generate a buffer as an int format.
+    in_frame = CONTEXT(s, ctx_id)->frame;
 
-        int resample_buf_size = 0;
-        int resample_sample_fmt = 0;
+    if (!avctx || !avctx->codec || !in_frame) {
+        ERR("critical error !!!\n");
+        assert(0);
+    }
 
-        bytes_per_sample = av_get_bytes_per_sample(audio_in_sample_fmt);
-        TRACE("bytes per sample %d, sample format %d\n", bytes_per_sample, audio_in_sample_fmt);
+    int bytes_per_sample = 0;
+    int nb_samples = 0;
+    int audio_in_sample_fmt = AV_SAMPLE_FMT_S16;
+    // audio input src can generate a buffer as an int format.
 
-        nb_samples = audio_in_size / (bytes_per_sample * avctx->channels);
-        TRACE("nb_samples %d\n", nb_samples);
+    int resample_buf_size = 0;
+    int resample_sample_fmt = 0;
+    int ret = 0;
 
-        in_frame = avcodec_alloc_frame();
-        if (!in_frame) {
-            ERR("encode_audio. failed to allocate in_frame\n");
-        } else {
-            // prepare audio_in frame
-            ret = fill_audio_into_frame(avctx, in_frame, audio_in, audio_in_size, nb_samples, audio_in_sample_fmt);
-            if (ret < 0) {
-                ERR("failed to fill audio into frame\n");
-            } else {
-                resample_sample_fmt =
-                    convert_audio_sample_fmt(avctx->codec, avctx->codec_type, 1);
-                resample_frame = resample_audio(avctx, in_frame, audio_in_size,
-                        audio_in_sample_fmt, NULL, &resample_buf_size,
-                        resample_sample_fmt);
-
-                if (resample_frame) {
-                    av_init_packet(&avpkt);
-                    avpkt.data = NULL;
-                    avpkt.size = 0;
-
-                    ret = avcodec_encode_audio2(avctx, &avpkt, (const AVFrame *)resample_frame, &got_pkt);
-                    TRACE("encode audio. ret %d got_pkt %d avpkt.size %d frame_number %d\n",
-                            ret, got_pkt, avpkt.size, avctx->frame_number);
-                }
-            }
-        }
-    }
+    bytes_per_sample = av_get_bytes_per_sample(audio_in_sample_fmt);
+    TRACE("bytes per sample %d, sample format %d\n", bytes_per_sample, audio_in_sample_fmt);
 
-    tempbuf_size = sizeof(ret);
-    if (ret < 0) {
-        ERR("failed to encode audio. ctx_id %d ret %d\n", ctx_id, ret);
-    } else {
-        tempbuf_size += (sizeof(avpkt.size) + avpkt.size);
-    }
-    TRACE("encode_audio. writequeue elem buffer size %d\n", tempbuf_size);
+    nb_samples = encode_input->inbuf_size / (bytes_per_sample * avctx->channels);
+    TRACE("nb_samples %d\n", nb_samples);
 
-    // write encoded audio data
-    tempbuf = g_malloc0(tempbuf_size);
-    if (!tempbuf) {
-        ERR("encode audio. failed to allocate encoded out buffer.\n");
+    ret = fill_audio_into_frame(avctx, in_frame,
+                                &encode_input->inbuf, encode_input->inbuf_size,
+                                nb_samples, audio_in_sample_fmt);
+    if (ret < 0) {
+        ERR("failed to fill audio into frame\n");
     } else {
-        memcpy(tempbuf, &ret, sizeof(ret));
-        size = sizeof(ret);
-        if (ret == 0) {
-            memcpy(tempbuf + size, &avpkt.size, sizeof(avpkt.size));
-            size += sizeof(avpkt.size);
-
-            if (got_pkt) {
-                memcpy(tempbuf + size, avpkt.data, avpkt.size);
-                av_free_packet(&avpkt);
-            }
+        resample_sample_fmt =
+            convert_audio_sample_fmt(avctx->codec, avctx->codec_type, 1);
+
+        resample_frame =
+            resample_audio(avctx, in_frame, encode_input->inbuf_size,
+                           audio_in_sample_fmt, NULL, &resample_buf_size,
+                           resample_sample_fmt);
+
+        if (resample_frame) {
+            len = avcodec_encode_audio2(avctx, avpkt, (const AVFrame *)resample_frame, &got_frame);
+            TRACE("encode audio. len %d got_frame %d avpkt->size %d frame_number %d\n",
+                  len, got_frame, avpkt->size, avctx->frame_number);
        }
    }
-    brillcodec_push_writequeue(s, tempbuf, tempbuf_size, ctx_id, NULL);
-
-    if (in_frame) {
-        av_free(in_frame);
-    }
+    DataContainer *dc = g_malloc0(sizeof(DataContainer));
+    dc->is_got = got_frame;
+    dc->avctx = avctx;
+    dc->avpkt = avpkt;
 
-    if (resample_frame) {
-        av_free(resample_frame->data[0]);
-        av_free(resample_frame);
-    }
+    brillcodec_push_writequeue(s, dc, 0, ctx_id, &copy_audio_encode_data);
 
     TRACE("[%s] leave:\n", __func__);
-- 
2.7.4
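Illustration (not part of the patch): the sketch below models the struct-based access
pattern this change adopts for the audio encoder path. The guest lays a packed
audio_encode_input header over the shared buffer, the host reads it with a single cast
instead of memcpy() plus manual offset bookkeeping, and a data-handler callback writes a
packed audio_encode_output the same way (as copy_audio_encode_data does above). The
shared byte arrays and the guest_write_input()/host_copy_output() helpers are
illustrative stand-ins only; the real code works on DeviceMemEntry buffers, FFmpeg
packets, and brillcodec_push_writequeue().

    /* Minimal, self-contained sketch of the packed-struct memory access pattern.
     * "Shared device memory" is modeled as a plain byte buffer. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct audio_encode_input {
        int32_t inbuf_size;
        uint8_t inbuf;          /* marks the start of the raw audio payload */
    } __attribute__((packed));

    struct audio_encode_output {
        int32_t len;
        uint8_t data;           /* marks the start of the encoded payload */
    } __attribute__((packed));

    /* Guest side (hypothetical helper): overlay the input struct on the shared
     * buffer and copy the raw samples right after the fixed-size header. */
    static void guest_write_input(uint8_t *shared, const uint8_t *pcm, int32_t pcm_size)
    {
        struct audio_encode_input *in = (struct audio_encode_input *)shared;

        in->inbuf_size = pcm_size;
        memcpy(&in->inbuf, pcm, pcm_size);
    }

    /* Host-side data handler, analogous to copy_audio_encode_data(): write the
     * result length and payload through the packed output struct instead of
     * hand-computing byte offsets. */
    static void host_copy_output(void *dst, const uint8_t *encoded, int32_t encoded_size)
    {
        struct audio_encode_output *out = (struct audio_encode_output *)dst;

        out->len = encoded_size;
        if (encoded_size > 0) {
            memcpy(&out->data, encoded, encoded_size);
        }
    }

    int main(void)
    {
        uint8_t shared_in[64] = { 0 }, shared_out[64] = { 0 };
        const uint8_t pcm[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

        guest_write_input(shared_in, pcm, sizeof(pcm));

        /* Host reads the input the way encode_audio() now does: one cast,
         * then direct field access. */
        struct audio_encode_input *in = (struct audio_encode_input *)shared_in;
        printf("inbuf_size = %d, first sample = %d\n",
               (int)in->inbuf_size, (int)(&in->inbuf)[0]);

        /* Pretend the encoder produced 4 bytes and hand them to the handler. */
        const uint8_t encoded[4] = { 9, 9, 9, 9 };
        host_copy_output(shared_out, encoded, sizeof(encoded));

        struct audio_encode_output *out = (struct audio_encode_output *)shared_out;
        printf("encoded len = %d\n", (int)out->len);
        return 0;
    }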