From fee97d37af42c811888404fd760612306961f571 Mon Sep 17 00:00:00 2001
From: Kitae Kim
Date: Mon, 21 Jul 2014 13:20:47 +0900
Subject: [PATCH] brillcodec: improve error handling

Fix uninitialized variables. In addition, check whether variables are
null before they are used.

Change-Id: I270884d8080db5f350bcea59bfffa25912a42ff5
Signed-off-by: Kitae Kim
---
 tizen/src/hw/maru_brill_codec.c | 211 ++++++++++++++++----------------
 1 file changed, 104 insertions(+), 107 deletions(-)

diff --git a/tizen/src/hw/maru_brill_codec.c b/tizen/src/hw/maru_brill_codec.c
index 2055c078d5..8b60f5cc28 100644
--- a/tizen/src/hw/maru_brill_codec.c
+++ b/tizen/src/hw/maru_brill_codec.c
@@ -1701,6 +1701,34 @@ static bool codec_encode_video(MaruBrillCodecState *s, int ctx_id, void *data_bu
     return true;
 }
 
+static int codec_fill_audio_frame(AVFrame *frame, uint8_t *audio_buffer,
+                                  int audio_buffer_size, int audio_sample_fmt,
+                                  int channels, int frame_size,
+                                  int64_t channel_layout)
+{
+    uint8_t *samples = NULL;
+    int audio_sample_buffer_size = 0;
+    int ret = 0;
+
+    audio_sample_buffer_size = av_samples_get_buffer_size(NULL, channels, frame_size, audio_sample_fmt, 0);
+
+    samples = av_mallocz(audio_sample_buffer_size);
+    if (!samples) {
+        return -1;
+    }
+
+    if (audio_buffer) {
+        memcpy(samples, audio_buffer, audio_buffer_size);
+    }
+
+    ret = avcodec_fill_audio_frame(frame, channels, audio_sample_fmt,
+                                   (const uint8_t *)samples, audio_sample_buffer_size, 0);
+
+    TRACE("fill audio_frame. ret: %d channel_layout %lld\n", ret, frame->channel_layout);
+
+    return ret;
+}
+
 static bool codec_encode_audio(MaruBrillCodecState *s, int ctx_id, void *data_buf)
 {
     AVCodecContext *avctx = NULL;
@@ -1715,7 +1743,6 @@ static bool codec_encode_audio(MaruBrillCodecState *s, int ctx_id, void *data_bu
 
     AVFrame *in_frame = NULL;
     AVFrame *resampled_frame = NULL;
-    uint8_t *samples = NULL;
     int64_t in_timestamp = 0;
 
     TRACE("enter: %s\n", __func__);
@@ -1741,129 +1768,100 @@ static bool codec_encode_audio(MaruBrillCodecState *s, int ctx_id, void *data_bu
         }
     } else {
         TRACE("encode_audio. no input buffer\n");
-        // FIXME: improve error handling
-        // return false;
     }
 
+    av_init_packet(&avpkt);
+    // packet data will be allocated by encoder
+    avpkt.data = NULL;
+    avpkt.size = 0;
+
     avctx = s->context[ctx_id].avctx;
     if (!avctx) {
-        ERR("[%s] %d of Context is NULL!\n", __func__, ctx_id);
+        ERR("encode_audio. %d of context is NULL\n", ctx_id);
+        ret = -1;
     } else if (!avctx->codec) {
-        ERR("%d of AVCodec is NULL.\n", ctx_id);
+        ERR("encode_audio. %d of codec is NULL\n", ctx_id);
+        ret = -1;
     } else {
-        int bytes_per_sample = 0;
-        int audio_in_buffer_size = 0;
-        int audio_in_sample_fmt = AV_SAMPLE_FMT_S16;
-
         in_frame = avcodec_alloc_frame();
         if (!in_frame) {
-            // FIXME: error handling
             ERR("encode_audio. failed to allocate in_frame\n");
             ret = -1;
-        }
-
-        bytes_per_sample = av_get_bytes_per_sample(audio_in_sample_fmt);
-        TRACE("bytes per sample %d, sample format %d\n", bytes_per_sample, audio_in_sample_fmt);
-
-        in_frame->nb_samples = audio_in_size / (bytes_per_sample * avctx->channels);
-        TRACE("in_frame->nb_samples %d\n", in_frame->nb_samples);
-
-        in_frame->format = audio_in_sample_fmt;
-        in_frame->channel_layout = avctx->channel_layout;
-
-        audio_in_buffer_size = av_samples_get_buffer_size(NULL, avctx->channels, avctx->frame_size, audio_in_sample_fmt, 0);
-        TRACE("audio_in_buffer_size: %d, audio_in_size %d\n", audio_in_buffer_size, audio_in_size);
-
-        {
-            samples = av_mallocz(audio_in_buffer_size);
-            memcpy(samples, audio_in, audio_in_size);
-
-            // g_free(audio_in);
-            // audio_in = NULL;
-
-            ret = avcodec_fill_audio_frame(in_frame, avctx->channels, AV_SAMPLE_FMT_S16, (const uint8_t *)samples, audio_in_size, 0);
-            TRACE("fill in_frame. ret: %d frame->ch_layout %lld\n", ret, in_frame->channel_layout);
-        }
-
-        {
+        } else {
             AVAudioResampleContext *avr = NULL;
-            uint8_t *resampled_audio = NULL;
-            int resampled_buffer_size = 0, resampled_linesize = 0, convert_size;
-            int resampled_nb_samples = 0;
+            int resampled_buffer_size = 0;
             int resampled_sample_fmt = AV_SAMPLE_FMT_FLTP;
+            int convert_size = 0;
+            int bytes_per_sample = 0;
+            int audio_in_sample_fmt = AV_SAMPLE_FMT_S16;
 
-            avr = avresample_alloc_context();
-
-            av_opt_set_int(avr, "in_channel_layout", avctx->channel_layout, 0);
-            av_opt_set_int(avr, "in_sample_fmt", audio_in_sample_fmt , 0);
-            av_opt_set_int(avr, "in_sample_rate", avctx->sample_rate, 0);
-            av_opt_set_int(avr, "out_channel_layout", avctx->channel_layout, 0);
-            av_opt_set_int(avr, "out_sample_fmt", resampled_sample_fmt, 0);
-            av_opt_set_int(avr, "out_sample_rate", avctx->sample_rate, 0);
-
-            resampled_nb_samples = in_frame->nb_samples; // av_get_bytes_per_samples(resampled_sample_fmt);
-
-            if (avresample_open(avr) < 0) {
-                ERR("failed to open avresample context\n");
-                avresample_free(&avr);
-            }
-
-            resampled_buffer_size = av_samples_get_buffer_size(&resampled_linesize, avctx->channels, resampled_nb_samples, resampled_sample_fmt, 0);
-            if (resampled_buffer_size < 0) {
-                ERR("failed to get size of sample buffer %d\n", resampled_buffer_size);
-                avresample_close(avr);
-                avresample_free(&avr);
+            bytes_per_sample = av_get_bytes_per_sample(audio_in_sample_fmt);
+            TRACE("bytes per sample %d, AV_SAMPLE_FMT_S16\n", bytes_per_sample);
+
+            in_frame->nb_samples = audio_in_size / (bytes_per_sample * avctx->channels);
+            TRACE("audio frame. nb_samples %d\n", in_frame->nb_samples);
+
+            in_frame->format = audio_in_sample_fmt;
+            in_frame->channel_layout = avctx->channel_layout;
+
+            // audio_in_frame
+            ret = codec_fill_audio_frame(in_frame, audio_in, audio_in_size,
+                                         audio_in_sample_fmt, avctx->channels, avctx->frame_size,
+                                         avctx->channel_layout);
+
+            if (ret == 0) {
+                resampled_frame = avcodec_alloc_frame();
+                if (!resampled_frame) {
+                    ERR("encode_audio. failed to allocate resampled_frame\n");
+                    ret = -1;
+                } else {
+                    int resampled_sample_fmt = AV_SAMPLE_FMT_FLTP;
+
+                    resampled_frame->nb_samples = in_frame->nb_samples;
+                    resampled_frame->format = resampled_sample_fmt;
+                    resampled_frame->channel_layout = avctx->channel_layout;
+
+                    ret = codec_fill_audio_frame(resampled_frame, NULL,
+                                                 0, resampled_sample_fmt,
+                                                 avctx->channels, avctx->frame_size,
+                                                 avctx->channel_layout);
+                }
             }
-            TRACE("resampled nb_samples %d linesize %d out_size %d\n", resampled_nb_samples, resampled_linesize, resampled_buffer_size);
-
-            resampled_audio = av_mallocz(resampled_buffer_size);
-            if (!resampled_audio) {
-                ERR("failed to allocate resample buffer\n");
-                avresample_close(avr);
-                avresample_free(&avr);
+            if (ret == 0) {
+                avr = avresample_alloc_context();
+                if (avr) {
+                    av_opt_set_int(avr, "in_channel_layout", avctx->channel_layout, 0);
+                    av_opt_set_int(avr, "in_sample_fmt", AV_SAMPLE_FMT_S16 , 0);
+                    av_opt_set_int(avr, "in_sample_rate", avctx->sample_rate, 0);
+                    av_opt_set_int(avr, "out_channel_layout", avctx->channel_layout, 0);
+                    av_opt_set_int(avr, "out_sample_fmt", resampled_sample_fmt, 0);
+                    av_opt_set_int(avr, "out_sample_rate", avctx->sample_rate, 0);
+
+                    ret = avresample_open(avr);
+                    if (ret == 0) {
+                        convert_size =
+                            avresample_convert(avr, resampled_frame->data,
+                                resampled_buffer_size, resampled_frame->nb_samples,
+                                in_frame->data, audio_in_size,
+                                in_frame->nb_samples);
+
+                        TRACE("resample_audio convert_size %d\n", convert_size);
+                        avresample_close(avr);
+                    }
+                    avresample_free(&avr);
+                } else {
+                    ERR("failed to allocate AVAudioResampleContext\n");
+                    ret = -1;
+                }
             }
-            // in_frame->nb_samples = nb_samples;
-            resampled_frame = avcodec_alloc_frame();
-            if (!resampled_frame) {
-                // FIXME: error handling
-                ERR("encode_audio. failed to allocate resampled_frame\n");
-                ret = -1;
+            if (ret == 0) {
+                ret = avcodec_encode_audio2(avctx, &avpkt, (const AVFrame *)resampled_frame, &got_pkt);
+                TRACE("encode audio. ret %d got_pkt %d avpkt.size %d "
+                    "frame_number %d coded_frame %p\n", ret, got_pkt,
+                    avpkt.size, avctx->frame_number, avctx->coded_frame);
             }
-
-
-            bytes_per_sample = av_get_bytes_per_sample(audio_in_sample_fmt);
-            TRACE("bytes per sample %d, sample format %d\n", bytes_per_sample, audio_in_sample_fmt);
-
-            resampled_frame->nb_samples = in_frame->nb_samples;
-            TRACE("resampled_frame->nb_samples %d\n", resampled_frame->nb_samples);
-
-            resampled_frame->format = resampled_sample_fmt;
-            resampled_frame->channel_layout = avctx->channel_layout;
-
-            ret = avcodec_fill_audio_frame(resampled_frame, avctx->channels, resampled_sample_fmt,
-                    (const uint8_t *)resampled_audio, resampled_buffer_size, 0);
-            TRACE("fill resampled_frame ret: %d frame->ch_layout %lld\n", ret, in_frame->channel_layout);
-
-            convert_size = avresample_convert(avr, resampled_frame->data, resampled_buffer_size, resampled_nb_samples,
-                    in_frame->data, audio_in_size, in_frame->nb_samples);
-
-            TRACE("resample_audio convert_size %d\n", convert_size);
-
-            avresample_close(avr);
-            avresample_free(&avr);
-        }
-
-        if (ret == 0) {
-            av_init_packet(&avpkt);
-            // packet data will be allocated by encoder
-            avpkt.data = NULL;
-            avpkt.size = 0;
-
-            ret = avcodec_encode_audio2(avctx, &avpkt, (const AVFrame *)resampled_frame, &got_pkt);
-            TRACE("encode audio. ret %d got_pkt %d avpkt.size %d frame_number %d coded_frame %p\n",
-                ret, got_pkt, avpkt.size, avctx->frame_number, avctx->coded_frame);
         }
     }
 
@@ -1871,7 +1869,6 @@ static bool codec_encode_audio(MaruBrillCodecState *s, int ctx_id, void *data_bu
     if (ret < 0) {
         ERR("failed to encode audio. ctx_id %d ret %d\n", ctx_id, ret);
     } else {
-        // tempbuf_size += (max_size); // len;
         tempbuf_size += (sizeof(avpkt.size) + avpkt.size);
     }
     TRACE("encode_audio. writequeue elem buffer size %d\n", tempbuf_size);
-- 
2.34.1
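
For readers unfamiliar with the libav audio-frame APIs this patch leans on, the
minimal sketch below isolates the allocate/size/fill/check pattern behind the
new codec_fill_audio_frame() helper, with every step guarded the way the
reworked encode path guards its steps. It uses the same era APIs that appear in
the diff (avcodec_alloc_frame(), av_samples_get_buffer_size(),
avcodec_fill_audio_frame()); the prepare_s16_frame() name, its parameters, and
the cleanup choices are illustrative assumptions, not part of the patch.

#include <string.h>
#include <libavcodec/avcodec.h>
#include <libavutil/mem.h>
#include <libavutil/samplefmt.h>

/* Illustrative helper (not from the patch): wrap interleaved S16 PCM in an
 * AVFrame, checking each step instead of falling through on failure. */
static AVFrame *prepare_s16_frame(const uint8_t *pcm, int pcm_size,
                                  int channels, uint64_t channel_layout)
{
    int bytes_per_sample = av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
    int nb_samples = pcm_size / (bytes_per_sample * channels);
    int buffer_size;
    uint8_t *samples;
    AVFrame *frame = avcodec_alloc_frame();

    if (!frame) {
        return NULL;
    }

    frame->nb_samples = nb_samples;
    frame->format = AV_SAMPLE_FMT_S16;
    frame->channel_layout = channel_layout;

    /* Size the backing buffer for exactly nb_samples interleaved samples. */
    buffer_size = av_samples_get_buffer_size(NULL, channels, nb_samples,
                                             AV_SAMPLE_FMT_S16, 0);
    if (buffer_size < 0) {
        avcodec_free_frame(&frame);
        return NULL;
    }

    samples = av_mallocz(buffer_size);
    if (!samples) {
        avcodec_free_frame(&frame);
        return NULL;
    }
    memcpy(samples, pcm, nb_samples * bytes_per_sample * channels);

    /* Point frame->data[] at the buffer; release everything on failure. */
    if (avcodec_fill_audio_frame(frame, channels, AV_SAMPLE_FMT_S16,
                                 samples, buffer_size, 0) < 0) {
        av_free(samples);
        avcodec_free_frame(&frame);
        return NULL;
    }

    return frame;
}

Note that the patch itself sizes the sample buffer from avctx->frame_size
rather than from the input length; the sketch sizes it from the input so it
stays self-contained.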