size_t picture_buffer_offset;
};
- // for video encoder
+ // for video/audio encoder
struct {
AVPacket *avpkt;
};
static void copy_video_encode_data(void *dst, void *opaque, size_t dummy)
{
DataContainer *dc = (DataContainer *)opaque;
- struct video_encode_output *encode_output =
- (struct video_encode_output *)dst;
+ struct video_encode_output *encode_output = (struct video_encode_output *)dst;
encode_output->len = dc->avpkt->size;
if (dc->avpkt->size && dc->is_got) {
dc->len = len;
dc->is_got = got_picture;
dc->avctx = avctx;
- if(got_picture && copy_picture) { // we have output picture
+ if (got_picture && copy_picture) { // we have output picture
dc->frame = frame;
}
uint8_t data; // for pointing data address
} __attribute__((packed));
+// Guest<->host wire structures for the audio-encode path. They are
+// __attribute__((packed)) so the host sees exactly the byte layout the
+// guest driver writes; the trailing uint8_t members are placeholders whose
+// address marks where the variable-length payload begins (used as &inbuf /
+// &data followed by memcpy), mirroring the existing video_encode_* structs.
+struct audio_encode_input {
+ int32_t inbuf_size; // byte length of the raw audio payload that follows
+ uint8_t inbuf; // for pointing inbuf address (start of payload, not a single byte)
+} __attribute__((packed));
+
+struct audio_encode_output {
+ int32_t len; // byte length of the encoded packet copied into 'data'
+ uint8_t data; // for pointing data address (start of encoded payload)
+} __attribute__((packed));
+
static int convert_audio_sample_fmt(const AVCodec *codec, int codec_type, bool encode)
{
int audio_sample_fmt = AV_SAMPLE_FMT_NONE;
+// Write-queue callback: fills the guest-visible audio_decode_output at
+// 'dst' from the DataContainer handed over by the decoder, then frees the
+// container (ownership of 'dc' transfers to this callback; 'dummy' unused).
static void copy_audio_decode_data(void *dst, void *opaque, size_t dummy)
{
DataContainer *dc = (DataContainer *)opaque;
-
struct audio_decode_output *decode_output =
(struct audio_decode_output *)dst;
- // FIXME
- //decode_output->len = dc->len;
+ // NOTE(review): reports the frame's first-plane size as the output length;
+ // assumes packed (non-planar) samples so linesize[0] covers the payload — confirm.
decode_output->len = dc->frame->linesize[0];
decode_output->got_frame = dc->is_got ? 1 : 0;
g_free(dc);
}
+// Write-queue callback: copies the encoded audio packet produced by
+// encode_audio() into the guest-visible audio_encode_output at 'dst',
+// then releases the packet and container whose ownership encode_audio()
+// transferred via brillcodec_push_writequeue() ('dummy' is unused).
+static void copy_audio_encode_data(void *dst, void *opaque, size_t dummy)
+{
+ DataContainer *dc = (DataContainer *)opaque;
+ struct audio_encode_output *encode_output = (struct audio_encode_output *)dst;
+
+ encode_output->len = dc->avpkt->size;
+ // Copy the payload only when the encoder actually emitted a packet.
+ if (dc->avpkt->size && dc->is_got) {
+ memcpy(&encode_output->data, dc->avpkt->data, dc->avpkt->size);
+ }
+
+ // Free packet data, the packet itself, and the container — this callback
+ // is the single owner at this point, so nothing else may touch them after.
+ g_free(dc->avpkt->data);
+ g_free(dc->avpkt);
+ g_free(dc);
+}
+
/*
* dc->resampled = resample_frame ? true : false;
* decode_audio >> raw audio_buffer >> resample
elem = (DeviceMemEntry *)data_buf;
if (!elem || !elem->opaque) {
- TRACE("decode_video. no input buffer\n");
+ TRACE("decode_audio. no input buffer\n");
} else {
decode_input = elem->opaque;
}
static bool encode_audio(MaruBrillCodecState *s, int ctx_id, void *data_buf)
{
AVCodecContext *avctx = NULL;
- AVPacket avpkt;
- uint8_t *audio_in = NULL;
- int32_t audio_in_size = 0;
- int ret = -1, got_pkt = 0, size = 0;
+ AVPacket *avpkt = g_malloc0(sizeof(AVPacket));
+ int len = 0, got_frame = 0;
DeviceMemEntry *elem = NULL;
- uint8_t *tempbuf = NULL;
- int tempbuf_size = 0;
-
AVFrame *in_frame = NULL;
AVFrame *resample_frame = NULL;
- int64_t in_timestamp = 0;
+
+ struct audio_encode_input empty_input = { 0, };
+ struct audio_encode_input *encode_input = &empty_input;
TRACE("enter: %s\n", __func__);
* audio_in : raw audio data
*/
elem = (DeviceMemEntry *)data_buf;
- if (elem && elem->opaque) {
- memcpy(&audio_in_size, elem->opaque, sizeof(audio_in_size));
- size += sizeof(audio_in_size);
-
- memcpy(&in_timestamp, elem->opaque + size, sizeof(in_timestamp));
- size += sizeof(in_timestamp);
-
- TRACE("encode_audio. audio_in_size %d\n", audio_in_size);
- if (audio_in_size > 0) {
- // audio_in = g_malloc0(audio_in_size);
- // memcpy(audio_in, elem->buf + size, audio_in_size);
- audio_in = elem->opaque + size;
- }
- } else {
+ if (!elem || !elem->opaque) {
TRACE("encode_audio. no input buffer\n");
- // FIXME: improve error handling
- // return false;
+ } else {
+ encode_input = elem->opaque;
}
+ av_init_packet(avpkt);
+
avctx = CONTEXT(s, ctx_id)->avctx;
- if (!avctx) {
- ERR("[%s] %d of Context is NULL!\n", __func__, ctx_id);
- } else if (!avctx->codec) {
- ERR("%d of AVCodec is NULL.\n", ctx_id);
- } else {
- int bytes_per_sample = 0;
- int nb_samples = 0;
- int audio_in_sample_fmt = AV_SAMPLE_FMT_S16;
- // audio input src can generate a buffer as an int format.
+ in_frame = CONTEXT(s, ctx_id)->frame;
- int resample_buf_size = 0;
- int resample_sample_fmt = 0;
+ if (!avctx || !avctx->codec || !in_frame) {
+ ERR("critical error !!!\n");
+ assert(0);
+ }
- bytes_per_sample = av_get_bytes_per_sample(audio_in_sample_fmt);
- TRACE("bytes per sample %d, sample format %d\n", bytes_per_sample, audio_in_sample_fmt);
+ int bytes_per_sample = 0;
+ int nb_samples = 0;
+ int audio_in_sample_fmt = AV_SAMPLE_FMT_S16;
+ // audio input src can generate a buffer as an int format.
- nb_samples = audio_in_size / (bytes_per_sample * avctx->channels);
- TRACE("nb_samples %d\n", nb_samples);
+ int resample_buf_size = 0;
+ int resample_sample_fmt = 0;
+ int ret = 0;
- in_frame = avcodec_alloc_frame();
- if (!in_frame) {
- ERR("encode_audio. failed to allocate in_frame\n");
- } else {
- // prepare audio_in frame
- ret = fill_audio_into_frame(avctx, in_frame, audio_in, audio_in_size, nb_samples, audio_in_sample_fmt);
- if (ret < 0) {
- ERR("failed to fill audio into frame\n");
- } else {
- resample_sample_fmt =
- convert_audio_sample_fmt(avctx->codec, avctx->codec_type, 1);
- resample_frame = resample_audio(avctx, in_frame, audio_in_size,
- audio_in_sample_fmt, NULL, &resample_buf_size,
- resample_sample_fmt);
-
- if (resample_frame) {
- av_init_packet(&avpkt);
- avpkt.data = NULL;
- avpkt.size = 0;
-
- ret = avcodec_encode_audio2(avctx, &avpkt, (const AVFrame *)resample_frame, &got_pkt);
- TRACE("encode audio. ret %d got_pkt %d avpkt.size %d frame_number %d\n",
- ret, got_pkt, avpkt.size, avctx->frame_number);
- }
- }
- }
- }
+ bytes_per_sample = av_get_bytes_per_sample(audio_in_sample_fmt);
+ TRACE("bytes per sample %d, sample format %d\n", bytes_per_sample, audio_in_sample_fmt);
- tempbuf_size = sizeof(ret);
- if (ret < 0) {
- ERR("failed to encode audio. ctx_id %d ret %d\n", ctx_id, ret);
- } else {
- tempbuf_size += (sizeof(avpkt.size) + avpkt.size);
- }
- TRACE("encode_audio. writequeue elem buffer size %d\n", tempbuf_size);
+ nb_samples = encode_input->inbuf_size / (bytes_per_sample * avctx->channels);
+ TRACE("nb_samples %d\n", nb_samples);
- // write encoded audio data
- tempbuf = g_malloc0(tempbuf_size);
- if (!tempbuf) {
- ERR("encode audio. failed to allocate encoded out buffer.\n");
+ ret = fill_audio_into_frame(avctx, in_frame,
+ &encode_input->inbuf, encode_input->inbuf_size,
+ nb_samples, audio_in_sample_fmt);
+ if (ret < 0) {
+ ERR("failed to fill audio into frame\n");
} else {
- memcpy(tempbuf, &ret, sizeof(ret));
- size = sizeof(ret);
- if (ret == 0) {
- memcpy(tempbuf + size, &avpkt.size, sizeof(avpkt.size));
- size += sizeof(avpkt.size);
-
- if (got_pkt) {
- memcpy(tempbuf + size, avpkt.data, avpkt.size);
- av_free_packet(&avpkt);
- }
+ resample_sample_fmt =
+ convert_audio_sample_fmt(avctx->codec, avctx->codec_type, 1);
+
+ resample_frame =
+ resample_audio(avctx, in_frame, encode_input->inbuf_size,
+ audio_in_sample_fmt, NULL, &resample_buf_size,
+ resample_sample_fmt);
+
+ if (resample_frame) {
+ len = avcodec_encode_audio2(avctx, avpkt, (const AVFrame *)resample_frame, &got_frame);
+ TRACE("encode audio. len %d got_frame %d avpkt->size %d frame_number %d\n",
+ len, got_frame, avpkt->size, avctx->frame_number);
}
}
- brillcodec_push_writequeue(s, tempbuf, tempbuf_size, ctx_id, NULL);
-
- if (in_frame) {
- av_free(in_frame);
- }
+ DataContainer *dc = g_malloc0(sizeof(DataContainer));
+ dc->is_got = got_frame;
+ dc->avctx = avctx;
+ dc->avpkt = avpkt;
- if (resample_frame) {
- av_free(resample_frame->data[0]);
- av_free(resample_frame);
- }
+ brillcodec_push_writequeue(s, dc, 0, ctx_id, ©_audio_encode_data);
TRACE("[%s] leave:\n", __func__);