avctx->ticks_per_frame = video->ticks_per_frame;
}
- INFO("codec_init. video, resolution: %dx%d, framerate: %d/%d "
- "pixel_fmt: %d sample_aspect_ratio: %d/%d bpp %d\n",
+ INFO("video_init! resolution %dx%d, framerate %d/%d "
+ "pixel_fmt %d sample_aspect_ratio %d/%d bpp %d\n",
avctx->width, avctx->height, avctx->time_base.num,
avctx->time_base.den, avctx->pix_fmt, avctx->sample_aspect_ratio.num,
avctx->sample_aspect_ratio.den, avctx->bits_per_coded_sample);
avctx->sample_fmt = audio->sample_fmt;
}
- INFO("codec_init. audio, channel %d sample_rate %d sample_fmt %d ch_layout %lld\n",
+ INFO("audio_init! channel %d sample_rate %d sample_fmt %d ch_layout %lld\n",
avctx->channels, avctx->sample_rate, avctx->sample_fmt, avctx->channel_layout);
}
}
}
} else {
- ERR("unknown media type: %d\n", media_type);
+ ERR("unknown media type: %d name %s\n", media_type, codec->name);
}
memset(s->vaddr + mem_size, 0x00, length);
return ctx_id;
}
-
-// allocate avcontext and avframe struct.
-static AVCodecContext *maru_brill_codec_alloc_context(MaruBrillCodecState *s, int ctx_id)
-{
- TRACE("enter: %s\n", __func__);
-
- TRACE("allocate %d of context and frame.\n", ctx_id);
- CONTEXT(s, ctx_id).avctx = avcodec_alloc_context3(NULL);
- CONTEXT(s, ctx_id).frame = avcodec_alloc_frame();
- CONTEXT(s, ctx_id).opened_context = false;
-
- TRACE("leave: %s\n", __func__);
-
- return CONTEXT(s, ctx_id).avctx;
-}
-
static AVCodec *maru_brill_codec_find_avcodec(uint8_t *mem_buf)
{
AVCodec *codec = NULL;
} else {
codec = avcodec_find_decoder_by_name (codec_name);
}
- INFO("%s!! find %s %s\n", codec ? "success" : "failure",
+ INFO("%s! find %s %s\n", codec ? "success" : "failure",
codec_name, encode ? "encoder" : "decoder");
return codec;
memcpy(&avctx->extradata_size,
mem_buf + size, sizeof(avctx->extradata_size));
size += sizeof(avctx->extradata_size);
- INFO("extradata size: %d.\n", avctx->extradata_size);
+ INFO("extradata size: %d\n", avctx->extradata_size);
if (avctx->extradata_size > 0) {
avctx->extradata =
memcpy(avctx->extradata, mem_buf + size, avctx->extradata_size);
}
} else {
- TRACE("no extra data.\n");
+ TRACE("no extra data\n");
avctx->extradata =
av_mallocz(ROUND_UP_X(FF_INPUT_BUFFER_PADDING_SIZE, 4));
}
return NULL;
}
+#if LIBAVUTIL_VERSION_CHECK
resample_frame = avcodec_alloc_frame();
+#else
+ resample_frame = av_frame_alloc();
+#endif
TRACE("resample audio. nb_samples %d sample_fmt %d\n", resample_nb_samples, resample_sample_fmt);
*resample_buffer_size = av_samples_get_buffer_size(NULL, avctx->channels, resample_nb_samples, resample_sample_fmt, 0);
}
TRACE("after parsing ret: %d parser_outbuf_size %d parser_buf_size %d pts %lld\n",
- ret, parser_outbuf_size, parser_buf_size, pctx->pts);
+ ret, parser_outbuf_size, parser_buf_size, pctx->pts);
/* if there is no output, we must break and wait for more data.
* also the timestamp in the context is not updated.
if (!pctx) {
if (len == 0 && (*got_picture) == 0) {
- ERR("decoding video didn't return any data! ctx_id %d len %d\n", ctx_id, len);
+ INFO("decoding video 1! didn't return any data! ctx_id %d len %d\n", ctx_id, len);
break;
} else if (len < 0) {
ERR("decoding video error! ctx_id %d len %d\n", ctx_id, len);
parser_buf += len;
} else {
if (len == 0) {
- ERR("decoding video didn't return any data! ctx_id %d len %d\n", ctx_id, len);
+ INFO("decoding video 2! didn't return any data! ctx_id %d len %d\n", ctx_id, len);
*got_picture = 0;
break;
} else if (len < 0) {
break;
}
}
+
+ TRACE("decoding video. parser_buf_size %d\n", parser_buf_size);
} while (parser_buf_size > 0);
return len;
elem = (DeviceMemEntry *)data_buf;
- // allocate AVCodecContext
- avctx = maru_brill_codec_alloc_context(s, ctx_id);
- if (!avctx) {
- ERR("[%d] failed to allocate context.\n", __LINE__);
- ret = -1;
- } else {
- codec = maru_brill_codec_find_avcodec(elem->opaque);
- if (codec) {
- size = sizeof(int32_t) + 32; // buffer size of codec_name
- read_codec_init_data(avctx, elem->opaque + size);
-
- // in case of aac encoder, sample format is float
- if (!strcmp(codec->name, "aac") && codec->encode2) {
- TRACE("convert sample format into SAMPLE_FMT_FLTP\n");
- avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
-
- avctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
+ codec = maru_brill_codec_find_avcodec(elem->opaque);
+ if (codec) {
+ TRACE("allocate %d of context and frame.\n", ctx_id);
+ CONTEXT(s, ctx_id).avctx = avctx = avcodec_alloc_context3(codec);
+#if LIBAVUTIL_VERSION_CHECK
+ CONTEXT(s, ctx_id).frame = avcodec_alloc_frame();
+#else
+ CONTEXT(s, ctx_id).frame = av_frame_alloc();
+#endif
+ CONTEXT(s, ctx_id).opened_context = false;
+
+ size = sizeof(int32_t) + 32; // buffer size of codec_name
+ read_codec_init_data(avctx, elem->opaque + size);
+
+ AVDictionary *opts = NULL;
+
+ if (codec->encode2) {
+ avctx->gop_size = DEFAULT_VIDEO_GOP_SIZE;
+
+ switch (codec->id) {
+ case AV_CODEC_ID_AAC:
+ avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
+ avctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
+ avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
- INFO("aac encoder!! channels %d channel_layout %lld\n",
- avctx->channels, avctx->channel_layout);
- avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
+ INFO("aac encoder sample_fmt %d channels %d channel_layout %lld strict_std_compliance %d\n",
+ avctx->sample_fmt, avctx->channels, avctx->channel_layout, avctx->strict_std_compliance);
+ break;
+ case AV_CODEC_ID_H264:
+ avctx->bit_rate = 0;
+
+ avctx->flags = 0;
+ // avctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
+ avctx->flags |= CODEC_FLAG_CLOSED_GOP;
+ avctx->flags |= CODEC_FLAG_LOOP_FILTER;
+ avctx->qmax = 45;
+ av_dict_set(&opts, "aud", "1" , 0);
+ break;
+ default:
+ break;
}
- TRACE("audio sample format %d\n", avctx->sample_fmt);
- TRACE("strict_std_compliance %d\n", avctx->strict_std_compliance);
+ INFO("setting encode_init extradata_size %d channels %d sample_rate %d sample_fmt %d "
+ "channel_layout %lld frame_size %d bitrate %d profile %d\n",
+ avctx->extradata_size, avctx->channels, avctx->sample_rate, avctx->sample_fmt,
+ avctx->channel_layout, avctx->frame_size, avctx->bit_rate, avctx->profile);
+ }
+
+ ret = avcodec_open2(avctx, codec, &opts);
+ INFO("avcodec_open. ret 0x%x ctx_id %d\n", ret, ctx_id);
- ret = avcodec_open2(avctx, codec, NULL);
- INFO("avcodec_open. ret 0x%x ctx_id %d\n", ret, ctx_id);
+ TRACE("avcodec_open resolution %dx%d, framerate %d/%d pixel_fmt %d sample_aspect_ratio %d/%d bpp %d\n",
+ avctx->width, avctx->height, avctx->time_base.num,
+ avctx->time_base.den, avctx->pix_fmt, avctx->sample_aspect_ratio.num,
+ avctx->sample_aspect_ratio.den, avctx->bits_per_coded_sample);
- INFO("channels %d sample_rate %d sample_fmt %d "
- "channel_layout %lld frame_size %d\n",
- avctx->channels, avctx->sample_rate, avctx->sample_fmt,
+ TRACE("channels %d bitrate %d sample_rate %d sample_fmt %d channel_layout %lld frame_size %d\n",
+ avctx->channels, avctx->bit_rate, avctx->sample_rate, avctx->sample_fmt,
avctx->channel_layout, avctx->frame_size);
- tempbuf_size = (sizeof(avctx->sample_fmt) + sizeof(avctx->frame_size)
- + sizeof(avctx->extradata_size) + avctx->extradata_size)
- + sizeof(int);
+ tempbuf_size = (sizeof(avctx->sample_fmt) + sizeof(avctx->frame_size)
+ + sizeof(avctx->extradata_size) + avctx->extradata_size) + sizeof(int);
- CONTEXT(s, ctx_id).opened_context = true;
- CONTEXT(s, ctx_id).parser_ctx =
- maru_brill_codec_parser_init(avctx);
- } else {
- ERR("failed to find codec. ctx_id: %d\n", ctx_id);
- ret = -1;
- }
+ CONTEXT(s, ctx_id).opened_context = true;
+ CONTEXT(s, ctx_id).parser_ctx = maru_brill_codec_parser_init(avctx);
+ } else {
+ ERR("failed to find codec. ctx_id: %d\n", ctx_id);
+ ret = -1;
}
tempbuf_size += sizeof(ret);
} else {
size += write_codec_init_data(avctx, tempbuf + size);
TRACE("codec_init. copyback!! size %d\n", size);
- {
- memcpy(tempbuf + size, &avctx->extradata_size, sizeof(avctx->extradata_size));
- size += sizeof(avctx->extradata_size);
-
- INFO("codec_init. extradata_size: %d\n", avctx->extradata_size);
- if (avctx->extradata) {
- memcpy(tempbuf + size, avctx->extradata, avctx->extradata_size);
- size += avctx->extradata_size;
- }
+
+ memcpy(tempbuf + size, &avctx->extradata_size, sizeof(avctx->extradata_size));
+ size += sizeof(avctx->extradata_size);
+ if (avctx->extradata) {
+ memcpy(tempbuf + size, avctx->extradata, avctx->extradata_size);
+ size += avctx->extradata_size;
}
}
}
}
INFO("close avcontext of %d\n", ctx_id);
- // qemu_mutex_lock(&s->threadpool.mutex);
avcodec_close(avctx);
CONTEXT(s, ctx_id).opened_context = false;
- // qemu_mutex_unlock(&s->threadpool.mutex);
if (avctx->extradata) {
TRACE("free context extradata\n");
if (frame) {
TRACE("free frame\n");
+#if LIBAVUTIL_VERSION_CHECK
avcodec_free_frame(&frame);
+#else
+ av_frame_free(&frame);
+#endif
CONTEXT(s, ctx_id).frame = NULL;
}
if (pctx) {
res = av_parser_parse2(pctx, avctx, &poutbuf, &poutbuf_size,
p_inbuf, p_inbuf_size, -1, -1, -1);
- INFO("before flush buffers, using parser. res: %d\n", res);
+ TRACE("before flush buffers, using parser. res: %d\n", res);
}
avcodec_flush_buffers(avctx);
ERR("decode_video. %d of AVFrame is NULL.\n", ctx_id);
} else {
pctx = CONTEXT(s, ctx_id).parser_ctx;
-
len = parse_and_decode_video(avctx, picture, pctx, ctx_id,
&avpkt, &got_picture, idx, in_offset);
}
AVFrame *pict = NULL;
AVPacket avpkt;
uint8_t *inbuf = NULL, *outbuf = NULL;
- int inbuf_size = 0, outbuf_size = 0;
+ int inbuf_size = 0;
int got_frame = 0, ret = 0, size = 0;
int64_t in_timestamp = 0;
int coded_frame = 0, key_frame = 0;
} else if (!avctx->codec) {
ERR("%d of AVCodec is NULL.\n", ctx_id);
} else {
- TRACE("pixel format: %d inbuf: %p, picture data: %p\n",
- avctx->pix_fmt, inbuf, pict->data[0]);
+ TRACE("pixel format: %d inbuf: %p, inbuf_size: %d picture data: %p\n",
+ avctx->pix_fmt, inbuf, inbuf_size, pict->data[0]);
ret = avpicture_fill((AVPicture *)pict, inbuf, avctx->pix_fmt,
avctx->width, avctx->height);
if (avctx->time_base.num == 0) {
pict->pts = AV_NOPTS_VALUE;
} else {
- AVRational bq =
- {1, (G_USEC_PER_SEC * G_GINT64_CONSTANT(1000))};
+ AVRational bq = {1, (G_USEC_PER_SEC * G_GINT64_CONSTANT(1000))};
pict->pts = av_rescale_q(in_timestamp, bq, avctx->time_base);
}
- TRACE("encode video. ticks_per_frame:%d, pts:%lld\n",
- avctx->ticks_per_frame, pict->pts);
- outbuf_size =
- (avctx->width * avctx->height * 6) + FF_MIN_BUFFER_SIZE;
+ TRACE("encode video. ticks_per_frame:%d, pts:%lld\n", avctx->ticks_per_frame, pict->pts);
+ int outbuf_size = 0;
+
+ outbuf_size = (avctx->width * avctx->height * 6) + FF_MIN_BUFFER_SIZE;
outbuf = g_malloc0(outbuf_size);
avpkt.data = outbuf;
} else {
ret = avcodec_encode_video2(avctx, &avpkt, pict, &got_frame);
- TRACE("encode video. ret %d got_picture %d outbuf_size %d\n", ret, got_frame, avpkt.size);
- if (avctx->coded_frame) {
- TRACE("encode video. keyframe %d\n", avctx->coded_frame->key_frame);
- }
+ TRACE("encode video. ret %d got_picture %d outbuf_size %d keyframe %d pkt %d\n",
+ ret, got_frame, avpkt.size, avctx->coded_frame->key_frame, avpkt.flags);
}
}
}
- tempbuf_size = sizeof(ret);
+ tempbuf_size = sizeof(avpkt.size);
if (ret < 0) {
ERR("failed to encode video. ctx_id %d ret %d\n", ctx_id, ret);
} else {
memcpy(tempbuf, &avpkt.size, sizeof(avpkt.size));
size = sizeof(avpkt.size);
+ TRACE("encode video. output_packet size %d\n", avpkt.size);
+
if ((got_frame) && outbuf) {
// inform gstreamer plugin about the status of encoded frames
- // A flag for output buffer in gstreamer is depending on the status.
+ // the flag on the gstreamer output buffer depends on this status.
+
if (avctx->coded_frame) {
- coded_frame = 1;
+ coded_frame = true;
// if key_frame is 0, this frame cannot be decoded independently.
- key_frame = avctx->coded_frame->key_frame;
+ if (avctx->codec_id == AV_CODEC_ID_H264) {
+ key_frame = avpkt.flags;
+ } else {
+ key_frame = avctx->coded_frame->key_frame;
+ }
}
+
memcpy(tempbuf + size, &coded_frame, sizeof(coded_frame));
size += sizeof(coded_frame);
memcpy(tempbuf + size, &key_frame, sizeof(key_frame));
size += sizeof(key_frame);
- memcpy(tempbuf + size, outbuf, avpkt.size);
+ memcpy(tempbuf + size, avpkt.data, avpkt.size);
}
}
nb_samples = audio_in_size / (bytes_per_sample * avctx->channels);
TRACE("nb_samples %d\n", nb_samples);
+#if LIBAVUTIL_VERSION_CHECK
in_frame = avcodec_alloc_frame();
+#else
+ in_frame = av_frame_alloc();
+#endif
if (!in_frame) {
ERR("encode_audio. failed to allocate in_frame\n");
} else {
static AVCodecParserContext *maru_brill_codec_parser_init(AVCodecContext *avctx)
{
AVCodecParserContext *parser = NULL;
+ int codec_id = avctx->codec_id;
- if (!avctx) {
- ERR("context is NULL\n");
- return NULL;
- }
-
- switch (avctx->codec_id) {
- case CODEC_ID_MPEG4:
- case CODEC_ID_VC1:
+ switch (codec_id) {
+ case AV_CODEC_ID_MPEG4:
+ case AV_CODEC_ID_VC1:
TRACE("not using parser\n");
break;
- case CODEC_ID_H264:
+ case AV_CODEC_ID_H264:
if (avctx->extradata_size == 0) {
TRACE("H.264 with no extradata, creating parser.\n");
- parser = av_parser_init (avctx->codec_id);
+ parser = av_parser_init (codec_id);
}
break;
default:
- parser = av_parser_init(avctx->codec_id);
+ parser = av_parser_init(codec_id);
if (parser) {
- INFO("using parser: %s\n", avctx->codec->name);
+ INFO("using parser: %d\n", codec_id);
}
break;
}