CODEC_TYPE_ENCODE,
};
-struct video_data {
- int32_t width;
- int32_t height;
- int32_t fps_n;
- int32_t fps_d;
- int32_t par_n;
- int32_t par_d;
- int32_t pix_fmt;
- int32_t bpp;
- int32_t ticks_per_frame;
-};
-
struct audio_data {
int32_t channels;
int32_t sample_rate;
.release = default_release,
};
+static void default_get_picture(void *dst, void *src, enum AVPixelFormat pix_fmt)
+{
+ AVFrame *frame = (AVFrame *)src;
+ int pict_size = avpicture_get_size(pix_fmt, frame->width, frame->height);
+ if (pict_size < 0) {
+ // cannot enter here...
+ ERR("Invalid picture size\n");
+ return;
+ }
+ avpicture_layout((AVPicture *)frame, pix_fmt,
+ frame->width, frame->height, dst, pict_size);
+}
+
// default video decode data handler
-static void copy_picture(void *dst, void *opaque, size_t size)
+// FIXME: ignore "size" now...
+static void copy_picture(void *dst, void *opaque, size_t dummy)
{
+ size_t size = sizeof(int32_t), offset = 0;
DataContainer *dc = (DataContainer *)opaque;
+ CodecContext *context = (CodecContext *)dc->avctx->opaque;
+
+ if (dc->picture_buffer_offset) {
+ // FIXME: if video data is exist...
+ *((int32_t *)dst) = dc->len;
+ offset += size;
+ *((int32_t *)(dst + offset)) = dc->got_picture;
+ offset += size;
+
+ struct video_data *data = (struct video_data *)(dst + offset);
+ fill_video_data(dc->avctx, data);
- if (dc->len_data_buffer) {
- memcpy(dst, dc->data_buffer, dc->len_data_buffer);
+ if (context->is_hwaccel) {
+ data->pix_fmt = context->state->hwaccel_plugin->output_pix_fmt;
+ }
}
+
if (dc->frame) {
- avpicture_layout((AVPicture *)dc->frame, dc->pix_fmt, dc->frame->width, dc->frame->height,
- dst + dc->picture_buffer_offset, size - dc->picture_buffer_offset); // FIXME
+ // FIXME: if picture is exist...
+ if (context->is_hwaccel) {
+ context->state->hwaccel_plugin->get_picture(dst + dc->picture_buffer_offset, dc->frame);
+ } else {
+ default_get_picture(dst + dc->picture_buffer_offset, dc->frame, dc->avctx->pix_fmt);
+ }
}
}
// Release callback: free the DataContainer once its payload has been
// consumed by the data handler.
static void release(void *opaque)
{
    g_free((DataContainer *)opaque);
}
-static DataHandler default_video_decode_data_handler = {
+static DataHandler video_decode_data_handler = {
.get_data = copy_picture,
.release = release,
};
avctx->sample_aspect_ratio.den, avctx->bits_per_coded_sample);
}
-static void deserialize_video_data (const AVCodecContext *avctx,
- struct video_data *video)
-{
- memset(video, 0x00, sizeof(struct video_data));
-
- video->width = avctx->width;
- video->height = avctx->height;
- video->fps_n = avctx->time_base.num;
- video->fps_d = avctx->time_base.den;
- video->pix_fmt = avctx->pix_fmt;
- video->par_n = avctx->sample_aspect_ratio.num;
- video->par_d = avctx->sample_aspect_ratio.den;
- video->bpp = avctx->bits_per_coded_sample;
- video->ticks_per_frame = avctx->ticks_per_frame;
-}
-
static void serialize_audio_data (const struct audio_data *audio,
AVCodecContext *avctx)
{
return resampled_audio;
}
-static int parse_and_decode_video(AVCodecContext *avctx, AVFrame *picture,
+static uint32_t parse_and_decode_video(AVCodecContext *avctx, AVFrame *picture,
AVCodecParserContext *pctx, int ctx_id,
- AVPacket *packet, int *got_picture,
+ AVPacket *packet, uint32_t *got_picture,
int idx, int64_t in_offset)
{
uint8_t *parser_outbuf = NULL;
TRACE("not using parser %s\n", avctx->codec->name);
}
- len = avcodec_decode_video2(avctx, picture, got_picture, packet);
+ len = avcodec_decode_video2(avctx, picture, (int *)got_picture, packet);
TRACE("decode_video. len %d, got_picture %d\n", len, *got_picture);
if (!pctx) {
AVCodecParserContext *pctx = NULL;
AVPacket avpkt;
- int got_picture = 0, len = -1;
+ uint32_t got_picture = 0, len = -1;
uint8_t *inbuf = NULL;
int inbuf_size = 0, idx = 0, size = 0;
int64_t in_offset = 0;
DeviceMemEntry *elem = NULL;
- uint8_t *tempbuf = NULL;
- int tempbuf_size = 0;
TRACE("enter: %s\n", __func__);
&avpkt, &got_picture, idx, in_offset);
}
- tempbuf_size = sizeof(len) + sizeof(got_picture) + sizeof(struct video_data);
-
- tempbuf = g_malloc(tempbuf_size);
- if (!tempbuf) {
- ERR("failed to allocate decoded video buffer\n");
- // FIXME: how to handle this case?
- } else {
- struct video_data video;
-
- memcpy(tempbuf, &len, sizeof(len));
- size = sizeof(len);
- memcpy(tempbuf + size, &got_picture, sizeof(got_picture));
- size += sizeof(got_picture);
- if (avctx) {
- deserialize_video_data(avctx, &video);
- memcpy(tempbuf + size, &video, sizeof(struct video_data));
- }
- }
-
- TRACE("decoded image. len: %d got_picture: %d pix_fmt: %d width: %d, height: %d\n",
- len, got_picture, avctx->pix_fmt, avctx->width, avctx->height);
-
- int pict_size = 0;
- bool ret = true;
-
DataContainer *dc = g_malloc0(sizeof(DataContainer));
- dc->data_buffer = tempbuf;
- dc->len_data_buffer = tempbuf_size;
dc->picture_buffer_offset = OFFSET_PICTURE_BUFFER;
-
- if (got_picture) {
- pict_size = avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height);
-
- if ((pict_size) < 0) {
- ERR("picture size: %d\n", pict_size);
- ret = false;
- } else {
- TRACE("picture size: %d\n", pict_size);
-
- dc->frame = picture;
- dc->pix_fmt = avctx->pix_fmt;
- }
+ dc->len = len;
+ dc->got_picture = got_picture;
+ dc->avctx = avctx;
+ if(got_picture) {
+ dc->frame = picture;
}
- if (CONTEXT(s, ctx_id)->is_hwaccel) {
- brillcodec_push_write_queue(s, dc, dc->picture_buffer_offset + pict_size, ctx_id, s->hwaccel_plugin->video_decode_data_handler);
- } else {
- brillcodec_push_write_queue(s, dc, dc->picture_buffer_offset + pict_size, ctx_id, &default_video_decode_data_handler);
- }
+ brillcodec_push_write_queue(s, dc, 0, ctx_id, &video_decode_data_handler);
TRACE("leave: %s\n", __func__);
- return ret;
+ return true;
}
static bool codec_decode_video(MaruBrillCodecState *s, int ctx_id, void *data_buf)
AVCodecParserContext *pctx = NULL;
AVPacket avpkt;
- int got_picture = 0, len = -1;
+ uint32_t got_picture = 0, len = -1;
uint8_t *inbuf = NULL;
int inbuf_size = 0, idx = 0, size = 0;
int64_t in_offset = 0;
DeviceMemEntry *elem = NULL;
- uint8_t *tempbuf = NULL;
- int tempbuf_size = 0;
TRACE("enter: %s\n", __func__);
&avpkt, &got_picture, idx, in_offset);
}
- tempbuf_size = sizeof(len) + sizeof(got_picture) + sizeof(struct video_data);
-
- tempbuf = g_malloc(tempbuf_size);
- if (!tempbuf) {
- ERR("failed to allocate decoded audio buffer\n");
- tempbuf_size = 0;
- } else {
- struct video_data video;
-
- memcpy(tempbuf, &len, sizeof(len));
- size = sizeof(len);
- memcpy(tempbuf + size, &got_picture, sizeof(got_picture));
- size += sizeof(got_picture);
- if (avctx) {
- deserialize_video_data(avctx, &video);
- memcpy(tempbuf + size, &video, sizeof(struct video_data));
- }
- }
+ DataContainer *dc = g_malloc0(sizeof(DataContainer));
+ dc->picture_buffer_offset = OFFSET_PICTURE_BUFFER;
+ dc->len = len;
+ dc->got_picture = got_picture;
+ dc->avctx = avctx;
- brillcodec_push_write_queue(s, tempbuf, tempbuf_size, ctx_id, NULL);
+ brillcodec_push_write_queue(s, dc, 0, ctx_id, &video_decode_data_handler);
TRACE("leave: %s\n", __func__);
{
AVCodecContext *avctx = NULL;
AVFrame *frame = NULL;
- int pict_size = 0;
bool ret = true;
TRACE("enter: %s\n", __func__);
avctx = CONTEXT(s, ctx_id)->avctx;
frame = CONTEXT(s, ctx_id)->frame;
- if (!avctx) {
- ERR("picture_copy. %d of AVCodecContext is NULL.\n", ctx_id);
- ret = false;
- } else if (!avctx->codec) {
- ERR("picture_copy. %d of AVCodec is NULL.\n", ctx_id);
- ret = false;
- } else if (!frame) {
- ERR("picture_copy. %d of AVFrame is NULL.\n", ctx_id);
- ret = false;
- } else {
- TRACE("decoded image. pix_fmt: %d width: %d, height: %d\n",
- avctx->pix_fmt, avctx->width, avctx->height);
- pict_size = avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height);
- if ((pict_size) < 0) {
- ERR("picture size: %d\n", pict_size);
- ret = false;
- } else {
- TRACE("picture size: %d\n", pict_size);
+ DataContainer *dc = g_malloc0(sizeof(DataContainer));
- DataContainer *dc = g_malloc0(sizeof(DataContainer));
- dc->frame = frame;
- dc->pix_fmt = avctx->pix_fmt;
+ dc->frame = frame;
+ dc->avctx = avctx;
- if (CONTEXT(s, ctx_id)->is_hwaccel) {
- brillcodec_push_write_queue(s, dc, pict_size, ctx_id, s->hwaccel_plugin->video_decode_data_handler);
- } else {
- brillcodec_push_write_queue(s, dc, pict_size, ctx_id, &default_video_decode_data_handler);
- }
- }
- }
+ brillcodec_push_write_queue(s, dc, 0, ctx_id, &video_decode_data_handler);
TRACE("leave: %s\n", __func__);
#include "maru_brillcodec_plugin.h"
-#define SURFACE_COUNT 20
+#define SURFACE_COUNT 4
#define PROFILE VAProfileH264High
#ifndef VA_SURFACE_ATTRIB_SETTABLE
src[1], src_pitch[1], width / 2, height / 2);
}
-static int extract(AVFrame *src, void* dst, size_t size)
+static void extract(void* dst, void *src)
{
- VASurfaceID surface_id = (VASurfaceID)(uintptr_t)src->data[3];
+ AVFrame *frame = (AVFrame *)src;
+ VASurfaceID surface_id = (VASurfaceID)(uintptr_t)frame->data[3];
#if VA_CHECK_VERSION(0,31,0)
if (vaSyncSurface(va_display, surface_id))
#error
#endif
{
- return -1;
+ return;
}
if (va_ctx->is_supports_derive) {
if (vaDeriveImage(va_display, surface_id, &va_ctx->image) != VA_STATUS_SUCCESS) {
- return -1;
+ return;
}
} else {
if (vaGetImage(va_display, surface_id,
- 0, 0, src->width, src->height,
+ 0, 0, frame->width, frame->height,
va_ctx->image.image_id)) {
- return -1;
+ return;
}
}
void *p_base;
if (vaMapBuffer(va_display, va_ctx->image.buf, &p_base)) {
- return -1;
+ return;
}
const uint32_t fourcc = va_ctx->image.format.fourcc;
pitch[i] = va_ctx->image.pitches[i];
}
uint8_t *data[4];
- av_image_fill_pointers(data, PIX_FMT_YUV420P, src->height, dst, (const int *)pitch);
- copy_yv12(data, plane, pitch, src->width, src->height);
+ av_image_fill_pointers(data, PIX_FMT_YUV420P, frame->height, dst, (const int *)pitch);
+ copy_yv12(data, plane, pitch, frame->width, frame->height);
#else
// for performance testing... U, V plane order is reversed
- memcpy(dst, p_base + va_ctx->image.offsets[0], size);
+ size_t pict_size = avpicture_get_size(PIX_FMT_YUV420P, frame->width, frame->height);
+ memcpy(dst, p_base + va_ctx->image.offsets[0], pict_size);
#endif
} else {
- return -1;
+ return;
}
if (vaUnmapBuffer(va_display, va_ctx->image.buf)) {
- return -1;
+ return;
}
if (va_ctx->is_supports_derive)
va_ctx->image.image_id = VA_INVALID_ID;
}
- return 0;
+ return;
}
static int get_surface(AVCodecContext *p_context,
*((bool *)frame->opaque) = false;
}
-static void vaapi_extract(void *dst, void *opaque, size_t size)
-{
- DataContainer *dc = (DataContainer *)opaque;
- if (dc->len_data_buffer) {
- memcpy(dst, dc->data_buffer, dc->len_data_buffer);
- }
- if (dc->frame) {
- extract(dc->frame, dst + dc->picture_buffer_offset, size - dc->picture_buffer_offset);
- }
-}
-
-static void release(void *opaque)
-{
- DataContainer *dc = (DataContainer *)opaque;
- g_free(dc->data_buffer);
- g_free(dc);
-}
-
-static DataHandler vaapi_video_decode_data_handler = {
- .get_data = vaapi_extract,
- .release = release,
-};
-
CodecPlugin vaapi_plugin = {
.pix_fmt = PIX_FMT_VAAPI_VLD,
+ .output_pix_fmt = PIX_FMT_YUV420P,
.setup = setup,
.get_buffer = get_surface,
.release_buffer = release_surface,
- .video_decode_data_handler = &vaapi_video_decode_data_handler,
+ .get_picture = extract,
};
surface->is_occupied = false;
}
-static int extract(AVFrame *src, void *dst, size_t size)
+static void extract(void *dst, void *src)
{
- LPDIRECT3DSURFACE9 d3d = (LPDIRECT3DSURFACE9)(uintptr_t)src->data[3];
+ AVFrame *frame = (AVFrame *)src;
+ LPDIRECT3DSURFACE9 d3d = (LPDIRECT3DSURFACE9)(uintptr_t)frame->data[3];
/* */
assert(dxva_ctx->output == MAKEFOURCC('Y','V','1','2'));
D3DLOCKED_RECT lock;
if (FAILED(IDirect3DSurface9_LockRect(d3d, &lock, NULL, D3DLOCK_READONLY))) {
ERR("Failed to lock surface\n");
- return -1;
+ return;
}
if (dxva_ctx->render_fmt == MAKEFOURCC('Y','V','1','2') ||
}
uint8_t *data[4];
- av_image_fill_pointers(data, PIX_FMT_YUV420P, src->height, dst, (const int *)pitch);
- copy_yv12(data, pitch, plane, pitch, src->width, src->height);
+ av_image_fill_pointers(data, PIX_FMT_YUV420P, frame->height, dst, (const int *)pitch);
+ copy_yv12(data, pitch, plane, pitch, frame->width, frame->height);
} else if (dxva_ctx->render_fmt == MAKEFOURCC('N','V','1','2')) {
uint8_t *plane[2] = {
lock.pBits,
uint8_t *data[4];
int linesizes[4];
- av_image_fill_linesizes(linesizes, AV_PIX_FMT_YUV420P, src->width);
- av_image_fill_pointers(data, PIX_FMT_YUV420P, src->height, dst, linesizes);
- copy_nv12(data, linesizes, plane, pitch, src->width, src->height);
+ av_image_fill_linesizes(linesizes, AV_PIX_FMT_YUV420P, frame->width);
+ av_image_fill_pointers(data, PIX_FMT_YUV420P, frame->height, dst, linesizes);
+ copy_nv12(data, linesizes, plane, pitch, frame->width, frame->height);
} else {
ERR("Not supported format.(%x)\n", dxva_ctx->render_fmt);
IDirect3DSurface9_UnlockRect(d3d);
- return -1;
+ return;
}
/* */
IDirect3DSurface9_UnlockRect(d3d);
- return 0;
-}
-
-static void dxva_extract(void *dst, void *src, size_t size) {
- extract((AVFrame *)src, dst, size);
}
-static void dxva_release(void *buf) {}
-
-static DataHandler dxva_video_decode_data_handler = {
- .get_data = dxva_extract,
- .release = dxva_release,
-};
-
CodecPlugin dxva_plugin = {
.pix_fmt = PIX_FMT_DXVA2_VLD,
+ .output_pix_fmt = PIX_FMT_YUV420P,
.setup = dxva_setup,
.get_buffer = dxva_get_surface,
.release_buffer = dxva_release_surface,
- .video_decode_data_handler = &dxva_video_decode_data_handler,
+ .get_picture = extract,
};