Add CODEC_CAP_FRAME_THREADS to the codec capabilities. There will be very little
speed gain at this point but it should work.
+If there are inter-frame dependencies, so the codec calls
+ff_thread_report/await_progress(), set AVCodecInternal.allocate_progress. The
+frames must then be freed with ff_thread_release_buffer().
+Otherwise leave it at zero and decode directly into the user-supplied frames.
+
Call ff_thread_report_progress() after some part of the current picture has decoded.
A good place to put this is where draw_horiz_band() is called - add this if it isn't
called anywhere, as it's useful too and the implementation is trivial when you're
* 4XM codec.
*/
+#include "libavutil/frame.h"
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "bytestream.h"
typedef struct FourXContext {
AVCodecContext *avctx;
DSPContext dsp;
- AVFrame *current_picture, *last_picture;
+ AVFrame *last_picture;
GetBitContext pre_gb; ///< ac/dc prefix
GetBitContext gb;
GetByteContext g;
}
}
-static void init_mv(FourXContext *f)
+static void init_mv(FourXContext *f, int linesize)
{
int i;
for (i = 0; i < 256; i++) {
if (f->version > 1)
- f->mv[i] = mv[i][0] + mv[i][1] * f->current_picture->linesize[0] / 2;
+ f->mv[i] = mv[i][0] + mv[i][1] * linesize / 2;
else
- f->mv[i] = (i & 15) - 8 + ((i >> 4) - 8) * f->current_picture->linesize[0] / 2;
+ f->mv[i] = (i & 15) - 8 + ((i >> 4) - 8) * linesize / 2;
}
}
}
}
-static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length)
+static int decode_p_frame(FourXContext *f, AVFrame *frame,
+ const uint8_t *buf, int length)
{
int x, y;
const int width = f->avctx->width;
const int height = f->avctx->height;
uint16_t *src = (uint16_t *)f->last_picture->data[0];
- uint16_t *dst = (uint16_t *)f->current_picture->data[0];
- const int stride = f->current_picture->linesize[0] >> 1;
+ uint16_t *dst = (uint16_t *)frame->data[0];
+ const int stride = frame->linesize[0] >> 1;
unsigned int bitstream_size, bytestream_size, wordstream_size, extra,
bytestream_offset, wordstream_offset;
bytestream2_init(&f->g, buf + bytestream_offset,
length - bytestream_offset);
- init_mv(f);
+ init_mv(f, frame->linesize[0]);
for (y = 0; y < height; y += 8) {
for (x = 0; x < width; x += 8)
return 0;
}
-static inline void idct_put(FourXContext *f, int x, int y)
+static inline void idct_put(FourXContext *f, AVFrame *frame, int x, int y)
{
int16_t (*block)[64] = f->block;
- int stride = f->current_picture->linesize[0] >> 1;
+ int stride = frame->linesize[0] >> 1;
int i;
- uint16_t *dst = ((uint16_t*)f->current_picture->data[0]) + y * stride + x;
+ uint16_t *dst = ((uint16_t*)frame->data[0]) + y * stride + x;
for (i = 0; i < 4; i++) {
block[i][0] += 0x80 * 8 * 8;
return red / 3 * 1024 + green / 3 * 32 + blue / 3;
}
-static int decode_i2_frame(FourXContext *f, const uint8_t *buf, int length)
+static int decode_i2_frame(FourXContext *f, AVFrame *frame, const uint8_t *buf, int length)
{
int x, y, x2, y2;
const int width = f->avctx->width;
const int height = f->avctx->height;
const int mbs = (FFALIGN(width, 16) >> 4) * (FFALIGN(height, 16) >> 4);
- uint16_t *dst = (uint16_t*)f->current_picture->data[0];
- const int stride = f->current_picture->linesize[0]>>1;
+ uint16_t *dst = (uint16_t*)frame->data[0];
+ const int stride = frame->linesize[0]>>1;
GetByteContext g3;
if (length < mbs * 8) {
return 0;
}
-static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length)
+static int decode_i_frame(FourXContext *f, AVFrame *frame, const uint8_t *buf, int length)
{
int x, y, ret;
const int width = f->avctx->width;
if ((ret = decode_i_mb(f)) < 0)
return ret;
- idct_put(f, x, y);
+ idct_put(f, frame, x, y);
}
}
int buf_size = avpkt->size;
FourXContext *const f = avctx->priv_data;
AVFrame *picture = data;
- AVFrame *p;
int i, frame_4cc, frame_size, ret;
frame_4cc = AV_RL32(buf);
frame_size = buf_size - 12;
}
- FFSWAP(AVFrame*, f->current_picture, f->last_picture);
-
- p = f->current_picture;
- avctx->coded_frame = p;
-
// alternatively we would have to use our own buffer management
avctx->flags |= CODEC_FLAG_EMU_EDGE;
- if (p->data[0])
- avctx->release_buffer(avctx, p);
-
- p->reference = 1;
- if ((ret = ff_get_buffer(avctx, p)) < 0) {
+ if ((ret = ff_get_buffer(avctx, picture, AV_GET_BUFFER_FLAG_REF)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
if (frame_4cc == AV_RL32("ifr2")) {
- p->pict_type = AV_PICTURE_TYPE_I;
- if ((ret = decode_i2_frame(f, buf - 4, frame_size + 4)) < 0)
+ picture->pict_type = AV_PICTURE_TYPE_I;
+ if ((ret = decode_i2_frame(f, picture, buf - 4, frame_size + 4)) < 0)
return ret;
} else if (frame_4cc == AV_RL32("ifrm")) {
- p->pict_type = AV_PICTURE_TYPE_I;
- if ((ret = decode_i_frame(f, buf, frame_size)) < 0)
+ picture->pict_type = AV_PICTURE_TYPE_I;
+ if ((ret = decode_i_frame(f, picture, buf, frame_size)) < 0)
return ret;
} else if (frame_4cc == AV_RL32("pfrm") || frame_4cc == AV_RL32("pfr2")) {
if (!f->last_picture->data[0]) {
- f->last_picture->reference = 1;
- if ((ret = ff_get_buffer(avctx, f->last_picture)) < 0) {
+ if ((ret = ff_get_buffer(avctx, f->last_picture,
+ AV_GET_BUFFER_FLAG_REF)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
memset(f->last_picture->data[0], 0, avctx->height * FFABS(f->last_picture->linesize[0]));
}
- p->pict_type = AV_PICTURE_TYPE_P;
- if ((ret = decode_p_frame(f, buf, frame_size)) < 0)
+ picture->pict_type = AV_PICTURE_TYPE_P;
+ if ((ret = decode_p_frame(f, picture, buf, frame_size)) < 0)
return ret;
} else if (frame_4cc == AV_RL32("snd_")) {
av_log(avctx, AV_LOG_ERROR, "ignoring snd_ chunk length:%d\n",
buf_size);
}
- p->key_frame = p->pict_type == AV_PICTURE_TYPE_I;
+ picture->key_frame = picture->pict_type == AV_PICTURE_TYPE_I;
- *picture = *p;
+ av_frame_unref(f->last_picture);
+ if ((ret = av_frame_ref(f->last_picture, picture)) < 0)
+ return ret;
*got_frame = 1;
emms_c();
else
avctx->pix_fmt = AV_PIX_FMT_BGR555;
- f->current_picture = avcodec_alloc_frame();
- f->last_picture = avcodec_alloc_frame();
- if (!f->current_picture || !f->last_picture) {
- avcodec_free_frame(&f->current_picture);
- avcodec_free_frame(&f->last_picture);
+ f->last_picture = av_frame_alloc();
+ if (!f->last_picture)
return AVERROR(ENOMEM);
- }
return 0;
}
f->cfrm[i].allocated_size = 0;
}
ff_free_vlc(&f->pre_vlc);
- if (f->current_picture->data[0])
- avctx->release_buffer(avctx, f->current_picture);
- if (f->last_picture->data[0])
- avctx->release_buffer(avctx, f->last_picture);
- avcodec_free_frame(&f->current_picture);
- avcodec_free_frame(&f->last_picture);
+ av_frame_free(&f->last_picture);
return 0;
}
typedef struct EightBpsContext {
AVCodecContext *avctx;
- AVFrame pic;
unsigned char planes;
unsigned char planemap[4];
static int decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
+ AVFrame *frame = data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
EightBpsContext * const c = avctx->priv_data;
unsigned char *planemap = c->planemap;
int ret;
- if (c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
-
- c->pic.reference = 0;
- c->pic.buffer_hints = FF_BUFFER_HINTS_VALID;
- if ((ret = ff_get_buffer(avctx, &c->pic)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
/* Decode a plane */
for (row = 0; row < height; row++) {
- pixptr = c->pic.data[0] + row * c->pic.linesize[0] + planemap[p];
- pixptr_end = pixptr + c->pic.linesize[0];
+ pixptr = frame->data[0] + row * frame->linesize[0] + planemap[p];
+ pixptr_end = pixptr + frame->linesize[0];
dlen = av_be2ne16(*(const unsigned short *)(lp + row * 2));
/* Decode a row of this plane */
while (dlen > 0) {
AV_PKT_DATA_PALETTE,
NULL);
if (pal) {
- c->pic.palette_has_changed = 1;
+ frame->palette_has_changed = 1;
memcpy(c->pal, pal, AVPALETTE_SIZE);
}
- memcpy (c->pic.data[1], c->pal, AVPALETTE_SIZE);
+ memcpy (frame->data[1], c->pal, AVPALETTE_SIZE);
}
*got_frame = 1;
- *(AVFrame*)data = c->pic;
/* always report that the buffer was completely consumed */
return buf_size;
EightBpsContext * const c = avctx->priv_data;
c->avctx = avctx;
- c->pic.data[0] = NULL;
switch (avctx->bits_per_coded_sample) {
case 8:
return 0;
}
-static av_cold int decode_end(AVCodecContext *avctx)
-{
- EightBpsContext * const c = avctx->priv_data;
-
- if (c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
-
- return 0;
-}
-
AVCodec ff_eightbps_decoder = {
.name = "8bps",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_8BPS,
.priv_data_size = sizeof(EightBpsContext),
.init = decode_init,
- .close = decode_end,
.decode = decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("QuickTime 8BPS video"),
/* get output buffer */
frame->nb_samples = buf_size * (is_compr + 1);
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
}
/* get output buffer */
+ av_frame_unref(ac->frame);
ac->frame->nb_samples = 2048;
- if ((ret = ff_get_buffer(avctx, ac->frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, ac->frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
#include <string.h>
#include "avcodec.h"
+#include "internal.h"
#include "msrledec.h"
typedef struct AascContext {
AVCodecContext *avctx;
GetByteContext gb;
- AVFrame frame;
+ AVFrame *frame;
} AascContext;
static av_cold int aasc_decode_init(AVCodecContext *avctx)
avctx->pix_fmt = AV_PIX_FMT_BGR24;
+ s->frame = av_frame_alloc();
+ if (!s->frame)
+ return AVERROR(ENOMEM);
+
return 0;
}
AascContext *s = avctx->priv_data;
int compr, i, stride, ret;
- s->frame.reference = 1;
- s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
- if ((ret = avctx->reget_buffer(avctx, &s->frame)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
case 0:
stride = (avctx->width * 3 + 3) & ~3;
for (i = avctx->height - 1; i >= 0; i--) {
- memcpy(s->frame.data[0] + i * s->frame.linesize[0], buf, avctx->width * 3);
+ memcpy(s->frame->data[0] + i * s->frame->linesize[0], buf, avctx->width * 3);
buf += stride;
}
break;
case 1:
bytestream2_init(&s->gb, buf, buf_size);
- ff_msrle_decode(avctx, (AVPicture*)&s->frame, 8, &s->gb);
+ ff_msrle_decode(avctx, (AVPicture*)s->frame, 8, &s->gb);
break;
default:
av_log(avctx, AV_LOG_ERROR, "Unknown compression type %d\n", compr);
}
*got_frame = 1;
- *(AVFrame*)data = s->frame;
+ if ((ret = av_frame_ref(data, s->frame)) < 0)
+ return ret;
/* report that the buffer was completely consumed */
return buf_size;
{
AascContext *s = avctx->priv_data;
- /* release the last frame */
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
+ av_frame_free(&s->frame);
return 0;
}
/* get output buffer */
frame->nb_samples = s->num_blocks * 256;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
/* get output buffer */
frame->nb_samples = nb_samples;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
/* get output buffer */
frame->nb_samples = num_blocks * BLOCK_SAMPLES;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
if (!alac->nb_samples) {
/* get output buffer */
frame->nb_samples = output_samples;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
/* get output buffer */
frame->nb_samples = ctx->cur_frame_length;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
/* get output buffer */
frame->nb_samples = AMR_BLOCK_SIZE;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
/* get output buffer */
frame->nb_samples = 4 * AMRWB_SFR_SIZE_16k;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
#include "avcodec.h"
#include "bytestream.h"
+#include "internal.h"
typedef struct AnmContext {
- AVFrame frame;
+ AVFrame *frame;
int palette[AVPALETTE_COUNT];
GetByteContext gb;
int x; ///< x coordinate position
avctx->pix_fmt = AV_PIX_FMT_PAL8;
- s->frame.reference = 1;
+ s->frame = av_frame_alloc();
+ if (!s->frame)
+ return AVERROR(ENOMEM);
+
bytestream2_init(&s->gb, avctx->extradata, avctx->extradata_size);
if (bytestream2_get_bytes_left(&s->gb) < 16 * 8 + 4 * 256)
return AVERROR_INVALIDDATA;
uint8_t *dst, *dst_end;
int count, ret;
- if ((ret = avctx->reget_buffer(avctx, &s->frame)) < 0){
+ if ((ret = ff_reget_buffer(avctx, s->frame)) < 0){
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
- dst = s->frame.data[0];
- dst_end = s->frame.data[0] + s->frame.linesize[0]*avctx->height;
+ dst = s->frame->data[0];
+ dst_end = s->frame->data[0] + s->frame->linesize[0]*avctx->height;
bytestream2_init(&s->gb, avpkt->data, buf_size);
do {
/* if statements are ordered by probability */
#define OP(gb, pixel, count) \
- op(&dst, dst_end, (gb), (pixel), (count), &s->x, avctx->width, s->frame.linesize[0])
+ op(&dst, dst_end, (gb), (pixel), (count), &s->x, avctx->width, s->frame->linesize[0])
int type = bytestream2_get_byte(&s->gb);
count = type & 0x7F;
}
} while (bytestream2_get_bytes_left(&s->gb) > 0);
- memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE);
+ memcpy(s->frame->data[1], s->palette, AVPALETTE_SIZE);
*got_frame = 1;
- *(AVFrame*)data = s->frame;
+ if ((ret = av_frame_ref(data, s->frame)) < 0)
+ return ret;
+
return buf_size;
}
static av_cold int decode_end(AVCodecContext *avctx)
{
AnmContext *s = avctx->priv_data;
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
+
+ av_frame_free(&s->frame);
return 0;
}
*/
#include "libavutil/common.h"
+#include "libavutil/frame.h"
#include "libavutil/lfg.h"
#include "avcodec.h"
#include "cga_data.h"
};
typedef struct {
- AVFrame frame;
+ AVFrame *frame;
int x; /**< x cursor position (pixels) */
int y; /**< y cursor position (pixels) */
int sx; /**< saved x cursor position (pixels) */
AnsiContext *s = avctx->priv_data;
avctx->pix_fmt = AV_PIX_FMT_PAL8;
+ s->frame = av_frame_alloc();
+ if (!s->frame)
+ return AVERROR(ENOMEM);
+
/* defaults */
s->font = ff_vga16_font;
s->font_height = 16;
i = 0;
for (; i < avctx->height - s->font_height; i++)
- memcpy(s->frame.data[0] + i * s->frame.linesize[0],
- s->frame.data[0] + (i + s->font_height) * s->frame.linesize[0],
+ memcpy(s->frame->data[0] + i * s->frame->linesize[0],
+ s->frame->data[0] + (i + s->font_height) * s->frame->linesize[0],
avctx->width);
for (; i < avctx->height; i++)
- memset(s->frame.data[0] + i * s->frame.linesize[0],
+ memset(s->frame->data[0] + i * s->frame->linesize[0],
DEFAULT_BG_COLOR, avctx->width);
}
AnsiContext *s = avctx->priv_data;
int i;
for (i = 0; i < s->font_height; i++)
- memset(s->frame.data[0] + (s->y + i)*s->frame.linesize[0] + xoffset,
+ memset(s->frame->data[0] + (s->y + i)*s->frame->linesize[0] + xoffset,
DEFAULT_BG_COLOR, xlength);
}
AnsiContext *s = avctx->priv_data;
int i;
for (i = 0; i < avctx->height; i++)
- memset(s->frame.data[0] + i * s->frame.linesize[0], DEFAULT_BG_COLOR, avctx->width);
+ memset(s->frame->data[0] + i * s->frame->linesize[0], DEFAULT_BG_COLOR, avctx->width);
s->x = s->y = 0;
}
FFSWAP(int, fg, bg);
if ((s->attributes & ATTR_CONCEALED))
fg = bg;
- ff_draw_pc_font(s->frame.data[0] + s->y * s->frame.linesize[0] + s->x,
- s->frame.linesize[0], s->font, s->font_height, c, fg, bg);
+ ff_draw_pc_font(s->frame->data[0] + s->y * s->frame->linesize[0] + s->x,
+ s->frame->linesize[0], s->font, s->font_height, c, fg, bg);
s->x += FONT_WIDTH;
if (s->x >= avctx->width) {
s->x = 0;
av_log_ask_for_sample(avctx, "unsupported screen mode\n");
}
if (width != avctx->width || height != avctx->height) {
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
+ av_frame_unref(s->frame);
avcodec_set_dimensions(avctx, width, height);
- ret = ff_get_buffer(avctx, &s->frame);
+ ret = ff_get_buffer(avctx, s->frame, AV_GET_BUFFER_FLAG_REF);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
- s->frame.pict_type = AV_PICTURE_TYPE_I;
- s->frame.palette_has_changed = 1;
- memcpy(s->frame.data[1], ff_cga_palette, 16 * 4);
+ s->frame->pict_type = AV_PICTURE_TYPE_I;
+ s->frame->palette_has_changed = 1;
+ memcpy(s->frame->data[1], ff_cga_palette, 16 * 4);
erase_screen(avctx);
} else if (c == 'l') {
erase_screen(avctx);
case 0:
erase_line(avctx, s->x, avctx->width - s->x);
if (s->y < avctx->height - s->font_height)
- memset(s->frame.data[0] + (s->y + s->font_height)*s->frame.linesize[0],
- DEFAULT_BG_COLOR, (avctx->height - s->y - s->font_height)*s->frame.linesize[0]);
+ memset(s->frame->data[0] + (s->y + s->font_height)*s->frame->linesize[0],
+ DEFAULT_BG_COLOR, (avctx->height - s->y - s->font_height)*s->frame->linesize[0]);
break;
case 1:
erase_line(avctx, 0, s->x);
if (s->y > 0)
- memset(s->frame.data[0], DEFAULT_BG_COLOR, s->y * s->frame.linesize[0]);
+ memset(s->frame->data[0], DEFAULT_BG_COLOR, s->y * s->frame->linesize[0]);
break;
case 2:
erase_screen(avctx);
const uint8_t *buf_end = buf+buf_size;
int ret, i, count;
- ret = avctx->reget_buffer(avctx, &s->frame);
+ ret = ff_reget_buffer(avctx, s->frame);
if (ret < 0){
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
if (!avctx->frame_number) {
- memset(s->frame.data[0], 0, avctx->height * FFABS(s->frame.linesize[0]));
- memset(s->frame.data[1], 0, AVPALETTE_SIZE);
+ memset(s->frame->data[0], 0, avctx->height * FFABS(s->frame->linesize[0]));
+ memset(s->frame->data[1], 0, AVPALETTE_SIZE);
}
- s->frame.pict_type = AV_PICTURE_TYPE_I;
- s->frame.palette_has_changed = 1;
- memcpy(s->frame.data[1], ff_cga_palette, 16 * 4);
+ s->frame->pict_type = AV_PICTURE_TYPE_I;
+ s->frame->palette_has_changed = 1;
+ memcpy(s->frame->data[1], ff_cga_palette, 16 * 4);
while(buf < buf_end) {
switch(s->state) {
}
*got_frame = 1;
- *(AVFrame*)data = s->frame;
+ if ((ret = av_frame_ref(data, s->frame)) < 0)
+ return ret;
return buf_size;
}
static av_cold int decode_close(AVCodecContext *avctx)
{
AnsiContext *s = avctx->priv_data;
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
+
+ av_frame_free(&s->frame);
return 0;
}
/* get output buffer */
frame->nb_samples = blockstodecode;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
return 0;
}
-static inline void idct_put(ASV1Context *a, int mb_x, int mb_y)
+static inline void idct_put(ASV1Context *a, AVFrame *frame, int mb_x, int mb_y)
{
int16_t (*block)[64] = a->block;
- int linesize = a->picture.linesize[0];
+ int linesize = frame->linesize[0];
- uint8_t *dest_y = a->picture.data[0] + (mb_y * 16* linesize ) + mb_x * 16;
- uint8_t *dest_cb = a->picture.data[1] + (mb_y * 8 * a->picture.linesize[1]) + mb_x * 8;
- uint8_t *dest_cr = a->picture.data[2] + (mb_y * 8 * a->picture.linesize[2]) + mb_x * 8;
+ uint8_t *dest_y = frame->data[0] + (mb_y * 16* linesize ) + mb_x * 16;
+ uint8_t *dest_cb = frame->data[1] + (mb_y * 8 * frame->linesize[1]) + mb_x * 8;
+ uint8_t *dest_cr = frame->data[2] + (mb_y * 8 * frame->linesize[2]) + mb_x * 8;
a->dsp.idct_put(dest_y , linesize, block[0]);
a->dsp.idct_put(dest_y + 8, linesize, block[1]);
a->dsp.idct_put(dest_y + 8*linesize + 8, linesize, block[3]);
if (!(a->avctx->flags&CODEC_FLAG_GRAY)) {
- a->dsp.idct_put(dest_cb, a->picture.linesize[1], block[4]);
- a->dsp.idct_put(dest_cr, a->picture.linesize[2], block[5]);
+ a->dsp.idct_put(dest_cb, frame->linesize[1], block[4]);
+ a->dsp.idct_put(dest_cr, frame->linesize[2], block[5]);
}
}
ASV1Context * const a = avctx->priv_data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
- AVFrame *picture = data;
- AVFrame * const p = &a->picture;
+ AVFrame * const p = data;
int mb_x, mb_y, ret;
- if (p->data[0])
- avctx->release_buffer(avctx, p);
-
- p->reference = 0;
- if ((ret = ff_get_buffer(avctx, p)) < 0) {
+ if ((ret = ff_get_buffer(avctx, p, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
if ((ret = decode_mb(a, a->block)) < 0)
return ret;
- idct_put(a, mb_x, mb_y);
+ idct_put(a, p, mb_x, mb_y);
}
}
if ((ret = decode_mb(a, a->block)) < 0)
return ret;
- idct_put(a, mb_x, mb_y);
+ idct_put(a, p, mb_x, mb_y);
}
}
if ((ret = decode_mb(a, a->block)) < 0)
return ret;
- idct_put(a, mb_x, mb_y);
+ idct_put(a, p, mb_x, mb_y);
}
}
- *picture = a->picture;
*got_frame = 1;
emms_c();
static av_cold int decode_init(AVCodecContext *avctx)
{
ASV1Context * const a = avctx->priv_data;
- AVFrame *p = &a->picture;
const int scale = avctx->codec_id == AV_CODEC_ID_ASV1 ? 1 : 2;
int i;
a->intra_matrix[i] = 64 * scale * ff_mpeg1_default_intra_matrix[index] / a->inv_qscale;
}
- p->qstride = a->mb_width;
- p->qscale_table = av_malloc(p->qstride * a->mb_height);
- p->quality = (32 * scale + a->inv_qscale / 2) / a->inv_qscale;
- memset(p->qscale_table, p->quality, p->qstride * a->mb_height);
-
return 0;
}
ASV1Context * const a = avctx->priv_data;
av_freep(&a->bitstream_buffer);
- av_freep(&a->picture.qscale_table);
a->bitstream_buffer_size = 0;
- if (a->picture.data[0])
- avctx->release_buffer(avctx, &a->picture);
-
return 0;
}
/* get output buffer */
frame->nb_samples = AT1_SU_SAMPLES;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
/* get output buffer */
frame->nb_samples = SAMPLES_PER_FRAME;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
#include "internal.h"
#include "libavutil/internal.h"
-typedef struct AuraDecodeContext {
- AVCodecContext *avctx;
- AVFrame frame;
-} AuraDecodeContext;
-
static av_cold int aura_decode_init(AVCodecContext *avctx)
{
- AuraDecodeContext *s = avctx->priv_data;
-
- s->avctx = avctx;
/* width needs to be divisible by 4 for this codec to work */
if (avctx->width & 0x3)
return AVERROR(EINVAL);
void *data, int *got_frame,
AVPacket *pkt)
{
- AuraDecodeContext *s = avctx->priv_data;
+ AVFrame *frame = data;
uint8_t *Y, *U, *V;
uint8_t val;
int x, y, ret;
/* pixel data starts 48 bytes in, after 3x16-byte tables */
buf += 48;
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
-
- s->frame.buffer_hints = FF_BUFFER_HINTS_VALID;
- s->frame.reference = 0;
- if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
- Y = s->frame.data[0];
- U = s->frame.data[1];
- V = s->frame.data[2];
+ Y = frame->data[0];
+ U = frame->data[1];
+ V = frame->data[2];
/* iterate through each line in the height */
for (y = 0; y < avctx->height; y++) {
Y[1] = Y[ 0] + delta_table[val & 0xF];
Y += 2; U++; V++;
}
- Y += s->frame.linesize[0] - avctx->width;
- U += s->frame.linesize[1] - (avctx->width >> 1);
- V += s->frame.linesize[2] - (avctx->width >> 1);
+ Y += frame->linesize[0] - avctx->width;
+ U += frame->linesize[1] - (avctx->width >> 1);
+ V += frame->linesize[2] - (avctx->width >> 1);
}
*got_frame = 1;
- *(AVFrame*)data = s->frame;
return pkt->size;
}
-static av_cold int aura_decode_end(AVCodecContext *avctx)
-{
- AuraDecodeContext *s = avctx->priv_data;
-
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
-
- return 0;
-}
-
AVCodec ff_aura2_decoder = {
.name = "aura2",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_AURA2,
- .priv_data_size = sizeof(AuraDecodeContext),
.init = aura_decode_init,
- .close = aura_decode_end,
.decode = aura_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Auravision Aura 2"),
#define FF_QSCALE_TYPE_H264 2
#define FF_QSCALE_TYPE_VP56 3
+#if FF_API_GET_BUFFER
#define FF_BUFFER_TYPE_INTERNAL 1
#define FF_BUFFER_TYPE_USER 2 ///< direct rendering buffers (image is (de)allocated by user)
#define FF_BUFFER_TYPE_SHARED 4 ///< Buffer from somewhere else; don't deallocate image (data/base), all other tables are not shared.
#define FF_BUFFER_HINTS_READABLE 0x02 // Codec will read from buffer.
#define FF_BUFFER_HINTS_PRESERVE 0x04 // User must not alter buffer content.
#define FF_BUFFER_HINTS_REUSABLE 0x08 // Codec will reuse the buffer (update).
+#endif
+
+/**
+ * The decoder will keep a reference to the frame and may reuse it later.
+ */
+#define AV_GET_BUFFER_FLAG_REF (1 << 0)
/**
* @defgroup lavc_packet AVPacket
*/
enum AVSampleFormat request_sample_fmt;
+#if FF_API_GET_BUFFER
/**
* Called at the beginning of each frame to get a buffer for it.
*
*
* - encoding: unused
* - decoding: Set by libavcodec, user can override.
+ *
+ * @deprecated use get_buffer2()
*/
+ attribute_deprecated
int (*get_buffer)(struct AVCodecContext *c, AVFrame *pic);
/**
* but not by more than one thread at once, so does not need to be reentrant.
* - encoding: unused
* - decoding: Set by libavcodec, user can override.
+ *
+ * @deprecated custom freeing callbacks should be set from get_buffer2()
*/
+ attribute_deprecated
void (*release_buffer)(struct AVCodecContext *c, AVFrame *pic);
/**
* - encoding: unused
* - decoding: Set by libavcodec, user can override.
*/
+ attribute_deprecated
int (*reget_buffer)(struct AVCodecContext *c, AVFrame *pic);
+#endif
+ /**
+ * This callback is called at the beginning of each frame to get data
+ * buffer(s) for it. There may be one contiguous buffer for all the data or
+ * there may be a buffer per each data plane or anything in between. Each
+ * buffer must be reference-counted using the AVBuffer API.
+ *
+ * The following fields will be set in the frame before this callback is
+ * called:
+ * - format
+ * - width, height (video only)
+ * - sample_rate, channel_layout, nb_samples (audio only)
+ * Their values may differ from the corresponding values in
+ * AVCodecContext. This callback must use the frame values, not the codec
+ * context values, to calculate the required buffer size.
+ *
+ * This callback must fill the following fields in the frame:
+ * - data[]
+ * - linesize[]
+ * - extended_data:
+ * * if the data is planar audio with more than 8 channels, then this
+ * callback must allocate and fill extended_data to contain all pointers
+ * to all data planes. data[] must hold as many pointers as it can.
+ * extended_data must be allocated with av_malloc() and will be freed in
+ * av_frame_unref().
+ * * otherwise exended_data must point to data
+ * - buf[] must contain references to the buffers that contain the frame
+ * data.
+ * - extended_buf and nb_extended_buf must be allocated with av_malloc() by
+ * this callback and filled with the extra buffers if there are more
+ * buffers than buf[] can hold. extended_buf will be freed in
+ * av_frame_unref().
+ *
+ * If CODEC_CAP_DR1 is not set then get_buffer2() must call
+ * avcodec_default_get_buffer2() instead of providing buffers allocated by
+ * some other means.
+ *
+ * Each data plane must be aligned to the maximum required by the target
+ * CPU.
+ *
+ * @see avcodec_default_get_buffer2()
+ *
+ * Video:
+ *
+ * If AV_GET_BUFFER_FLAG_REF is set in flags then the frame may be reused
+ * (read and/or written to if it is writable) later by libavcodec.
+ *
+ * If CODEC_FLAG_EMU_EDGE is not set in s->flags, the buffer must contain an
+ * edge of the size returned by avcodec_get_edge_width() on all sides.
+ *
+ * avcodec_align_dimensions2() should be used to find the required width and
+ * height, as they normally need to be rounded up to the next multiple of 16.
+ *
+ * If frame multithreading is used and thread_safe_callbacks is set,
+ * this callback may be called from a different thread, but not from more
+ * than one at once. Does not need to be reentrant.
+ *
+ * @see avcodec_align_dimensions2()
+ *
+ * Audio:
+ *
+ * Decoders request a buffer of a particular size by setting
+ * AVFrame.nb_samples prior to calling get_buffer2(). The decoder may,
+ * however, utilize only part of the buffer by setting AVFrame.nb_samples
+ * to a smaller value in the output frame.
+ *
+ * As a convenience, av_samples_get_buffer_size() and
+ * av_samples_fill_arrays() in libavutil may be used by custom get_buffer2()
+ * functions to find the required data size and to fill data pointers and
+ * linesize. In AVFrame.linesize, only linesize[0] may be set for audio
+ * since all planes must be the same size.
+ *
+ * @see av_samples_get_buffer_size(), av_samples_fill_arrays()
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec, user can override.
+ */
+ int (*get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags);
+
+ /**
+ * If non-zero, the decoded audio and video frames returned from
+ * avcodec_decode_video2() and avcodec_decode_audio4() are reference-counted
+ * and are valid indefinitely. The caller must free them with
+ * av_frame_unref() when they are not needed anymore.
+ * Otherwise, the decoded frames must not be freed by the caller and are
+ * only valid until the next decode call.
+ *
+ * - encoding: unused
+ * - decoding: set by the caller before avcodec_open2().
+ */
+ int refcounted_frames;
/* - encoding parameters */
float qcompress; ///< amount of qscale change between easy & hard scenes (0.0-1.0)
*/
AVCodec *avcodec_find_decoder_by_name(const char *name);
-int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic);
-void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic);
-int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic);
+#if FF_API_GET_BUFFER
+attribute_deprecated int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic);
+attribute_deprecated void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic);
+attribute_deprecated int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic);
+#endif
+
+/**
+ * The default callback for AVCodecContext.get_buffer2(). It is made public so
+ * it can be called by custom get_buffer2() implementations for decoders without
+ * CODEC_CAP_DR1 set.
+ */
+int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags);
/**
* Return the amount of padding in pixels which the get_buffer callback must
*/
void avcodec_flush_buffers(AVCodecContext *avctx);
-void avcodec_default_free_buffers(AVCodecContext *s);
-
/**
* Return codec bits per sample.
*
#include "avcodec.h"
#include "get_bits.h"
+#include "internal.h"
typedef struct {
AvsBlockType type;
GetBitContext change_map;
- if ((ret = avctx->reget_buffer(avctx, p)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, p)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
- p->reference = 1;
p->pict_type = AV_PICTURE_TYPE_P;
p->key_frame = 0;
align_get_bits(&change_map);
}
- *picture = avs->picture;
+ if ((ret = av_frame_ref(picture, &avs->picture)) < 0)
+ return ret;
*got_frame = 1;
return buf_size;
static av_cold int avs_decode_end(AVCodecContext *avctx)
{
AvsContext *s = avctx->priv_data;
- if (s->picture.data[0])
- avctx->release_buffer(avctx, &s->picture);
+ av_frame_unref(&s->picture);
return 0;
}
#include "avcodec.h"
#include "bethsoftvideo.h"
#include "bytestream.h"
+#include "internal.h"
typedef struct BethsoftvidContext {
AVFrame frame;
static av_cold int bethsoftvid_decode_init(AVCodecContext *avctx)
{
BethsoftvidContext *vid = avctx->priv_data;
- vid->frame.reference = 1;
- vid->frame.buffer_hints = FF_BUFFER_HINTS_VALID |
- FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
avctx->pix_fmt = AV_PIX_FMT_PAL8;
return 0;
}
int code, ret;
int yoffset;
- if ((ret = avctx->reget_buffer(avctx, &vid->frame)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, &vid->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
}
end:
+ if ((ret = av_frame_ref(data, &vid->frame)) < 0)
+ return ret;
+
*got_frame = 1;
- *(AVFrame*)data = vid->frame;
return avpkt->size;
}
static av_cold int bethsoftvid_decode_end(AVCodecContext *avctx)
{
BethsoftvidContext * vid = avctx->priv_data;
- if(vid->frame.data[0])
- avctx->release_buffer(avctx, &vid->frame);
+ av_frame_unref(&vid->frame);
return 0;
}
typedef struct BFIContext {
AVCodecContext *avctx;
- AVFrame frame;
uint8_t *dst;
} BFIContext;
static int bfi_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
+ AVFrame *frame = data;
GetByteContext g;
int buf_size = avpkt->size;
BFIContext *bfi = avctx->priv_data;
uint32_t *pal;
int i, j, ret, height = avctx->height;
- if (bfi->frame.data[0])
- avctx->release_buffer(avctx, &bfi->frame);
-
- bfi->frame.reference = 1;
-
- if ((ret = ff_get_buffer(avctx, &bfi->frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
/* Set frame parameters and palette, if necessary */
if (!avctx->frame_number) {
- bfi->frame.pict_type = AV_PICTURE_TYPE_I;
- bfi->frame.key_frame = 1;
+ frame->pict_type = AV_PICTURE_TYPE_I;
+ frame->key_frame = 1;
/* Setting the palette */
if (avctx->extradata_size > 768) {
av_log(NULL, AV_LOG_ERROR, "Palette is too large.\n");
return AVERROR_INVALIDDATA;
}
- pal = (uint32_t *)bfi->frame.data[1];
+ pal = (uint32_t *)frame->data[1];
for (i = 0; i < avctx->extradata_size / 3; i++) {
int shift = 16;
*pal = 0;
(avctx->extradata[i * 3 + j] >> 4)) << shift;
pal++;
}
- bfi->frame.palette_has_changed = 1;
+ frame->palette_has_changed = 1;
} else {
- bfi->frame.pict_type = AV_PICTURE_TYPE_P;
- bfi->frame.key_frame = 0;
+ frame->pict_type = AV_PICTURE_TYPE_P;
+ frame->key_frame = 0;
}
bytestream2_skip(&g, 4); // Unpacked size, not required.
}
src = bfi->dst;
- dst = bfi->frame.data[0];
+ dst = frame->data[0];
while (height--) {
memcpy(dst, src, avctx->width);
src += avctx->width;
- dst += bfi->frame.linesize[0];
+ dst += frame->linesize[0];
}
*got_frame = 1;
- *(AVFrame *)data = bfi->frame;
+
return buf_size;
}
static av_cold int bfi_decode_close(AVCodecContext *avctx)
{
BFIContext *bfi = avctx->priv_data;
- if (bfi->frame.data[0])
- avctx->release_buffer(avctx, &bfi->frame);
av_free(bfi->dst);
return 0;
}
AVCodecContext *avctx;
DSPContext dsp;
BinkDSPContext bdsp;
- AVFrame *pic, *last;
+ AVFrame *last;
int version; ///< internal Bink file version
int has_alpha;
int swap_planes;
memcpy(dst + i*stride, tmp + i*8, 8);
}
-static int binkb_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx,
- int is_key, int is_chroma)
+static int binkb_decode_plane(BinkContext *c, AVFrame *frame, GetBitContext *gb,
+ int plane_idx, int is_key, int is_chroma)
{
int blk, ret;
int i, j, bx, by;
int ybias = is_key ? -15 : 0;
int qp;
- const int stride = c->pic->linesize[plane_idx];
+ const int stride = frame->linesize[plane_idx];
int bw = is_chroma ? (c->avctx->width + 15) >> 4 : (c->avctx->width + 7) >> 3;
int bh = is_chroma ? (c->avctx->height + 15) >> 4 : (c->avctx->height + 7) >> 3;
binkb_init_bundles(c);
- ref_start = c->pic->data[plane_idx];
- ref_end = c->pic->data[plane_idx] + (bh * c->pic->linesize[plane_idx] + bw) * 8;
+ ref_start = frame->data[plane_idx];
+ ref_end = frame->data[plane_idx] + (bh * frame->linesize[plane_idx] + bw) * 8;
for (i = 0; i < 64; i++)
coordmap[i] = (i & 7) + (i >> 3) * stride;
return ret;
}
- dst = c->pic->data[plane_idx] + 8*by*stride;
+ dst = frame->data[plane_idx] + 8*by*stride;
for (bx = 0; bx < bw; bx++, dst += 8) {
blk = binkb_get_value(c, BINKB_SRC_BLOCK_TYPES);
switch (blk) {
return 0;
}
-static int bink_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx,
- int is_chroma)
+static int bink_decode_plane(BinkContext *c, AVFrame *frame, GetBitContext *gb,
+ int plane_idx, int is_chroma)
{
int blk, ret;
int i, j, bx, by;
LOCAL_ALIGNED_16(int32_t, dctblock, [64]);
int coordmap[64];
- const int stride = c->pic->linesize[plane_idx];
+ const int stride = frame->linesize[plane_idx];
int bw = is_chroma ? (c->avctx->width + 15) >> 4 : (c->avctx->width + 7) >> 3;
int bh = is_chroma ? (c->avctx->height + 15) >> 4 : (c->avctx->height + 7) >> 3;
int width = c->avctx->width >> is_chroma;
read_bundle(gb, c, i);
ref_start = c->last->data[plane_idx] ? c->last->data[plane_idx]
- : c->pic->data[plane_idx];
+ : frame->data[plane_idx];
ref_end = ref_start
+ (bw - 1 + c->last->linesize[plane_idx] * (bh - 1)) * 8;
if (by == bh)
break;
- dst = c->pic->data[plane_idx] + 8*by*stride;
+ dst = frame->data[plane_idx] + 8*by*stride;
prev = (c->last->data[plane_idx] ? c->last->data[plane_idx]
- : c->pic->data[plane_idx]) + 8*by*stride;
+ : frame->data[plane_idx]) + 8*by*stride;
for (bx = 0; bx < bw; bx++, dst += 8, prev += 8) {
blk = get_value(c, BINK_SRC_BLOCK_TYPES);
// 16x16 block type on odd line means part of the already decoded block, so skip it
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *pkt)
{
BinkContext * const c = avctx->priv_data;
+ AVFrame *frame = data;
GetBitContext gb;
int plane, plane_idx, ret;
int bits_count = pkt->size << 3;
if (c->version > 'b') {
- if(c->pic->data[0])
- avctx->release_buffer(avctx, c->pic);
-
- if ((ret = ff_get_buffer(avctx, c->pic)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
} else {
- if ((ret = avctx->reget_buffer(avctx, c->pic)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, c->last)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
+ if ((ret = av_frame_ref(frame, c->last)) < 0)
+ return ret;
}
init_get_bits(&gb, pkt->data, bits_count);
if (c->has_alpha) {
if (c->version >= 'i')
skip_bits_long(&gb, 32);
- if ((ret = bink_decode_plane(c, &gb, 3, 0)) < 0)
+ if ((ret = bink_decode_plane(c, frame, &gb, 3, 0)) < 0)
return ret;
}
if (c->version >= 'i')
plane_idx = (!plane || !c->swap_planes) ? plane : (plane ^ 3);
if (c->version > 'b') {
- if ((ret = bink_decode_plane(c, &gb, plane_idx, !!plane)) < 0)
+ if ((ret = bink_decode_plane(c, frame, &gb, plane_idx, !!plane)) < 0)
return ret;
} else {
- if ((ret = binkb_decode_plane(c, &gb, plane_idx,
+ if ((ret = binkb_decode_plane(c, frame, &gb, plane_idx,
!avctx->frame_number, !!plane)) < 0)
return ret;
}
}
emms_c();
- *got_frame = 1;
- *(AVFrame*)data = *c->pic;
+ if (c->version > 'b') {
+ av_frame_unref(c->last);
+ if ((ret = av_frame_ref(c->last, frame)) < 0)
+ return ret;
+ }
- if (c->version > 'b')
- FFSWAP(AVFrame*, c->pic, c->last);
+ *got_frame = 1;
/* always report that the buffer was completely consumed */
return pkt->size;
}
c->avctx = avctx;
- c->pic = avcodec_alloc_frame();
- c->last = avcodec_alloc_frame();
- if (!c->pic || !c->last) {
- avcodec_free_frame(&c->pic);
- avcodec_free_frame(&c->last);
+ c->last = av_frame_alloc();
+ if (!c->last)
return AVERROR(ENOMEM);
- }
if ((ret = av_image_check_size(avctx->width, avctx->height, 0, avctx)) < 0)
return ret;
{
BinkContext * const c = avctx->priv_data;
- if (c->pic->data[0])
- avctx->release_buffer(avctx, c->pic);
- if (c->last->data[0])
- avctx->release_buffer(avctx, c->last);
- avcodec_free_frame(&c->pic);
- avcodec_free_frame(&c->last);
+ av_frame_free(&c->last);
free_bundles(c);
return 0;
/* get output buffer */
frame->nb_samples = s->frame_len;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
#include "internal.h"
#include "msrledec.h"
-static av_cold int bmp_decode_init(AVCodecContext *avctx)
-{
- BMPContext *s = avctx->priv_data;
-
- avcodec_get_frame_defaults(&s->picture);
- avctx->coded_frame = &s->picture;
-
- return 0;
-}
-
static int bmp_decode_frame(AVCodecContext *avctx,
void *data, int *got_frame,
AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
- BMPContext *s = avctx->priv_data;
- AVFrame *picture = data;
- AVFrame *p = &s->picture;
+ AVFrame *p = data;
unsigned int fsize, hsize;
int width, height;
unsigned int depth;
return AVERROR_INVALIDDATA;
}
- if (p->data[0])
- avctx->release_buffer(avctx, p);
-
- p->reference = 0;
- if ((ret = ff_get_buffer(avctx, p)) < 0) {
+ if ((ret = ff_get_buffer(avctx, p, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
}
}
- *picture = s->picture;
*got_frame = 1;
return buf_size;
}
-static av_cold int bmp_decode_end(AVCodecContext *avctx)
-{
- BMPContext* c = avctx->priv_data;
-
- if (c->picture.data[0])
- avctx->release_buffer(avctx, &c->picture);
-
- return 0;
-}
-
AVCodec ff_bmp_decoder = {
.name = "bmp",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_BMP,
- .priv_data_size = sizeof(BMPContext),
- .init = bmp_decode_init,
- .close = bmp_decode_end,
.decode = bmp_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("BMP (Windows and OS/2 bitmap)"),
typedef struct BMVDecContext {
AVCodecContext *avctx;
- AVFrame pic;
uint8_t *frame, frame_base[SCREEN_WIDE * (SCREEN_HIGH + 1)];
uint32_t pal[256];
AVPacket *pkt)
{
BMVDecContext * const c = avctx->priv_data;
+ AVFrame *frame = data;
int type, scr_off;
int i, ret;
uint8_t *srcptr, *outptr;
scr_off = 0;
}
- if (c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
-
- c->pic.reference = 3;
- if ((ret = ff_get_buffer(avctx, &c->pic)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
return AVERROR_INVALIDDATA;
}
- memcpy(c->pic.data[1], c->pal, AVPALETTE_SIZE);
- c->pic.palette_has_changed = type & BMV_PALETTE;
+ memcpy(frame->data[1], c->pal, AVPALETTE_SIZE);
+ frame->palette_has_changed = type & BMV_PALETTE;
- outptr = c->pic.data[0];
+ outptr = frame->data[0];
srcptr = c->frame;
for (i = 0; i < avctx->height; i++) {
memcpy(outptr, srcptr, avctx->width);
srcptr += avctx->width;
- outptr += c->pic.linesize[0];
+ outptr += frame->linesize[0];
}
*got_frame = 1;
- *(AVFrame*)data = c->pic;
/* always report that the buffer was completely consumed */
return pkt->size;
return 0;
}
-static av_cold int decode_end(AVCodecContext *avctx)
-{
- BMVDecContext *c = avctx->priv_data;
-
- if (c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
-
- return 0;
-}
-
static const int bmv_aud_mults[16] = {
16512, 8256, 4128, 2064, 1032, 516, 258, 192, 129, 88, 64, 56, 48, 40, 36, 32
};
/* get output buffer */
frame->nb_samples = total_blocks * 32;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
.id = AV_CODEC_ID_BMV_VIDEO,
.priv_data_size = sizeof(BMVDecContext),
.init = decode_init,
- .close = decode_end,
.decode = decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Discworld II BMV video"),
#include "avcodec.h"
#include "bytestream.h"
+#include "internal.h"
typedef struct {
AVFrame pictures[2];
{
C93DecoderContext * const c93 = avctx->priv_data;
- if (c93->pictures[0].data[0])
- avctx->release_buffer(avctx, &c93->pictures[0]);
- if (c93->pictures[1].data[0])
- avctx->release_buffer(avctx, &c93->pictures[1]);
+ av_frame_unref(&c93->pictures[0]);
+ av_frame_unref(&c93->pictures[1]);
+
return 0;
}
C93DecoderContext * const c93 = avctx->priv_data;
AVFrame * const newpic = &c93->pictures[c93->currentpic];
AVFrame * const oldpic = &c93->pictures[c93->currentpic^1];
- AVFrame *picture = data;
GetByteContext gb;
uint8_t *out;
int stride, ret, i, x, y, b, bt = 0;
c93->currentpic ^= 1;
- newpic->reference = 1;
- newpic->buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
- FF_BUFFER_HINTS_REUSABLE | FF_BUFFER_HINTS_READABLE;
- if ((ret = avctx->reget_buffer(avctx, newpic)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, newpic)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
memcpy(newpic->data[1], oldpic->data[1], 256 * 4);
}
- *picture = *newpic;
+ if ((ret = av_frame_ref(data, newpic)) < 0)
+ return ret;
*got_frame = 1;
return buf_size;
h->avctx = avctx;
avctx->pix_fmt= AV_PIX_FMT_YUV420P;
- h->cur.f = avcodec_alloc_frame();
- h->DPB[0].f = avcodec_alloc_frame();
- h->DPB[1].f = avcodec_alloc_frame();
+ h->cur.f = av_frame_alloc();
+ h->DPB[0].f = av_frame_alloc();
+ h->DPB[1].f = av_frame_alloc();
if (!h->cur.f || !h->DPB[0].f || !h->DPB[1].f) {
ff_cavs_end(avctx);
return AVERROR(ENOMEM);
av_cold int ff_cavs_end(AVCodecContext *avctx) {
AVSContext *h = avctx->priv_data;
- if (h->cur.f->data[0])
- avctx->release_buffer(avctx, h->cur.f);
- if (h->DPB[0].f->data[0])
- avctx->release_buffer(avctx, h->DPB[0].f);
- if (h->DPB[1].f->data[0])
- avctx->release_buffer(avctx, h->DPB[1].f);
- avcodec_free_frame(&h->cur.f);
- avcodec_free_frame(&h->DPB[0].f);
- avcodec_free_frame(&h->DPB[1].f);
+ av_frame_free(&h->cur.f);
+ av_frame_free(&h->DPB[0].f);
+ av_frame_free(&h->DPB[1].f);
av_free(h->top_qp);
av_free(h->top_mv[0]);
int skip_count = -1;
enum cavs_mb mb_type;
+ av_frame_unref(h->cur.f);
+
skip_bits(&h->gb, 16);//bbv_dwlay
if (h->stc == PIC_PB_START_CODE) {
h->cur.f->pict_type = get_bits(&h->gb, 2) + AV_PICTURE_TYPE_I;
if (h->stream_revision > 0)
skip_bits(&h->gb, 1); //marker_bit
}
- /* release last B frame */
- if (h->cur.f->data[0])
- h->avctx->release_buffer(h->avctx, h->cur.f);
- ff_get_buffer(h->avctx, h->cur.f);
+ ff_get_buffer(h->avctx, h->cur.f, h->cur.f->pict_type == AV_PICTURE_TYPE_B ?
+ 0 : AV_GET_BUFFER_FLAG_REF);
if (!h->edge_emu_buffer) {
int alloc_size = FFALIGN(FFABS(h->cur.f->linesize[0]) + 32, 32);
} while (ff_cavs_next_mb(h));
}
if (h->cur.f->pict_type != AV_PICTURE_TYPE_B) {
- if (h->DPB[1].f->data[0])
- h->avctx->release_buffer(h->avctx, h->DPB[1].f);
+ av_frame_unref(h->DPB[1].f);
FFSWAP(AVSFrame, h->cur, h->DPB[1]);
FFSWAP(AVSFrame, h->DPB[0], h->DPB[1]);
}
AVSContext *h = avctx->priv_data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
- AVFrame *picture = data;
uint32_t stc = -1;
- int input_size;
+ int input_size, ret;
const uint8_t *buf_end;
const uint8_t *buf_ptr;
if (buf_size == 0) {
if (!h->low_delay && h->DPB[0].f->data[0]) {
*got_frame = 1;
- *picture = *h->DPB[0].f;
- if (h->cur.f->data[0])
- avctx->release_buffer(avctx, h->cur.f);
- FFSWAP(AVSFrame, h->cur, h->DPB[0]);
+ av_frame_move_ref(data, h->DPB[0].f);
}
return 0;
}
break;
case PIC_I_START_CODE:
if (!h->got_keyframe) {
- if(h->DPB[0].f->data[0])
- avctx->release_buffer(avctx, h->DPB[0].f);
- if(h->DPB[1].f->data[0])
- avctx->release_buffer(avctx, h->DPB[1].f);
+ av_frame_unref(h->DPB[0].f);
+ av_frame_unref(h->DPB[1].f);
h->got_keyframe = 1;
}
case PIC_PB_START_CODE:
*got_frame = 1;
if (h->cur.f->pict_type != AV_PICTURE_TYPE_B) {
if (h->DPB[1].f->data[0]) {
- *picture = *h->DPB[1].f;
+ if ((ret = av_frame_ref(data, h->DPB[1].f)) < 0)
+ return ret;
} else {
*got_frame = 0;
}
- } else
- *picture = *h->cur.f;
+ } else {
+ av_frame_move_ref(data, h->cur.f);
+ }
break;
case EXT_START_CODE:
//mpeg_decode_extension(avctx, buf_ptr, input_size);
#define CDG_PALETTE_SIZE 16
typedef struct CDGraphicsContext {
- AVFrame frame;
+ AVFrame *frame;
int hscroll;
int vscroll;
} CDGraphicsContext;
-static void cdg_init_frame(AVFrame *frame)
-{
- avcodec_get_frame_defaults(frame);
- frame->reference = 3;
- frame->buffer_hints = FF_BUFFER_HINTS_VALID |
- FF_BUFFER_HINTS_READABLE |
- FF_BUFFER_HINTS_PRESERVE |
- FF_BUFFER_HINTS_REUSABLE;
-}
-
static av_cold int cdg_decode_init(AVCodecContext *avctx)
{
CDGraphicsContext *cc = avctx->priv_data;
- cdg_init_frame(&cc->frame);
+ cc->frame = av_frame_alloc();
+ if (!cc->frame)
+ return AVERROR(ENOMEM);
avctx->width = CDG_FULL_WIDTH;
avctx->height = CDG_FULL_HEIGHT;
static void cdg_border_preset(CDGraphicsContext *cc, uint8_t *data)
{
int y;
- int lsize = cc->frame.linesize[0];
- uint8_t *buf = cc->frame.data[0];
+ int lsize = cc->frame->linesize[0];
+ uint8_t *buf = cc->frame->data[0];
int color = data[0] & 0x0F;
if (!(data[1] & 0x0F)) {
uint16_t color;
int i;
int array_offset = low ? 0 : 8;
- uint32_t *palette = (uint32_t *) cc->frame.data[1];
+ uint32_t *palette = (uint32_t *) cc->frame->data[1];
for (i = 0; i < 8; i++) {
color = (data[2 * i] << 6) + (data[2 * i + 1] & 0x3F);
b = ((color ) & 0x000F) * 17;
palette[i + array_offset] = r << 16 | g << 8 | b;
}
- cc->frame.palette_has_changed = 1;
+ cc->frame->palette_has_changed = 1;
}
static int cdg_tile_block(CDGraphicsContext *cc, uint8_t *data, int b)
int color;
int x, y;
int ai;
- int stride = cc->frame.linesize[0];
- uint8_t *buf = cc->frame.data[0];
+ int stride = cc->frame->linesize[0];
+ uint8_t *buf = cc->frame->data[0];
ri = (data[2] & 0x1F) * CDG_TILE_HEIGHT + cc->vscroll;
ci = (data[3] & 0x3F) * CDG_TILE_WIDTH + cc->hscroll;
int color;
int hscmd, h_off, hinc, vscmd, v_off, vinc;
int y;
- int stride = cc->frame.linesize[0];
- uint8_t *in = cc->frame.data[0];
+ int stride = cc->frame->linesize[0];
+ uint8_t *in = cc->frame->data[0];
uint8_t *out = new_frame->data[0];
color = data[0] & 0x0F;
if (!hinc && !vinc)
return;
- memcpy(new_frame->data[1], cc->frame.data[1], CDG_PALETTE_SIZE * 4);
+ memcpy(new_frame->data[1], cc->frame->data[1], CDG_PALETTE_SIZE * 4);
for (y = FFMAX(0, vinc); y < FFMIN(CDG_FULL_HEIGHT + vinc, CDG_FULL_HEIGHT); y++)
memcpy(out + FFMAX(0, hinc) + stride * y,
int ret;
uint8_t command, inst;
uint8_t cdg_data[CDG_DATA_SIZE];
- AVFrame new_frame;
+ AVFrame *frame = data;
CDGraphicsContext *cc = avctx->priv_data;
if (buf_size < CDG_MINIMUM_PKT_SIZE) {
return AVERROR(EINVAL);
}
- ret = avctx->reget_buffer(avctx, &cc->frame);
+ ret = ff_reget_buffer(avctx, cc->frame);
if (ret) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
if (!avctx->frame_number)
- memset(cc->frame.data[0], 0, cc->frame.linesize[0] * avctx->height);
+ memset(cc->frame->data[0], 0, cc->frame->linesize[0] * avctx->height);
command = bytestream_get_byte(&buf);
inst = bytestream_get_byte(&buf);
switch (inst) {
case CDG_INST_MEMORY_PRESET:
if (!(cdg_data[1] & 0x0F))
- memset(cc->frame.data[0], cdg_data[0] & 0x0F,
- cc->frame.linesize[0] * CDG_FULL_HEIGHT);
+ memset(cc->frame->data[0], cdg_data[0] & 0x0F,
+ cc->frame->linesize[0] * CDG_FULL_HEIGHT);
break;
case CDG_INST_LOAD_PAL_LO:
case CDG_INST_LOAD_PAL_HIGH:
return AVERROR(EINVAL);
}
- cdg_init_frame(&new_frame);
- ret = ff_get_buffer(avctx, &new_frame);
+ ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
if (ret) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
- cdg_scroll(cc, cdg_data, &new_frame, inst == CDG_INST_SCROLL_COPY);
- avctx->release_buffer(avctx, &cc->frame);
- cc->frame = new_frame;
+ cdg_scroll(cc, cdg_data, frame, inst == CDG_INST_SCROLL_COPY);
+ av_frame_unref(cc->frame);
+ ret = av_frame_ref(cc->frame, frame);
+ if (ret < 0)
+ return ret;
break;
default:
break;
}
+ if (!frame->data[0]) {
+ ret = av_frame_ref(frame, cc->frame);
+ if (ret < 0)
+ return ret;
+ }
*got_frame = 1;
} else {
*got_frame = 0;
buf_size = 0;
}
- *(AVFrame *) data = cc->frame;
return buf_size;
}
{
CDGraphicsContext *cc = avctx->priv_data;
- if (cc->frame.data[0])
- avctx->release_buffer(avctx, &cc->frame);
+ av_frame_free(&cc->frame);
return 0;
}
{
CDXLVideoContext *c = avctx->priv_data;
- avcodec_get_frame_defaults(&c->frame);
c->new_video_size = 0;
c->avctx = avctx;
}
}
-static void cdxl_decode_rgb(CDXLVideoContext *c)
+static void cdxl_decode_rgb(CDXLVideoContext *c, AVFrame *frame)
{
- uint32_t *new_palette = (uint32_t *)c->frame.data[1];
+ uint32_t *new_palette = (uint32_t *)frame->data[1];
import_palette(c, new_palette);
- import_format(c, c->frame.linesize[0], c->frame.data[0]);
+ import_format(c, frame->linesize[0], frame->data[0]);
}
-static void cdxl_decode_ham6(CDXLVideoContext *c)
+static void cdxl_decode_ham6(CDXLVideoContext *c, AVFrame *frame)
{
AVCodecContext *avctx = c->avctx;
uint32_t new_palette[16], r, g, b;
int x, y;
ptr = c->new_video;
- out = c->frame.data[0];
+ out = frame->data[0];
import_palette(c, new_palette);
import_format(c, avctx->width, c->new_video);
}
AV_WL24(out + x * 3, r | g | b);
}
- out += c->frame.linesize[0];
+ out += frame->linesize[0];
}
}
-static void cdxl_decode_ham8(CDXLVideoContext *c)
+static void cdxl_decode_ham8(CDXLVideoContext *c, AVFrame *frame)
{
AVCodecContext *avctx = c->avctx;
uint32_t new_palette[64], r, g, b;
int x, y;
ptr = c->new_video;
- out = c->frame.data[0];
+ out = frame->data[0];
import_palette(c, new_palette);
import_format(c, avctx->width, c->new_video);
}
AV_WL24(out + x * 3, r | g | b);
}
- out += c->frame.linesize[0];
+ out += frame->linesize[0];
}
}
int *got_frame, AVPacket *pkt)
{
CDXLVideoContext *c = avctx->priv_data;
- AVFrame * const p = &c->frame;
+ AVFrame * const p = data;
int ret, w, h, encoding, aligned_width, buf_size = pkt->size;
const uint8_t *buf = pkt->data;
return AVERROR_PATCHWELCOME;
}
- if (p->data[0])
- avctx->release_buffer(avctx, p);
-
- p->reference = 0;
- if ((ret = ff_get_buffer(avctx, p)) < 0) {
+ if ((ret = ff_get_buffer(avctx, p, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
if (!c->new_video)
return AVERROR(ENOMEM);
if (c->bpp == 8)
- cdxl_decode_ham8(c);
+ cdxl_decode_ham8(c, p);
else
- cdxl_decode_ham6(c);
+ cdxl_decode_ham6(c, p);
} else {
- cdxl_decode_rgb(c);
+ cdxl_decode_rgb(c, p);
}
*got_frame = 1;
- *(AVFrame*)data = c->frame;
return buf_size;
}
CDXLVideoContext *c = avctx->priv_data;
av_free(c->new_video);
- if (c->frame.data[0])
- avctx->release_buffer(avctx, &c->frame);
return 0;
}
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
+#include "internal.h"
typedef struct {
s->data = buf;
s->size = buf_size;
- s->frame.reference = 1;
- s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
- FF_BUFFER_HINTS_REUSABLE;
- if ((ret = avctx->reget_buffer(avctx, &s->frame))) {
+ if ((ret = ff_reget_buffer(avctx, &s->frame))) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
if (s->palette_video)
memcpy (s->frame.data[1], s->pal, AVPALETTE_SIZE);
+ if ((ret = av_frame_ref(data, &s->frame)) < 0)
+ return ret;
+
*got_frame = 1;
- *(AVFrame*)data = s->frame;
/* report that the buffer was completely consumed */
return buf_size;
{
CinepakContext *s = avctx->priv_data;
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
+ av_frame_unref(&s->frame);
return 0;
}
#include "internal.h"
#include "put_bits.h"
-typedef struct CLJRContext {
- AVFrame picture;
-} CLJRContext;
-
-static av_cold int common_init(AVCodecContext *avctx)
-{
- CLJRContext * const a = avctx->priv_data;
-
- avctx->coded_frame = &a->picture;
-
- return 0;
-}
-
#if CONFIG_CLJR_DECODER
static int decode_frame(AVCodecContext *avctx,
void *data, int *got_frame,
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
- CLJRContext * const a = avctx->priv_data;
GetBitContext gb;
- AVFrame *picture = data;
- AVFrame * const p = &a->picture;
+ AVFrame * const p = data;
int x, y, ret;
- if (p->data[0])
- avctx->release_buffer(avctx, p);
-
if (avctx->height <= 0 || avctx->width <= 0) {
av_log(avctx, AV_LOG_ERROR, "Invalid width or height\n");
return AVERROR_INVALIDDATA;
return AVERROR_INVALIDDATA;
}
- p->reference = 0;
- if ((ret = ff_get_buffer(avctx, p)) < 0) {
+ if ((ret = ff_get_buffer(avctx, p, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
init_get_bits(&gb, buf, buf_size * 8);
for (y = 0; y < avctx->height; y++) {
- uint8_t *luma = &a->picture.data[0][y * a->picture.linesize[0]];
- uint8_t *cb = &a->picture.data[1][y * a->picture.linesize[1]];
- uint8_t *cr = &a->picture.data[2][y * a->picture.linesize[2]];
+ uint8_t *luma = &p->data[0][y * p->linesize[0]];
+ uint8_t *cb = &p->data[1][y * p->linesize[1]];
+ uint8_t *cr = &p->data[2][y * p->linesize[2]];
for (x = 0; x < avctx->width; x += 4) {
luma[3] = get_bits(&gb, 5) << 3;
luma[2] = get_bits(&gb, 5) << 3;
}
}
- *picture = a->picture;
*got_frame = 1;
return buf_size;
static av_cold int decode_init(AVCodecContext *avctx)
{
avctx->pix_fmt = AV_PIX_FMT_YUV411P;
- return common_init(avctx);
-}
-
-static av_cold int decode_end(AVCodecContext *avctx)
-{
- CLJRContext *a = avctx->priv_data;
-
- if (a->picture.data[0])
- avctx->release_buffer(avctx, &a->picture);
return 0;
}
.name = "cljr",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_CLJR,
- .priv_data_size = sizeof(CLJRContext),
.init = decode_init,
- .close = decode_end,
.decode = decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Cirrus Logic AccuPak"),
#endif
#if CONFIG_CLJR_ENCODER
+typedef struct CLJRContext {
+ AVFrame picture;
+} CLJRContext;
+
+static av_cold int encode_init(AVCodecContext *avctx)
+{
+ CLJRContext * const a = avctx->priv_data;
+
+ avctx->coded_frame = &a->picture;
+
+ return 0;
+}
+
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *p, int *got_packet)
{
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_CLJR,
.priv_data_size = sizeof(CLJRContext),
- .init = common_init,
+ .init = encode_init,
.encode2 = encode_frame,
.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV411P,
AV_PIX_FMT_NONE },
int *got_picture_ptr, AVPacket *avpkt)
{
CLLCContext *ctx = avctx->priv_data;
- AVFrame *pic = avctx->coded_frame;
+ AVFrame *pic = data;
uint8_t *src = avpkt->data;
uint32_t info_tag, info_offset;
int data_size;
GetBitContext gb;
int coding_type, ret;
- if (pic->data[0])
- avctx->release_buffer(avctx, pic);
-
- pic->reference = 0;
-
/* Skip the INFO header if present */
info_offset = 0;
info_tag = AV_RL32(src);
avctx->pix_fmt = AV_PIX_FMT_RGB24;
avctx->bits_per_raw_sample = 8;
- ret = ff_get_buffer(avctx, pic);
+ ret = ff_get_buffer(avctx, pic, 0);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "Could not allocate buffer.\n");
return ret;
avctx->pix_fmt = AV_PIX_FMT_ARGB;
avctx->bits_per_raw_sample = 8;
- ret = ff_get_buffer(avctx, pic);
+ ret = ff_get_buffer(avctx, pic, 0);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "Could not allocate buffer.\n");
return ret;
pic->pict_type = AV_PICTURE_TYPE_I;
*got_picture_ptr = 1;
- *(AVFrame *)data = *pic;
return avpkt->size;
}
{
CLLCContext *ctx = avctx->priv_data;
- if (avctx->coded_frame->data[0])
- avctx->release_buffer(avctx, avctx->coded_frame);
-
- av_freep(&avctx->coded_frame);
av_freep(&ctx->swapped_buf);
return 0;
ff_dsputil_init(&ctx->dsp, avctx);
- avctx->coded_frame = avcodec_alloc_frame();
- if (!avctx->coded_frame) {
- av_log(avctx, AV_LOG_ERROR, "Could not allocate frame.\n");
- return AVERROR(ENOMEM);
- }
-
return 0;
}
p->excitation, avctx->frame_size, p->order);
frame->nb_samples = avctx->frame_size;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
/* get output buffer */
if (q->discarded_packets >= 2) {
frame->nb_samples = q->samples_per_channel;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
#include "libavutil/lzo.h"
typedef struct {
- AVFrame pic;
int linelen, height, bpp;
unsigned int decomp_size;
unsigned char* decomp_buf;
return AVERROR_INVALIDDATA;
}
- if (c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
- c->pic.reference = 1;
- c->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_READABLE |
- FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
- if ((ret = ff_get_buffer(avctx, &c->pic)) < 0) {
+ if ((ret = ff_get_buffer(avctx, picture, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
// flip upside down, add difference frame
if (buf[0] & 1) { // keyframe
- c->pic.pict_type = AV_PICTURE_TYPE_I;
- c->pic.key_frame = 1;
+ picture->pict_type = AV_PICTURE_TYPE_I;
+ picture->key_frame = 1;
switch (c->bpp) {
case 16:
- copy_frame_16(&c->pic, c->decomp_buf, c->linelen, c->height);
+ copy_frame_16(picture, c->decomp_buf, c->linelen, c->height);
break;
case 32:
- copy_frame_32(&c->pic, c->decomp_buf, c->linelen, c->height);
+ copy_frame_32(picture, c->decomp_buf, c->linelen, c->height);
break;
default:
- copy_frame_default(&c->pic, c->decomp_buf, FFALIGN(c->linelen, 4),
+ copy_frame_default(picture, c->decomp_buf, FFALIGN(c->linelen, 4),
c->linelen, c->height);
}
} else {
- c->pic.pict_type = AV_PICTURE_TYPE_P;
- c->pic.key_frame = 0;
+ picture->pict_type = AV_PICTURE_TYPE_P;
+ picture->key_frame = 0;
switch (c->bpp) {
case 16:
- add_frame_16(&c->pic, c->decomp_buf, c->linelen, c->height);
+ add_frame_16(picture, c->decomp_buf, c->linelen, c->height);
break;
case 32:
- add_frame_32(&c->pic, c->decomp_buf, c->linelen, c->height);
+ add_frame_32(picture, c->decomp_buf, c->linelen, c->height);
break;
default:
- add_frame_default(&c->pic, c->decomp_buf, FFALIGN(c->linelen, 4),
+ add_frame_default(picture, c->decomp_buf, FFALIGN(c->linelen, 4),
c->linelen, c->height);
}
}
- *picture = c->pic;
*got_frame = 1;
return buf_size;
}
return AVERROR_INVALIDDATA;
}
c->bpp = avctx->bits_per_coded_sample;
- c->pic.data[0] = NULL;
c->linelen = avctx->width * avctx->bits_per_coded_sample / 8;
c->height = avctx->height;
stride = c->linelen;
static av_cold int decode_end(AVCodecContext *avctx) {
CamStudioContext *c = avctx->priv_data;
av_freep(&c->decomp_buf);
- if (c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
return 0;
}
typedef struct CyuvDecodeContext {
AVCodecContext *avctx;
int width, height;
- AVFrame frame;
} CyuvDecodeContext;
static av_cold int cyuv_decode_init(AVCodecContext *avctx)
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
CyuvDecodeContext *s=avctx->priv_data;
+ AVFrame *frame = data;
unsigned char *y_plane;
unsigned char *u_plane;
/* pixel data starts 48 bytes in, after 3x16-byte tables */
stream_ptr = 48;
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
-
- s->frame.buffer_hints = FF_BUFFER_HINTS_VALID;
- s->frame.reference = 0;
- if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
- y_plane = s->frame.data[0];
- u_plane = s->frame.data[1];
- v_plane = s->frame.data[2];
+ y_plane = frame->data[0];
+ u_plane = frame->data[1];
+ v_plane = frame->data[2];
/* iterate through each line in the height */
for (y_ptr = 0, u_ptr = 0, v_ptr = 0;
- y_ptr < (s->height * s->frame.linesize[0]);
- y_ptr += s->frame.linesize[0] - s->width,
- u_ptr += s->frame.linesize[1] - s->width / 4,
- v_ptr += s->frame.linesize[2] - s->width / 4) {
+ y_ptr < (s->height * frame->linesize[0]);
+ y_ptr += frame->linesize[0] - s->width,
+ u_ptr += frame->linesize[1] - s->width / 4,
+ v_ptr += frame->linesize[2] - s->width / 4) {
/* reset predictors */
cur_byte = buf[stream_ptr++];
}
*got_frame = 1;
- *(AVFrame*)data= s->frame;
return buf_size;
}
-static av_cold int cyuv_decode_end(AVCodecContext *avctx)
-{
- CyuvDecodeContext *s = avctx->priv_data;
-
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
-
- return 0;
-}
-
#if CONFIG_AURA_DECODER
AVCodec ff_aura_decoder = {
.name = "aura",
.id = AV_CODEC_ID_AURA,
.priv_data_size = sizeof(CyuvDecodeContext),
.init = cyuv_decode_init,
- .close = cyuv_decode_end,
.decode = cyuv_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Auravision AURA"),
.id = AV_CODEC_ID_CYUV,
.priv_data_size = sizeof(CyuvDecodeContext),
.init = cyuv_decode_init,
- .close = cyuv_decode_end,
.decode = cyuv_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Creative YUV (CYUV)"),
/* get output buffer */
frame->nb_samples = 256 * (s->sample_blocks / 8);
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
#include "libavutil/mem.h"
typedef struct DfaContext {
- AVFrame pic;
-
uint32_t pal[256];
uint8_t *frame_buf;
} DfaContext;
void *data, int *got_frame,
AVPacket *avpkt)
{
+ AVFrame *frame = data;
DfaContext *s = avctx->priv_data;
GetByteContext gb;
const uint8_t *buf = avpkt->data;
int ret;
int i, pal_elems;
- if (s->pic.data[0])
- avctx->release_buffer(avctx, &s->pic);
-
- if ((ret = ff_get_buffer(avctx, &s->pic))) {
+ if ((ret = ff_get_buffer(avctx, frame, 0))) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
s->pal[i] = bytestream2_get_be24(&gb) << 2;
s->pal[i] |= (s->pal[i] >> 6) & 0x333;
}
- s->pic.palette_has_changed = 1;
+ frame->palette_has_changed = 1;
} else if (chunk_type <= 9) {
if (decoder[chunk_type - 2](&gb, s->frame_buf, avctx->width, avctx->height)) {
av_log(avctx, AV_LOG_ERROR, "Error decoding %s chunk\n",
}
buf = s->frame_buf;
- dst = s->pic.data[0];
+ dst = frame->data[0];
for (i = 0; i < avctx->height; i++) {
memcpy(dst, buf, avctx->width);
- dst += s->pic.linesize[0];
+ dst += frame->linesize[0];
buf += avctx->width;
}
- memcpy(s->pic.data[1], s->pal, sizeof(s->pal));
+ memcpy(frame->data[1], s->pal, sizeof(s->pal));
*got_frame = 1;
- *(AVFrame*)data = s->pic;
return avpkt->size;
}
{
DfaContext *s = avctx->priv_data;
- if (s->pic.data[0])
- avctx->release_buffer(avctx, &s->pic);
-
av_freep(&s->frame_buf);
return 0;
typedef struct DNXHDContext {
AVCodecContext *avctx;
- AVFrame picture;
GetBitContext gb;
int cid; ///< compression id
unsigned int width, height;
DNXHDContext *ctx = avctx->priv_data;
ctx->avctx = avctx;
- avctx->coded_frame = &ctx->picture;
- ctx->picture.type = AV_PICTURE_TYPE_I;
- ctx->picture.key_frame = 1;
return 0;
}
return 0;
}
-static int dnxhd_decode_header(DNXHDContext *ctx, const uint8_t *buf, int buf_size, int first_field)
+static int dnxhd_decode_header(DNXHDContext *ctx, AVFrame *frame,
+ const uint8_t *buf, int buf_size, int first_field)
{
static const uint8_t header_prefix[] = { 0x00, 0x00, 0x02, 0x80, 0x01 };
int i, cid;
}
if (buf[5] & 2) { /* interlaced */
ctx->cur_field = buf[5] & 1;
- ctx->picture.interlaced_frame = 1;
- ctx->picture.top_field_first = first_field ^ ctx->cur_field;
+ frame->interlaced_frame = 1;
+ frame->top_field_first = first_field ^ ctx->cur_field;
av_log(ctx->avctx, AV_LOG_DEBUG, "interlaced %d, cur field %d\n", buf[5] & 3, ctx->cur_field);
}
av_dlog(ctx->avctx, "mb width %d, mb height %d\n", ctx->mb_width, ctx->mb_height);
- if ((ctx->height+15)>>4 == ctx->mb_height && ctx->picture.interlaced_frame)
+ if ((ctx->height+15)>>4 == ctx->mb_height && frame->interlaced_frame)
ctx->height <<= 1;
if (ctx->mb_height > 68 ||
- (ctx->mb_height<<ctx->picture.interlaced_frame) > (ctx->height+15)>>4) {
+ (ctx->mb_height << frame->interlaced_frame) > (ctx->height+15)>>4) {
av_log(ctx->avctx, AV_LOG_ERROR, "mb height too big: %d\n", ctx->mb_height);
return -1;
}
dnxhd_decode_dct_block(ctx, block, n, qscale, 6, 8, 4);
}
-static int dnxhd_decode_macroblock(DNXHDContext *ctx, int x, int y)
+static int dnxhd_decode_macroblock(DNXHDContext *ctx, AVFrame *frame, int x, int y)
{
int shift1 = ctx->bit_depth == 10;
- int dct_linesize_luma = ctx->picture.linesize[0];
- int dct_linesize_chroma = ctx->picture.linesize[1];
+ int dct_linesize_luma = frame->linesize[0];
+ int dct_linesize_chroma = frame->linesize[1];
uint8_t *dest_y, *dest_u, *dest_v;
int dct_y_offset, dct_x_offset;
int qscale, i;
ctx->decode_dct_block(ctx, ctx->blocks[i], i, qscale);
}
- if (ctx->picture.interlaced_frame) {
+ if (frame->interlaced_frame) {
dct_linesize_luma <<= 1;
dct_linesize_chroma <<= 1;
}
- dest_y = ctx->picture.data[0] + ((y * dct_linesize_luma) << 4) + (x << (4 + shift1));
- dest_u = ctx->picture.data[1] + ((y * dct_linesize_chroma) << 4) + (x << (3 + shift1));
- dest_v = ctx->picture.data[2] + ((y * dct_linesize_chroma) << 4) + (x << (3 + shift1));
+ dest_y = frame->data[0] + ((y * dct_linesize_luma) << 4) + (x << (4 + shift1));
+ dest_u = frame->data[1] + ((y * dct_linesize_chroma) << 4) + (x << (3 + shift1));
+ dest_v = frame->data[2] + ((y * dct_linesize_chroma) << 4) + (x << (3 + shift1));
if (ctx->cur_field) {
- dest_y += ctx->picture.linesize[0];
- dest_u += ctx->picture.linesize[1];
- dest_v += ctx->picture.linesize[2];
+ dest_y += frame->linesize[0];
+ dest_u += frame->linesize[1];
+ dest_v += frame->linesize[2];
}
dct_y_offset = dct_linesize_luma << 3;
return 0;
}
-static int dnxhd_decode_macroblocks(DNXHDContext *ctx, const uint8_t *buf, int buf_size)
+static int dnxhd_decode_macroblocks(DNXHDContext *ctx, AVFrame *frame,
+ const uint8_t *buf, int buf_size)
{
int x, y;
for (y = 0; y < ctx->mb_height; y++) {
init_get_bits(&ctx->gb, buf + ctx->mb_scan_index[y], (buf_size - ctx->mb_scan_index[y]) << 3);
for (x = 0; x < ctx->mb_width; x++) {
//START_TIMER;
- dnxhd_decode_macroblock(ctx, x, y);
+ dnxhd_decode_macroblock(ctx, frame, x, y);
//STOP_TIMER("decode macroblock");
}
}
DNXHDContext *ctx = avctx->priv_data;
AVFrame *picture = data;
int first_field = 1;
+ int ret;
av_dlog(avctx, "frame size %d\n", buf_size);
decode_coding_unit:
- if (dnxhd_decode_header(ctx, buf, buf_size, first_field) < 0)
+ if (dnxhd_decode_header(ctx, picture, buf, buf_size, first_field) < 0)
return -1;
if ((avctx->width || avctx->height) &&
avcodec_set_dimensions(avctx, ctx->width, ctx->height);
if (first_field) {
- if (ctx->picture.data[0])
- avctx->release_buffer(avctx, &ctx->picture);
- if (ff_get_buffer(avctx, &ctx->picture) < 0) {
+ if ((ret = ff_get_buffer(avctx, picture, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
- return -1;
+ return ret;
}
+ picture->pict_type = AV_PICTURE_TYPE_I;
+ picture->key_frame = 1;
}
- dnxhd_decode_macroblocks(ctx, buf + 0x280, buf_size - 0x280);
+ dnxhd_decode_macroblocks(ctx, picture, buf + 0x280, buf_size - 0x280);
- if (first_field && ctx->picture.interlaced_frame) {
+ if (first_field && picture->interlaced_frame) {
buf += ctx->cid_table->coding_unit_size;
buf_size -= ctx->cid_table->coding_unit_size;
first_field = 0;
goto decode_coding_unit;
}
- *picture = ctx->picture;
*got_frame = 1;
return buf_size;
}
{
DNXHDContext *ctx = avctx->priv_data;
- if (ctx->picture.data[0])
- avctx->release_buffer(avctx, &ctx->picture);
ff_free_vlc(&ctx->ac_vlc);
ff_free_vlc(&ctx->dc_vlc);
ff_free_vlc(&ctx->run_vlc);
/* get output buffer */
frame->nb_samples = out / avctx->channels;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
#include "avcodec.h"
#include "internal.h"
-typedef struct DPXContext {
- AVFrame picture;
-} DPXContext;
-
-
static unsigned int read32(const uint8_t **ptr, int is_big)
{
unsigned int temp;
const uint8_t *buf = avpkt->data;
const uint8_t *buf_end = avpkt->data + avpkt->size;
int buf_size = avpkt->size;
- DPXContext *const s = avctx->priv_data;
- AVFrame *picture = data;
- AVFrame *const p = &s->picture;
+ AVFrame *const p = data;
uint8_t *ptr;
unsigned int offset;
return AVERROR_INVALIDDATA;
}
- if (s->picture.data[0])
- avctx->release_buffer(avctx, &s->picture);
if ((ret = av_image_check_size(w, h, 0, avctx)) < 0)
return ret;
if (w != avctx->width || h != avctx->height)
avcodec_set_dimensions(avctx, w, h);
- if ((ret = ff_get_buffer(avctx, p)) < 0) {
+ if ((ret = ff_get_buffer(avctx, p, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
break;
}
- *picture = s->picture;
*got_frame = 1;
return buf_size;
}
-static av_cold int decode_init(AVCodecContext *avctx)
-{
- DPXContext *s = avctx->priv_data;
- avcodec_get_frame_defaults(&s->picture);
- avctx->coded_frame = &s->picture;
- return 0;
-}
-
-static av_cold int decode_end(AVCodecContext *avctx)
-{
- DPXContext *s = avctx->priv_data;
- if (s->picture.data[0])
- avctx->release_buffer(avctx, &s->picture);
-
- return 0;
-}
-
AVCodec ff_dpx_decoder = {
.name = "dpx",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_DPX,
- .priv_data_size = sizeof(DPXContext),
- .init = decode_init,
- .close = decode_end,
.decode = decode_frame,
.long_name = NULL_IF_CONFIG_SMALL("DPX image"),
.capabilities = CODEC_CAP_DR1,
break;
}
- cin->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
- if (avctx->reget_buffer(avctx, &cin->frame)) {
+ if ((res = ff_reget_buffer(avctx, &cin->frame)) < 0) {
av_log(cin->avctx, AV_LOG_ERROR, "delphinecinvideo: reget_buffer() failed to allocate a frame\n");
- return -1;
+ return res;
}
memcpy(cin->frame.data[1], cin->palette, sizeof(cin->palette));
FFSWAP(uint8_t *, cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_table[CIN_PRE_BMP]);
+ if ((res = av_frame_ref(data, &cin->frame)) < 0)
+ return res;
+
*got_frame = 1;
- *(AVFrame *)data = cin->frame;
return buf_size;
}
CinVideoContext *cin = avctx->priv_data;
int i;
- if (cin->frame.data[0])
- avctx->release_buffer(avctx, &cin->frame);
+ av_frame_unref(&cin->frame);
for (i = 0; i < 3; ++i)
av_free(cin->bitmap_table[i]);
/* get output buffer */
frame->nb_samples = avpkt->size - cin->initial_decode_frame;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
return -1; /* NOTE: we only accept several full frames */
}
- if (s->picture.data[0])
- avctx->release_buffer(avctx, &s->picture);
-
- s->picture.reference = 0;
s->picture.key_frame = 1;
s->picture.pict_type = AV_PICTURE_TYPE_I;
avctx->pix_fmt = s->sys->pix_fmt;
avctx->time_base = s->sys->time_base;
avcodec_set_dimensions(avctx, s->sys->width, s->sys->height);
- if (ff_get_buffer(avctx, &s->picture) < 0) {
+ if (ff_get_buffer(avctx, &s->picture, 0) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
/* return image */
*got_frame = 1;
- *(AVFrame*)data = s->picture;
+ av_frame_move_ref(data, &s->picture);
/* Determine the codec's sample_aspect ratio from the packet */
vsc_pack = buf + 80*5 + 48 + 5;
{
DVVideoContext *s = c->priv_data;
- if (s->picture.data[0])
- c->release_buffer(c, &s->picture);
+ av_frame_unref(&s->picture);
return 0;
}
* Decoder context
*/
typedef struct DxaDecContext {
- AVFrame pic, prev;
+ AVFrame prev;
int dsize;
uint8_t *decomp_buf;
static const int shift1[6] = { 0, 8, 8, 8, 4, 4 };
static const int shift2[6] = { 0, 0, 8, 4, 0, 4 };
-static int decode_13(AVCodecContext *avctx, DxaDecContext *c, uint8_t* dst, uint8_t *src, uint8_t *ref)
+static int decode_13(AVCodecContext *avctx, DxaDecContext *c, uint8_t* dst,
+ int stride, uint8_t *src, uint8_t *ref)
{
uint8_t *code, *data, *mv, *msk, *tmp, *tmp2;
int i, j, k;
int type, x, y, d, d2;
- int stride = c->pic.linesize[0];
uint32_t mask;
code = src + 12;
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
{
+ AVFrame *frame = data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
DxaDecContext * const c = avctx->priv_data;
buf_size -= 768+4;
}
- if ((ret = ff_get_buffer(avctx, &c->pic)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
- memcpy(c->pic.data[1], c->pal, AVPALETTE_SIZE);
- c->pic.palette_has_changed = pc;
+ memcpy(frame->data[1], c->pal, AVPALETTE_SIZE);
+ frame->palette_has_changed = pc;
- outptr = c->pic.data[0];
+ outptr = frame->data[0];
srcptr = c->decomp_buf;
tmpptr = c->prev.data[0];
- stride = c->pic.linesize[0];
+ stride = frame->linesize[0];
if(buf[0]=='N' && buf[1]=='U' && buf[2]=='L' && buf[3]=='L')
compr = -1;
}
switch(compr){
case -1:
- c->pic.key_frame = 0;
- c->pic.pict_type = AV_PICTURE_TYPE_P;
+ frame->key_frame = 0;
+ frame->pict_type = AV_PICTURE_TYPE_P;
if(c->prev.data[0])
- memcpy(c->pic.data[0], c->prev.data[0], c->pic.linesize[0] * avctx->height);
+ memcpy(frame->data[0], c->prev.data[0], frame->linesize[0] * avctx->height);
else{ // Should happen only when first frame is 'NULL'
- memset(c->pic.data[0], 0, c->pic.linesize[0] * avctx->height);
- c->pic.key_frame = 1;
- c->pic.pict_type = AV_PICTURE_TYPE_I;
+ memset(frame->data[0], 0, frame->linesize[0] * avctx->height);
+ frame->key_frame = 1;
+ frame->pict_type = AV_PICTURE_TYPE_I;
}
break;
case 2:
case 3:
case 4:
case 5:
- c->pic.key_frame = !(compr & 1);
- c->pic.pict_type = (compr & 1) ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
+ frame->key_frame = !(compr & 1);
+ frame->pict_type = (compr & 1) ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
for(j = 0; j < avctx->height; j++){
if(compr & 1){
for(i = 0; i < avctx->width; i++)
break;
case 12: // ScummVM coding
case 13:
- c->pic.key_frame = 0;
- c->pic.pict_type = AV_PICTURE_TYPE_P;
- decode_13(avctx, c, c->pic.data[0], srcptr, c->prev.data[0]);
+ frame->key_frame = 0;
+ frame->pict_type = AV_PICTURE_TYPE_P;
+ decode_13(avctx, c, frame->data[0], frame->linesize[0], srcptr, c->prev.data[0]);
break;
default:
av_log(avctx, AV_LOG_ERROR, "Unknown/unsupported compression type %d\n", buf[4]);
return AVERROR_INVALIDDATA;
}
- FFSWAP(AVFrame, c->pic, c->prev);
- if(c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
+ av_frame_unref(&c->prev);
+ if ((ret = av_frame_ref(&c->prev, frame)) < 0)
+ return ret;
*got_frame = 1;
- *(AVFrame*)data = c->prev;
/* always report that the buffer was completely consumed */
return orig_buf_size;
DxaDecContext * const c = avctx->priv_data;
av_freep(&c->decomp_buf);
- if(c->prev.data[0])
- avctx->release_buffer(avctx, &c->prev);
- if(c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
+ av_frame_unref(&c->prev);
return 0;
}
static av_cold int decode_init(AVCodecContext *avctx)
{
avctx->pix_fmt = AV_PIX_FMT_YUV420P;
- avctx->coded_frame = avcodec_alloc_frame();
- if (!avctx->coded_frame)
- return AVERROR(ENOMEM);
return 0;
}
AVPacket *avpkt)
{
int h, w;
- AVFrame *pic = avctx->coded_frame;
+ AVFrame *pic = data;
const uint8_t *src = avpkt->data;
uint8_t *Y1, *Y2, *U, *V;
int ret;
- if (pic->data[0])
- avctx->release_buffer(avctx, pic);
-
if (avpkt->size < avctx->width * avctx->height * 3 / 2 + 16) {
av_log(avctx, AV_LOG_ERROR, "packet too small\n");
return AVERROR_INVALIDDATA;
}
- pic->reference = 0;
- if ((ret = ff_get_buffer(avctx, pic)) < 0)
+ if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
return ret;
pic->pict_type = AV_PICTURE_TYPE_I;
}
*got_frame = 1;
- *(AVFrame*)data = *pic;
return avpkt->size;
}
-static av_cold int decode_close(AVCodecContext *avctx)
-{
- AVFrame *pic = avctx->coded_frame;
- if (pic->data[0])
- avctx->release_buffer(avctx, pic);
- av_freep(&avctx->coded_frame);
-
- return 0;
-}
-
AVCodec ff_dxtory_decoder = {
.name = "dxtory",
.long_name = NULL_IF_CONFIG_SMALL("Dxtory"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_DXTORY,
.init = decode_init,
- .close = decode_close,
.decode = decode_frame,
.capabilities = CODEC_CAP_DR1,
};
ff_dxva2_get_surface_index(ctx, r),
r->long_ref != 0);
- if ((r->f.reference & PICT_TOP_FIELD) && r->field_poc[0] != INT_MAX)
+ if ((r->reference & PICT_TOP_FIELD) && r->field_poc[0] != INT_MAX)
pp->FieldOrderCntList[i][0] = r->field_poc[0];
- if ((r->f.reference & PICT_BOTTOM_FIELD) && r->field_poc[1] != INT_MAX)
+ if ((r->reference & PICT_BOTTOM_FIELD) && r->field_poc[1] != INT_MAX)
pp->FieldOrderCntList[i][1] = r->field_poc[1];
pp->FrameNumList[i] = r->long_ref ? r->pic_id : r->frame_num;
- if (r->f.reference & PICT_TOP_FIELD)
+ if (r->reference & PICT_TOP_FIELD)
pp->UsedForReferenceFlags |= 1 << (2*i + 0);
- if (r->f.reference & PICT_BOTTOM_FIELD)
+ if (r->reference & PICT_BOTTOM_FIELD)
pp->UsedForReferenceFlags |= 1 << (2*i + 1);
} else {
pp->RefFrameList[i].bPicEntry = 0xff;
unsigned plane;
fill_picture_entry(&slice->RefPicList[list][i],
ff_dxva2_get_surface_index(ctx, r),
- r->f.reference == PICT_BOTTOM_FIELD);
+ r->reference == PICT_BOTTOM_FIELD);
for (plane = 0; plane < 3; plane++) {
int w, o;
if (plane == 0 && h->luma_weight_flag[list]) {
typedef struct CmvContext {
AVCodecContext *avctx;
- AVFrame frame; ///< current
- AVFrame last_frame; ///< last
- AVFrame last2_frame; ///< second-last
+ AVFrame *last_frame; ///< last
+ AVFrame *last2_frame; ///< second-last
int width, height;
unsigned int palette[AVPALETTE_COUNT];
} CmvContext;
CmvContext *s = avctx->priv_data;
s->avctx = avctx;
avctx->pix_fmt = AV_PIX_FMT_PAL8;
+
+ s->last_frame = av_frame_alloc();
+ s->last2_frame = av_frame_alloc();
+ if (!s->last_frame || !s->last2_frame) {
+ av_frame_free(&s->last_frame);
+ av_frame_free(&s->last2_frame);
+ return AVERROR(ENOMEM);
+ }
+
return 0;
}
-static void cmv_decode_intra(CmvContext * s, const uint8_t *buf, const uint8_t *buf_end){
- unsigned char *dst = s->frame.data[0];
+static void cmv_decode_intra(CmvContext * s, AVFrame *frame,
+ const uint8_t *buf, const uint8_t *buf_end)
+{
+ unsigned char *dst = frame->data[0];
int i;
for (i=0; i < s->avctx->height && buf_end - buf >= s->avctx->width; i++) {
memcpy(dst, buf, s->avctx->width);
- dst += s->frame.linesize[0];
+ dst += frame->linesize[0];
buf += s->avctx->width;
}
}
}
}
-static void cmv_decode_inter(CmvContext * s, const uint8_t *buf, const uint8_t *buf_end){
+static void cmv_decode_inter(CmvContext *s, AVFrame *frame, const uint8_t *buf,
+ const uint8_t *buf_end)
+{
const uint8_t *raw = buf + (s->avctx->width*s->avctx->height/16);
int x,y,i;
for(y=0; y<s->avctx->height/4; y++)
for(x=0; x<s->avctx->width/4 && buf_end - buf > i; x++) {
if (buf[i]==0xFF) {
- unsigned char *dst = s->frame.data[0] + (y*4)*s->frame.linesize[0] + x*4;
+ unsigned char *dst = frame->data[0] + (y*4)*frame->linesize[0] + x*4;
if (raw+16<buf_end && *raw==0xFF) { /* intra */
raw++;
memcpy(dst, raw, 4);
- memcpy(dst+s->frame.linesize[0], raw+4, 4);
- memcpy(dst+2*s->frame.linesize[0], raw+8, 4);
- memcpy(dst+3*s->frame.linesize[0], raw+12, 4);
+ memcpy(dst + frame->linesize[0], raw+4, 4);
+ memcpy(dst + 2 * frame->linesize[0], raw+8, 4);
+ memcpy(dst + 3 * frame->linesize[0], raw+12, 4);
raw+=16;
}else if(raw<buf_end) { /* inter using second-last frame as reference */
int xoffset = (*raw & 0xF) - 7;
int yoffset = ((*raw >> 4)) - 7;
- if (s->last2_frame.data[0])
- cmv_motcomp(s->frame.data[0], s->frame.linesize[0],
- s->last2_frame.data[0], s->last2_frame.linesize[0],
+ if (s->last2_frame->data[0])
+ cmv_motcomp(frame->data[0], frame->linesize[0],
+ s->last2_frame->data[0], s->last2_frame->linesize[0],
x*4, y*4, xoffset, yoffset, s->avctx->width, s->avctx->height);
raw++;
}
}else{ /* inter using last frame as reference */
int xoffset = (buf[i] & 0xF) - 7;
int yoffset = ((buf[i] >> 4)) - 7;
- cmv_motcomp(s->frame.data[0], s->frame.linesize[0],
- s->last_frame.data[0], s->last_frame.linesize[0],
+ cmv_motcomp(frame->data[0], frame->linesize[0],
+ s->last_frame->data[0], s->last_frame->linesize[0],
x*4, y*4, xoffset, yoffset, s->avctx->width, s->avctx->height);
}
i++;
int buf_size = avpkt->size;
CmvContext *s = avctx->priv_data;
const uint8_t *buf_end = buf + buf_size;
+ AVFrame *frame = data;
+ int ret;
if (buf_end - buf < EA_PREAMBLE_SIZE)
return AVERROR_INVALIDDATA;
if (av_image_check_size(s->width, s->height, 0, s->avctx))
return -1;
- /* shuffle */
- if (s->last2_frame.data[0])
- avctx->release_buffer(avctx, &s->last2_frame);
- FFSWAP(AVFrame, s->last_frame, s->last2_frame);
- FFSWAP(AVFrame, s->frame, s->last_frame);
-
- s->frame.reference = 1;
- s->frame.buffer_hints = FF_BUFFER_HINTS_VALID;
- if (ff_get_buffer(avctx, &s->frame)<0) {
+ if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
- return -1;
+ return ret;
}
- memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE);
+ memcpy(frame->data[1], s->palette, AVPALETTE_SIZE);
buf += EA_PREAMBLE_SIZE;
if ((buf[0]&1)) { // subtype
- cmv_decode_inter(s, buf+2, buf_end);
- s->frame.key_frame = 0;
- s->frame.pict_type = AV_PICTURE_TYPE_P;
+ cmv_decode_inter(s, frame, buf+2, buf_end);
+ frame->key_frame = 0;
+ frame->pict_type = AV_PICTURE_TYPE_P;
}else{
- s->frame.key_frame = 1;
- s->frame.pict_type = AV_PICTURE_TYPE_I;
- cmv_decode_intra(s, buf+2, buf_end);
+ frame->key_frame = 1;
+ frame->pict_type = AV_PICTURE_TYPE_I;
+ cmv_decode_intra(s, frame, buf+2, buf_end);
}
+ av_frame_unref(s->last2_frame);
+ av_frame_move_ref(s->last2_frame, s->last_frame);
+ if ((ret = av_frame_ref(s->last_frame, frame)) < 0)
+ return ret;
+
*got_frame = 1;
- *(AVFrame*)data = s->frame;
return buf_size;
}
static av_cold int cmv_decode_end(AVCodecContext *avctx){
CmvContext *s = avctx->priv_data;
- if (s->frame.data[0])
- s->avctx->release_buffer(avctx, &s->frame);
- if (s->last_frame.data[0])
- s->avctx->release_buffer(avctx, &s->last_frame);
- if (s->last2_frame.data[0])
- s->avctx->release_buffer(avctx, &s->last2_frame);
+
+ av_frame_free(&s->last_frame);
+ av_frame_free(&s->last2_frame);
return 0;
}
typedef struct MadContext {
AVCodecContext *avctx;
DSPContext dsp;
- AVFrame frame;
AVFrame last_frame;
GetBitContext gb;
void *bitstream_buf;
dst[j*dst_stride + i] = av_clip_uint8(src[j*src_stride + i] + add);
}
-static inline void comp_block(MadContext *t, int mb_x, int mb_y,
+static inline void comp_block(MadContext *t, AVFrame *frame,
+ int mb_x, int mb_y,
int j, int mv_x, int mv_y, int add)
{
if (j < 4) {
- comp(t->frame.data[0] + (mb_y*16 + ((j&2)<<2))*t->frame.linesize[0] + mb_x*16 + ((j&1)<<3),
- t->frame.linesize[0],
+ comp(frame->data[0] + (mb_y*16 + ((j&2)<<2))*frame->linesize[0] + mb_x*16 + ((j&1)<<3),
+ frame->linesize[0],
t->last_frame.data[0] + (mb_y*16 + ((j&2)<<2) + mv_y)*t->last_frame.linesize[0] + mb_x*16 + ((j&1)<<3) + mv_x,
t->last_frame.linesize[0], add);
} else if (!(t->avctx->flags & CODEC_FLAG_GRAY)) {
int index = j - 3;
- comp(t->frame.data[index] + (mb_y*8)*t->frame.linesize[index] + mb_x * 8,
- t->frame.linesize[index],
+ comp(frame->data[index] + (mb_y*8)*frame->linesize[index] + mb_x * 8,
+ frame->linesize[index],
t->last_frame.data[index] + (mb_y * 8 + (mv_y/2))*t->last_frame.linesize[index] + mb_x * 8 + (mv_x/2),
t->last_frame.linesize[index], add);
}
}
-static inline void idct_put(MadContext *t, int16_t *block, int mb_x, int mb_y, int j)
+static inline void idct_put(MadContext *t, AVFrame *frame, int16_t *block,
+ int mb_x, int mb_y, int j)
{
if (j < 4) {
ff_ea_idct_put_c(
- t->frame.data[0] + (mb_y*16 + ((j&2)<<2))*t->frame.linesize[0] + mb_x*16 + ((j&1)<<3),
- t->frame.linesize[0], block);
+ frame->data[0] + (mb_y*16 + ((j&2)<<2))*frame->linesize[0] + mb_x*16 + ((j&1)<<3),
+ frame->linesize[0], block);
} else if (!(t->avctx->flags & CODEC_FLAG_GRAY)) {
int index = j - 3;
ff_ea_idct_put_c(
- t->frame.data[index] + (mb_y*8)*t->frame.linesize[index] + mb_x*8,
- t->frame.linesize[index], block);
+ frame->data[index] + (mb_y*8)*frame->linesize[index] + mb_x*8,
+ frame->linesize[index], block);
}
}
return value;
}
-static void decode_mb(MadContext *s, int inter)
+static void decode_mb(MadContext *s, AVFrame *frame, int inter)
{
int mv_map = 0;
int mv_x, mv_y;
for (j=0; j<6; j++) {
if (mv_map & (1<<j)) { // mv_x and mv_y are guarded by mv_map
int add = 2*decode_motion(&s->gb);
- comp_block(s, s->mb_x, s->mb_y, j, mv_x, mv_y, add);
+ comp_block(s, frame, s->mb_x, s->mb_y, j, mv_x, mv_y, add);
} else {
s->dsp.clear_block(s->block);
decode_block_intra(s, s->block);
- idct_put(s, s->block, s->mb_x, s->mb_y, j);
+ idct_put(s, frame, s->block, s->mb_x, s->mb_y, j);
}
}
}
int buf_size = avpkt->size;
const uint8_t *buf_end = buf+buf_size;
MadContext *s = avctx->priv_data;
+ AVFrame *frame = data;
int width, height;
int chunk_type;
- int inter;
+ int inter, ret;
if (buf_size < 17) {
av_log(avctx, AV_LOG_ERROR, "Input buffer too small\n");
if (av_image_check_size(width, height, 0, avctx) < 0)
return -1;
avcodec_set_dimensions(avctx, width, height);
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
+ av_frame_unref(&s->last_frame);
}
- s->frame.reference = 1;
- if (!s->frame.data[0]) {
- if (ff_get_buffer(avctx, &s->frame) < 0) {
- av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
- return -1;
- }
+ if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
av_fast_padded_malloc(&s->bitstream_buf, &s->bitstream_buf_size,
for (s->mb_y=0; s->mb_y < (avctx->height+15)/16; s->mb_y++)
for (s->mb_x=0; s->mb_x < (avctx->width +15)/16; s->mb_x++)
- decode_mb(s, inter);
+ decode_mb(s, frame, inter);
*got_frame = 1;
- *(AVFrame*)data = s->frame;
- if (chunk_type != MADe_TAG)
- FFSWAP(AVFrame, s->frame, s->last_frame);
+ if (chunk_type != MADe_TAG) {
+ av_frame_unref(&s->last_frame);
+ if ((ret = av_frame_ref(&s->last_frame, frame)) < 0)
+ return ret;
+ }
return buf_size;
}
static av_cold int decode_end(AVCodecContext *avctx)
{
MadContext *t = avctx->priv_data;
- if (t->frame.data[0])
- avctx->release_buffer(avctx, &t->frame);
- if (t->last_frame.data[0])
- avctx->release_buffer(avctx, &t->last_frame);
+ av_frame_unref(&t->last_frame);
av_free(t->bitstream_buf);
return 0;
}
typedef struct TgqContext {
AVCodecContext *avctx;
- AVFrame frame;
int width, height;
ScanTable scantable;
int qtable[64];
block[0] += 128 << 4;
}
-static void tgq_idct_put_mb(TgqContext *s, int16_t (*block)[64],
+static void tgq_idct_put_mb(TgqContext *s, int16_t (*block)[64], AVFrame *frame,
int mb_x, int mb_y)
{
- int linesize = s->frame.linesize[0];
- uint8_t *dest_y = s->frame.data[0] + (mb_y * 16 * linesize) + mb_x * 16;
- uint8_t *dest_cb = s->frame.data[1] + (mb_y * 8 * s->frame.linesize[1]) + mb_x * 8;
- uint8_t *dest_cr = s->frame.data[2] + (mb_y * 8 * s->frame.linesize[2]) + mb_x * 8;
+ int linesize = frame->linesize[0];
+ uint8_t *dest_y = frame->data[0] + (mb_y * 16 * linesize) + mb_x * 16;
+ uint8_t *dest_cb = frame->data[1] + (mb_y * 8 * frame->linesize[1]) + mb_x * 8;
+ uint8_t *dest_cr = frame->data[2] + (mb_y * 8 * frame->linesize[2]) + mb_x * 8;
ff_ea_idct_put_c(dest_y , linesize, block[0]);
ff_ea_idct_put_c(dest_y + 8, linesize, block[1]);
ff_ea_idct_put_c(dest_y + 8 * linesize , linesize, block[2]);
ff_ea_idct_put_c(dest_y + 8 * linesize + 8, linesize, block[3]);
if (!(s->avctx->flags & CODEC_FLAG_GRAY)) {
- ff_ea_idct_put_c(dest_cb, s->frame.linesize[1], block[4]);
- ff_ea_idct_put_c(dest_cr, s->frame.linesize[2], block[5]);
+ ff_ea_idct_put_c(dest_cb, frame->linesize[1], block[4]);
+ ff_ea_idct_put_c(dest_cr, frame->linesize[2], block[5]);
}
}
memset(dst + j * dst_stride, level, 8);
}
-static void tgq_idct_put_mb_dconly(TgqContext *s, int mb_x, int mb_y, const int8_t *dc)
+static void tgq_idct_put_mb_dconly(TgqContext *s, AVFrame *frame,
+ int mb_x, int mb_y, const int8_t *dc)
{
- int linesize = s->frame.linesize[0];
- uint8_t *dest_y = s->frame.data[0] + (mb_y * 16 * linesize) + mb_x * 16;
- uint8_t *dest_cb = s->frame.data[1] + (mb_y * 8 * s->frame.linesize[1]) + mb_x * 8;
- uint8_t *dest_cr = s->frame.data[2] + (mb_y * 8 * s->frame.linesize[2]) + mb_x * 8;
+ int linesize = frame->linesize[0];
+ uint8_t *dest_y = frame->data[0] + (mb_y * 16 * linesize) + mb_x * 16;
+ uint8_t *dest_cb = frame->data[1] + (mb_y * 8 * frame->linesize[1]) + mb_x * 8;
+ uint8_t *dest_cr = frame->data[2] + (mb_y * 8 * frame->linesize[2]) + mb_x * 8;
tgq_dconly(s, dest_y, linesize, dc[0]);
tgq_dconly(s, dest_y + 8, linesize, dc[1]);
tgq_dconly(s, dest_y + 8 * linesize, linesize, dc[2]);
tgq_dconly(s, dest_y + 8 * linesize + 8, linesize, dc[3]);
if (!(s->avctx->flags & CODEC_FLAG_GRAY)) {
- tgq_dconly(s, dest_cb, s->frame.linesize[1], dc[4]);
- tgq_dconly(s, dest_cr, s->frame.linesize[2], dc[5]);
+ tgq_dconly(s, dest_cb, frame->linesize[1], dc[4]);
+ tgq_dconly(s, dest_cr, frame->linesize[2], dc[5]);
}
}
-static void tgq_decode_mb(TgqContext *s, int mb_y, int mb_x)
+static void tgq_decode_mb(TgqContext *s, AVFrame *frame, int mb_y, int mb_x)
{
int mode;
int i;
init_get_bits(&gb, s->gb.buffer, FFMIN(s->gb.buffer_end - s->gb.buffer, mode) * 8);
for (i = 0; i < 6; i++)
tgq_decode_block(s, s->block[i], &gb);
- tgq_idct_put_mb(s, s->block, mb_x, mb_y);
+ tgq_idct_put_mb(s, s->block, frame, mb_x, mb_y);
bytestream2_skip(&s->gb, mode);
} else {
if (mode == 3) {
} else {
av_log(s->avctx, AV_LOG_ERROR, "unsupported mb mode %i\n", mode);
}
- tgq_idct_put_mb_dconly(s, mb_x, mb_y, dc);
+ tgq_idct_put_mb_dconly(s, frame, mb_x, mb_y, dc);
}
}
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
TgqContext *s = avctx->priv_data;
+ AVFrame *frame = data;
int x, y, ret;
int big_endian = AV_RL32(&buf[4]) > 0x000FFFFF;
if (s->avctx->width!=s->width || s->avctx->height!=s->height) {
avcodec_set_dimensions(s->avctx, s->width, s->height);
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
}
tgq_calculate_qtable(s, bytestream2_get_byteu(&s->gb));
bytestream2_skip(&s->gb, 3);
- if (!s->frame.data[0]) {
- s->frame.key_frame = 1;
- s->frame.pict_type = AV_PICTURE_TYPE_I;
- s->frame.buffer_hints = FF_BUFFER_HINTS_VALID;
- if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
- av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
- return ret;
- }
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
+ frame->key_frame = 1;
+ frame->pict_type = AV_PICTURE_TYPE_I;
for (y = 0; y < FFALIGN(avctx->height, 16) >> 4; y++)
for (x = 0; x < FFALIGN(avctx->width, 16) >> 4; x++)
- tgq_decode_mb(s, y, x);
+ tgq_decode_mb(s, frame, y, x);
*got_frame = 1;
- *(AVFrame*)data = s->frame;
return avpkt->size;
}
-static av_cold int tgq_decode_end(AVCodecContext *avctx)
-{
- TgqContext *s = avctx->priv_data;
- if (s->frame.data[0])
- s->avctx->release_buffer(avctx, &s->frame);
- return 0;
-}
-
AVCodec ff_eatgq_decoder = {
.name = "eatgq",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_TGQ,
.priv_data_size = sizeof(TgqContext),
.init = tgq_decode_init,
- .close = tgq_decode_end,
.decode = tgq_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Electronic Arts TGQ video"),
#include "avcodec.h"
#define BITSTREAM_READER_LE
#include "get_bits.h"
+#include "internal.h"
#include "libavutil/imgutils.h"
#include "libavutil/mem.h"
typedef struct TgvContext {
AVCodecContext *avctx;
- AVFrame frame;
AVFrame last_frame;
+ uint8_t *frame_buffer;
int width,height;
uint32_t palette[AVPALETTE_COUNT];
* Decode inter-frame
* @return 0 on success, -1 on critical buffer underflow
*/
-static int tgv_decode_inter(TgvContext *s, const uint8_t *buf,
- const uint8_t *buf_end)
+static int tgv_decode_inter(TgvContext *s, AVFrame *frame,
+ const uint8_t *buf, const uint8_t *buf_end)
{
int num_mvs;
int num_blocks_raw;
for (j = 0; j < 4; j++)
for (i = 0; i < 4; i++)
- s->frame.data[0][(y * 4 + j) * s->frame.linesize[0] + (x * 4 + i)] =
+ frame->data[0][(y * 4 + j) * frame->linesize[0] + (x * 4 + i)] =
src[j * src_stride + i];
}
return 0;
}
-/** release AVFrame buffers if allocated */
-static void cond_release_buffer(AVFrame *pic)
-{
- if (pic->data[0]) {
- av_freep(&pic->data[0]);
- av_free(pic->data[1]);
- }
-}
-
static int tgv_decode_frame(AVCodecContext *avctx,
void *data, int *got_frame,
AVPacket *avpkt)
int buf_size = avpkt->size;
TgvContext *s = avctx->priv_data;
const uint8_t *buf_end = buf + buf_size;
+ AVFrame *frame = data;
int chunk_type, ret;
chunk_type = AV_RL32(&buf[0]);
s->height = AV_RL16(&buf[2]);
if (s->avctx->width != s->width || s->avctx->height != s->height) {
avcodec_set_dimensions(s->avctx, s->width, s->height);
- cond_release_buffer(&s->frame);
- cond_release_buffer(&s->last_frame);
+ av_freep(&s->frame_buffer);
+ av_frame_unref(&s->last_frame);
}
pal_count = AV_RL16(&buf[6]);
if ((ret = av_image_check_size(s->width, s->height, 0, avctx)) < 0)
return ret;
- /* shuffle */
- FFSWAP(AVFrame, s->frame, s->last_frame);
- if (!s->frame.data[0]) {
- s->frame.reference = 1;
- s->frame.buffer_hints = FF_BUFFER_HINTS_VALID;
- s->frame.linesize[0] = s->width;
+ if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
+ return ret;
- s->frame.data[0] = av_malloc(s->width * s->height);
- if (!s->frame.data[0])
- return AVERROR(ENOMEM);
- s->frame.data[1] = av_malloc(AVPALETTE_SIZE);
- if (!s->frame.data[1]) {
- av_freep(&s->frame.data[0]);
- return AVERROR(ENOMEM);
- }
- }
- memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE);
+ memcpy(frame->data[1], s->palette, AVPALETTE_SIZE);
if (chunk_type == kVGT_TAG) {
- s->frame.key_frame = 1;
- s->frame.pict_type = AV_PICTURE_TYPE_I;
- if (unpack(buf, buf_end, s->frame.data[0], s->avctx->width, s->avctx->height) < 0) {
+ int y;
+ frame->key_frame = 1;
+ frame->pict_type = AV_PICTURE_TYPE_I;
+
+ if (!s->frame_buffer &&
+ !(s->frame_buffer = av_malloc(s->width * s->height)))
+ return AVERROR(ENOMEM);
+
+ if (unpack(buf, buf_end, s->frame_buffer, s->avctx->width, s->avctx->height) < 0) {
av_log(avctx, AV_LOG_WARNING, "truncated intra frame\n");
return AVERROR_INVALIDDATA;
}
+ for (y = 0; y < s->height; y++)
+ memcpy(frame->data[0] + y * frame->linesize[0],
+ s->frame_buffer + y * s->width,
+ s->width);
} else {
if (!s->last_frame.data[0]) {
av_log(avctx, AV_LOG_WARNING, "inter frame without corresponding intra frame\n");
return buf_size;
}
- s->frame.key_frame = 0;
- s->frame.pict_type = AV_PICTURE_TYPE_P;
- if (tgv_decode_inter(s, buf, buf_end) < 0) {
+ frame->key_frame = 0;
+ frame->pict_type = AV_PICTURE_TYPE_P;
+ if (tgv_decode_inter(s, frame, buf, buf_end) < 0) {
av_log(avctx, AV_LOG_WARNING, "truncated inter frame\n");
return AVERROR_INVALIDDATA;
}
}
+ av_frame_unref(&s->last_frame);
+ if ((ret = av_frame_ref(&s->last_frame, frame)) < 0)
+ return ret;
+
*got_frame = 1;
- *(AVFrame*)data = s->frame;
return buf_size;
}
static av_cold int tgv_decode_end(AVCodecContext *avctx)
{
TgvContext *s = avctx->priv_data;
- cond_release_buffer(&s->frame);
- cond_release_buffer(&s->last_frame);
+ av_frame_unref(&s->last_frame);
+ av_freep(&s->frame_buffer);
av_free(s->mv_codebook);
av_free(s->block_codebook);
return 0;
.close = tgv_decode_end,
.decode = tgv_decode_frame,
.long_name = NULL_IF_CONFIG_SMALL("Electronic Arts TGV video"),
+ .capabilities = CODEC_CAP_DR1,
};
typedef struct TqiContext {
MpegEncContext s;
- AVFrame frame;
void *bitstream_buf;
unsigned int bitstream_buf_size;
DECLARE_ALIGNED(16, int16_t, block)[6][64];
return 0;
}
-static inline void tqi_idct_put(TqiContext *t, int16_t (*block)[64])
+static inline void tqi_idct_put(TqiContext *t, AVFrame *frame, int16_t (*block)[64])
{
MpegEncContext *s = &t->s;
- int linesize= t->frame.linesize[0];
- uint8_t *dest_y = t->frame.data[0] + (s->mb_y * 16* linesize ) + s->mb_x * 16;
- uint8_t *dest_cb = t->frame.data[1] + (s->mb_y * 8 * t->frame.linesize[1]) + s->mb_x * 8;
- uint8_t *dest_cr = t->frame.data[2] + (s->mb_y * 8 * t->frame.linesize[2]) + s->mb_x * 8;
+ int linesize = frame->linesize[0];
+ uint8_t *dest_y = frame->data[0] + (s->mb_y * 16* linesize ) + s->mb_x * 16;
+ uint8_t *dest_cb = frame->data[1] + (s->mb_y * 8 * frame->linesize[1]) + s->mb_x * 8;
+ uint8_t *dest_cr = frame->data[2] + (s->mb_y * 8 * frame->linesize[2]) + s->mb_x * 8;
ff_ea_idct_put_c(dest_y , linesize, block[0]);
ff_ea_idct_put_c(dest_y + 8, linesize, block[1]);
ff_ea_idct_put_c(dest_y + 8*linesize , linesize, block[2]);
ff_ea_idct_put_c(dest_y + 8*linesize + 8, linesize, block[3]);
if(!(s->avctx->flags&CODEC_FLAG_GRAY)) {
- ff_ea_idct_put_c(dest_cb, t->frame.linesize[1], block[4]);
- ff_ea_idct_put_c(dest_cr, t->frame.linesize[2], block[5]);
+ ff_ea_idct_put_c(dest_cb, frame->linesize[1], block[4]);
+ ff_ea_idct_put_c(dest_cr, frame->linesize[2], block[5]);
}
}
const uint8_t *buf_end = buf+buf_size;
TqiContext *t = avctx->priv_data;
MpegEncContext *s = &t->s;
+ AVFrame *frame = data;
+ int ret;
s->width = AV_RL16(&buf[0]);
s->height = AV_RL16(&buf[2]);
tqi_calculate_qtable(s, buf[4]);
buf += 8;
- if (t->frame.data[0])
- avctx->release_buffer(avctx, &t->frame);
-
if (s->avctx->width!=s->width || s->avctx->height!=s->height)
avcodec_set_dimensions(s->avctx, s->width, s->height);
- if(ff_get_buffer(avctx, &t->frame) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
- return -1;
+ return ret;
}
av_fast_padded_malloc(&t->bitstream_buf, &t->bitstream_buf_size,
{
if (tqi_decode_mb(s, t->block) < 0)
break;
- tqi_idct_put(t, t->block);
+ tqi_idct_put(t, frame, t->block);
}
*got_frame = 1;
- *(AVFrame*)data = t->frame;
return buf_size;
}
static av_cold int tqi_decode_end(AVCodecContext *avctx)
{
TqiContext *t = avctx->priv_data;
- if(t->frame.data[0])
- avctx->release_buffer(avctx, &t->frame);
av_free(t->bitstream_buf);
return 0;
}
mb_index = (b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride;
error = s->error_status_table[mb_index];
- if (IS_INTER(s->cur_pic->f.mb_type[mb_index]))
+ if (IS_INTER(s->cur_pic->mb_type[mb_index]))
continue; // inter
if (!(error & ER_DC_ERROR))
continue; // dc-ok
for (j = b_x + 1; j < w; j++) {
int mb_index_j = (j >> is_luma) + (b_y >> is_luma) * s->mb_stride;
int error_j = s->error_status_table[mb_index_j];
- int intra_j = IS_INTRA(s->cur_pic->f.mb_type[mb_index_j]);
+ int intra_j = IS_INTRA(s->cur_pic->mb_type[mb_index_j]);
if (intra_j == 0 || !(error_j & ER_DC_ERROR)) {
color[0] = dc[j + b_y * stride];
distance[0] = j - b_x;
for (j = b_x - 1; j >= 0; j--) {
int mb_index_j = (j >> is_luma) + (b_y >> is_luma) * s->mb_stride;
int error_j = s->error_status_table[mb_index_j];
- int intra_j = IS_INTRA(s->cur_pic->f.mb_type[mb_index_j]);
+ int intra_j = IS_INTRA(s->cur_pic->mb_type[mb_index_j]);
if (intra_j == 0 || !(error_j & ER_DC_ERROR)) {
color[1] = dc[j + b_y * stride];
distance[1] = b_x - j;
for (j = b_y + 1; j < h; j++) {
int mb_index_j = (b_x >> is_luma) + (j >> is_luma) * s->mb_stride;
int error_j = s->error_status_table[mb_index_j];
- int intra_j = IS_INTRA(s->cur_pic->f.mb_type[mb_index_j]);
+ int intra_j = IS_INTRA(s->cur_pic->mb_type[mb_index_j]);
if (intra_j == 0 || !(error_j & ER_DC_ERROR)) {
color[2] = dc[b_x + j * stride];
for (j = b_y - 1; j >= 0; j--) {
int mb_index_j = (b_x >> is_luma) + (j >> is_luma) * s->mb_stride;
int error_j = s->error_status_table[mb_index_j];
- int intra_j = IS_INTRA(s->cur_pic->f.mb_type[mb_index_j]);
+ int intra_j = IS_INTRA(s->cur_pic->mb_type[mb_index_j]);
if (intra_j == 0 || !(error_j & ER_DC_ERROR)) {
color[3] = dc[b_x + j * stride];
distance[3] = b_y - j;
int y;
int left_status = s->error_status_table[( b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride];
int right_status = s->error_status_table[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride];
- int left_intra = IS_INTRA(s->cur_pic->f.mb_type[( b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
- int right_intra = IS_INTRA(s->cur_pic->f.mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
+ int left_intra = IS_INTRA(s->cur_pic->mb_type[( b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
+ int right_intra = IS_INTRA(s->cur_pic->mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
int left_damage = left_status & ER_MB_ERROR;
int right_damage = right_status & ER_MB_ERROR;
int offset = b_x * 8 + b_y * stride * 8;
- int16_t *left_mv = s->cur_pic->f.motion_val[0][mvy_stride * b_y + mvx_stride * b_x];
- int16_t *right_mv = s->cur_pic->f.motion_val[0][mvy_stride * b_y + mvx_stride * (b_x + 1)];
+ int16_t *left_mv = s->cur_pic->motion_val[0][mvy_stride * b_y + mvx_stride * b_x];
+ int16_t *right_mv = s->cur_pic->motion_val[0][mvy_stride * b_y + mvx_stride * (b_x + 1)];
if (!(left_damage || right_damage))
continue; // both undamaged
if ((!left_intra) && (!right_intra) &&
int x;
int top_status = s->error_status_table[(b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride];
int bottom_status = s->error_status_table[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride];
- int top_intra = IS_INTRA(s->cur_pic->f.mb_type[(b_x >> is_luma) + ( b_y >> is_luma) * s->mb_stride]);
- int bottom_intra = IS_INTRA(s->cur_pic->f.mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]);
+ int top_intra = IS_INTRA(s->cur_pic->mb_type[(b_x >> is_luma) + ( b_y >> is_luma) * s->mb_stride]);
+ int bottom_intra = IS_INTRA(s->cur_pic->mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]);
int top_damage = top_status & ER_MB_ERROR;
int bottom_damage = bottom_status & ER_MB_ERROR;
int offset = b_x * 8 + b_y * stride * 8;
- int16_t *top_mv = s->cur_pic->f.motion_val[0][mvy_stride * b_y + mvx_stride * b_x];
- int16_t *bottom_mv = s->cur_pic->f.motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x];
+ int16_t *top_mv = s->cur_pic->motion_val[0][mvy_stride * b_y + mvx_stride * b_x];
+ int16_t *bottom_mv = s->cur_pic->motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x];
if (!(top_damage || bottom_damage))
continue; // both undamaged
int f = 0;
int error = s->error_status_table[mb_xy];
- if (IS_INTRA(s->cur_pic->f.mb_type[mb_xy]))
+ if (IS_INTRA(s->cur_pic->mb_type[mb_xy]))
f = MV_FROZEN; // intra // FIXME check
if (!(error & ER_MV_ERROR))
f = MV_FROZEN; // inter with undamaged MV
const int mb_xy = mb_x + mb_y * s->mb_stride;
int mv_dir = (s->last_pic && s->last_pic->f.data[0]) ? MV_DIR_FORWARD : MV_DIR_BACKWARD;
- if (IS_INTRA(s->cur_pic->f.mb_type[mb_xy]))
+ if (IS_INTRA(s->cur_pic->mb_type[mb_xy]))
continue;
if (!(s->error_status_table[mb_xy] & ER_MV_ERROR))
continue;
if (fixed[mb_xy] == MV_FROZEN)
continue;
- assert(!IS_INTRA(s->cur_pic->f.mb_type[mb_xy]));
+ assert(!IS_INTRA(s->cur_pic->mb_type[mb_xy]));
assert(s->last_pic && s->last_pic->f.data[0]);
j = 0;
if (mb_x > 0 && fixed[mb_xy - 1]) {
mv_predictor[pred_count][0] =
- s->cur_pic->f.motion_val[0][mot_index - mot_step][0];
+ s->cur_pic->motion_val[0][mot_index - mot_step][0];
mv_predictor[pred_count][1] =
- s->cur_pic->f.motion_val[0][mot_index - mot_step][1];
+ s->cur_pic->motion_val[0][mot_index - mot_step][1];
ref[pred_count] =
- s->cur_pic->f.ref_index[0][4 * (mb_xy - 1)];
+ s->cur_pic->ref_index[0][4 * (mb_xy - 1)];
pred_count++;
}
if (mb_x + 1 < mb_width && fixed[mb_xy + 1]) {
mv_predictor[pred_count][0] =
- s->cur_pic->f.motion_val[0][mot_index + mot_step][0];
+ s->cur_pic->motion_val[0][mot_index + mot_step][0];
mv_predictor[pred_count][1] =
- s->cur_pic->f.motion_val[0][mot_index + mot_step][1];
+ s->cur_pic->motion_val[0][mot_index + mot_step][1];
ref[pred_count] =
- s->cur_pic->f.ref_index[0][4 * (mb_xy + 1)];
+ s->cur_pic->ref_index[0][4 * (mb_xy + 1)];
pred_count++;
}
if (mb_y > 0 && fixed[mb_xy - mb_stride]) {
mv_predictor[pred_count][0] =
- s->cur_pic->f.motion_val[0][mot_index - mot_stride * mot_step][0];
+ s->cur_pic->motion_val[0][mot_index - mot_stride * mot_step][0];
mv_predictor[pred_count][1] =
- s->cur_pic->f.motion_val[0][mot_index - mot_stride * mot_step][1];
+ s->cur_pic->motion_val[0][mot_index - mot_stride * mot_step][1];
ref[pred_count] =
- s->cur_pic->f.ref_index[0][4 * (mb_xy - s->mb_stride)];
+ s->cur_pic->ref_index[0][4 * (mb_xy - s->mb_stride)];
pred_count++;
}
if (mb_y + 1<mb_height && fixed[mb_xy + mb_stride]) {
mv_predictor[pred_count][0] =
- s->cur_pic->f.motion_val[0][mot_index + mot_stride * mot_step][0];
+ s->cur_pic->motion_val[0][mot_index + mot_stride * mot_step][0];
mv_predictor[pred_count][1] =
- s->cur_pic->f.motion_val[0][mot_index + mot_stride * mot_step][1];
+ s->cur_pic->motion_val[0][mot_index + mot_stride * mot_step][1];
ref[pred_count] =
- s->cur_pic->f.ref_index[0][4 * (mb_xy + s->mb_stride)];
+ s->cur_pic->ref_index[0][4 * (mb_xy + s->mb_stride)];
pred_count++;
}
if (pred_count == 0)
if (s->avctx->codec_id == AV_CODEC_ID_H264) {
// FIXME
} else {
- ff_thread_await_progress(&s->last_pic->f,
+ ff_thread_await_progress(&s->last_pic->tf,
mb_y, 0);
}
- if (!s->last_pic->f.motion_val[0] ||
- !s->last_pic->f.ref_index[0])
+ if (!s->last_pic->motion_val[0] ||
+ !s->last_pic->ref_index[0])
goto skip_last_mv;
- prev_x = s->last_pic->f.motion_val[0][mot_index][0];
- prev_y = s->last_pic->f.motion_val[0][mot_index][1];
- prev_ref = s->last_pic->f.ref_index[0][4 * mb_xy];
+ prev_x = s->last_pic->motion_val[0][mot_index][0];
+ prev_y = s->last_pic->motion_val[0][mot_index][1];
+ prev_ref = s->last_pic->ref_index[0][4 * mb_xy];
} else {
- prev_x = s->cur_pic->f.motion_val[0][mot_index][0];
- prev_y = s->cur_pic->f.motion_val[0][mot_index][1];
- prev_ref = s->cur_pic->f.ref_index[0][4 * mb_xy];
+ prev_x = s->cur_pic->motion_val[0][mot_index][0];
+ prev_y = s->cur_pic->motion_val[0][mot_index][1];
+ prev_ref = s->cur_pic->ref_index[0][4 * mb_xy];
}
/* last MV */
uint8_t *src = s->cur_pic->f.data[0] +
mb_x * 16 + mb_y * 16 * linesize[0];
- s->cur_pic->f.motion_val[0][mot_index][0] =
+ s->cur_pic->motion_val[0][mot_index][0] =
s->mv[0][0][0] = mv_predictor[j][0];
- s->cur_pic->f.motion_val[0][mot_index][1] =
+ s->cur_pic->motion_val[0][mot_index][1] =
s->mv[0][0][1] = mv_predictor[j][1];
// predictor intra or otherwise not available
for (i = 0; i < mot_step; i++)
for (j = 0; j < mot_step; j++) {
- s->cur_pic->f.motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0];
- s->cur_pic->f.motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1];
+ s->cur_pic->motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0];
+ s->cur_pic->motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1];
}
s->decode_mb(s->opaque, ref[best_pred], MV_DIR_FORWARD,
if (s->avctx->codec_id == AV_CODEC_ID_H264) {
// FIXME
} else {
- ff_thread_await_progress(&s->last_pic->f, mb_y, 0);
+ ff_thread_await_progress(&s->last_pic->tf, mb_y, 0);
}
is_intra_likely += s->dsp->sad[0](NULL, last_mb_ptr, mb_ptr,
linesize[0], 16);
last_mb_ptr + linesize[0] * 16,
linesize[0], 16);
} else {
- if (IS_INTRA(s->cur_pic->f.mb_type[mb_xy]))
+ if (IS_INTRA(s->cur_pic->mb_type[mb_xy]))
is_intra_likely++;
else
is_intra_likely--;
return;
};
- if (s->cur_pic->f.motion_val[0] == NULL) {
+ if (s->cur_pic->motion_val[0] == NULL) {
av_log(s->avctx, AV_LOG_ERROR, "Warning MVs not available\n");
for (i = 0; i < 2; i++) {
- s->cur_pic->f.ref_index[i] = av_mallocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
- s->cur_pic->motion_val_base[i] = av_mallocz((size + 4) * 2 * sizeof(uint16_t));
- s->cur_pic->f.motion_val[i] = s->cur_pic->motion_val_base[i] + 4;
+ s->cur_pic->ref_index_buf[i] = av_buffer_allocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
+ s->cur_pic->motion_val_buf[i] = av_buffer_allocz((size + 4) * 2 * sizeof(uint16_t));
+ if (!s->cur_pic->ref_index_buf[i] || !s->cur_pic->motion_val_buf[i])
+ break;
+ s->cur_pic->ref_index[i] = s->cur_pic->ref_index_buf[i]->data;
+ s->cur_pic->motion_val[i] = (int16_t (*)[2])s->cur_pic->motion_val_buf[i]->data + 4;
+ }
+ if (i < 2) {
+ for (i = 0; i < 2; i++) {
+ av_buffer_unref(&s->cur_pic->ref_index_buf[i]);
+ av_buffer_unref(&s->cur_pic->motion_val_buf[i]);
+ s->cur_pic->ref_index[i] = NULL;
+ s->cur_pic->motion_val[i] = NULL;
+ }
+ return;
}
s->cur_pic->f.motion_subsample_log2 = 3;
}
continue;
if (is_intra_likely)
- s->cur_pic->f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
+ s->cur_pic->mb_type[mb_xy] = MB_TYPE_INTRA4x4;
else
- s->cur_pic->f.mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0;
+ s->cur_pic->mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0;
}
// change inter to intra blocks if no reference frames are available
!(s->next_pic && s->next_pic->f.data[0]))
for (i = 0; i < s->mb_num; i++) {
const int mb_xy = s->mb_index2xy[i];
- if (!IS_INTRA(s->cur_pic->f.mb_type[mb_xy]))
- s->cur_pic->f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
+ if (!IS_INTRA(s->cur_pic->mb_type[mb_xy]))
+ s->cur_pic->mb_type[mb_xy] = MB_TYPE_INTRA4x4;
}
/* handle inter blocks with damaged AC */
for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
const int mb_xy = mb_x + mb_y * s->mb_stride;
- const int mb_type = s->cur_pic->f.mb_type[mb_xy];
+ const int mb_type = s->cur_pic->mb_type[mb_xy];
const int dir = !(s->last_pic && s->last_pic->f.data[0]);
const int mv_dir = dir ? MV_DIR_BACKWARD : MV_DIR_FORWARD;
int mv_type;
int j;
mv_type = MV_TYPE_8X8;
for (j = 0; j < 4; j++) {
- s->mv[0][j][0] = s->cur_pic->f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0];
- s->mv[0][j][1] = s->cur_pic->f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1];
+ s->mv[0][j][0] = s->cur_pic->motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0];
+ s->mv[0][j][1] = s->cur_pic->motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1];
}
} else {
mv_type = MV_TYPE_16X16;
- s->mv[0][0][0] = s->cur_pic->f.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][0];
- s->mv[0][0][1] = s->cur_pic->f.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][1];
+ s->mv[0][0][0] = s->cur_pic->motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][0];
+ s->mv[0][0][1] = s->cur_pic->motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][1];
}
s->decode_mb(s->opaque, 0 /* FIXME h264 partitioned slices need this set */,
for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
int xy = mb_x * 2 + mb_y * 2 * s->b8_stride;
const int mb_xy = mb_x + mb_y * s->mb_stride;
- const int mb_type = s->cur_pic->f.mb_type[mb_xy];
+ const int mb_type = s->cur_pic->mb_type[mb_xy];
int mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
error = s->error_status_table[mb_xy];
int time_pp = s->pp_time;
int time_pb = s->pb_time;
- ff_thread_await_progress(&s->next_pic->f, mb_y, 0);
+ ff_thread_await_progress(&s->next_pic->tf, mb_y, 0);
- s->mv[0][0][0] = s->next_pic->f.motion_val[0][xy][0] * time_pb / time_pp;
- s->mv[0][0][1] = s->next_pic->f.motion_val[0][xy][1] * time_pb / time_pp;
- s->mv[1][0][0] = s->next_pic->f.motion_val[0][xy][0] * (time_pb - time_pp) / time_pp;
- s->mv[1][0][1] = s->next_pic->f.motion_val[0][xy][1] * (time_pb - time_pp) / time_pp;
+ s->mv[0][0][0] = s->next_pic->motion_val[0][xy][0] * time_pb / time_pp;
+ s->mv[0][0][1] = s->next_pic->motion_val[0][xy][1] * time_pb / time_pp;
+ s->mv[1][0][0] = s->next_pic->motion_val[0][xy][0] * (time_pb - time_pp) / time_pp;
+ s->mv[1][0][1] = s->next_pic->motion_val[0][xy][1] * (time_pb - time_pp) / time_pp;
} else {
s->mv[0][0][0] = 0;
s->mv[0][0][1] = 0;
int16_t *dc_ptr;
uint8_t *dest_y, *dest_cb, *dest_cr;
const int mb_xy = mb_x + mb_y * s->mb_stride;
- const int mb_type = s->cur_pic->f.mb_type[mb_xy];
+ const int mb_type = s->cur_pic->mb_type[mb_xy];
error = s->error_status_table[mb_xy];
for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
uint8_t *dest_y, *dest_cb, *dest_cr;
const int mb_xy = mb_x + mb_y * s->mb_stride;
- const int mb_type = s->cur_pic->f.mb_type[mb_xy];
+ const int mb_type = s->cur_pic->mb_type[mb_xy];
error = s->error_status_table[mb_xy];
for (i = 0; i < 3; i++)
av_free(s->codebooks[i].blocks);
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
+ av_frame_unref(&s->frame);
return 0;
}
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
Escape124Context *s = avctx->priv_data;
+ AVFrame *frame = data;
GetBitContext gb;
unsigned frame_flags, frame_size;
uint16_t* old_frame_data, *new_frame_data;
unsigned old_stride, new_stride;
-
- AVFrame new_frame = { { 0 } };
+ int ret;
init_get_bits(&gb, buf, buf_size * 8);
// Leave last frame unchanged
// FIXME: Is this necessary? I haven't seen it in any real samples
if (!(frame_flags & 0x114) || !(frame_flags & 0x7800000)) {
+ if (!s->frame.data[0])
+ return AVERROR_INVALIDDATA;
+
av_log(NULL, AV_LOG_DEBUG, "Skipping frame\n");
*got_frame = 1;
- *(AVFrame*)data = s->frame;
+ if ((ret = av_frame_ref(frame, &s->frame)) < 0)
+ return ret;
return frame_size;
}
}
}
- new_frame.reference = 3;
- if (ff_get_buffer(avctx, &new_frame)) {
+ if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
- return -1;
+ return ret;
}
- new_frame_data = (uint16_t*)new_frame.data[0];
- new_stride = new_frame.linesize[0] / 2;
+ new_frame_data = (uint16_t*)frame->data[0];
+ new_stride = frame->linesize[0] / 2;
old_frame_data = (uint16_t*)s->frame.data[0];
old_stride = s->frame.linesize[0] / 2;
"Escape sizes: %i, %i, %i\n",
frame_size, buf_size, get_bits_count(&gb) / 8);
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
+ av_frame_unref(&s->frame);
+ if ((ret = av_frame_ref(&s->frame, frame)) < 0)
+ return ret;
- *(AVFrame*)data = s->frame = new_frame;
*got_frame = 1;
return frame_size;
FFV1Context *s = avctx->priv_data;
int i, j;
- if (avctx->codec->decode && s->picture.data[0])
- avctx->release_buffer(avctx, &s->picture);
- if (avctx->codec->decode && s->last_picture.data[0])
- avctx->release_buffer(avctx, &s->last_picture);
+ av_frame_unref(&s->last_picture);
for (j = 0; j < s->slice_count; j++) {
FFV1Context *fs = s->slice_context[j];
int flags;
int picture_number;
AVFrame picture, last_picture;
+
+ AVFrame *cur;
int plane_count;
int ac; // 1 = range coder <-> 0 = golomb rice
int ac_byte_count; // number of bytes used for AC coding
ps = get_symbol(c, state, 0);
if (ps == 1) {
- f->picture.interlaced_frame = 1;
- f->picture.top_field_first = 1;
+ f->cur->interlaced_frame = 1;
+ f->cur->top_field_first = 1;
} else if (ps == 2) {
- f->picture.interlaced_frame = 1;
- f->picture.top_field_first = 0;
+ f->cur->interlaced_frame = 1;
+ f->cur->top_field_first = 0;
} else if (ps == 3) {
- f->picture.interlaced_frame = 0;
+ f->cur->interlaced_frame = 0;
}
- f->picture.sample_aspect_ratio.num = get_symbol(c, state, 0);
- f->picture.sample_aspect_ratio.den = get_symbol(c, state, 0);
+ f->cur->sample_aspect_ratio.num = get_symbol(c, state, 0);
+ f->cur->sample_aspect_ratio.den = get_symbol(c, state, 0);
return 0;
}
const int ps = (av_pix_fmt_desc_get(c->pix_fmt)->flags & PIX_FMT_PLANAR)
? (c->bits_per_raw_sample > 8) + 1
: 4;
- AVFrame *const p = &f->picture;
+ AVFrame *const p = f->cur;
if (f->version > 2) {
if (decode_slice_header(f, fs) < 0) {
}
if ((ret = ffv1_init_slice_state(f, fs)) < 0)
return ret;
- if (f->picture.key_frame)
+ if (f->cur->key_frame)
ffv1_clear_slice_state(f, fs);
width = fs->slice_width;
height = fs->slice_height;
int buf_size = avpkt->size;
FFV1Context *f = avctx->priv_data;
RangeCoder *const c = &f->slice_context[0]->c;
- AVFrame *const p = &f->picture;
int i, ret;
uint8_t keystate = 128;
const uint8_t *buf_p;
+ AVFrame *const p = data;
- AVFrame *picture = data;
-
- /* release previously stored data */
- if (p->data[0])
- avctx->release_buffer(avctx, p);
+ f->cur = p;
ff_init_range_decoder(c, buf, buf_size);
ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);
p->key_frame = 0;
}
- p->reference = 3; //for error concealment
- if ((ret = ff_get_buffer(avctx, p)) < 0) {
+ if ((ret = ff_get_buffer(avctx, p, AV_GET_BUFFER_FLAG_REF)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
ff_init_range_decoder(&fs->c, buf_p, v);
} else
fs->c.bytestream_end = (uint8_t *)(buf_p + v);
+
+ fs->cur = p;
}
avctx->execute(avctx, decode_slice, &f->slice_context[0], NULL,
for (j = 0; j < 4; j++) {
int sh = (j == 1 || j == 2) ? f->chroma_h_shift : 0;
int sv = (j == 1 || j == 2) ? f->chroma_v_shift : 0;
- dst[j] = f->picture.data[j] + f->picture.linesize[j] *
+ dst[j] = p->data[j] + p->linesize[j] *
(fs->slice_y >> sv) + (fs->slice_x >> sh);
src[j] = f->last_picture.data[j] +
f->last_picture.linesize[j] *
(fs->slice_y >> sv) + (fs->slice_x >> sh);
}
- av_image_copy(dst, f->picture.linesize, (const uint8_t **)src,
+ av_image_copy(dst, p->linesize, (const uint8_t **)src,
f->last_picture.linesize,
avctx->pix_fmt, fs->slice_width,
fs->slice_height);
f->picture_number++;
- *picture = *p;
- *got_frame = 1;
+ av_frame_unref(&f->last_picture);
+ if ((ret = av_frame_ref(&f->last_picture, p)) < 0)
+ return ret;
+ f->cur = NULL;
- FFSWAP(AVFrame, f->picture, f->last_picture);
+ *got_frame = 1;
return buf_size;
}
/* get output buffer */
frame->nb_samples = s->blocksize;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
#include "avcodec.h"
#include "bytestream.h"
#include "get_bits.h"
+#include "internal.h"
typedef struct BlockInfo {
uint8_t *pos;
{
int buf_size = avpkt->size;
FlashSVContext *s = avctx->priv_data;
- int h_blocks, v_blocks, h_part, v_part, i, j;
+ int h_blocks, v_blocks, h_part, v_part, i, j, ret;
GetBitContext gb;
/* no supplementary picture */
s->image_width, s->image_height, s->block_width, s->block_height,
h_blocks, v_blocks, h_part, v_part);
- s->frame.reference = 3;
- s->frame.buffer_hints = FF_BUFFER_HINTS_VALID |
- FF_BUFFER_HINTS_PRESERVE |
- FF_BUFFER_HINTS_REUSABLE;
- if (avctx->reget_buffer(avctx, &s->frame) < 0) {
+ if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
- return -1;
+ return ret;
}
/* loop over all block columns */
s->diff_height = cur_blk_height;
if (8 * size > get_bits_left(&gb)) {
- avctx->release_buffer(avctx, &s->frame);
- s->frame.data[0] = NULL;
+ av_frame_unref(&s->frame);
return AVERROR_INVALIDDATA;
}
memcpy(s->keyframe, s->frame.data[0], s->frame.linesize[0] * avctx->height);
}
+ if ((ret = av_frame_ref(data, &s->frame)) < 0)
+ return ret;
+
*got_frame = 1;
- *(AVFrame*)data = s->frame;
if ((get_bits_count(&gb) / 8) != buf_size)
av_log(avctx, AV_LOG_ERROR, "buffer not fully consumed (%d != %d)\n",
FlashSVContext *s = avctx->priv_data;
inflateEnd(&s->zstream);
/* release the frame if needed */
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
+ av_frame_unref(&s->frame);
/* free the tmpblock */
av_free(s->tmpblock);
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "bytestream.h"
+#include "internal.h"
#include "mathops.h"
#define FLI_256_COLOR 4
bytestream2_init(&g2, buf, buf_size);
- s->frame.reference = 1;
- s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
- if ((ret = avctx->reget_buffer(avctx, &s->frame)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
s->new_palette = 0;
}
+ if ((ret = av_frame_ref(data, &s->frame)) < 0)
+ return ret;
+
*got_frame = 1;
- *(AVFrame*)data = s->frame;
return buf_size;
}
bytestream2_init(&g2, buf, buf_size);
- s->frame.reference = 1;
- s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
- if ((ret = avctx->reget_buffer(avctx, &s->frame)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
av_log(avctx, AV_LOG_ERROR, "Processed FLI chunk where chunk size = %d " \
"and final chunk ptr = %d\n", buf_size, bytestream2_tell(&g2));
+ if ((ret = av_frame_ref(data, &s->frame)) < 0)
+ return ret;
*got_frame = 1;
- *(AVFrame*)data = s->frame;
return buf_size;
}
{
FlicDecodeContext *s = avctx->priv_data;
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
+ av_frame_unref(&s->frame);
return 0;
}
#include "huffman.h"
#include "bytestream.h"
#include "dsputil.h"
+#include "internal.h"
#define FPS_TAG MKTAG('F', 'P', 'S', 'x')
{
FrapsContext * const s = avctx->priv_data;
- avctx->coded_frame = &s->frame;
avctx->pix_fmt = AV_PIX_FMT_NONE; /* set in decode_frame */
s->avctx = avctx;
pix_fmt = version & 1 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_YUVJ420P;
if (avctx->pix_fmt != pix_fmt && f->data[0]) {
- avctx->release_buffer(avctx, f);
+ av_frame_unref(f);
}
avctx->pix_fmt = pix_fmt;
return AVERROR_INVALIDDATA;
}
- f->reference = 1;
- f->buffer_hints = FF_BUFFER_HINTS_VALID |
- FF_BUFFER_HINTS_PRESERVE |
- FF_BUFFER_HINTS_REUSABLE;
- if ((ret = avctx->reget_buffer(avctx, f)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, f)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
return AVERROR_INVALIDDATA;
}
- f->reference = 1;
- f->buffer_hints = FF_BUFFER_HINTS_VALID |
- FF_BUFFER_HINTS_PRESERVE |
- FF_BUFFER_HINTS_REUSABLE;
- if ((ret = avctx->reget_buffer(avctx, f)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, f)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
* Fraps v4 is virtually the same
*/
planes = 3;
- f->reference = 1;
- f->buffer_hints = FF_BUFFER_HINTS_VALID |
- FF_BUFFER_HINTS_PRESERVE |
- FF_BUFFER_HINTS_REUSABLE;
- if ((ret = avctx->reget_buffer(avctx, f)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, f)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
case 5:
/* Virtually the same as version 4, but is for RGB24 */
planes = 3;
- f->reference = 1;
- f->buffer_hints = FF_BUFFER_HINTS_VALID |
- FF_BUFFER_HINTS_PRESERVE |
- FF_BUFFER_HINTS_REUSABLE;
- if ((ret = avctx->reget_buffer(avctx, f)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, f)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
break;
}
- *frame = *f;
+ if ((ret = av_frame_ref(frame, f)) < 0)
+ return ret;
*got_frame = 1;
return buf_size;
{
FrapsContext *s = (FrapsContext*)avctx->priv_data;
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
+ av_frame_unref(&s->frame);
av_freep(&s->tmpbuf);
return 0;
}
avctx->pix_fmt = AV_PIX_FMT_UYVY422;
- avctx->coded_frame = avcodec_alloc_frame();
- if (!avctx->coded_frame)
- return AVERROR(ENOMEM);
-
return 0;
}
AVPacket *avpkt)
{
int field, ret;
- AVFrame *pic = avctx->coded_frame;
+ AVFrame *pic = data;
const uint8_t *buf = avpkt->data;
const uint8_t *buf_end = buf + avpkt->size;
- if (pic->data[0])
- avctx->release_buffer(avctx, pic);
-
if (avpkt->size < avctx->width * 2 * avctx->height + 4 + 2*8) {
av_log(avctx, AV_LOG_ERROR, "Packet is too small.\n");
return AVERROR_INVALIDDATA;
return AVERROR_INVALIDDATA;
}
- pic->reference = 0;
- if ((ret = ff_get_buffer(avctx, pic)) < 0) {
+ if ((ret = ff_get_buffer(avctx, pic, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
}
*got_frame = 1;
- *(AVFrame*)data = *pic;
return avpkt->size;
}
-static av_cold int decode_close(AVCodecContext *avctx)
-{
- AVFrame *pic = avctx->coded_frame;
- if (pic->data[0])
- avctx->release_buffer(avctx, pic);
- av_freep(&avctx->coded_frame);
-
- return 0;
-}
-
AVCodec ff_frwu_decoder = {
.name = "frwu",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_FRWU,
.init = decode_init,
- .close = decode_close,
.decode = decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Forward Uncompressed"),
/* get output buffer */
frame->nb_samples = avpkt->size * 2;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
}
frame->nb_samples = FRAME_LEN;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
/* get output buffer */
frame->nb_samples = out_samples;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
#define GCE_DISPOSAL_RESTORE 3
typedef struct GifState {
- AVFrame picture;
int screen_width;
int screen_height;
int bits_per_pixel;
static const uint8_t gif87a_sig[6] = "GIF87a";
static const uint8_t gif89a_sig[6] = "GIF89a";
-static int gif_read_image(GifState *s)
+static int gif_read_image(GifState *s, AVFrame *frame)
{
int left, top, width, height, bits_per_pixel, code_size, flags;
int is_interleaved, has_local_palette, y, pass, y1, linesize, n, i;
s->bytestream_end - s->bytestream, FF_LZW_GIF);
/* read all the image */
- linesize = s->picture.linesize[0];
- ptr1 = s->picture.data[0] + top * linesize + left;
+ linesize = frame->linesize[0];
+ ptr1 = frame->data[0] + top * linesize + left;
ptr = ptr1;
pass = 0;
y1 = 0;
return 0;
}
-static int gif_parse_next_image(GifState *s)
+static int gif_parse_next_image(GifState *s, AVFrame *frame)
{
while (s->bytestream < s->bytestream_end) {
int code = bytestream_get_byte(&s->bytestream);
switch (code) {
case ',':
- return gif_read_image(s);
+ return gif_read_image(s, frame);
case '!':
if ((ret = gif_read_extension(s)) < 0)
return ret;
s->avctx = avctx;
- avcodec_get_frame_defaults(&s->picture);
- avctx->coded_frame= &s->picture;
- s->picture.data[0] = NULL;
ff_lzw_decode_open(&s->lzw);
return 0;
}
return ret;
avcodec_set_dimensions(avctx, s->screen_width, s->screen_height);
- if (s->picture.data[0])
- avctx->release_buffer(avctx, &s->picture);
- if ((ret = ff_get_buffer(avctx, &s->picture)) < 0) {
+ if ((ret = ff_get_buffer(avctx, picture, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
- s->image_palette = (uint32_t *)s->picture.data[1];
- ret = gif_parse_next_image(s);
+ s->image_palette = (uint32_t *)picture->data[1];
+ ret = gif_parse_next_image(s, picture);
if (ret < 0)
return ret;
- *picture = s->picture;
*got_frame = 1;
return s->bytestream - buf;
}
GifState *s = avctx->priv_data;
ff_lzw_decode_close(&s->lzw);
- if(s->picture.data[0])
- avctx->release_buffer(avctx, &s->picture);
return 0;
}
/* get output buffer */
frame->nb_samples = avctx->frame_size;
- if ((res = ff_get_buffer(avctx, frame)) < 0) {
+ if ((res = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return res;
}
s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16;
- s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
+ s->current_picture.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
s->mv[0][0][0] = 0;
s->mv[0][0][1] = 0;
s->mb_skipped = 1;
}
if(s->mb_intra){
- s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA;
+ s->current_picture.mb_type[xy] = MB_TYPE_INTRA;
goto intra;
}
//set motion vectors
s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16;
- s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
+ s->current_picture.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
s->mv[0][0][0] = h->current_mv_x * 2;//gets divided by 2 in motion compensation
s->mv[0][0][1] = h->current_mv_y * 2;
assert(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type);
assert(s->current_picture.f.pict_type == s->pict_type);
- *pict = s->current_picture_ptr->f;
- ff_print_debug_info(s, pict);
+ if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
+ return ret;
+ ff_print_debug_info(s, s->current_picture_ptr);
*got_frame = 1;
const int wrap = s->b8_stride;
const int xy = s->block_index[0];
- s->current_picture.f.mbskip_table[mb_xy] = s->mb_skipped;
+ s->current_picture.mbskip_table[mb_xy] = s->mb_skipped;
if(s->mv_type != MV_TYPE_8X8){
int motion_x, motion_y;
s->p_field_mv_table[i][0][mb_xy][0]= s->mv[0][i][0];
s->p_field_mv_table[i][0][mb_xy][1]= s->mv[0][i][1];
}
- s->current_picture.f.ref_index[0][4*mb_xy ] =
- s->current_picture.f.ref_index[0][4*mb_xy + 1] = s->field_select[0][0];
- s->current_picture.f.ref_index[0][4*mb_xy + 2] =
- s->current_picture.f.ref_index[0][4*mb_xy + 3] = s->field_select[0][1];
+ s->current_picture.ref_index[0][4*mb_xy ] =
+ s->current_picture.ref_index[0][4*mb_xy + 1] = s->field_select[0][0];
+ s->current_picture.ref_index[0][4*mb_xy + 2] =
+ s->current_picture.ref_index[0][4*mb_xy + 3] = s->field_select[0][1];
}
/* no update if 8X8 because it has been done during parsing */
- s->current_picture.f.motion_val[0][xy][0] = motion_x;
- s->current_picture.f.motion_val[0][xy][1] = motion_y;
- s->current_picture.f.motion_val[0][xy + 1][0] = motion_x;
- s->current_picture.f.motion_val[0][xy + 1][1] = motion_y;
- s->current_picture.f.motion_val[0][xy + wrap][0] = motion_x;
- s->current_picture.f.motion_val[0][xy + wrap][1] = motion_y;
- s->current_picture.f.motion_val[0][xy + 1 + wrap][0] = motion_x;
- s->current_picture.f.motion_val[0][xy + 1 + wrap][1] = motion_y;
+ s->current_picture.motion_val[0][xy][0] = motion_x;
+ s->current_picture.motion_val[0][xy][1] = motion_y;
+ s->current_picture.motion_val[0][xy + 1][0] = motion_x;
+ s->current_picture.motion_val[0][xy + 1][1] = motion_y;
+ s->current_picture.motion_val[0][xy + wrap][0] = motion_x;
+ s->current_picture.motion_val[0][xy + wrap][1] = motion_y;
+ s->current_picture.motion_val[0][xy + 1 + wrap][0] = motion_x;
+ s->current_picture.motion_val[0][xy + 1 + wrap][1] = motion_y;
}
if(s->encoding){ //FIXME encoding MUST be cleaned up
if (s->mv_type == MV_TYPE_8X8)
- s->current_picture.f.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_8x8;
+ s->current_picture.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_8x8;
else if(s->mb_intra)
- s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA;
+ s->current_picture.mb_type[mb_xy] = MB_TYPE_INTRA;
else
- s->current_picture.f.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_16x16;
+ s->current_picture.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_16x16;
}
}
Diag Top
Left Center
*/
- if (!IS_SKIP(s->current_picture.f.mb_type[xy])) {
+ if (!IS_SKIP(s->current_picture.mb_type[xy])) {
qp_c= s->qscale;
s->dsp.h263_v_loop_filter(dest_y+8*linesize , linesize, qp_c);
s->dsp.h263_v_loop_filter(dest_y+8*linesize+8, linesize, qp_c);
if(s->mb_y){
int qp_dt, qp_tt, qp_tc;
- if (IS_SKIP(s->current_picture.f.mb_type[xy - s->mb_stride]))
+ if (IS_SKIP(s->current_picture.mb_type[xy - s->mb_stride]))
qp_tt=0;
else
- qp_tt = s->current_picture.f.qscale_table[xy - s->mb_stride];
+ qp_tt = s->current_picture.qscale_table[xy - s->mb_stride];
if(qp_c)
qp_tc= qp_c;
s->dsp.h263_h_loop_filter(dest_y-8*linesize+8 , linesize, qp_tt);
if(s->mb_x){
- if (qp_tt || IS_SKIP(s->current_picture.f.mb_type[xy - 1 - s->mb_stride]))
+ if (qp_tt || IS_SKIP(s->current_picture.mb_type[xy - 1 - s->mb_stride]))
qp_dt= qp_tt;
else
- qp_dt = s->current_picture.f.qscale_table[xy - 1 - s->mb_stride];
+ qp_dt = s->current_picture.qscale_table[xy - 1 - s->mb_stride];
if(qp_dt){
const int chroma_qp= s->chroma_qscale_table[qp_dt];
if(s->mb_x){
int qp_lc;
- if (qp_c || IS_SKIP(s->current_picture.f.mb_type[xy - 1]))
+ if (qp_c || IS_SKIP(s->current_picture.mb_type[xy - 1]))
qp_lc= qp_c;
else
- qp_lc = s->current_picture.f.qscale_table[xy - 1];
+ qp_lc = s->current_picture.qscale_table[xy - 1];
if(qp_lc){
s->dsp.h263_h_loop_filter(dest_y, linesize, qp_lc);
static const int off[4]= {2, 1, 1, -1};
wrap = s->b8_stride;
- mot_val = s->current_picture.f.motion_val[dir] + s->block_index[block];
+ mot_val = s->current_picture.motion_val[dir] + s->block_index[block];
A = mot_val[ - 1];
/* special case for first (slice) line */
if (buf_size == 0) {
/* special case for last picture */
if (s->low_delay==0 && s->next_picture_ptr) {
- *pict = s->next_picture_ptr->f;
+ if ((ret = av_frame_ref(pict, &s->next_picture_ptr->f)) < 0)
+ return ret;
s->next_picture_ptr= NULL;
*got_frame = 1;
assert(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type);
assert(s->current_picture.f.pict_type == s->pict_type);
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
- *pict = s->current_picture_ptr->f;
+ if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
+ return ret;
+ ff_print_debug_info(s, s->current_picture_ptr);
} else if (s->last_picture_ptr != NULL) {
- *pict = s->last_picture_ptr->f;
+ if ((ret = av_frame_ref(pict, &s->last_picture_ptr->f)) < 0)
+ return ret;
+ ff_print_debug_info(s, s->last_picture_ptr);
}
if(s->last_picture_ptr || s->low_delay){
*got_frame = 1;
- ff_print_debug_info(s, pict);
}
#ifdef PRINT_FRAME_TIME
* practice then correct remapping should be added. */
if (ref >= h->ref_count[0])
ref = 0;
- fill_rectangle(&h->cur_pic.f.ref_index[0][4 * h->mb_xy],
+ fill_rectangle(&h->cur_pic.ref_index[0][4 * h->mb_xy],
2, 2, 2, ref, 1);
fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
fill_rectangle(h->mv_cache[0][scan8[0]], 4, 4, 8,
}
}
-static void free_frame_buffer(H264Context *h, Picture *pic)
-{
- ff_thread_release_buffer(h->avctx, &pic->f);
- av_freep(&pic->f.hwaccel_picture_private);
-}
-
-static void free_picture(H264Context *h, Picture *pic)
+static void unref_picture(H264Context *h, Picture *pic)
{
+ int off = offsetof(Picture, tf) + sizeof(pic->tf);
int i;
- if (pic->f.data[0])
- free_frame_buffer(h, pic);
+ if (!pic->f.data[0])
+ return;
+
+ ff_thread_release_buffer(h->avctx, &pic->tf);
+ av_buffer_unref(&pic->hwaccel_priv_buf);
- av_freep(&pic->qscale_table_base);
- pic->f.qscale_table = NULL;
- av_freep(&pic->mb_type_base);
- pic->f.mb_type = NULL;
+ av_buffer_unref(&pic->qscale_table_buf);
+ av_buffer_unref(&pic->mb_type_buf);
for (i = 0; i < 2; i++) {
- av_freep(&pic->motion_val_base[i]);
- av_freep(&pic->f.ref_index[i]);
- pic->f.motion_val[i] = NULL;
+ av_buffer_unref(&pic->motion_val_buf[i]);
+ av_buffer_unref(&pic->ref_index_buf[i]);
}
+
+ memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
}
static void release_unused_pictures(H264Context *h, int remove_current)
int i;
/* release non reference frames */
- for (i = 0; i < h->picture_count; i++) {
- if (h->DPB[i].f.data[0] && !h->DPB[i].f.reference &&
- (!h->DPB[i].owner2 || h->DPB[i].owner2 == h) &&
+ for (i = 0; i < MAX_PICTURE_COUNT; i++) {
+ if (h->DPB[i].f.data[0] && !h->DPB[i].reference &&
(remove_current || &h->DPB[i] != h->cur_pic_ptr)) {
- free_frame_buffer(h, &h->DPB[i]);
+ unref_picture(h, &h->DPB[i]);
}
}
}
+static int ref_picture(H264Context *h, Picture *dst, Picture *src)
+{
+ int ret, i;
+
+ av_assert0(!dst->f.buf[0]);
+ av_assert0(src->f.buf[0]);
+
+ src->tf.f = &src->f;
+ dst->tf.f = &dst->f;
+ ret = ff_thread_ref_frame(&dst->tf, &src->tf);
+ if (ret < 0)
+ goto fail;
+
+
+ dst->qscale_table_buf = av_buffer_ref(src->qscale_table_buf);
+ dst->mb_type_buf = av_buffer_ref(src->mb_type_buf);
+ if (!dst->qscale_table_buf || !dst->mb_type_buf)
+ goto fail;
+ dst->qscale_table = src->qscale_table;
+ dst->mb_type = src->mb_type;
+
+ for (i = 0; i < 2; i ++) {
+ dst->motion_val_buf[i] = av_buffer_ref(src->motion_val_buf[i]);
+ dst->ref_index_buf[i] = av_buffer_ref(src->ref_index_buf[i]);
+ if (!dst->motion_val_buf[i] || !dst->ref_index_buf[i])
+ goto fail;
+ dst->motion_val[i] = src->motion_val[i];
+ dst->ref_index[i] = src->ref_index[i];
+ }
+
+ if (src->hwaccel_picture_private) {
+ dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
+ if (!dst->hwaccel_priv_buf)
+ goto fail;
+ dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
+ }
+
+ for (i = 0; i < 2; i++)
+ dst->field_poc[i] = src->field_poc[i];
+
+ memcpy(dst->ref_poc, src->ref_poc, sizeof(src->ref_poc));
+ memcpy(dst->ref_count, src->ref_count, sizeof(src->ref_count));
+
+ dst->poc = src->poc;
+ dst->frame_num = src->frame_num;
+ dst->mmco_reset = src->mmco_reset;
+ dst->pic_id = src->pic_id;
+ dst->long_ref = src->long_ref;
+ dst->mbaff = src->mbaff;
+ dst->field_picture = src->field_picture;
+ dst->needs_realloc = src->needs_realloc;
+ dst->reference = src->reference;
+
+ return 0;
+fail:
+ unref_picture(h, dst);
+ return ret;
+}
+
+
static int alloc_scratch_buffers(H264Context *h, int linesize)
{
int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
return 0;
}
-static int alloc_picture(H264Context *h, Picture *pic)
+static int init_table_pools(H264Context *h)
{
const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
const int mb_array_size = h->mb_stride * h->mb_height;
const int b4_stride = h->mb_width * 4 + 1;
const int b4_array_size = b4_stride * h->mb_height * 4;
+
+ h->qscale_table_pool = av_buffer_pool_init(big_mb_num + h->mb_stride,
+ av_buffer_allocz);
+ h->mb_type_pool = av_buffer_pool_init((big_mb_num + h->mb_stride) *
+ sizeof(uint32_t), av_buffer_allocz);
+ h->motion_val_pool = av_buffer_pool_init(2 * (b4_array_size + 4) *
+ sizeof(int16_t), av_buffer_allocz);
+ h->ref_index_pool = av_buffer_pool_init(4 * mb_array_size, av_buffer_allocz);
+
+ if (!h->qscale_table_pool || !h->mb_type_pool || !h->motion_val_pool ||
+ !h->ref_index_pool) {
+ av_buffer_pool_uninit(&h->qscale_table_pool);
+ av_buffer_pool_uninit(&h->mb_type_pool);
+ av_buffer_pool_uninit(&h->motion_val_pool);
+ av_buffer_pool_uninit(&h->ref_index_pool);
+ return AVERROR(ENOMEM);
+ }
+
+ return 0;
+}
+
+static int alloc_picture(H264Context *h, Picture *pic)
+{
int i, ret = 0;
av_assert0(!pic->f.data[0]);
if (h->avctx->hwaccel) {
const AVHWAccel *hwaccel = h->avctx->hwaccel;
- av_assert0(!pic->f.hwaccel_picture_private);
+ av_assert0(!pic->hwaccel_picture_private);
if (hwaccel->priv_data_size) {
- pic->f.hwaccel_picture_private = av_mallocz(hwaccel->priv_data_size);
- if (!pic->f.hwaccel_picture_private)
+ pic->hwaccel_priv_buf = av_buffer_allocz(hwaccel->priv_data_size);
+ if (!pic->hwaccel_priv_buf)
return AVERROR(ENOMEM);
+ pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
}
}
- ret = ff_thread_get_buffer(h->avctx, &pic->f);
+ pic->tf.f = &pic->f;
+ ret = ff_thread_get_buffer(h->avctx, &pic->tf, pic->reference ?
+ AV_GET_BUFFER_FLAG_REF : 0);
if (ret < 0)
goto fail;
h->linesize = pic->f.linesize[0];
h->uvlinesize = pic->f.linesize[1];
- if (pic->f.qscale_table == NULL) {
- FF_ALLOCZ_OR_GOTO(h->avctx, pic->qscale_table_base,
- (big_mb_num + h->mb_stride) * sizeof(uint8_t),
- fail)
- FF_ALLOCZ_OR_GOTO(h->avctx, pic->mb_type_base,
- (big_mb_num + h->mb_stride) * sizeof(uint32_t),
- fail)
- pic->f.mb_type = pic->mb_type_base + 2 * h->mb_stride + 1;
- pic->f.qscale_table = pic->qscale_table_base + 2 * h->mb_stride + 1;
+ if (!h->qscale_table_pool) {
+ ret = init_table_pools(h);
+ if (ret < 0)
+ goto fail;
+ }
- for (i = 0; i < 2; i++) {
- FF_ALLOCZ_OR_GOTO(h->avctx, pic->motion_val_base[i],
- 2 * (b4_array_size + 4) * sizeof(int16_t),
- fail)
- pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
- FF_ALLOCZ_OR_GOTO(h->avctx, pic->f.ref_index[i],
- 4 * mb_array_size * sizeof(uint8_t), fail)
- }
- pic->f.motion_subsample_log2 = 2;
+ pic->qscale_table_buf = av_buffer_pool_get(h->qscale_table_pool);
+ pic->mb_type_buf = av_buffer_pool_get(h->mb_type_pool);
+ if (!pic->qscale_table_buf || !pic->mb_type_buf)
+ goto fail;
- pic->f.qstride = h->mb_stride;
- }
+ pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
+ pic->qscale_table = pic->qscale_table_buf->data + 2 * h->mb_stride + 1;
+
+ for (i = 0; i < 2; i++) {
+ pic->motion_val_buf[i] = av_buffer_pool_get(h->motion_val_pool);
+ pic->ref_index_buf[i] = av_buffer_pool_get(h->ref_index_pool);
+ if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
+ goto fail;
- pic->owner2 = h;
+ pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
+ pic->ref_index[i] = pic->ref_index_buf[i]->data;
+ }
+ pic->f.motion_subsample_log2 = 2;
return 0;
fail:
- free_frame_buffer(h, pic);
+ unref_picture(h, pic);
return (ret < 0) ? ret : AVERROR(ENOMEM);
}
{
if (pic->f.data[0] == NULL)
return 1;
- if (pic->needs_realloc && !(pic->f.reference & DELAYED_PIC_REF))
- if (!pic->owner2 || pic->owner2 == h)
- return 1;
+ if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
+ return 1;
return 0;
}
{
int i;
- for (i = h->picture_range_start; i < h->picture_range_end; i++) {
+ for (i = 0; i < MAX_PICTURE_COUNT; i++) {
if (pic_is_unused(h, &h->DPB[i]))
break;
}
- if (i == h->picture_range_end)
+ if (i == MAX_PICTURE_COUNT)
return AVERROR_INVALIDDATA;
if (h->DPB[i].needs_realloc) {
h->DPB[i].needs_realloc = 0;
- free_picture(h, &h->DPB[i]);
- avcodec_get_frame_defaults(&h->DPB[i].f);
+ unref_picture(h, &h->DPB[i]);
}
return i;
// Error resilience puts the current picture in the ref list.
// Don't try to wait on these as it will cause a deadlock.
// Fields can wait on each other, though.
- if (ref->f.thread_opaque != h->cur_pic.f.thread_opaque ||
- (ref->f.reference & 3) != h->picture_structure) {
+ if (ref->tf.progress->data != h->cur_pic.tf.progress->data ||
+ (ref->reference & 3) != h->picture_structure) {
my = get_lowest_part_list_y(h, ref, n, height, y_offset, 0);
if (refs[0][ref_n] < 0)
nrefs[0] += 1;
int ref_n = h->ref_cache[1][scan8[n]];
Picture *ref = &h->ref_list[1][ref_n];
- if (ref->f.thread_opaque != h->cur_pic.f.thread_opaque ||
- (ref->f.reference & 3) != h->picture_structure) {
+ if (ref->tf.progress->data != h->cur_pic.tf.progress->data ||
+ (ref->reference & 3) != h->picture_structure) {
my = get_lowest_part_list_y(h, ref, n, height, y_offset, 1);
if (refs[1][ref_n] < 0)
nrefs[1] += 1;
static void await_references(H264Context *h)
{
const int mb_xy = h->mb_xy;
- const int mb_type = h->cur_pic.f.mb_type[mb_xy];
+ const int mb_type = h->cur_pic.mb_type[mb_xy];
int refs[2][48];
int nrefs[2] = { 0 };
int ref, list;
int row = refs[list][ref];
if (row >= 0) {
Picture *ref_pic = &h->ref_list[list][ref];
- int ref_field = ref_pic->f.reference - 1;
+ int ref_field = ref_pic->reference - 1;
int ref_field_picture = ref_pic->field_picture;
int pic_height = 16 * h->mb_height >> ref_field_picture;
nrefs[list]--;
if (!FIELD_PICTURE && ref_field_picture) { // frame referencing two fields
- ff_thread_await_progress(&ref_pic->f,
+ ff_thread_await_progress(&ref_pic->tf,
FFMIN((row >> 1) - !(row & 1),
pic_height - 1),
1);
- ff_thread_await_progress(&ref_pic->f,
+ ff_thread_await_progress(&ref_pic->tf,
FFMIN((row >> 1), pic_height - 1),
0);
} else if (FIELD_PICTURE && !ref_field_picture) { // field referencing one field of a frame
- ff_thread_await_progress(&ref_pic->f,
+ ff_thread_await_progress(&ref_pic->tf,
FFMIN(row * 2 + ref_field,
pic_height - 1),
0);
} else if (FIELD_PICTURE) {
- ff_thread_await_progress(&ref_pic->f,
+ ff_thread_await_progress(&ref_pic->tf,
FFMIN(row, pic_height - 1),
ref_field);
} else {
- ff_thread_await_progress(&ref_pic->f,
+ ff_thread_await_progress(&ref_pic->tf,
FFMIN(row, pic_height - 1),
0);
}
ysh = 3 - (chroma_idc == 2 /* yuv422 */);
if (chroma_idc == 1 /* yuv420 */ && MB_FIELD) {
// chroma offset when predicting from a field of opposite parity
- my += 2 * ((h->mb_y & 1) - (pic->f.reference - 1));
+ my += 2 * ((h->mb_y & 1) - (pic->reference - 1));
emu |= (my >> 3) < 0 || (my >> 3) + 8 >= (pic_height >> 1);
}
av_freep(&h->mb2b_xy);
av_freep(&h->mb2br_xy);
- if (free_rbsp) {
- for (i = 0; i < h->picture_count && !h->avctx->internal->is_copy; i++)
- free_picture(h, &h->DPB[i]);
+ av_buffer_pool_uninit(&h->qscale_table_pool);
+ av_buffer_pool_uninit(&h->mb_type_pool);
+ av_buffer_pool_uninit(&h->motion_val_pool);
+ av_buffer_pool_uninit(&h->ref_index_pool);
+
+ if (free_rbsp && h->DPB) {
+ for (i = 0; i < MAX_PICTURE_COUNT; i++)
+ unref_picture(h, &h->DPB[i]);
av_freep(&h->DPB);
- h->picture_count = 0;
} else if (h->DPB) {
- for (i = 0; i < h->picture_count; i++)
+ for (i = 0; i < MAX_PICTURE_COUNT; i++)
h->DPB[i].needs_realloc = 1;
}
init_dequant_tables(h);
if (!h->DPB) {
- h->picture_count = MAX_PICTURE_COUNT * FFMAX(1, h->avctx->thread_count);
- h->DPB = av_mallocz_array(h->picture_count, sizeof(*h->DPB));
+ h->DPB = av_mallocz_array(MAX_PICTURE_COUNT, sizeof(*h->DPB));
if (!h->DPB)
return AVERROR(ENOMEM);
- for (i = 0; i < h->picture_count; i++)
+ for (i = 0; i < MAX_PICTURE_COUNT; i++)
avcodec_get_frame_defaults(&h->DPB[i].f);
avcodec_get_frame_defaults(&h->cur_pic.f);
}
common_init(h);
h->picture_structure = PICT_FRAME;
- h->picture_range_start = 0;
- h->picture_range_end = MAX_PICTURE_COUNT;
h->slice_context_count = 1;
h->workaround_bugs = avctx->workaround_bugs;
h->flags = avctx->flags;
h->low_delay = 0;
}
+ avctx->internal->allocate_progress = 1;
+
return 0;
}
#undef REBASE_PICTURE
#define REBASE_PICTURE(pic, new_ctx, old_ctx) \
((pic && pic >= old_ctx->DPB && \
- pic < old_ctx->DPB + old_ctx->picture_count) ? \
+ pic < old_ctx->DPB + MAX_PICTURE_COUNT) ? \
&new_ctx->DPB[pic - old_ctx->DPB] : NULL)
static void copy_picture_range(Picture **to, Picture **from, int count,
for (i = 0; i < count; i++) {
assert((IN_RANGE(from[i], old_base, sizeof(*old_base)) ||
IN_RANGE(from[i], old_base->DPB,
- sizeof(Picture) * old_base->picture_count) ||
+ sizeof(Picture) * MAX_PICTURE_COUNT) ||
!from[i]));
to[i] = REBASE_PICTURE(from[i], new_base, old_base);
}
H264Context *h = dst->priv_data, *h1 = src->priv_data;
int inited = h->context_initialized, err = 0;
int context_reinitialized = 0;
- int i;
+ int i, ret;
if (dst == src || !h1->context_initialized)
return 0;
memset(&h->me, 0, sizeof(h->me));
h->context_initialized = 0;
- h->picture_range_start += MAX_PICTURE_COUNT;
- h->picture_range_end += MAX_PICTURE_COUNT;
+ memset(&h->cur_pic, 0, sizeof(h->cur_pic));
+ avcodec_get_frame_defaults(&h->cur_pic.f);
+ h->cur_pic.tf.f = &h->cur_pic.f;
h->avctx = dst;
h->DPB = NULL;
- h->cur_pic.f.extended_data = h->cur_pic.f.data;
+ h->qscale_table_pool = NULL;
+ h->mb_type_pool = NULL;
+ h->ref_index_pool = NULL;
+ h->motion_val_pool = NULL;
if (ff_h264_alloc_tables(h) < 0) {
av_log(dst, AV_LOG_ERROR, "Could not allocate memory for h264\n");
h->data_partitioning = h1->data_partitioning;
h->low_delay = h1->low_delay;
- memcpy(h->DPB, h1->DPB, h1->picture_count * sizeof(*h1->DPB));
-
- // reset s->picture[].f.extended_data to s->picture[].f.data
- for (i = 0; i < h->picture_count; i++)
- h->DPB[i].f.extended_data = h->DPB[i].f.data;
+ for (i = 0; i < MAX_PICTURE_COUNT; i++) {
+ unref_picture(h, &h->DPB[i]);
+ if (h1->DPB[i].f.data[0] &&
+ (ret = ref_picture(h, &h->DPB[i], &h1->DPB[i])) < 0)
+ return ret;
+ }
h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1);
- h->cur_pic = h1->cur_pic;
- h->cur_pic.f.extended_data = h->cur_pic.f.data;
+ unref_picture(h, &h->cur_pic);
+ if ((ret = ref_picture(h, &h->cur_pic, &h1->cur_pic)) < 0)
+ return ret;
h->workaround_bugs = h1->workaround_bugs;
h->low_delay = h1->low_delay;
}
pic = &h->DPB[i];
- pic->f.reference = h->droppable ? 0 : h->picture_structure;
+ pic->reference = h->droppable ? 0 : h->picture_structure;
pic->f.coded_picture_number = h->coded_picture_number++;
pic->field_picture = h->picture_structure != PICT_FRAME;
/*
return ret;
h->cur_pic_ptr = pic;
- h->cur_pic = *h->cur_pic_ptr;
- h->cur_pic.f.extended_data = h->cur_pic.f.data;
+ unref_picture(h, &h->cur_pic);
+ if ((ret = ref_picture(h, &h->cur_pic, h->cur_pic_ptr)) < 0)
+ return ret;
ff_er_frame_start(&h->er);
* get released even with set reference, besides SVQ3 and others do not
* mark frames as reference later "naturally". */
if (h->avctx->codec_id != AV_CODEC_ID_SVQ3)
- h->cur_pic_ptr->f.reference = 0;
+ h->cur_pic_ptr->reference = 0;
h->cur_pic_ptr->field_poc[0] = h->cur_pic_ptr->field_poc[1] = INT_MAX;
int i, pics, out_of_order, out_idx;
int invalid = 0, cnt = 0;
- h->cur_pic_ptr->f.qscale_type = FF_QSCALE_TYPE_H264;
h->cur_pic_ptr->f.pict_type = h->pict_type;
if (h->next_output_pic)
assert(pics <= MAX_DELAYED_PIC_COUNT);
h->delayed_pic[pics++] = cur;
- if (cur->f.reference == 0)
- cur->f.reference = DELAYED_PIC_REF;
+ if (cur->reference == 0)
+ cur->reference = DELAYED_PIC_REF;
/* Frame reordering. This code takes pictures from coding order and sorts
* them by their incremental POC value into display order. It supports POC
}
if (pics > h->avctx->has_b_frames) {
- out->f.reference &= ~DELAYED_PIC_REF;
+ out->reference &= ~DELAYED_PIC_REF;
// for frame threading, the owner must be the second field's thread or
// else the first thread can release the picture and reuse it unsafely
- out->owner2 = h;
for (i = out_idx; h->delayed_pic[i]; i++)
h->delayed_pic[i] = h->delayed_pic[i + 1];
}
void ff_h264_hl_decode_mb(H264Context *h)
{
const int mb_xy = h->mb_xy;
- const int mb_type = h->cur_pic.f.mb_type[mb_xy];
+ const int mb_type = h->cur_pic.mb_type[mb_xy];
int is_complex = CONFIG_SMALL || h->is_complex || IS_INTRA_PCM(mb_type) || h->qscale == 0;
if (CHROMA444) {
h->prev_interlaced_frame = 1;
idr(h);
if (h->cur_pic_ptr)
- h->cur_pic_ptr->f.reference = 0;
+ h->cur_pic_ptr->reference = 0;
h->first_field = 0;
memset(h->ref_list[0], 0, sizeof(h->ref_list[0]));
memset(h->ref_list[1], 0, sizeof(h->ref_list[1]));
for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++) {
if (h->delayed_pic[i])
- h->delayed_pic[i]->f.reference = 0;
+ h->delayed_pic[i]->reference = 0;
h->delayed_pic[i] = NULL;
}
flush_change(h);
- for (i = 0; i < h->picture_count; i++) {
- if (h->DPB[i].f.data[0])
- free_frame_buffer(h, &h->DPB[i]);
- }
+ for (i = 0; i < MAX_PICTURE_COUNT; i++)
+ unref_picture(h, &h->DPB[i]);
h->cur_pic_ptr = NULL;
+ unref_picture(h, &h->cur_pic);
h->mb_x = h->mb_y = 0;
h->mb_y = 0;
if (!in_setup && !h->droppable)
- ff_thread_report_progress(&h->cur_pic_ptr->f, INT_MAX,
+ ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
h->picture_structure == PICT_BOTTOM_FIELD);
if (CONFIG_H264_VDPAU_DECODER &&
h0->current_slice = 0;
if (!h0->first_field) {
- if (h->cur_pic_ptr && !h->droppable &&
- h->cur_pic_ptr->owner2 == h) {
- ff_thread_report_progress(&h->cur_pic_ptr->f, INT_MAX,
+ if (h->cur_pic_ptr && !h->droppable) {
+ ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
h->picture_structure == PICT_BOTTOM_FIELD);
}
h->cur_pic_ptr = NULL;
if (h0->first_field) {
assert(h0->cur_pic_ptr);
assert(h0->cur_pic_ptr->f.data[0]);
- assert(h0->cur_pic_ptr->f.reference != DELAYED_PIC_REF);
-
- /* Mark old field/frame as completed */
- if (!last_pic_droppable && h0->cur_pic_ptr->owner2 == h0) {
- ff_thread_report_progress(&h0->cur_pic_ptr->f, INT_MAX,
- last_pic_structure == PICT_BOTTOM_FIELD);
- }
+ assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF);
/* figure out if we have a complementary field pair */
if (!FIELD_PICTURE || h->picture_structure == last_pic_structure) {
/* Previous field is unmatched. Don't display it, but let it
* remain for reference if marked as such. */
if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
- ff_thread_report_progress(&h0->cur_pic_ptr->f, INT_MAX,
+ ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
last_pic_structure == PICT_TOP_FIELD);
}
} else {
* pair. Throw away previous field except for reference
* purposes. */
if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
- ff_thread_report_progress(&h0->cur_pic_ptr->f, INT_MAX,
+ ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
last_pic_structure == PICT_TOP_FIELD);
}
} else {
h->droppable = last_pic_droppable;
return AVERROR_PATCHWELCOME;
}
-
- /* Take ownership of this buffer. Note that if another thread owned
- * the first field of this buffer, we're not operating on that pointer,
- * so the original thread is still responsible for reporting progress
- * on that first field (or if that was us, we just did that above).
- * By taking ownership, we assign responsibility to ourselves to
- * report progress on the second field. */
- h0->cur_pic_ptr->owner2 = h0;
}
}
}
h->prev_frame_num++;
h->prev_frame_num %= 1 << h->sps.log2_max_frame_num;
h->cur_pic_ptr->frame_num = h->prev_frame_num;
- ff_thread_report_progress(&h->cur_pic_ptr->f, INT_MAX, 0);
- ff_thread_report_progress(&h->cur_pic_ptr->f, INT_MAX, 1);
+ ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
+ ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
if ((ret = ff_generate_sliding_window_mmcos(h, 1)) < 0 &&
h->avctx->err_recognition & AV_EF_EXPLODE)
return ret;
if (h0->first_field) {
assert(h0->cur_pic_ptr);
assert(h0->cur_pic_ptr->f.data[0]);
- assert(h0->cur_pic_ptr->f.reference != DELAYED_PIC_REF);
+ assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF);
/* figure out if we have a complementary field pair */
if (!FIELD_PICTURE || h->picture_structure == last_pic_structure) {
int *ref2frm = h->ref2frm[h->slice_num & (MAX_SLICES - 1)][j];
for (i = 0; i < 16; i++) {
id_list[i] = 60;
- if (h->ref_list[j][i].f.data[0]) {
+ if (j < h->list_count && i < h->ref_count[j] && h->ref_list[j][i].f.buf[0]) {
int k;
- uint8_t *base = h->ref_list[j][i].f.base[0];
+ AVBuffer *buf = h->ref_list[j][i].f.buf[0]->buffer;
for (k = 0; k < h->short_ref_count; k++)
- if (h->short_ref[k]->f.base[0] == base) {
+ if (h->short_ref[k]->f.buf[0]->buffer == buf) {
id_list[i] = k;
break;
}
for (k = 0; k < h->long_ref_count; k++)
- if (h->long_ref[k] && h->long_ref[k]->f.base[0] == base) {
+ if (h->long_ref[k] && h->long_ref[k]->f.buf[0]->buffer == buf) {
id_list[i] = h->short_ref_count + k;
break;
}
ref2frm[1] = -1;
for (i = 0; i < 16; i++)
ref2frm[i + 2] = 4 * id_list[i] +
- (h->ref_list[j][i].f.reference & 3);
+ (h->ref_list[j][i].reference & 3);
ref2frm[18 + 0] =
ref2frm[18 + 1] = -1;
for (i = 16; i < 48; i++)
ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] +
- (h->ref_list[j][i].f.reference & 3);
+ (h->ref_list[j][i].reference & 3);
}
if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
const int b_xy = h->mb2b_xy[top_xy] + 3 * b_stride;
const int b8_xy = 4 * top_xy + 2;
int (*ref2frm)[64] = h->ref2frm[h->slice_table[top_xy] & (MAX_SLICES - 1)][0] + (MB_MBAFF ? 20 : 2);
- AV_COPY128(mv_dst - 1 * 8, h->cur_pic.f.motion_val[list][b_xy + 0]);
+ AV_COPY128(mv_dst - 1 * 8, h->cur_pic.motion_val[list][b_xy + 0]);
ref_cache[0 - 1 * 8] =
- ref_cache[1 - 1 * 8] = ref2frm[list][h->cur_pic.f.ref_index[list][b8_xy + 0]];
+ ref_cache[1 - 1 * 8] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 0]];
ref_cache[2 - 1 * 8] =
- ref_cache[3 - 1 * 8] = ref2frm[list][h->cur_pic.f.ref_index[list][b8_xy + 1]];
+ ref_cache[3 - 1 * 8] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 1]];
} else {
AV_ZERO128(mv_dst - 1 * 8);
AV_WN32A(&ref_cache[0 - 1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
const int b_xy = h->mb2b_xy[left_xy[LTOP]] + 3;
const int b8_xy = 4 * left_xy[LTOP] + 1;
int (*ref2frm)[64] = h->ref2frm[h->slice_table[left_xy[LTOP]] & (MAX_SLICES - 1)][0] + (MB_MBAFF ? 20 : 2);
- AV_COPY32(mv_dst - 1 + 0, h->cur_pic.f.motion_val[list][b_xy + b_stride * 0]);
- AV_COPY32(mv_dst - 1 + 8, h->cur_pic.f.motion_val[list][b_xy + b_stride * 1]);
- AV_COPY32(mv_dst - 1 + 16, h->cur_pic.f.motion_val[list][b_xy + b_stride * 2]);
- AV_COPY32(mv_dst - 1 + 24, h->cur_pic.f.motion_val[list][b_xy + b_stride * 3]);
+ AV_COPY32(mv_dst - 1 + 0, h->cur_pic.motion_val[list][b_xy + b_stride * 0]);
+ AV_COPY32(mv_dst - 1 + 8, h->cur_pic.motion_val[list][b_xy + b_stride * 1]);
+ AV_COPY32(mv_dst - 1 + 16, h->cur_pic.motion_val[list][b_xy + b_stride * 2]);
+ AV_COPY32(mv_dst - 1 + 24, h->cur_pic.motion_val[list][b_xy + b_stride * 3]);
ref_cache[-1 + 0] =
- ref_cache[-1 + 8] = ref2frm[list][h->cur_pic.f.ref_index[list][b8_xy + 2 * 0]];
+ ref_cache[-1 + 8] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 2 * 0]];
ref_cache[-1 + 16] =
- ref_cache[-1 + 24] = ref2frm[list][h->cur_pic.f.ref_index[list][b8_xy + 2 * 1]];
+ ref_cache[-1 + 24] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 2 * 1]];
} else {
AV_ZERO32(mv_dst - 1 + 0);
AV_ZERO32(mv_dst - 1 + 8);
}
{
- int8_t *ref = &h->cur_pic.f.ref_index[list][4 * mb_xy];
+ int8_t *ref = &h->cur_pic.ref_index[list][4 * mb_xy];
int (*ref2frm)[64] = h->ref2frm[h->slice_num & (MAX_SLICES - 1)][0] + (MB_MBAFF ? 20 : 2);
uint32_t ref01 = (pack16to32(ref2frm[list][ref[0]], ref2frm[list][ref[1]]) & 0x00FF00FF) * 0x0101;
uint32_t ref23 = (pack16to32(ref2frm[list][ref[2]], ref2frm[list][ref[3]]) & 0x00FF00FF) * 0x0101;
}
{
- int16_t(*mv_src)[2] = &h->cur_pic.f.motion_val[list][4 * h->mb_x + 4 * h->mb_y * b_stride];
+ int16_t(*mv_src)[2] = &h->cur_pic.motion_val[list][4 * h->mb_x + 4 * h->mb_y * b_stride];
AV_COPY128(mv_dst + 8 * 0, mv_src + 0 * b_stride);
AV_COPY128(mv_dst + 8 * 1, mv_src + 1 * b_stride);
AV_COPY128(mv_dst + 8 * 2, mv_src + 2 * b_stride);
left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
if (FRAME_MBAFF) {
- const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.f.mb_type[mb_xy - 1]);
+ const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
const int curr_mb_field_flag = IS_INTERLACED(mb_type);
if (h->mb_y & 1) {
if (left_mb_field_flag != curr_mb_field_flag)
} else {
if (curr_mb_field_flag)
top_xy += h->mb_stride &
- (((h->cur_pic.f.mb_type[top_xy] >> 7) & 1) - 1);
+ (((h->cur_pic.mb_type[top_xy] >> 7) & 1) - 1);
if (left_mb_field_flag != curr_mb_field_flag)
left_xy[LBOT] += h->mb_stride;
}
* This is a conservative estimate: could also check beta_offset
* and more accurate chroma_qp. */
int qp_thresh = h->qp_thresh; // FIXME strictly we should store qp_thresh for each mb of a slice
- int qp = h->cur_pic.f.qscale_table[mb_xy];
+ int qp = h->cur_pic.qscale_table[mb_xy];
if (qp <= qp_thresh &&
(left_xy[LTOP] < 0 ||
- ((qp + h->cur_pic.f.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) &&
+ ((qp + h->cur_pic.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) &&
(top_xy < 0 ||
- ((qp + h->cur_pic.f.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) {
+ ((qp + h->cur_pic.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) {
if (!FRAME_MBAFF)
return 1;
if ((left_xy[LTOP] < 0 ||
- ((qp + h->cur_pic.f.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) &&
+ ((qp + h->cur_pic.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) &&
(top_xy < h->mb_stride ||
- ((qp + h->cur_pic.f.qscale_table[top_xy - h->mb_stride] + 1) >> 1) <= qp_thresh))
+ ((qp + h->cur_pic.qscale_table[top_xy - h->mb_stride] + 1) >> 1) <= qp_thresh))
return 1;
}
}
- top_type = h->cur_pic.f.mb_type[top_xy];
- left_type[LTOP] = h->cur_pic.f.mb_type[left_xy[LTOP]];
- left_type[LBOT] = h->cur_pic.f.mb_type[left_xy[LBOT]];
+ top_type = h->cur_pic.mb_type[top_xy];
+ left_type[LTOP] = h->cur_pic.mb_type[left_xy[LTOP]];
+ left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]];
if (h->deblocking_filter == 2) {
if (h->slice_table[top_xy] != h->slice_num)
top_type = 0;
int mb_xy, mb_type;
mb_xy = h->mb_xy = mb_x + mb_y * h->mb_stride;
h->slice_num = h->slice_table[mb_xy];
- mb_type = h->cur_pic.f.mb_type[mb_xy];
+ mb_type = h->cur_pic.mb_type[mb_xy];
h->list_count = h->list_counts[mb_xy];
if (FRAME_MBAFF)
uvlinesize, 0);
if (fill_filter_caches(h, mb_type))
continue;
- h->chroma_qp[0] = get_chroma_qp(h, 0, h->cur_pic.f.qscale_table[mb_xy]);
- h->chroma_qp[1] = get_chroma_qp(h, 1, h->cur_pic.f.qscale_table[mb_xy]);
+ h->chroma_qp[0] = get_chroma_qp(h, 0, h->cur_pic.qscale_table[mb_xy]);
+ h->chroma_qp[1] = get_chroma_qp(h, 1, h->cur_pic.qscale_table[mb_xy]);
if (FRAME_MBAFF) {
ff_h264_filter_mb(h, mb_x, mb_y, dest_y, dest_cb, dest_cr,
{
const int mb_xy = h->mb_x + h->mb_y * h->mb_stride;
int mb_type = (h->slice_table[mb_xy - 1] == h->slice_num) ?
- h->cur_pic.f.mb_type[mb_xy - 1] :
+ h->cur_pic.mb_type[mb_xy - 1] :
(h->slice_table[mb_xy - h->mb_stride] == h->slice_num) ?
- h->cur_pic.f.mb_type[mb_xy - h->mb_stride] : 0;
+ h->cur_pic.mb_type[mb_xy - h->mb_stride] : 0;
h->mb_mbaff = h->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0;
}
if (h->droppable)
return;
- ff_thread_report_progress(&h->cur_pic_ptr->f, top + height - 1,
+ ff_thread_report_progress(&h->cur_pic_ptr->tf, top + height - 1,
h->picture_structure == PICT_BOTTOM_FIELD);
}
end:
/* clean up */
- if (h->cur_pic_ptr && h->cur_pic_ptr->owner2 == h &&
- !h->droppable) {
- ff_thread_report_progress(&h->cur_pic_ptr->f, INT_MAX,
+ if (h->cur_pic_ptr && !h->droppable) {
+ ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
h->picture_structure == PICT_BOTTOM_FIELD);
}
H264Context *h = avctx->priv_data;
AVFrame *pict = data;
int buf_index = 0;
+ int ret;
h->flags = avctx->flags;
h->delayed_pic[i] = h->delayed_pic[i + 1];
if (out) {
+ if ((ret = av_frame_ref(pict, &out->f)) < 0)
+ return ret;
*got_frame = 1;
- *pict = out->f;
}
return buf_index;
/* Wait for second field. */
*got_frame = 0;
} else {
+ if ((ret = av_frame_ref(pict, &h->next_output_pic->f)) < 0)
+ return ret;
*got_frame = 1;
- *pict = h->next_output_pic->f;
}
}
ff_h264_free_context(h);
- if (h->DPB && !h->avctx->internal->is_copy) {
- for (i = 0; i < h->picture_count; i++) {
- free_picture(h, &h->DPB[i]);
+ if (h->DPB) {
+ for (i = 0; i < MAX_PICTURE_COUNT; i++) {
+ unref_picture(h, &h->DPB[i]);
}
}
av_freep(&h->DPB);
+ unref_picture(h, &h->cur_pic);
+
return 0;
}
Picture *DPB;
Picture *cur_pic_ptr;
Picture cur_pic;
- int picture_count;
- int picture_range_start, picture_range_end;
int pixel_shift; ///< 0 for 8-bit H264, 1 for high-bit-depth H264
int chroma_qp[2]; // QPc
uint8_t *bipred_scratchpad;
uint8_t *edge_emu_buffer;
int16_t *dc_val_base;
+
+ AVBufferPool *qscale_table_pool;
+ AVBufferPool *mb_type_pool;
+ AVBufferPool *motion_val_pool;
+ AVBufferPool *ref_index_pool;
} H264Context;
extern const uint8_t ff_h264_chroma_qp[3][QP_MAX_NUM + 1]; ///< One chroma qp table for each supported bit depth (8, 9, 10).
int b_xy, int b8_xy,
int mb_type, int list)
{
- int16_t(*mv_dst)[2] = &h->cur_pic.f.motion_val[list][b_xy];
+ int16_t(*mv_dst)[2] = &h->cur_pic.motion_val[list][b_xy];
int16_t(*mv_src)[2] = &h->mv_cache[list][scan8[0]];
AV_COPY128(mv_dst + 0 * b_stride, mv_src + 8 * 0);
AV_COPY128(mv_dst + 1 * b_stride, mv_src + 8 * 1);
}
{
- int8_t *ref_index = &h->cur_pic.f.ref_index[list][b8_xy];
+ int8_t *ref_index = &h->cur_pic.ref_index[list][b8_xy];
int8_t *ref_cache = h->ref_cache[list];
ref_index[0 + 0 * 2] = ref_cache[scan8[0]];
ref_index[1 + 0 * 2] = ref_cache[scan8[4]];
if (USES_LIST(mb_type, 0)) {
write_back_motion_list(h, b_stride, b_xy, b8_xy, mb_type, 0);
} else {
- fill_rectangle(&h->cur_pic.f.ref_index[0][b8_xy],
+ fill_rectangle(&h->cur_pic.ref_index[0][b8_xy],
2, 2, 2, (uint8_t)LIST_NOT_USED, 1);
}
if (USES_LIST(mb_type, 1))
unsigned long ctx = 0;
- ctx += h->mb_field_decoding_flag & !!h->mb_x; //for FMO:(s->current_picture.f.mb_type[mba_xy] >> 7) & (h->slice_table[mba_xy] == h->slice_num);
- ctx += (h->cur_pic.f.mb_type[mbb_xy] >> 7) & (h->slice_table[mbb_xy] == h->slice_num);
+ ctx += h->mb_field_decoding_flag & !!h->mb_x; //for FMO:(s->current_picture.mb_type[mba_xy] >> 7) & (h->slice_table[mba_xy] == h->slice_num);
+ ctx += (h->cur_pic.mb_type[mbb_xy] >> 7) & (h->slice_table[mbb_xy] == h->slice_num);
return get_cabac_noinline( &h->cabac, &(h->cabac_state+70)[ctx] );
}
mba_xy = mb_xy - 1;
if( (mb_y&1)
&& h->slice_table[mba_xy] == h->slice_num
- && MB_FIELD == !!IS_INTERLACED( h->cur_pic.f.mb_type[mba_xy] ) )
+ && MB_FIELD == !!IS_INTERLACED( h->cur_pic.mb_type[mba_xy] ) )
mba_xy += h->mb_stride;
if( MB_FIELD ){
mbb_xy = mb_xy - h->mb_stride;
if( !(mb_y&1)
&& h->slice_table[mbb_xy] == h->slice_num
- && IS_INTERLACED( h->cur_pic.f.mb_type[mbb_xy] ) )
+ && IS_INTERLACED( h->cur_pic.mb_type[mbb_xy] ) )
mbb_xy -= h->mb_stride;
}else
mbb_xy = mb_x + (mb_y-1)*h->mb_stride;
mbb_xy = mb_xy - (h->mb_stride << FIELD_PICTURE);
}
- if( h->slice_table[mba_xy] == h->slice_num && !IS_SKIP(h->cur_pic.f.mb_type[mba_xy] ))
+ if( h->slice_table[mba_xy] == h->slice_num && !IS_SKIP(h->cur_pic.mb_type[mba_xy] ))
ctx++;
- if( h->slice_table[mbb_xy] == h->slice_num && !IS_SKIP(h->cur_pic.f.mb_type[mbb_xy] ))
+ if( h->slice_table[mbb_xy] == h->slice_num && !IS_SKIP(h->cur_pic.mb_type[mbb_xy] ))
ctx++;
if( h->slice_type_nos == AV_PICTURE_TYPE_B )
/* read skip flags */
if( skip ) {
if( FRAME_MBAFF && (h->mb_y&1)==0 ){
- h->cur_pic.f.mb_type[mb_xy] = MB_TYPE_SKIP;
+ h->cur_pic.mb_type[mb_xy] = MB_TYPE_SKIP;
h->next_mb_skipped = decode_cabac_mb_skip( h, h->mb_x, h->mb_y+1 );
if(!h->next_mb_skipped)
h->mb_mbaff = h->mb_field_decoding_flag = decode_cabac_field_decoding_flag(h);
h->cbp_table[mb_xy] = 0xf7ef;
h->chroma_pred_mode_table[mb_xy] = 0;
// In deblocking, the quantizer is 0
- h->cur_pic.f.qscale_table[mb_xy] = 0;
+ h->cur_pic.qscale_table[mb_xy] = 0;
// All coeffs are present
memset(h->non_zero_count[mb_xy], 16, 48);
- h->cur_pic.f.mb_type[mb_xy] = mb_type;
+ h->cur_pic.mb_type[mb_xy] = mb_type;
h->last_qscale_diff = 0;
return 0;
}
AV_WN32A(&nnz_cache[4+8*10], top_empty);
}
}
- h->cur_pic.f.mb_type[mb_xy] = mb_type;
+ h->cur_pic.mb_type[mb_xy] = mb_type;
if( cbp || IS_INTRA16x16( mb_type ) ) {
const uint8_t *scan, *scan8x8;
h->last_qscale_diff = 0;
}
- h->cur_pic.f.qscale_table[mb_xy] = h->qscale;
+ h->cur_pic.qscale_table[mb_xy] = h->qscale;
write_back_non_zero_count(h);
return 0;
skip_bits_long(&h->gb, mb_size);
// In deblocking, the quantizer is 0
- h->cur_pic.f.qscale_table[mb_xy] = 0;
+ h->cur_pic.qscale_table[mb_xy] = 0;
// All coeffs are present
memset(h->non_zero_count[mb_xy], 16, 48);
- h->cur_pic.f.mb_type[mb_xy] = mb_type;
+ h->cur_pic.mb_type[mb_xy] = mb_type;
return 0;
}
}
h->cbp=
h->cbp_table[mb_xy]= cbp;
- h->cur_pic.f.mb_type[mb_xy] = mb_type;
+ h->cur_pic.mb_type[mb_xy] = mb_type;
if(cbp || IS_INTRA16x16(mb_type)){
int i4x4, i8x8, chroma_idx;
fill_rectangle(&h->non_zero_count_cache[scan8[16]], 4, 4, 8, 0, 1);
fill_rectangle(&h->non_zero_count_cache[scan8[32]], 4, 4, 8, 0, 1);
}
- h->cur_pic.f.qscale_table[mb_xy] = h->qscale;
+ h->cur_pic.qscale_table[mb_xy] = h->qscale;
write_back_non_zero_count(h);
return 0;
poc= (poc&~3) + rfield + 1;
for(j=start; j<end; j++){
- if (4 * h->ref_list[0][j].frame_num + (h->ref_list[0][j].f.reference & 3) == poc) {
+ if (4 * h->ref_list[0][j].frame_num + (h->ref_list[0][j].reference & 3) == poc) {
int cur_ref= mbafi ? (j-16)^field : j;
if (ref1->mbaff)
map[list][2 * old_ref + (rfield^field) + 16] = cur_ref;
Picture * const cur = h->cur_pic_ptr;
int list, j, field;
int sidx= (h->picture_structure&1)^1;
- int ref1sidx = (ref1->f.reference&1)^1;
+ int ref1sidx = (ref1->reference&1)^1;
for(list=0; list<2; list++){
cur->ref_count[sidx][list] = h->ref_count[list];
for(j=0; j<h->ref_count[list]; j++)
- cur->ref_poc[sidx][list][j] = 4 * h->ref_list[list][j].frame_num + (h->ref_list[list][j].f.reference & 3);
+ cur->ref_poc[sidx][list][j] = 4 * h->ref_list[list][j].frame_num + (h->ref_list[list][j].reference & 3);
}
if(h->picture_structure == PICT_FRAME){
int *col_poc = h->ref_list[1]->field_poc;
h->col_parity= (FFABS(col_poc[0] - cur_poc) >= FFABS(col_poc[1] - cur_poc));
ref1sidx=sidx= h->col_parity;
- } else if (!(h->picture_structure & h->ref_list[1][0].f.reference) && !h->ref_list[1][0].mbaff) { // FL -> FL & differ parity
- h->col_fieldoff = 2 * h->ref_list[1][0].f.reference - 3;
+ } else if (!(h->picture_structure & h->ref_list[1][0].reference) && !h->ref_list[1][0].mbaff) { // FL -> FL & differ parity
+ h->col_fieldoff = 2 * h->ref_list[1][0].reference - 3;
}
if (h->slice_type_nos != AV_PICTURE_TYPE_B || h->direct_spatial_mv_pred)
static void await_reference_mb_row(H264Context * const h, Picture *ref, int mb_y)
{
- int ref_field = ref->f.reference - 1;
+ int ref_field = ref->reference - 1;
int ref_field_picture = ref->field_picture;
int ref_height = 16*h->mb_height >> ref_field_picture;
//FIXME it can be safe to access mb stuff
//even if pixels aren't deblocked yet
- ff_thread_await_progress(&ref->f,
+ ff_thread_await_progress(&ref->tf,
FFMIN(16 * mb_y >> ref_field_picture, ref_height - 1),
ref_field_picture && ref_field);
}
int mv[2];
int list;
- assert(h->ref_list[1][0].f.reference & 3);
+ assert(h->ref_list[1][0].reference & 3);
await_reference_mb_row(h, &h->ref_list[1][0], h->mb_y + !!IS_INTERLACED(*mb_type));
return;
}
- if (IS_INTERLACED(h->ref_list[1][0].f.mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
+ if (IS_INTERLACED(h->ref_list[1][0].mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
if (!IS_INTERLACED(*mb_type)) { // AFR/FR -> AFL/FL
mb_y = (h->mb_y&~1) + h->col_parity;
mb_xy= h->mb_x + ((h->mb_y&~1) + h->col_parity)*h->mb_stride;
if(IS_INTERLACED(*mb_type)){ // AFL /FL -> AFR/FR
mb_y = h->mb_y&~1;
mb_xy= h->mb_x + (h->mb_y&~1)*h->mb_stride;
- mb_type_col[0] = h->ref_list[1][0].f.mb_type[mb_xy];
- mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy + h->mb_stride];
+ mb_type_col[0] = h->ref_list[1][0].mb_type[mb_xy];
+ mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy + h->mb_stride];
b8_stride = 2+4*h->mb_stride;
b4_stride *= 6;
if (IS_INTERLACED(mb_type_col[0]) != IS_INTERLACED(mb_type_col[1])) {
}else{ // AFR/FR -> AFR/FR
single_col:
mb_type_col[0] =
- mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy];
+ mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy];
sub_mb_type |= MB_TYPE_16x16|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
if(!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)){
await_reference_mb_row(h, &h->ref_list[1][0], mb_y);
- l1mv0 = &h->ref_list[1][0].f.motion_val[0][h->mb2b_xy [mb_xy]];
- l1mv1 = &h->ref_list[1][0].f.motion_val[1][h->mb2b_xy [mb_xy]];
- l1ref0 = &h->ref_list[1][0].f.ref_index [0][4 * mb_xy];
- l1ref1 = &h->ref_list[1][0].f.ref_index [1][4 * mb_xy];
+ l1mv0 = &h->ref_list[1][0].motion_val[0][h->mb2b_xy [mb_xy]];
+ l1mv1 = &h->ref_list[1][0].motion_val[1][h->mb2b_xy [mb_xy]];
+ l1ref0 = &h->ref_list[1][0].ref_index [0][4 * mb_xy];
+ l1ref1 = &h->ref_list[1][0].ref_index [1][4 * mb_xy];
if(!b8_stride){
if(h->mb_y&1){
l1ref0 += 2;
unsigned int sub_mb_type;
int i8, i4;
- assert(h->ref_list[1][0].f.reference & 3);
+ assert(h->ref_list[1][0].reference & 3);
await_reference_mb_row(h, &h->ref_list[1][0], h->mb_y + !!IS_INTERLACED(*mb_type));
- if (IS_INTERLACED(h->ref_list[1][0].f.mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
+ if (IS_INTERLACED(h->ref_list[1][0].mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
if (!IS_INTERLACED(*mb_type)) { // AFR/FR -> AFL/FL
mb_y = (h->mb_y&~1) + h->col_parity;
mb_xy= h->mb_x + ((h->mb_y&~1) + h->col_parity)*h->mb_stride;
if(IS_INTERLACED(*mb_type)){ // AFL /FL -> AFR/FR
mb_y = h->mb_y&~1;
mb_xy= h->mb_x + (h->mb_y&~1)*h->mb_stride;
- mb_type_col[0] = h->ref_list[1][0].f.mb_type[mb_xy];
- mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy + h->mb_stride];
+ mb_type_col[0] = h->ref_list[1][0].mb_type[mb_xy];
+ mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy + h->mb_stride];
b8_stride = 2+4*h->mb_stride;
b4_stride *= 6;
if (IS_INTERLACED(mb_type_col[0]) != IS_INTERLACED(mb_type_col[1])) {
}else{ // AFR/FR -> AFR/FR
single_col:
mb_type_col[0] =
- mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy];
+ mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy];
sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
if(!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)){
await_reference_mb_row(h, &h->ref_list[1][0], mb_y);
- l1mv0 = &h->ref_list[1][0].f.motion_val[0][h->mb2b_xy [mb_xy]];
- l1mv1 = &h->ref_list[1][0].f.motion_val[1][h->mb2b_xy [mb_xy]];
- l1ref0 = &h->ref_list[1][0].f.ref_index [0][4 * mb_xy];
- l1ref1 = &h->ref_list[1][0].f.ref_index [1][4 * mb_xy];
+ l1mv0 = &h->ref_list[1][0].motion_val[0][h->mb2b_xy [mb_xy]];
+ l1mv1 = &h->ref_list[1][0].motion_val[1][h->mb2b_xy [mb_xy]];
+ l1ref0 = &h->ref_list[1][0].ref_index [0][4 * mb_xy];
+ l1ref1 = &h->ref_list[1][0].ref_index [1][4 * mb_xy];
if(!b8_stride){
if(h->mb_y&1){
l1ref0 += 2;
int a = h->slice_alpha_c0_offset - qp_bd_offset;
int b = h->slice_beta_offset - qp_bd_offset;
- int mb_type = h->cur_pic.f.mb_type[mb_xy];
- int qp = h->cur_pic.f.qscale_table[mb_xy];
- int qp0 = h->cur_pic.f.qscale_table[mb_xy - 1];
- int qp1 = h->cur_pic.f.qscale_table[h->top_mb_xy];
+ int mb_type = h->cur_pic.mb_type[mb_xy];
+ int qp = h->cur_pic.qscale_table[mb_xy];
+ int qp0 = h->cur_pic.qscale_table[mb_xy - 1];
+ int qp1 = h->cur_pic.qscale_table[h->top_mb_xy];
int qpc = get_chroma_qp( h, 0, qp );
int qpc0 = get_chroma_qp( h, 0, qp0 );
int qpc1 = get_chroma_qp( h, 0, qp1 );
for(j=0; j<2; j++, mbn_xy += h->mb_stride){
DECLARE_ALIGNED(8, int16_t, bS)[4];
int qp;
- if (IS_INTRA(mb_type | h->cur_pic.f.mb_type[mbn_xy])) {
+ if (IS_INTRA(mb_type | h->cur_pic.mb_type[mbn_xy])) {
AV_WN64A(bS, 0x0003000300030003ULL);
} else {
- if (!CABAC && IS_8x8DCT(h->cur_pic.f.mb_type[mbn_xy])) {
+ if (!CABAC && IS_8x8DCT(h->cur_pic.mb_type[mbn_xy])) {
bS[0]= 1+((h->cbp_table[mbn_xy] & 0x4000)||h->non_zero_count_cache[scan8[0]+0]);
bS[1]= 1+((h->cbp_table[mbn_xy] & 0x4000)||h->non_zero_count_cache[scan8[0]+1]);
bS[2]= 1+((h->cbp_table[mbn_xy] & 0x8000)||h->non_zero_count_cache[scan8[0]+2]);
}
// Do not use s->qscale as luma quantizer because it has not the same
// value in IPCM macroblocks.
- qp = (h->cur_pic.f.qscale_table[mb_xy] + h->cur_pic.f.qscale_table[mbn_xy] + 1) >> 1;
+ qp = (h->cur_pic.qscale_table[mb_xy] + h->cur_pic.qscale_table[mbn_xy] + 1) >> 1;
tprintf(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, tmp_linesize, tmp_uvlinesize);
{ int i; for (i = 0; i < 4; i++) tprintf(h->avctx, " bS[%d]:%d", i, bS[i]); tprintf(h->avctx, "\n"); }
filter_mb_edgeh( &img_y[j*linesize], tmp_linesize, bS, qp, a, b, h, 0 );
- chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, h->cur_pic.f.qscale_table[mbn_xy]) + 1) >> 1;
- chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, h->cur_pic.f.qscale_table[mbn_xy]) + 1) >> 1;
+ chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, h->cur_pic.qscale_table[mbn_xy]) + 1) >> 1;
+ chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, h->cur_pic.qscale_table[mbn_xy]) + 1) >> 1;
if (chroma) {
if (chroma444) {
filter_mb_edgeh (&img_cb[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[0], a, b, h, 0);
// Do not use s->qscale as luma quantizer because it has not the same
// value in IPCM macroblocks.
if(bS[0]+bS[1]+bS[2]+bS[3]){
- qp = (h->cur_pic.f.qscale_table[mb_xy] + h->cur_pic.f.qscale_table[mbm_xy] + 1) >> 1;
+ qp = (h->cur_pic.qscale_table[mb_xy] + h->cur_pic.qscale_table[mbm_xy] + 1) >> 1;
tprintf(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
- chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, h->cur_pic.f.qscale_table[mbm_xy]) + 1) >> 1;
- chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, h->cur_pic.f.qscale_table[mbm_xy]) + 1) >> 1;
+ chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, h->cur_pic.qscale_table[mbm_xy]) + 1) >> 1;
+ chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, h->cur_pic.qscale_table[mbm_xy]) + 1) >> 1;
if( dir == 0 ) {
filter_mb_edgev( &img_y[0], linesize, bS, qp, a, b, h, 1 );
if (chroma) {
/* Filter edge */
// Do not use s->qscale as luma quantizer because it has not the same
// value in IPCM macroblocks.
- qp = h->cur_pic.f.qscale_table[mb_xy];
+ qp = h->cur_pic.qscale_table[mb_xy];
tprintf(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
if( dir == 0 ) {
filter_mb_edgev( &img_y[4*edge << h->pixel_shift], linesize, bS, qp, a, b, h, 0 );
void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize) {
const int mb_xy= mb_x + mb_y*h->mb_stride;
- const int mb_type = h->cur_pic.f.mb_type[mb_xy];
+ const int mb_type = h->cur_pic.mb_type[mb_xy];
const int mvy_limit = IS_INTERLACED(mb_type) ? 2 : 4;
int first_vertical_edge_done = 0;
av_unused int dir;
}
}
- mb_qp = h->cur_pic.f.qscale_table[mb_xy];
- mbn0_qp = h->cur_pic.f.qscale_table[h->left_mb_xy[0]];
- mbn1_qp = h->cur_pic.f.qscale_table[h->left_mb_xy[1]];
+ mb_qp = h->cur_pic.qscale_table[mb_xy];
+ mbn0_qp = h->cur_pic.qscale_table[h->left_mb_xy[0]];
+ mbn1_qp = h->cur_pic.qscale_table[h->left_mb_xy[1]];
qp[0] = ( mb_qp + mbn0_qp + 1 ) >> 1;
bqp[0] = ( get_chroma_qp( h, 0, mb_qp ) +
get_chroma_qp( h, 0, mbn0_qp ) + 1 ) >> 1;
const int mb_x = h->mb_x;
const int mb_y = h->mb_y;
const int mb_xy = h->mb_xy;
- const int mb_type = h->cur_pic.f.mb_type[mb_xy];
+ const int mb_type = h->cur_pic.mb_type[mb_xy];
uint8_t *dest_y, *dest_cb, *dest_cr;
int linesize, uvlinesize /*dct_offset*/;
int i, j;
const int mb_x = h->mb_x;
const int mb_y = h->mb_y;
const int mb_xy = h->mb_xy;
- const int mb_type = h->cur_pic.f.mb_type[mb_xy];
+ const int mb_type = h->cur_pic.mb_type[mb_xy];
uint8_t *dest[3];
int linesize;
int i, j, p;
h264_biweight_func *weight_avg)
{
const int mb_xy = h->mb_xy;
- const int mb_type = h->cur_pic.f.mb_type[mb_xy];
+ const int mb_type = h->cur_pic.mb_type[mb_xy];
assert(IS_INTER(mb_type));
const int mb_type = mb_types[xy + (y4 >> 2) * h->mb_stride]; \
if (!USES_LIST(mb_type, list)) \
return LIST_NOT_USED; \
- mv = h->cur_pic_ptr->f.motion_val[list][h->mb2b_xy[xy] + 3 + y4 * h->b_stride]; \
+ mv = h->cur_pic_ptr->motion_val[list][h->mb2b_xy[xy] + 3 + y4 * h->b_stride]; \
h->mv_cache[list][scan8[0] - 2][0] = mv[0]; \
h->mv_cache[list][scan8[0] - 2][1] = mv[1] MV_OP; \
- return h->cur_pic_ptr->f.ref_index[list][4 * xy + 1 + (y4 & ~1)] REF_OP;
+ return h->cur_pic_ptr->ref_index[list][4 * xy + 1 + (y4 & ~1)] REF_OP;
if (topright_ref == PART_NOT_AVAILABLE
&& i >= scan8[0] + 8 && (i & 7) == 4
&& h->ref_cache[list][scan8[0] - 1] != PART_NOT_AVAILABLE) {
- const uint32_t *mb_types = h->cur_pic_ptr->f.mb_type;
+ const uint32_t *mb_types = h->cur_pic_ptr->mb_type;
const int16_t *mv;
AV_ZERO32(h->mv_cache[list][scan8[0] - 2]);
*C = h->mv_cache[list][scan8[0] - 2];
{
DECLARE_ALIGNED(4, static const int16_t, zeromv)[2] = { 0 };
DECLARE_ALIGNED(4, int16_t, mvbuf)[3][2];
- int8_t *ref = h->cur_pic.f.ref_index[0];
- int16_t(*mv)[2] = h->cur_pic.f.motion_val[0];
+ int8_t *ref = h->cur_pic.ref_index[0];
+ int16_t(*mv)[2] = h->cur_pic.motion_val[0];
int top_ref, left_ref, diagonal_ref, match_count, mx, my;
const int16_t *A, *B, *C;
int b_stride = h->b_stride;
left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
h->left_block = left_block_options[0];
if (FRAME_MBAFF) {
- const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.f.mb_type[mb_xy - 1]);
+ const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
const int curr_mb_field_flag = IS_INTERLACED(mb_type);
if (h->mb_y & 1) {
if (left_mb_field_flag != curr_mb_field_flag) {
}
} else {
if (curr_mb_field_flag) {
- topleft_xy += h->mb_stride & (((h->cur_pic.f.mb_type[top_xy - 1] >> 7) & 1) - 1);
- topright_xy += h->mb_stride & (((h->cur_pic.f.mb_type[top_xy + 1] >> 7) & 1) - 1);
- top_xy += h->mb_stride & (((h->cur_pic.f.mb_type[top_xy] >> 7) & 1) - 1);
+ topleft_xy += h->mb_stride & (((h->cur_pic.mb_type[top_xy - 1] >> 7) & 1) - 1);
+ topright_xy += h->mb_stride & (((h->cur_pic.mb_type[top_xy + 1] >> 7) & 1) - 1);
+ top_xy += h->mb_stride & (((h->cur_pic.mb_type[top_xy] >> 7) & 1) - 1);
}
if (left_mb_field_flag != curr_mb_field_flag) {
if (curr_mb_field_flag) {
h->left_mb_xy[LBOT] = left_xy[LBOT];
//FIXME do we need all in the context?
- h->topleft_type = h->cur_pic.f.mb_type[topleft_xy];
- h->top_type = h->cur_pic.f.mb_type[top_xy];
- h->topright_type = h->cur_pic.f.mb_type[topright_xy];
- h->left_type[LTOP] = h->cur_pic.f.mb_type[left_xy[LTOP]];
- h->left_type[LBOT] = h->cur_pic.f.mb_type[left_xy[LBOT]];
+ h->topleft_type = h->cur_pic.mb_type[topleft_xy];
+ h->top_type = h->cur_pic.mb_type[top_xy];
+ h->topright_type = h->cur_pic.mb_type[topright_xy];
+ h->left_type[LTOP] = h->cur_pic.mb_type[left_xy[LTOP]];
+ h->left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]];
if (FMO) {
if (h->slice_table[topleft_xy] != h->slice_num)
h->left_samples_available &= 0xFF5F;
}
} else {
- int left_typei = h->cur_pic.f.mb_type[left_xy[LTOP] + h->mb_stride];
+ int left_typei = h->cur_pic.mb_type[left_xy[LTOP] + h->mb_stride];
assert(left_xy[LTOP] == left_xy[LBOT]);
if (!((left_typei & type_mask) && (left_type[LTOP] & type_mask))) {
int b_stride = h->b_stride;
for (list = 0; list < h->list_count; list++) {
int8_t *ref_cache = &h->ref_cache[list][scan8[0]];
- int8_t *ref = h->cur_pic.f.ref_index[list];
+ int8_t *ref = h->cur_pic.ref_index[list];
int16_t(*mv_cache)[2] = &h->mv_cache[list][scan8[0]];
- int16_t(*mv)[2] = h->cur_pic.f.motion_val[list];
+ int16_t(*mv)[2] = h->cur_pic.motion_val[list];
if (!USES_LIST(mb_type, list))
continue;
assert(!(IS_DIRECT(mb_type) && !h->direct_spatial_mv_pred));
}
write_back_motion(h, mb_type);
- h->cur_pic.f.mb_type[mb_xy] = mb_type;
- h->cur_pic.f.qscale_table[mb_xy] = h->qscale;
+ h->cur_pic.mb_type[mb_xy] = mb_type;
+ h->cur_pic.qscale_table[mb_xy] = h->qscale;
h->slice_table[mb_xy] = h->slice_num;
h->prev_mb_skipped = 1;
}
//#undef NDEBUG
#include <assert.h>
+#define COPY_PICTURE(dst, src) \
+do {\
+ *(dst) = *(src);\
+ (dst)->f.extended_data = (dst)->f.data;\
+ (dst)->tf.f = &(dst)->f;\
+} while (0)
+
static void pic_as_field(Picture *pic, const int parity){
int i;
for (i = 0; i < 4; ++i) {
if (parity == PICT_BOTTOM_FIELD)
pic->f.data[i] += pic->f.linesize[i];
- pic->f.reference = parity;
+ pic->reference = parity;
pic->f.linesize[i] *= 2;
}
pic->poc= pic->field_poc[parity == PICT_BOTTOM_FIELD];
static int split_field_copy(Picture *dest, Picture *src,
int parity, int id_add){
- int match = !!(src->f.reference & parity);
+ int match = !!(src->reference & parity);
if (match) {
- *dest = *src;
+ COPY_PICTURE(dest, src);
if(parity != PICT_FRAME){
pic_as_field(dest, parity);
dest->pic_id *= 2;
int index=0;
while(i[0]<len || i[1]<len){
- while (i[0] < len && !(in[ i[0] ] && (in[ i[0] ]->f.reference & sel)))
+ while (i[0] < len && !(in[ i[0] ] && (in[ i[0] ]->reference & sel)))
i[0]++;
- while (i[1] < len && !(in[ i[1] ] && (in[ i[1] ]->f.reference & (sel^3))))
+ while (i[1] < len && !(in[ i[1] ] && (in[ i[1] ]->reference & (sel^3))))
i[1]++;
if(i[0] < len){
in[ i[0] ]->pic_id= is_long ? i[0] : in[ i[0] ]->frame_num;
if(lens[0] == lens[1] && lens[1] > 1){
for (i = 0; h->default_ref_list[0][i].f.data[0] == h->default_ref_list[1][i].f.data[0] && i < lens[0]; i++);
- if(i == lens[0])
- FFSWAP(Picture, h->default_ref_list[1][0], h->default_ref_list[1][1]);
+ if (i == lens[0]) {
+ Picture tmp;
+ COPY_PICTURE(&tmp, &h->default_ref_list[1][0]);
+ COPY_PICTURE(&h->default_ref_list[1][0], &h->default_ref_list[1][1]);
+ COPY_PICTURE(&h->default_ref_list[1][1], &tmp);
+ }
}
}else{
len = build_def_list(h->default_ref_list[0] , h->short_ref, h->short_ref_count, 0, h->picture_structure);
}
int ff_h264_decode_ref_pic_list_reordering(H264Context *h){
- int list, index, pic_structure;
+ int list, index, pic_structure, i;
print_short_term(h);
print_long_term(h);
for(list=0; list<h->list_count; list++){
- memcpy(h->ref_list[list], h->default_ref_list[list], sizeof(Picture)*h->ref_count[list]);
+ for (i = 0; i < h->ref_count[list]; i++)
+ COPY_PICTURE(&h->ref_list[list][i], &h->default_ref_list[list][i]);
if(get_bits1(&h->gb)){
int pred= h->curr_pic_num;
for(i= h->short_ref_count-1; i>=0; i--){
ref = h->short_ref[i];
- assert(ref->f.reference);
+ assert(ref->reference);
assert(!ref->long_ref);
if(
ref->frame_num == frame_num &&
- (ref->f.reference & pic_structure)
+ (ref->reference & pic_structure)
)
break;
}
return -1;
}
ref = h->long_ref[long_idx];
- assert(!(ref && !ref->f.reference));
- if (ref && (ref->f.reference & pic_structure)) {
+ assert(!(ref && !ref->reference));
+ if (ref && (ref->reference & pic_structure)) {
ref->pic_id= pic_id;
assert(ref->long_ref);
i=0;
break;
}
for(; i > index; i--){
- h->ref_list[list][i]= h->ref_list[list][i-1];
+ COPY_PICTURE(&h->ref_list[list][i], &h->ref_list[list][i - 1]);
}
- h->ref_list[list][index]= *ref;
+ COPY_PICTURE(&h->ref_list[list][index], ref);
if (FIELD_PICTURE){
pic_as_field(&h->ref_list[list][index], pic_structure);
}
if (!h->ref_list[list][index].f.data[0]) {
av_log(h->avctx, AV_LOG_ERROR, "Missing reference picture\n");
if (h->default_ref_list[list][0].f.data[0])
- h->ref_list[list][index]= h->default_ref_list[list][0];
+ COPY_PICTURE(&h->ref_list[list][index], &h->default_ref_list[list][0]);
else
return -1;
}
for(i=0; i<h->ref_count[list]; i++){
Picture *frame = &h->ref_list[list][i];
Picture *field = &h->ref_list[list][16+2*i];
- field[0] = *frame;
+ COPY_PICTURE(field, frame);
for(j=0; j<3; j++)
field[0].f.linesize[j] <<= 1;
- field[0].f.reference = PICT_TOP_FIELD;
+ field[0].reference = PICT_TOP_FIELD;
field[0].poc= field[0].field_poc[0];
- field[1] = field[0];
+ COPY_PICTURE(field + 1, field);
for(j=0; j<3; j++)
field[1].f.data[j] += frame->f.linesize[j];
- field[1].f.reference = PICT_BOTTOM_FIELD;
+ field[1].reference = PICT_BOTTOM_FIELD;
field[1].poc= field[1].field_poc[1];
h->luma_weight[16+2*i][list][0] = h->luma_weight[16+2*i+1][list][0] = h->luma_weight[i][list][0];
*/
static inline int unreference_pic(H264Context *h, Picture *pic, int refmask){
int i;
- if (pic->f.reference &= refmask) {
+ if (pic->reference &= refmask) {
return 0;
} else {
for(i = 0; h->delayed_pic[i]; i++)
if(pic == h->delayed_pic[i]){
- pic->f.reference = DELAYED_PIC_REF;
+ pic->reference = DELAYED_PIC_REF;
break;
}
return 1;
if (h->short_ref_count &&
h->long_ref_count + h->short_ref_count == h->sps.ref_frame_count &&
- !(FIELD_PICTURE && !h->first_field && h->cur_pic_ptr->f.reference)) {
+ !(FIELD_PICTURE && !h->first_field && h->cur_pic_ptr->reference)) {
mmco[0].opcode = MMCO_SHORT2UNUSED;
mmco[0].short_pic_num = h->short_ref[h->short_ref_count - 1]->frame_num;
mmco_index = 1;
h->long_ref_count++;
}
- h->cur_pic_ptr->f.reference |= h->picture_structure;
+ h->cur_pic_ptr->reference |= h->picture_structure;
current_ref_assigned=1;
break;
case MMCO_SET_MAX_LONG:
*/
if (h->short_ref_count && h->short_ref[0] == h->cur_pic_ptr) {
/* Just mark the second field valid */
- h->cur_pic_ptr->f.reference = PICT_FRAME;
+ h->cur_pic_ptr->reference = PICT_FRAME;
} else if (h->cur_pic_ptr->long_ref) {
av_log(h->avctx, AV_LOG_ERROR, "illegal short term reference "
"assignment for second field "
h->short_ref[0]= h->cur_pic_ptr;
h->short_ref_count++;
- h->cur_pic_ptr->f.reference |= h->picture_structure;
+ h->cur_pic_ptr->reference |= h->picture_structure;
}
}
ff_huffyuv_common_init(avctx);
memset(s->vlc, 0, 3 * sizeof(VLC));
- avctx->coded_frame = &s->picture;
s->interlaced = s->height > 288;
s->bgr32 = 1;
HYuvContext *s = avctx->priv_data;
int i;
- avctx->coded_frame= &s->picture;
ff_huffyuv_alloc_temp(s);
for (i = 0; i < 6; i++)
}
}
-static void draw_slice(HYuvContext *s, int y)
+static void draw_slice(HYuvContext *s, AVFrame *frame, int y)
{
int h, cy, i;
int offset[AV_NUM_DATA_POINTERS];
cy = y;
}
- offset[0] = s->picture.linesize[0]*y;
- offset[1] = s->picture.linesize[1]*cy;
- offset[2] = s->picture.linesize[2]*cy;
+ offset[0] = frame->linesize[0] * y;
+ offset[1] = frame->linesize[1] * cy;
+ offset[2] = frame->linesize[2] * cy;
for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
offset[i] = 0;
emms_c();
- s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
+ s->avctx->draw_horiz_band(s->avctx, frame, offset, y, 3, h);
s->last_slice_end = y + h;
}
const int width2 = s->width>>1;
const int height = s->height;
int fake_ystride, fake_ustride, fake_vstride;
- AVFrame * const p = &s->picture;
+ ThreadFrame frame = { .f = data };
+ AVFrame * const p = data;
int table_size = 0;
- AVFrame *picture = data;
-
av_fast_malloc(&s->bitstream_buffer,
&s->bitstream_buffer_size,
buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer,
(const uint32_t*)buf, buf_size / 4);
- if (p->data[0])
- ff_thread_release_buffer(avctx, p);
-
- p->reference = 0;
- if (ff_thread_get_buffer(avctx, p) < 0) {
+ if (ff_thread_get_buffer(avctx, &frame, 0) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
if (y >= s->height) break;
}
- draw_slice(s, y);
+ draw_slice(s, p, y);
ydst = p->data[0] + p->linesize[0]*y;
udst = p->data[1] + p->linesize[1]*cy;
}
}
}
- draw_slice(s, height);
+ draw_slice(s, p, height);
break;
case MEDIAN:
}
if (y >= height) break;
}
- draw_slice(s, y);
+ draw_slice(s, p, y);
decode_422_bitstream(s, width);
}
}
- draw_slice(s, height);
+ draw_slice(s, p, height);
break;
}
}
}
}
// just 1 large slice as this is not possible in reverse order
- draw_slice(s, height);
+ draw_slice(s, p, height);
break;
default:
av_log(avctx, AV_LOG_ERROR,
}
emms_c();
- *picture = *p;
*got_frame = 1;
return (get_bits_count(&s->gb) + 31) / 32 * 4 + table_size;
HYuvContext *s = avctx->priv_data;
int i;
- if (s->picture.data[0])
- avctx->release_buffer(avctx, &s->picture);
-
ff_huffyuv_common_end(s);
av_freep(&s->bitstream_buffer);
typedef struct IdcinContext {
AVCodecContext *avctx;
- AVFrame frame;
const unsigned char *buf;
int size;
huff_build_tree(s, i);
}
- avcodec_get_frame_defaults(&s->frame);
-
return 0;
}
-static void idcin_decode_vlcs(IdcinContext *s)
+static void idcin_decode_vlcs(IdcinContext *s, AVFrame *frame)
{
hnode *hnodes;
long x, y;
int bit_pos, node_num, dat_pos;
prev = bit_pos = dat_pos = 0;
- for (y = 0; y < (s->frame.linesize[0] * s->avctx->height);
- y += s->frame.linesize[0]) {
+ for (y = 0; y < (frame->linesize[0] * s->avctx->height);
+ y += frame->linesize[0]) {
for (x = y; x < y + s->avctx->width; x++) {
node_num = s->num_huff_nodes[prev];
hnodes = s->huff_nodes[prev];
bit_pos--;
}
- s->frame.data[0][x] = node_num;
+ frame->data[0][x] = node_num;
prev = node_num;
}
}
int buf_size = avpkt->size;
IdcinContext *s = avctx->priv_data;
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
+ AVFrame *frame = data;
+ int ret;
s->buf = buf;
s->size = buf_size;
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
-
- if (ff_get_buffer(avctx, &s->frame)) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, " id CIN Video: get_buffer() failed\n");
- return -1;
+ return ret;
}
- idcin_decode_vlcs(s);
+ idcin_decode_vlcs(s, frame);
if (pal) {
- s->frame.palette_has_changed = 1;
+ frame->palette_has_changed = 1;
memcpy(s->pal, pal, AVPALETTE_SIZE);
}
/* make the palette available on the way out */
- memcpy(s->frame.data[1], s->pal, AVPALETTE_SIZE);
+ memcpy(frame->data[1], s->pal, AVPALETTE_SIZE);
*got_frame = 1;
- *(AVFrame*)data = s->frame;
/* report that the buffer was completely consumed */
return buf_size;
}
-static av_cold int idcin_decode_end(AVCodecContext *avctx)
-{
- IdcinContext *s = avctx->priv_data;
-
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
-
- return 0;
-}
-
AVCodec ff_idcin_decoder = {
.name = "idcinvideo",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_IDCIN,
.priv_data_size = sizeof(IdcinContext),
.init = idcin_decode_init,
- .close = idcin_decode_end,
.decode = idcin_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("id Quake II CIN video"),
if (!s->planebuf)
return AVERROR(ENOMEM);
- s->frame.reference = 1;
-
return 0;
}
const uint8_t *buf_end = buf+buf_size;
int y, plane, res;
- if (s->init) {
- if ((res = avctx->reget_buffer(avctx, &s->frame)) < 0) {
- av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
- return res;
- }
- } else if ((res = ff_get_buffer(avctx, &s->frame)) < 0) {
- av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ if ((res = ff_reget_buffer(avctx, &s->frame)) < 0)
return res;
- } else if (avctx->bits_per_coded_sample <= 8 && avctx->pix_fmt != AV_PIX_FMT_GRAY8) {
+
+ if (!s->init && avctx->bits_per_coded_sample <= 8 &&
+ avctx->pix_fmt != AV_PIX_FMT_GRAY8) {
if ((res = cmap_read_palette(avctx, (uint32_t*)s->frame.data[1])) < 0)
return res;
}
}
}
+ if ((res = av_frame_ref(data, &s->frame)) < 0)
+ return res;
+
*got_frame = 1;
- *(AVFrame*)data = s->frame;
+
return buf_size;
}
const uint8_t *buf_end = buf+buf_size;
int y, plane, res;
- if (s->init) {
- if ((res = avctx->reget_buffer(avctx, &s->frame)) < 0) {
- av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
- return res;
- }
- } else if ((res = ff_get_buffer(avctx, &s->frame)) < 0) {
- av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ if ((res = ff_reget_buffer(avctx, &s->frame)) < 0)
return res;
- } else if (avctx->bits_per_coded_sample <= 8 && avctx->pix_fmt != AV_PIX_FMT_GRAY8) {
+
+ if (!s->init && avctx->bits_per_coded_sample <= 8 &&
+ avctx->pix_fmt != AV_PIX_FMT_GRAY8) {
if ((res = cmap_read_palette(avctx, (uint32_t*)s->frame.data[1])) < 0)
return res;
}
}
}
+ if ((res = av_frame_ref(data, &s->frame)) < 0)
+ return res;
+
*got_frame = 1;
- *(AVFrame*)data = s->frame;
+
return buf_size;
}
static av_cold int decode_end(AVCodecContext *avctx)
{
IffContext *s = avctx->priv_data;
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
+ av_frame_unref(&s->frame);
av_freep(&s->planebuf);
return 0;
}
/* get output buffer */
frame->nb_samples = COEFFS;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
#include "avcodec.h"
#include "get_bits.h"
#include "indeo2data.h"
+#include "internal.h"
#include "mathops.h"
typedef struct Ir2Context{
AVFrame * const p = &s->picture;
int start, ret;
- if (p->data[0])
- avctx->release_buffer(avctx, p);
-
- p->reference = 1;
- p->buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
- if ((ret = avctx->reget_buffer(avctx, p)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, p)) < 0) {
av_log(s->avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
return ret;
}
- *picture = s->picture;
+ if ((ret = av_frame_ref(picture, &s->picture)) < 0)
+ return ret;
+
*got_frame = 1;
return buf_size;
Ir2Context * const ic = avctx->priv_data;
AVFrame *pic = &ic->picture;
- if (pic->data[0])
- avctx->release_buffer(avctx, pic);
+ av_frame_unref(pic);
return 0;
}
typedef struct Indeo3DecodeContext {
AVCodecContext *avctx;
- AVFrame frame;
DSPContext dsp;
GetBitContext gb;
Indeo3DecodeContext *ctx = avctx->priv_data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
+ AVFrame *frame = data;
int res;
res = decode_frame_headers(ctx, avctx, buf, buf_size);
if ((res = decode_plane(ctx, avctx, &ctx->planes[2], ctx->v_data_ptr, ctx->v_data_size, 10)))
return res;
- if (ctx->frame.data[0])
- avctx->release_buffer(avctx, &ctx->frame);
-
- ctx->frame.reference = 0;
- if ((res = ff_get_buffer(avctx, &ctx->frame)) < 0) {
+ if ((res = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(ctx->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return res;
}
output_plane(&ctx->planes[0], ctx->buf_sel,
- ctx->frame.data[0], ctx->frame.linesize[0],
+ frame->data[0], frame->linesize[0],
avctx->height);
output_plane(&ctx->planes[1], ctx->buf_sel,
- ctx->frame.data[1], ctx->frame.linesize[1],
+ frame->data[1], frame->linesize[1],
(avctx->height + 3) >> 2);
output_plane(&ctx->planes[2], ctx->buf_sel,
- ctx->frame.data[2], ctx->frame.linesize[2],
+ frame->data[2], frame->linesize[2],
(avctx->height + 3) >> 2);
*got_frame = 1;
- *(AVFrame*)data = ctx->frame;
return buf_size;
}
static av_cold int decode_close(AVCodecContext *avctx)
{
- Indeo3DecodeContext *ctx = avctx->priv_data;
-
free_frame_buffers(avctx->priv_data);
- if (ctx->frame.data[0])
- avctx->release_buffer(avctx, &ctx->frame);
-
return 0;
}
#include <stdint.h>
+#include "libavutil/buffer.h"
#include "libavutil/mathematics.h"
#include "libavutil/pixfmt.h"
#include "avcodec.h"
#define FF_SANE_NB_CHANNELS 128U
-typedef struct InternalBuffer {
- uint8_t *base[AV_NUM_DATA_POINTERS];
- uint8_t *data[AV_NUM_DATA_POINTERS];
- int linesize[AV_NUM_DATA_POINTERS];
- int width;
- int height;
- enum AVPixelFormat pix_fmt;
-} InternalBuffer;
-
-typedef struct AVCodecInternal {
+typedef struct FramePool {
/**
- * internal buffer count
- * used by default get/release/reget_buffer().
+ * Pools for each data plane. For audio all the planes have the same size,
+ * so only pools[0] is used.
*/
- int buffer_count;
+ AVBufferPool *pools[4];
- /**
- * internal buffers
- * used by default get/release/reget_buffer().
+ /*
+ * Pool parameters
*/
- InternalBuffer *buffer;
+ int format;
+ int width, height;
+ int stride_align[AV_NUM_DATA_POINTERS];
+ int linesize[4];
+ int planes;
+ int channels;
+ int samples;
+} FramePool;
+typedef struct AVCodecInternal {
/**
* Whether the parent AVCodecContext is a copy of the context which had
* init() called on it.
*/
int is_copy;
+ /**
+ * Whether to allocate progress for frame threading.
+ *
+ * The codec must set it to 1 if it uses ff_thread_await/report_progress(),
+ * then progress will be allocated in ff_thread_get_buffer(). The frames
+ * then MUST be freed with ff_thread_release_buffer().
+ *
+ * If the codec does not need to call the progress functions (there are no
+ * dependencies between the frames), it should leave this at 0. Then it can
+ * decode straight to the user-provided frames (which the user will then
+ * free with av_frame_unref()), there is no need to call
+ * ff_thread_release_buffer().
+ */
+ int allocate_progress;
+
#if FF_API_OLD_ENCODE_AUDIO
/**
* Internal sample count used by avcodec_encode_audio() to fabricate pts.
*/
int last_audio_frame;
- /**
- * The data for the last allocated audio frame.
- * Stored here so we can free it.
- */
- uint8_t *audio_data;
+ AVFrame to_free;
+
+ FramePool *pool;
} AVCodecInternal;
struct AVCodecDefault {
* AVCodecContext.get_buffer() and should be used instead calling get_buffer()
* directly.
*/
-int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame);
+int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags);
+
+/**
+ * Identical in function to av_frame_make_writable(), except it uses
+ * ff_get_buffer() to allocate the buffer when needed.
+ */
+int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame);
#endif /* AVCODEC_INTERNAL_H */
AVCodecContext *avctx;
DSPContext dsp;
- AVFrame second_last_frame;
- AVFrame last_frame;
- AVFrame current_frame;
+ AVFrame *second_last_frame;
+ AVFrame *last_frame;
const unsigned char *decoding_map;
int decoding_map_size;
uint32_t pal[256];
} IpvideoContext;
-static int copy_from(IpvideoContext *s, AVFrame *src, int delta_x, int delta_y)
+static int copy_from(IpvideoContext *s, AVFrame *src, AVFrame *dst, int delta_x, int delta_y)
{
- int current_offset = s->pixel_ptr - s->current_frame.data[0];
- int motion_offset = current_offset + delta_y * s->current_frame.linesize[0]
+ int current_offset = s->pixel_ptr - dst->data[0];
+ int motion_offset = current_offset + delta_y * dst->linesize[0]
+ delta_x * (1 + s->is_16bpp);
if (motion_offset < 0) {
av_log(s->avctx, AV_LOG_ERROR, " Interplay video: motion offset < 0 (%d)\n", motion_offset);
return AVERROR(EINVAL);
}
s->dsp.put_pixels_tab[!s->is_16bpp][0](s->pixel_ptr, src->data[0] + motion_offset,
- s->current_frame.linesize[0], 8);
+ dst->linesize[0], 8);
return 0;
}
-static int ipvideo_decode_block_opcode_0x0(IpvideoContext *s)
+static int ipvideo_decode_block_opcode_0x0(IpvideoContext *s, AVFrame *frame)
{
- return copy_from(s, &s->last_frame, 0, 0);
+ return copy_from(s, s->last_frame, frame, 0, 0);
}
-static int ipvideo_decode_block_opcode_0x1(IpvideoContext *s)
+static int ipvideo_decode_block_opcode_0x1(IpvideoContext *s, AVFrame *frame)
{
- return copy_from(s, &s->second_last_frame, 0, 0);
+ return copy_from(s, s->second_last_frame, frame, 0, 0);
}
-static int ipvideo_decode_block_opcode_0x2(IpvideoContext *s)
+static int ipvideo_decode_block_opcode_0x2(IpvideoContext *s, AVFrame *frame)
{
unsigned char B;
int x, y;
}
av_dlog(NULL, " motion byte = %d, (x, y) = (%d, %d)\n", B, x, y);
- return copy_from(s, &s->second_last_frame, x, y);
+ return copy_from(s, s->second_last_frame, frame, x, y);
}
-static int ipvideo_decode_block_opcode_0x3(IpvideoContext *s)
+static int ipvideo_decode_block_opcode_0x3(IpvideoContext *s, AVFrame *frame)
{
unsigned char B;
int x, y;
}
av_dlog(NULL, " motion byte = %d, (x, y) = (%d, %d)\n", B, x, y);
- return copy_from(s, &s->current_frame, x, y);
+ return copy_from(s, frame, frame, x, y);
}
-static int ipvideo_decode_block_opcode_0x4(IpvideoContext *s)
+static int ipvideo_decode_block_opcode_0x4(IpvideoContext *s, AVFrame *frame)
{
int x, y;
unsigned char B, BL, BH;
y = -8 + BH;
av_dlog(NULL, " motion byte = %d, (x, y) = (%d, %d)\n", B, x, y);
- return copy_from(s, &s->last_frame, x, y);
+ return copy_from(s, s->last_frame, frame, x, y);
}
-static int ipvideo_decode_block_opcode_0x5(IpvideoContext *s)
+static int ipvideo_decode_block_opcode_0x5(IpvideoContext *s, AVFrame *frame)
{
signed char x, y;
y = bytestream2_get_byte(&s->stream_ptr);
av_dlog(NULL, " motion bytes = %d, %d\n", x, y);
- return copy_from(s, &s->last_frame, x, y);
+ return copy_from(s, s->last_frame, frame, x, y);
}
-static int ipvideo_decode_block_opcode_0x6(IpvideoContext *s)
+static int ipvideo_decode_block_opcode_0x6(IpvideoContext *s, AVFrame *frame)
{
/* mystery opcode? skip multiple blocks? */
av_log(s->avctx, AV_LOG_ERROR, " Interplay video: Help! Mystery opcode 0x6 seen\n");
return 0;
}
-static int ipvideo_decode_block_opcode_0x7(IpvideoContext *s)
+static int ipvideo_decode_block_opcode_0x7(IpvideoContext *s, AVFrame *frame)
{
int x, y;
unsigned char P[2];
return 0;
}
-static int ipvideo_decode_block_opcode_0x8(IpvideoContext *s)
+static int ipvideo_decode_block_opcode_0x8(IpvideoContext *s, AVFrame *frame)
{
int x, y;
unsigned char P[4];
return 0;
}
-static int ipvideo_decode_block_opcode_0x9(IpvideoContext *s)
+static int ipvideo_decode_block_opcode_0x9(IpvideoContext *s, AVFrame *frame)
{
int x, y;
unsigned char P[4];
return 0;
}
-static int ipvideo_decode_block_opcode_0xA(IpvideoContext *s)
+static int ipvideo_decode_block_opcode_0xA(IpvideoContext *s, AVFrame *frame)
{
int x, y;
unsigned char P[8];
return 0;
}
-static int ipvideo_decode_block_opcode_0xB(IpvideoContext *s)
+static int ipvideo_decode_block_opcode_0xB(IpvideoContext *s, AVFrame *frame)
{
int y;
return 0;
}
-static int ipvideo_decode_block_opcode_0xC(IpvideoContext *s)
+static int ipvideo_decode_block_opcode_0xC(IpvideoContext *s, AVFrame *frame)
{
int x, y;
return 0;
}
-static int ipvideo_decode_block_opcode_0xD(IpvideoContext *s)
+static int ipvideo_decode_block_opcode_0xD(IpvideoContext *s, AVFrame *frame)
{
int y;
unsigned char P[2];
return 0;
}
-static int ipvideo_decode_block_opcode_0xE(IpvideoContext *s)
+static int ipvideo_decode_block_opcode_0xE(IpvideoContext *s, AVFrame *frame)
{
int y;
unsigned char pix;
return 0;
}
-static int ipvideo_decode_block_opcode_0xF(IpvideoContext *s)
+static int ipvideo_decode_block_opcode_0xF(IpvideoContext *s, AVFrame *frame)
{
int x, y;
unsigned char sample[2];
return 0;
}
-static int ipvideo_decode_block_opcode_0x6_16(IpvideoContext *s)
+static int ipvideo_decode_block_opcode_0x6_16(IpvideoContext *s, AVFrame *frame)
{
signed char x, y;
y = bytestream2_get_byte(&s->stream_ptr);
av_dlog(NULL, " motion bytes = %d, %d\n", x, y);
- return copy_from(s, &s->second_last_frame, x, y);
+ return copy_from(s, s->second_last_frame, frame, x, y);
}
-static int ipvideo_decode_block_opcode_0x7_16(IpvideoContext *s)
+static int ipvideo_decode_block_opcode_0x7_16(IpvideoContext *s, AVFrame *frame)
{
int x, y;
uint16_t P[2];
return 0;
}
-static int ipvideo_decode_block_opcode_0x8_16(IpvideoContext *s)
+static int ipvideo_decode_block_opcode_0x8_16(IpvideoContext *s, AVFrame *frame)
{
int x, y;
uint16_t P[4];
return 0;
}
-static int ipvideo_decode_block_opcode_0x9_16(IpvideoContext *s)
+static int ipvideo_decode_block_opcode_0x9_16(IpvideoContext *s, AVFrame *frame)
{
int x, y;
uint16_t P[4];
return 0;
}
-static int ipvideo_decode_block_opcode_0xA_16(IpvideoContext *s)
+static int ipvideo_decode_block_opcode_0xA_16(IpvideoContext *s, AVFrame *frame)
{
int x, y;
uint16_t P[8];
return 0;
}
-static int ipvideo_decode_block_opcode_0xB_16(IpvideoContext *s)
+static int ipvideo_decode_block_opcode_0xB_16(IpvideoContext *s, AVFrame *frame)
{
int x, y;
uint16_t *pixel_ptr = (uint16_t*)s->pixel_ptr;
return 0;
}
-static int ipvideo_decode_block_opcode_0xC_16(IpvideoContext *s)
+static int ipvideo_decode_block_opcode_0xC_16(IpvideoContext *s, AVFrame *frame)
{
int x, y;
uint16_t *pixel_ptr = (uint16_t*)s->pixel_ptr;
return 0;
}
-static int ipvideo_decode_block_opcode_0xD_16(IpvideoContext *s)
+static int ipvideo_decode_block_opcode_0xD_16(IpvideoContext *s, AVFrame *frame)
{
int x, y;
uint16_t P[2];
return 0;
}
-static int ipvideo_decode_block_opcode_0xE_16(IpvideoContext *s)
+static int ipvideo_decode_block_opcode_0xE_16(IpvideoContext *s, AVFrame *frame)
{
int x, y;
uint16_t pix;
return 0;
}
-static int (* const ipvideo_decode_block[])(IpvideoContext *s) = {
+static int (* const ipvideo_decode_block[])(IpvideoContext *s, AVFrame *frame) = {
ipvideo_decode_block_opcode_0x0, ipvideo_decode_block_opcode_0x1,
ipvideo_decode_block_opcode_0x2, ipvideo_decode_block_opcode_0x3,
ipvideo_decode_block_opcode_0x4, ipvideo_decode_block_opcode_0x5,
ipvideo_decode_block_opcode_0xE, ipvideo_decode_block_opcode_0xF,
};
-static int (* const ipvideo_decode_block16[])(IpvideoContext *s) = {
+static int (* const ipvideo_decode_block16[])(IpvideoContext *s, AVFrame *frame) = {
ipvideo_decode_block_opcode_0x0, ipvideo_decode_block_opcode_0x1,
ipvideo_decode_block_opcode_0x2, ipvideo_decode_block_opcode_0x3,
ipvideo_decode_block_opcode_0x4, ipvideo_decode_block_opcode_0x5,
ipvideo_decode_block_opcode_0xE_16, ipvideo_decode_block_opcode_0x1,
};
-static void ipvideo_decode_opcodes(IpvideoContext *s)
+static void ipvideo_decode_opcodes(IpvideoContext *s, AVFrame *frame)
{
int x, y;
unsigned char opcode;
bytestream2_skip(&s->stream_ptr, 14); /* data starts 14 bytes in */
if (!s->is_16bpp) {
/* this is PAL8, so make the palette available */
- memcpy(s->current_frame.data[1], s->pal, AVPALETTE_SIZE);
+ memcpy(frame->data[1], s->pal, AVPALETTE_SIZE);
- s->stride = s->current_frame.linesize[0];
+ s->stride = frame->linesize[0];
} else {
- s->stride = s->current_frame.linesize[0] >> 1;
+ s->stride = frame->linesize[0] >> 1;
s->mv_ptr = s->stream_ptr;
bytestream2_skip(&s->mv_ptr, bytestream2_get_le16(&s->stream_ptr));
}
s->line_inc = s->stride - 8;
- s->upper_motion_limit_offset = (s->avctx->height - 8) * s->current_frame.linesize[0]
+ s->upper_motion_limit_offset = (s->avctx->height - 8) * frame->linesize[0]
+ (s->avctx->width - 8) * (1 + s->is_16bpp);
init_get_bits(&gb, s->decoding_map, s->decoding_map_size * 8);
x, y, opcode, bytestream2_tell(&s->stream_ptr));
if (!s->is_16bpp) {
- s->pixel_ptr = s->current_frame.data[0] + x
- + y*s->current_frame.linesize[0];
- ret = ipvideo_decode_block[opcode](s);
+ s->pixel_ptr = frame->data[0] + x
+ + y*frame->linesize[0];
+ ret = ipvideo_decode_block[opcode](s, frame);
} else {
- s->pixel_ptr = s->current_frame.data[0] + x*2
- + y*s->current_frame.linesize[0];
- ret = ipvideo_decode_block16[opcode](s);
+ s->pixel_ptr = frame->data[0] + x*2
+ + y*frame->linesize[0];
+ ret = ipvideo_decode_block16[opcode](s, frame);
}
if (ret != 0) {
av_log(s->avctx, AV_LOG_ERROR, " Interplay video: decode problem on frame %d, @ block (%d, %d)\n",
ff_dsputil_init(&s->dsp, avctx);
- s->current_frame.data[0] = s->last_frame.data[0] =
- s->second_last_frame.data[0] = NULL;
+ s->last_frame = av_frame_alloc();
+ s->second_last_frame = av_frame_alloc();
+ if (!s->last_frame || !s->second_last_frame) {
+ av_frame_free(&s->last_frame);
+ av_frame_free(&s->second_last_frame);
+ return AVERROR(ENOMEM);
+ }
return 0;
}
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
IpvideoContext *s = avctx->priv_data;
+ AVFrame *frame = data;
int ret;
/* decoding map contains 4 bits of information per 8x8 block */
bytestream2_init(&s->stream_ptr, buf + s->decoding_map_size,
buf_size - s->decoding_map_size);
- s->current_frame.reference = 3;
- if ((ret = ff_get_buffer(avctx, &s->current_frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) {
av_log(avctx, AV_LOG_ERROR, " Interplay Video: get_buffer() failed\n");
return ret;
}
if (!s->is_16bpp) {
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
if (pal) {
- s->current_frame.palette_has_changed = 1;
+ frame->palette_has_changed = 1;
memcpy(s->pal, pal, AVPALETTE_SIZE);
}
}
- ipvideo_decode_opcodes(s);
+ ipvideo_decode_opcodes(s, frame);
*got_frame = 1;
- *(AVFrame*)data = s->current_frame;
/* shuffle frames */
- if (s->second_last_frame.data[0])
- avctx->release_buffer(avctx, &s->second_last_frame);
- s->second_last_frame = s->last_frame;
- s->last_frame = s->current_frame;
- s->current_frame.data[0] = NULL; /* catch any access attempts */
+ av_frame_unref(s->second_last_frame);
+ FFSWAP(AVFrame*, s->second_last_frame, s->last_frame);
+ if ((ret = av_frame_ref(s->last_frame, frame)) < 0)
+ return ret;
/* report that the buffer was completely consumed */
return buf_size;
{
IpvideoContext *s = avctx->priv_data;
- /* release the last frame */
- if (s->last_frame.data[0])
- avctx->release_buffer(avctx, &s->last_frame);
- if (s->second_last_frame.data[0])
- avctx->release_buffer(avctx, &s->second_last_frame);
+ av_frame_free(&s->last_frame);
+ av_frame_free(&s->second_last_frame);
return 0;
}
/*emulate MB info in the relevant tables*/
s->mbskip_table [mb_xy]=0;
s->mbintra_table[mb_xy]=1;
- s->current_picture.f.qscale_table[mb_xy] = w->quant;
+ s->current_picture.qscale_table[mb_xy] = w->quant;
mb_xy++;
}
s->dest[0]+= 8;
do{
if (get_bits1(&s->gb)) {
/* skip mb */
- mot_val = s->current_picture.f.motion_val[0][s->block_index[0]];
+ mot_val = s->current_picture.motion_val[0][s->block_index[0]];
mot_val[0 ]= mot_val[2 ]=
mot_val[0+stride]= mot_val[2+stride]= 0;
mot_val[1 ]= mot_val[3 ]=
mot_val[1+stride]= mot_val[3+stride]= 0;
- s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
+ s->current_picture.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
goto end;
}
cbpc = get_vlc2(&s->gb, ff_h263_inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2);
}while(cbpc == 20);
if(cbpc & 4){
- s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA;
+ s->current_picture.mb_type[xy] = MB_TYPE_INTRA;
}else{
get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
if (cbpc & 8) {
}
if ((cbpc & 16) == 0) {
- s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
+ s->current_picture.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
/* 16x16 motion prediction */
mot_val= ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
if (s->umvplus)
mot_val[1 ]= mot_val[3 ]=
mot_val[1+stride]= mot_val[3+stride]= my;
} else {
- s->current_picture.f.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
+ s->current_picture.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
for(i=0;i<4;i++) {
mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
if (s->umvplus)
s->block_last_index[i] = -1;
s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16;
- s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
+ s->current_picture.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
s->mv[0][0][0] = 0;
s->mv[0][0][1] = 0;
s->mb_skipped = !(s->obmc | s->loop_filter);
s->mv_dir = MV_DIR_FORWARD;
if ((cbpc & 16) == 0) {
- s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
+ s->current_picture.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
/* 16x16 motion prediction */
s->mv_type = MV_TYPE_16X16;
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
if (s->umvplus && (mx - pred_x) == 1 && (my - pred_y) == 1)
skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */
} else {
- s->current_picture.f.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
+ s->current_picture.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
s->mv_type = MV_TYPE_8X8;
for(i=0;i<4;i++) {
mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
} else if(s->pict_type==AV_PICTURE_TYPE_B) {
int mb_type;
const int stride= s->b8_stride;
- int16_t *mot_val0 = s->current_picture.f.motion_val[0][2 * (s->mb_x + s->mb_y * stride)];
- int16_t *mot_val1 = s->current_picture.f.motion_val[1][2 * (s->mb_x + s->mb_y * stride)];
+ int16_t *mot_val0 = s->current_picture.motion_val[0][2 * (s->mb_x + s->mb_y * stride)];
+ int16_t *mot_val1 = s->current_picture.motion_val[1][2 * (s->mb_x + s->mb_y * stride)];
// const int mv_xy= s->mb_x + 1 + s->mb_y * s->mb_stride;
//FIXME ugly
}
}
- s->current_picture.f.mb_type[xy] = mb_type;
+ s->current_picture.mb_type[xy] = mb_type;
} else { /* I-Frame */
do{
cbpc = get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2);
dquant = cbpc & 4;
s->mb_intra = 1;
intra:
- s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA;
+ s->current_picture.mb_type[xy] = MB_TYPE_INTRA;
if (s->h263_aic) {
s->ac_pred = get_bits1(&s->gb);
if(s->ac_pred){
- s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA | MB_TYPE_ACPRED;
+ s->current_picture.mb_type[xy] = MB_TYPE_INTRA | MB_TYPE_ACPRED;
s->h263_aic_dir = get_bits1(&s->gb);
}
*/
void ff_clean_h263_qscales(MpegEncContext *s){
int i;
- int8_t * const qscale_table = s->current_picture.f.qscale_table;
+ int8_t * const qscale_table = s->current_picture.qscale_table;
ff_init_qscale_tab(s);
/* motion vectors: 8x8 mode*/
ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
- motion_x = s->current_picture.f.motion_val[0][s->block_index[i]][0];
- motion_y = s->current_picture.f.motion_val[0][s->block_index[i]][1];
+ motion_x = s->current_picture.motion_val[0][s->block_index[i]][0];
+ motion_y = s->current_picture.motion_val[0][s->block_index[i]][1];
if (!s->umvplus) {
ff_h263_encode_motion_vector(s, motion_x - pred_x,
motion_y - pred_y, 1);
{
IVI45DecContext *ctx = avctx->priv_data;
const uint8_t *buf = avpkt->data;
+ AVFrame *frame = data;
int buf_size = avpkt->size;
int result, p, b;
av_log(avctx, AV_LOG_ERROR, "Buffer contains IP frames!\n");
}
- if (ctx->frame.data[0])
- avctx->release_buffer(avctx, &ctx->frame);
-
- ctx->frame.reference = 0;
avcodec_set_dimensions(avctx, ctx->planes[0].width, ctx->planes[0].height);
- if ((result = ff_get_buffer(avctx, &ctx->frame)) < 0) {
+ if ((result = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return result;
}
if (ctx->is_scalable) {
if (avctx->codec_id == AV_CODEC_ID_INDEO4)
- ff_ivi_recompose_haar(&ctx->planes[0], ctx->frame.data[0], ctx->frame.linesize[0]);
+ ff_ivi_recompose_haar(&ctx->planes[0], frame->data[0], frame->linesize[0]);
else
- ff_ivi_recompose53 (&ctx->planes[0], ctx->frame.data[0], ctx->frame.linesize[0]);
+ ff_ivi_recompose53 (&ctx->planes[0], frame->data[0], frame->linesize[0]);
} else {
- ivi_output_plane(&ctx->planes[0], ctx->frame.data[0], ctx->frame.linesize[0]);
+ ivi_output_plane(&ctx->planes[0], frame->data[0], frame->linesize[0]);
}
- ivi_output_plane(&ctx->planes[2], ctx->frame.data[1], ctx->frame.linesize[1]);
- ivi_output_plane(&ctx->planes[1], ctx->frame.data[2], ctx->frame.linesize[2]);
+ ivi_output_plane(&ctx->planes[2], frame->data[1], frame->linesize[1]);
+ ivi_output_plane(&ctx->planes[1], frame->data[2], frame->linesize[2]);
*got_frame = 1;
- *(AVFrame*)data = ctx->frame;
return buf_size;
}
if (ctx->mb_vlc.cust_tab.table)
ff_free_vlc(&ctx->mb_vlc.cust_tab);
- if (ctx->frame.data[0])
- avctx->release_buffer(avctx, &ctx->frame);
-
#if IVI4_STREAM_ANALYSER
if (avctx->codec_id == AV_CODEC_ID_INDEO4) {
if (ctx->is_scalable)
typedef struct IVI45DecContext {
GetBitContext gb;
- AVFrame frame;
RVMapDesc rvmap_tabs[9]; ///< local corrected copy of the static rvmap tables
uint32_t frame_num;
#include "avcodec.h"
#include "dsputil.h"
#include "get_bits.h"
+#include "internal.h"
#include "libavutil/intreadwrite.h"
typedef struct JvContext {
int buf_size = avpkt->size;
const uint8_t *buf = avpkt->data;
const uint8_t *buf_end = buf + buf_size;
- int video_size, video_type, i, j;
+ int video_size, video_type, i, j, ret;
video_size = AV_RL32(buf);
video_type = buf[4];
buf += 5;
if (video_size) {
- if (avctx->reget_buffer(avctx, &s->frame) < 0) {
+ if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
- return -1;
+ return ret;
}
if (video_type == 0 || video_type == 1) {
s->palette_has_changed = 0;
memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE);
+ if ((ret = av_frame_ref(data, &s->frame)) < 0)
+ return ret;
*got_frame = 1;
- *(AVFrame*)data = s->frame;
}
return buf_size;
{
JvContext *s = avctx->priv_data;
- if(s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
+ av_frame_unref(&s->frame);
return 0;
}
typedef struct {
AVCodecContext *avctx;
- AVFrame prev, cur;
+ AVFrame prev;
} KgvContext;
static void decode_flush(AVCodecContext *avctx)
{
KgvContext * const c = avctx->priv_data;
- if (c->prev.data[0])
- avctx->release_buffer(avctx, &c->prev);
+ av_frame_unref(&c->prev);
}
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
AVPacket *avpkt)
{
+ AVFrame *frame = data;
const uint8_t *buf = avpkt->data;
const uint8_t *buf_end = buf + avpkt->size;
KgvContext * const c = avctx->priv_data;
return res;
if (w != avctx->width || h != avctx->height) {
- if (c->prev.data[0])
- avctx->release_buffer(avctx, &c->prev);
+ av_frame_unref(&c->prev);
avcodec_set_dimensions(avctx, w, h);
}
maxcnt = w * h;
- c->cur.reference = 3;
- if ((res = ff_get_buffer(avctx, &c->cur)) < 0)
+ if ((res = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
return res;
- out = (uint16_t *) c->cur.data[0];
+ out = (uint16_t *) frame->data[0];
if (c->prev.data[0]) {
prev = (uint16_t *) c->prev.data[0];
} else {
if (outcnt - maxcnt)
av_log(avctx, AV_LOG_DEBUG, "frame finished with %d diff\n", outcnt - maxcnt);
- *got_frame = 1;
- *(AVFrame*)data = c->cur;
+ av_frame_unref(&c->prev);
+ if ((res = av_frame_ref(&c->prev, frame)) < 0)
+ return res;
- if (c->prev.data[0])
- avctx->release_buffer(avctx, &c->prev);
- FFSWAP(AVFrame, c->cur, c->prev);
+ *got_frame = 1;
return avpkt->size;
}
*/
typedef struct KmvcContext {
AVCodecContext *avctx;
- AVFrame pic;
int setpal;
int palsize;
AVPacket *avpkt)
{
KmvcContext *const ctx = avctx->priv_data;
+ AVFrame *frame = data;
uint8_t *out, *src;
int i, ret;
int header;
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
bytestream2_init(&ctx->g, avpkt->data, avpkt->size);
- if (ctx->pic.data[0])
- avctx->release_buffer(avctx, &ctx->pic);
- ctx->pic.reference = 1;
- ctx->pic.buffer_hints = FF_BUFFER_HINTS_VALID;
- if ((ret = ff_get_buffer(avctx, &ctx->pic)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
}
if (header & KMVC_KEYFRAME) {
- ctx->pic.key_frame = 1;
- ctx->pic.pict_type = AV_PICTURE_TYPE_I;
+ frame->key_frame = 1;
+ frame->pict_type = AV_PICTURE_TYPE_I;
} else {
- ctx->pic.key_frame = 0;
- ctx->pic.pict_type = AV_PICTURE_TYPE_P;
+ frame->key_frame = 0;
+ frame->pict_type = AV_PICTURE_TYPE_P;
}
if (header & KMVC_PALETTE) {
- ctx->pic.palette_has_changed = 1;
+ frame->palette_has_changed = 1;
// palette starts from index 1 and has 127 entries
for (i = 1; i <= ctx->palsize; i++) {
ctx->pal[i] = bytestream2_get_be24(&ctx->g);
}
if (pal) {
- ctx->pic.palette_has_changed = 1;
+ frame->palette_has_changed = 1;
memcpy(ctx->pal, pal, AVPALETTE_SIZE);
}
if (ctx->setpal) {
ctx->setpal = 0;
- ctx->pic.palette_has_changed = 1;
+ frame->palette_has_changed = 1;
}
/* make the palette available on the way out */
- memcpy(ctx->pic.data[1], ctx->pal, 1024);
+ memcpy(frame->data[1], ctx->pal, 1024);
blocksize = bytestream2_get_byte(&ctx->g);
return AVERROR_INVALIDDATA;
}
- out = ctx->pic.data[0];
+ out = frame->data[0];
src = ctx->cur;
for (i = 0; i < avctx->height; i++) {
memcpy(out, src, avctx->width);
src += 320;
- out += ctx->pic.linesize[0];
+ out += frame->linesize[0];
}
/* flip buffers */
}
*got_frame = 1;
- *(AVFrame *) data = ctx->pic;
/* always report that the buffer was completely consumed */
return avpkt->size;
av_freep(&c->frm0);
av_freep(&c->frm1);
- if (c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
return 0;
}
typedef struct LagarithContext {
AVCodecContext *avctx;
- AVFrame picture;
DSPContext dsp;
int zeros; /**< number of consecutive zero bytes encountered */
int zeros_rem; /**< number of zero bytes remaining to output */
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
LagarithContext *l = avctx->priv_data;
- AVFrame *const p = &l->picture;
+ ThreadFrame frame = { .f = data };
+ AVFrame *const p = data;
uint8_t frametype = 0;
uint32_t offset_gu = 0, offset_bv = 0, offset_ry = 9;
uint32_t offs[4];
uint8_t *srcs[4], *dst;
int i, j, planes = 3;
- AVFrame *picture = data;
-
- if (p->data[0])
- ff_thread_release_buffer(avctx, p);
-
- p->reference = 0;
p->key_frame = 1;
frametype = buf[0];
case FRAME_SOLID_RGBA:
avctx->pix_fmt = AV_PIX_FMT_RGB32;
- if (ff_thread_get_buffer(avctx, p) < 0) {
+ if (ff_thread_get_buffer(avctx, &frame, 0) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
if (frametype == FRAME_ARITH_RGB24 || frametype == FRAME_U_RGB24)
avctx->pix_fmt = AV_PIX_FMT_RGB24;
- if (ff_thread_get_buffer(avctx, p) < 0) {
+ if (ff_thread_get_buffer(avctx, &frame, 0) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
case FRAME_ARITH_YUY2:
avctx->pix_fmt = AV_PIX_FMT_YUV422P;
- if (ff_thread_get_buffer(avctx, p) < 0) {
+ if (ff_thread_get_buffer(avctx, &frame, 0) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
case FRAME_ARITH_YV12:
avctx->pix_fmt = AV_PIX_FMT_YUV420P;
- if (ff_thread_get_buffer(avctx, p) < 0) {
+ if (ff_thread_get_buffer(avctx, &frame, 0) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
return -1;
}
- *picture = *p;
*got_frame = 1;
return buf_size;
{
LagarithContext *l = avctx->priv_data;
- if (l->picture.data[0])
- ff_thread_release_buffer(avctx, &l->picture);
av_freep(&l->rgb_planes);
return 0;
* Decoder context
*/
typedef struct LclDecContext {
- AVFrame pic;
-
// Image type
int imgtype;
// Compression type
*/
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
{
+ AVFrame *frame = data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
LclDecContext * const c = avctx->priv_data;
unsigned int mthread_inlen, mthread_outlen;
unsigned int len = buf_size;
- if(c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
-
- c->pic.reference = 0;
- c->pic.buffer_hints = FF_BUFFER_HINTS_VALID;
- if ((ret = ff_get_buffer(avctx, &c->pic)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
- outptr = c->pic.data[0]; // Output image pointer
+ outptr = frame->data[0]; // Output image pointer
/* Decompress frame */
switch (avctx->codec_id) {
}
/* Convert colorspace */
- y_out = c->pic.data[0] + (height - 1) * c->pic.linesize[0];
- u_out = c->pic.data[1] + (height - 1) * c->pic.linesize[1];
- v_out = c->pic.data[2] + (height - 1) * c->pic.linesize[2];
+ y_out = frame->data[0] + (height - 1) * frame->linesize[0];
+ u_out = frame->data[1] + (height - 1) * frame->linesize[1];
+ v_out = frame->data[2] + (height - 1) * frame->linesize[2];
switch (c->imgtype) {
case IMGTYPE_YUV111:
for (row = 0; row < height; row++) {
u_out[col] = *encoded++ + 128;
v_out[col] = *encoded++ + 128;
}
- y_out -= c->pic.linesize[0];
- u_out -= c->pic.linesize[1];
- v_out -= c->pic.linesize[2];
+ y_out -= frame->linesize[0];
+ u_out -= frame->linesize[1];
+ v_out -= frame->linesize[2];
}
break;
case IMGTYPE_YUV422:
v_out[ col >> 1 ] = *encoded++ + 128;
v_out[(col >> 1) + 1] = *encoded++ + 128;
}
- y_out -= c->pic.linesize[0];
- u_out -= c->pic.linesize[1];
- v_out -= c->pic.linesize[2];
+ y_out -= frame->linesize[0];
+ u_out -= frame->linesize[1];
+ v_out -= frame->linesize[2];
}
break;
case IMGTYPE_RGB24:
for (row = height - 1; row >= 0; row--) {
- pixel_ptr = row * c->pic.linesize[0];
+ pixel_ptr = row * frame->linesize[0];
memcpy(outptr + pixel_ptr, encoded, 3 * width);
encoded += 3 * width;
}
u_out[col >> 2] = *encoded++ + 128;
v_out[col >> 2] = *encoded++ + 128;
}
- y_out -= c->pic.linesize[0];
- u_out -= c->pic.linesize[1];
- v_out -= c->pic.linesize[2];
+ y_out -= frame->linesize[0];
+ u_out -= frame->linesize[1];
+ v_out -= frame->linesize[2];
}
break;
case IMGTYPE_YUV211:
u_out[col >> 1] = *encoded++ + 128;
v_out[col >> 1] = *encoded++ + 128;
}
- y_out -= c->pic.linesize[0];
- u_out -= c->pic.linesize[1];
- v_out -= c->pic.linesize[2];
+ y_out -= frame->linesize[0];
+ u_out -= frame->linesize[1];
+ v_out -= frame->linesize[2];
}
break;
case IMGTYPE_YUV420:
- u_out = c->pic.data[1] + ((height >> 1) - 1) * c->pic.linesize[1];
- v_out = c->pic.data[2] + ((height >> 1) - 1) * c->pic.linesize[2];
+ u_out = frame->data[1] + ((height >> 1) - 1) * frame->linesize[1];
+ v_out = frame->data[2] + ((height >> 1) - 1) * frame->linesize[2];
for (row = 0; row < height - 1; row += 2) {
for (col = 0; col < width - 1; col += 2) {
memcpy(y_out + col, encoded, 2);
encoded += 2;
- memcpy(y_out + col - c->pic.linesize[0], encoded, 2);
+ memcpy(y_out + col - frame->linesize[0], encoded, 2);
encoded += 2;
u_out[col >> 1] = *encoded++ + 128;
v_out[col >> 1] = *encoded++ + 128;
}
- y_out -= c->pic.linesize[0] << 1;
- u_out -= c->pic.linesize[1];
- v_out -= c->pic.linesize[2];
+ y_out -= frame->linesize[0] << 1;
+ u_out -= frame->linesize[1];
+ v_out -= frame->linesize[2];
}
break;
default:
}
*got_frame = 1;
- *(AVFrame*)data = c->pic;
/* always report that the buffer was completely consumed */
return buf_size;
LclDecContext * const c = avctx->priv_data;
av_freep(&c->decomp_buf);
- if (c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
#if CONFIG_ZLIB_DECODER
if (avctx->codec_id == AV_CODEC_ID_ZLIB)
inflateEnd(&c->zstream);
/* get output buffer */
frame->nb_samples = avctx->frame_size;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
}
frame->nb_samples = s->decoder.blockl;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
/* get output buffer */
frame->nb_samples = 160;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
typedef struct {
AVClass *class;
opj_dparameters_t dec_params;
- AVFrame image;
int lowres;
int lowqual;
} LibOpenJPEGContext;
LibOpenJPEGContext *ctx = avctx->priv_data;
opj_set_default_decoder_parameters(&ctx->dec_params);
- avcodec_get_frame_defaults(&ctx->image);
- avctx->coded_frame = &ctx->image;
- return 0;
-}
-
-static av_cold int libopenjpeg_decode_init_thread_copy(AVCodecContext *avctx)
-{
- LibOpenJPEGContext *ctx = avctx->priv_data;
-
- avctx->coded_frame = &ctx->image;
return 0;
}
uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
LibOpenJPEGContext *ctx = avctx->priv_data;
- AVFrame *picture = &ctx->image, *output = data;
+ ThreadFrame frame = { .f = data };
+ AVFrame *picture = data;
const AVPixFmtDescriptor *desc;
opj_dinfo_t *dec;
opj_cio_t *stream;
if (image->comps[i].prec > avctx->bits_per_raw_sample)
avctx->bits_per_raw_sample = image->comps[i].prec;
- if (picture->data[0])
- ff_thread_release_buffer(avctx, picture);
-
- if (ff_thread_get_buffer(avctx, picture) < 0) {
+ if (ff_thread_get_buffer(avctx, &frame, 0) < 0) {
av_log(avctx, AV_LOG_ERROR, "ff_thread_get_buffer() failed\n");
goto done;
}
goto done;
}
- *output = ctx->image;
*got_frame = 1;
ret = buf_size;
return ret;
}
-static av_cold int libopenjpeg_decode_close(AVCodecContext *avctx)
-{
- LibOpenJPEGContext *ctx = avctx->priv_data;
-
- if (ctx->image.data[0])
- ff_thread_release_buffer(avctx, &ctx->image);
- return 0;
-}
-
#define OFFSET(x) offsetof(LibOpenJPEGContext, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
.id = AV_CODEC_ID_JPEG2000,
.priv_data_size = sizeof(LibOpenJPEGContext),
.init = libopenjpeg_decode_init,
- .close = libopenjpeg_decode_close,
.decode = libopenjpeg_decode_frame,
.capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
.long_name = NULL_IF_CONFIG_SMALL("OpenJPEG JPEG 2000"),
.priv_class = &class,
- .init_thread_copy = ONLY_IF_THREADS_ENABLED(libopenjpeg_decode_init_thread_copy),
};
int ret, nb_samples;
frame->nb_samples = MAX_FRAME_SIZE;
- ret = ff_get_buffer(avc, frame);
+ ret = ff_get_buffer(avc, frame, 0);
if (ret < 0) {
av_log(avc, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
/** end of sequence pulled */
int eos_pulled;
-
- /** decoded picture */
- AVFrame dec_frame;
} SchroDecoderParams;
typedef struct SchroParseUnitContext {
SchroDecoder *decoder = p_schro_params->decoder;
SchroBuffer *enc_buf;
SchroFrame* frame;
+ AVFrame *avframe = data;
int state;
int go = 1;
int outer = 1;
framewithpts = ff_schro_queue_pop(&p_schro_params->dec_frame_queue);
if (framewithpts && framewithpts->frame) {
- if (p_schro_params->dec_frame.data[0])
- avctx->release_buffer(avctx, &p_schro_params->dec_frame);
- if (ff_get_buffer(avctx, &p_schro_params->dec_frame) < 0) {
+ if (ff_get_buffer(avctx, avframe, 0) < 0) {
av_log(avctx, AV_LOG_ERROR, "Unable to allocate buffer\n");
return AVERROR(ENOMEM);
}
- memcpy(p_schro_params->dec_frame.data[0],
+ memcpy(avframe->data[0],
framewithpts->frame->components[0].data,
framewithpts->frame->components[0].length);
- memcpy(p_schro_params->dec_frame.data[1],
+ memcpy(avframe->data[1],
framewithpts->frame->components[1].data,
framewithpts->frame->components[1].length);
- memcpy(p_schro_params->dec_frame.data[2],
+ memcpy(avframe->data[2],
framewithpts->frame->components[2].data,
framewithpts->frame->components[2].length);
/* Fill frame with current buffer data from Schroedinger. */
- p_schro_params->dec_frame.format = -1; /* Unknown -1 */
- p_schro_params->dec_frame.width = framewithpts->frame->width;
- p_schro_params->dec_frame.height = framewithpts->frame->height;
- p_schro_params->dec_frame.pkt_pts = framewithpts->pts;
- p_schro_params->dec_frame.linesize[0] = framewithpts->frame->components[0].stride;
- p_schro_params->dec_frame.linesize[1] = framewithpts->frame->components[1].stride;
- p_schro_params->dec_frame.linesize[2] = framewithpts->frame->components[2].stride;
-
- *(AVFrame*)data = p_schro_params->dec_frame;
+ avframe->pkt_pts = framewithpts->pts;
+ avframe->linesize[0] = framewithpts->frame->components[0].stride;
+ avframe->linesize[1] = framewithpts->frame->components[1].stride;
+ avframe->linesize[2] = framewithpts->frame->components[2].stride;
+
*got_frame = 1;
/* Now free the frame resources. */
schro_decoder_free(p_schro_params->decoder);
av_freep(&p_schro_params->format);
- if (p_schro_params->dec_frame.data[0])
- avctx->release_buffer(avctx, &p_schro_params->dec_frame);
-
/* Free data in the output frame queue. */
ff_schro_queue_free(&p_schro_params->dec_frame_queue,
libschroedinger_decode_frame_free);
/* get output buffer */
frame->nb_samples = s->frame_size;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
#include "libavutil/common.h"
#include "libavutil/imgutils.h"
#include "avcodec.h"
+#include "internal.h"
typedef struct VP8DecoderContext {
struct vpx_codec_ctx decoder;
AVFrame *picture = data;
const void *iter = NULL;
struct vpx_image *img;
+ int ret;
if (vpx_codec_decode(&ctx->decoder, avpkt->data, avpkt->size, NULL, 0) !=
VPX_CODEC_OK) {
return AVERROR_INVALIDDATA;
avcodec_set_dimensions(avctx, img->d_w, img->d_h);
}
- picture->data[0] = img->planes[0];
- picture->data[1] = img->planes[1];
- picture->data[2] = img->planes[2];
- picture->data[3] = NULL;
- picture->linesize[0] = img->stride[0];
- picture->linesize[1] = img->stride[1];
- picture->linesize[2] = img->stride[2];
- picture->linesize[3] = 0;
+ if ((ret = ff_get_buffer(avctx, picture, 0)) < 0)
+ return ret;
+ av_image_copy(picture->data, picture->linesize, img->planes,
+ img->stride, avctx->pix_fmt, img->d_w, img->d_h);
*got_frame = 1;
}
return avpkt->size;
.init = vp8_init,
.close = vp8_free,
.decode = vp8_decode,
- .capabilities = CODEC_CAP_AUTO_THREADS,
+ .capabilities = CODEC_CAP_AUTO_THREADS | CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("libvpx VP8"),
};
#endif /* CONFIG_LIBVPX_VP8_DECODER */
init_put_bits(&s->pb, pkt->data, pkt->size);
- *p = *pict;
+ av_frame_unref(p);
+ ret = av_frame_ref(p, pict);
+ if (ret < 0)
+ return ret;
p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1;
typedef struct LOCOContext {
AVCodecContext *avctx;
- AVFrame pic;
int lossy;
int mode;
} LOCOContext;
LOCOContext * const l = avctx->priv_data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
- AVFrame * const p = &l->pic;
+ AVFrame * const p = data;
int decoded, ret;
- if (p->data[0])
- avctx->release_buffer(avctx, p);
-
- p->reference = 0;
- if ((ret = ff_get_buffer(avctx, p)) < 0) {
+ if ((ret = ff_get_buffer(avctx, p, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
}
*got_frame = 1;
- *(AVFrame*)data = l->pic;
return buf_size;
buf_too_small:
return 0;
}
-static av_cold int decode_end(AVCodecContext *avctx)
-{
- LOCOContext * const l = avctx->priv_data;
- AVFrame *pic = &l->pic;
-
- if (pic->data[0])
- avctx->release_buffer(avctx, pic);
-
- return 0;
-}
-
AVCodec ff_loco_decoder = {
.name = "loco",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_LOCO,
.priv_data_size = sizeof(LOCOContext),
.init = decode_init,
- .close = decode_end,
.decode = decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("LOCO"),
/* get output buffer */
frame->nb_samples = 3 * (buf_size << (1 - is_mace3)) / avctx->channels;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
typedef struct MDECContext {
AVCodecContext *avctx;
DSPContext dsp;
- AVFrame picture;
+ ThreadFrame frame;
GetBitContext gb;
ScanTable scantable;
int version;
return 0;
}
-static inline void idct_put(MDECContext *a, int mb_x, int mb_y)
+static inline void idct_put(MDECContext *a, AVFrame *frame, int mb_x, int mb_y)
{
int16_t (*block)[64] = a->block;
- int linesize = a->picture.linesize[0];
+ int linesize = frame->linesize[0];
- uint8_t *dest_y = a->picture.data[0] + (mb_y * 16 * linesize ) + mb_x * 16;
- uint8_t *dest_cb = a->picture.data[1] + (mb_y * 8 * a->picture.linesize[1]) + mb_x * 8;
- uint8_t *dest_cr = a->picture.data[2] + (mb_y * 8 * a->picture.linesize[2]) + mb_x * 8;
+ uint8_t *dest_y = frame->data[0] + (mb_y * 16* linesize ) + mb_x * 16;
+ uint8_t *dest_cb = frame->data[1] + (mb_y * 8 * frame->linesize[1]) + mb_x * 8;
+ uint8_t *dest_cr = frame->data[2] + (mb_y * 8 * frame->linesize[2]) + mb_x * 8;
a->dsp.idct_put(dest_y, linesize, block[0]);
a->dsp.idct_put(dest_y + 8, linesize, block[1]);
a->dsp.idct_put(dest_y + 8 * linesize + 8, linesize, block[3]);
if (!(a->avctx->flags & CODEC_FLAG_GRAY)) {
- a->dsp.idct_put(dest_cb, a->picture.linesize[1], block[4]);
- a->dsp.idct_put(dest_cr, a->picture.linesize[2], block[5]);
+ a->dsp.idct_put(dest_cb, frame->linesize[1], block[4]);
+ a->dsp.idct_put(dest_cr, frame->linesize[2], block[5]);
}
}
MDECContext * const a = avctx->priv_data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
- AVFrame *picture = data;
- AVFrame * const p = &a->picture;
+ ThreadFrame frame = { .f = data };
int i, ret;
- if (p->data[0])
- ff_thread_release_buffer(avctx, p);
-
- p->reference = 0;
- if ((ret = ff_thread_get_buffer(avctx, p)) < 0) {
+ if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
- p->pict_type = AV_PICTURE_TYPE_I;
- p->key_frame = 1;
+ frame.f->pict_type = AV_PICTURE_TYPE_I;
+ frame.f->key_frame = 1;
av_fast_malloc(&a->bitstream_buffer, &a->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
if (!a->bitstream_buffer)
if ((ret = decode_mb(a, a->block)) < 0)
return ret;
- idct_put(a, a->mb_x, a->mb_y);
+ idct_put(a, frame.f, a->mb_x, a->mb_y);
}
}
- p->quality = a->qscale * FF_QP2LAMBDA;
- memset(p->qscale_table, a->qscale, a->mb_width);
-
- *picture = a->picture;
*got_frame = 1;
return (get_bits_count(&a->gb) + 31) / 32 * 4;
static av_cold int decode_init(AVCodecContext *avctx)
{
MDECContext * const a = avctx->priv_data;
- AVFrame *p = &a->picture;
a->mb_width = (avctx->coded_width + 15) / 16;
a->mb_height = (avctx->coded_height + 15) / 16;
- avctx->coded_frame = &a->picture;
a->avctx = avctx;
ff_dsputil_init(&a->dsp, avctx);
if (avctx->idct_algo == FF_IDCT_AUTO)
avctx->idct_algo = FF_IDCT_SIMPLE;
- p->qstride = 0;
- p->qscale_table = av_mallocz(a->mb_width);
avctx->pix_fmt = AV_PIX_FMT_YUVJ420P;
return 0;
static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
{
MDECContext * const a = avctx->priv_data;
- AVFrame *p = &a->picture;
- avctx->coded_frame = p;
a->avctx = avctx;
- p->qscale_table = av_mallocz( a->mb_width);
-
return 0;
}
{
MDECContext * const a = avctx->priv_data;
- if (a->picture.data[0])
- avctx->release_buffer(avctx, &a->picture);
av_freep(&a->bitstream_buffer);
- av_freep(&a->picture.qscale_table);
a->bitstream_buffer_size = 0;
return 0;
int cur_index;
int prev_index;
- AVFrame buf_ptrs [16];
+ ThreadFrame frames [16];
AVPicture flipped_ptrs[16];
DECLARE_ALIGNED(16, int16_t, dct_block)[64];
53, 60, 61, 54, 47, 55, 62, 63,
};
+static av_cold int mimic_decode_end(AVCodecContext *avctx)
+{
+ MimicContext *ctx = avctx->priv_data;
+ int i;
+
+ av_free(ctx->swap_buf);
+
+ for (i = 0; i < FF_ARRAY_ELEMS(ctx->frames); i++) {
+ if (ctx->frames[i].f)
+ ff_thread_release_buffer(avctx, &ctx->frames[i]);
+ av_frame_free(&ctx->frames[i].f);
+ }
+
+ if (!avctx->internal->is_copy)
+ ff_free_vlc(&ctx->vlc);
+
+ return 0;
+}
+
static av_cold int mimic_decode_init(AVCodecContext *avctx)
{
MimicContext *ctx = avctx->priv_data;
- int ret;
+ int ret, i;
+
+ avctx->internal->allocate_progress = 1;
ctx->prev_index = 0;
ctx->cur_index = 15;
ff_dsputil_init(&ctx->dsp, avctx);
ff_init_scantable(ctx->dsp.idct_permutation, &ctx->scantable, col_zag);
+ for (i = 0; i < FF_ARRAY_ELEMS(ctx->frames); i++) {
+ ctx->frames[i].f = av_frame_alloc();
+ if (!ctx->frames[i].f) {
+ mimic_decode_end(avctx);
+ return AVERROR(ENOMEM);
+ }
+ }
+
return 0;
}
static int mimic_decode_update_thread_context(AVCodecContext *avctx, const AVCodecContext *avctx_from)
{
MimicContext *dst = avctx->priv_data, *src = avctx_from->priv_data;
+ int i, ret;
if (avctx == avctx_from)
return 0;
dst->cur_index = src->next_cur_index;
dst->prev_index = src->next_prev_index;
- memcpy(dst->buf_ptrs, src->buf_ptrs, sizeof(src->buf_ptrs));
memcpy(dst->flipped_ptrs, src->flipped_ptrs, sizeof(src->flipped_ptrs));
- memset(&dst->buf_ptrs[dst->cur_index], 0, sizeof(AVFrame));
+ for (i = 0; i < FF_ARRAY_ELEMS(dst->frames); i++) {
+ ff_thread_release_buffer(avctx, &dst->frames[i]);
+ if (src->frames[i].f->data[0]) {
+ ret = ff_thread_ref_frame(&dst->frames[i], &src->frames[i]);
+ if (ret < 0)
+ return ret;
+ }
+ }
return 0;
}
uint8_t *p = ctx->flipped_ptrs[index].data[0];
if (index != ctx->cur_index && p) {
- ff_thread_await_progress(&ctx->buf_ptrs[index],
+ ff_thread_await_progress(&ctx->frames[index],
cur_row, 0);
p += src -
ctx->flipped_ptrs[ctx->prev_index].data[plane];
}
}
} else {
- ff_thread_await_progress(&ctx->buf_ptrs[ctx->prev_index],
+ ff_thread_await_progress(&ctx->frames[ctx->prev_index],
cur_row, 0);
ctx->dsp.put_pixels_tab[1][0](dst, src, stride, 8);
}
src += (stride - ctx->num_hblocks[plane]) << 3;
dst += (stride - ctx->num_hblocks[plane]) << 3;
- ff_thread_report_progress(&ctx->buf_ptrs[ctx->cur_index],
+ ff_thread_report_progress(&ctx->frames[ctx->cur_index],
cur_row++, 0);
}
}
return AVERROR_PATCHWELCOME;
}
- if (is_pframe && !ctx->buf_ptrs[ctx->prev_index].data[0]) {
+ if (is_pframe && !ctx->frames[ctx->prev_index].f->data[0]) {
av_log(avctx, AV_LOG_ERROR, "decoding must start with keyframe\n");
return AVERROR_INVALIDDATA;
}
- ctx->buf_ptrs[ctx->cur_index].reference = 1;
- ctx->buf_ptrs[ctx->cur_index].pict_type = is_pframe ? AV_PICTURE_TYPE_P :
- AV_PICTURE_TYPE_I;
- if ((res = ff_thread_get_buffer(avctx, &ctx->buf_ptrs[ctx->cur_index])) < 0) {
+ ff_thread_release_buffer(avctx, &ctx->frames[ctx->cur_index]);
+ ctx->frames[ctx->cur_index].f->pict_type = is_pframe ? AV_PICTURE_TYPE_P :
+ AV_PICTURE_TYPE_I;
+ if ((res = ff_thread_get_buffer(avctx, &ctx->frames[ctx->cur_index],
+ AV_GET_BUFFER_FLAG_REF)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return res;
}
ctx->next_cur_index = (ctx->cur_index - 1) & 15;
prepare_avpic(ctx, &ctx->flipped_ptrs[ctx->cur_index],
- &ctx->buf_ptrs[ctx->cur_index]);
+ ctx->frames[ctx->cur_index].f);
ff_thread_finish_setup(avctx);
init_get_bits(&ctx->gb, ctx->swap_buf, swap_buf_size << 3);
res = decode(ctx, quality, num_coeffs, !is_pframe);
- ff_thread_report_progress(&ctx->buf_ptrs[ctx->cur_index], INT_MAX, 0);
+ ff_thread_report_progress(&ctx->frames[ctx->cur_index], INT_MAX, 0);
if (res < 0) {
if (!(avctx->active_thread_type & FF_THREAD_FRAME)) {
- ff_thread_release_buffer(avctx, &ctx->buf_ptrs[ctx->cur_index]);
+ ff_thread_release_buffer(avctx, &ctx->frames[ctx->cur_index]);
return res;
}
}
- *(AVFrame*)data = ctx->buf_ptrs[ctx->cur_index];
+ if ((res = av_frame_ref(data, ctx->frames[ctx->cur_index].f)) < 0)
+ return res;
*got_frame = 1;
ctx->prev_index = ctx->next_prev_index;
ctx->cur_index = ctx->next_cur_index;
/* Only release frames that aren't used for backreferences anymore */
- if (ctx->buf_ptrs[ctx->cur_index].data[0])
- ff_thread_release_buffer(avctx, &ctx->buf_ptrs[ctx->cur_index]);
+ ff_thread_release_buffer(avctx, &ctx->frames[ctx->cur_index]);
return buf_size;
}
-static av_cold int mimic_decode_end(AVCodecContext *avctx)
+static av_cold int mimic_init_thread_copy(AVCodecContext *avctx)
{
MimicContext *ctx = avctx->priv_data;
int i;
- av_free(ctx->swap_buf);
-
- if (avctx->internal->is_copy)
- return 0;
-
- for (i = 0; i < 16; i++)
- if (ctx->buf_ptrs[i].data[0])
- ff_thread_release_buffer(avctx, &ctx->buf_ptrs[i]);
- ff_free_vlc(&ctx->vlc);
+ for (i = 0; i < FF_ARRAY_ELEMS(ctx->frames); i++) {
+ ctx->frames[i].f = av_frame_alloc();
+ if (!ctx->frames[i].f) {
+ mimic_decode_end(avctx);
+ return AVERROR(ENOMEM);
+ }
+ }
return 0;
}
.decode = mimic_decode_frame,
.capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
.long_name = NULL_IF_CONFIG_SMALL("Mimic"),
- .update_thread_context = ONLY_IF_THREADS_ENABLED(mimic_decode_update_thread_context)
+ .update_thread_context = ONLY_IF_THREADS_ENABLED(mimic_decode_update_thread_context),
+ .init_thread_copy = ONLY_IF_THREADS_ENABLED(mimic_init_thread_copy),
};
int buf_size = avpkt->size;
MJpegDecodeContext *s = avctx->priv_data;
const uint8_t *buf_end, *buf_ptr;
- AVFrame *picture = data;
GetBitContext hgb; /* for the header */
uint32_t dqt_offs, dht_offs, sof_offs, sos_offs, second_field_offs;
uint32_t field_size, sod_offs;
+ int ret;
buf_ptr = buf;
buf_end = buf + buf_size;
//XXX FIXME factorize, this looks very similar to the EOI code
- *picture= *s->picture_ptr;
+ if ((ret = av_frame_ref(data, s->picture_ptr)) < 0)
+ return ret;
*got_frame = 1;
- if(!s->lossless){
- picture->quality= FFMAX3(s->qscale[0], s->qscale[1], s->qscale[2]);
- picture->qstride= 0;
- picture->qscale_table= s->qscale_table;
- memset(picture->qscale_table, picture->quality, (s->width+15)/16);
- if(avctx->debug & FF_DEBUG_QP)
- av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", picture->quality);
- picture->quality*= FF_QP2LAMBDA;
+ if (!s->lossless && avctx->debug & FF_DEBUG_QP) {
+ av_log(avctx, AV_LOG_DEBUG, "QP: %d\n",
+ FFMAX3(s->qscale[0], s->qscale[1], s->qscale[2]));
}
return buf_size;
/* if different size, realloc/alloc picture */
/* XXX: also check h_count and v_count */
if (width != s->width || height != s->height) {
- av_freep(&s->qscale_table);
-
s->width = width;
s->height = height;
s->interlaced = 0;
avcodec_set_dimensions(s->avctx, width, height);
- s->qscale_table = av_mallocz((s->width + 15) / 16);
s->first_picture = 0;
}
s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
}
- if (s->picture_ptr->data[0])
- s->avctx->release_buffer(s->avctx, s->picture_ptr);
-
- if (ff_get_buffer(s->avctx, s->picture_ptr) < 0) {
+ av_frame_unref(s->picture_ptr);
+ if (ff_get_buffer(s->avctx, s->picture_ptr, AV_GET_BUFFER_FLAG_REF) < 0) {
av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
int unescaped_buf_size;
int start_code;
int ret = 0;
- AVFrame *picture = data;
s->got_picture = 0; // picture from previous image can not be reused
buf_ptr = buf;
if (s->bottom_field == !s->interlace_polarity)
goto not_the_end;
}
- *picture = *s->picture_ptr;
+ if ((ret = av_frame_ref(data, s->picture_ptr)) < 0)
+ return ret;
*got_frame = 1;
- if (!s->lossless) {
- picture->quality = FFMAX3(s->qscale[0],
- s->qscale[1],
- s->qscale[2]);
- picture->qstride = 0;
- picture->qscale_table = s->qscale_table;
- memset(picture->qscale_table, picture->quality,
- (s->width + 15) / 16);
- if (avctx->debug & FF_DEBUG_QP)
- av_log(avctx, AV_LOG_DEBUG,
- "QP: %d\n", picture->quality);
- picture->quality *= FF_QP2LAMBDA;
+ if (!s->lossless &&
+ avctx->debug & FF_DEBUG_QP) {
+ av_log(avctx, AV_LOG_DEBUG,
+ "QP: %d\n", FFMAX3(s->qscale[0],
+ s->qscale[1],
+ s->qscale[2]));
}
goto the_end;
MJpegDecodeContext *s = avctx->priv_data;
int i, j;
- if (s->picture_ptr && s->picture_ptr->data[0])
- avctx->release_buffer(avctx, s->picture_ptr);
+ if (s->picture_ptr)
+ av_frame_unref(s->picture_ptr);
av_free(s->buffer);
- av_free(s->qscale_table);
av_freep(&s->ljpeg_buffer);
s->ljpeg_buffer_size = 0;
/* get output buffer */
frame->nb_samples = s->blockpos;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "bytestream.h"
+#include "internal.h"
#define MM_PREAMBLE_SIZE 6
avctx->pix_fmt = AV_PIX_FMT_PAL8;
- s->frame.reference = 1;
-
return 0;
}
buf_size -= MM_PREAMBLE_SIZE;
bytestream2_init(&s->gb, buf, buf_size);
- if (avctx->reget_buffer(avctx, &s->frame) < 0) {
+ if ((res = ff_reget_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
- return -1;
+ return res;
}
switch(type) {
memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE);
+ if ((res = av_frame_ref(data, &s->frame)) < 0)
+ return res;
+
*got_frame = 1;
- *(AVFrame*)data = s->frame;
return buf_size;
}
{
MmContext *s = avctx->priv_data;
- if(s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
+ av_frame_unref(&s->frame);
return 0;
}
if(mv4){
int mot_xy= s->block_index[0];
- s->current_picture.f.motion_val[0][mot_xy ][0] = mx;
- s->current_picture.f.motion_val[0][mot_xy ][1] = my;
- s->current_picture.f.motion_val[0][mot_xy + 1][0] = mx;
- s->current_picture.f.motion_val[0][mot_xy + 1][1] = my;
+ s->current_picture.motion_val[0][mot_xy ][0] = mx;
+ s->current_picture.motion_val[0][mot_xy ][1] = my;
+ s->current_picture.motion_val[0][mot_xy + 1][0] = mx;
+ s->current_picture.motion_val[0][mot_xy + 1][1] = my;
mot_xy += s->b8_stride;
- s->current_picture.f.motion_val[0][mot_xy ][0] = mx;
- s->current_picture.f.motion_val[0][mot_xy ][1] = my;
- s->current_picture.f.motion_val[0][mot_xy + 1][0] = mx;
- s->current_picture.f.motion_val[0][mot_xy + 1][1] = my;
+ s->current_picture.motion_val[0][mot_xy ][0] = mx;
+ s->current_picture.motion_val[0][mot_xy ][1] = my;
+ s->current_picture.motion_val[0][mot_xy + 1][0] = mx;
+ s->current_picture.motion_val[0][mot_xy + 1][1] = my;
}
}
const int mot_stride = s->b8_stride;
const int mot_xy = s->block_index[block];
- P_LEFT[0] = s->current_picture.f.motion_val[0][mot_xy - 1][0];
- P_LEFT[1] = s->current_picture.f.motion_val[0][mot_xy - 1][1];
+ P_LEFT[0] = s->current_picture.motion_val[0][mot_xy - 1][0];
+ P_LEFT[1] = s->current_picture.motion_val[0][mot_xy - 1][1];
if(P_LEFT[0] > (c->xmax<<shift)) P_LEFT[0] = (c->xmax<<shift);
c->pred_x= pred_x4= P_LEFT[0];
c->pred_y= pred_y4= P_LEFT[1];
} else {
- P_TOP[0] = s->current_picture.f.motion_val[0][mot_xy - mot_stride ][0];
- P_TOP[1] = s->current_picture.f.motion_val[0][mot_xy - mot_stride ][1];
- P_TOPRIGHT[0] = s->current_picture.f.motion_val[0][mot_xy - mot_stride + off[block]][0];
- P_TOPRIGHT[1] = s->current_picture.f.motion_val[0][mot_xy - mot_stride + off[block]][1];
+ P_TOP[0] = s->current_picture.motion_val[0][mot_xy - mot_stride ][0];
+ P_TOP[1] = s->current_picture.motion_val[0][mot_xy - mot_stride ][1];
+ P_TOPRIGHT[0] = s->current_picture.motion_val[0][mot_xy - mot_stride + off[block]][0];
+ P_TOPRIGHT[1] = s->current_picture.motion_val[0][mot_xy - mot_stride + off[block]][1];
if(P_TOP[1] > (c->ymax<<shift)) P_TOP[1] = (c->ymax<<shift);
if(P_TOPRIGHT[0] < (c->xmin<<shift)) P_TOPRIGHT[0]= (c->xmin<<shift);
if(P_TOPRIGHT[0] > (c->xmax<<shift)) P_TOPRIGHT[0]= (c->xmax<<shift);
my4_sum+= my4;
}
- s->current_picture.f.motion_val[0][s->block_index[block]][0] = mx4;
- s->current_picture.f.motion_val[0][s->block_index[block]][1] = my4;
+ s->current_picture.motion_val[0][s->block_index[block]][0] = mx4;
+ s->current_picture.motion_val[0][s->block_index[block]][1] = my4;
if(mx4 != mx || my4 != my) same=0;
}
const int mot_stride = s->b8_stride;
const int mot_xy = s->block_index[0];
- P_LEFT[0] = s->current_picture.f.motion_val[0][mot_xy - 1][0];
- P_LEFT[1] = s->current_picture.f.motion_val[0][mot_xy - 1][1];
+ P_LEFT[0] = s->current_picture.motion_val[0][mot_xy - 1][0];
+ P_LEFT[1] = s->current_picture.motion_val[0][mot_xy - 1][1];
if(P_LEFT[0] > (c->xmax<<shift)) P_LEFT[0] = (c->xmax<<shift);
if(!s->first_slice_line) {
- P_TOP[0] = s->current_picture.f.motion_val[0][mot_xy - mot_stride ][0];
- P_TOP[1] = s->current_picture.f.motion_val[0][mot_xy - mot_stride ][1];
- P_TOPRIGHT[0] = s->current_picture.f.motion_val[0][mot_xy - mot_stride + 2][0];
- P_TOPRIGHT[1] = s->current_picture.f.motion_val[0][mot_xy - mot_stride + 2][1];
+ P_TOP[0] = s->current_picture.motion_val[0][mot_xy - mot_stride ][0];
+ P_TOP[1] = s->current_picture.motion_val[0][mot_xy - mot_stride ][1];
+ P_TOPRIGHT[0] = s->current_picture.motion_val[0][mot_xy - mot_stride + 2][0];
+ P_TOPRIGHT[1] = s->current_picture.motion_val[0][mot_xy - mot_stride + 2][1];
if(P_TOP[1] > (c->ymax<<shift)) P_TOP[1] = (c->ymax<<shift);
if(P_TOPRIGHT[0] < (c->xmin<<shift)) P_TOPRIGHT[0]= (c->xmin<<shift);
if(P_TOPRIGHT[1] > (c->ymax<<shift)) P_TOPRIGHT[1]= (c->ymax<<shift);
if(intra_score < dmin){
mb_type= CANDIDATE_MB_TYPE_INTRA;
- s->current_picture.f.mb_type[mb_y*s->mb_stride + mb_x] = CANDIDATE_MB_TYPE_INTRA; //FIXME cleanup
+ s->current_picture.mb_type[mb_y*s->mb_stride + mb_x] = CANDIDATE_MB_TYPE_INTRA; //FIXME cleanup
}else
- s->current_picture.f.mb_type[mb_y*s->mb_stride + mb_x] = 0;
+ s->current_picture.mb_type[mb_y*s->mb_stride + mb_x] = 0;
{
int p_score= FFMIN(vard, varc-500+(s->lambda2>>FF_LAMBDA_SHIFT)*100);
ymin= xmin=(-32)>>shift;
ymax= xmax= 31>>shift;
- if (IS_8X8(s->next_picture.f.mb_type[mot_xy])) {
+ if (IS_8X8(s->next_picture.mb_type[mot_xy])) {
s->mv_type= MV_TYPE_8X8;
}else{
s->mv_type= MV_TYPE_16X16;
int index= s->block_index[i];
int min, max;
- c->co_located_mv[i][0] = s->next_picture.f.motion_val[0][index][0];
- c->co_located_mv[i][1] = s->next_picture.f.motion_val[0][index][1];
+ c->co_located_mv[i][0] = s->next_picture.motion_val[0][index][0];
+ c->co_located_mv[i][1] = s->next_picture.motion_val[0][index][1];
c->direct_basis_mv[i][0]= c->co_located_mv[i][0]*time_pb/time_pp + ((i& 1)<<(shift+3));
c->direct_basis_mv[i][1]= c->co_located_mv[i][1]*time_pb/time_pp + ((i>>1)<<(shift+3));
// c->direct_basis_mv[1][i][0]= c->co_located_mv[i][0]*(time_pb - time_pp)/time_pp + ((i &1)<<(shift+3);
c->skip=0;
- if (s->codec_id == AV_CODEC_ID_MPEG4 && s->next_picture.f.mbskip_table[xy]) {
+ if (s->codec_id == AV_CODEC_ID_MPEG4 && s->next_picture.mbskip_table[xy]) {
int score= direct_search(s, mb_x, mb_y); //FIXME just check 0,0
score= ((unsigned)(score*score + 128*256))>>16;
int block;
for(block=0; block<4; block++){
int off= (block& 1) + (block>>1)*wrap;
- int mx = s->current_picture.f.motion_val[0][ xy + off ][0];
- int my = s->current_picture.f.motion_val[0][ xy + off ][1];
+ int mx = s->current_picture.motion_val[0][ xy + off ][0];
+ int my = s->current_picture.motion_val[0][ xy + off ][1];
if( mx >=range || mx <-range
|| my >=range || my <-range){
s->mb_type[i] &= ~CANDIDATE_MB_TYPE_INTER4V;
s->mb_type[i] |= CANDIDATE_MB_TYPE_INTRA;
- s->current_picture.f.mb_type[i] = CANDIDATE_MB_TYPE_INTRA;
+ s->current_picture.mb_type[i] = CANDIDATE_MB_TYPE_INTRA;
}
}
}
#include "avcodec.h"
#include "get_bits.h"
#include "dsputil.h"
+#include "internal.h"
#define MAX_HUFF_CODES 16
int buf_size = avpkt->size;
MotionPixelsContext *mp = avctx->priv_data;
GetBitContext gb;
- int i, count1, count2, sz;
+ int i, count1, count2, sz, ret;
- mp->frame.reference = 1;
- mp->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
- if (avctx->reget_buffer(avctx, &mp->frame)) {
+ if ((ret = ff_reget_buffer(avctx, &mp->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
- return -1;
+ return ret;
}
/* le32 bitstream msb first */
ff_free_vlc(&mp->vlc);
end:
+ if ((ret = av_frame_ref(data, &mp->frame)) < 0)
+ return ret;
*got_frame = 1;
- *(AVFrame *)data = mp->frame;
return buf_size;
}
av_freep(&mp->vpt);
av_freep(&mp->hpt);
av_freep(&mp->bswapbuf);
- if (mp->frame.data[0])
- avctx->release_buffer(avctx, &mp->frame);
+ av_frame_unref(&mp->frame);
return 0;
}
/* get output buffer */
frame->nb_samples = last_frame ? c->lastframelen : MPC_FRAME_SIZE;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
/* get output buffer */
frame->nb_samples = MPC_FRAME_SIZE;
- if ((res = ff_get_buffer(avctx, frame)) < 0) {
+ if ((res = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return res;
}
if (s->mb_skip_run-- != 0) {
if (s->pict_type == AV_PICTURE_TYPE_P) {
s->mb_skipped = 1;
- s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride] = MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
+ s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] = MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
} else {
int mb_type;
if (s->mb_x)
- mb_type = s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1];
+ mb_type = s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1];
else
- mb_type = s->current_picture.f.mb_type[s->mb_width + (s->mb_y - 1) * s->mb_stride - 1]; // FIXME not sure if this is allowed in MPEG at all
+ mb_type = s->current_picture.mb_type[s->mb_width + (s->mb_y - 1) * s->mb_stride - 1]; // FIXME not sure if this is allowed in MPEG at all
if (IS_INTRA(mb_type))
return -1;
- s->current_picture.f.mb_type[s->mb_x + s->mb_y*s->mb_stride] =
+ s->current_picture.mb_type[s->mb_x + s->mb_y*s->mb_stride] =
mb_type | MB_TYPE_SKIP;
-// assert(s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1] & (MB_TYPE_16x16 | MB_TYPE_16x8));
+// assert(s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1] & (MB_TYPE_16x16 | MB_TYPE_16x8));
if ((s->mv[0][0][0] | s->mv[0][0][1] | s->mv[1][0][0] | s->mv[1][0][1]) == 0)
s->mb_skipped = 1;
}
}
- s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride] = mb_type;
+ s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] = mb_type;
return 0;
}
/* start frame decoding */
if (s->first_field || s->picture_structure == PICT_FRAME) {
+ AVFrameSideData *pan_scan;
+
if (ff_MPV_frame_start(s, avctx) < 0)
return -1;
}
}
- *s->current_picture_ptr->f.pan_scan = s1->pan_scan;
+ pan_scan = av_frame_new_side_data(&s->current_picture_ptr->f,
+ AV_FRAME_DATA_PANSCAN,
+ sizeof(s1->pan_scan));
+ if (!pan_scan)
+ return AVERROR(ENOMEM);
+ memcpy(pan_scan->data, &s1->pan_scan, sizeof(s1->pan_scan));
if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME))
ff_thread_finish_setup(avctx);
if (mpeg_decode_mb(s, s->block) < 0)
return -1;
- if (s->current_picture.f.motion_val[0] && !s->encoding) { // note motion_val is normally NULL unless we want to extract the MVs
+ if (s->current_picture.motion_val[0] && !s->encoding) { // note motion_val is normally NULL unless we want to extract the MVs
const int wrap = s->b8_stride;
int xy = s->mb_x * 2 + s->mb_y * 2 * wrap;
int b8_xy = 4 * (s->mb_x + s->mb_y * s->mb_stride);
motion_y = s->mv[dir][i][1];
}
- s->current_picture.f.motion_val[dir][xy ][0] = motion_x;
- s->current_picture.f.motion_val[dir][xy ][1] = motion_y;
- s->current_picture.f.motion_val[dir][xy + 1][0] = motion_x;
- s->current_picture.f.motion_val[dir][xy + 1][1] = motion_y;
- s->current_picture.f.ref_index [dir][b8_xy ] =
- s->current_picture.f.ref_index [dir][b8_xy + 1] = s->field_select[dir][i];
+ s->current_picture.motion_val[dir][xy ][0] = motion_x;
+ s->current_picture.motion_val[dir][xy ][1] = motion_y;
+ s->current_picture.motion_val[dir][xy + 1][0] = motion_x;
+ s->current_picture.motion_val[dir][xy + 1][1] = motion_y;
+ s->current_picture.ref_index [dir][b8_xy ] =
+ s->current_picture.ref_index [dir][b8_xy + 1] = s->field_select[dir][i];
assert(s->field_select[dir][i] == 0 || s->field_select[dir][i] == 1);
}
xy += wrap;
if (/*s->mb_y << field_pic == s->mb_height &&*/ !s->first_field) {
/* end of image */
- s->current_picture_ptr->f.qscale_type = FF_QSCALE_TYPE_MPEG2;
-
ff_er_frame_end(&s->er);
ff_MPV_frame_end(s);
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
- *pict = s->current_picture_ptr->f;
- ff_print_debug_info(s, pict);
+ int ret = av_frame_ref(pict, &s->current_picture_ptr->f);
+ if (ret < 0)
+ return ret;
+ ff_print_debug_info(s, s->current_picture_ptr);
} else {
if (avctx->active_thread_type & FF_THREAD_FRAME)
s->picture_number++;
/* latency of 1 frame for I- and P-frames */
/* XXX: use another variable than picture_number */
if (s->last_picture_ptr != NULL) {
- *pict = s->last_picture_ptr->f;
- ff_print_debug_info(s, pict);
+ int ret = av_frame_ref(pict, &s->last_picture_ptr->f);
+ if (ret < 0)
+ return ret;
+ ff_print_debug_info(s, s->last_picture_ptr);
}
}
if (CONFIG_MPEG_VDPAU_DECODER && avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
ff_vdpau_mpeg_picture_complete(s2, buf, buf_size, s->slice_count);
- if (slice_end(avctx, picture)) {
+ ret = slice_end(avctx, picture);
+ if (ret < 0)
+ return ret;
+ else if (ret) {
if (s2->last_picture_ptr || s2->low_delay) //FIXME merge with the stuff in mpeg_decode_slice
*got_output = 1;
}
if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == SEQ_END_CODE)) {
/* special case for last picture */
if (s2->low_delay == 0 && s2->next_picture_ptr) {
- *picture = s2->next_picture_ptr->f;
+ int ret = av_frame_ref(picture, &s2->next_picture_ptr->f);
+ if (ret < 0)
+ return ret;
+
s2->next_picture_ptr = NULL;
*got_output = 1;
uint16_t time_pb= s->pb_time;
int p_mx, p_my;
- p_mx = s->next_picture.f.motion_val[0][xy][0];
+ p_mx = s->next_picture.motion_val[0][xy][0];
if((unsigned)(p_mx + tab_bias) < tab_size){
s->mv[0][i][0] = s->direct_scale_mv[0][p_mx + tab_bias] + mx;
s->mv[1][i][0] = mx ? s->mv[0][i][0] - p_mx
s->mv[1][i][0] = mx ? s->mv[0][i][0] - p_mx
: p_mx*(time_pb - time_pp)/time_pp;
}
- p_my = s->next_picture.f.motion_val[0][xy][1];
+ p_my = s->next_picture.motion_val[0][xy][1];
if((unsigned)(p_my + tab_bias) < tab_size){
s->mv[0][i][1] = s->direct_scale_mv[0][p_my + tab_bias] + my;
s->mv[1][i][1] = my ? s->mv[0][i][1] - p_my
*/
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my){
const int mb_index= s->mb_x + s->mb_y*s->mb_stride;
- const int colocated_mb_type = s->next_picture.f.mb_type[mb_index];
+ const int colocated_mb_type = s->next_picture.mb_type[mb_index];
uint16_t time_pp;
uint16_t time_pb;
int i;
} else if(IS_INTERLACED(colocated_mb_type)){
s->mv_type = MV_TYPE_FIELD;
for(i=0; i<2; i++){
- int field_select = s->next_picture.f.ref_index[0][4 * mb_index + 2 * i];
+ int field_select = s->next_picture.ref_index[0][4 * mb_index + 2 * i];
s->field_select[0][i]= field_select;
s->field_select[1][i]= i;
if(s->top_field_first){
*/
#include "error_resilience.h"
+#include "internal.h"
#include "mpegvideo.h"
#include "mpeg4video.h"
#include "h263.h"
{
int i;
int16_t *ac_val, *ac_val1;
- int8_t * const qscale_table = s->current_picture.f.qscale_table;
+ int8_t * const qscale_table = s->current_picture.qscale_table;
/* find prediction */
ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
if(s->pict_type == AV_PICTURE_TYPE_B){
int mb_x = 0, mb_y = 0;
- while (s->next_picture.f.mbskip_table[s->mb_index2xy[mb_num]]) {
+ while (s->next_picture.mbskip_table[s->mb_index2xy[mb_num]]) {
if (!mb_x)
- ff_thread_await_progress(&s->next_picture_ptr->f, mb_y++, 0);
+ ff_thread_await_progress(&s->next_picture_ptr->tf, mb_y++, 0);
mb_num++;
if (++mb_x == s->mb_width) mb_x = 0;
}
}while(cbpc == 8);
s->cbp_table[xy]= cbpc & 3;
- s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA;
+ s->current_picture.mb_type[xy] = MB_TYPE_INTRA;
s->mb_intra = 1;
if(cbpc & 4) {
ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
}
- s->current_picture.f.qscale_table[xy]= s->qscale;
+ s->current_picture.qscale_table[xy]= s->qscale;
s->mbintra_table[xy]= 1;
for(i=0; i<6; i++){
s->pred_dir_table[xy]= dir;
}else{ /* P/S_TYPE */
int mx, my, pred_x, pred_y, bits;
- int16_t * const mot_val = s->current_picture.f.motion_val[0][s->block_index[0]];
+ int16_t * const mot_val = s->current_picture.motion_val[0][s->block_index[0]];
const int stride= s->b8_stride*2;
try_again:
if(bits&0x10000){
/* skip mb */
if(s->pict_type==AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE){
- s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_GMC | MB_TYPE_L0;
+ s->current_picture.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_GMC | MB_TYPE_L0;
mx= get_amv(s, 0);
my= get_amv(s, 1);
}else{
- s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
+ s->current_picture.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
mx=my=0;
}
mot_val[0 ]= mot_val[2 ]=
s->mb_intra = ((cbpc & 4) != 0);
if(s->mb_intra){
- s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA;
+ s->current_picture.mb_type[xy] = MB_TYPE_INTRA;
s->mbintra_table[xy]= 1;
mot_val[0 ]= mot_val[2 ]=
mot_val[0+stride]= mot_val[2+stride]= 0;
my = ff_h263_decode_motion(s, pred_y, s->f_code);
if (my >= 0xffff)
return -1;
- s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
+ s->current_picture.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
} else {
mx = get_amv(s, 0);
my = get_amv(s, 1);
- s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_GMC | MB_TYPE_L0;
+ s->current_picture.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_GMC | MB_TYPE_L0;
}
mot_val[0 ]= mot_val[2 ] =
mot_val[1+stride]= mot_val[3+stride]= my;
} else {
int i;
- s->current_picture.f.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
+ s->current_picture.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
for(i=0;i<4;i++) {
int16_t *mot_val= ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
mx = ff_h263_decode_motion(s, pred_x, s->f_code);
}
s->cbp_table[xy]|= cbpy<<2;
- s->current_picture.f.mb_type[xy] |= ac_pred*MB_TYPE_ACPRED;
+ s->current_picture.mb_type[xy] |= ac_pred*MB_TYPE_ACPRED;
}else{ /* P || S_TYPE */
- if (IS_INTRA(s->current_picture.f.mb_type[xy])) {
+ if (IS_INTRA(s->current_picture.mb_type[xy])) {
int dir=0,i;
int ac_pred = get_bits1(&s->gb);
int cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
if(s->cbp_table[xy] & 8) {
ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
}
- s->current_picture.f.qscale_table[xy] = s->qscale;
+ s->current_picture.qscale_table[xy] = s->qscale;
for(i=0; i<6; i++){
int dc_pred_dir;
}
s->cbp_table[xy]&= 3; //remove dquant
s->cbp_table[xy]|= cbpy<<2;
- s->current_picture.f.mb_type[xy] |= ac_pred*MB_TYPE_ACPRED;
+ s->current_picture.mb_type[xy] |= ac_pred*MB_TYPE_ACPRED;
s->pred_dir_table[xy]= dir;
- } else if (IS_SKIP(s->current_picture.f.mb_type[xy])) {
- s->current_picture.f.qscale_table[xy] = s->qscale;
+ } else if (IS_SKIP(s->current_picture.mb_type[xy])) {
+ s->current_picture.qscale_table[xy] = s->qscale;
s->cbp_table[xy]= 0;
}else{
int cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
if(s->cbp_table[xy] & 8) {
ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
}
- s->current_picture.f.qscale_table[xy] = s->qscale;
+ s->current_picture.qscale_table[xy] = s->qscale;
s->cbp_table[xy]&= 3; //remove dquant
s->cbp_table[xy]|= (cbpy^0xf)<<2;
int cbp, mb_type;
const int xy= s->mb_x + s->mb_y*s->mb_stride;
- mb_type = s->current_picture.f.mb_type[xy];
+ mb_type = s->current_picture.mb_type[xy];
cbp = s->cbp_table[xy];
s->use_intra_dc_vlc= s->qscale < s->intra_dc_threshold;
- if (s->current_picture.f.qscale_table[xy] != s->qscale) {
- ff_set_qscale(s, s->current_picture.f.qscale_table[xy]);
+ if (s->current_picture.qscale_table[xy] != s->qscale) {
+ ff_set_qscale(s, s->current_picture.qscale_table[xy]);
}
if (s->pict_type == AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
int i;
for(i=0; i<4; i++){
- s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
- s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
+ s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
+ s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
}
s->mb_intra = IS_INTRA(mb_type);
s->mb_skipped = 1;
}
}else if(s->mb_intra){
- s->ac_pred = IS_ACPRED(s->current_picture.f.mb_type[xy]);
+ s->ac_pred = IS_ACPRED(s->current_picture.mb_type[xy]);
}else if(!s->mb_intra){
// s->mcsel= 0; //FIXME do we need to init that
}
} else { /* I-Frame */
s->mb_intra = 1;
- s->ac_pred = IS_ACPRED(s->current_picture.f.mb_type[xy]);
+ s->ac_pred = IS_ACPRED(s->current_picture.mb_type[xy]);
}
if (!IS_SKIP(mb_type)) {
s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16;
if(s->pict_type==AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE){
- s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_GMC | MB_TYPE_16x16 | MB_TYPE_L0;
+ s->current_picture.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_GMC | MB_TYPE_16x16 | MB_TYPE_L0;
s->mcsel=1;
s->mv[0][0][0]= get_amv(s, 0);
s->mv[0][0][1]= get_amv(s, 1);
s->mb_skipped = 0;
}else{
- s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
+ s->current_picture.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
s->mcsel=0;
s->mv[0][0][0] = 0;
s->mv[0][0][1] = 0;
s->mv_dir = MV_DIR_FORWARD;
if ((cbpc & 16) == 0) {
if(s->mcsel){
- s->current_picture.f.mb_type[xy] = MB_TYPE_GMC | MB_TYPE_16x16 | MB_TYPE_L0;
+ s->current_picture.mb_type[xy] = MB_TYPE_GMC | MB_TYPE_16x16 | MB_TYPE_L0;
/* 16x16 global motion prediction */
s->mv_type = MV_TYPE_16X16;
mx= get_amv(s, 0);
s->mv[0][0][0] = mx;
s->mv[0][0][1] = my;
}else if((!s->progressive_sequence) && get_bits1(&s->gb)){
- s->current_picture.f.mb_type[xy] = MB_TYPE_16x8 | MB_TYPE_L0 | MB_TYPE_INTERLACED;
+ s->current_picture.mb_type[xy] = MB_TYPE_16x8 | MB_TYPE_L0 | MB_TYPE_INTERLACED;
/* 16x8 field motion prediction */
s->mv_type= MV_TYPE_FIELD;
s->mv[0][i][1] = my;
}
}else{
- s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
+ s->current_picture.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
/* 16x16 motion prediction */
s->mv_type = MV_TYPE_16X16;
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
s->mv[0][0][1] = my;
}
} else {
- s->current_picture.f.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
+ s->current_picture.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
s->mv_type = MV_TYPE_8X8;
for(i=0;i<4;i++) {
mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
s->last_mv[i][1][1]= 0;
}
- ff_thread_await_progress(&s->next_picture_ptr->f, s->mb_y, 0);
+ ff_thread_await_progress(&s->next_picture_ptr->tf, s->mb_y, 0);
}
/* if we skipped it in the future P Frame than skip it now too */
- s->mb_skipped = s->next_picture.f.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]; // Note, skiptab=0 if last was GMC
+ s->mb_skipped = s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]; // Note, skiptab=0 if last was GMC
if(s->mb_skipped){
/* skip mb */
s->mv[0][0][1] = 0;
s->mv[1][0][0] = 0;
s->mv[1][0][1] = 0;
- s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
+ s->current_picture.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
goto end;
}
s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
mb_type |= ff_mpeg4_set_direct_mv(s, mx, my);
}
- s->current_picture.f.mb_type[xy] = mb_type;
+ s->current_picture.mb_type[xy] = mb_type;
} else { /* I-Frame */
do{
cbpc = get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2);
intra:
s->ac_pred = get_bits1(&s->gb);
if(s->ac_pred)
- s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA | MB_TYPE_ACPRED;
+ s->current_picture.mb_type[xy] = MB_TYPE_INTRA | MB_TYPE_ACPRED;
else
- s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA;
+ s->current_picture.mb_type[xy] = MB_TYPE_INTRA;
cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
if(cbpy<0){
if(mpeg4_is_resync(s)){
const int delta= s->mb_x + 1 == s->mb_width ? 2 : 1;
- if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture.f.mbskip_table[xy + delta]) {
- ff_thread_await_progress(&s->next_picture_ptr->f,
+ if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture.mbskip_table[xy + delta]) {
+ ff_thread_await_progress(&s->next_picture_ptr->tf,
(s->mb_x + delta >= s->mb_width) ? FFMIN(s->mb_y+1, s->mb_height-1) : s->mb_y, 0);
}
- if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture.f.mbskip_table[xy + delta])
+ if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture.mbskip_table[xy + delta])
return SLICE_OK;
return SLICE_END;
}
s->time_increment_bits = 4; /* default value for broken headers */
avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
+ avctx->internal->allocate_progress = 1;
+
return 0;
}
{
int score= 0;
int i, n;
- int8_t * const qscale_table = s->current_picture.f.qscale_table;
+ int8_t * const qscale_table = s->current_picture.qscale_table;
memcpy(zigzag_last_index, s->block_last_index, sizeof(int)*6);
*/
void ff_clean_mpeg4_qscales(MpegEncContext *s){
int i;
- int8_t * const qscale_table = s->current_picture.f.qscale_table;
+ int8_t * const qscale_table = s->current_picture.qscale_table;
ff_clean_h263_qscales(s);
assert(mb_type>=0);
/* nothing to do if this MB was skipped in the next P Frame */
- if (s->next_picture.f.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]) { //FIXME avoid DCT & ...
+ if (s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]) { //FIXME avoid DCT & ...
s->skip_count++;
s->mv[0][0][0]=
s->mv[0][0][1]=
break;
b_pic = pic->f.data[0] + offset;
- if (pic->f.type != FF_BUFFER_TYPE_SHARED)
+ if (!pic->shared)
b_pic+= INPLACE_OFFSET;
diff= s->dsp.sad[0](NULL, p_pic, b_pic, s->linesize, 16);
if(diff>s->qscale*70){ //FIXME check that 70 is optimal
/* motion vectors: 8x8 mode*/
ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
- ff_h263_encode_motion_vector(s, s->current_picture.f.motion_val[0][ s->block_index[i] ][0] - pred_x,
- s->current_picture.f.motion_val[0][ s->block_index[i] ][1] - pred_y, s->f_code);
+ ff_h263_encode_motion_vector(s, s->current_picture.motion_val[0][ s->block_index[i] ][0] - pred_x,
+ s->current_picture.motion_val[0][ s->block_index[i] ][1] - pred_y, s->f_code);
}
}
if (!samples) {
av_assert0(s->frame != NULL);
s->frame->nb_samples = s->avctx->frame_size;
- if ((ret = ff_get_buffer(s->avctx, s->frame)) < 0) {
+ if ((ret = ff_get_buffer(s->avctx, s->frame, 0)) < 0) {
av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
/* get output buffer */
frame->nb_samples = MPA_FRAME_SIZE;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
* The simplest mpeg encoder (well, it was the simplest!).
*/
+#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "avcodec.h"
#include "dsputil.h"
return 0;
}
-void ff_copy_picture(Picture *dst, Picture *src)
-{
- *dst = *src;
- dst->f.type = FF_BUFFER_TYPE_COPY;
-}
-
-/**
- * Release a frame buffer
- */
-static void free_frame_buffer(MpegEncContext *s, Picture *pic)
-{
- /* WM Image / Screen codecs allocate internal buffers with different
- * dimensions / colorspaces; ignore user-defined callbacks for these. */
- if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
- s->codec_id != AV_CODEC_ID_VC1IMAGE &&
- s->codec_id != AV_CODEC_ID_MSS2)
- ff_thread_release_buffer(s->avctx, &pic->f);
- else
- avcodec_default_release_buffer(s->avctx, &pic->f);
- av_freep(&pic->hwaccel_picture_private);
-}
-
int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
{
int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
}
}
+ pic->tf.f = &pic->f;
if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
s->codec_id != AV_CODEC_ID_VC1IMAGE &&
s->codec_id != AV_CODEC_ID_MSS2)
- r = ff_thread_get_buffer(s->avctx, &pic->f);
- else
- r = avcodec_default_get_buffer(s->avctx, &pic->f);
-
- if (r < 0 || !pic->f.type || !pic->f.data[0]) {
- av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
- r, pic->f.type, pic->f.data[0]);
+ r = ff_thread_get_buffer(s->avctx, &pic->tf,
+ pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
+ else {
+ pic->f.width = s->avctx->width;
+ pic->f.height = s->avctx->height;
+ pic->f.format = s->avctx->pix_fmt;
+ r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
+ }
+
+ if (r < 0 || !pic->f.data[0]) {
+ av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
+ r, pic->f.data[0]);
av_freep(&pic->hwaccel_picture_private);
return -1;
}
s->uvlinesize != pic->f.linesize[1])) {
av_log(s->avctx, AV_LOG_ERROR,
"get_buffer() failed (stride changed)\n");
- free_frame_buffer(s, pic);
+ ff_mpeg_unref_picture(s, pic);
return -1;
}
if (pic->f.linesize[1] != pic->f.linesize[2]) {
av_log(s->avctx, AV_LOG_ERROR,
"get_buffer() failed (uv stride mismatch)\n");
- free_frame_buffer(s, pic);
+ ff_mpeg_unref_picture(s, pic);
return -1;
}
(ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
av_log(s->avctx, AV_LOG_ERROR,
"get_buffer() failed to allocate context scratch buffers.\n");
- free_frame_buffer(s, pic);
+ ff_mpeg_unref_picture(s, pic);
return ret;
}
return 0;
}
-/**
- * Allocate a Picture.
- * The pixels are allocated/set by calling get_buffer() if shared = 0
- */
-int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
+static void free_picture_tables(Picture *pic)
{
- const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
+ int i;
- // the + 1 is needed so memset(,,stride*height) does not sig11
+ av_buffer_unref(&pic->mb_var_buf);
+ av_buffer_unref(&pic->mc_mb_var_buf);
+ av_buffer_unref(&pic->mb_mean_buf);
+ av_buffer_unref(&pic->mbskip_table_buf);
+ av_buffer_unref(&pic->qscale_table_buf);
+ av_buffer_unref(&pic->mb_type_buf);
+ for (i = 0; i < 2; i++) {
+ av_buffer_unref(&pic->motion_val_buf[i]);
+ av_buffer_unref(&pic->ref_index_buf[i]);
+ }
+}
+
+static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
+{
+ const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
const int mb_array_size = s->mb_stride * s->mb_height;
const int b8_array_size = s->b8_stride * s->mb_height * 2;
- const int b4_array_size = s->b4_stride * s->mb_height * 4;
int i;
- int r = -1;
+
+
+ pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
+ pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
+ pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
+ sizeof(uint32_t));
+ if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
+ return AVERROR(ENOMEM);
+
+ if (s->encoding) {
+ pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
+ pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
+ pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
+ if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
+ return AVERROR(ENOMEM);
+ }
+
+ if (s->out_format == FMT_H263 || s->encoding ||
+ (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
+ int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
+ int ref_index_size = 4 * mb_array_size;
+
+ for (i = 0; mv_size && i < 2; i++) {
+ pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
+ pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
+ if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
+ return AVERROR(ENOMEM);
+ }
+ }
+
+ return 0;
+}
+
+static int make_tables_writable(Picture *pic)
+{
+ int ret, i;
+#define MAKE_WRITABLE(table) \
+do {\
+ if (pic->table &&\
+ (ret = av_buffer_make_writable(&pic->table)) < 0)\
+ return ret;\
+} while (0)
+
+ MAKE_WRITABLE(mb_var_buf);
+ MAKE_WRITABLE(mc_mb_var_buf);
+ MAKE_WRITABLE(mb_mean_buf);
+ MAKE_WRITABLE(mbskip_table_buf);
+ MAKE_WRITABLE(qscale_table_buf);
+ MAKE_WRITABLE(mb_type_buf);
+
+ for (i = 0; i < 2; i++) {
+ MAKE_WRITABLE(motion_val_buf[i]);
+ MAKE_WRITABLE(ref_index_buf[i]);
+ }
+
+ return 0;
+}
+
+/**
+ * Allocate a Picture.
+ * The pixels are allocated/set by calling get_buffer() if shared = 0
+ */
+int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
+{
+ int i, ret;
if (shared) {
assert(pic->f.data[0]);
- assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
- pic->f.type = FF_BUFFER_TYPE_SHARED;
+ pic->shared = 1;
} else {
assert(!pic->f.data[0]);
s->uvlinesize = pic->f.linesize[1];
}
- if (pic->f.qscale_table == NULL) {
- if (s->encoding) {
- FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
- mb_array_size * sizeof(int16_t), fail)
- FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
- mb_array_size * sizeof(int16_t), fail)
- FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
- mb_array_size * sizeof(int8_t ), fail)
- }
+ if (!pic->qscale_table_buf)
+ ret = alloc_picture_tables(s, pic);
+ else
+ ret = make_tables_writable(pic);
+ if (ret < 0)
+ goto fail;
- FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
- mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
- FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
- (big_mb_num + s->mb_stride) * sizeof(uint8_t),
- fail)
- FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
- (big_mb_num + s->mb_stride) * sizeof(uint32_t),
- fail)
- pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
- pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
- if (s->out_format == FMT_H264) {
- for (i = 0; i < 2; i++) {
- FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
- 2 * (b4_array_size + 4) * sizeof(int16_t),
- fail)
- pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
- FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
- 4 * mb_array_size * sizeof(uint8_t), fail)
- }
- pic->f.motion_subsample_log2 = 2;
- } else if (s->out_format == FMT_H263 || s->encoding ||
- (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
- for (i = 0; i < 2; i++) {
- FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
- 2 * (b8_array_size + 4) * sizeof(int16_t),
- fail)
- pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
- FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
- 4 * mb_array_size * sizeof(uint8_t), fail)
- }
- pic->f.motion_subsample_log2 = 3;
- }
- if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
- FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
- 64 * mb_array_size * sizeof(int16_t) * 6, fail)
- }
- pic->f.qstride = s->mb_stride;
- FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
- 1 * sizeof(AVPanScan), fail)
+ if (s->encoding) {
+ pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
+ pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
+ pic->mb_mean = pic->mb_mean_buf->data;
}
- pic->owner2 = s;
+ pic->mbskip_table = pic->mbskip_table_buf->data;
+ pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
+ pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
+
+ if (pic->motion_val_buf[0]) {
+ for (i = 0; i < 2; i++) {
+ pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
+ pic->ref_index[i] = pic->ref_index_buf[i]->data;
+ }
+ }
return 0;
-fail: // for the FF_ALLOCZ_OR_GOTO macro
- if (r >= 0)
- free_frame_buffer(s, pic);
- return -1;
+fail:
+ av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
+ ff_mpeg_unref_picture(s, pic);
+ free_picture_tables(pic);
+ return AVERROR(ENOMEM);
}
/**
* Deallocate a picture.
*/
-static void free_picture(MpegEncContext *s, Picture *pic)
+void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
{
- int i;
+ int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
+
+ pic->tf.f = &pic->f;
+ /* WM Image / Screen codecs allocate internal buffers with different
+ * dimensions / colorspaces; ignore user-defined callbacks for these. */
+ if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
+ s->codec_id != AV_CODEC_ID_VC1IMAGE &&
+ s->codec_id != AV_CODEC_ID_MSS2)
+ ff_thread_release_buffer(s->avctx, &pic->tf);
+ else
+ av_frame_unref(&pic->f);
+
+ av_buffer_unref(&pic->hwaccel_priv_buf);
- if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
- free_frame_buffer(s, pic);
- }
-
- av_freep(&pic->mb_var);
- av_freep(&pic->mc_mb_var);
- av_freep(&pic->mb_mean);
- av_freep(&pic->f.mbskip_table);
- av_freep(&pic->qscale_table_base);
- pic->f.qscale_table = NULL;
- av_freep(&pic->mb_type_base);
- pic->f.mb_type = NULL;
- av_freep(&pic->f.dct_coeff);
- av_freep(&pic->f.pan_scan);
- pic->f.mb_type = NULL;
+ memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
+}
+
+static int update_picture_tables(Picture *dst, Picture *src)
+{
+ int i;
+
+#define UPDATE_TABLE(table)\
+do {\
+ if (src->table &&\
+ (!dst->table || dst->table->buffer != src->table->buffer)) {\
+ av_buffer_unref(&dst->table);\
+ dst->table = av_buffer_ref(src->table);\
+ if (!dst->table) {\
+ free_picture_tables(dst);\
+ return AVERROR(ENOMEM);\
+ }\
+ }\
+} while (0)
+
+ UPDATE_TABLE(mb_var_buf);
+ UPDATE_TABLE(mc_mb_var_buf);
+ UPDATE_TABLE(mb_mean_buf);
+ UPDATE_TABLE(mbskip_table_buf);
+ UPDATE_TABLE(qscale_table_buf);
+ UPDATE_TABLE(mb_type_buf);
for (i = 0; i < 2; i++) {
- av_freep(&pic->motion_val_base[i]);
- av_freep(&pic->f.ref_index[i]);
- pic->f.motion_val[i] = NULL;
+ UPDATE_TABLE(motion_val_buf[i]);
+ UPDATE_TABLE(ref_index_buf[i]);
}
- if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
- for (i = 0; i < 4; i++) {
- pic->f.base[i] =
- pic->f.data[i] = NULL;
- }
- pic->f.type = 0;
+ dst->mb_var = src->mb_var;
+ dst->mc_mb_var = src->mc_mb_var;
+ dst->mb_mean = src->mb_mean;
+ dst->mbskip_table = src->mbskip_table;
+ dst->qscale_table = src->qscale_table;
+ dst->mb_type = src->mb_type;
+ for (i = 0; i < 2; i++) {
+ dst->motion_val[i] = src->motion_val[i];
+ dst->ref_index[i] = src->ref_index[i];
}
+
+ return 0;
+}
+
+int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
+{
+ int ret;
+
+ av_assert0(!dst->f.buf[0]);
+ av_assert0(src->f.buf[0]);
+
+ src->tf.f = &src->f;
+ dst->tf.f = &dst->f;
+ ret = ff_thread_ref_frame(&dst->tf, &src->tf);
+ if (ret < 0)
+ goto fail;
+
+ ret = update_picture_tables(dst, src);
+ if (ret < 0)
+ goto fail;
+
+ if (src->hwaccel_picture_private) {
+ dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
+ if (!dst->hwaccel_priv_buf)
+ goto fail;
+ dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
+ }
+
+ dst->field_picture = src->field_picture;
+ dst->mb_var_sum = src->mb_var_sum;
+ dst->mc_mb_var_sum = src->mc_mb_var_sum;
+ dst->b_frame_score = src->b_frame_score;
+ dst->needs_realloc = src->needs_realloc;
+ dst->reference = src->reference;
+ dst->shared = src->shared;
+
+ return 0;
+fail:
+ ff_mpeg_unref_picture(s, dst);
+ return ret;
}
static int init_duplicate_context(MpegEncContext *s)
int ff_mpeg_update_thread_context(AVCodecContext *dst,
const AVCodecContext *src)
{
- int i;
+ int i, ret;
MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
if (dst == src || !s1->context_initialized)
memcpy(s, s1, sizeof(MpegEncContext));
s->avctx = dst;
- s->picture_range_start += MAX_PICTURE_COUNT;
- s->picture_range_end += MAX_PICTURE_COUNT;
s->bitstream_buffer = NULL;
s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
s->picture_number = s1->picture_number;
s->input_picture_number = s1->input_picture_number;
- memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
- memcpy(&s->last_picture, &s1->last_picture,
- (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
-
- // reset s->picture[].f.extended_data to s->picture[].f.data
- for (i = 0; i < s->picture_count; i++)
- s->picture[i].f.extended_data = s->picture[i].f.data;
+ for (i = 0; i < MAX_PICTURE_COUNT; i++) {
+ ff_mpeg_unref_picture(s, &s->picture[i]);
+ if (s1->picture[i].f.data[0] &&
+ (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
+ return ret;
+ }
+
+#define UPDATE_PICTURE(pic)\
+do {\
+ ff_mpeg_unref_picture(s, &s->pic);\
+ if (s1->pic.f.data[0])\
+ ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
+ else\
+ ret = update_picture_tables(&s->pic, &s1->pic);\
+ if (ret < 0)\
+ return ret;\
+} while (0)
+
+ UPDATE_PICTURE(current_picture);
+ UPDATE_PICTURE(last_picture);
+ UPDATE_PICTURE(next_picture);
s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
s->f_code = 1;
s->b_code = 1;
- s->picture_range_start = 0;
- s->picture_range_end = MAX_PICTURE_COUNT;
-
s->slice_context_count = 1;
}
}
}
- s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
- s->picture_count * sizeof(Picture), fail);
- for (i = 0; i < s->picture_count; i++) {
+ MAX_PICTURE_COUNT * sizeof(Picture), fail);
+ for (i = 0; i < MAX_PICTURE_COUNT; i++) {
avcodec_get_frame_defaults(&s->picture[i].f);
}
+ memset(&s->next_picture, 0, sizeof(s->next_picture));
+ memset(&s->last_picture, 0, sizeof(s->last_picture));
+ memset(&s->current_picture, 0, sizeof(s->current_picture));
+ avcodec_get_frame_defaults(&s->next_picture.f);
+ avcodec_get_frame_defaults(&s->last_picture.f);
+ avcodec_get_frame_defaults(&s->current_picture.f);
if (s->width && s->height) {
if (init_context_frame(s))
} else
free_duplicate_context(s);
- free_context_frame(s);
+ if ((err = free_context_frame(s)) < 0)
+ return err;
if (s->picture)
- for (i = 0; i < s->picture_count; i++) {
+ for (i = 0; i < MAX_PICTURE_COUNT; i++) {
s->picture[i].needs_realloc = 1;
}
av_freep(&s->reordered_input_picture);
av_freep(&s->dct_offset);
- if (s->picture && !s->avctx->internal->is_copy) {
- for (i = 0; i < s->picture_count; i++) {
- free_picture(s, &s->picture[i]);
+ if (s->picture) {
+ for (i = 0; i < MAX_PICTURE_COUNT; i++) {
+ free_picture_tables(&s->picture[i]);
+ ff_mpeg_unref_picture(s, &s->picture[i]);
}
}
av_freep(&s->picture);
+ free_picture_tables(&s->last_picture);
+ ff_mpeg_unref_picture(s, &s->last_picture);
+ free_picture_tables(&s->current_picture);
+ ff_mpeg_unref_picture(s, &s->current_picture);
+ free_picture_tables(&s->next_picture);
+ ff_mpeg_unref_picture(s, &s->next_picture);
+ free_picture_tables(&s->new_picture);
+ ff_mpeg_unref_picture(s, &s->new_picture);
free_context_frame(s);
- if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
- avcodec_default_free_buffers(s->avctx);
-
s->context_initialized = 0;
s->last_picture_ptr =
s->next_picture_ptr =
int i;
/* release non reference frames */
- for (i = 0; i < s->picture_count; i++) {
- if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
- (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
- (remove_current || &s->picture[i] != s->current_picture_ptr)
- /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
- free_frame_buffer(s, &s->picture[i]);
+ for (i = 0; i < MAX_PICTURE_COUNT; i++) {
+ if (!s->picture[i].reference &&
+ (remove_current || &s->picture[i] != s->current_picture_ptr)) {
+ ff_mpeg_unref_picture(s, &s->picture[i]);
}
}
}
{
if (pic->f.data[0] == NULL)
return 1;
- if (pic->needs_realloc && !(pic->f.reference & DELAYED_PIC_REF))
- if (!pic->owner2 || pic->owner2 == s)
- return 1;
+ if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
+ return 1;
return 0;
}
int i;
if (shared) {
- for (i = s->picture_range_start; i < s->picture_range_end; i++) {
- if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
+ for (i = 0; i < MAX_PICTURE_COUNT; i++) {
+ if (s->picture[i].f.data[0] == NULL)
return i;
}
} else {
- for (i = s->picture_range_start; i < s->picture_range_end; i++) {
- if (pic_is_unused(s, &s->picture[i]) && s->picture[i].f.type != 0)
- return i; // FIXME
- }
- for (i = s->picture_range_start; i < s->picture_range_end; i++) {
+ for (i = 0; i < MAX_PICTURE_COUNT; i++) {
if (pic_is_unused(s, &s->picture[i]))
return i;
}
{
int ret = find_unused_picture(s, shared);
- if (ret >= 0 && ret < s->picture_range_end) {
+ if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
if (s->picture[ret].needs_realloc) {
s->picture[ret].needs_realloc = 0;
- free_picture(s, &s->picture[ret]);
+ free_picture_tables(&s->picture[ret]);
+ ff_mpeg_unref_picture(s, &s->picture[ret]);
avcodec_get_frame_defaults(&s->picture[ret].f);
}
}
*/
int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
- int i;
+ int i, ret;
Picture *pic;
s->mb_skipped = 0;
if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
s->last_picture_ptr != s->next_picture_ptr &&
s->last_picture_ptr->f.data[0]) {
- if (s->last_picture_ptr->owner2 == s)
- free_frame_buffer(s, s->last_picture_ptr);
+ ff_mpeg_unref_picture(s, s->last_picture_ptr);
}
/* release forgotten pictures */
/* if (mpeg124/h263) */
if (!s->encoding) {
- for (i = 0; i < s->picture_count; i++) {
- if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
- &s->picture[i] != s->last_picture_ptr &&
+ for (i = 0; i < MAX_PICTURE_COUNT; i++) {
+ if (&s->picture[i] != s->last_picture_ptr &&
&s->picture[i] != s->next_picture_ptr &&
- s->picture[i].f.reference && !s->picture[i].needs_realloc) {
+ s->picture[i].reference && !s->picture[i].needs_realloc) {
if (!(avctx->active_thread_type & FF_THREAD_FRAME))
av_log(avctx, AV_LOG_ERROR,
"releasing zombie picture\n");
- free_frame_buffer(s, &s->picture[i]);
+ ff_mpeg_unref_picture(s, &s->picture[i]);
}
}
}
pic = &s->picture[i];
}
- pic->f.reference = 0;
+ pic->reference = 0;
if (!s->droppable) {
if (s->codec_id == AV_CODEC_ID_H264)
- pic->f.reference = s->picture_structure;
+ pic->reference = s->picture_structure;
else if (s->pict_type != AV_PICTURE_TYPE_B)
- pic->f.reference = 3;
+ pic->reference = 3;
}
pic->f.coded_picture_number = s->coded_picture_number++;
// s->current_picture_ptr->quality = s->new_picture_ptr->quality;
s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
- ff_copy_picture(&s->current_picture, s->current_picture_ptr);
+ ff_mpeg_unref_picture(s, &s->current_picture);
+ if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
+ s->current_picture_ptr)) < 0)
+ return ret;
- if (s->pict_type != AV_PICTURE_TYPE_B) {
+ if (s->codec_id != AV_CODEC_ID_H264 && s->pict_type != AV_PICTURE_TYPE_B) {
s->last_picture_ptr = s->next_picture_ptr;
if (!s->droppable)
s->next_picture_ptr = s->current_picture_ptr;
(avctx->height >> v_chroma_shift) *
s->last_picture_ptr->f.linesize[2]);
- ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
- ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
- s->last_picture_ptr->f.reference = 3;
+ ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
+ ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
}
if ((s->next_picture_ptr == NULL ||
s->next_picture_ptr->f.data[0] == NULL) &&
s->next_picture_ptr = NULL;
return -1;
}
- ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
- ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
- s->next_picture_ptr->f.reference = 3;
+ ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
+ ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
}
}
- if (s->last_picture_ptr)
- ff_copy_picture(&s->last_picture, s->last_picture_ptr);
- if (s->next_picture_ptr)
- ff_copy_picture(&s->next_picture, s->next_picture_ptr);
+ if (s->codec_id != AV_CODEC_ID_H264) {
+ if (s->last_picture_ptr) {
+ ff_mpeg_unref_picture(s, &s->last_picture);
+ if (s->last_picture_ptr->f.data[0] &&
+ (ret = ff_mpeg_ref_picture(s, &s->last_picture,
+ s->last_picture_ptr)) < 0)
+ return ret;
+ }
+ if (s->next_picture_ptr) {
+ ff_mpeg_unref_picture(s, &s->next_picture);
+ if (s->next_picture_ptr->f.data[0] &&
+ (ret = ff_mpeg_ref_picture(s, &s->next_picture,
+ s->next_picture_ptr)) < 0)
+ return ret;
+ }
- if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME)) {
- if (s->next_picture_ptr)
- s->next_picture_ptr->owner2 = s;
- if (s->last_picture_ptr)
- s->last_picture_ptr->owner2 = s;
+ assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
+ s->last_picture_ptr->f.data[0]));
}
- assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
- s->last_picture_ptr->f.data[0]));
-
if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
int i;
for (i = 0; i < 4; i++) {
!s->avctx->hwaccel &&
!(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
s->unrestricted_mv &&
- s->current_picture.f.reference &&
+ s->current_picture.reference &&
!s->intra_only &&
!(s->flags & CODEC_FLAG_EMU_EDGE)) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
if (s->encoding) {
/* release non-reference frames */
- for (i = 0; i < s->picture_count; i++) {
- if (s->picture[i].f.data[0] && !s->picture[i].f.reference
- /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
- free_frame_buffer(s, &s->picture[i]);
- }
+ for (i = 0; i < MAX_PICTURE_COUNT; i++) {
+ if (!s->picture[i].reference)
+ ff_mpeg_unref_picture(s, &s->picture[i]);
}
}
// clear copies, to avoid confusion
#endif
s->avctx->coded_frame = &s->current_picture_ptr->f;
- if (s->codec_id != AV_CODEC_ID_H264 && s->current_picture.f.reference) {
- ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
- }
+ if (s->codec_id != AV_CODEC_ID_H264 && s->current_picture.reference)
+ ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
}
/**
/**
* Print debugging info for the given picture.
*/
-void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
+void ff_print_debug_info(MpegEncContext *s, Picture *p)
{
- if (s->avctx->hwaccel || !pict || !pict->mb_type)
+ AVFrame *pict;
+ if (s->avctx->hwaccel || !p || !p->mb_type)
return;
+ pict = &p->f;
if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
int x,y;
}
if (s->avctx->debug & FF_DEBUG_QP) {
av_log(s->avctx, AV_LOG_DEBUG, "%2d",
- pict->qscale_table[x + y * s->mb_stride]);
+ p->qscale_table[x + y * s->mb_stride]);
}
if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
- int mb_type = pict->mb_type[x + y * s->mb_stride];
+ int mb_type = p->mb_type[x + y * s->mb_stride];
// Type & MV direction
if (IS_PCM(mb_type))
av_log(s->avctx, AV_LOG_DEBUG, "P");
pict->linesize[i] * height >> v_chroma_shift);
pict->data[i] = s->visualization_buffer[i];
}
- pict->type = FF_BUFFER_TYPE_COPY;
ptr = pict->data[0];
block_height = 16 >> v_chroma_shift;
int mb_x;
for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
const int mb_index = mb_x + mb_y * s->mb_stride;
- if ((s->avctx->debug_mv) && pict->motion_val) {
+ if ((s->avctx->debug_mv) && p->motion_val) {
int type;
for (type = 0; type < 3; type++) {
int direction = 0;
direction = 1;
break;
}
- if (!USES_LIST(pict->mb_type[mb_index], direction))
+ if (!USES_LIST(p->mb_type[mb_index], direction))
continue;
- if (IS_8X8(pict->mb_type[mb_index])) {
+ if (IS_8X8(p->mb_type[mb_index])) {
int i;
for (i = 0; i < 4; i++) {
int sx = mb_x * 16 + 4 + 8 * (i & 1);
int sy = mb_y * 16 + 4 + 8 * (i >> 1);
int xy = (mb_x * 2 + (i & 1) +
(mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
- int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
- int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
+ int mx = (p->motion_val[direction][xy][0] >> shift) + sx;
+ int my = (p->motion_val[direction][xy][1] >> shift) + sy;
draw_arrow(ptr, sx, sy, mx, my, width,
height, s->linesize, 100);
}
- } else if (IS_16X8(pict->mb_type[mb_index])) {
+ } else if (IS_16X8(p->mb_type[mb_index])) {
int i;
for (i = 0; i < 2; i++) {
int sx = mb_x * 16 + 8;
int sy = mb_y * 16 + 4 + 8 * i;
int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
- int mx = (pict->motion_val[direction][xy][0] >> shift);
- int my = (pict->motion_val[direction][xy][1] >> shift);
+ int mx = (p->motion_val[direction][xy][0] >> shift);
+ int my = (p->motion_val[direction][xy][1] >> shift);
- if (IS_INTERLACED(pict->mb_type[mb_index]))
+ if (IS_INTERLACED(p->mb_type[mb_index]))
my *= 2;
draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
height, s->linesize, 100);
}
- } else if (IS_8X16(pict->mb_type[mb_index])) {
+ } else if (IS_8X16(p->mb_type[mb_index])) {
int i;
for (i = 0; i < 2; i++) {
int sx = mb_x * 16 + 4 + 8 * i;
int sy = mb_y * 16 + 8;
int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
- int mx = pict->motion_val[direction][xy][0] >> shift;
- int my = pict->motion_val[direction][xy][1] >> shift;
+ int mx = p->motion_val[direction][xy][0] >> shift;
+ int my = p->motion_val[direction][xy][1] >> shift;
- if (IS_INTERLACED(pict->mb_type[mb_index]))
+ if (IS_INTERLACED(p->mb_type[mb_index]))
my *= 2;
draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
int sx = mb_x * 16 + 8;
int sy = mb_y * 16 + 8;
int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
- int mx = pict->motion_val[direction][xy][0] >> shift + sx;
- int my = pict->motion_val[direction][xy][1] >> shift + sy;
+ int mx = p->motion_val[direction][xy][0] >> shift + sx;
+ int my = p->motion_val[direction][xy][1] >> shift + sy;
draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
}
}
}
- if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
- uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
+ if ((s->avctx->debug & FF_DEBUG_VIS_QP) && p->motion_val) {
+ uint64_t c = (p->qscale_table[mb_index] * 128 / 31) *
0x0101010101010101ULL;
int y;
for (y = 0; y < block_height; y++) {
}
}
if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
- pict->motion_val) {
- int mb_type = pict->mb_type[mb_index];
+ p->motion_val) {
+ int mb_type = p->mb_type[mb_index];
uint64_t u,v;
int y;
#define COLOR(theta, r) \
int xy = (mb_x * 2 + (i & 1) +
(mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
// FIXME bidir
- int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
+ int32_t *mv = (int32_t *) &p->motion_val[0][xy];
if (mv[0] != mv[dm] ||
mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
for (y = 0; y < 8; y++)
}
if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
- /* save DCT coefficients */
+ /* print DCT coefficients */
int i,j;
- int16_t *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
for(i=0; i<6; i++){
for(j=0; j<64; j++){
- *dct++ = block[i][s->dsp.idct_permutation[j]];
- av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
+ av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
}
av_log(s->avctx, AV_LOG_DEBUG, "\n");
}
}
- s->current_picture.f.qscale_table[mb_xy] = s->qscale;
+ s->current_picture.qscale_table[mb_xy] = s->qscale;
/* update DC predictors for P macroblocks */
if (!s->mb_intra) {
s->mb_skipped= 0;
assert(s->pict_type!=AV_PICTURE_TYPE_I);
*mbskip_ptr = 1;
- } else if(!s->current_picture.f.reference) {
+ } else if(!s->current_picture.reference) {
*mbskip_ptr = 1;
} else{
*mbskip_ptr = 0; /* not skipped */
if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
if (s->mv_dir & MV_DIR_FORWARD) {
- ff_thread_await_progress(&s->last_picture_ptr->f,
+ ff_thread_await_progress(&s->last_picture_ptr->tf,
ff_MPV_lowest_referenced_row(s, 0),
0);
}
if (s->mv_dir & MV_DIR_BACKWARD) {
- ff_thread_await_progress(&s->next_picture_ptr->f,
+ ff_thread_await_progress(&s->next_picture_ptr->tf,
ff_MPV_lowest_referenced_row(s, 1),
0);
}
if (!avctx->hwaccel &&
!(avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
draw_edges &&
- cur->f.reference &&
+ cur->reference &&
!(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
int *linesize = cur->f.linesize;
int sides = 0, edge_h;
if(s==NULL || s->picture==NULL)
return;
- for(i=0; i<s->picture_count; i++){
- if (s->picture[i].f.data[0] &&
- (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
- s->picture[i].f.type == FF_BUFFER_TYPE_USER))
- free_frame_buffer(s, &s->picture[i]);
- }
+ for (i = 0; i < MAX_PICTURE_COUNT; i++)
+ ff_mpeg_unref_picture(s, &s->picture[i]);
s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
s->mb_x= s->mb_y= 0;
void ff_MPV_report_decode_progress(MpegEncContext *s)
{
if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
- ff_thread_report_progress(&s->current_picture_ptr->f, s->mb_y, 0);
+ ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
}
#if CONFIG_ERROR_RESILIENCE
#include "parser.h"
#include "mpeg12data.h"
#include "rl.h"
+#include "thread.h"
#include "videodsp.h"
#include "libavutil/opt.h"
*/
typedef struct Picture{
struct AVFrame f;
+ ThreadFrame tf;
+
+ AVBufferRef *qscale_table_buf;
+ int8_t *qscale_table;
+
+ AVBufferRef *motion_val_buf[2];
+ int16_t (*motion_val[2])[2];
+
+ AVBufferRef *mb_type_buf;
+ uint32_t *mb_type;
+
+ AVBufferRef *mbskip_table_buf;
+ uint8_t *mbskip_table;
+
+ AVBufferRef *ref_index_buf[2];
+ int8_t *ref_index[2];
+
+ AVBufferRef *mb_var_buf;
+ uint16_t *mb_var; ///< Table for MB variances
+
+ AVBufferRef *mc_mb_var_buf;
+ uint16_t *mc_mb_var; ///< Table for motion compensated MB variances
+
+ AVBufferRef *mb_mean_buf;
+ uint8_t *mb_mean; ///< Table for MB luminance
+
+ AVBufferRef *hwaccel_priv_buf;
+ /**
+ * hardware accelerator private data
+ */
+ void *hwaccel_picture_private;
- int8_t *qscale_table_base;
- int16_t (*motion_val_base[2])[2];
- uint32_t *mb_type_base;
#define MB_TYPE_INTRA MB_TYPE_INTRA4x4 //default mb_type if there is just one type
#define IS_INTRA4x4(a) ((a)&MB_TYPE_INTRA4x4)
#define IS_INTRA16x16(a) ((a)&MB_TYPE_INTRA16x16)
int mb_var_sum; ///< sum of MB variance for current frame
int mc_mb_var_sum; ///< motion compensated MB variance for current frame
- uint16_t *mb_var; ///< Table for MB variances
- uint16_t *mc_mb_var; ///< Table for motion compensated MB variances
- uint8_t *mb_mean; ///< Table for MB luminance
+
int b_frame_score; /* */
- void *owner2; ///< pointer to the context that allocated this picture
int needs_realloc; ///< Picture needs to be reallocated (eg due to a frame size change)
- /**
- * hardware accelerator private data
- */
- void *hwaccel_picture_private;
+
+ int reference;
+ int shared;
} Picture;
/**
Picture *last_picture_ptr; ///< pointer to the previous picture.
Picture *next_picture_ptr; ///< pointer to the next picture (for bidir pred)
Picture *current_picture_ptr; ///< pointer to the current picture
- int picture_count; ///< number of allocated pictures (MAX_PICTURE_COUNT * avctx->thread_count)
- int picture_range_start, picture_range_end; ///< the part of picture that this context can allocate in
uint8_t *visualization_buffer[3]; ///< temporary buffer vor MV visualization
int last_dc[3]; ///< last DC values for MPEG1
int16_t *dc_val_base;
#define REBASE_PICTURE(pic, new_ctx, old_ctx) \
((pic && pic >= old_ctx->picture && \
- pic < old_ctx->picture + old_ctx->picture_count) ? \
+ pic < old_ctx->picture + MAX_PICTURE_COUNT) ? \
&new_ctx->picture[pic - old_ctx->picture] : NULL)
/* mpegvideo_enc common options */
int v_edge_pos, int h_edge_pos);
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h);
void ff_mpeg_flush(AVCodecContext *avctx);
-void ff_print_debug_info(MpegEncContext *s, AVFrame *pict);
+void ff_print_debug_info(MpegEncContext *s, Picture *p);
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix);
void ff_release_unused_pictures(MpegEncContext *s, int remove_current);
int ff_find_unused_picture(MpegEncContext *s, int shared);
int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
void ff_init_block_index(MpegEncContext *s);
-void ff_copy_picture(Picture *dst, Picture *src);
void ff_MPV_motion(MpegEncContext *s,
uint8_t *dest_y, uint8_t *dest_cb,
int16_t block[6][64],
int motion_x, int motion_y);
+int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src);
+void ff_mpeg_unref_picture(MpegEncContext *s, Picture *picture);
+
#endif /* AVCODEC_MPEGVIDEO_H */
*/
void ff_init_qscale_tab(MpegEncContext *s)
{
- int8_t * const qscale_table = s->current_picture.f.qscale_table;
+ int8_t * const qscale_table = s->current_picture.qscale_table;
int i;
for (i = 0; i < s->mb_num; i++) {
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
{
- AVFrame *pic = NULL;
+ Picture *pic = NULL;
int64_t pts;
- int i, display_picture_number = 0;
+ int i, display_picture_number = 0, ret;
const int encoding_delay = s->max_b_frames ? s->max_b_frames :
(s->low_delay ? 0 : 1);
int direct = 1;
}
if (pic_arg) {
- if (encoding_delay && !(s->flags & CODEC_FLAG_INPUT_PRESERVED))
+ if (!pic_arg->buf[0])
direct = 0;
if (pic_arg->linesize[0] != s->linesize)
direct = 0;
if (i < 0)
return i;
- pic = &s->picture[i].f;
+ pic = &s->picture[i];
pic->reference = 3;
- for (i = 0; i < 4; i++) {
- pic->data[i] = pic_arg->data[i];
- pic->linesize[i] = pic_arg->linesize[i];
- }
- if (ff_alloc_picture(s, (Picture *) pic, 1) < 0) {
+ if ((ret = av_frame_ref(&pic->f, pic_arg)) < 0)
+ return ret;
+ if (ff_alloc_picture(s, pic, 1) < 0) {
return -1;
}
} else {
if (i < 0)
return i;
- pic = &s->picture[i].f;
+ pic = &s->picture[i];
pic->reference = 3;
- if (ff_alloc_picture(s, (Picture *) pic, 0) < 0) {
+ if (ff_alloc_picture(s, pic, 0) < 0) {
return -1;
}
- if (pic->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
- pic->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
- pic->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
+ if (pic->f.data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
+ pic->f.data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
+ pic->f.data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
// empty
} else {
int h_chroma_shift, v_chroma_shift;
int w = s->width >> h_shift;
int h = s->height >> v_shift;
uint8_t *src = pic_arg->data[i];
- uint8_t *dst = pic->data[i];
+ uint8_t *dst = pic->f.data[i];
if (!s->avctx->rc_buffer_size)
dst += INPLACE_OFFSET;
}
}
}
- copy_picture_attributes(s, pic, pic_arg);
- pic->display_picture_number = display_picture_number;
- pic->pts = pts; // we set this here to avoid modifiying pic_arg
+ copy_picture_attributes(s, &pic->f, pic_arg);
+ pic->f.display_picture_number = display_picture_number;
+ pic->f.pts = pts; // we set this here to avoid modifiying pic_arg
}
/* shift buffer entries */
const int bw = plane ? 1 : 2;
for (y = 0; y < s->mb_height * bw; y++) {
for (x = 0; x < s->mb_width * bw; x++) {
- int off = p->f.type == FF_BUFFER_TYPE_SHARED ? 0 : 16;
+ int off = p->shared ? 0 : 16;
uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
int v = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
pre_input = *pre_input_ptr;
- if (pre_input.f.type != FF_BUFFER_TYPE_SHARED && i) {
+ if (!pre_input.shared && i) {
pre_input.f.data[0] += INPLACE_OFFSET;
pre_input.f.data[1] += INPLACE_OFFSET;
pre_input.f.data[2] += INPLACE_OFFSET;
static int select_input_picture(MpegEncContext *s)
{
- int i;
+ int i, ret;
for (i = 1; i < MAX_PICTURE_COUNT; i++)
s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
if (s->picture_in_gop_number < s->gop_size &&
skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
// FIXME check that te gop check above is +-1 correct
- if (s->input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED) {
- for (i = 0; i < 4; i++)
- s->input_picture[0]->f.data[i] = NULL;
- s->input_picture[0]->f.type = 0;
- } else {
- assert(s->input_picture[0]->f.type == FF_BUFFER_TYPE_USER ||
- s->input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL);
-
- s->avctx->release_buffer(s->avctx,
- &s->input_picture[0]->f);
- }
+ av_frame_unref(&s->input_picture[0]->f);
emms_c();
ff_vbv_update(s, 0);
}
no_output_pic:
if (s->reordered_input_picture[0]) {
- s->reordered_input_picture[0]->f.reference =
+ s->reordered_input_picture[0]->reference =
s->reordered_input_picture[0]->f.pict_type !=
AV_PICTURE_TYPE_B ? 3 : 0;
- ff_copy_picture(&s->new_picture, s->reordered_input_picture[0]);
+ ff_mpeg_unref_picture(s, &s->new_picture);
+ if ((ret = ff_mpeg_ref_picture(s, &s->new_picture, s->reordered_input_picture[0])))
+ return ret;
- if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED ||
- s->avctx->rc_buffer_size) {
+ if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
// input is a shared pix, so we can't modifiy it -> alloc a new
// one & ensure that the shared one is reuseable
return i;
pic = &s->picture[i];
- pic->f.reference = s->reordered_input_picture[0]->f.reference;
+ pic->reference = s->reordered_input_picture[0]->reference;
if (ff_alloc_picture(s, pic, 0) < 0) {
return -1;
}
- /* mark us unused / free shared pic */
- if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL)
- s->avctx->release_buffer(s->avctx,
- &s->reordered_input_picture[0]->f);
- for (i = 0; i < 4; i++)
- s->reordered_input_picture[0]->f.data[i] = NULL;
- s->reordered_input_picture[0]->f.type = 0;
-
copy_picture_attributes(s, &pic->f,
&s->reordered_input_picture[0]->f);
+ /* mark us unused / free shared pic */
+ av_frame_unref(&s->reordered_input_picture[0]->f);
+ s->reordered_input_picture[0]->shared = 0;
+
s->current_picture_ptr = pic;
} else {
// input is not a shared pix -> reuse buffer for current_pix
-
- assert(s->reordered_input_picture[0]->f.type ==
- FF_BUFFER_TYPE_USER ||
- s->reordered_input_picture[0]->f.type ==
- FF_BUFFER_TYPE_INTERNAL);
-
s->current_picture_ptr = s->reordered_input_picture[0];
for (i = 0; i < 4; i++) {
s->new_picture.f.data[i] += INPLACE_OFFSET;
}
}
- ff_copy_picture(&s->current_picture, s->current_picture_ptr);
+ ff_mpeg_unref_picture(s, &s->current_picture);
+ if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
+ s->current_picture_ptr)) < 0)
+ return ret;
s->picture_number = s->new_picture.f.display_picture_number;
} else {
- memset(&s->new_picture, 0, sizeof(Picture));
+ ff_mpeg_unref_picture(s, &s->new_picture);
}
return 0;
}
update_qscale(s);
if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
- s->qscale = s->current_picture_ptr->f.qscale_table[mb_xy];
+ s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
s->dquant = s->qscale - last_qp;
if (s->out_format == FMT_H263) {
s->mv_type = MV_TYPE_8X8;
s->mb_intra= 0;
for(i=0; i<4; i++){
- s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
- s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
+ s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
+ s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
}
encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
&dmin, &next_block, 0, 0);
}
}
- s->current_picture.f.qscale_table[xy] = best_s.qscale;
+ s->current_picture.qscale_table[xy] = best_s.qscale;
copy_context_after_encode(s, &best_s, -1);
s->mv_type = MV_TYPE_8X8;
s->mb_intra= 0;
for(i=0; i<4; i++){
- s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
- s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
+ s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
+ s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
}
break;
case CANDIDATE_MB_TYPE_DIRECT:
if(!is_mpeg12 && s->obmc && s->pict_type != AV_PICTURE_TYPE_B){
LOCAL_ALIGNED_8(int16_t, mv_cache, [4], [4][2]);
- AVFrame *cur_frame = &s->current_picture.f;
+ Picture *cur_frame = &s->current_picture;
const int xy= s->mb_x + s->mb_y*s->mb_stride;
const int mot_stride= s->b8_stride;
const int mot_xy= mb_x*2 + mb_y*2*mot_stride;
// Do I need to export quant when I could not perform postprocessing?
// Anyway, it doesn't hurt.
- s->current_picture.f.qscale_table[mb_xy] = s->qscale;
+ s->current_picture.qscale_table[mb_xy] = s->qscale;
// start of XVMC-specific code
render = (struct xvmc_pix_fmt*)s->current_picture.f.data[2];
{
int cbp, code, i;
uint8_t *coded_val;
- uint32_t * const mb_type_ptr = &s->current_picture.f.mb_type[s->mb_x + s->mb_y*s->mb_stride];
+ uint32_t * const mb_type_ptr = &s->current_picture.mb_type[s->mb_x + s->mb_y*s->mb_stride];
if (s->pict_type == AV_PICTURE_TYPE_P) {
if (s->use_skip_mb_code) {
#include <string.h>
#include "avcodec.h"
+#include "internal.h"
#include "msrledec.h"
typedef struct MsrleContext {
s->buf = buf;
s->size = buf_size;
- s->frame.reference = 1;
- s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
- if ((ret = avctx->reget_buffer(avctx, &s->frame)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
ff_msrle_decode(avctx, (AVPicture*)&s->frame, avctx->bits_per_coded_sample, &s->gb);
}
+ if ((ret = av_frame_ref(data, &s->frame)) < 0)
+ return ret;
+
*got_frame = 1;
- *(AVFrame*)data = s->frame;
/* report that the buffer was completely consumed */
return buf_size;
MsrleContext *s = avctx->priv_data;
/* release the last frame */
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
+ av_frame_unref(&s->frame);
return 0;
}
*/
#include "avcodec.h"
+#include "internal.h"
#include "mss12.h"
typedef struct MSS1Context {
init_get_bits(&gb, buf, buf_size * 8);
arith_init(&acoder, &gb);
- ctx->pic.reference = 3;
- ctx->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_READABLE |
- FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
- if ((ret = avctx->reget_buffer(avctx, &ctx->pic)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, &ctx->pic)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
memcpy(ctx->pic.data[1], c->pal, AVPALETTE_SIZE);
ctx->pic.palette_has_changed = pal_changed;
+ if ((ret = av_frame_ref(data, &ctx->pic)) < 0)
+ return ret;
+
*got_frame = 1;
- *(AVFrame*)data = ctx->pic;
/* always report that the buffer was completely consumed */
return buf_size;
{
MSS1Context * const ctx = avctx->priv_data;
- if (ctx->pic.data[0])
- avctx->release_buffer(avctx, &ctx->pic);
+ av_frame_unref(&ctx->pic);
ff_mss12_decode_end(&ctx->ctx);
return 0;
typedef struct MSS2Context {
VC1Context v;
int split_position;
- AVFrame pic;
AVFrame last_pic;
MSS12Context c;
MSS2DSPContext dsp;
int buf_size = avpkt->size;
MSS2Context *ctx = avctx->priv_data;
MSS12Context *c = &ctx->c;
+ AVFrame *frame = data;
GetBitContext gb;
GetByteContext gB;
ArithCoder acoder;
return AVERROR_INVALIDDATA;
avctx->pix_fmt = is_555 ? AV_PIX_FMT_RGB555 : AV_PIX_FMT_RGB24;
- if (ctx->pic.data[0] && ctx->pic.format != avctx->pix_fmt)
- avctx->release_buffer(avctx, &ctx->pic);
+ if (ctx->last_pic.format != avctx->pix_fmt)
+ av_frame_unref(&ctx->last_pic);
if (has_wmv9) {
bytestream2_init(&gB, buf, buf_size + ARITH2_PADDING);
}
if (c->mvX < 0 || c->mvY < 0) {
- FFSWAP(AVFrame, ctx->pic, ctx->last_pic);
FFSWAP(uint8_t *, c->pal_pic, c->last_pal_pic);
- if (ctx->pic.data[0])
- avctx->release_buffer(avctx, &ctx->pic);
-
- ctx->pic.reference = 3;
- ctx->pic.buffer_hints = FF_BUFFER_HINTS_VALID |
- FF_BUFFER_HINTS_READABLE |
- FF_BUFFER_HINTS_PRESERVE |
- FF_BUFFER_HINTS_REUSABLE;
-
- if ((ret = ff_get_buffer(avctx, &ctx->pic)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
if (ctx->last_pic.data[0]) {
- av_assert0(ctx->pic.linesize[0] == ctx->last_pic.linesize[0]);
+ av_assert0(frame->linesize[0] == ctx->last_pic.linesize[0]);
c->last_rgb_pic = ctx->last_pic.data[0] +
ctx->last_pic.linesize[0] * (avctx->height - 1);
} else {
return AVERROR_INVALIDDATA;
}
} else {
- if (ctx->last_pic.data[0])
- avctx->release_buffer(avctx, &ctx->last_pic);
-
- ctx->pic.reference = 3;
- ctx->pic.buffer_hints = FF_BUFFER_HINTS_VALID |
- FF_BUFFER_HINTS_READABLE |
- FF_BUFFER_HINTS_PRESERVE |
- FF_BUFFER_HINTS_REUSABLE;
-
- if ((ret = avctx->reget_buffer(avctx, &ctx->pic)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, &ctx->last_pic)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
+ if ((ret = av_frame_ref(frame, &ctx->last_pic)) < 0)
+ return ret;
c->last_rgb_pic = NULL;
}
- c->rgb_pic = ctx->pic.data[0] +
- ctx->pic.linesize[0] * (avctx->height - 1);
- c->rgb_stride = -ctx->pic.linesize[0];
+ c->rgb_pic = frame->data[0] +
+ frame->linesize[0] * (avctx->height - 1);
+ c->rgb_stride = -frame->linesize[0];
- ctx->pic.key_frame = keyframe;
- ctx->pic.pict_type = keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
+ frame->key_frame = keyframe;
+ frame->pict_type = keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
if (is_555) {
bytestream2_init(&gB, buf, buf_size);
if (buf_size)
av_log(avctx, AV_LOG_WARNING, "buffer not fully consumed\n");
+ if (c->mvX < 0 || c->mvY < 0) {
+ av_frame_unref(&ctx->last_pic);
+ ret = av_frame_ref(&ctx->last_pic, frame);
+ if (ret < 0)
+ return ret;
+ }
+
*got_frame = 1;
- *(AVFrame *)data = ctx->pic;
return avpkt->size;
}
{
MSS2Context *const ctx = avctx->priv_data;
- if (ctx->pic.data[0])
- avctx->release_buffer(avctx, &ctx->pic);
- if (ctx->last_pic.data[0])
- avctx->release_buffer(avctx, &ctx->last_pic);
+ av_frame_unref(&ctx->last_pic);
ff_mss12_decode_end(&ctx->c);
av_freep(&ctx->c.pal_pic);
MSS12Context *c = &ctx->c;
int ret;
c->avctx = avctx;
- avctx->coded_frame = &ctx->pic;
if (ret = ff_mss12_decode_init(c, 1, &ctx->sc[0], &ctx->sc[1]))
return ret;
c->pal_stride = c->mask_stride;
#include "avcodec.h"
#include "bytestream.h"
#include "dsputil.h"
+#include "internal.h"
#include "mss34dsp.h"
#define HEADER_SIZE 27
return buf_size;
c->got_error = 0;
- c->pic.reference = 3;
- c->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
- FF_BUFFER_HINTS_REUSABLE;
- if ((ret = avctx->reget_buffer(avctx, &c->pic)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, &c->pic)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
c->pic.key_frame = keyframe;
c->pic.pict_type = keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
if (!bytestream2_get_bytes_left(&gb)) {
+ if ((ret = av_frame_ref(data, &c->pic)) < 0)
+ return ret;
*got_frame = 1;
- *(AVFrame*)data = c->pic;
return buf_size;
}
dst[2] += c->pic.linesize[2] * 8;
}
+ if ((ret = av_frame_ref(data, &c->pic)) < 0)
+ return ret;
+
*got_frame = 1;
- *(AVFrame*)data = c->pic;
return buf_size;
}
}
avctx->pix_fmt = AV_PIX_FMT_YUV420P;
- avctx->coded_frame = &c->pic;
init_coders(c);
MSS3Context * const c = avctx->priv_data;
int i;
- if (c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
+ av_frame_unref(&c->pic);
for (i = 0; i < 3; i++)
av_freep(&c->dct_coder[i].prev_dc);
#include "bytestream.h"
#include "dsputil.h"
#include "get_bits.h"
+#include "internal.h"
#include "mss34dsp.h"
#include "unary.h"
return AVERROR_INVALIDDATA;
}
- c->pic.reference = 3;
- c->pic.buffer_hints = FF_BUFFER_HINTS_VALID |
- FF_BUFFER_HINTS_PRESERVE |
- FF_BUFFER_HINTS_REUSABLE;
- if ((ret = avctx->reget_buffer(avctx, &c->pic)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, &c->pic)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
: AV_PICTURE_TYPE_P;
if (frame_type == SKIP_FRAME) {
*got_frame = 1;
- *(AVFrame*)data = c->pic;
+ if ((ret = av_frame_ref(data, &c->pic)) < 0)
+ return ret;
return buf_size;
}
dst[2] += c->pic.linesize[2] * 16;
}
+ if ((ret = av_frame_ref(data, &c->pic)) < 0)
+ return ret;
+
*got_frame = 1;
- *(AVFrame*)data = c->pic;
return buf_size;
}
}
avctx->pix_fmt = AV_PIX_FMT_YUV444P;
- avctx->coded_frame = &c->pic;
return 0;
}
MSS4Context * const c = avctx->priv_data;
int i;
- if (c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
+ av_frame_unref(&c->pic);
for (i = 0; i < 3; i++)
av_freep(&c->prev_dc[i]);
mss4_free_vlcs(c);
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
+#include "internal.h"
#define PALETTE_COUNT 256
#define CHECK_STREAM_PTR(n) \
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
Msvideo1Context *s = avctx->priv_data;
+ int ret;
s->buf = buf;
s->size = buf_size;
- s->frame.reference = 1;
- s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
- if (avctx->reget_buffer(avctx, &s->frame)) {
+ if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0) {
av_log(s->avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
- return -1;
+ return ret;
}
if (s->mode_8bit) {
else
msvideo1_decode_16bit(s);
+ if ((ret = av_frame_ref(data, &s->frame)) < 0)
+ return ret;
+
*got_frame = 1;
- *(AVFrame*)data = s->frame;
/* report that the buffer was completely consumed */
return buf_size;
{
Msvideo1Context *s = avctx->priv_data;
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
+ av_frame_unref(&s->frame);
return 0;
}
{
MXpegDecodeContext *s = avctx->priv_data;
- s->picture[0].reference = s->picture[1].reference = 3;
s->jpg.picture_ptr = &s->picture[0];
return ff_mjpeg_decode_init(avctx);
}
const uint8_t *unescaped_buf_ptr;
int unescaped_buf_size;
int start_code;
- AVFrame *picture = data;
int ret;
buf_ptr = buf;
break;
}
/* use stored SOF data to allocate current picture */
- if (jpg->picture_ptr->data[0])
- avctx->release_buffer(avctx, jpg->picture_ptr);
- if (ff_get_buffer(avctx, jpg->picture_ptr) < 0) {
+ av_frame_unref(jpg->picture_ptr);
+ if (ff_get_buffer(avctx, jpg->picture_ptr,
+ AV_GET_BUFFER_FLAG_REF) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return AVERROR(ENOMEM);
}
/* allocate dummy reference picture if needed */
if (!reference_ptr->data[0] &&
- ff_get_buffer(avctx, reference_ptr) < 0) {
+ ff_get_buffer(avctx, reference_ptr,
+ AV_GET_BUFFER_FLAG_REF) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return AVERROR(ENOMEM);
}
the_end:
if (jpg->got_picture) {
+ int ret = av_frame_ref(data, jpg->picture_ptr);
+ if (ret < 0)
+ return ret;
*got_frame = 1;
- *picture = *jpg->picture_ptr;
+
s->picture_index ^= 1;
jpg->picture_ptr = &s->picture[s->picture_index];
jpg->picture_ptr = NULL;
ff_mjpeg_decode_end(avctx);
- for (i = 0; i < 2; ++i) {
- if (s->picture[i].data[0])
- avctx->release_buffer(avctx, &s->picture[i]);
- }
+ for (i = 0; i < 2; ++i)
+ av_frame_unref(&s->picture[i]);
av_freep(&s->mxm_bitmask);
av_freep(&s->completion_bitmask);
/* get output buffer */
frame->nb_samples = NELLY_SAMPLES * blocks;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
#include "libavutil/lzo.h"
#include "libavutil/imgutils.h"
#include "avcodec.h"
+#include "internal.h"
#include "rtjpeg.h"
typedef struct {
buf_size -= RTJPEG_HEADER_SIZE;
}
- if (keyframe && c->pic.data[0]) {
- avctx->release_buffer(avctx, &c->pic);
+ if (keyframe) {
+ av_frame_unref(&c->pic);
init_frame = 1;
}
- c->pic.reference = 3;
- c->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_READABLE |
- FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
- result = avctx->reget_buffer(avctx, &c->pic);
+
+ result = ff_reget_buffer(avctx, &c->pic);
if (result < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return result;
return AVERROR_INVALIDDATA;
}
- *picture = c->pic;
+ if ((result = av_frame_ref(picture, &c->pic)) < 0)
+ return result;
+
*got_frame = 1;
return orig_size;
}
NuvContext *c = avctx->priv_data;
av_freep(&c->decomp_buf);
- if (c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
+ av_frame_unref(&c->pic);
return 0;
}
av_opt_set_defaults(s);
s->time_base = (AVRational){0,1};
- s->get_buffer = avcodec_default_get_buffer;
- s->release_buffer = avcodec_default_release_buffer;
+ s->get_buffer2 = avcodec_default_get_buffer2;
s->get_format = avcodec_default_get_format;
s->execute = avcodec_default_execute;
s->execute2 = avcodec_default_execute2;
s->pix_fmt = AV_PIX_FMT_NONE;
s->sample_fmt = AV_SAMPLE_FMT_NONE;
- s->reget_buffer = avcodec_default_reget_buffer;
s->reordered_opaque = AV_NOPTS_VALUE;
if(codec && codec->priv_data_size){
if(!s->priv_data){
{"s32p", "32-bit signed integer planar", 0, AV_OPT_TYPE_CONST, {.i64 = AV_SAMPLE_FMT_S32P }, INT_MIN, INT_MAX, A|D, "request_sample_fmt"},
{"fltp", "32-bit float planar", 0, AV_OPT_TYPE_CONST, {.i64 = AV_SAMPLE_FMT_FLTP }, INT_MIN, INT_MAX, A|D, "request_sample_fmt"},
{"dblp", "64-bit double planar", 0, AV_OPT_TYPE_CONST, {.i64 = AV_SAMPLE_FMT_DBLP }, INT_MIN, INT_MAX, A|D, "request_sample_fmt"},
+{"refcounted_frames", NULL, OFFSET(refcounted_frames), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, A|V|D },
{NULL},
};
/* get output buffer */
frame->nb_samples = samples;
- if ((retval = ff_get_buffer(avctx, frame)) < 0) {
+ if ((retval = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return retval;
}
/* get output buffer */
frame->nb_samples = n * samples_per_block / avctx->channels;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
#include "get_bits.h"
#include "internal.h"
-typedef struct PCXContext {
- AVFrame picture;
-} PCXContext;
-
-static av_cold int pcx_init(AVCodecContext *avctx) {
- PCXContext *s = avctx->priv_data;
-
- avcodec_get_frame_defaults(&s->picture);
- avctx->coded_frame= &s->picture;
-
- return 0;
-}
-
/**
* @return advanced src pointer
*/
AVPacket *avpkt) {
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
- PCXContext * const s = avctx->priv_data;
- AVFrame *picture = data;
- AVFrame * const p = &s->picture;
+ AVFrame * const p = data;
int compressed, xmin, ymin, xmax, ymax;
unsigned int w, h, bits_per_pixel, bytes_per_line, nplanes, stride, y, x,
bytes_per_scanline;
buf += 128;
- if (p->data[0])
- avctx->release_buffer(avctx, p);
-
if ((ret = av_image_check_size(w, h, 0, avctx)) < 0)
return ret;
if (w != avctx->width || h != avctx->height)
avcodec_set_dimensions(avctx, w, h);
- if ((ret = ff_get_buffer(avctx, p)) < 0) {
+ if ((ret = ff_get_buffer(avctx, p, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
pcx_palette(&palette, (uint32_t *) p->data[1], 16);
}
- *picture = s->picture;
*got_frame = 1;
ret = buf - bufstart;
return ret;
}
-static av_cold int pcx_end(AVCodecContext *avctx) {
- PCXContext *s = avctx->priv_data;
-
- if(s->picture.data[0])
- avctx->release_buffer(avctx, &s->picture);
-
- return 0;
-}
-
AVCodec ff_pcx_decoder = {
.name = "pcx",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_PCX,
- .priv_data_size = sizeof(PCXContext),
- .init = pcx_init,
- .close = pcx_end,
.decode = pcx_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("PC Paintbrush PCX image"),
#include "internal.h"
typedef struct PicContext {
- AVFrame frame;
int width, height;
int nb_planes;
GetByteContext g;
} PicContext;
-static void picmemset_8bpp(PicContext *s, int value, int run, int *x, int *y)
+static void picmemset_8bpp(PicContext *s, AVFrame *frame, int value, int run,
+ int *x, int *y)
{
while (run > 0) {
- uint8_t *d = s->frame.data[0] + *y * s->frame.linesize[0];
+ uint8_t *d = frame->data[0] + *y * frame->linesize[0];
if (*x + run >= s->width) {
int n = s->width - *x;
memset(d + *x, value, n);
}
}
-static void picmemset(PicContext *s, int value, int run,
+static void picmemset(PicContext *s, AVFrame *frame, int value, int run,
int *x, int *y, int *plane, int bits_per_plane)
{
uint8_t *d;
while (run > 0) {
int j;
for (j = 8-bits_per_plane; j >= 0; j -= bits_per_plane) {
- d = s->frame.data[0] + *y * s->frame.linesize[0];
+ d = frame->data[0] + *y * frame->linesize[0];
d[*x] |= (value >> j) & mask;
*x += 1;
if (*x == s->width) {
AVPacket *avpkt)
{
PicContext *s = avctx->priv_data;
+ AVFrame *frame = data;
uint32_t *palette;
int bits_per_plane, bpp, etype, esize, npal, pos_after_pal;
- int i, x, y, plane, tmp;
+ int i, x, y, plane, tmp, ret;
bytestream2_init(&s->g, avpkt->data, avpkt->size);
if (av_image_check_size(s->width, s->height, 0, avctx) < 0)
return -1;
avcodec_set_dimensions(avctx, s->width, s->height);
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
}
- if (ff_get_buffer(avctx, &s->frame) < 0){
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
- return -1;
+ return ret;
}
- memset(s->frame.data[0], 0, s->height * s->frame.linesize[0]);
- s->frame.pict_type = AV_PICTURE_TYPE_I;
- s->frame.palette_has_changed = 1;
+ memset(frame->data[0], 0, s->height * frame->linesize[0]);
+ frame->pict_type = AV_PICTURE_TYPE_I;
+ frame->palette_has_changed = 1;
pos_after_pal = bytestream2_tell(&s->g) + esize;
- palette = (uint32_t*)s->frame.data[1];
+ palette = (uint32_t*)frame->data[1];
if (etype == 1 && esize > 1 && bytestream2_peek_byte(&s->g) < 6) {
int idx = bytestream2_get_byte(&s->g);
npal = 4;
break;
if (bits_per_plane == 8) {
- picmemset_8bpp(s, val, run, &x, &y);
+ picmemset_8bpp(s, frame, val, run, &x, &y);
if (y < 0)
break;
} else {
- picmemset(s, val, run, &x, &y, &plane, bits_per_plane);
+ picmemset(s, frame, val, run, &x, &y, &plane, bits_per_plane);
}
}
}
}
*got_frame = 1;
- *(AVFrame*)data = s->frame;
return avpkt->size;
}
-static av_cold int decode_end(AVCodecContext *avctx)
-{
- PicContext *s = avctx->priv_data;
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
- return 0;
-}
-
AVCodec ff_pictor_decoder = {
.name = "pictor",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_PICTOR,
.priv_data_size = sizeof(PicContext),
- .close = decode_end,
.decode = decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Pictor/PC Paint"),
PNGDSPContext dsp;
GetByteContext gb;
- AVFrame picture1, picture2;
- AVFrame *current_picture, *last_picture;
+ AVFrame *prev;
int state;
int width, height;
PNGDecContext * const s = avctx->priv_data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
- AVFrame *picture = data;
+ AVFrame *p = data;
uint8_t *crow_buf_base = NULL;
- AVFrame *p;
uint32_t tag, length;
int ret;
- FFSWAP(AVFrame *, s->current_picture, s->last_picture);
- avctx->coded_frame = s->current_picture;
- p = s->current_picture;
-
/* check signature */
if (buf_size < 8 ||
memcmp(buf, ff_pngsig, 8) != 0 &&
} else {
goto fail;
}
- if (p->data[0])
- avctx->release_buffer(avctx, p);
- p->reference = 0;
- if (ff_get_buffer(avctx, p) < 0) {
+ if (ff_get_buffer(avctx, p, AV_GET_BUFFER_FLAG_REF) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
goto fail;
}
}
exit_loop:
/* handle p-frames only if a predecessor frame is available */
- if (s->last_picture->data[0] != NULL) {
+ if (s->prev->data[0]) {
if (!(avpkt->flags & AV_PKT_FLAG_KEY)) {
int i, j;
- uint8_t *pd = s->current_picture->data[0];
- uint8_t *pd_last = s->last_picture->data[0];
+ uint8_t *pd = p->data[0];
+ uint8_t *pd_last = s->prev->data[0];
for (j = 0; j < s->height; j++) {
for (i = 0; i < s->width * s->bpp; i++) {
}
}
- *picture = *s->current_picture;
+ av_frame_unref(s->prev);
+ if ((ret = av_frame_ref(s->prev, p)) < 0)
+ goto fail;
+
*got_frame = 1;
ret = bytestream2_tell(&s->gb);
{
PNGDecContext *s = avctx->priv_data;
- s->current_picture = &s->picture1;
- s->last_picture = &s->picture2;
- avcodec_get_frame_defaults(&s->picture1);
- avcodec_get_frame_defaults(&s->picture2);
+ s->prev = av_frame_alloc();
+ if (!s->prev)
+ return AVERROR(ENOMEM);
+
ff_pngdsp_init(&s->dsp);
return 0;
{
PNGDecContext *s = avctx->priv_data;
- if (s->picture1.data[0])
- avctx->release_buffer(avctx, &s->picture1);
- if (s->picture2.data[0])
- avctx->release_buffer(avctx, &s->picture2);
+ av_frame_free(&s->prev);
return 0;
}
return 0;
}
-av_cold int ff_pnm_end(AVCodecContext *avctx)
-{
- PNMContext *s = avctx->priv_data;
-
- if (s->picture.data[0])
- avctx->release_buffer(avctx, &s->picture);
-
- return 0;
-}
-
av_cold int ff_pnm_init(AVCodecContext *avctx)
{
PNMContext *s = avctx->priv_data;
} PNMContext;
int ff_pnm_decode_header(AVCodecContext *avctx, PNMContext * const s);
-int ff_pnm_end(AVCodecContext *avctx);
int ff_pnm_init(AVCodecContext *avctx);
#endif /* AVCODEC_PNM_H */
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
PNMContext * const s = avctx->priv_data;
- AVFrame *picture = data;
- AVFrame * const p = &s->picture;
+ AVFrame * const p = data;
int i, j, n, linesize, h, upgrade = 0;
unsigned char *ptr;
int components, sample_len, ret;
if ((ret = ff_pnm_decode_header(avctx, s)) < 0)
return ret;
- if (p->data[0])
- avctx->release_buffer(avctx, p);
-
- p->reference = 0;
- if ((ret = ff_get_buffer(avctx, p)) < 0) {
+ if ((ret = ff_get_buffer(avctx, p, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
}
break;
}
- *picture = s->picture;
*got_frame = 1;
return s->bytestream - s->bytestream_start;
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_PGM,
.priv_data_size = sizeof(PNMContext),
- .init = ff_pnm_init,
- .close = ff_pnm_end,
.decode = pnm_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("PGM (Portable GrayMap) image"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_PGMYUV,
.priv_data_size = sizeof(PNMContext),
- .init = ff_pnm_init,
- .close = ff_pnm_end,
.decode = pnm_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("PGMYUV (Portable GrayMap YUV) image"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_PPM,
.priv_data_size = sizeof(PNMContext),
- .init = ff_pnm_init,
- .close = ff_pnm_end,
.decode = pnm_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("PPM (Portable PixelMap) image"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_PBM,
.priv_data_size = sizeof(PNMContext),
- .init = ff_pnm_init,
- .close = ff_pnm_end,
.decode = pnm_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("PBM (Portable BitMap) image"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_PAM,
.priv_data_size = sizeof(PNMContext),
- .init = ff_pnm_init,
- .close = ff_pnm_end,
.decode = pnm_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("PAM (Portable AnyMap) image"),
typedef struct {
ProresDSPContext dsp;
- AVFrame picture;
+ AVFrame *frame;
ScanTable scantable;
int scantable_type; ///< -1 = uninitialized, 0 = progressive, 1/2 = interlaced
avctx->bits_per_raw_sample = PRORES_BITS_PER_SAMPLE;
ff_proresdsp_init(&ctx->dsp);
- avctx->coded_frame = &ctx->picture;
- avcodec_get_frame_defaults(&ctx->picture);
- ctx->picture.type = AV_PICTURE_TYPE_I;
- ctx->picture.key_frame = 1;
-
ctx->scantable_type = -1; // set scantable type to uninitialized
memset(ctx->qmat_luma, 4, 64);
memset(ctx->qmat_chroma, 4, 64);
}
if (ctx->frame_type) { /* if interlaced */
- ctx->picture.interlaced_frame = 1;
- ctx->picture.top_field_first = ctx->frame_type & 1;
+ ctx->frame->interlaced_frame = 1;
+ ctx->frame->top_field_first = ctx->frame_type & 1;
} else {
- ctx->picture.interlaced_frame = 0;
+ ctx->frame->interlaced_frame = 0;
}
avctx->color_primaries = buf[14];
ctx->num_x_mbs = (avctx->width + 15) >> 4;
ctx->num_y_mbs = (avctx->height +
- (1 << (4 + ctx->picture.interlaced_frame)) - 1) >>
- (4 + ctx->picture.interlaced_frame);
+ (1 << (4 + ctx->frame->interlaced_frame)) - 1) >>
+ (4 + ctx->frame->interlaced_frame);
remainder = ctx->num_x_mbs & ((1 << slice_width_factor) - 1);
num_x_slices = (ctx->num_x_mbs >> slice_width_factor) + (remainder & 1) +
int mbs_per_slice = td->slice_width;
const uint8_t *buf;
uint8_t *y_data, *u_data, *v_data;
- AVFrame *pic = avctx->coded_frame;
+ AVFrame *pic = ctx->frame;
int i, sf, slice_width_factor;
int slice_data_size, hdr_size, y_data_size, u_data_size, v_data_size;
int y_linesize, u_linesize, v_linesize;
AVPacket *avpkt)
{
ProresContext *ctx = avctx->priv_data;
- AVFrame *picture = avctx->coded_frame;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
int frame_hdr_size, pic_num, pic_data_size;
+ ctx->frame = data;
+ ctx->frame->pict_type = AV_PICTURE_TYPE_I;
+ ctx->frame->key_frame = 1;
+
/* check frame atom container */
if (buf_size < 28 || buf_size < AV_RB32(buf) ||
AV_RB32(buf + 4) != FRAME_ID) {
MOVE_DATA_PTR(frame_hdr_size);
- if (picture->data[0])
- avctx->release_buffer(avctx, picture);
-
- picture->reference = 0;
- if (ff_get_buffer(avctx, picture) < 0)
+ if (ff_get_buffer(avctx, ctx->frame, 0) < 0)
return -1;
- for (pic_num = 0; ctx->picture.interlaced_frame - pic_num + 1; pic_num++) {
+ for (pic_num = 0; ctx->frame->interlaced_frame - pic_num + 1; pic_num++) {
pic_data_size = decode_picture_header(ctx, buf, buf_size, avctx);
if (pic_data_size < 0)
return AVERROR_INVALIDDATA;
MOVE_DATA_PTR(pic_data_size);
}
- *got_frame = 1;
- *(AVFrame*) data = *avctx->coded_frame;
+ ctx->frame = NULL;
+ *got_frame = 1;
return avpkt->size;
}
{
ProresContext *ctx = avctx->priv_data;
- if (ctx->picture.data[0])
- avctx->release_buffer(avctx, &ctx->picture);
-
av_freep(&ctx->slice_data);
return 0;
#include "avcodec.h"
#include "internal.h"
#include "thread.h"
+#include "libavutil/avassert.h"
#include "libavutil/common.h"
#if HAVE_PTHREADS
int done;
} ThreadContext;
-/// Max number of frame buffers that can be allocated when using frame threads.
-#define MAX_BUFFERS (32+1)
-
/**
* Context used by codec threads and stored in their AVCodecContext thread_opaque.
*/
* Array of frames passed to ff_thread_release_buffer().
* Frames are released after all threads referencing them are finished.
*/
- AVFrame released_buffers[MAX_BUFFERS];
- int num_released_buffers;
-
- /**
- * Array of progress values used by ff_thread_get_buffer().
- */
- int progress[MAX_BUFFERS][2];
- uint8_t progress_used[MAX_BUFFERS];
+ AVFrame *released_buffers;
+ int num_released_buffers;
+ int released_buffers_allocated;
AVFrame *requested_frame; ///< AVFrame the codec passed to get_buffer()
+ int requested_flags; ///< flags passed to get_buffer() for requested_frame
} PerThreadContext;
/**
dst->flags = src->flags;
dst->draw_horiz_band= src->draw_horiz_band;
+ dst->get_buffer2 = src->get_buffer2;
+#if FF_API_GET_BUFFER
dst->get_buffer = src->get_buffer;
dst->release_buffer = src->release_buffer;
+#endif
dst->opaque = src->opaque;
dst->debug = src->debug;
#undef copy_fields
}
-static void free_progress(AVFrame *f)
-{
- PerThreadContext *p = f->owner->thread_opaque;
- int *progress = f->thread_opaque;
-
- p->progress_used[(progress - p->progress[0]) / 2] = 0;
-}
-
/// Releases the buffers that this decoding thread was the last user of.
static void release_delayed_buffers(PerThreadContext *p)
{
AVFrame *f;
pthread_mutex_lock(&fctx->buffer_mutex);
+
+ // fix extended data in case the caller screwed it up
+ av_assert0(p->avctx->codec_type == AVMEDIA_TYPE_VIDEO);
f = &p->released_buffers[--p->num_released_buffers];
- free_progress(f);
- f->thread_opaque = NULL;
+ f->extended_data = f->data;
+ av_frame_unref(f);
- f->owner->release_buffer(f->owner, f);
pthread_mutex_unlock(&fctx->buffer_mutex);
}
}
* and it calls back to the client here.
*/
- if (!p->avctx->thread_safe_callbacks &&
- p->avctx->get_buffer != avcodec_default_get_buffer) {
+ if (!p->avctx->thread_safe_callbacks && (
+#if FF_API_GET_BUFFER
+ p->avctx->get_buffer ||
+#endif
+ p->avctx->get_buffer2 != avcodec_default_get_buffer2)) {
while (p->state != STATE_SETUP_FINISHED && p->state != STATE_INPUT_READY) {
pthread_mutex_lock(&p->progress_mutex);
while (p->state == STATE_SETTING_UP)
pthread_cond_wait(&p->progress_cond, &p->progress_mutex);
if (p->state == STATE_GET_BUFFER) {
- p->result = ff_get_buffer(p->avctx, p->requested_frame);
+ p->result = ff_get_buffer(p->avctx, p->requested_frame, p->requested_flags);
p->state = STATE_SETTING_UP;
pthread_cond_signal(&p->progress_cond);
}
pthread_mutex_unlock(&p->progress_mutex);
}
- *picture = p->frame;
+ av_frame_move_ref(picture, &p->frame);
*got_picture_ptr = p->got_frame;
picture->pkt_dts = p->avpkt.dts;
return (p->result >= 0) ? avpkt->size : p->result;
}
-void ff_thread_report_progress(AVFrame *f, int n, int field)
+void ff_thread_report_progress(ThreadFrame *f, int n, int field)
{
PerThreadContext *p;
- int *progress = f->thread_opaque;
+ int *progress = f->progress ? (int*)f->progress->data : NULL;
if (!progress || progress[field] >= n) return;
pthread_mutex_unlock(&p->progress_mutex);
}
-void ff_thread_await_progress(AVFrame *f, int n, int field)
+void ff_thread_await_progress(ThreadFrame *f, int n, int field)
{
PerThreadContext *p;
- int *progress = f->thread_opaque;
+ int *progress = f->progress ? (int*)f->progress->data : NULL;
if (!progress || progress[field] >= n) return;
for (i = 0; i < thread_count; i++) {
PerThreadContext *p = &fctx->threads[i];
- avcodec_default_free_buffers(p->avctx);
-
pthread_mutex_destroy(&p->mutex);
pthread_mutex_destroy(&p->progress_mutex);
pthread_cond_destroy(&p->input_cond);
pthread_cond_destroy(&p->output_cond);
av_buffer_unref(&p->avpkt.buf);
av_freep(&p->buf);
+ av_freep(&p->released_buffers);
if (i) {
av_freep(&p->avctx->priv_data);
}
}
-static int *allocate_progress(PerThreadContext *p)
-{
- int i;
-
- for (i = 0; i < MAX_BUFFERS; i++)
- if (!p->progress_used[i]) break;
-
- if (i == MAX_BUFFERS) {
- av_log(p->avctx, AV_LOG_ERROR, "allocate_progress() overflow\n");
- return NULL;
- }
-
- p->progress_used[i] = 1;
-
- return p->progress[i];
-}
-
-int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f)
+int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
{
PerThreadContext *p = avctx->thread_opaque;
- int *progress, err;
+ int err;
f->owner = avctx;
- if (!(avctx->active_thread_type&FF_THREAD_FRAME)) {
- f->thread_opaque = NULL;
- return ff_get_buffer(avctx, f);
- }
+ if (!(avctx->active_thread_type & FF_THREAD_FRAME))
+ return ff_get_buffer(avctx, f->f, flags);
if (p->state != STATE_SETTING_UP &&
(avctx->codec->update_thread_context || !avctx->thread_safe_callbacks)) {
return -1;
}
- pthread_mutex_lock(&p->parent->buffer_mutex);
- f->thread_opaque = progress = allocate_progress(p);
+ if (avctx->internal->allocate_progress) {
+ int *progress;
+ f->progress = av_buffer_alloc(2 * sizeof(int));
+ if (!f->progress) {
+ return AVERROR(ENOMEM);
+ }
+ progress = (int*)f->progress->data;
- if (!progress) {
- pthread_mutex_unlock(&p->parent->buffer_mutex);
- return -1;
+ progress[0] = progress[1] = -1;
}
- progress[0] =
- progress[1] = -1;
-
- if (avctx->thread_safe_callbacks ||
- avctx->get_buffer == avcodec_default_get_buffer) {
- err = ff_get_buffer(avctx, f);
+ pthread_mutex_lock(&p->parent->buffer_mutex);
+ if (avctx->thread_safe_callbacks || (
+#if FF_API_GET_BUFFER
+ !avctx->get_buffer &&
+#endif
+ avctx->get_buffer2 == avcodec_default_get_buffer2)) {
+ err = ff_get_buffer(avctx, f->f, flags);
} else {
- p->requested_frame = f;
+ p->requested_frame = f->f;
+ p->requested_flags = flags;
p->state = STATE_GET_BUFFER;
pthread_mutex_lock(&p->progress_mutex);
pthread_cond_signal(&p->progress_cond);
ff_thread_finish_setup(avctx);
}
- if (err) {
- free_progress(f);
- f->thread_opaque = NULL;
- }
+ if (err)
+ av_buffer_unref(&f->progress);
+
pthread_mutex_unlock(&p->parent->buffer_mutex);
return err;
}
-void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f)
+void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
{
PerThreadContext *p = avctx->thread_opaque;
FrameThreadContext *fctx;
+ AVFrame *dst, *tmp;
+ int can_direct_free = !(avctx->active_thread_type & FF_THREAD_FRAME) ||
+ avctx->thread_safe_callbacks ||
+ (
+#if FF_API_GET_BUFFER
+ !avctx->get_buffer &&
+#endif
+ avctx->get_buffer2 == avcodec_default_get_buffer2);
- if (!f->data[0])
+ if (!f->f->data[0])
return;
- if (!(avctx->active_thread_type&FF_THREAD_FRAME)) {
- avctx->release_buffer(avctx, f);
- return;
- }
+ if (avctx->debug & FF_DEBUG_BUFFERS)
+ av_log(avctx, AV_LOG_DEBUG, "thread_release_buffer called on pic %p\n", f);
- if (p->num_released_buffers >= MAX_BUFFERS) {
- av_log(p->avctx, AV_LOG_ERROR, "too many thread_release_buffer calls!\n");
+ av_buffer_unref(&f->progress);
+ f->owner = NULL;
+
+ if (can_direct_free) {
+ av_frame_unref(f->f);
return;
}
- if(avctx->debug & FF_DEBUG_BUFFERS)
- av_log(avctx, AV_LOG_DEBUG, "thread_release_buffer called on pic %p\n", f);
-
fctx = p->parent;
pthread_mutex_lock(&fctx->buffer_mutex);
- p->released_buffers[p->num_released_buffers++] = *f;
+
+ if (p->num_released_buffers + 1 >= INT_MAX / sizeof(*p->released_buffers))
+ goto fail;
+ tmp = av_fast_realloc(p->released_buffers, &p->released_buffers_allocated,
+ (p->num_released_buffers + 1) *
+ sizeof(*p->released_buffers));
+ if (!tmp)
+ goto fail;
+ p->released_buffers = tmp;
+
+ dst = &p->released_buffers[p->num_released_buffers];
+ av_frame_move_ref(dst, f->f);
+
+ p->num_released_buffers++;
+
+fail:
pthread_mutex_unlock(&fctx->buffer_mutex);
- memset(f->data, 0, sizeof(f->data));
}
/**
#include "avcodec.h"
#include "internal.h"
-typedef struct PTXContext {
- AVFrame picture;
-} PTXContext;
-
-static av_cold int ptx_init(AVCodecContext *avctx) {
- PTXContext *s = avctx->priv_data;
-
- avcodec_get_frame_defaults(&s->picture);
- avctx->coded_frame= &s->picture;
-
- return 0;
-}
-
static int ptx_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
AVPacket *avpkt) {
const uint8_t *buf = avpkt->data;
const uint8_t *buf_end = avpkt->data + avpkt->size;
- PTXContext * const s = avctx->priv_data;
- AVFrame *picture = data;
- AVFrame * const p = &s->picture;
+ AVFrame * const p = data;
unsigned int offset, w, h, y, stride, bytes_per_pixel;
int ret;
uint8_t *ptr;
buf += offset;
- if (p->data[0])
- avctx->release_buffer(avctx, p);
-
if ((ret = av_image_check_size(w, h, 0, avctx)) < 0)
return ret;
if (w != avctx->width || h != avctx->height)
avcodec_set_dimensions(avctx, w, h);
- if ((ret = ff_get_buffer(avctx, p)) < 0) {
+ if ((ret = ff_get_buffer(avctx, p, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
buf += w*bytes_per_pixel;
}
- *picture = s->picture;
*got_frame = 1;
if (y < h) {
return offset + w*h*bytes_per_pixel;
}
-static av_cold int ptx_end(AVCodecContext *avctx) {
- PTXContext *s = avctx->priv_data;
-
- if(s->picture.data[0])
- avctx->release_buffer(avctx, &s->picture);
-
- return 0;
-}
-
AVCodec ff_ptx_decoder = {
.name = "ptx",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_PTX,
- .priv_data_size = sizeof(PTXContext),
- .init = ptx_init,
- .close = ptx_end,
.decode = ptx_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("V.Flash PTX image"),
/* get output buffer */
frame->nb_samples = 160;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
/* get output buffer */
frame->nb_samples = 16 * s->frame_size;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
#include "avcodec.h"
#include "internal.h"
-typedef struct QdrawContext {
- AVCodecContext *avctx;
- AVFrame pic;
-} QdrawContext;
-
static int decode_frame(AVCodecContext *avctx,
void *data, int *got_frame,
AVPacket *avpkt)
const uint8_t *buf = avpkt->data;
const uint8_t *buf_end = avpkt->data + avpkt->size;
int buf_size = avpkt->size;
- QdrawContext * const a = avctx->priv_data;
- AVFrame * const p = &a->pic;
+ AVFrame * const p = data;
uint8_t* outdata;
int colors;
int i, ret;
uint32_t *pal;
int r, g, b;
- if (p->data[0])
- avctx->release_buffer(avctx, p);
-
- p->reference = 0;
- if ((ret = ff_get_buffer(avctx, p)) < 0) {
+ if ((ret = ff_get_buffer(avctx, p, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1;
- outdata = a->pic.data[0];
+ outdata = p->data[0];
if (buf_end - buf < 0x68 + 4)
return AVERROR_INVALIDDATA;
code = *buf++;
if (code & 0x80 ) { /* run */
pix = *buf++;
- if ((out + (257 - code)) > (outdata + a->pic.linesize[0]))
+ if ((out + (257 - code)) > (outdata + p->linesize[0]))
break;
memset(out, pix, 257 - code);
out += 257 - code;
tsize += 257 - code;
left -= 2;
} else { /* copy */
- if ((out + code) > (outdata + a->pic.linesize[0]))
+ if ((out + code) > (outdata + p->linesize[0]))
break;
if (buf_end - buf < code + 1)
return AVERROR_INVALIDDATA;
}
}
buf = next;
- outdata += a->pic.linesize[0];
+ outdata += p->linesize[0];
}
*got_frame = 1;
- *(AVFrame*)data = a->pic;
return buf_size;
}
return 0;
}
-static av_cold int decode_end(AVCodecContext *avctx)
-{
- QdrawContext * const a = avctx->priv_data;
- AVFrame *pic = &a->pic;
-
- if (pic->data[0])
- avctx->release_buffer(avctx, pic);
-
- return 0;
-}
-
AVCodec ff_qdraw_decoder = {
.name = "qdraw",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_QDRAW,
- .priv_data_size = sizeof(QdrawContext),
.init = decode_init,
- .close = decode_end,
.decode = decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Apple QuickDraw"),
#include "avcodec.h"
#include "bytestream.h"
+#include "internal.h"
typedef struct QpegContext{
AVCodecContext *avctx;
}
bytestream2_init(&a->buffer, avpkt->data, avpkt->size);
- p->reference = 3;
- if ((ret = avctx->reget_buffer(avctx, p)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, p)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
}
memcpy(a->pic.data[1], a->pal, AVPALETTE_SIZE);
+ if ((ret = av_frame_ref(data, &a->pic)) < 0)
+ return ret;
+
*got_frame = 1;
- *(AVFrame*)data = a->pic;
return avpkt->size;
}
QpegContext * const a = avctx->priv_data;
AVFrame * const p = &a->pic;
- if(p->data[0])
- avctx->release_buffer(avctx, p);
+ av_frame_unref(p);
av_free(a->refdata);
return 0;
#include "avcodec.h"
#include "bytestream.h"
+#include "internal.h"
typedef struct QtrleContext {
AVCodecContext *avctx;
int ret;
bytestream2_init(&s->g, avpkt->data, avpkt->size);
- s->frame.reference = 1;
- s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
- FF_BUFFER_HINTS_REUSABLE | FF_BUFFER_HINTS_READABLE;
- if ((ret = avctx->reget_buffer(avctx, &s->frame)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0) {
av_log (s->avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
}
done:
+ if ((ret = av_frame_ref(data, &s->frame)) < 0)
+ return ret;
*got_frame = 1;
- *(AVFrame*)data = s->frame;
/* always report that the buffer was completely consumed */
return avpkt->size;
{
QtrleContext *s = avctx->priv_data;
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
+ av_frame_unref(&s->frame);
return 0;
}
avctx->pix_fmt = AV_PIX_FMT_RGB48;
avctx->bits_per_raw_sample = 10;
- avctx->coded_frame = avcodec_alloc_frame();
-
return 0;
}
AVPacket *avpkt)
{
int h, w, ret;
- AVFrame *pic = avctx->coded_frame;
+ AVFrame *pic = data;
const uint32_t *src = (const uint32_t *)avpkt->data;
int aligned_width = FFALIGN(avctx->width, 64);
uint8_t *dst_line;
- if (pic->data[0])
- avctx->release_buffer(avctx, pic);
-
if (avpkt->size < 4 * aligned_width * avctx->height) {
av_log(avctx, AV_LOG_ERROR, "packet too small\n");
return AVERROR_INVALIDDATA;
}
- pic->reference = 0;
- if ((ret = ff_get_buffer(avctx, pic)) < 0)
+ if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
return ret;
pic->pict_type = AV_PICTURE_TYPE_I;
}
*got_frame = 1;
- *(AVFrame*)data = *avctx->coded_frame;
return avpkt->size;
}
-static av_cold int decode_close(AVCodecContext *avctx)
-{
- AVFrame *pic = avctx->coded_frame;
- if (pic->data[0])
- avctx->release_buffer(avctx, pic);
- av_freep(&avctx->coded_frame);
-
- return 0;
-}
-
#if CONFIG_R210_DECODER
AVCodec ff_r210_decoder = {
.name = "r210",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_R210,
.init = decode_init,
- .close = decode_close,
.decode = decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Uncompressed RGB 10-bit"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_R10K,
.init = decode_init,
- .close = decode_close,
.decode = decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("AJA Kona 10-bit RGB Codec"),
/* get output buffer */
frame->nb_samples = NBLOCKS * BLOCKSIZE;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
/* get output buffer */
frame->nb_samples = RA288_BLOCK_SIZE * RA288_BLOCKS_PER_FRAME;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
}
frame->nb_samples = ctx->max_frame_size;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "Me fail get_buffer()? That's unpossible!\n");
return ret;
}
#include "avcodec.h"
#include "raw.h"
+#include "libavutil/buffer.h"
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/imgutils.h"
typedef struct RawVideoContext {
- uint32_t palette[AVPALETTE_COUNT];
- unsigned char *buffer; /* block of memory for holding one frame */
- int length; /* number of bytes in buffer */
+ AVBufferRef *palette;
+ int frame_size; /* size of the frame in bytes */
int flip;
- AVFrame pic; ///< AVCodecContext.coded_frame
+ int is_2_4_bpp; // 2 or 4 bpp raw in avi/mov
+ int is_yuv2;
} RawVideoContext;
static const PixelFormatTag pix_fmt_bps_avi[] = {
static av_cold int raw_init_decoder(AVCodecContext *avctx)
{
RawVideoContext *context = avctx->priv_data;
+ const AVPixFmtDescriptor *desc;
if (avctx->codec_tag == MKTAG('r', 'a', 'w', ' '))
avctx->pix_fmt = find_pix_fmt(pix_fmt_bps_mov,
avctx->pix_fmt = find_pix_fmt(pix_fmt_bps_avi,
avctx->bits_per_coded_sample);
- avpriv_set_systematic_pal2(context->palette, avctx->pix_fmt);
- context->length = avpicture_get_size(avctx->pix_fmt, avctx->width,
- avctx->height);
- if ((avctx->bits_per_coded_sample == 4 || avctx->bits_per_coded_sample == 2) &&
- avctx->pix_fmt == AV_PIX_FMT_PAL8 &&
- (!avctx->codec_tag || avctx->codec_tag == MKTAG('r','a','w',' '))) {
- context->buffer = av_malloc(context->length);
- if (!context->buffer)
- return -1;
+ desc = av_pix_fmt_desc_get(avctx->pix_fmt);
+ if (!desc) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid pixel format.\n");
+ return AVERROR(EINVAL);
+ }
+
+    if (desc->flags & (PIX_FMT_PAL | PIX_FMT_PSEUDOPAL)) {
+ context->palette = av_buffer_alloc(AVPALETTE_SIZE);
+ if (!context->palette)
+ return AVERROR(ENOMEM);
+ if (desc->flags & PIX_FMT_PSEUDOPAL)
+ avpriv_set_systematic_pal2((uint32_t*)context->palette->data, avctx->pix_fmt);
+ else
+ memset(context->palette->data, 0, AVPALETTE_SIZE);
}
- context->pic.pict_type = AV_PICTURE_TYPE_I;
- context->pic.key_frame = 1;
- avctx->coded_frame = &context->pic;
+ context->frame_size = avpicture_get_size(avctx->pix_fmt, avctx->width,
+ avctx->height);
+ if ((avctx->bits_per_coded_sample == 4 || avctx->bits_per_coded_sample == 2) &&
+ avctx->pix_fmt == AV_PIX_FMT_PAL8 &&
+ (!avctx->codec_tag || avctx->codec_tag == MKTAG('r','a','w',' ')))
+ context->is_2_4_bpp = 1;
if ((avctx->extradata_size >= 9 &&
!memcmp(avctx->extradata + avctx->extradata_size - 9, "BottomUp", 9)) ||
avctx->codec_tag == MKTAG('W','R','A','W'))
context->flip = 1;
+ if (avctx->codec_tag == AV_RL32("yuv2") &&
+ avctx->pix_fmt == AV_PIX_FMT_YUYV422)
+ context->is_yuv2 = 1;
+
return 0;
}
RawVideoContext *context = avctx->priv_data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
+ int need_copy = !avpkt->buf || context->is_2_4_bpp || context->is_yuv2;
int res;
AVFrame *frame = data;
AVPicture *picture = data;
- frame->pict_type = avctx->coded_frame->pict_type;
- frame->interlaced_frame = avctx->coded_frame->interlaced_frame;
- frame->top_field_first = avctx->coded_frame->top_field_first;
+ frame->pict_type = AV_PICTURE_TYPE_I;
+ frame->key_frame = 1;
frame->reordered_opaque = avctx->reordered_opaque;
frame->pkt_pts = avctx->pkt->pts;
- if (buf_size < context->length - (avctx->pix_fmt == AV_PIX_FMT_PAL8 ?
- AVPALETTE_SIZE : 0))
+ if (buf_size < context->frame_size - (avctx->pix_fmt == AV_PIX_FMT_PAL8 ?
+ AVPALETTE_SIZE : 0))
return -1;
+ if (need_copy)
+ frame->buf[0] = av_buffer_alloc(context->frame_size);
+ else
+ frame->buf[0] = av_buffer_ref(avpkt->buf);
+ if (!frame->buf[0])
+ return AVERROR(ENOMEM);
+
//2bpp and 4bpp raw in avi and mov (yes this is ugly ...)
- if (context->buffer) {
+ if (context->is_2_4_bpp) {
int i;
- uint8_t *dst = context->buffer;
- buf_size = context->length - AVPALETTE_SIZE;
+ uint8_t *dst = frame->buf[0]->data;
+ buf_size = context->frame_size - AVPALETTE_SIZE;
if (avctx->bits_per_coded_sample == 4) {
for (i = 0; 2 * i + 1 < buf_size; i++) {
dst[2 * i + 0] = buf[i] >> 4;
}
}
buf = dst;
+ } else if (need_copy) {
+ memcpy(frame->buf[0]->data, buf, FFMIN(buf_size, context->frame_size));
+ buf = frame->buf[0]->data;
}
if (avctx->codec_tag == MKTAG('A', 'V', '1', 'x') ||
avctx->codec_tag == MKTAG('A', 'V', 'u', 'p'))
- buf += buf_size - context->length;
+ buf += buf_size - context->frame_size;
if ((res = avpicture_fill(picture, buf, avctx->pix_fmt,
avctx->width, avctx->height)) < 0)
return res;
- if ((avctx->pix_fmt == AV_PIX_FMT_PAL8 && buf_size < context->length) ||
- (desc->flags & PIX_FMT_PSEUDOPAL)) {
- frame->data[1] = context->palette;
- }
+
if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE,
NULL);
if (pal) {
- memcpy(frame->data[1], pal, AVPALETTE_SIZE);
+ av_buffer_unref(&context->palette);
+ context->palette = av_buffer_alloc(AVPALETTE_SIZE);
+ if (!context->palette)
+ return AVERROR(ENOMEM);
+ memcpy(context->palette->data, pal, AVPALETTE_SIZE);
frame->palette_has_changed = 1;
}
}
+
+ if ((avctx->pix_fmt == AV_PIX_FMT_PAL8 && buf_size < context->frame_size) ||
+ (desc->flags & PIX_FMT_PSEUDOPAL)) {
+ frame->buf[1] = av_buffer_ref(context->palette);
+ if (!frame->buf[1])
+ return AVERROR(ENOMEM);
+ frame->data[1] = frame->buf[1]->data;
+ }
if (avctx->pix_fmt == AV_PIX_FMT_BGR24 &&
((frame->linesize[0] + 3) & ~3) * avctx->height <= buf_size)
frame->linesize[0] = (frame->linesize[0] + 3) & ~3;
{
RawVideoContext *context = avctx->priv_data;
- av_freep(&context->buffer);
+ av_buffer_unref(&context->palette);
return 0;
}
typedef struct Rl2Context {
AVCodecContext *avctx;
- AVFrame frame;
uint16_t video_base; ///< initial drawing offset
uint32_t clr_count; ///< number of used colors (currently unused)
void *data, int *got_frame,
AVPacket *avpkt)
{
+ AVFrame *frame = data;
const uint8_t *buf = avpkt->data;
int ret, buf_size = avpkt->size;
Rl2Context *s = avctx->priv_data;
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
-
- /** get buffer */
- s->frame.reference = 0;
- if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
/** run length decode */
- rl2_rle_decode(s, buf, buf_size, s->frame.data[0], s->frame.linesize[0],
+ rl2_rle_decode(s, buf, buf_size, frame->data[0], frame->linesize[0],
s->video_base);
/** make the palette available on the way out */
- memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE);
+ memcpy(frame->data[1], s->palette, AVPALETTE_SIZE);
*got_frame = 1;
- *(AVFrame*)data = s->frame;
/** report that the buffer was completely consumed */
return buf_size;
{
Rl2Context *s = avctx->priv_data;
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
-
av_free(s->back_frame);
return 0;
#include "avcodec.h"
#include "bytestream.h"
+#include "internal.h"
#include "roqvideo.h"
static void roqvideo_decode_frame(RoqContext *ri)
s->width = avctx->width;
s->height = avctx->height;
- s->last_frame = &s->frames[0];
- s->current_frame = &s->frames[1];
+
+ s->last_frame = av_frame_alloc();
+ s->current_frame = av_frame_alloc();
+ if (!s->current_frame || !s->last_frame) {
+ av_frame_free(&s->current_frame);
+ av_frame_free(&s->last_frame);
+ return AVERROR(ENOMEM);
+ }
+
avctx->pix_fmt = AV_PIX_FMT_YUV444P;
return 0;
int buf_size = avpkt->size;
RoqContext *s = avctx->priv_data;
int copy= !s->current_frame->data[0];
+ int ret;
- s->current_frame->reference = 3;
- if (avctx->reget_buffer(avctx, s->current_frame)) {
+ if ((ret = ff_reget_buffer(avctx, s->current_frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, " RoQ: get_buffer() failed\n");
- return -1;
+ return ret;
}
if(copy)
bytestream2_init(&s->gb, buf, buf_size);
roqvideo_decode_frame(s);
+ if ((ret = av_frame_ref(data, s->current_frame)) < 0)
+ return ret;
*got_frame = 1;
- *(AVFrame*)data = *s->current_frame;
/* shuffle frames */
FFSWAP(AVFrame *, s->current_frame, s->last_frame);
{
RoqContext *s = avctx->priv_data;
- /* release the last frame */
- if (s->last_frame->data[0])
- avctx->release_buffer(avctx, s->last_frame);
- if (s->current_frame->data[0])
- avctx->release_buffer(avctx, s->current_frame);
+ av_frame_free(&s->current_frame);
+ av_frame_free(&s->last_frame);
return 0;
}
enc->framesSinceKeyframe++;
}
+static int roq_encode_end(AVCodecContext *avctx)
+{
+ RoqContext *enc = avctx->priv_data;
+
+ av_frame_free(&enc->current_frame);
+ av_frame_free(&enc->last_frame);
+
+ av_free(enc->tmpData);
+ av_free(enc->this_motion4);
+ av_free(enc->last_motion4);
+ av_free(enc->this_motion8);
+ av_free(enc->last_motion8);
+
+ return 0;
+}
+
static int roq_encode_init(AVCodecContext *avctx)
{
RoqContext *enc = avctx->priv_data;
enc->framesSinceKeyframe = 0;
enc->first_frame = 1;
- enc->last_frame = &enc->frames[0];
- enc->current_frame = &enc->frames[1];
+ enc->last_frame = av_frame_alloc();
+ enc->current_frame = av_frame_alloc();
+ if (!enc->last_frame || !enc->current_frame) {
+ roq_encode_end(avctx);
+ return AVERROR(ENOMEM);
+ }
enc->tmpData = av_malloc(sizeof(RoqTempdata));
if (enc->first_frame) {
/* Alloc memory for the reconstruction data (we must know the stride
for that) */
- if (ff_get_buffer(avctx, enc->current_frame) ||
- ff_get_buffer(avctx, enc->last_frame)) {
+ if (ff_get_buffer(avctx, enc->current_frame, 0) ||
+ ff_get_buffer(avctx, enc->last_frame, 0)) {
av_log(avctx, AV_LOG_ERROR, " RoQ: get_buffer() failed\n");
return -1;
}
return 0;
}
-static int roq_encode_end(AVCodecContext *avctx)
-{
- RoqContext *enc = avctx->priv_data;
-
- avctx->release_buffer(avctx, enc->last_frame);
- avctx->release_buffer(avctx, enc->current_frame);
-
- av_free(enc->tmpData);
- av_free(enc->this_motion4);
- av_free(enc->last_motion4);
- av_free(enc->this_motion8);
- av_free(enc->last_motion8);
-
- return 0;
-}
-
AVCodec ff_roq_encoder = {
.name = "roqvideo",
.type = AVMEDIA_TYPE_VIDEO,
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
+#include "internal.h"
typedef struct RpzaContext {
s->buf = buf;
s->size = buf_size;
- s->frame.reference = 1;
- s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
- if ((ret = avctx->reget_buffer(avctx, &s->frame)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
rpza_decode_stream(s);
+ if ((ret = av_frame_ref(data, &s->frame)) < 0)
+ return ret;
+
*got_frame = 1;
- *(AVFrame*)data = s->frame;
/* always report that the buffer was completely consumed */
return buf_size;
{
RpzaContext *s = avctx->priv_data;
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
+ av_frame_unref(&s->frame);
return 0;
}
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
MpegEncContext *s = avctx->priv_data;
- int i;
+ int i, ret;
AVFrame *pict = data;
int slice_count;
const uint8_t *slices_hdr = NULL;
ff_MPV_frame_end(s);
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
- *pict = s->current_picture_ptr->f;
+ if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
+ return ret;
+ ff_print_debug_info(s, s->current_picture_ptr);
} else if (s->last_picture_ptr != NULL) {
- *pict = s->last_picture_ptr->f;
+ if ((ret = av_frame_ref(pict, &s->last_picture_ptr->f)) < 0)
+ return ret;
+ ff_print_debug_info(s, s->last_picture_ptr);
}
if(s->last_picture_ptr || s->low_delay){
*got_frame = 1;
- ff_print_debug_info(s, pict);
}
s->current_picture_ptr= NULL; // so we can detect if frame_end was not called (find some nicer solution...)
}
mb_pos = row * s->mb_stride;
for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
- int mbtype = s->current_picture_ptr->f.mb_type[mb_pos];
+ int mbtype = s->current_picture_ptr->mb_type[mb_pos];
if(IS_INTRA(mbtype) || IS_SEPARATE_DC(mbtype))
r->deblock_coefs[mb_pos] = 0xFFFF;
if(IS_INTRA(mbtype))
*/
mb_pos = row * s->mb_stride;
for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
- cur_lim = rv30_loop_filt_lim[s->current_picture_ptr->f.qscale_table[mb_pos]];
+ cur_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos]];
if(mb_x)
- left_lim = rv30_loop_filt_lim[s->current_picture_ptr->f.qscale_table[mb_pos - 1]];
+ left_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos - 1]];
for(j = 0; j < 16; j += 4){
Y = s->current_picture_ptr->f.data[0] + mb_x*16 + (row*16 + j) * s->linesize + 4 * !mb_x;
for(i = !mb_x; i < 4; i++, Y += 4){
}
mb_pos = row * s->mb_stride;
for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
- cur_lim = rv30_loop_filt_lim[s->current_picture_ptr->f.qscale_table[mb_pos]];
+ cur_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos]];
if(row)
- top_lim = rv30_loop_filt_lim[s->current_picture_ptr->f.qscale_table[mb_pos - s->mb_stride]];
+ top_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos - s->mb_stride]];
for(j = 4*!row; j < 16; j += 4){
Y = s->current_picture_ptr->f.data[0] + mb_x*16 + (row*16 + j) * s->linesize;
for(i = 0; i < 4; i++, Y += 4){
r->is16 = get_bits1(gb);
if(r->is16){
- s->current_picture_ptr->f.mb_type[mb_pos] = MB_TYPE_INTRA16x16;
+ s->current_picture_ptr->mb_type[mb_pos] = MB_TYPE_INTRA16x16;
r->block_type = RV34_MB_TYPE_INTRA16x16;
t = get_bits(gb, 2);
fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0]));
if(!get_bits1(gb))
av_log(s->avctx, AV_LOG_ERROR, "Need DQUANT\n");
}
- s->current_picture_ptr->f.mb_type[mb_pos] = MB_TYPE_INTRA;
+ s->current_picture_ptr->mb_type[mb_pos] = MB_TYPE_INTRA;
r->block_type = RV34_MB_TYPE_INTRA;
if(r->decode_intra_types(r, gb, intra_types) < 0)
return -1;
r->block_type = r->decode_mb_info(r);
if(r->block_type == -1)
return -1;
- s->current_picture_ptr->f.mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type];
+ s->current_picture_ptr->mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type];
r->mb_type[mb_pos] = r->block_type;
if(r->block_type == RV34_MB_SKIP){
if(s->pict_type == AV_PICTURE_TYPE_P)
if(s->pict_type == AV_PICTURE_TYPE_B)
r->mb_type[mb_pos] = RV34_MB_B_DIRECT;
}
- r->is16 = !!IS_INTRA16x16(s->current_picture_ptr->f.mb_type[mb_pos]);
+ r->is16 = !!IS_INTRA16x16(s->current_picture_ptr->mb_type[mb_pos]);
rv34_decode_mv(r, r->block_type);
if(r->block_type == RV34_MB_SKIP){
fill_rectangle(intra_types, 4, 4, r->intra_types_stride, 0, sizeof(intra_types[0]));
r->chroma_vlc = 1;
r->luma_vlc = 0;
- if(IS_INTRA(s->current_picture_ptr->f.mb_type[mb_pos])){
+ if(IS_INTRA(s->current_picture_ptr->mb_type[mb_pos])){
if(r->is16){
t = get_bits(gb, 2);
fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0]));
c_off = -1;
if(avail[-1]){
- A[0] = s->current_picture_ptr->f.motion_val[0][mv_pos-1][0];
- A[1] = s->current_picture_ptr->f.motion_val[0][mv_pos-1][1];
+ A[0] = s->current_picture_ptr->motion_val[0][mv_pos-1][0];
+ A[1] = s->current_picture_ptr->motion_val[0][mv_pos-1][1];
}
if(avail[-4]){
- B[0] = s->current_picture_ptr->f.motion_val[0][mv_pos-s->b8_stride][0];
- B[1] = s->current_picture_ptr->f.motion_val[0][mv_pos-s->b8_stride][1];
+ B[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][0];
+ B[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][1];
}else{
B[0] = A[0];
B[1] = A[1];
}
if(!avail[c_off-4]){
if(avail[-4] && (avail[-1] || r->rv30)){
- C[0] = s->current_picture_ptr->f.motion_val[0][mv_pos-s->b8_stride-1][0];
- C[1] = s->current_picture_ptr->f.motion_val[0][mv_pos-s->b8_stride-1][1];
+ C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][0];
+ C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][1];
}else{
C[0] = A[0];
C[1] = A[1];
}
}else{
- C[0] = s->current_picture_ptr->f.motion_val[0][mv_pos-s->b8_stride+c_off][0];
- C[1] = s->current_picture_ptr->f.motion_val[0][mv_pos-s->b8_stride+c_off][1];
+ C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][0];
+ C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][1];
}
mx = mid_pred(A[0], B[0], C[0]);
my = mid_pred(A[1], B[1], C[1]);
my += r->dmv[dmv_no][1];
for(j = 0; j < part_sizes_h[block_type]; j++){
for(i = 0; i < part_sizes_w[block_type]; i++){
- s->current_picture_ptr->f.motion_val[0][mv_pos + i + j*s->b8_stride][0] = mx;
- s->current_picture_ptr->f.motion_val[0][mv_pos + i + j*s->b8_stride][1] = my;
+ s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][0] = mx;
+ s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][1] = my;
}
}
}
int i, j;
Picture *cur_pic = s->current_picture_ptr;
const int mask = dir ? MB_TYPE_L1 : MB_TYPE_L0;
- int type = cur_pic->f.mb_type[mb_pos];
+ int type = cur_pic->mb_type[mb_pos];
if((r->avail_cache[6-1] & type) & mask){
- A[0] = cur_pic->f.motion_val[dir][mv_pos - 1][0];
- A[1] = cur_pic->f.motion_val[dir][mv_pos - 1][1];
+ A[0] = cur_pic->motion_val[dir][mv_pos - 1][0];
+ A[1] = cur_pic->motion_val[dir][mv_pos - 1][1];
has_A = 1;
}
if((r->avail_cache[6-4] & type) & mask){
- B[0] = cur_pic->f.motion_val[dir][mv_pos - s->b8_stride][0];
- B[1] = cur_pic->f.motion_val[dir][mv_pos - s->b8_stride][1];
+ B[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][0];
+ B[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][1];
has_B = 1;
}
if(r->avail_cache[6-4] && (r->avail_cache[6-2] & type) & mask){
- C[0] = cur_pic->f.motion_val[dir][mv_pos - s->b8_stride + 2][0];
- C[1] = cur_pic->f.motion_val[dir][mv_pos - s->b8_stride + 2][1];
+ C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][0];
+ C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][1];
has_C = 1;
}else if((s->mb_x+1) == s->mb_width && (r->avail_cache[6-5] & type) & mask){
- C[0] = cur_pic->f.motion_val[dir][mv_pos - s->b8_stride - 1][0];
- C[1] = cur_pic->f.motion_val[dir][mv_pos - s->b8_stride - 1][1];
+ C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][0];
+ C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][1];
has_C = 1;
}
for(j = 0; j < 2; j++){
for(i = 0; i < 2; i++){
- cur_pic->f.motion_val[dir][mv_pos + i + j*s->b8_stride][0] = mx;
- cur_pic->f.motion_val[dir][mv_pos + i + j*s->b8_stride][1] = my;
+ cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][0] = mx;
+ cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][1] = my;
}
}
if(block_type == RV34_MB_B_BACKWARD || block_type == RV34_MB_B_FORWARD){
- ZERO8x2(cur_pic->f.motion_val[!dir][mv_pos], s->b8_stride);
+ ZERO8x2(cur_pic->motion_val[!dir][mv_pos], s->b8_stride);
}
}
int* avail = r->avail_cache + avail_indexes[0];
if(avail[-1]){
- A[0] = s->current_picture_ptr->f.motion_val[0][mv_pos - 1][0];
- A[1] = s->current_picture_ptr->f.motion_val[0][mv_pos - 1][1];
+ A[0] = s->current_picture_ptr->motion_val[0][mv_pos - 1][0];
+ A[1] = s->current_picture_ptr->motion_val[0][mv_pos - 1][1];
}
if(avail[-4]){
- B[0] = s->current_picture_ptr->f.motion_val[0][mv_pos - s->b8_stride][0];
- B[1] = s->current_picture_ptr->f.motion_val[0][mv_pos - s->b8_stride][1];
+ B[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride][0];
+ B[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride][1];
}else{
B[0] = A[0];
B[1] = A[1];
}
if(!avail[-4 + 2]){
if(avail[-4] && (avail[-1])){
- C[0] = s->current_picture_ptr->f.motion_val[0][mv_pos - s->b8_stride - 1][0];
- C[1] = s->current_picture_ptr->f.motion_val[0][mv_pos - s->b8_stride - 1][1];
+ C[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride - 1][0];
+ C[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride - 1][1];
}else{
C[0] = A[0];
C[1] = A[1];
}
}else{
- C[0] = s->current_picture_ptr->f.motion_val[0][mv_pos - s->b8_stride + 2][0];
- C[1] = s->current_picture_ptr->f.motion_val[0][mv_pos - s->b8_stride + 2][1];
+ C[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride + 2][0];
+ C[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride + 2][1];
}
mx = mid_pred(A[0], B[0], C[0]);
my = mid_pred(A[1], B[1], C[1]);
for(j = 0; j < 2; j++){
for(i = 0; i < 2; i++){
for(k = 0; k < 2; k++){
- s->current_picture_ptr->f.motion_val[k][mv_pos + i + j*s->b8_stride][0] = mx;
- s->current_picture_ptr->f.motion_val[k][mv_pos + i + j*s->b8_stride][1] = my;
+ s->current_picture_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][0] = mx;
+ s->current_picture_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][1] = my;
}
}
}
if(thirdpel){
int chroma_mx, chroma_my;
- mx = (s->current_picture_ptr->f.motion_val[dir][mv_pos][0] + (3 << 24)) / 3 - (1 << 24);
- my = (s->current_picture_ptr->f.motion_val[dir][mv_pos][1] + (3 << 24)) / 3 - (1 << 24);
- lx = (s->current_picture_ptr->f.motion_val[dir][mv_pos][0] + (3 << 24)) % 3;
- ly = (s->current_picture_ptr->f.motion_val[dir][mv_pos][1] + (3 << 24)) % 3;
- chroma_mx = s->current_picture_ptr->f.motion_val[dir][mv_pos][0] / 2;
- chroma_my = s->current_picture_ptr->f.motion_val[dir][mv_pos][1] / 2;
+ mx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) / 3 - (1 << 24);
+ my = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) / 3 - (1 << 24);
+ lx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) % 3;
+ ly = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) % 3;
+ chroma_mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] / 2;
+ chroma_my = s->current_picture_ptr->motion_val[dir][mv_pos][1] / 2;
umx = (chroma_mx + (3 << 24)) / 3 - (1 << 24);
umy = (chroma_my + (3 << 24)) / 3 - (1 << 24);
uvmx = chroma_coeffs[(chroma_mx + (3 << 24)) % 3];
uvmy = chroma_coeffs[(chroma_my + (3 << 24)) % 3];
}else{
int cx, cy;
- mx = s->current_picture_ptr->f.motion_val[dir][mv_pos][0] >> 2;
- my = s->current_picture_ptr->f.motion_val[dir][mv_pos][1] >> 2;
- lx = s->current_picture_ptr->f.motion_val[dir][mv_pos][0] & 3;
- ly = s->current_picture_ptr->f.motion_val[dir][mv_pos][1] & 3;
- cx = s->current_picture_ptr->f.motion_val[dir][mv_pos][0] / 2;
- cy = s->current_picture_ptr->f.motion_val[dir][mv_pos][1] / 2;
+ mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] >> 2;
+ my = s->current_picture_ptr->motion_val[dir][mv_pos][1] >> 2;
+ lx = s->current_picture_ptr->motion_val[dir][mv_pos][0] & 3;
+ ly = s->current_picture_ptr->motion_val[dir][mv_pos][1] & 3;
+ cx = s->current_picture_ptr->motion_val[dir][mv_pos][0] / 2;
+ cy = s->current_picture_ptr->motion_val[dir][mv_pos][1] / 2;
umx = cx >> 2;
umy = cy >> 2;
uvmx = (cx & 3) << 1;
if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
/* wait for the referenced mb row to be finished */
int mb_row = s->mb_y + ((yoff + my + 5 + 8 * height) >> 4);
- AVFrame *f = dir ? &s->next_picture_ptr->f : &s->last_picture_ptr->f;
+ ThreadFrame *f = dir ? &s->next_picture_ptr->tf : &s->last_picture_ptr->tf;
ff_thread_await_progress(f, mb_row, 0);
}
switch(block_type){
case RV34_MB_TYPE_INTRA:
case RV34_MB_TYPE_INTRA16x16:
- ZERO8x2(s->current_picture_ptr->f.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
+ ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
return 0;
case RV34_MB_SKIP:
if(s->pict_type == AV_PICTURE_TYPE_P){
- ZERO8x2(s->current_picture_ptr->f.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
+ ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
break;
}
//surprisingly, it uses motion scheme from next reference frame
/* wait for the current mb row to be finished */
if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
- ff_thread_await_progress(&s->next_picture_ptr->f, FFMAX(0, s->mb_y-1), 0);
+ ff_thread_await_progress(&s->next_picture_ptr->tf, FFMAX(0, s->mb_y-1), 0);
- next_bt = s->next_picture_ptr->f.mb_type[s->mb_x + s->mb_y * s->mb_stride];
+ next_bt = s->next_picture_ptr->mb_type[s->mb_x + s->mb_y * s->mb_stride];
if(IS_INTRA(next_bt) || IS_SKIP(next_bt)){
- ZERO8x2(s->current_picture_ptr->f.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
- ZERO8x2(s->current_picture_ptr->f.motion_val[1][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
+ ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
+ ZERO8x2(s->current_picture_ptr->motion_val[1][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
}else
for(j = 0; j < 2; j++)
for(i = 0; i < 2; i++)
for(k = 0; k < 2; k++)
for(l = 0; l < 2; l++)
- s->current_picture_ptr->f.motion_val[l][mv_pos + i + j*s->b8_stride][k] = calc_add_mv(r, l, s->next_picture_ptr->f.motion_val[0][mv_pos + i + j*s->b8_stride][k]);
+ s->current_picture_ptr->motion_val[l][mv_pos + i + j*s->b8_stride][k] = calc_add_mv(r, l, s->next_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][k]);
if(!(IS_16X8(next_bt) || IS_8X16(next_bt) || IS_8X8(next_bt))) //we can use whole macroblock MC
rv34_mc_2mv(r, block_type);
else
rv34_mc_2mv_skip(r);
- ZERO8x2(s->current_picture_ptr->f.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
+ ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
break;
case RV34_MB_P_16x16:
case RV34_MB_P_MIX16x16:
MpegEncContext *s = &r->s;
int hmvmask = 0, vmvmask = 0, i, j;
int midx = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
- int16_t (*motion_val)[2] = &s->current_picture_ptr->f.motion_val[0][midx];
+ int16_t (*motion_val)[2] = &s->current_picture_ptr->motion_val[0][midx];
for(j = 0; j < 16; j += 8){
for(i = 0; i < 2; i++){
if(is_mv_diff_gt_3(motion_val + i, 1))
dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
if(s->mb_x && dist)
r->avail_cache[5] =
- r->avail_cache[9] = s->current_picture_ptr->f.mb_type[mb_pos - 1];
+ r->avail_cache[9] = s->current_picture_ptr->mb_type[mb_pos - 1];
if(dist >= s->mb_width)
r->avail_cache[2] =
- r->avail_cache[3] = s->current_picture_ptr->f.mb_type[mb_pos - s->mb_stride];
+ r->avail_cache[3] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride];
if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
- r->avail_cache[4] = s->current_picture_ptr->f.mb_type[mb_pos - s->mb_stride + 1];
+ r->avail_cache[4] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride + 1];
if(s->mb_x && dist > s->mb_width)
- r->avail_cache[1] = s->current_picture_ptr->f.mb_type[mb_pos - s->mb_stride - 1];
+ r->avail_cache[1] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride - 1];
s->qscale = r->si.quant;
cbp = cbp2 = rv34_decode_inter_mb_header(r, intra_types);
r->cbp_luma [mb_pos] = cbp;
r->cbp_chroma[mb_pos] = cbp >> 16;
r->deblock_coefs[mb_pos] = rv34_set_deblock_coef(r) | r->cbp_luma[mb_pos];
- s->current_picture_ptr->f.qscale_table[mb_pos] = s->qscale;
+ s->current_picture_ptr->qscale_table[mb_pos] = s->qscale;
if(cbp == -1)
return -1;
- if (IS_INTRA(s->current_picture_ptr->f.mb_type[mb_pos])){
+ if (IS_INTRA(s->current_picture_ptr->mb_type[mb_pos])){
if(r->is16) rv34_output_i16x16(r, intra_types, cbp);
else rv34_output_intra(r, intra_types, cbp);
return 0;
dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
if(s->mb_x && dist)
r->avail_cache[5] =
- r->avail_cache[9] = s->current_picture_ptr->f.mb_type[mb_pos - 1];
+ r->avail_cache[9] = s->current_picture_ptr->mb_type[mb_pos - 1];
if(dist >= s->mb_width)
r->avail_cache[2] =
- r->avail_cache[3] = s->current_picture_ptr->f.mb_type[mb_pos - s->mb_stride];
+ r->avail_cache[3] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride];
if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
- r->avail_cache[4] = s->current_picture_ptr->f.mb_type[mb_pos - s->mb_stride + 1];
+ r->avail_cache[4] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride + 1];
if(s->mb_x && dist > s->mb_width)
- r->avail_cache[1] = s->current_picture_ptr->f.mb_type[mb_pos - s->mb_stride - 1];
+ r->avail_cache[1] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride - 1];
s->qscale = r->si.quant;
cbp = rv34_decode_intra_mb_header(r, intra_types);
r->cbp_luma [mb_pos] = cbp;
r->cbp_chroma[mb_pos] = cbp >> 16;
r->deblock_coefs[mb_pos] = 0xFFFF;
- s->current_picture_ptr->f.qscale_table[mb_pos] = s->qscale;
+ s->current_picture_ptr->qscale_table[mb_pos] = s->qscale;
if(cbp == -1)
return -1;
r->loop_filter(r, s->mb_y - 2);
if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
- ff_thread_report_progress(&s->current_picture_ptr->f,
+ ff_thread_report_progress(&s->current_picture_ptr->tf,
s->mb_y - 2, 0);
}
if(!intra_vlcs[0].cbppattern[0].bits)
rv34_init_tables();
+ avctx->internal->allocate_progress = 1;
+
return 0;
}
if ((err = rv34_decoder_alloc(r)) < 0)
return err;
}
+
return 0;
}
{
RV34DecContext *r = avctx->priv_data;
MpegEncContext *s = &r->s;
- int got_picture = 0;
+ int got_picture = 0, ret;
ff_er_frame_end(&s->er);
ff_MPV_frame_end(s);
s->mb_num_left = 0;
if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
- ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
+ ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
- *pict = s->current_picture_ptr->f;
+ if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
+ return ret;
+ ff_print_debug_info(s, s->current_picture_ptr);
got_picture = 1;
} else if (s->last_picture_ptr != NULL) {
- *pict = s->last_picture_ptr->f;
+ if ((ret = av_frame_ref(pict, &s->last_picture_ptr->f)) < 0)
+ return ret;
+ ff_print_debug_info(s, s->last_picture_ptr);
got_picture = 1;
}
- if (got_picture)
- ff_print_debug_info(s, pict);
return got_picture;
}
MpegEncContext *s = &r->s;
AVFrame *pict = data;
SliceInfo si;
- int i;
+ int i, ret;
int slice_count;
const uint8_t *slices_hdr = NULL;
int last = 0;
if (buf_size == 0) {
/* special case for last picture */
if (s->low_delay==0 && s->next_picture_ptr) {
- *pict = s->next_picture_ptr->f;
+ if ((ret = av_frame_ref(pict, &s->next_picture_ptr->f)) < 0)
+ return ret;
s->next_picture_ptr = NULL;
*got_picture_ptr = 1;
if(r->loop_filter)
r->loop_filter(r, s->mb_height - 1);
- *got_picture_ptr = finish_frame(avctx, pict);
+ ret = finish_frame(avctx, pict);
+ if (ret < 0)
+ return ret;
+ *got_picture_ptr = ret;
} else if (HAVE_THREADS &&
(s->avctx->active_thread_type & FF_THREAD_FRAME)) {
av_log(avctx, AV_LOG_INFO, "marking unfished frame as finished\n");
ff_er_frame_end(&s->er);
ff_MPV_frame_end(s);
s->mb_num_left = 0;
- ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
+ ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
return AVERROR_INVALIDDATA;
}
}
mb_pos = row * s->mb_stride;
for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
- int mbtype = s->current_picture_ptr->f.mb_type[mb_pos];
+ int mbtype = s->current_picture_ptr->mb_type[mb_pos];
if(IS_INTRA(mbtype) || IS_SEPARATE_DC(mbtype))
r->cbp_luma [mb_pos] = r->deblock_coefs[mb_pos] = 0xFFFF;
if(IS_INTRA(mbtype))
unsigned y_to_deblock;
int c_to_deblock[2];
- q = s->current_picture_ptr->f.qscale_table[mb_pos];
+ q = s->current_picture_ptr->qscale_table[mb_pos];
alpha = rv40_alpha_tab[q];
beta = rv40_beta_tab [q];
betaY = betaC = beta * 3;
if(avail[i]){
int pos = mb_pos + neighbour_offs_x[i] + neighbour_offs_y[i]*s->mb_stride;
mvmasks[i] = r->deblock_coefs[pos];
- mbtype [i] = s->current_picture_ptr->f.mb_type[pos];
+ mbtype [i] = s->current_picture_ptr->mb_type[pos];
cbp [i] = r->cbp_luma[pos];
uvcbp[i][0] = r->cbp_chroma[pos] & 0xF;
uvcbp[i][1] = r->cbp_chroma[pos] >> 4;
/* get output buffer */
block_size = (avctx->bits_per_coded_sample + 4) / 4;
frame->nb_samples = 2 * (buf_size / block_size) / avctx->channels;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
#include "sgi.h"
typedef struct SgiState {
- AVFrame picture;
unsigned int width;
unsigned int height;
unsigned int depth;
AVPacket *avpkt)
{
SgiState *s = avctx->priv_data;
- AVFrame *picture = data;
- AVFrame *p = &s->picture;
+ AVFrame *p = data;
unsigned int dimension, rle;
int ret = 0;
uint8_t *out_buf, *out_end;
return -1;
avcodec_set_dimensions(avctx, s->width, s->height);
- if (p->data[0])
- avctx->release_buffer(avctx, p);
-
- p->reference = 0;
- if (ff_get_buffer(avctx, p) < 0) {
+ if (ff_get_buffer(avctx, p, 0) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed.\n");
return -1;
}
}
if (ret == 0) {
- *picture = s->picture;
*got_frame = 1;
return avpkt->size;
} else {
}
}
-static av_cold int sgi_init(AVCodecContext *avctx){
- SgiState *s = avctx->priv_data;
-
- avcodec_get_frame_defaults(&s->picture);
- avctx->coded_frame = &s->picture;
-
- return 0;
-}
-
-static av_cold int sgi_end(AVCodecContext *avctx)
-{
- SgiState * const s = avctx->priv_data;
-
- if (s->picture.data[0])
- avctx->release_buffer(avctx, &s->picture);
-
- return 0;
-}
-
AVCodec ff_sgi_decoder = {
.name = "sgi",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_SGI,
.priv_data_size = sizeof(SgiState),
- .init = sgi_init,
- .close = sgi_end,
.decode = decode_frame,
.long_name = NULL_IF_CONFIG_SMALL("SGI image"),
.capabilities = CODEC_CAP_DR1,
if (s->cur_chan == s->channels) {
/* get output buffer */
frame->nb_samples = s->blocksize;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
/* get output buffer */
frame->nb_samples = mode_par->frames_per_packet * subframe_size *
mode_par->subframe_count;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
GetByteContext gb2;
GetBitContext gb;
int blocks, blk, bw, bh;
- int i;
+ int i, ret;
int stride;
int flags;
if (avpkt->size <= 769)
return 0;
- smk->pic.reference = 1;
- smk->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
- if(avctx->reget_buffer(avctx, &smk->pic) < 0){
+ if ((ret = ff_reget_buffer(avctx, &smk->pic)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
- return -1;
+ return ret;
}
/* make the palette available on the way out */
}
+ if ((ret = av_frame_ref(data, &smk->pic)) < 0)
+ return ret;
+
*got_frame = 1;
- *(AVFrame*)data = smk->pic;
/* always report that the buffer was completely consumed */
return avpkt->size;
av_freep(&smk->full_tbl);
av_freep(&smk->type_tbl);
- if (smk->pic.data[0])
- avctx->release_buffer(avctx, &smk->pic);
+ av_frame_unref(&smk->pic);
return 0;
}
/* get output buffer */
frame->nb_samples = unp_size / (avctx->channels * (bits + 1));
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "bytestream.h"
+#include "internal.h"
#define CPAIR 2
#define CQUAD 4
int buf_size = avpkt->size;
SmcContext *s = avctx->priv_data;
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
+ int ret;
bytestream2_init(&s->gb, buf, buf_size);
- s->frame.reference = 1;
- s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
- FF_BUFFER_HINTS_REUSABLE | FF_BUFFER_HINTS_READABLE;
- if (avctx->reget_buffer(avctx, &s->frame)) {
+ if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0) {
av_log(s->avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
- return -1;
+ return ret;
}
if (pal) {
smc_decode_stream(s);
*got_frame = 1;
- *(AVFrame*)data = s->frame;
+ if ((ret = av_frame_ref(data, &s->frame)) < 0)
+ return ret;
/* always report that the buffer was completely consumed */
return buf_size;
{
SmcContext *s = avctx->priv_data;
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
+ av_frame_unref(&s->frame);
return 0;
}
#include "internal.h"
#include "sunrast.h"
-typedef struct SUNRASTContext {
- AVFrame picture;
-} SUNRASTContext;
-
-static av_cold int sunrast_init(AVCodecContext *avctx) {
- SUNRASTContext *s = avctx->priv_data;
-
- avcodec_get_frame_defaults(&s->picture);
- avctx->coded_frame = &s->picture;
-
- return 0;
-}
-
static int sunrast_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
const uint8_t *buf_end = avpkt->data + avpkt->size;
- SUNRASTContext * const s = avctx->priv_data;
- AVFrame *picture = data;
- AVFrame * const p = &s->picture;
+ AVFrame * const p = data;
unsigned int w, h, depth, type, maptype, maplength, stride, x, y, len, alen;
uint8_t *ptr;
const uint8_t *bufstart = buf;
return AVERROR_INVALIDDATA;
}
- if (p->data[0])
- avctx->release_buffer(avctx, p);
-
if (w != avctx->width || h != avctx->height)
avcodec_set_dimensions(avctx, w, h);
- if ((ret = ff_get_buffer(avctx, p)) < 0) {
+ if ((ret = ff_get_buffer(avctx, p, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
}
}
- *picture = s->picture;
*got_frame = 1;
return buf - bufstart;
}
-static av_cold int sunrast_end(AVCodecContext *avctx) {
- SUNRASTContext *s = avctx->priv_data;
-
- if(s->picture.data[0])
- avctx->release_buffer(avctx, &s->picture);
-
- return 0;
-}
-
AVCodec ff_sunrast_decoder = {
.name = "sunrast",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_SUNRAST,
- .priv_data_size = sizeof(SUNRASTContext),
- .init = sunrast_init,
- .close = sunrast_end,
.decode = sunrast_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Sun Rasterfile image"),
typedef struct SVQ1Context {
DSPContext dsp;
GetBitContext gb;
- AVFrame *cur, *prev;
+ AVFrame *prev;
int width;
int height;
int frame_code;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
SVQ1Context *s = avctx->priv_data;
- AVFrame *cur = s->cur;
+ AVFrame *cur = data;
uint8_t *current;
int result, i, x, y, width, height;
svq1_pmv *pmv;
- if (cur->data[0])
- avctx->release_buffer(avctx, cur);
-
/* initialize bit buffer */
init_get_bits(&s->gb, buf, buf_size * 8);
avctx->skip_frame >= AVDISCARD_ALL)
return buf_size;
- result = ff_get_buffer(avctx, cur);
+ result = ff_get_buffer(avctx, cur, s->nonref ? 0 : AV_GET_BUFFER_FLAG_REF);
if (result < 0)
return result;
}
}
- *(AVFrame*)data = *cur;
- if (!s->nonref)
- FFSWAP(AVFrame*, s->cur, s->prev);
+ if (!s->nonref) {
+ av_frame_unref(s->prev);
+ result = av_frame_ref(s->prev, cur);
+ if (result < 0)
+ goto err;
+ }
*got_frame = 1;
result = buf_size;
int i;
int offset = 0;
- s->cur = avcodec_alloc_frame();
s->prev = avcodec_alloc_frame();
- if (!s->cur || !s->prev) {
- avcodec_free_frame(&s->cur);
- avcodec_free_frame(&s->prev);
+ if (!s->prev)
return AVERROR(ENOMEM);
- }
s->width = avctx->width + 3 & ~3;
s->height = avctx->height + 3 & ~3;
{
SVQ1Context *s = avctx->priv_data;
- if (s->cur->data[0])
- avctx->release_buffer(avctx, s->cur);
- if (s->prev->data[0])
- avctx->release_buffer(avctx, s->prev);
- avcodec_free_frame(&s->cur);
avcodec_free_frame(&s->prev);
return 0;
{
SVQ1Context *s = avctx->priv_data;
- if (s->cur->data[0])
- avctx->release_buffer(avctx, s->cur);
- if (s->prev->data[0])
- avctx->release_buffer(avctx, s->prev);
+ av_frame_unref(s->prev);
}
AVCodec ff_svq1_decoder = {
s->m.current_picture.mb_mean = (uint8_t *)s->dummy;
s->m.current_picture.mb_var = (uint16_t *)s->dummy;
s->m.current_picture.mc_mb_var = (uint16_t *)s->dummy;
- s->m.current_picture.f.mb_type = s->dummy;
+ s->m.current_picture.mb_type = s->dummy;
- s->m.current_picture.f.motion_val[0] = s->motion_val8[plane] + 2;
+ s->m.current_picture.motion_val[0] = s->motion_val8[plane] + 2;
s->m.p_mv_table = s->motion_val16[plane] +
s->m.mb_stride + 1;
s->m.dsp = s->dsp; // move
}
if (!s->current_picture.data[0]) {
- ff_get_buffer(avctx, &s->current_picture);
- ff_get_buffer(avctx, &s->last_picture);
+ ff_get_buffer(avctx, &s->current_picture, 0);
+ ff_get_buffer(avctx, &s->last_picture, 0);
s->scratchbuf = av_malloc(s->current_picture.linesize[0] * 16 * 2);
}
av_freep(&s->motion_val16[i]);
}
+ av_frame_unref(&s->current_picture);
+ av_frame_unref(&s->last_picture);
+
return 0;
}
if (mode != PREDICT_MODE) {
pred_motion(h, k, part_width >> 2, dir, 1, &mx, &my);
} else {
- mx = s->next_pic->f.motion_val[0][b_xy][0] << 1;
- my = s->next_pic->f.motion_val[0][b_xy][1] << 1;
+ mx = s->next_pic->motion_val[0][b_xy][0] << 1;
+ my = s->next_pic->motion_val[0][b_xy][1] << 1;
if (dir == 0) {
mx = mx * h->frame_num_offset /
}
/* write back motion vectors */
- fill_rectangle(h->cur_pic.f.motion_val[dir][b_xy],
+ fill_rectangle(h->cur_pic.motion_val[dir][b_xy],
part_width >> 2, part_height >> 2, h->b_stride,
pack16to32(mx, my), 4);
}
if (mb_type == 0) { /* SKIP */
if (h->pict_type == AV_PICTURE_TYPE_P ||
- s->next_pic->f.mb_type[mb_xy] == -1) {
+ s->next_pic->mb_type[mb_xy] == -1) {
svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
0, 0, 0, 0, 0, 0);
mb_type = MB_TYPE_SKIP;
} else {
- mb_type = FFMIN(s->next_pic->f.mb_type[mb_xy], 6);
+ mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
return -1;
if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
if (h->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
for (i = 0; i < 4; i++)
AV_COPY32(h->mv_cache[m][scan8[0] - 1 + i * 8],
- h->cur_pic.f.motion_val[m][b_xy - 1 + i * h->b_stride]);
+ h->cur_pic.motion_val[m][b_xy - 1 + i * h->b_stride]);
} else {
for (i = 0; i < 4; i++)
AV_ZERO32(h->mv_cache[m][scan8[0] - 1 + i * 8]);
}
if (h->mb_y > 0) {
memcpy(h->mv_cache[m][scan8[0] - 1 * 8],
- h->cur_pic.f.motion_val[m][b_xy - h->b_stride],
+ h->cur_pic.motion_val[m][b_xy - h->b_stride],
4 * 2 * sizeof(int16_t));
memset(&h->ref_cache[m][scan8[0] - 1 * 8],
(h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
if (h->mb_x < h->mb_width - 1) {
AV_COPY32(h->mv_cache[m][scan8[0] + 4 - 1 * 8],
- h->cur_pic.f.motion_val[m][b_xy - h->b_stride + 4]);
+ h->cur_pic.motion_val[m][b_xy - h->b_stride + 4]);
h->ref_cache[m][scan8[0] + 4 - 1 * 8] =
(h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 ||
h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
h->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
if (h->mb_x > 0) {
AV_COPY32(h->mv_cache[m][scan8[0] - 1 - 1 * 8],
- h->cur_pic.f.motion_val[m][b_xy - h->b_stride - 1]);
+ h->cur_pic.motion_val[m][b_xy - h->b_stride - 1]);
h->ref_cache[m][scan8[0] - 1 - 1 * 8] =
(h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
} else
return -1;
} else {
for (i = 0; i < 4; i++)
- memset(h->cur_pic.f.motion_val[0][b_xy + i * h->b_stride],
+ memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
0, 4 * 2 * sizeof(int16_t));
}
if (mb_type != 1) {
return -1;
} else {
for (i = 0; i < 4; i++)
- memset(h->cur_pic.f.motion_val[1][b_xy + i * h->b_stride],
+ memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
0, 4 * 2 * sizeof(int16_t));
}
}
if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) {
for (i = 0; i < 4; i++)
- memset(h->cur_pic.f.motion_val[0][b_xy + i * h->b_stride],
+ memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
0, 4 * 2 * sizeof(int16_t));
if (h->pict_type == AV_PICTURE_TYPE_B) {
for (i = 0; i < 4; i++)
- memset(h->cur_pic.f.motion_val[1][b_xy + i * h->b_stride],
+ memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
0, 4 * 2 * sizeof(int16_t));
}
}
}
h->cbp = cbp;
- h->cur_pic.f.mb_type[mb_xy] = mb_type;
+ h->cur_pic.mb_type[mb_xy] = mb_type;
if (IS_INTRA(mb_type))
h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8, 1);
return 0;
}
+/**
+ * Release all refcounted side data attached to a Picture and
+ * unreference its frame: both directions' motion-vector and
+ * reference-index buffers, the macroblock-type buffer, and finally
+ * the AVFrame itself.
+ *
+ * Safe on a partially initialized Picture: av_buffer_unref() and
+ * av_frame_unref() are no-ops on NULL/empty entries, so this doubles
+ * as the error-path cleanup for get_buffer() below.
+ */
+static void free_picture(AVCodecContext *avctx, Picture *pic)
+{
+ int i;
+ for (i = 0; i < 2; i++) {
+ av_buffer_unref(&pic->motion_val_buf[i]);
+ av_buffer_unref(&pic->ref_index_buf[i]);
+ }
+ av_buffer_unref(&pic->mb_type_buf);
+
+ av_frame_unref(&pic->f);
+}
+
static int get_buffer(AVCodecContext *avctx, Picture *pic)
{
SVQ3Context *s = avctx->priv_data;
const int b4_array_size = b4_stride * h->mb_height * 4;
int ret;
- if (!pic->motion_val_base[0]) {
+ if (!pic->motion_val_buf[0]) {
int i;
- pic->mb_type_base = av_mallocz((big_mb_num + h->mb_stride) * sizeof(uint32_t));
- if (!pic->mb_type_base)
+ pic->mb_type_buf = av_buffer_allocz((big_mb_num + h->mb_stride) * sizeof(uint32_t));
+ if (!pic->mb_type_buf)
return AVERROR(ENOMEM);
- pic->f.mb_type = pic->mb_type_base + 2 * h->mb_stride + 1;
+ pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
for (i = 0; i < 2; i++) {
- pic->motion_val_base[i] = av_mallocz(2 * (b4_array_size + 4) * sizeof(int16_t));
- pic->f.ref_index[i] = av_mallocz(4 * mb_array_size);
- if (!pic->motion_val_base[i] || !pic->f.ref_index[i])
- return AVERROR(ENOMEM);
+ pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
+ pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
+ if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
- pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
+ pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
+ pic->ref_index[i] = pic->ref_index_buf[i]->data;
}
}
pic->f.motion_subsample_log2 = 2;
- pic->f.reference = !(h->pict_type == AV_PICTURE_TYPE_B);
+ pic->reference = !(h->pict_type == AV_PICTURE_TYPE_B);
+
+ ret = ff_get_buffer(avctx, &pic->f,
+ pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
+ if (ret < 0)
+ goto fail;
- ret = ff_get_buffer(avctx, &pic->f);
if (!h->edge_emu_buffer) {
h->edge_emu_buffer = av_mallocz(pic->f.linesize[0] * 17);
if (!h->edge_emu_buffer)
h->linesize = pic->f.linesize[0];
h->uvlinesize = pic->f.linesize[1];
+ return 0;
+fail:
+ free_picture(avctx, pic);
return ret;
}
/* special case for last picture */
if (buf_size == 0) {
if (s->next_pic->f.data[0] && !h->low_delay && !s->last_frame_output) {
- *(AVFrame *) data = s->next_pic->f;
+ ret = av_frame_ref(data, &s->next_pic->f);
+ if (ret < 0)
+ return ret;
s->last_frame_output = 1;
*got_frame = 1;
}
if (h->pict_type != AV_PICTURE_TYPE_B)
FFSWAP(Picture*, s->next_pic, s->last_pic);
- if (s->cur_pic->f.data[0])
- avctx->release_buffer(avctx, &s->cur_pic->f);
+ av_frame_unref(&s->cur_pic->f);
/* for skipping the frame */
s->cur_pic->f.pict_type = h->pict_type;
return ret;
h->cur_pic_ptr = s->cur_pic;
+ av_frame_unref(&h->cur_pic.f);
h->cur_pic = *s->cur_pic;
+ ret = av_frame_ref(&h->cur_pic.f, &s->cur_pic->f);
+ if (ret < 0)
+ return ret;
for (i = 0; i < 16; i++) {
h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
ff_h264_hl_decode_mb(h);
if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay)
- h->cur_pic.f.mb_type[h->mb_x + h->mb_y * h->mb_stride] =
+ h->cur_pic.mb_type[h->mb_x + h->mb_y * h->mb_stride] =
(h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
}
}
if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
- *(AVFrame *)data = s->cur_pic->f;
- else
- *(AVFrame *)data = s->last_pic->f;
+ ret = av_frame_ref(data, &s->cur_pic->f);
+ else if (s->last_pic->f.data[0])
+ ret = av_frame_ref(data, &s->last_pic->f);
+ if (ret < 0)
+ return ret;
/* Do not output the last pic after seeking. */
if (s->last_pic->f.data[0] || h->low_delay)
if (h->pict_type != AV_PICTURE_TYPE_B) {
FFSWAP(Picture*, s->cur_pic, s->next_pic);
+ } else {
+ av_frame_unref(&s->cur_pic->f);
}
return buf_size;
}
-static void free_picture(AVCodecContext *avctx, Picture *pic)
-{
- int i;
- for (i = 0; i < 2; i++) {
- av_freep(&pic->motion_val_base[i]);
- av_freep(&pic->f.ref_index[i]);
- }
- av_freep(&pic->mb_type_base);
-
- if (pic->f.data[0])
- avctx->release_buffer(avctx, &pic->f);
- av_freep(&pic);
-}
-
static int svq3_decode_end(AVCodecContext *avctx)
{
SVQ3Context *s = avctx->priv_data;
free_picture(avctx, s->cur_pic);
free_picture(avctx, s->next_pic);
free_picture(avctx, s->last_pic);
+ av_freep(&s->cur_pic);
+ av_freep(&s->next_pic);
+ av_freep(&s->last_pic);
+
+ av_frame_unref(&h->cur_pic.f);
ff_h264_free_context(h);
: s->ti.frame_samples;
frame->nb_samples = s->nb_samples;
- if ((ret = ff_get_buffer(avctx, frame)) < 0)
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
return ret;
if (avctx->bits_per_coded_sample <= 16) {
#include "targa.h"
typedef struct TargaContext {
- AVFrame picture;
GetByteContext gb;
int color_type;
AVPacket *avpkt)
{
TargaContext * const s = avctx->priv_data;
- AVFrame *picture = data;
- AVFrame * const p = &s->picture;
+ AVFrame * const p = data;
uint8_t *dst;
int stride;
int idlen, compr, y, w, h, bpp, flags, ret;
return AVERROR_INVALIDDATA;
}
- if(s->picture.data[0])
- avctx->release_buffer(avctx, &s->picture);
-
if ((ret = av_image_check_size(w, h, 0, avctx)) < 0)
return ret;
if(w != avctx->width || h != avctx->height)
avcodec_set_dimensions(avctx, w, h);
- if ((ret = ff_get_buffer(avctx, p)) < 0){
+ if ((ret = ff_get_buffer(avctx, p, 0)) < 0){
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
}
}
- *picture = s->picture;
*got_frame = 1;
return avpkt->size;
}
-static av_cold int targa_init(AVCodecContext *avctx){
- TargaContext *s = avctx->priv_data;
-
- avcodec_get_frame_defaults(&s->picture);
- avctx->coded_frame = &s->picture;
-
- return 0;
-}
-
-static av_cold int targa_end(AVCodecContext *avctx){
- TargaContext *s = avctx->priv_data;
-
- if(s->picture.data[0])
- avctx->release_buffer(avctx, &s->picture);
-
- return 0;
-}
-
AVCodec ff_targa_decoder = {
.name = "targa",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_TARGA,
.priv_data_size = sizeof(TargaContext),
- .init = targa_init,
- .close = targa_end,
.decode = decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Truevision Targa image"),
#ifndef AVCODEC_THREAD_H
#define AVCODEC_THREAD_H
+#include "libavutil/buffer.h"
+
#include "config.h"
#include "avcodec.h"
+// Wrapper pairing a decoded frame with the per-frame state needed by
+// frame-threaded decoders; passed to ff_thread_report_progress() /
+// ff_thread_await_progress() and ff_thread_get/release_buffer() below.
+typedef struct ThreadFrame {
+ AVFrame *f;
+ // NOTE(review): presumably the AVCodecContext whose thread owns f's
+ // buffers -- confirm against the frame-threading implementation.
+ AVCodecContext *owner;
+ // progress->data is an array of 2 ints holding progress for top/bottom
+ // fields
+ AVBufferRef *progress;
+} ThreadFrame;
+
/**
* Wait for decoding threads to finish and reset internal state.
* Called by avcodec_flush_buffers().
* @param field The field being decoded, for field-picture codecs.
* 0 for top field or frame pictures, 1 for bottom field.
*/
-void ff_thread_report_progress(AVFrame *f, int progress, int field);
+void ff_thread_report_progress(ThreadFrame *f, int progress, int field);
/**
* Wait for earlier decoding threads to finish reference pictures.
* @param field The field being referenced, for field-picture codecs.
* 0 for top field or frame pictures, 1 for bottom field.
*/
-void ff_thread_await_progress(AVFrame *f, int progress, int field);
+void ff_thread_await_progress(ThreadFrame *f, int progress, int field);
/**
* Wrapper around get_buffer() for frame-multithreaded codecs.
* @param avctx The current context.
* @param f The frame to write into.
*/
-int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f);
+int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags);
/**
* Wrapper around release_buffer() frame-for multithreaded codecs.
* @param avctx The current context.
* @param f The picture being released.
*/
-void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f);
+void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f);
+
+int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src);
int ff_thread_init(AVCodecContext *s);
void ff_thread_free(AVCodecContext *s);
#include "avcodec.h"
#define BITSTREAM_READER_LE
#include "get_bits.h"
+#include "internal.h"
typedef struct SeqVideoContext {
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
+ int ret;
SeqVideoContext *seq = avctx->priv_data;
- seq->frame.reference = 1;
- seq->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
- if (avctx->reget_buffer(avctx, &seq->frame)) {
+ if ((ret = ff_reget_buffer(avctx, &seq->frame)) < 0) {
av_log(seq->avctx, AV_LOG_ERROR, "tiertexseqvideo: reget_buffer() failed\n");
- return -1;
+ return ret;
}
if (seqvideo_decode(seq, buf, buf_size))
return AVERROR_INVALIDDATA;
+ if ((ret = av_frame_ref(data, &seq->frame)) < 0)
+ return ret;
*got_frame = 1;
- *(AVFrame *)data = seq->frame;
return buf_size;
}
{
SeqVideoContext *seq = avctx->priv_data;
- if (seq->frame.data[0])
- avctx->release_buffer(avctx, &seq->frame);
+ av_frame_unref(&seq->frame);
return 0;
}
typedef struct TiffContext {
AVCodecContext *avctx;
- AVFrame picture;
int width, height;
unsigned int bpp, bppcount;
return 0;
}
-static int init_image(TiffContext *s)
+static int init_image(TiffContext *s, AVFrame *frame)
{
int i, ret;
uint32_t *pal;
return ret;
avcodec_set_dimensions(s->avctx, s->width, s->height);
}
- if (s->picture.data[0])
- s->avctx->release_buffer(s->avctx, &s->picture);
- if ((ret = ff_get_buffer(s->avctx, &s->picture)) < 0) {
+ if ((ret = ff_get_buffer(s->avctx, frame, 0)) < 0) {
av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
if (s->palette_is_set) {
- memcpy(s->picture.data[1], s->palette, sizeof(s->palette));
+ memcpy(frame->data[1], s->palette, sizeof(s->palette));
} else {
/* make default grayscale pal */
- pal = (uint32_t *) s->picture.data[1];
+ pal = (uint32_t *) frame->data[1];
for (i = 0; i < 256; i++)
pal[i] = i * 0x010101;
}
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
TiffContext *const s = avctx->priv_data;
- AVFrame *picture = data;
- AVFrame *const p = &s->picture;
+ AVFrame *const p = data;
const uint8_t *orig_buf = buf, *end_buf = buf + buf_size;
unsigned off;
int id, le, ret;
return AVERROR_INVALIDDATA;
}
/* now we have the data and may start decoding */
- if ((ret = init_image(s)) < 0)
+ if ((ret = init_image(s, p)) < 0)
return ret;
if (s->strips == 1 && !s->stripsize) {
uint8_t *src;
int j;
- src = s->picture.data[0];
+ src = p->data[0];
for (j = 0; j < s->height; j++) {
- for (i = 0; i < s->picture.linesize[0]; i++)
+ for (i = 0; i < p->linesize[0]; i++)
src[i] = 255 - src[i];
- src += s->picture.linesize[0];
+ src += p->linesize[0];
}
}
- *picture = s->picture;
*got_frame = 1;
return buf_size;
s->width = 0;
s->height = 0;
s->avctx = avctx;
- avcodec_get_frame_defaults(&s->picture);
- avctx->coded_frame = &s->picture;
ff_lzw_decode_open(&s->lzw);
ff_ccitt_unpack_init();
TiffContext *const s = avctx->priv_data;
ff_lzw_decode_close(&s->lzw);
- if (s->picture.data[0])
- avctx->release_buffer(avctx, &s->picture);
return 0;
}
#include "cga_data.h"
-typedef struct TMVContext {
- AVFrame pic;
-} TMVContext;
-
static int tmv_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
- TMVContext *tmv = avctx->priv_data;
+ AVFrame *frame = data;
const uint8_t *src = avpkt->data;
uint8_t *dst;
unsigned char_cols = avctx->width >> 3;
unsigned x, y, fg, bg, c;
int ret;
- if (tmv->pic.data[0])
- avctx->release_buffer(avctx, &tmv->pic);
-
- if ((ret = ff_get_buffer(avctx, &tmv->pic)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
return AVERROR_INVALIDDATA;
}
- tmv->pic.pict_type = AV_PICTURE_TYPE_I;
- tmv->pic.key_frame = 1;
- dst = tmv->pic.data[0];
+ frame->pict_type = AV_PICTURE_TYPE_I;
+ frame->key_frame = 1;
+ dst = frame->data[0];
- tmv->pic.palette_has_changed = 1;
- memcpy(tmv->pic.data[1], ff_cga_palette, 16 * 4);
+ frame->palette_has_changed = 1;
+ memcpy(frame->data[1], ff_cga_palette, 16 * 4);
for (y = 0; y < char_rows; y++) {
for (x = 0; x < char_cols; x++) {
c = *src++;
bg = *src >> 4;
fg = *src++ & 0xF;
- ff_draw_pc_font(dst + x * 8, tmv->pic.linesize[0],
+ ff_draw_pc_font(dst + x * 8, frame->linesize[0],
ff_cga_font, 8, c, fg, bg);
}
- dst += tmv->pic.linesize[0] * 8;
+ dst += frame->linesize[0] * 8;
}
*got_frame = 1;
- *(AVFrame *)data = tmv->pic;
+
return avpkt->size;
}
return 0;
}
-static av_cold int tmv_decode_close(AVCodecContext *avctx)
-{
- TMVContext *tmv = avctx->priv_data;
-
- if (tmv->pic.data[0])
- avctx->release_buffer(avctx, &tmv->pic);
-
- return 0;
-}
-
AVCodec ff_tmv_decoder = {
.name = "tmv",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_TMV,
- .priv_data_size = sizeof(TMVContext),
.init = tmv_decode_init,
- .close = tmv_decode_close,
.decode = tmv_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("8088flex TMV"),
#include <string.h>
#include "avcodec.h"
+#include "internal.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
if (s->w != s->avctx->width || s->h != s->avctx->height ||
new_pix_fmt != s->avctx->pix_fmt) {
- if (s->frame.data[0])
- s->avctx->release_buffer(s->avctx, &s->frame);
+ av_frame_unref(&s->frame);
s->avctx->sample_aspect_ratio = (AVRational){ 1 << width_shift, 1 };
s->avctx->pix_fmt = new_pix_fmt;
avcodec_set_dimensions(s->avctx, s->w, s->h);
if ((ret = truemotion1_decode_header(s)) < 0)
return ret;
- s->frame.reference = 1;
- s->frame.buffer_hints = FF_BUFFER_HINTS_VALID |
- FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
- if ((ret = avctx->reget_buffer(avctx, &s->frame)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0) {
av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
truemotion1_decode_16bit(s);
}
+ if ((ret = av_frame_ref(data, &s->frame)) < 0)
+ return ret;
+
*got_frame = 1;
- *(AVFrame*)data = s->frame;
/* report that the buffer was completely consumed */
return buf_size;
{
TrueMotion1Context *s = avctx->priv_data;
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
-
+ av_frame_unref(&s->frame);
av_free(s->vert_pred);
return 0;
#include "bytestream.h"
#include "get_bits.h"
#include "dsputil.h"
+#include "internal.h"
#define TM2_ESCAPE 0x80000000
#define TM2_DELTAS 64
av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer\n");
return AVERROR(ENOMEM);
}
- p->reference = 1;
- p->buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
- if ((ret = avctx->reget_buffer(avctx, p)) < 0) {
+
+ if ((ret = ff_reget_buffer(avctx, p)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
av_free(swbuf);
return ret;
l->cur = !l->cur;
*got_frame = 1;
- *(AVFrame*)data = l->pic;
+ ret = av_frame_ref(data, &l->pic);
av_free(swbuf);
- return buf_size;
+ return (ret < 0) ? ret : buf_size;
}
static av_cold int decode_init(AVCodecContext *avctx)
av_free(l->V2_base);
}
- if (pic->data[0])
- avctx->release_buffer(avctx, pic);
+ av_frame_unref(pic);
return 0;
}
/* get output buffer */
frame->nb_samples = iterations * 240;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
typedef struct TsccContext {
AVCodecContext *avctx;
- AVFrame pic;
// Bits per pixel
int bpp;
int buf_size = avpkt->size;
CamtasiaContext * const c = avctx->priv_data;
const unsigned char *encoded = buf;
+ AVFrame *frame = data;
int zret; // Zlib return code
int ret, len = buf_size;
- if(c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
-
- c->pic.reference = 1;
- c->pic.buffer_hints = FF_BUFFER_HINTS_VALID;
- if ((ret = ff_get_buffer(avctx, &c->pic)) < 0){
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0){
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
if (zret != Z_DATA_ERROR) {
bytestream2_init(&c->gb, c->decomp_buf,
c->decomp_size - c->zstream.avail_out);
- ff_msrle_decode(avctx, (AVPicture*)&c->pic, c->bpp, &c->gb);
+ ff_msrle_decode(avctx, (AVPicture*)frame, c->bpp, &c->gb);
}
/* make the palette available on the way out */
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
if (pal) {
- c->pic.palette_has_changed = 1;
+ frame->palette_has_changed = 1;
memcpy(c->pal, pal, AVPALETTE_SIZE);
}
- memcpy(c->pic.data[1], c->pal, AVPALETTE_SIZE);
+ memcpy(frame->data[1], c->pal, AVPALETTE_SIZE);
}
*got_frame = 1;
- *(AVFrame*)data = c->pic;
/* always report that the buffer was completely consumed */
return buf_size;
av_freep(&c->decomp_buf);
- if (c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
inflateEnd(&c->zstream);
return 0;
#include "avcodec.h"
#include "get_bits.h"
#include "bytestream.h"
+#include "internal.h"
#include "tscc2data.h"
typedef struct TSCC2Context {
return AVERROR_INVALIDDATA;
}
- c->pic.reference = 3;
- c->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
- FF_BUFFER_HINTS_REUSABLE;
- if ((ret = avctx->reget_buffer(avctx, &c->pic)) < 0) {
+ if ((ret = ff_reget_buffer(avctx, &c->pic)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
if (frame_type == 0) {
*got_frame = 1;
- *(AVFrame*)data = c->pic;
+ if ((ret = av_frame_ref(data, &c->pic)) < 0)
+ return ret;
return buf_size;
}
}
*got_frame = 1;
- *(AVFrame*)data = c->pic;
+ if ((ret = av_frame_ref(data, &c->pic)) < 0)
+ return ret;
/* always report that the buffer was completely consumed */
return buf_size;
return AVERROR(ENOMEM);
}
- avctx->coded_frame = &c->pic;
-
return 0;
}
{
TSCC2Context * const c = avctx->priv_data;
- if (c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
+ av_frame_unref(&c->pic);
av_freep(&c->slice_quants);
free_vlcs(c);
/* get output buffer */
frame->nb_samples = framelen;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
/* get output buffer */
if (tctx->discarded_packets >= 2) {
frame->nb_samples = mtab->size;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
#include "internal.h"
#include "s3tc.h"
-typedef struct TXDContext {
- AVFrame picture;
-} TXDContext;
-
-static av_cold int txd_init(AVCodecContext *avctx) {
- TXDContext *s = avctx->priv_data;
-
- avcodec_get_frame_defaults(&s->picture);
- avctx->coded_frame = &s->picture;
-
- return 0;
-}
-
static int txd_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
AVPacket *avpkt) {
- TXDContext * const s = avctx->priv_data;
GetByteContext gb;
- AVFrame *picture = data;
- AVFrame * const p = &s->picture;
+ AVFrame * const p = data;
unsigned int version, w, h, d3d_format, depth, stride, flags;
unsigned int y, v;
uint8_t *ptr;
return AVERROR_PATCHWELCOME;
}
- if (p->data[0])
- avctx->release_buffer(avctx, p);
-
if ((ret = av_image_check_size(w, h, 0, avctx)) < 0)
return ret;
if (w != avctx->width || h != avctx->height)
avcodec_set_dimensions(avctx, w, h);
- if ((ret = ff_get_buffer(avctx, p)) < 0) {
+ if ((ret = ff_get_buffer(avctx, p, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
}
}
- *picture = s->picture;
*got_frame = 1;
return avpkt->size;
return AVERROR_PATCHWELCOME;
}
-static av_cold int txd_end(AVCodecContext *avctx) {
- TXDContext *s = avctx->priv_data;
-
- if (s->picture.data[0])
- avctx->release_buffer(avctx, &s->picture);
-
- return 0;
-}
-
AVCodec ff_txd_decoder = {
.name = "txd",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_TXD,
- .priv_data_size = sizeof(TXDContext),
- .init = txd_init,
- .close = txd_end,
.decode = txd_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Renderware TXD (TeXture Dictionary) image"),
#include "avcodec.h"
#include "bytestream.h"
+#include "internal.h"
#include "ulti_cb.h"
UltimotionDecodeContext *s = avctx->priv_data;
AVFrame *pic = &s->frame;
- if (pic->data[0])
- avctx->release_buffer(avctx, pic);
+ av_frame_unref(pic);
return 0;
}
int blocks = 0;
int done = 0;
int x = 0, y = 0;
- int i;
+ int i, ret;
int skip;
int tmp;
- s->frame.reference = 1;
- s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
- if (avctx->reget_buffer(avctx, &s->frame) < 0) {
+ if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
- return -1;
+ return ret;
}
bytestream2_init(&s->gb, buf, buf_size);
}
*got_frame = 1;
- *(AVFrame*)data= s->frame;
+ if ((ret = av_frame_ref(data, &s->frame)) < 0)
+ return ret;
return buf_size;
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/crc.h"
+#include "libavutil/frame.h"
#include "libavutil/mathematics.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
s->height = height;
}
-#define INTERNAL_BUFFER_SIZE (32 + 1)
-
#if (ARCH_ARM && HAVE_NEON) || ARCH_PPC || HAVE_MMX
# define STRIDE_ALIGN 16
#else
return ret;
}
-static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
-{
- AVCodecInternal *avci = avctx->internal;
- int buf_size, ret;
-
- av_freep(&avci->audio_data);
- buf_size = av_samples_get_buffer_size(NULL, avctx->channels,
- frame->nb_samples, avctx->sample_fmt,
- 0);
- if (buf_size < 0)
- return AVERROR(EINVAL);
-
- frame->data[0] = av_mallocz(buf_size);
- if (!frame->data[0])
- return AVERROR(ENOMEM);
-
- ret = avcodec_fill_audio_frame(frame, avctx->channels, avctx->sample_fmt,
- frame->data[0], buf_size, 0);
- if (ret < 0) {
- av_freep(&frame->data[0]);
- return ret;
- }
-
- avci->audio_data = frame->data[0];
- if (avctx->debug & FF_DEBUG_BUFFERS)
- av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p, "
- "internal audio buffer used\n", frame);
-
- return 0;
-}
-
-static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
+static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
{
- int i;
- int w = s->width;
- int h = s->height;
- InternalBuffer *buf;
- AVCodecInternal *avci = s->internal;
-
- if (pic->data[0] != NULL) {
- av_log(s, AV_LOG_ERROR, "pic->data[0]!=NULL in avcodec_default_get_buffer\n");
- return -1;
- }
- if (avci->buffer_count >= INTERNAL_BUFFER_SIZE) {
- av_log(s, AV_LOG_ERROR, "buffer_count overflow (missing release_buffer?)\n");
- return -1;
- }
+ FramePool *pool = avctx->internal->pool;
+ int i, ret;
- if (av_image_check_size(w, h, 0, s))
- return -1;
-
- if (!avci->buffer) {
- avci->buffer = av_mallocz((INTERNAL_BUFFER_SIZE + 1) *
- sizeof(InternalBuffer));
- }
-
- buf = &avci->buffer[avci->buffer_count];
-
- if (buf->base[0] && (buf->width != w || buf->height != h || buf->pix_fmt != s->pix_fmt)) {
- for (i = 0; i < AV_NUM_DATA_POINTERS; i++) {
- av_freep(&buf->base[i]);
- buf->data[i] = NULL;
- }
- }
-
- if (!buf->base[0]) {
- int h_chroma_shift, v_chroma_shift;
- int size[4] = { 0 };
- int tmpsize;
- int unaligned;
+ switch (avctx->codec_type) {
+ case AVMEDIA_TYPE_VIDEO: {
AVPicture picture;
- int stride_align[AV_NUM_DATA_POINTERS];
- const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->pix_fmt);
- const int pixel_size = desc->comp[0].step_minus1 + 1;
+ int size[4] = { 0 };
+ int w = frame->width;
+ int h = frame->height;
+ int tmpsize, unaligned;
- av_pix_fmt_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift,
- &v_chroma_shift);
+ if (pool->format == frame->format &&
+ pool->width == frame->width && pool->height == frame->height)
+ return 0;
- avcodec_align_dimensions2(s, &w, &h, stride_align);
+ avcodec_align_dimensions2(avctx, &w, &h, pool->stride_align);
- if (!(s->flags & CODEC_FLAG_EMU_EDGE)) {
+ if (!(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
w += EDGE_WIDTH * 2;
h += EDGE_WIDTH * 2;
}
do {
// NOTE: do not align linesizes individually, this breaks e.g. assumptions
// that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2
- av_image_fill_linesizes(picture.linesize, s->pix_fmt, w);
+ av_image_fill_linesizes(picture.linesize, avctx->pix_fmt, w);
// increase alignment of w for next try (rhs gives the lowest bit set in w)
w += w & ~(w - 1);
unaligned = 0;
for (i = 0; i < 4; i++)
- unaligned |= picture.linesize[i] % stride_align[i];
+ unaligned |= picture.linesize[i] % pool->stride_align[i];
} while (unaligned);
- tmpsize = av_image_fill_pointers(picture.data, s->pix_fmt, h, NULL, picture.linesize);
+ tmpsize = av_image_fill_pointers(picture.data, avctx->pix_fmt, h,
+ NULL, picture.linesize);
if (tmpsize < 0)
return -1;
size[i] = picture.data[i + 1] - picture.data[i];
size[i] = tmpsize - (picture.data[i] - picture.data[0]);
- memset(buf->base, 0, sizeof(buf->base));
- memset(buf->data, 0, sizeof(buf->data));
+ for (i = 0; i < 4; i++) {
+ av_buffer_pool_uninit(&pool->pools[i]);
+ pool->linesize[i] = picture.linesize[i];
+ if (size[i]) {
+ pool->pools[i] = av_buffer_pool_init(size[i] + 16, NULL);
+ if (!pool->pools[i]) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ }
+ }
+ pool->format = frame->format;
+ pool->width = frame->width;
+ pool->height = frame->height;
- for (i = 0; i < 4 && size[i]; i++) {
- const int h_shift = i == 0 ? 0 : h_chroma_shift;
- const int v_shift = i == 0 ? 0 : v_chroma_shift;
+ break;
+ }
+ case AVMEDIA_TYPE_AUDIO: {
+ int ch = av_get_channel_layout_nb_channels(frame->channel_layout);
+ int planar = av_sample_fmt_is_planar(frame->format);
+ int planes = planar ? ch : 1;
+
+ if (pool->format == frame->format && pool->planes == planes &&
+ pool->channels == ch && frame->nb_samples == pool->samples)
+ return 0;
+
+ av_buffer_pool_uninit(&pool->pools[0]);
+ ret = av_samples_get_buffer_size(&pool->linesize[0], ch,
+ frame->nb_samples, frame->format, 0);
+ if (ret < 0)
+ goto fail;
+
+ pool->pools[0] = av_buffer_pool_init(pool->linesize[0], NULL);
+ if (!pool->pools[0]) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
- buf->linesize[i] = picture.linesize[i];
+ pool->format = frame->format;
+ pool->planes = planes;
+ pool->channels = ch;
+ pool->samples = frame->nb_samples;
+ break;
+ }
+ default: av_assert0(0);
+ }
+ return 0;
+fail:
+ for (i = 0; i < 4; i++)
+ av_buffer_pool_uninit(&pool->pools[i]);
+ pool->format = -1;
+ pool->planes = pool->channels = pool->samples = 0;
+ pool->width = pool->height = 0;
+ return ret;
+}
- buf->base[i] = av_malloc(size[i] + 16); //FIXME 16
- if (buf->base[i] == NULL)
- return -1;
+static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
+{
+ FramePool *pool = avctx->internal->pool;
+ int planes = pool->planes;
+ int i;
- // no edge if EDGE EMU or not planar YUV
- if ((s->flags & CODEC_FLAG_EMU_EDGE) || !size[2])
- buf->data[i] = buf->base[i];
- else
- buf->data[i] = buf->base[i] + FFALIGN((buf->linesize[i] * EDGE_WIDTH >> v_shift) + (pixel_size * EDGE_WIDTH >> h_shift), stride_align[i]);
- }
- for (; i < AV_NUM_DATA_POINTERS; i++) {
- buf->base[i] = buf->data[i] = NULL;
- buf->linesize[i] = 0;
+ frame->linesize[0] = pool->linesize[0];
+
+ if (planes > AV_NUM_DATA_POINTERS) {
+ frame->extended_data = av_mallocz(planes * sizeof(*frame->extended_data));
+ frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
+ frame->extended_buf = av_mallocz(frame->nb_extended_buf *
+ sizeof(*frame->extended_buf));
+ if (!frame->extended_data || !frame->extended_buf) {
+ av_freep(&frame->extended_data);
+ av_freep(&frame->extended_buf);
+ return AVERROR(ENOMEM);
}
- if (size[1] && !size[2])
- avpriv_set_systematic_pal2((uint32_t *)buf->data[1], s->pix_fmt);
- buf->width = s->width;
- buf->height = s->height;
- buf->pix_fmt = s->pix_fmt;
+ } else
+ frame->extended_data = frame->data;
+
+ for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
+ frame->buf[i] = av_buffer_pool_get(pool->pools[0]);
+ if (!frame->buf[i])
+ goto fail;
+ frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
}
+ for (i = 0; i < frame->nb_extended_buf; i++) {
+ frame->extended_buf[i] = av_buffer_pool_get(pool->pools[0]);
+ if (!frame->extended_buf[i])
+ goto fail;
+ frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
+ }
+
+ if (avctx->debug & FF_DEBUG_BUFFERS)
+ av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p", frame);
+
+ return 0;
+fail:
+ av_frame_unref(frame);
+ return AVERROR(ENOMEM);
+}
- for (i = 0; i < AV_NUM_DATA_POINTERS; i++) {
- pic->base[i] = buf->base[i];
- pic->data[i] = buf->data[i];
- pic->linesize[i] = buf->linesize[i];
+static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
+{
+ FramePool *pool = s->internal->pool;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pic->format);
+ int pixel_size = desc->comp[0].step_minus1 + 1;
+ int h_chroma_shift, v_chroma_shift;
+ int i;
+
+ if (pic->data[0] != NULL) {
+ av_log(s, AV_LOG_ERROR, "pic->data[0]!=NULL in avcodec_default_get_buffer\n");
+ return -1;
}
+
+ memset(pic->data, 0, sizeof(pic->data));
pic->extended_data = pic->data;
- avci->buffer_count++;
+
+ av_pix_fmt_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);
+
+ for (i = 0; i < 4 && pool->pools[i]; i++) {
+ const int h_shift = i == 0 ? 0 : h_chroma_shift;
+ const int v_shift = i == 0 ? 0 : v_chroma_shift;
+
+ pic->linesize[i] = pool->linesize[i];
+
+ pic->buf[i] = av_buffer_pool_get(pool->pools[i]);
+ if (!pic->buf[i])
+ goto fail;
+
+ // no edge if EDGE EMU or not planar YUV
+ if ((s->flags & CODEC_FLAG_EMU_EDGE) || !pool->pools[2])
+ pic->data[i] = pic->buf[i]->data;
+ else {
+ pic->data[i] = pic->buf[i]->data +
+ FFALIGN((pic->linesize[i] * EDGE_WIDTH >> v_shift) +
+ (pixel_size * EDGE_WIDTH >> h_shift), pool->stride_align[i]);
+ }
+ }
+ for (; i < AV_NUM_DATA_POINTERS; i++) {
+ pic->data[i] = NULL;
+ pic->linesize[i] = 0;
+ }
+ if (pic->data[1] && !pic->data[2])
+ avpriv_set_systematic_pal2((uint32_t *)pic->data[1], s->pix_fmt);
if (s->debug & FF_DEBUG_BUFFERS)
- av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p, %d "
- "buffers used\n", pic, avci->buffer_count);
+ av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p\n", pic);
return 0;
+fail:
+ av_frame_unref(pic);
+ return AVERROR(ENOMEM);
}
-int avcodec_default_get_buffer(AVCodecContext *avctx, AVFrame *frame)
+int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
{
+ int ret;
+
+ if ((ret = update_frame_pool(avctx, frame)) < 0)
+ return ret;
+
+#if FF_API_GET_BUFFER
frame->type = FF_BUFFER_TYPE_INTERNAL;
+#endif
+
switch (avctx->codec_type) {
case AVMEDIA_TYPE_VIDEO:
return video_get_buffer(avctx, frame);
}
}
-int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame)
+#if FF_API_GET_BUFFER
+int avcodec_default_get_buffer(AVCodecContext *avctx, AVFrame *frame)
{
+ return avcodec_default_get_buffer2(avctx, frame, 0);
+}
+
+typedef struct CompatReleaseBufPriv {
+ AVCodecContext avctx;
+ AVFrame frame;
+} CompatReleaseBufPriv;
+
+static void compat_free_buffer(void *opaque, uint8_t *data)
+{
+ CompatReleaseBufPriv *priv = opaque;
+ priv->avctx.release_buffer(&priv->avctx, &priv->frame);
+ av_freep(&priv);
+}
+
+static void compat_release_buffer(void *opaque, uint8_t *data)
+{
+ AVBufferRef *buf = opaque;
+ av_buffer_unref(&buf);
+}
+#endif
+
+int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
+{
+ int ret;
+
switch (avctx->codec_type) {
case AVMEDIA_TYPE_VIDEO:
frame->width = avctx->width;
frame->height = avctx->height;
frame->format = avctx->pix_fmt;
frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
+
+ if ((ret = av_image_check_size(avctx->width, avctx->height, 0, avctx)) < 0)
+ return ret;
break;
case AVMEDIA_TYPE_AUDIO:
frame->sample_rate = avctx->sample_rate;
frame->pkt_pts = avctx->pkt ? avctx->pkt->pts : AV_NOPTS_VALUE;
frame->reordered_opaque = avctx->reordered_opaque;
- return avctx->get_buffer(avctx, frame);
-}
+#if FF_API_GET_BUFFER
+ /*
+ * Wrap an old get_buffer()-allocated buffer in an bunch of AVBuffers.
+ * We wrap each plane in its own AVBuffer. Each of those has a reference to
+ * a dummy AVBuffer as its private data, unreffing it on free.
+ * When all the planes are freed, the dummy buffer's free callback calls
+ * release_buffer().
+ */
+ if (avctx->get_buffer) {
+ CompatReleaseBufPriv *priv = NULL;
+ AVBufferRef *dummy_buf = NULL;
+ int planes, i, ret;
-void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic)
-{
- int i;
- InternalBuffer *buf, *last;
- AVCodecInternal *avci = s->internal;
+ if (flags & AV_GET_BUFFER_FLAG_REF)
+ frame->reference = 1;
- assert(s->codec_type == AVMEDIA_TYPE_VIDEO);
+ ret = avctx->get_buffer(avctx, frame);
+ if (ret < 0)
+ return ret;
- assert(pic->type == FF_BUFFER_TYPE_INTERNAL);
- assert(avci->buffer_count);
+ /* return if the buffers are already set up
+ * this would happen e.g. when a custom get_buffer() calls
+ * avcodec_default_get_buffer
+ */
+ if (frame->buf[0])
+ return 0;
+
+ priv = av_mallocz(sizeof(*priv));
+ if (!priv) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ priv->avctx = *avctx;
+ priv->frame = *frame;
- if (avci->buffer) {
- buf = NULL; /* avoids warning */
- for (i = 0; i < avci->buffer_count; i++) { //just 3-5 checks so is not worth to optimize
- buf = &avci->buffer[i];
- if (buf->data[0] == pic->data[0])
- break;
+ dummy_buf = av_buffer_create(NULL, 0, compat_free_buffer, priv, 0);
+ if (!dummy_buf) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
}
- assert(i < avci->buffer_count);
- avci->buffer_count--;
- last = &avci->buffer[avci->buffer_count];
- if (buf != last)
- FFSWAP(InternalBuffer, *buf, *last);
- }
+#define WRAP_PLANE(ref_out, data, data_size) \
+do { \
+ AVBufferRef *dummy_ref = av_buffer_ref(dummy_buf); \
+ if (!dummy_ref) { \
+ ret = AVERROR(ENOMEM); \
+ goto fail; \
+ } \
+ ref_out = av_buffer_create(data, data_size, compat_release_buffer, \
+ dummy_ref, 0); \
+ if (!ref_out) { \
+ av_frame_unref(frame); \
+ ret = AVERROR(ENOMEM); \
+ goto fail; \
+ } \
+} while (0)
+
+ if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
+
+ if (!desc) {
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ planes = (desc->flags & PIX_FMT_PLANAR) ? desc->nb_components : 1;
- for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
- pic->data[i] = NULL;
-// pic->base[i]=NULL;
+ for (i = 0; i < planes; i++) {
+ int h_shift = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;
+ int plane_size = (frame->width >> h_shift) * frame->linesize[i];
- if (s->debug & FF_DEBUG_BUFFERS)
- av_log(s, AV_LOG_DEBUG, "default_release_buffer called on pic %p, %d "
- "buffers used\n", pic, avci->buffer_count);
-}
+ WRAP_PLANE(frame->buf[i], frame->data[i], plane_size);
+ }
+ } else {
+ int planar = av_sample_fmt_is_planar(frame->format);
+ planes = planar ? avctx->channels : 1;
+
+ if (planes > FF_ARRAY_ELEMS(frame->buf)) {
+ frame->nb_extended_buf = planes - FF_ARRAY_ELEMS(frame->buf);
+ frame->extended_buf = av_malloc(sizeof(*frame->extended_buf) *
+ frame->nb_extended_buf);
+ if (!frame->extended_buf) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ }
-int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic)
-{
- AVFrame temp_pic;
- int i;
+ for (i = 0; i < FFMIN(planes, FF_ARRAY_ELEMS(frame->buf)); i++)
+ WRAP_PLANE(frame->buf[i], frame->extended_data[i], frame->linesize[0]);
+
+ for (i = 0; i < planes - FF_ARRAY_ELEMS(frame->buf); i++)
+ WRAP_PLANE(frame->extended_buf[i],
+ frame->extended_data[i + FF_ARRAY_ELEMS(frame->buf)],
+ frame->linesize[0]);
+ }
+
+ av_buffer_unref(&dummy_buf);
- assert(s->codec_type == AVMEDIA_TYPE_VIDEO);
+ return 0;
- /* If no picture return a new buffer */
- if (pic->data[0] == NULL) {
- /* We will copy from buffer, so must be readable */
- pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
- return ff_get_buffer(s, pic);
+fail:
+ avctx->release_buffer(avctx, frame);
+ av_freep(&priv);
+ av_buffer_unref(&dummy_buf);
+ return ret;
}
+#endif
+
+ return avctx->get_buffer2(avctx, frame, flags);
+}
+
+int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame)
+{
+ AVFrame tmp;
+ int ret;
- assert(s->pix_fmt == pic->format);
+ av_assert0(avctx->codec_type == AVMEDIA_TYPE_VIDEO);
+
+ if (!frame->data[0])
+ return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
- /* If internal buffer type return the same buffer */
- if (pic->type == FF_BUFFER_TYPE_INTERNAL) {
- if (s->pkt)
- pic->pkt_pts = s->pkt->pts;
- else
- pic->pkt_pts = AV_NOPTS_VALUE;
- pic->reordered_opaque = s->reordered_opaque;
+ if (av_frame_is_writable(frame))
return 0;
+
+ av_frame_move_ref(&tmp, frame);
+
+ ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
+ if (ret < 0) {
+ av_frame_unref(&tmp);
+ return ret;
}
- /*
- * Not internal type and reget_buffer not overridden, emulate cr buffer
- */
- temp_pic = *pic;
- for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
- pic->data[i] = pic->base[i] = NULL;
- pic->opaque = NULL;
- /* Allocate new frame */
- if (ff_get_buffer(s, pic))
- return -1;
- /* Copy image data from old buffer to new buffer */
- av_picture_copy((AVPicture *)pic, (AVPicture *)&temp_pic, s->pix_fmt, s->width,
- s->height);
- s->release_buffer(s, &temp_pic); // Release old frame
+ av_image_copy(frame->data, frame->linesize, tmp.data, tmp.linesize,
+ frame->format, frame->width, frame->height);
+
+ av_frame_unref(&tmp);
+
return 0;
}
+#if FF_API_GET_BUFFER
+void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic)
+{
+ av_frame_unref(pic);
+}
+
+int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic)
+{
+ av_assert0(0);
+}
+#endif
+
int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2), void *arg, int *ret, int count, int size)
{
int i;
goto end;
}
+ avctx->internal->pool = av_mallocz(sizeof(*avctx->internal->pool));
+ if (!avctx->internal->pool) {
+ ret = AVERROR(ENOMEM);
+ goto free_and_end;
+ }
+
if (codec->priv_data_size > 0) {
if (!avctx->priv_data) {
avctx->priv_data = av_mallocz(codec->priv_data_size);
free_and_end:
av_dict_free(&tmp);
av_freep(&avctx->priv_data);
+ if (avctx->internal)
+ av_freep(&avctx->internal->pool);
av_freep(&avctx->internal);
avctx->codec = NULL;
goto end;
int *got_picture_ptr,
AVPacket *avpkt)
{
+ AVCodecInternal *avci = avctx->internal;
int ret;
*got_picture_ptr = 0;
avcodec_get_frame_defaults(picture);
+ if (!avctx->refcounted_frames)
+ av_frame_unref(&avci->to_free);
+
if ((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size || (avctx->active_thread_type & FF_THREAD_FRAME)) {
if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
ret = ff_thread_decode_frame(avctx, picture, got_picture_ptr,
emms_c(); //needed to avoid an emms_c() call before every return;
- if (*got_picture_ptr)
+ if (ret < 0 && picture->data[0])
+ av_frame_unref(picture);
+
+ if (*got_picture_ptr) {
+ if (!avctx->refcounted_frames) {
+ avci->to_free = *picture;
+ avci->to_free.extended_data = avci->to_free.data;
+ }
+
avctx->frame_number++;
+ }
} else
ret = 0;
int *got_frame_ptr,
AVPacket *avpkt)
{
+ AVCodecInternal *avci = avctx->internal;
int planar, channels;
int ret = 0;
avcodec_get_frame_defaults(frame);
+ if (!avctx->refcounted_frames)
+ av_frame_unref(&avci->to_free);
+
if ((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size) {
ret = avctx->codec->decode(avctx, frame, got_frame_ptr, avpkt);
if (ret >= 0 && *got_frame_ptr) {
frame->pkt_dts = avpkt->dts;
if (frame->format == AV_SAMPLE_FMT_NONE)
frame->format = avctx->sample_fmt;
+
+ if (!avctx->refcounted_frames) {
+ avci->to_free = *frame;
+ avci->to_free.extended_data = avci->to_free.data;
+ }
}
+
+ if (ret < 0 && frame->data[0])
+ av_frame_unref(frame);
}
/* many decoders assign whole AVFrames, thus overwriting extended_data;
}
if (avcodec_is_open(avctx)) {
+ FramePool *pool = avctx->internal->pool;
+ int i;
if (HAVE_THREADS && avctx->thread_opaque)
ff_thread_free(avctx);
if (avctx->codec && avctx->codec->close)
avctx->codec->close(avctx);
- avcodec_default_free_buffers(avctx);
avctx->coded_frame = NULL;
+ if (!avctx->refcounted_frames)
+ av_frame_unref(&avctx->internal->to_free);
+ for (i = 0; i < FF_ARRAY_ELEMS(pool->pools); i++)
+ av_buffer_pool_uninit(&pool->pools[i]);
+ av_freep(&avctx->internal->pool);
av_freep(&avctx->internal);
}
avctx->codec->flush(avctx);
}
-static void video_free_buffers(AVCodecContext *s)
-{
- AVCodecInternal *avci = s->internal;
- int i, j;
-
- if (!avci->buffer)
- return;
-
- if (avci->buffer_count)
- av_log(s, AV_LOG_WARNING, "Found %i unreleased buffers!\n",
- avci->buffer_count);
- for (i = 0; i < INTERNAL_BUFFER_SIZE; i++) {
- InternalBuffer *buf = &avci->buffer[i];
- for (j = 0; j < 4; j++) {
- av_freep(&buf->base[j]);
- buf->data[j] = NULL;
- }
- }
- av_freep(&avci->buffer);
-
- avci->buffer_count = 0;
-}
-
-static void audio_free_buffers(AVCodecContext *avctx)
-{
- AVCodecInternal *avci = avctx->internal;
- av_freep(&avci->audio_data);
-}
-
-void avcodec_default_free_buffers(AVCodecContext *avctx)
-{
- switch (avctx->codec_type) {
- case AVMEDIA_TYPE_VIDEO:
- video_free_buffers(avctx);
- break;
- case AVMEDIA_TYPE_AUDIO:
- audio_free_buffers(avctx);
- break;
- default:
- break;
- }
-}
-
int av_get_exact_bits_per_sample(enum AVCodecID codec_id)
{
switch (codec_id) {
(av_toupper((x >> 24) & 0xFF) << 24);
}
+int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
+{
+ int ret;
+
+ dst->owner = src->owner;
+
+ ret = av_frame_ref(dst->f, src->f);
+ if (ret < 0)
+ return ret;
+
+ if (src->progress &&
+ !(dst->progress = av_buffer_ref(src->progress))) {
+ ff_thread_release_buffer(dst->owner, dst);
+ return AVERROR(ENOMEM);
+ }
+
+ return 0;
+}
+
#if !HAVE_THREADS
-int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f)
+int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
{
f->owner = avctx;
- return ff_get_buffer(avctx, f);
+ return ff_get_buffer(avctx, f, flags);
}
void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f)
{
- f->owner->release_buffer(f->owner, f);
+ av_frame_unref(f);
}
void ff_thread_finish_setup(AVCodecContext *avctx)
typedef struct UtvideoContext {
AVCodecContext *avctx;
- AVFrame pic;
DSPContext dsp;
uint32_t frame_info_size, flags, frame_info;
int plane_size, max_slice_size = 0, slice_start, slice_end, slice_size;
int ret;
GetByteContext gb;
+ ThreadFrame frame = { .f = data };
- if (c->pic.data[0])
- ff_thread_release_buffer(avctx, &c->pic);
-
- c->pic.reference = 1;
- c->pic.buffer_hints = FF_BUFFER_HINTS_VALID;
- if ((ret = ff_thread_get_buffer(avctx, &c->pic)) < 0) {
+ if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
case AV_PIX_FMT_RGB24:
case AV_PIX_FMT_RGBA:
for (i = 0; i < c->planes; i++) {
- ret = decode_plane(c, i, c->pic.data[0] + ff_ut_rgb_order[i],
- c->planes, c->pic.linesize[0], avctx->width,
+ ret = decode_plane(c, i, frame.f->data[0] + ff_ut_rgb_order[i],
+ c->planes, frame.f->linesize[0], avctx->width,
avctx->height, plane_start[i],
c->frame_pred == PRED_LEFT);
if (ret)
return ret;
if (c->frame_pred == PRED_MEDIAN) {
if (!c->interlaced) {
- restore_median(c->pic.data[0] + ff_ut_rgb_order[i],
- c->planes, c->pic.linesize[0], avctx->width,
+ restore_median(frame.f->data[0] + ff_ut_rgb_order[i],
+ c->planes, frame.f->linesize[0], avctx->width,
avctx->height, c->slices, 0);
} else {
- restore_median_il(c->pic.data[0] + ff_ut_rgb_order[i],
- c->planes, c->pic.linesize[0],
+ restore_median_il(frame.f->data[0] + ff_ut_rgb_order[i],
+ c->planes, frame.f->linesize[0],
avctx->width, avctx->height, c->slices,
0);
}
}
}
- restore_rgb_planes(c->pic.data[0], c->planes, c->pic.linesize[0],
+ restore_rgb_planes(frame.f->data[0], c->planes, frame.f->linesize[0],
avctx->width, avctx->height);
break;
case AV_PIX_FMT_YUV420P:
for (i = 0; i < 3; i++) {
- ret = decode_plane(c, i, c->pic.data[i], 1, c->pic.linesize[i],
+ ret = decode_plane(c, i, frame.f->data[i], 1, frame.f->linesize[i],
avctx->width >> !!i, avctx->height >> !!i,
plane_start[i], c->frame_pred == PRED_LEFT);
if (ret)
return ret;
if (c->frame_pred == PRED_MEDIAN) {
if (!c->interlaced) {
- restore_median(c->pic.data[i], 1, c->pic.linesize[i],
+ restore_median(frame.f->data[i], 1, frame.f->linesize[i],
avctx->width >> !!i, avctx->height >> !!i,
c->slices, !i);
} else {
- restore_median_il(c->pic.data[i], 1, c->pic.linesize[i],
+ restore_median_il(frame.f->data[i], 1, frame.f->linesize[i],
avctx->width >> !!i,
avctx->height >> !!i,
c->slices, !i);
break;
case AV_PIX_FMT_YUV422P:
for (i = 0; i < 3; i++) {
- ret = decode_plane(c, i, c->pic.data[i], 1, c->pic.linesize[i],
+ ret = decode_plane(c, i, frame.f->data[i], 1, frame.f->linesize[i],
avctx->width >> !!i, avctx->height,
plane_start[i], c->frame_pred == PRED_LEFT);
if (ret)
return ret;
if (c->frame_pred == PRED_MEDIAN) {
if (!c->interlaced) {
- restore_median(c->pic.data[i], 1, c->pic.linesize[i],
+ restore_median(frame.f->data[i], 1, frame.f->linesize[i],
avctx->width >> !!i, avctx->height,
c->slices, 0);
} else {
- restore_median_il(c->pic.data[i], 1, c->pic.linesize[i],
+ restore_median_il(frame.f->data[i], 1, frame.f->linesize[i],
avctx->width >> !!i, avctx->height,
c->slices, 0);
}
break;
}
- c->pic.key_frame = 1;
- c->pic.pict_type = AV_PICTURE_TYPE_I;
- c->pic.interlaced_frame = !!c->interlaced;
+ frame.f->key_frame = 1;
+ frame.f->pict_type = AV_PICTURE_TYPE_I;
+ frame.f->interlaced_frame = !!c->interlaced;
- *got_frame = 1;
- *(AVFrame*)data = c->pic;
+ *got_frame = 1;
/* always report that the buffer was completely consumed */
return buf_size;
{
UtvideoContext * const c = avctx->priv_data;
- if (c->pic.data[0])
- ff_thread_release_buffer(avctx, &c->pic);
-
av_freep(&c->slice_bits);
return 0;
avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
avctx->bits_per_raw_sample = 10;
- avctx->coded_frame = avcodec_alloc_frame();
- if (!avctx->coded_frame)
- return AVERROR(ENOMEM);
-
return 0;
}
AVPacket *avpkt)
{
int h, w, ret;
- AVFrame *pic = avctx->coded_frame;
+ AVFrame *pic = data;
const uint8_t *psrc = avpkt->data;
uint16_t *y, *u, *v;
int aligned_width = ((avctx->width + 47) / 48) * 48;
int stride = aligned_width * 8 / 3;
- if (pic->data[0])
- avctx->release_buffer(avctx, pic);
-
if (avpkt->size < stride * avctx->height) {
av_log(avctx, AV_LOG_ERROR, "packet too small\n");
return AVERROR_INVALIDDATA;
}
- pic->reference = 0;
- if ((ret = ff_get_buffer(avctx, pic)) < 0)
+ if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
return ret;
y = (uint16_t*)pic->data[0];
}
*got_frame = 1;
- *(AVFrame*)data = *avctx->coded_frame;
return avpkt->size;
}
-static av_cold int decode_close(AVCodecContext *avctx)
-{
- AVFrame *pic = avctx->coded_frame;
- if (pic->data[0])
- avctx->release_buffer(avctx, pic);
- av_freep(&avctx->coded_frame);
-
- return 0;
-}
-
AVCodec ff_v210_decoder = {
.name = "v210",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_V210,
.init = decode_init,
- .close = decode_close,
.decode = decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Uncompressed 4:2:2 10-bit"),
avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
avctx->bits_per_raw_sample = 10;
- avctx->coded_frame= avcodec_alloc_frame();
-
return 0;
}
AVPacket *avpkt)
{
const uint32_t *src = (const uint32_t *)avpkt->data;
- AVFrame *pic = avctx->coded_frame;
+ AVFrame *pic = data;
int width = avctx->width;
int y = 0;
uint16_t *ydst, *udst, *vdst, *yend;
int ret;
- if (pic->data[0])
- avctx->release_buffer(avctx, pic);
-
if (avpkt->size < avctx->width * avctx->height * 8 / 3) {
av_log(avctx, AV_LOG_ERROR, "Packet too small\n");
return AVERROR_INVALIDDATA;
av_log_ask_for_sample(avctx, "Probably padded data\n");
}
- pic->reference = 0;
- if ((ret = ff_get_buffer(avctx, pic)) < 0)
+ if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
return ret;
ydst = (uint16_t *)pic->data[0];
}
*got_frame = 1;
- *(AVFrame*)data= *avctx->coded_frame;
return avpkt->size;
}
-static av_cold int decode_close(AVCodecContext *avctx)
-{
- AVFrame *pic = avctx->coded_frame;
- if (pic->data[0])
- avctx->release_buffer(avctx, pic);
- av_freep(&avctx->coded_frame);
-
- return 0;
-}
-
AVCodec ff_v210x_decoder = {
.name = "v210x",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_V210X,
.init = decode_init,
- .close = decode_close,
.decode = decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Uncompressed 4:2:2 10-bit"),
}
}
- avctx->coded_frame = avcodec_alloc_frame();
-
- if (!avctx->coded_frame) {
- av_log(avctx, AV_LOG_ERROR, "Could not allocate frame.\n");
- return AVERROR(ENOMEM);
- }
-
return 0;
}
static int v410_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
- AVFrame *pic = avctx->coded_frame;
+ AVFrame *pic = data;
uint8_t *src = avpkt->data;
uint16_t *y, *u, *v;
uint32_t val;
int i, j;
- if (pic->data[0])
- avctx->release_buffer(avctx, pic);
-
if (avpkt->size < 4 * avctx->height * avctx->width) {
av_log(avctx, AV_LOG_ERROR, "Insufficient input data.\n");
return AVERROR(EINVAL);
}
- pic->reference = 0;
-
- if (ff_get_buffer(avctx, pic) < 0) {
+ if (ff_get_buffer(avctx, pic, 0) < 0) {
av_log(avctx, AV_LOG_ERROR, "Could not allocate buffer.\n");
return AVERROR(ENOMEM);
}
}
*got_frame = 1;
- *(AVFrame *)data = *pic;
return avpkt->size;
}
-static av_cold int v410_decode_close(AVCodecContext *avctx)
-{
- if (avctx->coded_frame->data[0])
- avctx->release_buffer(avctx, avctx->coded_frame);
-
- av_freep(&avctx->coded_frame);
-
- return 0;
-}
-
AVCodec ff_v410_decoder = {
.name = "v410",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_V410,
.init = v410_decode_init,
.decode = v410_decode_frame,
- .close = v410_decode_close,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Uncompressed 4:4:4 10-bit"),
};
int pic_structure)
{
if (pic_structure == 0)
- pic_structure = pic->f.reference;
+ pic_structure = pic->reference;
pic_structure &= PICT_FRAME; /* PICT_TOP_FIELD|PICT_BOTTOM_FIELD */
va_pic->picture_id = ff_vaapi_get_surface_id(pic);
va_pic->flags = 0;
if (pic_structure != PICT_FRAME)
va_pic->flags |= (pic_structure & PICT_TOP_FIELD) ? VA_PICTURE_H264_TOP_FIELD : VA_PICTURE_H264_BOTTOM_FIELD;
- if (pic->f.reference)
+ if (pic->reference)
va_pic->flags |= pic->long_ref ? VA_PICTURE_H264_LONG_TERM_REFERENCE : VA_PICTURE_H264_SHORT_TERM_REFERENCE;
va_pic->TopFieldOrderCnt = 0;
for (i = 0; i < h->short_ref_count; i++) {
Picture * const pic = h->short_ref[i];
- if (pic && pic->f.reference && dpb_add(&dpb, pic) < 0)
+ if (pic && pic->reference && dpb_add(&dpb, pic) < 0)
return -1;
}
for (i = 0; i < 16; i++) {
Picture * const pic = h->long_ref[i];
- if (pic && pic->f.reference && dpb_add(&dpb, pic) < 0)
+ if (pic && pic->reference && dpb_add(&dpb, pic) < 0)
return -1;
}
return 0;
{
unsigned int i, n = 0;
for (i = 0; i < ref_count; i++)
- if (ref_list[i].f.reference)
+ if (ref_list[i].reference)
fill_vaapi_pic(&RefPicList[n++], &ref_list[i], 0);
for (; n < 32; n++)
typedef struct VBDecContext {
AVCodecContext *avctx;
- AVFrame pic;
uint8_t *frame, *prev_frame;
uint32_t pal[AVPALETTE_COUNT];
AVPacket *avpkt)
{
VBDecContext * const c = avctx->priv_data;
+ AVFrame *frame = data;
uint8_t *outptr, *srcptr;
int i, j, ret;
int flags;
bytestream2_init(&c->stream, avpkt->data, avpkt->size);
- if (c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
- c->pic.reference = 1;
- if ((ret = ff_get_buffer(avctx, &c->pic)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
vb_decode_palette(c, size);
}
- memcpy(c->pic.data[1], c->pal, AVPALETTE_SIZE);
- c->pic.palette_has_changed = flags & VB_HAS_PALETTE;
+ memcpy(frame->data[1], c->pal, AVPALETTE_SIZE);
+ frame->palette_has_changed = flags & VB_HAS_PALETTE;
- outptr = c->pic.data[0];
+ outptr = frame->data[0];
srcptr = c->frame;
for (i = 0; i < avctx->height; i++) {
memcpy(outptr, srcptr, avctx->width);
srcptr += avctx->width;
- outptr += c->pic.linesize[0];
+ outptr += frame->linesize[0];
}
FFSWAP(uint8_t*, c->frame, c->prev_frame);
*got_frame = 1;
- *(AVFrame*)data = c->pic;
/* always report that the buffer was completely consumed */
return avpkt->size;
av_freep(&c->frame);
av_freep(&c->prev_frame);
- if(c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
return 0;
}
return 0;
}
-static void vble_restore_plane(VBLEContext *ctx, int plane, int offset,
- int width, int height)
+static void vble_restore_plane(VBLEContext *ctx, AVFrame *pic,
+ int plane, int offset,
+ int width, int height)
{
- AVFrame *pic = ctx->avctx->coded_frame;
uint8_t *dst = pic->data[plane];
uint8_t *val = ctx->val + offset;
int stride = pic->linesize[plane];
AVPacket *avpkt)
{
VBLEContext *ctx = avctx->priv_data;
- AVFrame *pic = avctx->coded_frame;
+ AVFrame *pic = data;
GetBitContext gb;
const uint8_t *src = avpkt->data;
int version;
int offset = 0;
int width_uv = avctx->width / 2, height_uv = avctx->height / 2;
- pic->reference = 0;
-
- /* Clear buffer if need be */
- if (pic->data[0])
- avctx->release_buffer(avctx, pic);
-
/* Allocate buffer */
- if (ff_get_buffer(avctx, pic) < 0) {
+ if (ff_get_buffer(avctx, pic, 0) < 0) {
av_log(avctx, AV_LOG_ERROR, "Could not allocate buffer.\n");
return AVERROR(ENOMEM);
}
}
/* Restore planes. Should be almost identical to Huffyuv's. */
- vble_restore_plane(ctx, 0, offset, avctx->width, avctx->height);
+ vble_restore_plane(ctx, pic, 0, offset, avctx->width, avctx->height);
/* Chroma */
if (!(ctx->avctx->flags & CODEC_FLAG_GRAY)) {
offset += avctx->width * avctx->height;
- vble_restore_plane(ctx, 1, offset, width_uv, height_uv);
+ vble_restore_plane(ctx, pic, 1, offset, width_uv, height_uv);
offset += width_uv * height_uv;
- vble_restore_plane(ctx, 2, offset, width_uv, height_uv);
+ vble_restore_plane(ctx, pic, 2, offset, width_uv, height_uv);
}
*got_frame = 1;
- *(AVFrame *)data = *pic;
return avpkt->size;
}
static av_cold int vble_decode_close(AVCodecContext *avctx)
{
VBLEContext *ctx = avctx->priv_data;
- AVFrame *pic = avctx->coded_frame;
-
- if (pic->data[0])
- avctx->release_buffer(avctx, pic);
-
- av_freep(&avctx->coded_frame);
av_freep(&ctx->val);
return 0;
avctx->pix_fmt = AV_PIX_FMT_YUV420P;
avctx->bits_per_raw_sample = 8;
- avctx->coded_frame = avcodec_alloc_frame();
-
- if (!avctx->coded_frame) {
- av_log(avctx, AV_LOG_ERROR, "Could not allocate frame.\n");
- return AVERROR(ENOMEM);
- }
ctx->size = avpicture_get_size(avctx->pix_fmt,
avctx->width, avctx->height);
// store motion vectors for further use in B frames
if (s->pict_type == AV_PICTURE_TYPE_P) {
- s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = mx;
- s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = my;
+ s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = mx;
+ s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = my;
}
uvmx = (mx + ((mx & 3) == 3)) >> 1;
ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
break;
}
- s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
- s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
+ s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
+ s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
for (k = 0; k < 4; k++)
v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
}
valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
chroma_ref_type = v->reffield;
if (!valid_count) {
- s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
- s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
+ s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
+ s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
return; //no need to do MC for intra blocks
}
}
if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f.data[0])
return;
- s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
- s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
+ s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
+ s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
uvmx = (tx + ((tx & 3) == 3)) >> 1;
uvmy = (ty + ((ty & 3) == 3)) >> 1;
xy = s->block_index[n];
if (s->mb_intra) {
- s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = 0;
- s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = 0;
- s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = 0;
- s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
+ s->mv[0][n][0] = s->current_picture.motion_val[0][xy + v->blocks_off][0] = 0;
+ s->mv[0][n][1] = s->current_picture.motion_val[0][xy + v->blocks_off][1] = 0;
+ s->current_picture.motion_val[1][xy + v->blocks_off][0] = 0;
+ s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
if (mv1) { /* duplicate motion data for 1-MV block */
- s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
- s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
- s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
- s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
- s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
- s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
+ s->current_picture.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
+ s->current_picture.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
+ s->current_picture.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
+ s->current_picture.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
+ s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
+ s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
- s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
- s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
- s->current_picture.f.motion_val[1][xy + wrap][0] = 0;
- s->current_picture.f.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
- s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
- s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
+ s->current_picture.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
+ s->current_picture.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
+ s->current_picture.motion_val[1][xy + wrap][0] = 0;
+ s->current_picture.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
+ s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
+ s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
}
return;
}
- C = s->current_picture.f.motion_val[dir][xy - 1 + v->blocks_off];
- A = s->current_picture.f.motion_val[dir][xy - wrap + v->blocks_off];
+ C = s->current_picture.motion_val[dir][xy - 1 + v->blocks_off];
+ A = s->current_picture.motion_val[dir][xy - wrap + v->blocks_off];
if (mv1) {
if (v->field_mode && mixedmv_pic)
off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
off = -1;
}
}
- B = s->current_picture.f.motion_val[dir][xy - wrap + off + v->blocks_off];
+ B = s->current_picture.motion_val[dir][xy - wrap + off + v->blocks_off];
a_valid = !s->first_slice_line || (n == 2 || n == 3);
b_valid = a_valid && (s->mb_width > 1);
if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
y_bias = 1;
/* store MV using signed modulus of MV range defined in 4.11 */
- s->mv[dir][n][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
- s->mv[dir][n][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
+ s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
+ s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
if (mv1) { /* duplicate motion data for 1-MV block */
- s->current_picture.f.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
- s->current_picture.f.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
- s->current_picture.f.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
- s->current_picture.f.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
- s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
- s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
+ s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
+ s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
+ s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
+ s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
+ s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
+ s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
}
xy = s->block_index[n];
if (s->mb_intra) {
- s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = 0;
- s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = 0;
- s->current_picture.f.motion_val[1][xy][0] = 0;
- s->current_picture.f.motion_val[1][xy][1] = 0;
+ s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
+ s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
+ s->current_picture.motion_val[1][xy][0] = 0;
+ s->current_picture.motion_val[1][xy][1] = 0;
if (mvn == 1) { /* duplicate motion data for 1-MV block */
- s->current_picture.f.motion_val[0][xy + 1][0] = 0;
- s->current_picture.f.motion_val[0][xy + 1][1] = 0;
- s->current_picture.f.motion_val[0][xy + wrap][0] = 0;
- s->current_picture.f.motion_val[0][xy + wrap][1] = 0;
- s->current_picture.f.motion_val[0][xy + wrap + 1][0] = 0;
- s->current_picture.f.motion_val[0][xy + wrap + 1][1] = 0;
+ s->current_picture.motion_val[0][xy + 1][0] = 0;
+ s->current_picture.motion_val[0][xy + 1][1] = 0;
+ s->current_picture.motion_val[0][xy + wrap][0] = 0;
+ s->current_picture.motion_val[0][xy + wrap][1] = 0;
+ s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
+ s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
- s->current_picture.f.motion_val[1][xy + 1][0] = 0;
- s->current_picture.f.motion_val[1][xy + 1][1] = 0;
- s->current_picture.f.motion_val[1][xy + wrap][0] = 0;
- s->current_picture.f.motion_val[1][xy + wrap][1] = 0;
- s->current_picture.f.motion_val[1][xy + wrap + 1][0] = 0;
- s->current_picture.f.motion_val[1][xy + wrap + 1][1] = 0;
+ s->current_picture.motion_val[1][xy + 1][0] = 0;
+ s->current_picture.motion_val[1][xy + 1][1] = 0;
+ s->current_picture.motion_val[1][xy + wrap][0] = 0;
+ s->current_picture.motion_val[1][xy + wrap][1] = 0;
+ s->current_picture.motion_val[1][xy + wrap + 1][0] = 0;
+ s->current_picture.motion_val[1][xy + wrap + 1][1] = 0;
}
return;
}
if (s->mb_x || (n == 1) || (n == 3)) {
if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
|| (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
- A[0] = s->current_picture.f.motion_val[0][xy - 1][0];
- A[1] = s->current_picture.f.motion_val[0][xy - 1][1];
+ A[0] = s->current_picture.motion_val[0][xy - 1][0];
+ A[1] = s->current_picture.motion_val[0][xy - 1][1];
a_valid = 1;
} else { // current block has frame mv and cand. has field MV (so average)
- A[0] = (s->current_picture.f.motion_val[0][xy - 1][0]
- + s->current_picture.f.motion_val[0][xy - 1 + off * wrap][0] + 1) >> 1;
- A[1] = (s->current_picture.f.motion_val[0][xy - 1][1]
- + s->current_picture.f.motion_val[0][xy - 1 + off * wrap][1] + 1) >> 1;
+ A[0] = (s->current_picture.motion_val[0][xy - 1][0]
+ + s->current_picture.motion_val[0][xy - 1 + off * wrap][0] + 1) >> 1;
+ A[1] = (s->current_picture.motion_val[0][xy - 1][1]
+ + s->current_picture.motion_val[0][xy - 1 + off * wrap][1] + 1) >> 1;
a_valid = 1;
}
if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
n_adj = (n & 2) | (n & 1);
}
- B[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap][0];
- B[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap][1];
+ B[0] = s->current_picture.motion_val[0][s->block_index[n_adj] - 2 * wrap][0];
+ B[1] = s->current_picture.motion_val[0][s->block_index[n_adj] - 2 * wrap][1];
if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
- B[0] = (B[0] + s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
- B[1] = (B[1] + s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
+ B[0] = (B[0] + s->current_picture.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
+ B[1] = (B[1] + s->current_picture.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
}
}
if (s->mb_width > 1) {
if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
n_adj = n & 2;
}
- C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][0];
- C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][1];
+ C[0] = s->current_picture.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][0];
+ C[1] = s->current_picture.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][1];
if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
- C[0] = (1 + C[0] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
- C[1] = (1 + C[1] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
+ C[0] = (1 + C[0] + (s->current_picture.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
+ C[1] = (1 + C[1] + (s->current_picture.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
}
if (s->mb_x == s->mb_width - 1) {
if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
n_adj = n | 1;
}
- C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][0];
- C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][1];
+ C[0] = s->current_picture.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][0];
+ C[1] = s->current_picture.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][1];
if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
- C[0] = (1 + C[0] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
- C[1] = (1 + C[1] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
+ C[0] = (1 + C[0] + s->current_picture.motion_val[0][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
+ C[1] = (1 + C[1] + s->current_picture.motion_val[0][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
}
} else
c_valid = 0;
} else {
pos_b = s->block_index[1];
b_valid = 1;
- B[0] = s->current_picture.f.motion_val[0][pos_b][0];
- B[1] = s->current_picture.f.motion_val[0][pos_b][1];
+ B[0] = s->current_picture.motion_val[0][pos_b][0];
+ B[1] = s->current_picture.motion_val[0][pos_b][1];
pos_c = s->block_index[0];
c_valid = 1;
- C[0] = s->current_picture.f.motion_val[0][pos_c][0];
- C[1] = s->current_picture.f.motion_val[0][pos_c][1];
+ C[0] = s->current_picture.motion_val[0][pos_c][0];
+ C[1] = s->current_picture.motion_val[0][pos_c][1];
}
total_valid = a_valid + b_valid + c_valid;
}
/* store MV using signed modulus of MV range defined in 4.11 */
- s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
- s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
+ s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
+ s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
if (mvn == 1) { /* duplicate motion data for 1-MV block */
- s->current_picture.f.motion_val[0][xy + 1 ][0] = s->current_picture.f.motion_val[0][xy][0];
- s->current_picture.f.motion_val[0][xy + 1 ][1] = s->current_picture.f.motion_val[0][xy][1];
- s->current_picture.f.motion_val[0][xy + wrap ][0] = s->current_picture.f.motion_val[0][xy][0];
- s->current_picture.f.motion_val[0][xy + wrap ][1] = s->current_picture.f.motion_val[0][xy][1];
- s->current_picture.f.motion_val[0][xy + wrap + 1][0] = s->current_picture.f.motion_val[0][xy][0];
- s->current_picture.f.motion_val[0][xy + wrap + 1][1] = s->current_picture.f.motion_val[0][xy][1];
+ s->current_picture.motion_val[0][xy + 1 ][0] = s->current_picture.motion_val[0][xy][0];
+ s->current_picture.motion_val[0][xy + 1 ][1] = s->current_picture.motion_val[0][xy][1];
+ s->current_picture.motion_val[0][xy + wrap ][0] = s->current_picture.motion_val[0][xy][0];
+ s->current_picture.motion_val[0][xy + wrap ][1] = s->current_picture.motion_val[0][xy][1];
+ s->current_picture.motion_val[0][xy + wrap + 1][0] = s->current_picture.motion_val[0][xy][0];
+ s->current_picture.motion_val[0][xy + wrap + 1][1] = s->current_picture.motion_val[0][xy][1];
} else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
- s->current_picture.f.motion_val[0][xy + 1][0] = s->current_picture.f.motion_val[0][xy][0];
- s->current_picture.f.motion_val[0][xy + 1][1] = s->current_picture.f.motion_val[0][xy][1];
+ s->current_picture.motion_val[0][xy + 1][0] = s->current_picture.motion_val[0][xy][0];
+ s->current_picture.motion_val[0][xy + 1][1] = s->current_picture.motion_val[0][xy][1];
s->mv[0][n + 1][0] = s->mv[0][n][0];
s->mv[0][n + 1][1] = s->mv[0][n][1];
}
xy = s->block_index[0];
if (s->mb_intra) {
- s->current_picture.f.motion_val[0][xy + v->blocks_off][0] =
- s->current_picture.f.motion_val[0][xy + v->blocks_off][1] =
- s->current_picture.f.motion_val[1][xy + v->blocks_off][0] =
- s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
+ s->current_picture.motion_val[0][xy + v->blocks_off][0] =
+ s->current_picture.motion_val[0][xy + v->blocks_off][1] =
+ s->current_picture.motion_val[1][xy + v->blocks_off][0] =
+ s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
return;
}
if (!v->field_mode) {
- s->mv[0][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
- s->mv[0][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
- s->mv[1][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
- s->mv[1][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
+ s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
+ s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
+ s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
+ s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
/* Pullback predicted motion vectors as specified in 8.4.5.4 */
s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
}
if (direct) {
- s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = s->mv[0][0][0];
- s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = s->mv[0][0][1];
- s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = s->mv[1][0][0];
- s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = s->mv[1][0][1];
+ s->current_picture.motion_val[0][xy + v->blocks_off][0] = s->mv[0][0][0];
+ s->current_picture.motion_val[0][xy + v->blocks_off][1] = s->mv[0][0][1];
+ s->current_picture.motion_val[1][xy + v->blocks_off][0] = s->mv[1][0][0];
+ s->current_picture.motion_val[1][xy + v->blocks_off][1] = s->mv[1][0][1];
return;
}
if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
- C = s->current_picture.f.motion_val[0][xy - 2];
- A = s->current_picture.f.motion_val[0][xy - wrap * 2];
+ C = s->current_picture.motion_val[0][xy - 2];
+ A = s->current_picture.motion_val[0][xy - wrap * 2];
off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
- B = s->current_picture.f.motion_val[0][xy - wrap * 2 + off];
+ B = s->current_picture.motion_val[0][xy - wrap * 2 + off];
if (!s->mb_x) C[0] = C[1] = 0;
if (!s->first_slice_line) { // predictor A is not out of bounds
s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
}
if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
- C = s->current_picture.f.motion_val[1][xy - 2];
- A = s->current_picture.f.motion_val[1][xy - wrap * 2];
+ C = s->current_picture.motion_val[1][xy - 2];
+ A = s->current_picture.motion_val[1][xy - wrap * 2];
off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
- B = s->current_picture.f.motion_val[1][xy - wrap * 2 + off];
+ B = s->current_picture.motion_val[1][xy - wrap * 2 + off];
if (!s->mb_x)
C[0] = C[1] = 0;
s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
}
- s->current_picture.f.motion_val[0][xy][0] = s->mv[0][0][0];
- s->current_picture.f.motion_val[0][xy][1] = s->mv[0][0][1];
- s->current_picture.f.motion_val[1][xy][0] = s->mv[1][0][0];
- s->current_picture.f.motion_val[1][xy][1] = s->mv[1][0][1];
+ s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
+ s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
+ s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
+ s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
}
static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
if (v->bmvtype == BMV_TYPE_DIRECT) {
int total_opp, k, f;
- if (s->next_picture.f.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
- s->mv[0][0][0] = scale_mv(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
+ if (s->next_picture.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
+ s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
v->bfraction, 0, s->quarter_sample);
- s->mv[0][0][1] = scale_mv(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
+ s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
v->bfraction, 0, s->quarter_sample);
- s->mv[1][0][0] = scale_mv(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
+ s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
v->bfraction, 1, s->quarter_sample);
- s->mv[1][0][1] = scale_mv(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
+ s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
v->bfraction, 1, s->quarter_sample);
total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
}
v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
for (k = 0; k < 4; k++) {
- s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
- s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
- s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
- s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
+ s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
+ s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
+ s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
+ s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
}
b = dc_val[ - 1 - wrap];
a = dc_val[ - wrap];
/* scale predictors if needed */
- q1 = s->current_picture.f.qscale_table[mb_pos];
+ q1 = s->current_picture.qscale_table[mb_pos];
dqscale_index = s->y_dc_scale_table[q1] - 1;
if (dqscale_index < 0)
return 0;
if (c_avail && (n != 1 && n != 3)) {
- q2 = s->current_picture.f.qscale_table[mb_pos - 1];
+ q2 = s->current_picture.qscale_table[mb_pos - 1];
if (q2 && q2 != q1)
c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
}
if (a_avail && (n != 2 && n != 3)) {
- q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
+ q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
if (q2 && q2 != q1)
a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
}
off--;
if (n != 2)
off -= s->mb_stride;
- q2 = s->current_picture.f.qscale_table[off];
+ q2 = s->current_picture.qscale_table[off];
if (q2 && q2 != q1)
b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
}
else // top
ac_val -= 16 * s->block_wrap[n];
- q1 = s->current_picture.f.qscale_table[mb_pos];
+ q1 = s->current_picture.qscale_table[mb_pos];
if ( dc_pred_dir && c_avail && mb_pos)
- q2 = s->current_picture.f.qscale_table[mb_pos - 1];
+ q2 = s->current_picture.qscale_table[mb_pos - 1];
if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
- q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
+ q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
if ( dc_pred_dir && n == 1)
q2 = q1;
if (!dc_pred_dir && n == 2)
else //top
ac_val -= 16 * s->block_wrap[n];
- q1 = s->current_picture.f.qscale_table[mb_pos];
+ q1 = s->current_picture.qscale_table[mb_pos];
if (dc_pred_dir && c_avail && mb_pos)
- q2 = s->current_picture.f.qscale_table[mb_pos - 1];
+ q2 = s->current_picture.qscale_table[mb_pos - 1];
if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
- q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
+ q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
if ( dc_pred_dir && n == 1)
q2 = q1;
if (!dc_pred_dir && n == 2)
bottom_is_intra = (block_num < 2) ? (mb_is_intra >> ((block_num + 2) * 4))
: (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
mv_stride = s->b8_stride;
- mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
+ mv = &s->current_picture.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
}
if (bottom_is_intra & 1 || block_is_intra & 1 ||
: (mb_cbp >> ((block_num + 1) * 4));
right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
: (mb_is_intra >> ((block_num + 1) * 4));
- mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
+ mv = &s->current_picture.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
}
if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
GET_MVDATA(dmv_x, dmv_y);
if (s->mb_intra) {
- s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
- s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
+ s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
+ s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
}
- s->current_picture.f.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
+ s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
/* FIXME Set DC val for inter block ? */
mquant = v->pq;
cbp = 0;
}
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
v->mb_type[0][s->block_index[i]] = 0;
s->dc_val[0][s->block_index[i]] = 0;
}
- s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
- s->current_picture.f.qscale_table[mb_pos] = 0;
+ s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
+ s->current_picture.qscale_table[mb_pos] = 0;
vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
vc1_mc_1mv(v, 0);
}
if (!intra_count && !coded_inter)
goto end;
GET_MQUANT();
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
/* test if block is intra and has pred */
{
int intrapred = 0;
}
} else { // skipped MB
s->mb_intra = 0;
- s->current_picture.f.qscale_table[mb_pos] = 0;
+ s->current_picture.qscale_table[mb_pos] = 0;
for (i = 0; i < 6; i++) {
v->mb_type[0][s->block_index[i]] = 0;
s->dc_val[0][s->block_index[i]] = 0;
vc1_mc_4mv_luma(v, i, 0);
}
vc1_mc_4mv_chroma(v, 0);
- s->current_picture.f.qscale_table[mb_pos] = 0;
+ s->current_picture.qscale_table[mb_pos] = 0;
}
}
end:
break;
}
if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
- s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
- s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
- s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
+ s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
+ s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
+ s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
s->mb_intra = v->is_intra[s->mb_x] = 1;
for (i = 0; i < 6; i++)
v->mb_type[0][s->block_index[i]] = 1;
cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
GET_MQUANT();
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
/* Set DC scale - y and c use the same (not sure if necessary here) */
s->y_dc_scale = s->y_dc_scale_table[mquant];
s->c_dc_scale = s->c_dc_scale_table[mquant];
}
if (cbp)
GET_MQUANT(); // p. 227
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
if (!v->ttmbf && cbp)
ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
for (i = 0; i < 6; i++) {
v->mb_type[0][s->block_index[i]] = 0;
s->dc_val[0][s->block_index[i]] = 0;
}
- s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
- s->current_picture.f.qscale_table[mb_pos] = 0;
+ s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
+ s->current_picture.qscale_table[mb_pos] = 0;
v->blk_mv_type[s->block_index[0]] = 0;
v->blk_mv_type[s->block_index[1]] = 0;
v->blk_mv_type[s->block_index[2]] = 0;
idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
if (idx_mbmode <= 1) { // intra MB
s->mb_intra = v->is_intra[s->mb_x] = 1;
- s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
- s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
- s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
+ s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
+ s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
+ s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
GET_MQUANT();
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
/* Set DC scale - y and c use the same (not sure if necessary here) */
s->y_dc_scale = s->y_dc_scale_table[mquant];
s->c_dc_scale = s->c_dc_scale_table[mquant];
}
} else {
s->mb_intra = v->is_intra[s->mb_x] = 0;
- s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
+ s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
if (idx_mbmode <= 5) { // 1-MV
dmv_x = dmv_y = pred_flag = 0;
if (cbp) {
GET_MQUANT();
}
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
if (!v->ttmbf && cbp) {
ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
}
v->mb_type[0][s->block_index[i]] = 0;
s->dc_val[0][s->block_index[i]] = 0;
}
- s->current_picture.f.qscale_table[mb_pos] = 0;
+ s->current_picture.qscale_table[mb_pos] = 0;
if (!direct) {
if (!skipped) {
cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
GET_MQUANT();
s->mb_intra = 0;
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
if (!v->ttmbf)
ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
}
if (s->mb_intra && !mb_has_coeffs) {
GET_MQUANT();
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
s->ac_pred = get_bits1(gb);
cbp = 0;
vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
s->ac_pred = get_bits1(gb);
cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
GET_MQUANT();
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
}
idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
if (idx_mbmode <= 1) { // intra MB
s->mb_intra = v->is_intra[s->mb_x] = 1;
- s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
- s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
- s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
+ s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
+ s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
+ s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
GET_MQUANT();
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
/* Set DC scale - y and c use the same (not sure if necessary here) */
s->y_dc_scale = s->y_dc_scale_table[mquant];
s->c_dc_scale = s->c_dc_scale_table[mquant];
}
} else {
s->mb_intra = v->is_intra[s->mb_x] = 0;
- s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
+ s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
if (v->fmb_is_raw)
fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
if (cbp) {
GET_MQUANT();
}
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
if (!v->ttmbf && cbp) {
ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
}
dst[5] = s->dest[2];
s->dsp.clear_blocks(s->block[0]);
mb_pos = s->mb_x + s->mb_y * s->mb_width;
- s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
- s->current_picture.f.qscale_table[mb_pos] = v->pq;
- s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
- s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
+ s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
+ s->current_picture.qscale_table[mb_pos] = v->pq;
+ s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
+ s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
// do actual MB decoding and displaying
cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
ff_update_block_index(s);
s->dsp.clear_blocks(block[0]);
mb_pos = s->mb_x + s->mb_y * s->mb_stride;
- s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
- s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
- s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
+ s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
+ s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
+ s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
// do actual MB decoding and displaying
if (v->fieldtx_is_raw)
GET_MQUANT();
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
/* Set DC scale - y and c use the same */
s->y_dc_scale = s->y_dc_scale_table[mquant];
s->c_dc_scale = s->c_dc_scale_table[mquant];
v->two_sprites = 0;
}
- if (v->sprite_output_frame.data[0])
- avctx->release_buffer(avctx, &v->sprite_output_frame);
-
- v->sprite_output_frame.buffer_hints = FF_BUFFER_HINTS_VALID;
- v->sprite_output_frame.reference = 0;
- if (ff_get_buffer(avctx, &v->sprite_output_frame) < 0) {
+ av_frame_unref(&v->sprite_output_frame);
+ if (ff_get_buffer(avctx, &v->sprite_output_frame, 0) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
VC1Context *v = avctx->priv_data;
int i;
- if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE)
- && v->sprite_output_frame.data[0])
- avctx->release_buffer(avctx, &v->sprite_output_frame);
+ av_frame_unref(&v->sprite_output_frame);
+
for (i = 0; i < 4; i++)
av_freep(&v->sr_rows[i >> 1][i & 1]);
av_freep(&v->hrd_rate);
int *got_frame, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
- int buf_size = avpkt->size, n_slices = 0, i;
+ int buf_size = avpkt->size, n_slices = 0, i, ret;
VC1Context *v = avctx->priv_data;
MpegEncContext *s = &v->s;
AVFrame *pict = data;
if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
/* special case for last picture */
if (s->low_delay == 0 && s->next_picture_ptr) {
- *pict = s->next_picture_ptr->f;
+ if ((ret = av_frame_ref(pict, &s->next_picture_ptr->f)) < 0)
+ return ret;
s->next_picture_ptr = NULL;
*got_frame = 1;
if (vc1_decode_sprites(v, &s->gb))
goto err;
#endif
- *pict = v->sprite_output_frame;
+ if ((ret = av_frame_ref(pict, &v->sprite_output_frame)) < 0)
+ goto err;
*got_frame = 1;
} else {
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
- *pict = s->current_picture_ptr->f;
+ if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
+ goto err;
+ ff_print_debug_info(s, s->current_picture_ptr);
} else if (s->last_picture_ptr != NULL) {
- *pict = s->last_picture_ptr->f;
+ if ((ret = av_frame_ref(pict, &s->last_picture_ptr->f)) < 0)
+ goto err;
+ ff_print_debug_info(s, s->last_picture_ptr);
}
if (s->last_picture_ptr || s->low_delay) {
*got_frame = 1;
- ff_print_debug_info(s, pict);
}
}
#include "libavutil/internal.h"
typedef struct VCR1Context {
- AVFrame picture;
int delta[16];
int offset[4];
} VCR1Context;
-static av_cold int vcr1_common_init(AVCodecContext *avctx)
-{
- VCR1Context *const a = avctx->priv_data;
-
- avctx->coded_frame = &a->picture;
-
- return 0;
-}
-
static av_cold int vcr1_decode_init(AVCodecContext *avctx)
{
- vcr1_common_init(avctx);
-
avctx->pix_fmt = AV_PIX_FMT_YUV410P;
return 0;
}
-static av_cold int vcr1_decode_end(AVCodecContext *avctx)
-{
- VCR1Context *s = avctx->priv_data;
-
- if (s->picture.data[0])
- avctx->release_buffer(avctx, &s->picture);
-
- return 0;
-}
-
static int vcr1_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
VCR1Context *const a = avctx->priv_data;
- AVFrame *picture = data;
- AVFrame *const p = &a->picture;
+ AVFrame *const p = data;
const uint8_t *bytestream = buf;
int i, x, y, ret;
- if (p->data[0])
- avctx->release_buffer(avctx, p);
-
- p->reference = 0;
- if ((ret = ff_get_buffer(avctx, p)) < 0) {
+ if ((ret = ff_get_buffer(avctx, p, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
for (y = 0; y < avctx->height; y++) {
int offset;
- uint8_t *luma = &a->picture.data[0][y * a->picture.linesize[0]];
+ uint8_t *luma = &p->data[0][y * p->linesize[0]];
if ((y & 3) == 0) {
- uint8_t *cb = &a->picture.data[1][(y >> 2) * a->picture.linesize[1]];
- uint8_t *cr = &a->picture.data[2][(y >> 2) * a->picture.linesize[2]];
+ uint8_t *cb = &p->data[1][(y >> 2) * p->linesize[1]];
+ uint8_t *cr = &p->data[2][(y >> 2) * p->linesize[2]];
for (i = 0; i < 4; i++)
a->offset[i] = *bytestream++;
}
}
- *picture = a->picture;
*got_frame = 1;
return buf_size;
.id = AV_CODEC_ID_VCR1,
.priv_data_size = sizeof(VCR1Context),
.init = vcr1_decode_init,
- .close = vcr1_decode_end,
.decode = vcr1_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("ATI VCR1"),
for (i = 0; i < ls; ++i) {
pic = lp[i];
- if (!pic || !pic->f.reference)
+ if (!pic || !pic->reference)
continue;
pic_frame_idx = pic->long_ref ? pic->pic_id : pic->frame_num;
++rf2;
}
if (rf2 != rf) {
- rf2->top_is_reference |= (pic->f.reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE;
- rf2->bottom_is_reference |= (pic->f.reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
+ rf2->top_is_reference |= (pic->reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE;
+ rf2->bottom_is_reference |= (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
continue;
}
rf->surface = render_ref->surface;
rf->is_long_term = pic->long_ref;
- rf->top_is_reference = (pic->f.reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE;
- rf->bottom_is_reference = (pic->f.reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
+ rf->top_is_reference = (pic->reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE;
+ rf->bottom_is_reference = (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
rf->field_order_cnt[0] = pic->field_poc[0];
rf->field_order_cnt[1] = pic->field_poc[1];
rf->frame_idx = pic_frame_idx;
if (render->info.h264.slice_count < 1)
return;
- render->info.h264.is_reference = (h->cur_pic_ptr->f.reference & 3) ? VDP_TRUE : VDP_FALSE;
+ render->info.h264.is_reference = (h->cur_pic_ptr->reference & 3) ? VDP_TRUE : VDP_FALSE;
render->info.h264.field_pic_flag = h->picture_structure != PICT_FRAME;
render->info.h264.bottom_field_flag = h->picture_structure == PICT_BOTTOM_FIELD;
render->info.h264.num_ref_frames = h->sps.ref_frame_count;
VdpVideoSurface surface = ff_vdpau_get_surface_id(pic);
if (pic_structure == 0)
- pic_structure = pic->f.reference;
+ pic_structure = pic->reference;
rf->surface = surface;
- rf->is_long_term = pic->f.reference && pic->long_ref;
+ rf->is_long_term = pic->reference && pic->long_ref;
rf->top_is_reference = (pic_structure & PICT_TOP_FIELD) != 0;
rf->bottom_is_reference = (pic_structure & PICT_BOTTOM_FIELD) != 0;
rf->field_order_cnt[0] = h264_foc(pic->field_poc[0]);
VdpVideoSurface surface_ref;
int pic_frame_idx;
- if (!pic || !pic->f.reference)
+ if (!pic || !pic->reference)
continue;
pic_frame_idx = pic->long_ref ? pic->pic_id : pic->frame_num;
surface_ref = ff_vdpau_get_surface_id(pic);
++rf2;
}
if (rf2 != rf) {
- rf2->top_is_reference |= (pic->f.reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE;
- rf2->bottom_is_reference |= (pic->f.reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
+ rf2->top_is_reference |= (pic->reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE;
+ rf2->bottom_is_reference |= (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
continue;
}
if (rf >= &info->referenceFrames[H264_RF_COUNT])
continue;
- vdpau_h264_set_rf(rf, pic, pic->f.reference);
+ vdpau_h264_set_rf(rf, pic, pic->reference);
++rf;
}
}
#ifndef FF_API_DESTRUCT_PACKET
#define FF_API_DESTRUCT_PACKET (LIBAVCODEC_VERSION_MAJOR < 56)
#endif
+#ifndef FF_API_GET_BUFFER
+#define FF_API_GET_BUFFER (LIBAVCODEC_VERSION_MAJOR < 56)
+#endif
#endif /* AVCODEC_VERSION_H */
typedef struct VmdVideoContext {
AVCodecContext *avctx;
- AVFrame frame;
AVFrame prev_frame;
const unsigned char *buf;
return ps - src;
}
-static void vmd_decode(VmdVideoContext *s)
+static void vmd_decode(VmdVideoContext *s, AVFrame *frame)
{
int i;
unsigned int *palette32;
(frame_x || frame_y || (frame_width != s->avctx->width) ||
(frame_height != s->avctx->height))) {
- memcpy(s->frame.data[0], s->prev_frame.data[0],
- s->avctx->height * s->frame.linesize[0]);
+ memcpy(frame->data[0], s->prev_frame.data[0],
+ s->avctx->height * frame->linesize[0]);
}
/* check if there is a new palette */
pb_size = s->unpack_buffer_size;
}
- dp = &s->frame.data[0][frame_y * s->frame.linesize[0] + frame_x];
+ dp = &frame->data[0][frame_y * frame->linesize[0] + frame_x];
pp = &s->prev_frame.data[0][frame_y * s->prev_frame.linesize[0] + frame_x];
switch (meth) {
case 1:
ofs, frame_width);
break;
}
- dp += s->frame.linesize[0];
+ dp += frame->linesize[0];
pp += s->prev_frame.linesize[0];
}
break;
memcpy(dp, pb, frame_width);
pb += frame_width;
pb_size -= frame_width;
- dp += s->frame.linesize[0];
+ dp += frame->linesize[0];
pp += s->prev_frame.linesize[0];
}
break;
av_log(s->avctx, AV_LOG_ERROR, "VMD video: offset > width (%d > %d)\n",
ofs, frame_width);
}
- dp += s->frame.linesize[0];
+ dp += frame->linesize[0];
pp += s->prev_frame.linesize[0];
}
break;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
VmdVideoContext *s = avctx->priv_data;
+ AVFrame *frame = data;
+ int ret;
s->buf = buf;
s->size = buf_size;
if (buf_size < 16)
return buf_size;
- s->frame.reference = 1;
- if (ff_get_buffer(avctx, &s->frame)) {
+ if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) {
av_log(s->avctx, AV_LOG_ERROR, "VMD Video: get_buffer() failed\n");
- return -1;
+ return ret;
}
- vmd_decode(s);
+ vmd_decode(s, frame);
/* make the palette available on the way out */
- memcpy(s->frame.data[1], s->palette, PALETTE_COUNT * 4);
+ memcpy(frame->data[1], s->palette, PALETTE_COUNT * 4);
/* shuffle frames */
- FFSWAP(AVFrame, s->frame, s->prev_frame);
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
+ av_frame_unref(&s->prev_frame);
+ if ((ret = av_frame_ref(&s->prev_frame, frame)) < 0)
+ return ret;
*got_frame = 1;
- *(AVFrame*)data = s->prev_frame;
/* report that the buffer was completely consumed */
return buf_size;
{
VmdVideoContext *s = avctx->priv_data;
- if (s->prev_frame.data[0])
- avctx->release_buffer(avctx, &s->prev_frame);
+ av_frame_unref(&s->prev_frame);
av_free(s->unpack_buffer);
return 0;
/* get output buffer */
frame->nb_samples = ((silent_chunks + audio_chunks) * avctx->block_align) /
avctx->channels;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
+#include "internal.h"
enum EncTypes {
MAGIC_WMVd = 0x574D5664,
VmncContext * const c = avctx->priv_data;
uint8_t *outptr;
const uint8_t *src = buf;
- int dx, dy, w, h, depth, enc, chunks, res, size_left;
+ int dx, dy, w, h, depth, enc, chunks, res, size_left, ret;
- c->pic.reference = 1;
- c->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
- if(avctx->reget_buffer(avctx, &c->pic) < 0){
+ if ((ret = ff_reget_buffer(avctx, &c->pic)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
- return -1;
+ return ret;
}
c->pic.key_frame = 0;
}
}
*got_frame = 1;
- *(AVFrame*)data = c->pic;
+ if ((ret = av_frame_ref(data, &c->pic)) < 0)
+ return ret;
/* always report that the buffer was completely consumed */
return buf_size;
{
VmncContext * const c = avctx->priv_data;
- if (c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
+ av_frame_unref(&c->pic);
av_free(c->curbits);
av_free(c->curmask);
/* get output buffer */
frame->nb_samples = vc->blocksize[1] / 2;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
int version;
int width, height;
int chroma_x_shift, chroma_y_shift;
- AVFrame golden_frame;
- AVFrame last_frame;
- AVFrame current_frame;
+ ThreadFrame golden_frame;
+ ThreadFrame last_frame;
+ ThreadFrame current_frame;
int keyframe;
DSPContext dsp;
VideoDSPContext vdsp;
{
Vp3DecodeContext *s = avctx->priv_data;
- if (s->golden_frame.data[0]) {
- if (s->golden_frame.data[0] == s->last_frame.data[0])
- memset(&s->last_frame, 0, sizeof(AVFrame));
- if (s->current_frame.data[0] == s->golden_frame.data[0])
- memset(&s->current_frame, 0, sizeof(AVFrame));
+ if (s->golden_frame.f)
ff_thread_release_buffer(avctx, &s->golden_frame);
- }
- if (s->last_frame.data[0]) {
- if (s->current_frame.data[0] == s->last_frame.data[0])
- memset(&s->current_frame, 0, sizeof(AVFrame));
+ if (s->last_frame.f)
ff_thread_release_buffer(avctx, &s->last_frame);
- }
- if (s->current_frame.data[0])
+ if (s->current_frame.f)
ff_thread_release_buffer(avctx, &s->current_frame);
}
av_freep(&s->motion_val[1]);
av_freep(&s->edge_emu_buffer);
+ /* release all frames */
+ vp3_decode_flush(avctx);
+ av_frame_free(&s->current_frame.f);
+ av_frame_free(&s->last_frame.f);
+ av_frame_free(&s->golden_frame.f);
+
if (avctx->internal->is_copy)
return 0;
ff_free_vlc(&s->mode_code_vlc);
ff_free_vlc(&s->motion_vector_vlc);
- /* release all frames */
- vp3_decode_flush(avctx);
return 0;
}
int width = s->fragment_width[!!plane];
int height = s->fragment_height[!!plane];
int fragment = s->fragment_start [plane] + ystart * width;
- int stride = s->current_frame.linesize[plane];
- uint8_t *plane_data = s->current_frame.data [plane];
+ int stride = s->current_frame.f->linesize[plane];
+ uint8_t *plane_data = s->current_frame.f->data [plane];
if (!s->flipped_image) stride = -stride;
plane_data += s->data_offset[plane] + 8*ystart*stride;
}
cy = y >> s->chroma_y_shift;
- offset[0] = s->current_frame.linesize[0]*y;
- offset[1] = s->current_frame.linesize[1]*cy;
- offset[2] = s->current_frame.linesize[2]*cy;
+ offset[0] = s->current_frame.f->linesize[0]*y;
+ offset[1] = s->current_frame.f->linesize[1]*cy;
+ offset[2] = s->current_frame.f->linesize[2]*cy;
for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
offset[i] = 0;
emms_c();
- s->avctx->draw_horiz_band(s->avctx, &s->current_frame, offset, y, 3, h);
+ s->avctx->draw_horiz_band(s->avctx, s->current_frame.f, offset, y, 3, h);
}
/**
*/
static void await_reference_row(Vp3DecodeContext *s, Vp3Fragment *fragment, int motion_y, int y)
{
- AVFrame *ref_frame;
+ ThreadFrame *ref_frame;
int ref_row;
int border = motion_y&1;
return;
for (plane = 0; plane < 3; plane++) {
- uint8_t *output_plane = s->current_frame.data [plane] + s->data_offset[plane];
- uint8_t * last_plane = s-> last_frame.data [plane] + s->data_offset[plane];
- uint8_t *golden_plane = s-> golden_frame.data [plane] + s->data_offset[plane];
- int stride = s->current_frame.linesize[plane];
+ uint8_t *output_plane = s->current_frame.f->data [plane] + s->data_offset[plane];
+ uint8_t * last_plane = s-> last_frame.f->data [plane] + s->data_offset[plane];
+ uint8_t *golden_plane = s-> golden_frame.f->data [plane] + s->data_offset[plane];
+ int stride = s->current_frame.f->linesize[plane];
int plane_width = s->width >> (plane && s->chroma_x_shift);
int plane_height = s->height >> (plane && s->chroma_y_shift);
int8_t (*motion_val)[2] = s->motion_val[!!plane];
return 0;
}
+static av_cold int init_frames(Vp3DecodeContext *s)
+{
+ s->current_frame.f = av_frame_alloc();
+ s->last_frame.f = av_frame_alloc();
+ s->golden_frame.f = av_frame_alloc();
+
+ if (!s->current_frame.f || !s->last_frame.f || !s->golden_frame.f) {
+ av_frame_free(&s->current_frame.f);
+ av_frame_free(&s->last_frame.f);
+ av_frame_free(&s->golden_frame.f);
+ return AVERROR(ENOMEM);
+ }
+
+ return 0;
+}
+
static av_cold int vp3_decode_init(AVCodecContext *avctx)
{
Vp3DecodeContext *s = avctx->priv_data;
- int i, inter, plane;
+ int i, inter, plane, ret;
int c_width;
int c_height;
int y_fragment_count, c_fragment_count;
+ ret = init_frames(s);
+ if (ret < 0)
+ return ret;
+
+ avctx->internal->allocate_progress = 1;
+
if (avctx->codec_tag == MKTAG('V','P','3','0'))
s->version = 0;
else
&motion_vector_vlc_table[0][1], 2, 1,
&motion_vector_vlc_table[0][0], 2, 1, 0);
- for (i = 0; i < 3; i++) {
- s->current_frame.data[i] = NULL;
- s->last_frame.data[i] = NULL;
- s->golden_frame.data[i] = NULL;
- }
-
return allocate_tables(avctx);
vlc_fail:
}
/// Release and shuffle frames after decode finishes
-static void update_frames(AVCodecContext *avctx)
+static int update_frames(AVCodecContext *avctx)
{
Vp3DecodeContext *s = avctx->priv_data;
+ int ret = 0;
- /* release the last frame, if it is allocated and if it is not the
- * golden frame */
- if (s->last_frame.data[0] && s->last_frame.type != FF_BUFFER_TYPE_COPY)
- ff_thread_release_buffer(avctx, &s->last_frame);
/* shuffle frames (last = current) */
- s->last_frame= s->current_frame;
+ ff_thread_release_buffer(avctx, &s->last_frame);
+ ret = ff_thread_ref_frame(&s->last_frame, &s->current_frame);
+ if (ret < 0)
+ goto fail;
if (s->keyframe) {
- if (s->golden_frame.data[0])
- ff_thread_release_buffer(avctx, &s->golden_frame);
- s->golden_frame = s->current_frame;
- s->last_frame.type = FF_BUFFER_TYPE_COPY;
+ ff_thread_release_buffer(avctx, &s->golden_frame);
+ ret = ff_thread_ref_frame(&s->golden_frame, &s->current_frame);
}
- s->current_frame.data[0]= NULL; /* ensure that we catch any access to this released frame */
+fail:
+ ff_thread_release_buffer(avctx, &s->current_frame);
+ return ret;
+}
+
+static int ref_frame(Vp3DecodeContext *s, ThreadFrame *dst, ThreadFrame *src)
+{
+ ff_thread_release_buffer(s->avctx, dst);
+ if (src->f->data[0])
+ return ff_thread_ref_frame(dst, src);
+ return 0;
+}
+
+static int ref_frames(Vp3DecodeContext *dst, Vp3DecodeContext *src)
+{
+ int ret;
+ if ((ret = ref_frame(dst, &dst->current_frame, &src->current_frame)) < 0 ||
+ (ret = ref_frame(dst, &dst->golden_frame, &src->golden_frame)) < 0 ||
+ (ret = ref_frame(dst, &dst->last_frame, &src->last_frame)) < 0)
+ return ret;
+ return 0;
}
static int vp3_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
#define copy_fields(to, from, start_field, end_field) memcpy(&to->start_field, &from->start_field, (char*)&to->end_field - (char*)&to->start_field)
- if (!s1->current_frame.data[0]
+ if (!s1->current_frame.f->data[0]
||s->width != s1->width
||s->height!= s1->height) {
if (s != s1)
- copy_fields(s, s1, golden_frame, current_frame);
+ ref_frames(s, s1);
return -1;
}
if (s != s1) {
// init tables if the first frame hasn't been decoded
- if (!s->current_frame.data[0]) {
+ if (!s->current_frame.f->data[0]) {
int y_fragment_count, c_fragment_count;
s->avctx = dst;
err = allocate_tables(dst);
}
// copy previous frame data
- copy_fields(s, s1, golden_frame, dsp);
+ if ((err = ref_frames(s, s1)) < 0)
+ return err;
+
+ s->keyframe = s1->keyframe;
// copy qscale data if necessary
for (i = 0; i < 3; i++) {
#undef copy_fields
}
- update_frames(dst);
-
- return 0;
+ return update_frames(dst);
}
static int vp3_decode_frame(AVCodecContext *avctx,
int buf_size = avpkt->size;
Vp3DecodeContext *s = avctx->priv_data;
GetBitContext gb;
- int i;
+ int i, ret;
init_get_bits(&gb, buf, buf_size * 8);
if (avctx->skip_frame >= AVDISCARD_NONKEY && !s->keyframe)
return buf_size;
- s->current_frame.reference = 3;
- s->current_frame.pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
- if (ff_thread_get_buffer(avctx, &s->current_frame) < 0) {
+ s->current_frame.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
+ if (ff_thread_get_buffer(avctx, &s->current_frame, AV_GET_BUFFER_FLAG_REF) < 0) {
av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
goto error;
}
if (!s->edge_emu_buffer)
- s->edge_emu_buffer = av_malloc(9*FFABS(s->current_frame.linesize[0]));
+ s->edge_emu_buffer = av_malloc(9*FFABS(s->current_frame.f->linesize[0]));
if (s->keyframe) {
if (!s->theora)
skip_bits(&gb, 2); /* reserved? */
}
} else {
- if (!s->golden_frame.data[0]) {
+ if (!s->golden_frame.f->data[0]) {
av_log(s->avctx, AV_LOG_WARNING, "vp3: first frame not a keyframe\n");
- s->golden_frame.reference = 3;
- s->golden_frame.pict_type = AV_PICTURE_TYPE_I;
- if (ff_thread_get_buffer(avctx, &s->golden_frame) < 0) {
+ s->golden_frame.f->pict_type = AV_PICTURE_TYPE_I;
+ if (ff_thread_get_buffer(avctx, &s->golden_frame, AV_GET_BUFFER_FLAG_REF) < 0) {
av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
goto error;
}
- s->last_frame = s->golden_frame;
- s->last_frame.type = FF_BUFFER_TYPE_COPY;
+ ff_thread_release_buffer(avctx, &s->last_frame);
+ if ((ret = ff_thread_ref_frame(&s->last_frame, &s->golden_frame)) < 0)
+ goto error;
ff_thread_report_progress(&s->last_frame, INT_MAX, 0);
}
}
if (s->flipped_image)
s->data_offset[i] = 0;
else
- s->data_offset[i] = (height-1) * s->current_frame.linesize[i];
+ s->data_offset[i] = (height-1) * s->current_frame.f->linesize[i];
}
s->last_slice_end = 0;
}
vp3_draw_horiz_band(s, s->avctx->height);
+ if ((ret = av_frame_ref(data, s->current_frame.f)) < 0)
+ return ret;
*got_frame = 1;
- *(AVFrame*)data= s->current_frame;
- if (!HAVE_THREADS || !(s->avctx->active_thread_type&FF_THREAD_FRAME))
- update_frames(avctx);
+ if (!HAVE_THREADS || !(s->avctx->active_thread_type&FF_THREAD_FRAME)) {
+ ret = update_frames(avctx);
+ if (ret < 0)
+ return ret;
+ }
return buf_size;
ff_thread_report_progress(&s->current_frame, INT_MAX, 0);
if (!HAVE_THREADS || !(s->avctx->active_thread_type&FF_THREAD_FRAME))
- avctx->release_buffer(avctx, &s->current_frame);
+ av_frame_unref(s->current_frame.f);
return -1;
}
s->motion_val[1] = NULL;
s->edge_emu_buffer = NULL;
- return 0;
+ return init_frames(s);
}
#if CONFIG_THEORA_DECODER
int rows, cols;
ff_vp56_init_range_decoder(&s->c, buf, buf_size);
- s->framep[VP56_FRAME_CURRENT]->key_frame = !vp56_rac_get(c);
+ s->frames[VP56_FRAME_CURRENT]->key_frame = !vp56_rac_get(c);
vp56_rac_get(c);
ff_vp56_init_dequant(s, vp56_rac_gets(c, 6));
- if (s->framep[VP56_FRAME_CURRENT]->key_frame)
+ if (s->frames[VP56_FRAME_CURRENT]->key_frame)
{
vp56_rac_gets(c, 8);
if(vp56_rac_gets(c, 5) > 5)
if (vp56_rac_get_prob(c, vp5_dccv_pct[pt][node])) {
def_prob[node] = vp56_rac_gets_nn(c, 7);
model->coeff_dccv[pt][node] = def_prob[node];
- } else if (s->framep[VP56_FRAME_CURRENT]->key_frame) {
+ } else if (s->frames[VP56_FRAME_CURRENT]->key_frame) {
model->coeff_dccv[pt][node] = def_prob[node];
}
if (vp56_rac_get_prob(c, vp5_ract_pct[ct][pt][cg][node])) {
def_prob[node] = vp56_rac_gets_nn(c, 7);
model->coeff_ract[pt][ct][cg][node] = def_prob[node];
- } else if (s->framep[VP56_FRAME_CURRENT]->key_frame) {
+ } else if (s->frames[VP56_FRAME_CURRENT]->key_frame) {
model->coeff_ract[pt][ct][cg][node] = def_prob[node];
}
static av_cold int vp5_decode_init(AVCodecContext *avctx)
{
VP56Context *s = avctx->priv_data;
+ int ret;
- ff_vp56_init(avctx, 1, 0);
+ if ((ret = ff_vp56_init(avctx, 1, 0)) < 0)
+ return ret;
s->vp56_coord_div = vp5_coord_div;
s->parse_vector_adjustment = vp5_parse_vector_adjustment;
s->parse_coeff = vp5_parse_coeff;
s->quantizer = quantizer;
s->dequant_dc = vp56_dc_dequant[quantizer] << 2;
s->dequant_ac = vp56_ac_dequant[quantizer] << 2;
- memset(s->qscale_table, quantizer, s->mb_width);
}
static int vp56_get_vectors_predictors(VP56Context *s, int row, int col,
static void vp56_mc(VP56Context *s, int b, int plane, uint8_t *src,
int stride, int x, int y)
{
- uint8_t *dst=s->framep[VP56_FRAME_CURRENT]->data[plane]+s->block_offset[b];
+ uint8_t *dst = s->frames[VP56_FRAME_CURRENT]->data[plane] + s->block_offset[b];
uint8_t *src_block;
int src_offset;
int overlap_offset = 0;
if (s->avctx->skip_loop_filter >= AVDISCARD_ALL ||
(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY
- && !s->framep[VP56_FRAME_CURRENT]->key_frame))
+ && !s->frames[VP56_FRAME_CURRENT]->key_frame))
deblock_filtering = 0;
dx = s->mv[b].x / s->vp56_coord_div[b];
VP56Frame ref_frame;
int b, ab, b_max, plane, off;
- if (s->framep[VP56_FRAME_CURRENT]->key_frame)
+ if (s->frames[VP56_FRAME_CURRENT]->key_frame)
mb_type = VP56_MB_INTRA;
else
mb_type = vp56_decode_mv(s, row, col);
vp56_add_predictors_dc(s, ref_frame);
- frame_current = s->framep[VP56_FRAME_CURRENT];
- frame_ref = s->framep[ref_frame];
+ frame_current = s->frames[VP56_FRAME_CURRENT];
+ frame_ref = s->frames[ref_frame];
if (mb_type != VP56_MB_INTRA && !frame_ref->data[0])
return;
static int vp56_size_changed(AVCodecContext *avctx)
{
VP56Context *s = avctx->priv_data;
- int stride = s->framep[VP56_FRAME_CURRENT]->linesize[0];
+ int stride = s->frames[VP56_FRAME_CURRENT]->linesize[0];
int i;
s->plane_width[0] = s->plane_width[3] = avctx->coded_width;
s->plane_height[1] = s->plane_height[2] = avctx->coded_height/2;
for (i=0; i<4; i++)
- s->stride[i] = s->flip * s->framep[VP56_FRAME_CURRENT]->linesize[i];
+ s->stride[i] = s->flip * s->frames[VP56_FRAME_CURRENT]->linesize[i];
s->mb_width = (avctx->coded_width +15) / 16;
s->mb_height = (avctx->coded_height+15) / 16;
return -1;
}
- s->qscale_table = av_realloc(s->qscale_table, s->mb_width);
s->above_blocks = av_realloc(s->above_blocks,
(4*s->mb_width+6) * sizeof(*s->above_blocks));
s->macroblocks = av_realloc(s->macroblocks,
{
const uint8_t *buf = avpkt->data;
VP56Context *s = avctx->priv_data;
- AVFrame *const p = s->framep[VP56_FRAME_CURRENT];
+ AVFrame *const p = s->frames[VP56_FRAME_CURRENT];
int remaining_buf_size = avpkt->size;
int is_alpha, av_uninit(alpha_offset);
+ int res;
if (s->has_alpha) {
if (remaining_buf_size < 3)
int mb_row, mb_col, mb_row_flip, mb_offset = 0;
int block, y, uv, stride_y, stride_uv;
int golden_frame = 0;
- int res;
s->modelp = &s->models[is_alpha];
res = s->parse_header(s, buf, remaining_buf_size, &golden_frame);
if (res < 0) {
int i;
- for (i = 0; i < 4; i++) {
- if (s->frames[i].data[0])
- avctx->release_buffer(avctx, &s->frames[i]);
- }
+ for (i = 0; i < 4; i++)
+ av_frame_unref(s->frames[i]);
return res;
}
if (res == VP56_SIZE_CHANGE) {
int i;
- for (i = 0; i < 4; i++) {
- if (s->frames[i].data[0])
- avctx->release_buffer(avctx, &s->frames[i]);
- }
+ for (i = 0; i < 4; i++)
+ av_frame_unref(s->frames[i]);
if (is_alpha) {
avcodec_set_dimensions(avctx, 0, 0);
return -1;
}
if (!is_alpha) {
- p->reference = 1;
- if (ff_get_buffer(avctx, p) < 0) {
+ if (ff_get_buffer(avctx, p, AV_GET_BUFFER_FLAG_REF) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
if (res == VP56_SIZE_CHANGE)
if (vp56_size_changed(avctx)) {
- avctx->release_buffer(avctx, p);
+ av_frame_unref(p);
return -1;
}
}
next:
if (p->key_frame || golden_frame) {
- if (s->framep[VP56_FRAME_GOLDEN]->data[0] &&
- s->framep[VP56_FRAME_GOLDEN] != s->framep[VP56_FRAME_GOLDEN2])
- avctx->release_buffer(avctx, s->framep[VP56_FRAME_GOLDEN]);
- s->framep[VP56_FRAME_GOLDEN] = p;
+ av_frame_unref(s->frames[VP56_FRAME_GOLDEN]);
+ if ((res = av_frame_ref(s->frames[VP56_FRAME_GOLDEN], p)) < 0)
+ return res;
}
if (s->has_alpha) {
- FFSWAP(AVFrame *, s->framep[VP56_FRAME_GOLDEN],
- s->framep[VP56_FRAME_GOLDEN2]);
+ FFSWAP(AVFrame *, s->frames[VP56_FRAME_GOLDEN],
+ s->frames[VP56_FRAME_GOLDEN2]);
buf += alpha_offset;
remaining_buf_size -= alpha_offset;
}
}
- if (s->framep[VP56_FRAME_PREVIOUS] == s->framep[VP56_FRAME_GOLDEN] ||
- s->framep[VP56_FRAME_PREVIOUS] == s->framep[VP56_FRAME_GOLDEN2]) {
- if (s->framep[VP56_FRAME_UNUSED] != s->framep[VP56_FRAME_GOLDEN] &&
- s->framep[VP56_FRAME_UNUSED] != s->framep[VP56_FRAME_GOLDEN2])
- FFSWAP(AVFrame *, s->framep[VP56_FRAME_PREVIOUS],
- s->framep[VP56_FRAME_UNUSED]);
- else
- FFSWAP(AVFrame *, s->framep[VP56_FRAME_PREVIOUS],
- s->framep[VP56_FRAME_UNUSED2]);
- } else if (s->framep[VP56_FRAME_PREVIOUS]->data[0])
- avctx->release_buffer(avctx, s->framep[VP56_FRAME_PREVIOUS]);
- FFSWAP(AVFrame *, s->framep[VP56_FRAME_CURRENT],
- s->framep[VP56_FRAME_PREVIOUS]);
-
- p->qstride = 0;
- p->qscale_table = s->qscale_table;
- p->qscale_type = FF_QSCALE_TYPE_VP56;
- *(AVFrame*)data = *p;
+ av_frame_unref(s->frames[VP56_FRAME_PREVIOUS]);
+ FFSWAP(AVFrame *, s->frames[VP56_FRAME_CURRENT],
+ s->frames[VP56_FRAME_PREVIOUS]);
+
+ if ((res = av_frame_ref(data, p)) < 0)
+ return res;
*got_frame = 1;
return avpkt->size;
}
-av_cold void ff_vp56_init(AVCodecContext *avctx, int flip, int has_alpha)
+av_cold int ff_vp56_init(AVCodecContext *avctx, int flip, int has_alpha)
{
VP56Context *s = avctx->priv_data;
int i;
ff_init_scantable_permutation(s->dsp.idct_permutation, s->vp3dsp.idct_perm);
ff_init_scantable(s->dsp.idct_permutation, &s->scantable,ff_zigzag_direct);
- for (i=0; i<4; i++)
- s->framep[i] = &s->frames[i];
- s->framep[VP56_FRAME_UNUSED] = s->framep[VP56_FRAME_GOLDEN];
- s->framep[VP56_FRAME_UNUSED2] = s->framep[VP56_FRAME_GOLDEN2];
+ for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++) {
+ s->frames[i] = av_frame_alloc();
+ if (!s->frames[i]) {
+ ff_vp56_free(avctx);
+ return AVERROR(ENOMEM);
+ }
+ }
s->edge_emu_buffer_alloc = NULL;
s->above_blocks = NULL;
s->frbi = 0;
s->srbi = 2;
}
+
+ return 0;
}
av_cold int ff_vp56_free(AVCodecContext *avctx)
{
VP56Context *s = avctx->priv_data;
+ int i;
- av_freep(&s->qscale_table);
av_freep(&s->above_blocks);
av_freep(&s->macroblocks);
av_freep(&s->edge_emu_buffer_alloc);
- if (s->framep[VP56_FRAME_GOLDEN]->data[0])
- avctx->release_buffer(avctx, s->framep[VP56_FRAME_GOLDEN]);
- if (s->framep[VP56_FRAME_GOLDEN2]->data[0])
- avctx->release_buffer(avctx, s->framep[VP56_FRAME_GOLDEN2]);
- if (s->framep[VP56_FRAME_PREVIOUS]->data[0])
- avctx->release_buffer(avctx, s->framep[VP56_FRAME_PREVIOUS]);
+
+ for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
+ av_frame_free(&s->frames[i]);
+
return 0;
}
VP3DSPContext vp3dsp;
VP56DSPContext vp56dsp;
ScanTable scantable;
- AVFrame frames[4];
- AVFrame *framep[6];
+ AVFrame *frames[4];
uint8_t *edge_emu_buffer_alloc;
uint8_t *edge_emu_buffer;
VP56RangeCoder c;
int quantizer;
uint16_t dequant_dc;
uint16_t dequant_ac;
- int8_t *qscale_table;
/* DC predictors management */
VP56RefDc *above_blocks;
};
-void ff_vp56_init(AVCodecContext *avctx, int flip, int has_alpha);
+int ff_vp56_init(AVCodecContext *avctx, int flip, int has_alpha);
int ff_vp56_free(AVCodecContext *avctx);
void ff_vp56_init_dequant(VP56Context *s, int quantizer);
int ff_vp56_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
VP56_FRAME_PREVIOUS = 1,
VP56_FRAME_GOLDEN = 2,
VP56_FRAME_GOLDEN2 = 3,
- VP56_FRAME_UNUSED = 4,
- VP56_FRAME_UNUSED2 = 5,
} VP56Frame;
typedef enum {
int res = 0;
int separated_coeff = buf[0] & 1;
- s->framep[VP56_FRAME_CURRENT]->key_frame = !(buf[0] & 0x80);
+ s->frames[VP56_FRAME_CURRENT]->key_frame = !(buf[0] & 0x80);
ff_vp56_init_dequant(s, (buf[0] >> 1) & 0x3F);
- if (s->framep[VP56_FRAME_CURRENT]->key_frame) {
+ if (s->frames[VP56_FRAME_CURRENT]->key_frame) {
sub_version = buf[1] >> 3;
if (sub_version > 8)
return AVERROR_INVALIDDATA;
buf += coeff_offset;
buf_size -= coeff_offset;
if (buf_size < 0) {
- if (s->framep[VP56_FRAME_CURRENT]->key_frame)
+ if (s->frames[VP56_FRAME_CURRENT]->key_frame)
avcodec_set_dimensions(s->avctx, 0, 0);
return AVERROR_INVALIDDATA;
}
if (vp56_rac_get_prob(c, vp6_dccv_pct[pt][node])) {
def_prob[node] = vp56_rac_gets_nn(c, 7);
model->coeff_dccv[pt][node] = def_prob[node];
- } else if (s->framep[VP56_FRAME_CURRENT]->key_frame) {
+ } else if (s->frames[VP56_FRAME_CURRENT]->key_frame) {
model->coeff_dccv[pt][node] = def_prob[node];
}
if (vp56_rac_get_prob(c, vp6_ract_pct[ct][pt][cg][node])) {
def_prob[node] = vp56_rac_gets_nn(c, 7);
model->coeff_ract[pt][ct][cg][node] = def_prob[node];
- } else if (s->framep[VP56_FRAME_CURRENT]->key_frame) {
+ } else if (s->frames[VP56_FRAME_CURRENT]->key_frame) {
model->coeff_ract[pt][ct][cg][node] = def_prob[node];
}
static av_cold int vp6_decode_init(AVCodecContext *avctx)
{
VP56Context *s = avctx->priv_data;
+ int ret;
+
+ if ((ret = ff_vp56_init(avctx, avctx->codec->id == AV_CODEC_ID_VP6,
+ avctx->codec->id == AV_CODEC_ID_VP6A)) < 0)
+ return ret;
- ff_vp56_init(avctx, avctx->codec->id == AV_CODEC_ID_VP6,
- avctx->codec->id == AV_CODEC_ID_VP6A);
s->vp56_coord_div = vp6_coord_div;
s->parse_vector_adjustment = vp6_parse_vector_adjustment;
s->filter = vp6_filter;
s->macroblocks = NULL;
}
-static int vp8_alloc_frame(VP8Context *s, AVFrame *f)
+static int vp8_alloc_frame(VP8Context *s, VP8Frame *f, int ref)
{
int ret;
- if ((ret = ff_thread_get_buffer(s->avctx, f)) < 0)
+ if ((ret = ff_thread_get_buffer(s->avctx, &f->tf,
+ ref ? AV_GET_BUFFER_FLAG_REF : 0)) < 0)
return ret;
- if (s->num_maps_to_be_freed && !s->maps_are_invalid) {
- f->ref_index[0] = s->segmentation_maps[--s->num_maps_to_be_freed];
- } else if (!(f->ref_index[0] = av_mallocz(s->mb_width * s->mb_height))) {
- ff_thread_release_buffer(s->avctx, f);
+ if (!(f->seg_map = av_buffer_allocz(s->mb_width * s->mb_height))) {
+ ff_thread_release_buffer(s->avctx, &f->tf);
return AVERROR(ENOMEM);
}
return 0;
}
-static void vp8_release_frame(VP8Context *s, AVFrame *f, int prefer_delayed_free, int can_direct_free)
+static void vp8_release_frame(VP8Context *s, VP8Frame *f)
{
- if (f->ref_index[0]) {
- if (prefer_delayed_free) {
- /* Upon a size change, we want to free the maps but other threads may still
- * be using them, so queue them. Upon a seek, all threads are inactive so
- * we want to cache one to prevent re-allocation in the next decoding
- * iteration, but the rest we can free directly. */
- int max_queued_maps = can_direct_free ? 1 : FF_ARRAY_ELEMS(s->segmentation_maps);
- if (s->num_maps_to_be_freed < max_queued_maps) {
- s->segmentation_maps[s->num_maps_to_be_freed++] = f->ref_index[0];
- } else if (can_direct_free) /* vp8_decode_flush(), but our queue is full */ {
- av_free(f->ref_index[0]);
- } /* else: MEMLEAK (should never happen, but better that than crash) */
- f->ref_index[0] = NULL;
- } else /* vp8_decode_free() */ {
- av_free(f->ref_index[0]);
- }
+ av_buffer_unref(&f->seg_map);
+ ff_thread_release_buffer(s->avctx, &f->tf);
+}
+
+static int vp8_ref_frame(VP8Context *s, VP8Frame *dst, VP8Frame *src)
+{
+ int ret;
+
+ vp8_release_frame(s, dst);
+
+ if ((ret = ff_thread_ref_frame(&dst->tf, &src->tf)) < 0)
+ return ret;
+ if (src->seg_map &&
+ !(dst->seg_map = av_buffer_ref(src->seg_map))) {
+ vp8_release_frame(s, dst);
+ return AVERROR(ENOMEM);
}
- ff_thread_release_buffer(s->avctx, f);
+
+ return 0;
}
-static void vp8_decode_flush_impl(AVCodecContext *avctx,
- int prefer_delayed_free, int can_direct_free, int free_mem)
+
+static void vp8_decode_flush_impl(AVCodecContext *avctx, int free_mem)
{
VP8Context *s = avctx->priv_data;
int i;
- if (!avctx->internal->is_copy) {
- for (i = 0; i < 5; i++)
- if (s->frames[i].data[0])
- vp8_release_frame(s, &s->frames[i], prefer_delayed_free, can_direct_free);
- }
+ for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
+ vp8_release_frame(s, &s->frames[i]);
memset(s->framep, 0, sizeof(s->framep));
- if (free_mem) {
+ if (free_mem)
free_buffers(s);
- s->maps_are_invalid = 1;
- }
}
static void vp8_decode_flush(AVCodecContext *avctx)
{
- vp8_decode_flush_impl(avctx, 1, 1, 0);
+ vp8_decode_flush_impl(avctx, 0);
}
static int update_dimensions(VP8Context *s, int width, int height)
if (av_image_check_size(width, height, 0, s->avctx))
return AVERROR_INVALIDDATA;
- vp8_decode_flush_impl(s->avctx, 1, 0, 1);
+ vp8_decode_flush_impl(s->avctx, 1);
avcodec_set_dimensions(s->avctx, width, height);
}
*/
static av_always_inline
void vp8_mc_luma(VP8Context *s, VP8ThreadData *td, uint8_t *dst,
- AVFrame *ref, const VP56mv *mv,
+ ThreadFrame *ref, const VP56mv *mv,
int x_off, int y_off, int block_w, int block_h,
int width, int height, int linesize,
vp8_mc_func mc_func[3][3])
{
- uint8_t *src = ref->data[0];
+ uint8_t *src = ref->f->data[0];
if (AV_RN32A(mv)) {
*/
static av_always_inline
void vp8_mc_chroma(VP8Context *s, VP8ThreadData *td, uint8_t *dst1, uint8_t *dst2,
- AVFrame *ref, const VP56mv *mv, int x_off, int y_off,
+ ThreadFrame *ref, const VP56mv *mv, int x_off, int y_off,
int block_w, int block_h, int width, int height, int linesize,
vp8_mc_func mc_func[3][3])
{
- uint8_t *src1 = ref->data[1], *src2 = ref->data[2];
+ uint8_t *src1 = ref->f->data[1], *src2 = ref->f->data[2];
if (AV_RN32A(mv)) {
int mx = mv->x&7, mx_idx = subpel_idx[0][mx];
static av_always_inline
void vp8_mc_part(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3],
- AVFrame *ref_frame, int x_off, int y_off,
+ ThreadFrame *ref_frame, int x_off, int y_off,
int bx_off, int by_off,
int block_w, int block_h,
int width, int height, VP56mv *mv)
int x_off = mb_x << 4, y_off = mb_y << 4;
int mx = (mb->mv.x>>2) + x_off + 8;
int my = (mb->mv.y>>2) + y_off;
- uint8_t **src= s->framep[ref]->data;
+ uint8_t **src= s->framep[ref]->tf.f->data;
int off= mx + (my + (mb_x&3)*4)*s->linesize + 64;
/* For threading, a ff_thread_await_progress here might be useful, but
* it actually slows down the decoder. Since a bad prefetch doesn't
{
int x_off = mb_x << 4, y_off = mb_y << 4;
int width = 16*s->mb_width, height = 16*s->mb_height;
- AVFrame *ref = s->framep[mb->ref_frame];
+ ThreadFrame *ref = &s->framep[mb->ref_frame]->tf;
VP56mv *bmv = mb->bmv;
switch (mb->partitioning) {
}
}
-static void release_queued_segmaps(VP8Context *s, int is_close)
-{
- int leave_behind = is_close ? 0 : !s->maps_are_invalid;
- while (s->num_maps_to_be_freed > leave_behind)
- av_freep(&s->segmentation_maps[--s->num_maps_to_be_freed]);
- s->maps_are_invalid = 0;
-}
-
#define MARGIN (16 << 2)
-static void vp8_decode_mv_mb_modes(AVCodecContext *avctx, AVFrame *curframe,
- AVFrame *prev_frame)
+static void vp8_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *curframe,
+ VP8Frame *prev_frame)
{
VP8Context *s = avctx->priv_data;
int mb_x, mb_y;
for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
if (mb_y == 0)
AV_WN32A((mb-s->mb_width-1)->intra4x4_pred_mode_top, DC_PRED*0x01010101);
- decode_mb_mode(s, mb, mb_x, mb_y, curframe->ref_index[0] + mb_xy,
- prev_frame && prev_frame->ref_index[0] ? prev_frame->ref_index[0] + mb_xy : NULL, 1);
+ decode_mb_mode(s, mb, mb_x, mb_y, curframe->seg_map->data + mb_xy,
+ prev_frame && prev_frame->seg_map ?
+ prev_frame->seg_map->data + mb_xy : NULL, 1);
s->mv_min.x -= 64;
s->mv_max.x -= 64;
}
int mb_y = td->thread_mb_pos>>16;
int i, y, mb_x, mb_xy = mb_y*s->mb_width;
int num_jobs = s->num_jobs;
- AVFrame *curframe = s->curframe, *prev_frame = s->prev_frame;
+ VP8Frame *curframe = s->curframe, *prev_frame = s->prev_frame;
VP56RangeCoder *c = &s->coeff_partition[mb_y & (s->num_coeff_partitions-1)];
VP8Macroblock *mb;
uint8_t *dst[3] = {
- curframe->data[0] + 16*mb_y*s->linesize,
- curframe->data[1] + 8*mb_y*s->uvlinesize,
- curframe->data[2] + 8*mb_y*s->uvlinesize
+ curframe->tf.f->data[0] + 16*mb_y*s->linesize,
+ curframe->tf.f->data[1] + 8*mb_y*s->uvlinesize,
+ curframe->tf.f->data[2] + 8*mb_y*s->uvlinesize
};
if (mb_y == 0) prev_td = td;
else prev_td = &s->thread_data[(jobnr + num_jobs - 1)%num_jobs];
if (!(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
for (i = 0; i < 3; i++)
for (y = 0; y < 16>>!!i; y++)
- dst[i][y*curframe->linesize[i]-1] = 129;
+ dst[i][y*curframe->tf.f->linesize[i]-1] = 129;
if (mb_y == 1) {
s->top_border[0][15] = s->top_border[0][23] = s->top_border[0][31] = 129;
}
s->vdsp.prefetch(dst[1] + (mb_x&7)*s->uvlinesize + 64, dst[2] - dst[1], 2);
if (!s->mb_layout)
- decode_mb_mode(s, mb, mb_x, mb_y, curframe->ref_index[0] + mb_xy,
- prev_frame && prev_frame->ref_index[0] ? prev_frame->ref_index[0] + mb_xy : NULL, 0);
+ decode_mb_mode(s, mb, mb_x, mb_y, curframe->seg_map->data + mb_xy,
+ prev_frame && prev_frame->seg_map ?
+ prev_frame->seg_map->data + mb_xy : NULL, 0);
prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_PREVIOUS);
VP8Context *s = avctx->priv_data;
VP8ThreadData *td = &s->thread_data[threadnr];
int mb_x, mb_y = td->thread_mb_pos>>16, num_jobs = s->num_jobs;
- AVFrame *curframe = s->curframe;
+ AVFrame *curframe = s->curframe->tf.f;
VP8Macroblock *mb;
VP8ThreadData *prev_td, *next_td;
uint8_t *dst[3] = {
VP8Context *s = avctx->priv_data;
VP8ThreadData *td = &s->thread_data[jobnr];
VP8ThreadData *next_td = NULL, *prev_td = NULL;
- AVFrame *curframe = s->curframe;
+ VP8Frame *curframe = s->curframe;
int mb_y, num_jobs = s->num_jobs;
td->thread_nr = threadnr;
for (mb_y = jobnr; mb_y < s->mb_height; mb_y += num_jobs) {
s->mv_max.y -= 64;
if (avctx->active_thread_type == FF_THREAD_FRAME)
- ff_thread_report_progress(curframe, mb_y, 0);
+ ff_thread_report_progress(&curframe->tf, mb_y, 0);
}
return 0;
VP8Context *s = avctx->priv_data;
int ret, i, referenced, num_jobs;
enum AVDiscard skip_thresh;
- AVFrame *av_uninit(curframe), *prev_frame;
-
- release_queued_segmaps(s, 0);
+ VP8Frame *av_uninit(curframe), *prev_frame;
if ((ret = decode_frame_header(s, avpkt->data, avpkt->size)) < 0)
goto err;
// release no longer referenced frames
for (i = 0; i < 5; i++)
- if (s->frames[i].data[0] &&
+ if (s->frames[i].tf.f->data[0] &&
&s->frames[i] != prev_frame &&
&s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
&s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
&s->frames[i] != s->framep[VP56_FRAME_GOLDEN2])
- vp8_release_frame(s, &s->frames[i], 1, 0);
+ vp8_release_frame(s, &s->frames[i]);
// find a free buffer
for (i = 0; i < 5; i++)
av_log(avctx, AV_LOG_FATAL, "Ran out of free frames!\n");
abort();
}
- if (curframe->data[0])
- vp8_release_frame(s, curframe, 1, 0);
+ if (curframe->tf.f->data[0])
+ vp8_release_frame(s, curframe);
// Given that arithmetic probabilities are updated every frame, it's quite likely
// that the values we have on a random interframe are complete junk if we didn't
goto err;
}
- curframe->key_frame = s->keyframe;
- curframe->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
- curframe->reference = referenced ? 3 : 0;
- if ((ret = vp8_alloc_frame(s, curframe))) {
+ curframe->tf.f->key_frame = s->keyframe;
+ curframe->tf.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
+ if ((ret = vp8_alloc_frame(s, curframe, referenced))) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed!\n");
goto err;
}
ff_thread_finish_setup(avctx);
- s->linesize = curframe->linesize[0];
- s->uvlinesize = curframe->linesize[1];
+ s->linesize = curframe->tf.f->linesize[0];
+ s->uvlinesize = curframe->tf.f->linesize[1];
if (!s->thread_data[0].edge_emu_buffer)
for (i = 0; i < MAX_THREADS; i++)
// Make sure the previous frame has read its segmentation map,
// if we re-use the same map.
if (prev_frame && s->segmentation.enabled && !s->segmentation.update_map)
- ff_thread_await_progress(prev_frame, 1, 0);
+ ff_thread_await_progress(&prev_frame->tf, 1, 0);
if (s->mb_layout == 1)
vp8_decode_mv_mb_modes(avctx, curframe, prev_frame);
}
avctx->execute2(avctx, vp8_decode_mb_row_sliced, s->thread_data, NULL, num_jobs);
- ff_thread_report_progress(curframe, INT_MAX, 0);
+ ff_thread_report_progress(&curframe->tf, INT_MAX, 0);
memcpy(&s->framep[0], &s->next_framep[0], sizeof(s->framep[0]) * 4);
skip_decode:
s->prob[0] = s->prob[1];
if (!s->invisible) {
- *(AVFrame*)data = *curframe;
+ if ((ret = av_frame_ref(data, curframe->tf.f)) < 0)
+ return ret;
*got_frame = 1;
}
return ret;
}
+static av_cold int vp8_decode_free(AVCodecContext *avctx)
+{
+ VP8Context *s = avctx->priv_data;
+ int i;
+
+ vp8_decode_flush_impl(avctx, 1);
+ for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
+ av_frame_free(&s->frames[i].tf.f);
+
+ return 0;
+}
+
+static av_cold int vp8_init_frames(VP8Context *s)
+{
+ int i;
+ for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++) {
+ s->frames[i].tf.f = av_frame_alloc();
+ if (!s->frames[i].tf.f)
+ return AVERROR(ENOMEM);
+ }
+ return 0;
+}
+
static av_cold int vp8_decode_init(AVCodecContext *avctx)
{
VP8Context *s = avctx->priv_data;
+ int ret;
s->avctx = avctx;
avctx->pix_fmt = AV_PIX_FMT_YUV420P;
+ avctx->internal->allocate_progress = 1;
ff_videodsp_init(&s->vdsp, 8);
ff_h264_pred_init(&s->hpc, AV_CODEC_ID_VP8, 8, 1);
ff_vp8dsp_init(&s->vp8dsp);
- return 0;
-}
+ if ((ret = vp8_init_frames(s)) < 0) {
+ vp8_decode_free(avctx);
+ return ret;
+ }
-static av_cold int vp8_decode_free(AVCodecContext *avctx)
-{
- vp8_decode_flush_impl(avctx, 0, 1, 1);
- release_queued_segmaps(avctx->priv_data, 1);
return 0;
}
static av_cold int vp8_decode_init_thread_copy(AVCodecContext *avctx)
{
VP8Context *s = avctx->priv_data;
+ int ret;
s->avctx = avctx;
+ if ((ret = vp8_init_frames(s)) < 0) {
+ vp8_decode_free(avctx);
+ return ret;
+ }
+
return 0;
}
static int vp8_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
VP8Context *s = dst->priv_data, *s_src = src->priv_data;
+ int i;
if (s->macroblocks_base &&
(s_src->mb_width != s->mb_width || s_src->mb_height != s->mb_height)) {
free_buffers(s);
- s->maps_are_invalid = 1;
s->mb_width = s_src->mb_width;
s->mb_height = s_src->mb_height;
}
s->lf_delta = s_src->lf_delta;
memcpy(s->sign_bias, s_src->sign_bias, sizeof(s->sign_bias));
- memcpy(&s->frames, &s_src->frames, sizeof(s->frames));
+ for (i = 0; i < FF_ARRAY_ELEMS(s_src->frames); i++) {
+ if (s_src->frames[i].tf.f->data[0]) {
+ int ret = vp8_ref_frame(s, &s->frames[i], &s_src->frames[i]);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
s->framep[0] = REBASE(s_src->next_framep[0]);
s->framep[1] = REBASE(s_src->next_framep[1]);
s->framep[2] = REBASE(s_src->next_framep[2]);
#ifndef AVCODEC_VP8_H
#define AVCODEC_VP8_H
+#include "libavutil/buffer.h"
+
#include "vp56.h"
#include "vp56data.h"
#include "vp8dsp.h"
#include "h264pred.h"
+#include "thread.h"
#if HAVE_PTHREADS
#include <pthread.h>
#elif HAVE_W32THREADS
VP8FilterStrength *filter_strength;
} VP8ThreadData;
+typedef struct VP8Frame {
+ ThreadFrame tf;
+ AVBufferRef *seg_map;
+} VP8Frame;
+
#define MAX_THREADS 8
typedef struct VP8Context {
VP8ThreadData *thread_data;
AVCodecContext *avctx;
- AVFrame *framep[4];
- AVFrame *next_framep[4];
- AVFrame *curframe;
- AVFrame *prev_frame;
+ VP8Frame *framep[4];
+ VP8Frame *next_framep[4];
+ VP8Frame *curframe;
+ VP8Frame *prev_frame;
uint16_t mb_width; /* number of horizontal MB */
uint16_t mb_height; /* number of vertical MB */
VP8DSPContext vp8dsp;
H264PredContext hpc;
vp8_mc_func put_pixels_tab[3][3][3];
- AVFrame frames[5];
+ VP8Frame frames[5];
- /**
- * A list of segmentation_map buffers that are to be free()'ed in
- * the next decoding iteration. We can't free() them right away
- * because the map may still be used by subsequent decoding threads.
- * Unused if frame threading is off.
- */
- uint8_t *segmentation_maps[5];
- int num_maps_to_be_freed;
- int maps_are_invalid;
int num_jobs;
/**
* This describes the macroblock memory layout.
typedef struct VqaContext {
AVCodecContext *avctx;
- AVFrame frame;
GetByteContext gb;
uint32_t palette[PALETTE_COUNT];
}
s->next_codebook_buffer_index = 0;
- s->frame.data[0] = NULL;
-
return 0;
fail:
av_freep(&s->codebook);
return 0; // let's display what we decoded anyway
}
-static int vqa_decode_chunk(VqaContext *s)
+static int vqa_decode_chunk(VqaContext *s, AVFrame *frame)
{
unsigned int chunk_type;
unsigned int chunk_size;
index_shift = 3;
for (y = 0; y < s->height; y += s->vector_height) {
for (x = 0; x < s->width; x += 4, lobytes++, hibytes++) {
- pixel_ptr = y * s->frame.linesize[0] + x;
+ pixel_ptr = y * frame->linesize[0] + x;
/* get the vector index, the method for which varies according to
* VQA file version */
/* uniform color fill - a quick hack */
if (hibyte == 0xFF) {
while (lines--) {
- s->frame.data[0][pixel_ptr + 0] = 255 - lobyte;
- s->frame.data[0][pixel_ptr + 1] = 255 - lobyte;
- s->frame.data[0][pixel_ptr + 2] = 255 - lobyte;
- s->frame.data[0][pixel_ptr + 3] = 255 - lobyte;
- pixel_ptr += s->frame.linesize[0];
+ frame->data[0][pixel_ptr + 0] = 255 - lobyte;
+ frame->data[0][pixel_ptr + 1] = 255 - lobyte;
+ frame->data[0][pixel_ptr + 2] = 255 - lobyte;
+ frame->data[0][pixel_ptr + 3] = 255 - lobyte;
+ pixel_ptr += frame->linesize[0];
}
lines=0;
}
}
while (lines--) {
- s->frame.data[0][pixel_ptr + 0] = s->codebook[vector_index++];
- s->frame.data[0][pixel_ptr + 1] = s->codebook[vector_index++];
- s->frame.data[0][pixel_ptr + 2] = s->codebook[vector_index++];
- s->frame.data[0][pixel_ptr + 3] = s->codebook[vector_index++];
- pixel_ptr += s->frame.linesize[0];
+ frame->data[0][pixel_ptr + 0] = s->codebook[vector_index++];
+ frame->data[0][pixel_ptr + 1] = s->codebook[vector_index++];
+ frame->data[0][pixel_ptr + 2] = s->codebook[vector_index++];
+ frame->data[0][pixel_ptr + 3] = s->codebook[vector_index++];
+ pixel_ptr += frame->linesize[0];
}
}
}
AVPacket *avpkt)
{
VqaContext *s = avctx->priv_data;
+ AVFrame *frame = data;
int res;
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
-
- if ((res = ff_get_buffer(avctx, &s->frame)) < 0) {
+ if ((res = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(s->avctx, AV_LOG_ERROR, " VQA Video: get_buffer() failed\n");
return res;
}
bytestream2_init(&s->gb, avpkt->data, avpkt->size);
- if ((res = vqa_decode_chunk(s)) < 0)
+ if ((res = vqa_decode_chunk(s, frame)) < 0)
return res;
/* make the palette available on the way out */
- memcpy(s->frame.data[1], s->palette, PALETTE_COUNT * 4);
- s->frame.palette_has_changed = 1;
+ memcpy(frame->data[1], s->palette, PALETTE_COUNT * 4);
+ frame->palette_has_changed = 1;
*got_frame = 1;
- *(AVFrame*)data = s->frame;
/* report that the buffer was completely consumed */
return avpkt->size;
av_freep(&s->next_codebook_buffer);
av_freep(&s->decode_buffer);
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
-
return 0;
}
/* get output buffer */
frame->nb_samples = s->samples;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
/* get output buffer */
frame->nb_samples = nb_frames * s->frame_len;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
int more_frames = 0, len = 0, i, ret;
s->frame.nb_samples = s->samples_per_frame;
- if ((ret = ff_get_buffer(s->avctx, &s->frame)) < 0) {
+ if ((ret = ff_get_buffer(s->avctx, &s->frame, 0)) < 0) {
/* return an error if no frame could be decoded at all */
av_log(s->avctx, AV_LOG_ERROR,
"not enough space for the output samples\n");
/* get output buffer */
frame->nb_samples = s->samples_per_frame;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
s->packet_loss = 1;
return 0;
/* get output buffer */
frame->nb_samples = 480;
- if ((res = ff_get_buffer(ctx, frame)) < 0) {
+ if ((res = ff_get_buffer(ctx, frame, 0)) < 0) {
av_log(ctx, AV_LOG_ERROR, "get_buffer() failed\n");
return res;
}
static void parse_mb_skip(Wmv2Context * w){
int mb_x, mb_y;
MpegEncContext * const s= &w->s;
- uint32_t * const mb_type = s->current_picture_ptr->f.mb_type;
+ uint32_t * const mb_type = s->current_picture_ptr->mb_type;
w->skip_type= get_bits(&s->gb, 2);
switch(w->skip_type){
wrap = s->b8_stride;
xy = s->block_index[0];
- mot_val = s->current_picture.f.motion_val[0][xy];
+ mot_val = s->current_picture.motion_val[0][xy];
- A = s->current_picture.f.motion_val[0][xy - 1];
- B = s->current_picture.f.motion_val[0][xy - wrap];
- C = s->current_picture.f.motion_val[0][xy + 2 - wrap];
+ A = s->current_picture.motion_val[0][xy - 1];
+ B = s->current_picture.motion_val[0][xy - wrap];
+ C = s->current_picture.motion_val[0][xy + 2 - wrap];
if(s->mb_x && !s->first_slice_line && !s->mspel && w->top_left_mv_flag)
diff= FFMAX(FFABS(A[0] - B[0]), FFABS(A[1] - B[1]));
if(w->j_type) return 0;
if (s->pict_type == AV_PICTURE_TYPE_P) {
- if (IS_SKIP(s->current_picture.f.mb_type[s->mb_y * s->mb_stride + s->mb_x])) {
+ if (IS_SKIP(s->current_picture.mb_type[s->mb_y * s->mb_stride + s->mb_x])) {
/* skip mb */
s->mb_intra = 0;
for(i=0;i<6;i++)
typedef struct WNV1Context {
AVCodecContext *avctx;
- AVFrame pic;
int shift;
GetBitContext gb;
WNV1Context * const l = avctx->priv_data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
- AVFrame * const p = &l->pic;
+ AVFrame * const p = data;
unsigned char *Y,*U,*V;
int i, j, ret;
int prev_y = 0, prev_u = 0, prev_v = 0;
return AVERROR(ENOMEM);
}
- if (p->data[0])
- avctx->release_buffer(avctx, p);
-
- p->reference = 0;
- if ((ret = ff_get_buffer(avctx, p)) < 0) {
+ if ((ret = ff_get_buffer(avctx, p, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
av_free(rbuf);
return ret;
*got_frame = 1;
- *(AVFrame*)data = l->pic;
av_free(rbuf);
return buf_size;
return 0;
}
-static av_cold int decode_end(AVCodecContext *avctx)
-{
- WNV1Context * const l = avctx->priv_data;
- AVFrame *pic = &l->pic;
-
- if (pic->data[0])
- avctx->release_buffer(avctx, pic);
-
- return 0;
-}
-
AVCodec ff_wnv1_decoder = {
.name = "wnv1",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_WNV1,
.priv_data_size = sizeof(WNV1Context),
.init = decode_init,
- .close = decode_end,
.decode = decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Winnov WNV1"),
/* get output buffer */
frame->nb_samples = out_size;
- if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
AVCodecContext *avctx;
AVFrame last_frame;
- AVFrame current_frame;
const unsigned char *buf;
int size;
}
}
-static inline void xan_wc3_output_pixel_run(XanContext *s,
+static inline void xan_wc3_output_pixel_run(XanContext *s, AVFrame *frame,
const unsigned char *pixel_buffer, int x, int y, int pixel_count)
{
int stride;
int width = s->avctx->width;
unsigned char *palette_plane;
- palette_plane = s->current_frame.data[0];
- stride = s->current_frame.linesize[0];
+ palette_plane = frame->data[0];
+ stride = frame->linesize[0];
line_inc = stride - width;
index = y * stride + x;
current_x = x;
}
}
-static inline void xan_wc3_copy_pixel_run(XanContext *s, int x, int y,
+static inline void xan_wc3_copy_pixel_run(XanContext *s, AVFrame *frame,
+ int x, int y,
int pixel_count, int motion_x,
int motion_y)
{
x + motion_x < 0 || x + motion_x >= s->avctx->width)
return;
- palette_plane = s->current_frame.data[0];
+ palette_plane = frame->data[0];
prev_palette_plane = s->last_frame.data[0];
if (!prev_palette_plane)
prev_palette_plane = palette_plane;
- stride = s->current_frame.linesize[0];
+ stride = frame->linesize[0];
line_inc = stride - width;
curframe_index = y * stride + x;
curframe_x = x;
}
}
-static int xan_wc3_decode_frame(XanContext *s) {
+static int xan_wc3_decode_frame(XanContext *s, AVFrame *frame)
+{
int width = s->avctx->width;
int height = s->avctx->height;
flag ^= 1;
if (flag) {
/* run of (size) pixels is unchanged from last frame */
- xan_wc3_copy_pixel_run(s, x, y, size, 0, 0);
+ xan_wc3_copy_pixel_run(s, frame, x, y, size, 0, 0);
} else {
/* output a run of pixels from imagedata_buffer */
if (imagedata_size < size)
break;
- xan_wc3_output_pixel_run(s, imagedata_buffer, x, y, size);
+ xan_wc3_output_pixel_run(s, frame, imagedata_buffer, x, y, size);
imagedata_buffer += size;
imagedata_size -= size;
}
vector_segment++;
/* copy a run of pixels from the previous frame */
- xan_wc3_copy_pixel_run(s, x, y, size, motion_x, motion_y);
+ xan_wc3_copy_pixel_run(s, frame, x, y, size, motion_x, motion_y);
flag = 0;
}
void *data, int *got_frame,
AVPacket *avpkt)
{
+ AVFrame *frame = data;
const uint8_t *buf = avpkt->data;
int ret, buf_size = avpkt->size;
XanContext *s = avctx->priv_data;
return AVERROR_INVALIDDATA;
}
- if ((ret = ff_get_buffer(avctx, &s->current_frame))) {
+ if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF))) {
av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
- s->current_frame.reference = 3;
if (!s->frame_size)
- s->frame_size = s->current_frame.linesize[0] * s->avctx->height;
+ s->frame_size = frame->linesize[0] * s->avctx->height;
- memcpy(s->current_frame.data[1],
+ memcpy(frame->data[1],
s->palettes + s->cur_palette * AVPALETTE_COUNT, AVPALETTE_SIZE);
s->buf = ctx.buffer;
s->size = buf_size;
- if (xan_wc3_decode_frame(s) < 0)
+ if (xan_wc3_decode_frame(s, frame) < 0)
return AVERROR_INVALIDDATA;
- /* release the last frame if it is allocated */
- if (s->last_frame.data[0])
- avctx->release_buffer(avctx, &s->last_frame);
+ av_frame_unref(&s->last_frame);
+ if ((ret = av_frame_ref(&s->last_frame, frame)) < 0)
+ return ret;
*got_frame = 1;
- *(AVFrame*)data = s->current_frame;
-
- /* shuffle frames */
- FFSWAP(AVFrame, s->current_frame, s->last_frame);
/* always report that the buffer was completely consumed */
return buf_size;
{
XanContext *s = avctx->priv_data;
- /* release the frames */
- if (s->last_frame.data[0])
- avctx->release_buffer(avctx, &s->last_frame);
- if (s->current_frame.data[0])
- avctx->release_buffer(avctx, &s->current_frame);
+ av_frame_unref(&s->last_frame);
av_freep(&s->buffer1);
av_freep(&s->buffer2);
#include "avcodec.h"
#include "internal.h"
-typedef struct VideoXLContext{
- AVCodecContext *avctx;
- AVFrame pic;
-} VideoXLContext;
-
static const int xl_table[32] = {
0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 12, 15, 20, 25, 34, 46,
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
- VideoXLContext * const a = avctx->priv_data;
- AVFrame * const p = &a->pic;
+ AVFrame * const p = data;
uint8_t *Y, *U, *V;
int i, j, ret;
int stride;
uint32_t val;
int y0, y1, y2, y3 = 0, c0 = 0, c1 = 0;
- if(p->data[0])
- avctx->release_buffer(avctx, p);
-
- p->reference = 0;
- if ((ret = ff_get_buffer(avctx, p)) < 0){
+ if ((ret = ff_get_buffer(avctx, p, 0)) < 0){
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1;
- Y = a->pic.data[0];
- U = a->pic.data[1];
- V = a->pic.data[2];
+ Y = p->data[0];
+ U = p->data[1];
+ V = p->data[2];
stride = avctx->width - 4;
}
buf += avctx->width + 4;
- Y += a->pic.linesize[0];
- U += a->pic.linesize[1];
- V += a->pic.linesize[2];
+ Y += p->linesize[0];
+ U += p->linesize[1];
+ V += p->linesize[2];
}
*got_frame = 1;
- *(AVFrame*)data = a->pic;
return buf_size;
}
static av_cold int decode_init(AVCodecContext *avctx){
-// VideoXLContext * const a = avctx->priv_data;
-
avctx->pix_fmt= AV_PIX_FMT_YUV411P;
return 0;
}
-static av_cold int decode_end(AVCodecContext *avctx){
- VideoXLContext * const a = avctx->priv_data;
- AVFrame *pic = &a->pic;
-
- if (pic->data[0])
- avctx->release_buffer(avctx, pic);
-
- return 0;
-}
-
AVCodec ff_xl_decoder = {
.name = "xl",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_VIXL,
- .priv_data_size = sizeof(VideoXLContext),
.init = decode_init,
- .close = decode_end,
.decode = decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Miro VideoXL"),
#include "internal.h"
#include "xwd.h"
-static av_cold int xwd_decode_init(AVCodecContext *avctx)
-{
- avctx->coded_frame = avcodec_alloc_frame();
- if (!avctx->coded_frame)
- return AVERROR(ENOMEM);
-
- return 0;
-}
-
static int xwd_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
- AVFrame *p = avctx->coded_frame;
+ AVFrame *p = data;
const uint8_t *buf = avpkt->data;
int i, ret, buf_size = avpkt->size;
uint32_t version, header_size, vclass, ncolors;
return AVERROR_PATCHWELCOME;
}
- if (p->data[0])
- avctx->release_buffer(avctx, p);
-
- p->reference = 0;
- if ((ret = ff_get_buffer(avctx, p)) < 0) {
+ if ((ret = ff_get_buffer(avctx, p, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
}
*got_frame = 1;
- *(AVFrame *)data = *p;
return buf_size;
}
-static av_cold int xwd_decode_close(AVCodecContext *avctx)
-{
- if (avctx->coded_frame->data[0])
- avctx->release_buffer(avctx, avctx->coded_frame);
-
- av_freep(&avctx->coded_frame);
-
- return 0;
-}
-
AVCodec ff_xwd_decoder = {
.name = "xwd",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_XWD,
- .init = xwd_decode_init,
- .close = xwd_decode_close,
.decode = xwd_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("XWD (X Window Dump) image"),
#include "bytestream.h"
#define BITSTREAM_READER_LE
#include "get_bits.h"
+#include "internal.h"
typedef struct XanContext {
AVCodecContext *avctx;
int ftype;
int ret;
- s->pic.reference = 1;
- s->pic.buffer_hints = FF_BUFFER_HINTS_VALID |
- FF_BUFFER_HINTS_PRESERVE |
- FF_BUFFER_HINTS_REUSABLE;
- if ((ret = avctx->reget_buffer(avctx, &s->pic))) {
+ if ((ret = ff_reget_buffer(avctx, &s->pic))) {
av_log(s->avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
if (ret)
return ret;
+ if ((ret = av_frame_ref(data, &s->pic)) < 0)
+ return ret;
+
*got_frame = 1;
- *(AVFrame*)data = s->pic;
return avpkt->size;
}
{
XanContext *s = avctx->priv_data;
- if (s->pic.data[0])
- avctx->release_buffer(avctx, &s->pic);
+ av_frame_unref(&s->pic);
av_freep(&s->y_buffer);
av_freep(&s->scratch_buffer);
#include "internal.h"
typedef struct YopDecContext {
- AVFrame frame;
AVCodecContext *avctx;
int num_pal_colors;
return 0;
}
-static av_cold int yop_decode_close(AVCodecContext *avctx)
-{
- YopDecContext *s = avctx->priv_data;
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
- return 0;
-}
-
/**
* Paint a macroblock using the pattern in paint_lut.
* @param s codec context
* @param tag the tag that was in the nibble
*/
-static int yop_paint_block(YopDecContext *s, int tag)
+static int yop_paint_block(YopDecContext *s, int linesize, int tag)
{
if (s->src_end - s->srcptr < paint_lut[tag][3]) {
av_log(s->avctx, AV_LOG_ERROR, "Packet too small.\n");
return AVERROR_INVALIDDATA;
}
- s->dstptr[0] = s->srcptr[0];
- s->dstptr[1] = s->srcptr[paint_lut[tag][0]];
- s->dstptr[s->frame.linesize[0]] = s->srcptr[paint_lut[tag][1]];
- s->dstptr[s->frame.linesize[0] + 1] = s->srcptr[paint_lut[tag][2]];
+ s->dstptr[0] = s->srcptr[0];
+ s->dstptr[1] = s->srcptr[paint_lut[tag][0]];
+ s->dstptr[linesize] = s->srcptr[paint_lut[tag][1]];
+ s->dstptr[linesize + 1] = s->srcptr[paint_lut[tag][2]];
// The number of src bytes consumed is in the last part of the lut entry.
s->srcptr += paint_lut[tag][3];
* Copy a previously painted macroblock to the current_block.
* @param copy_tag the tag that was in the nibble
*/
-static int yop_copy_previous_block(YopDecContext *s, int copy_tag)
+static int yop_copy_previous_block(YopDecContext *s, int linesize, int copy_tag)
{
uint8_t *bufptr;
// Calculate position for the copy source
bufptr = s->dstptr + motion_vector[copy_tag][0] +
- s->frame.linesize[0] * motion_vector[copy_tag][1];
+ linesize * motion_vector[copy_tag][1];
if (bufptr < s->dstbuf) {
av_log(s->avctx, AV_LOG_ERROR,
"YOP: cannot decode, file probably corrupt\n");
return AVERROR_INVALIDDATA;
}
- s->dstptr[0] = bufptr[0];
- s->dstptr[1] = bufptr[1];
- s->dstptr[s->frame.linesize[0]] = bufptr[s->frame.linesize[0]];
- s->dstptr[s->frame.linesize[0] + 1] = bufptr[s->frame.linesize[0] + 1];
+ s->dstptr[0] = bufptr[0];
+ s->dstptr[1] = bufptr[1];
+ s->dstptr[linesize] = bufptr[linesize];
+ s->dstptr[linesize + 1] = bufptr[linesize + 1];
return 0;
}
AVPacket *avpkt)
{
YopDecContext *s = avctx->priv_data;
+ AVFrame *frame = data;
int tag, firstcolor, is_odd_frame;
int ret, i, x, y;
uint32_t *palette;
return AVERROR_INVALIDDATA;
}
- if (s->frame.data[0])
- avctx->release_buffer(avctx, &s->frame);
-
- ret = ff_get_buffer(avctx, &s->frame);
+ ret = ff_get_buffer(avctx, frame, 0);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
if (!avctx->frame_number)
- memset(s->frame.data[1], 0, AVPALETTE_SIZE);
+ memset(frame->data[1], 0, AVPALETTE_SIZE);
- s->dstbuf = s->frame.data[0];
- s->dstptr = s->frame.data[0];
+ s->dstbuf = frame->data[0];
+ s->dstptr = frame->data[0];
s->srcptr = avpkt->data + 4;
s->src_end = avpkt->data + avpkt->size;
s->low_nibble = NULL;
is_odd_frame = avpkt->data[0];
firstcolor = s->first_color[is_odd_frame];
- palette = (uint32_t *)s->frame.data[1];
+ palette = (uint32_t *)frame->data[1];
for (i = 0; i < s->num_pal_colors; i++, s->srcptr += 3)
palette[i + firstcolor] = (s->srcptr[0] << 18) |
(s->srcptr[1] << 10) |
(s->srcptr[2] << 2);
- s->frame.palette_has_changed = 1;
+ frame->palette_has_changed = 1;
for (y = 0; y < avctx->height; y += 2) {
for (x = 0; x < avctx->width; x += 2) {
tag = yop_get_next_nibble(s);
if (tag != 0xf) {
- ret = yop_paint_block(s, tag);
+ ret = yop_paint_block(s, frame->linesize[0], tag);
if (ret < 0)
return ret;
} else {
tag = yop_get_next_nibble(s);
- ret = yop_copy_previous_block(s, tag);
- if (ret < 0) {
- avctx->release_buffer(avctx, &s->frame);
+ ret = yop_copy_previous_block(s, frame->linesize[0], tag);
+ if (ret < 0)
return ret;
- }
}
s->dstptr += 2;
}
- s->dstptr += 2*s->frame.linesize[0] - x;
+ s->dstptr += 2*frame->linesize[0] - x;
}
*got_frame = 1;
- *(AVFrame *) data = s->frame;
return avpkt->size;
}
.id = AV_CODEC_ID_YOP,
.priv_data_size = sizeof(YopDecContext),
.init = yop_decode_init,
- .close = yop_decode_close,
.decode = yop_decode_frame,
.long_name = NULL_IF_CONFIG_SMALL("Psygnosis YOP Video"),
.capabilities = CODEC_CAP_DR1,
int *got_frame, AVPacket *avpkt)
{
ZeroCodecContext *zc = avctx->priv_data;
- AVFrame *pic = avctx->coded_frame;
+ AVFrame *pic = data;
AVFrame *prev_pic = &zc->previous_frame;
z_stream *zstream = &zc->zstream;
uint8_t *prev = prev_pic->data[0];
uint8_t *dst;
- int i, j, zret;
-
- pic->reference = 3;
+ int i, j, zret, ret;
if (avpkt->flags & AV_PKT_FLAG_KEY) {
pic->key_frame = 1;
return AVERROR_INVALIDDATA;
}
- if (ff_get_buffer(avctx, pic) < 0) {
+ if (ff_get_buffer(avctx, pic, AV_GET_BUFFER_FLAG_REF) < 0) {
av_log(avctx, AV_LOG_ERROR, "Could not allocate buffer.\n");
return AVERROR(ENOMEM);
}
zret = inflate(zstream, Z_SYNC_FLUSH);
if (zret != Z_OK && zret != Z_STREAM_END) {
- avctx->release_buffer(avctx, pic);
av_log(avctx, AV_LOG_ERROR,
"Inflate failed with return code: %d.\n", zret);
return AVERROR_INVALIDDATA;
dst -= pic->linesize[0];
}
- /* Release the previous buffer if need be */
- if (prev_pic->data[0])
- avctx->release_buffer(avctx, prev_pic);
+ av_frame_unref(&zc->previous_frame);
+ if ((ret = av_frame_ref(&zc->previous_frame, pic)) < 0)
+ return ret;
*got_frame = 1;
- *(AVFrame *)data = *pic;
-
- /* Store the previous frame for use later.
- * FFSWAP ensures that e.g. pic->data is NULLed. */
- FFSWAP(AVFrame, *pic, *prev_pic);
return avpkt->size;
}
static av_cold int zerocodec_decode_close(AVCodecContext *avctx)
{
ZeroCodecContext *zc = avctx->priv_data;
- AVFrame *prev_pic = &zc->previous_frame;
-
- inflateEnd(&zc->zstream);
- /* Release last frame */
- if (prev_pic->data[0])
- avctx->release_buffer(avctx, prev_pic);
+ av_frame_unref(&zc->previous_frame);
- av_freep(&avctx->coded_frame);
+ inflateEnd(&zc->zstream);
return 0;
}
return AVERROR(ENOMEM);
}
- avctx->coded_frame = avcodec_alloc_frame();
- if (!avctx->coded_frame) {
- av_log(avctx, AV_LOG_ERROR, "Could not allocate frame buffer.\n");
- zerocodec_decode_close(avctx);
- return AVERROR(ENOMEM);
- }
-
return 0;
}
*/
typedef struct ZmbvContext {
AVCodecContext *avctx;
- AVFrame pic;
int bpp;
unsigned int decomp_size;
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
{
+ AVFrame *frame = data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
ZmbvContext * const c = avctx->priv_data;
int hi_ver, lo_ver, ret;
uint8_t *tmp;
- if (c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
-
- c->pic.reference = 1;
- c->pic.buffer_hints = FF_BUFFER_HINTS_VALID;
- if ((ret = ff_get_buffer(avctx, &c->pic)) < 0) {
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
c->decomp_len = c->zstream.total_out;
}
if (c->flags & ZMBV_KEYFRAME) {
- c->pic.key_frame = 1;
- c->pic.pict_type = AV_PICTURE_TYPE_I;
+ frame->key_frame = 1;
+ frame->pict_type = AV_PICTURE_TYPE_I;
c->decode_intra(c);
} else {
- c->pic.key_frame = 0;
- c->pic.pict_type = AV_PICTURE_TYPE_P;
+ frame->key_frame = 0;
+ frame->pict_type = AV_PICTURE_TYPE_P;
if (c->decomp_len)
c->decode_xor(c);
}
uint8_t *out, *src;
int i, j;
- out = c->pic.data[0];
+ out = frame->data[0];
src = c->cur;
switch (c->fmt) {
case ZMBV_FMT_8BPP:
out[i * 3 + 2] = c->pal[(*src) * 3 + 2];
src++;
}
- out += c->pic.linesize[0];
+ out += frame->linesize[0];
}
break;
case ZMBV_FMT_15BPP:
out[i * 3 + 1] = (tmp & 0x03E0) >> 2;
out[i * 3 + 2] = (tmp & 0x001F) << 3;
}
- out += c->pic.linesize[0];
+ out += frame->linesize[0];
}
break;
case ZMBV_FMT_16BPP:
out[i * 3 + 1] = (tmp & 0x07E0) >> 3;
out[i * 3 + 2] = (tmp & 0x001F) << 3;
}
- out += c->pic.linesize[0];
+ out += frame->linesize[0];
}
break;
#ifdef ZMBV_ENABLE_24BPP
for (j = 0; j < c->height; j++) {
memcpy(out, src, c->width * 3);
src += c->width * 3;
- out += c->pic.linesize[0];
+ out += frame->linesize[0];
}
break;
#endif //ZMBV_ENABLE_24BPP
src += 4;
AV_WB24(out+(i*3), tmp);
}
- out += c->pic.linesize[0];
+ out += frame->linesize[0];
}
break;
default:
FFSWAP(uint8_t *, c->cur, c->prev);
}
*got_frame = 1;
- *(AVFrame*)data = c->pic;
/* always report that the buffer was completely consumed */
return buf_size;
av_freep(&c->decomp_buf);
- if (c->pic.data[0])
- avctx->release_buffer(avctx, &c->pic);
inflateEnd(&c->zstream);
av_freep(&c->cur);
av_freep(&c->prev);