int *frame_size)
{
int nb_frames, i, ret, resample_changed;
- AVFrame *final_picture, *formatted_picture, *resampling_dst;
+ AVFrame *final_picture, *formatted_picture;
AVCodecContext *enc, *dec;
double sync_ipts;
formatted_picture = in_picture;
final_picture = formatted_picture;
- resampling_dst = &ost->pict_tmp;
resample_changed = ost->resample_width != dec->width ||
ost->resample_height != dec->height ||
}
}
sws_scale(ost->img_resample_ctx, formatted_picture->data, formatted_picture->linesize,
- 0, ost->resample_height, resampling_dst->data, resampling_dst->linesize);
+ 0, ost->resample_height, final_picture->data, final_picture->linesize);
}
#endif
static void new_data_stream(AVFormatContext *oc, int file_idx)
{
AVStream *st;
- AVOutputStream *ost;
AVCodec *codec=NULL;
AVCodecContext *data_enc;
fprintf(stderr, "Could not alloc stream\n");
ffmpeg_exit(1);
}
- ost = new_output_stream(oc, file_idx);
+ new_output_stream(oc, file_idx);
data_enc = st->codec;
output_codecs = grow_array(output_codecs, sizeof(*output_codecs), &nb_output_codecs, nb_output_codecs + 1);
if (!data_stream_copy) {
int b_width;
int req_size;
- int num_frames = c->mc_lifetime;
int *charmap = c->mc_charmap;
uint8_t *colram = c->mc_colram;
if (!c->mc_lifetime) return 0;
/* no more frames in queue, prepare to flush remaining frames */
if (!c->mc_frame_counter) {
- num_frames = c->mc_lifetime;
c->mc_lifetime = 0;
}
/* still frames in queue so limit lifetime to remaining frames */
int win, int group_len, const float lambda)
{
BandCodingPath path[120][12];
- int w, swb, cb, start, start2, size;
+ int w, swb, cb, start, size;
int i, j;
const int max_sfb = sce->ics.max_sfb;
const int run_bits = sce->ics.num_windows == 1 ? 5 : 3;
path[0][cb].run = 0;
}
for (swb = 0; swb < max_sfb; swb++) {
- start2 = start;
size = sce->ics.swb_sizes[swb];
if (sce->zeroes[win*16 + swb]) {
for (cb = 0; cb < 12; cb++) {
int win, int group_len, const float lambda)
{
BandCodingPath path[120][12];
- int w, swb, cb, start, start2, size;
+ int w, swb, cb, start, size;
int i, j;
const int max_sfb = sce->ics.max_sfb;
const int run_bits = sce->ics.num_windows == 1 ? 5 : 3;
path[0][cb].run = 0;
}
for (swb = 0; swb < max_sfb; swb++) {
- start2 = start;
size = sce->ics.swb_sizes[swb];
if (sce->zeroes[win*16 + swb]) {
for (cb = 0; cb < 12; cb++) {
SingleChannelElement *sce,
const float lambda)
{
- int start = 0, i, w, w2, g;
+ int i, w, w2, g;
int minq = 255;
memset(sce->sf_idx, 0, sizeof(sce->sf_idx));
for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
- start = w*128;
for (g = 0; g < sce->ics.num_swb; g++) {
for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
FFPsyBand *band = &s->psy.psy_bands[s->cur_channel*PSY_MAX_BANDS+(w+w2)*16+g];
old_out2 = out[-2];
old_out3 = out[-1];
for (n = 0; n <= buffer_length - 4; n+=4) {
- float tmp0,tmp1,tmp2,tmp3;
+ float tmp0,tmp1,tmp2;
float val;
out0 = in[0];
tmp0 = out0;
tmp1 = out1;
tmp2 = out2;
- tmp3 = out3;
out3 -= a * tmp2;
out2 -= a * tmp1;
{
int ss_index;
int blownup;
- int header_size;
- int hd_size;
int num_audiop = 1;
int num_assets = 1;
int active_ss_mask[8];
ss_index = get_bits(&s->gb, 2);
blownup = get_bits1(&s->gb);
- header_size = get_bits(&s->gb, 8 + 4 * blownup) + 1;
- hd_size = get_bits_long(&s->gb, 16 + 4 * blownup) + 1;
+ skip_bits(&s->gb, 8 + 4 * blownup); /* header_size */
+ skip_bits(&s->gb, 16 + 4 * blownup); /* hd_size */
s->static_fields = get_bits1(&s->gb);
if (s->static_fields) {
int ff_dirac_parse_sequence_header(AVCodecContext *avctx, GetBitContext *gb,
dirac_source_params *source)
{
- unsigned version_major, version_minor;
+ unsigned version_major;
unsigned video_format, picture_coding_mode;
version_major = svq3_get_ue_golomb(gb);
- version_minor = svq3_get_ue_golomb(gb);
+ svq3_get_ue_golomb(gb); /* version_minor */
avctx->profile = svq3_get_ue_golomb(gb);
avctx->level = svq3_get_ue_golomb(gb);
video_format = svq3_get_ue_golomb(gb);
*/
static int h263_decode_gob_header(MpegEncContext *s)
{
- unsigned int val, gfid, gob_number;
+ unsigned int val, gob_number;
int left;
/* Check for GOB Start Code */
s->qscale = get_bits(&s->gb, 5); /* SQUANT */
if(get_bits1(&s->gb)==0)
return -1;
- gfid = get_bits(&s->gb, 2); /* GFID */
+ skip_bits(&s->gb, 2); /* GFID */
}else{
gob_number = get_bits(&s->gb, 5); /* GN */
s->mb_x= 0;
s->mb_y= s->gob_index* gob_number;
- gfid = get_bits(&s->gb, 2); /* GFID */
+ skip_bits(&s->gb, 2); /* GFID */
s->qscale = get_bits(&s->gb, 5); /* GQUANT */
}
Mpeg1Context *s1 = avctx->priv_data;
MpegEncContext *s = &s1->mpeg_enc_ctx;
- int drop_frame_flag;
int time_code_hours, time_code_minutes;
int time_code_seconds, time_code_pictures;
int broken_link;
init_get_bits(&s->gb, buf, buf_size*8);
- drop_frame_flag = get_bits1(&s->gb);
+ skip_bits1(&s->gb); /* drop_frame_flag */
time_code_hours=get_bits(&s->gb,5);
time_code_minutes = get_bits(&s->gb,6);
if(mv==0) len= ff_mpeg12_mbMotionVectorTable[0][1];
else{
- int val, bit_size, range, code;
+ int val, bit_size, code;
bit_size = f_code - 1;
- range = 1 << bit_size;
val=mv;
if (val < 0)
header_extension= get_bits1(&s->gb);
}
if(header_extension){
- int time_increment;
int time_incr=0;
while (get_bits1(&s->gb) != 0)
time_incr++;
check_marker(&s->gb, "before time_increment in video packed header");
- time_increment= get_bits(&s->gb, s->time_increment_bits);
+ skip_bits(&s->gb, s->time_increment_bits); /* time_increment */
check_marker(&s->gb, "before vop_coding_type in video packed header");
skip_bits(&s->gb, 2); /* vop coding type */
if (s->scalability) {
GetBitContext bak= *gb;
- int ref_layer_id;
- int ref_layer_sampling_dir;
int h_sampling_factor_n;
int h_sampling_factor_m;
int v_sampling_factor_n;
int v_sampling_factor_m;
s->hierachy_type= get_bits1(gb);
- ref_layer_id= get_bits(gb, 4);
- ref_layer_sampling_dir= get_bits1(gb);
+ skip_bits(gb, 4); /* ref_layer_id */
+ skip_bits1(gb); /* ref_layer_sampling_dir */
h_sampling_factor_n= get_bits(gb, 5);
h_sampling_factor_m= get_bits(gb, 5);
v_sampling_factor_n= get_bits(gb, 5);
if (s->shape != RECT_SHAPE) {
if (s->vol_sprite_usage != 1 || s->pict_type != AV_PICTURE_TYPE_I) {
- int width, height, hor_spat_ref, ver_spat_ref;
-
- width = get_bits(gb, 13);
+ skip_bits(gb, 13); /* width */
skip_bits1(gb); /* marker */
- height = get_bits(gb, 13);
+ skip_bits(gb, 13); /* height */
skip_bits1(gb); /* marker */
- hor_spat_ref = get_bits(gb, 13); /* hor_spat_ref */
+ skip_bits(gb, 13); /* hor_spat_ref */
skip_bits1(gb); /* marker */
- ver_spat_ref = get_bits(gb, 13); /* ver_spat_ref */
+ skip_bits(gb, 13); /* ver_spat_ref */
}
skip_bits1(gb); /* change_CR_disable */
uint32_t start_code;
int frame_rate_index, ext_type, bytes_left;
int frame_rate_ext_n, frame_rate_ext_d;
- int picture_structure, top_field_first, repeat_first_field, progressive_frame;
+ int top_field_first, repeat_first_field, progressive_frame;
int horiz_size_ext, vert_size_ext, bit_rate_ext;
int did_set_size=0;
//FIXME replace the crap with get_bits()
break;
case 0x8: /* picture coding extension */
if (bytes_left >= 5) {
- picture_structure = buf[2]&3;
top_field_first = buf[3] & (1 << 7);
repeat_first_field = buf[3] & (1 << 1);
progressive_frame = buf[4] & (1 << 7);
/* the alt_bitstream reader could read over the end so we need to check it */
if(left>=length && left<length+8)
{
- int fps;
-
- fps= get_bits(&s->gb, 5);
+ skip_bits(&s->gb, 5); /* fps */
s->bit_rate= get_bits(&s->gb, 11)*1024;
if(s->msmpeg4_version>=3)
s->flipflop_rounding= get_bits1(&s->gb);
AVFrame *p;
uint8_t *crow_buf_base = NULL;
uint32_t tag, length;
- int ret, crc;
+ int ret;
FFSWAP(AVFrame *, s->current_picture, s->last_picture);
avctx->coded_frame= s->current_picture;
s->compression_type = *s->bytestream++;
s->filter_type = *s->bytestream++;
s->interlace_type = *s->bytestream++;
- crc = bytestream_get_be32(&s->bytestream);
+ s->bytestream += 4; /* crc */
s->state |= PNG_IHDR;
av_dlog(avctx, "width=%d height=%d depth=%d color_type=%d compression_type=%d filter_type=%d interlace_type=%d\n",
s->width, s->height, s->bit_depth, s->color_type,
s->state |= PNG_IDAT;
if (png_decode_idat(s, length) < 0)
goto fail;
- /* skip crc */
- crc = bytestream_get_be32(&s->bytestream);
+ s->bytestream += 4; /* crc */
break;
case MKTAG('P', 'L', 'T', 'E'):
{
s->palette[i] = (0xff << 24);
}
s->state |= PNG_PLTE;
- crc = bytestream_get_be32(&s->bytestream);
+ s->bytestream += 4; /* crc */
}
break;
case MKTAG('t', 'R', 'N', 'S'):
v = *s->bytestream++;
s->palette[i] = (s->palette[i] & 0x00ffffff) | (v << 24);
}
- crc = bytestream_get_be32(&s->bytestream);
+ s->bytestream += 4; /* crc */
}
break;
case MKTAG('I', 'E', 'N', 'D'):
if (!(s->state & PNG_ALLIMAGE))
goto fail;
- crc = bytestream_get_be32(&s->bytestream);
+ s->bytestream += 4; /* crc */
goto exit_loop;
default:
/* skip tag */
/* read RV 1.0 compatible frame header */
static int rv10_decode_picture_header(MpegEncContext *s)
{
- int mb_count, pb_frame, marker, unk, mb_xy;
+ int mb_count, pb_frame, marker, mb_xy;
marker = get_bits1(&s->gb);
s->mb_y = 0;
mb_count = s->mb_width * s->mb_height;
}
- unk= get_bits(&s->gb, 3); /* ignored */
+ skip_bits(&s->gb, 3); /* ignored */
s->f_code = 1;
s->unrestricted_mv = 1;
int buf_size)
{
uint32_t h;
- int frame_size, channels, id, bits;
+ int frame_size, channels, bits;
if (buf_size <= AES3_HEADER_LEN) {
av_log(avctx, AV_LOG_ERROR, "frame is too short\n");
h = AV_RB32(buf);
frame_size = (h >> 16) & 0xffff;
channels = ((h >> 14) & 0x0003) * 2 + 2;
- id = (h >> 6) & 0x00ff;
bits = ((h >> 4) & 0x0003) * 4 + 16;
if (AES3_HEADER_LEN + frame_size != buf_size || bits > 24) {
{
GetBitContext hb;
int len;
- int chunk_size;
short wave_format;
init_get_bits(&hb, header, header_size*8);
return -1;
}
- chunk_size = get_le32(&hb);
+ skip_bits_long(&hb, 32); /* chunk_size */
if (get_le32(&hb) != MKTAG('W','A','V','E')) {
av_log(avctx, AV_LOG_ERROR, "missing WAVE tag\n");
int buf_size = avpkt->size;
AVPacket avpkt_recoded;
const int qscale = 5;
- const uint8_t *buf_ptr;
uint8_t *recoded;
int i = 0, j = 0;
if (!avctx->width || !avctx->height)
return -1;
- buf_ptr = buf;
-
recoded = av_mallocz(buf_size + 1024);
if (!recoded)
return -1;
static int svq1_decode_frame_header (GetBitContext *bitbuf,MpegEncContext *s) {
int frame_size_code;
- int temporal_reference;
- temporal_reference = get_bits (bitbuf, 8);
+ skip_bits(bitbuf, 8); /* temporal_reference */
/* frame type */
s->pict_type= get_bits (bitbuf, 2)+1;
AVFrame * const p= (AVFrame*)&s->picture;
uint8_t *dst;
int stride;
- int idlen, pal, compr, x, y, w, h, bpp, flags;
+ int idlen, compr, y, w, h, bpp, flags;
int first_clr, colors, csize;
/* parse image header */
CHECK_BUFFER_SIZE(buf, buf_end, 18, "header");
idlen = *buf++;
- pal = *buf++;
+ buf++; /* pal */
compr = *buf++;
first_clr = AV_RL16(buf); buf += 2;
colors = AV_RL16(buf); buf += 2;
csize = *buf++;
- x = AV_RL16(buf); buf += 2;
+ buf += 2; /* x */
y = AV_RL16(buf); buf += 2;
w = AV_RL16(buf); buf += 2;
h = AV_RL16(buf); buf += 2;
{
uint32_t magic;
const uint8_t *obuf;
- int length;
obuf = buf;
/* av_log (ctx->avctx, AV_LOG_ERROR, "TM2 old header: not implemented (yet)\n"); */
return 40;
} else if(magic == 0x00000101) { /* new header */
- int w, h, size, flags, xr, yr;
-
- length = AV_RL32(buf);
- buf += 4;
-
- init_get_bits(&ctx->gb, buf, 32 * 8);
- size = get_bits_long(&ctx->gb, 31);
- h = get_bits(&ctx->gb, 15);
- w = get_bits(&ctx->gb, 15);
- flags = get_bits_long(&ctx->gb, 31);
- yr = get_bits(&ctx->gb, 9);
- xr = get_bits(&ctx->gb, 9);
-
return 40;
} else {
av_log (ctx->avctx, AV_LOG_ERROR, "Not a TM2 header: 0x%08X\n", magic);
int buf_size = avpkt->size;
CamtasiaContext * const c = avctx->priv_data;
const unsigned char *encoded = buf;
- unsigned char *outptr;
int zret; // Zlib return code
int len = buf_size;
return -1;
}
- outptr = c->pic.data[0]; // Output image pointer
-
zret = inflateReset(&(c->zstream));
if (zret != Z_OK) {
av_log(avctx, AV_LOG_ERROR, "Inflate reset error: %d\n", zret);
int frame_x, frame_y;
int frame_width, frame_height;
- int dp_size;
frame_x = AV_RL16(&s->buf[6]);
frame_y = AV_RL16(&s->buf[8]);
}
dp = &s->frame.data[0][frame_y * s->frame.linesize[0] + frame_x];
- dp_size = s->frame.linesize[0] * s->avctx->height;
pp = &s->prev_frame.data[0][frame_y * s->prev_frame.linesize[0] + frame_x];
switch (meth) {
case 1:
int x, y, bh2, bw2, xored;
uint8_t *tsrc, *tprev;
uint8_t *mv;
- int mx, my, bv;
+ int mx, my;
bw = (avctx->width + ZMBV_BLOCK - 1) / ZMBV_BLOCK;
bh = (avctx->height + ZMBV_BLOCK - 1) / ZMBV_BLOCK;
tsrc = src + x;
tprev = prev + x;
- bv = zmbv_me(c, tsrc, p->linesize[0], tprev, c->pstride, x, y, &mx, &my, &xored);
+ zmbv_me(c, tsrc, p->linesize[0], tprev, c->pstride, x, y, &mx, &my, &xored);
mv[0] = (mx << 1) | !!xored;
mv[1] = my << 1;
tprev += mx + my * c->pstride;
AVFilterBufferRef *inpicref = inlink->cur_buf;
AVFilterBufferRef *outpicref = outlink->out_buf;
- int h, w, plane, line_step, line_size, line;
+ int h, plane, line_step, line_size, line;
uint8_t *cpy_src, *cpy_dst;
if ( inpicref->video->interlaced
"picture will move %s one line\n",
fieldorder->dst_tff ? "up" : "down");
h = inpicref->video->h;
- w = inpicref->video->w;
for (plane = 0; plane < 4 && inpicref->data[plane]; plane++) {
line_step = inpicref->linesize[plane];
line_size = fieldorder->line_size[plane];
FourxmDemuxContext *fourxm = s->priv_data;
AVIOContext *pb = s->pb;
unsigned int fourcc_tag;
- unsigned int size, out_size;
+ unsigned int size;
int ret = 0;
unsigned int track_number;
int packet_read = 0;
case snd__TAG:
track_number = avio_rl32(pb);
- out_size= avio_rl32(pb);
+ avio_skip(pb, 4); /* out_size */
size-=8;
if (track_number < fourxm->track_count && fourxm->tracks[track_number].channels>0) {
{
AVIOContext *pb = s->pb;
uint8_t key[1024], *value;
- uint32_t size, flags;
+ uint32_t size;
int i, c;
size = avio_rl32(pb); /* field size */
- flags = avio_rl32(pb); /* field flags */
+ avio_skip(pb, 4); /* field flags */
for (i = 0; i < sizeof(key) - 1; i++) {
c = avio_r8(pb);
if (c < 0x20 || c > 0x7E)
ff_asf_guid g;
enum AVMediaType type;
int type_specific_size, sizeX;
- uint64_t total_size;
unsigned int tag1;
int64_t pos1, pos2, start_time;
int test_for_ext_stream_audio, is_dvr_ms_audio=0;
return -1;
}
ff_get_guid(pb, &g);
- total_size = avio_rl64(pb);
+ avio_skip(pb, 8); /* total_size */
type_specific_size = avio_rl32(pb);
avio_rl32(pb);
st->id = avio_rl16(pb) & 0x7f; /* stream id */
AVIOContext *pb = s->pb;
ff_asf_guid g;
int ext_len, payload_ext_ct, stream_ct, i;
- uint32_t ext_d, leak_rate, stream_num;
+ uint32_t leak_rate, stream_num;
unsigned int stream_languageid_index;
avio_rl64(pb); // starttime
for (i=0; i<payload_ext_ct; i++){
ff_get_guid(pb, &g);
- ext_d=avio_rl16(pb);
+ avio_skip(pb, 2); /* ext_d */
ext_len=avio_rl32(pb);
avio_skip(pb, ext_len);
}
{
AVIOContext *pb = s->pb;
ASFContext *asf = s->priv_data;
- int n, stream_num, name_len, value_len, value_type, value_num;
+ int n, stream_num, name_len, value_len, value_num;
int ret, i;
n = avio_rl16(pb);
avio_rl16(pb); //lang_list_index
stream_num= avio_rl16(pb);
name_len= avio_rl16(pb);
- value_type= avio_rl16(pb);
+ avio_skip(pb, 2); /* value_type */
value_len= avio_rl32(pb);
if ((ret = avio_get_str16le(pb, name_len, name, sizeof(name))) < name_len)
// if so the next iteration will pick it up
continue;
} else if (!ff_guidcmp(&g, &ff_asf_head1_guid)) {
- int v1, v2;
ff_get_guid(pb, &g);
- v1 = avio_rl32(pb);
- v2 = avio_rl16(pb);
+ avio_skip(pb, 6); /* v1, v2 */
continue;
} else if (!ff_guidcmp(&g, &ff_asf_marker_header)) {
asf_read_marker(s, gsize);
ASFContext *asf = s->priv_data;
int rsize = 1;
int num = avio_r8(pb);
- int64_t ts0, ts1;
+ int64_t ts0;
asf->packet_segments--;
asf->packet_key_frame = num >> 7;
// av_log(s, AV_LOG_DEBUG, "\n");
avio_skip(pb, 10);
ts0= avio_rl64(pb);
- ts1= avio_rl64(pb);
+ avio_skip(pb, 8); /* ts1 */
avio_skip(pb, 12);
avio_rl32(pb);
avio_skip(pb, asf->packet_replic_size - 8 - 38 - 4);
AVIContext *avi = s->priv_data;
AVIOContext *pb = s->pb;
unsigned int tag, tag1, handler;
- int codec_type, stream_index, frame_period, bit_rate;
+ int codec_type, stream_index, frame_period;
unsigned int size;
int i;
AVStream *st;
/* AVI header */
/* using frame_period is bad idea */
frame_period = avio_rl32(pb);
- bit_rate = avio_rl32(pb) * 8;
+ avio_skip(pb, 4); /* bit_rate */
avio_rl32(pb);
avi->non_interleaved |= avio_rl32(pb) & AVIF_MUSTUSEINDEX;
AVCodecContext *enc, const uint8_t *buf, int size)
{
AVIOContext *pb = s->pb;
- GIFContext *gif = s->priv_data;
int jiffies;
- int64_t delay;
/* graphic control extension block */
avio_w8(pb, 0x21);
/* 1 jiffy is 1/70 s */
/* the delay_time field indicates the number of jiffies - 1 */
- delay = gif->file_time - gif->time;
-
/* XXX: should use delay, in order to be more accurate */
/* instead of using the same rounded value each time */
/* XXX: don't even remember if I really use it for now */
unsigned int tag;
AVIOContext *pb = s->pb;
AVStream *st;
- int64_t file_size, size;
+ int64_t size;
int rate, params;
tag = avio_rl32(pb);
if (tag != MKTAG('M', 'M', 'M', 'D'))
return -1;
- file_size = avio_rb32(pb);
+ avio_skip(pb, 4); /* file_size */
/* Skip some unused chunks that may or may not be present */
for(;; avio_skip(pb, size)) {
AVPacket *pkt)
{
MMFContext *mmf = s->priv_data;
- AVStream *st;
int ret, size;
if (s->pb->eof_reached)
return AVERROR(EIO);
- st = s->streams[0];
size = MAX_SIZE;
if(size > mmf->data_size)
int ff_mov_read_esds(AVFormatContext *fc, AVIOContext *pb, MOVAtom atom)
{
AVStream *st;
- int tag, len;
+ int tag;
if (fc->nb_streams < 1)
return 0;
st = fc->streams[fc->nb_streams-1];
avio_rb32(pb); /* version + flags */
- len = ff_mp4_read_descr(fc, pb, &tag);
+ ff_mp4_read_descr(fc, pb, &tag);
if (tag == MP4ESDescrTag) {
avio_rb16(pb); /* ID */
avio_r8(pb); /* priority */
} else
avio_rb16(pb); /* ID */
- len = ff_mp4_read_descr(fc, pb, &tag);
+ ff_mp4_read_descr(fc, pb, &tag);
if (tag == MP4DecConfigDescrTag)
ff_mp4_read_dec_config_descr(fc, st, pb);
return 0;
int flags, nsegs;
uint64_t gp;
uint32_t serial;
- uint32_t seq;
- uint32_t crc;
int size, idx;
uint8_t sync[4];
int sp = 0;
flags = avio_r8(bc);
gp = avio_rl64 (bc);
serial = avio_rl32 (bc);
- seq = avio_rl32 (bc);
- crc = avio_rl32 (bc);
+ avio_skip(bc, 8); /* seq, crc */
nsegs = avio_r8(bc);
idx = ogg_find_stream (ogg, serial);
const uint8_t *p = os->buf + os->pstart;
uint64_t time_unit;
uint64_t spu;
- uint32_t default_len;
if(!(*p & 1))
return 0;
time_unit = bytestream_get_le64(&p);
spu = bytestream_get_le64(&p);
- default_len = bytestream_get_le32(&p);
-
+ p += 4; /* default_len */
p += 8; /* buffersize + bits_per_sample */
if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
unsigned int audio_frame_counter = 0;
unsigned int video_frame_counter = 0;
unsigned int back_size;
- int data_size;
- unsigned short encoding_method;
unsigned short sound_rate;
unsigned short rate;
unsigned short channels;
avio_skip(pb,4); /* skip FORM tag */
back_size = avio_rl32(pb); /**< get size of the background frame */
signature = avio_rb32(pb);
- data_size = avio_rb32(pb);
+ avio_skip(pb, 4); /* data size */
frame_count = avio_rl32(pb);
/* disallow back_sizes and frame_counts that may lead to overflows later */
if(back_size > INT_MAX/2 || frame_count > INT_MAX / sizeof(uint32_t))
return AVERROR_INVALIDDATA;
- encoding_method = avio_rl16(pb);
+ avio_skip(pb, 2); /* encoding method */
sound_rate = avio_rl16(pb);
rate = avio_rl16(pb);
channels = avio_rl16(pb);
if (rm_read_audio_stream_info(s, pb, st, rst, 0))
return -1;
} else {
- int fps, fps2;
+ int fps;
if (avio_rl32(pb) != MKTAG('V', 'I', 'D', 'O')) {
fail1:
av_log(st->codec, AV_LOG_ERROR, "Unsupported video codec\n");
fps= avio_rb16(pb);
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
avio_rb32(pb);
- fps2= avio_rb16(pb);
+ avio_skip(pb, 2); /* fps2 */
avio_rb16(pb);
if ((ret = rm_read_extradata(pb, st->codec, codec_data_size - (avio_tell(pb) - codec_pos))) < 0)
stream->codec->codec_tag == 124) {
// We have to split Escape 124 frames because there are
// multiple frames per chunk in Escape 124 samples.
- uint32_t frame_size, frame_flags;
+ uint32_t frame_size;
- frame_flags = avio_rl32(pb);
+ avio_skip(pb, 4); /* flags */
frame_size = avio_rl32(pb);
if (avio_seek(pb, -8, SEEK_CUR) < 0)
return AVERROR(EIO);
int len = ff_hex_to_data(NULL, value), i, ret = 0;
GetBitContext gb;
uint8_t *config;
- int audio_mux_version, same_time_framing, num_sub_frames,
- num_programs, num_layers;
+ int audio_mux_version, same_time_framing, num_programs, num_layers;
/* Pad this buffer, too, to avoid out of bounds reads with get_bits below */
config = av_mallocz(len + FF_INPUT_BUFFER_PADDING_SIZE);
init_get_bits(&gb, config, len*8);
audio_mux_version = get_bits(&gb, 1);
same_time_framing = get_bits(&gb, 1);
- num_sub_frames = get_bits(&gb, 6);
+ skip_bits(&gb, 6); /* num_sub_frames */
num_programs = get_bits(&gb, 4);
num_layers = get_bits(&gb, 3);
if (audio_mux_version != 0 || same_time_framing != 1 || num_programs != 0 ||
{
AVIOContext *pb = avctx->pb;
char buf[36];
- int datatype, filetype, t1, t2, nb_comments, flags;
+ int datatype, filetype, t1, t2, nb_comments;
uint64_t start_pos = avio_size(pb) - 128;
avio_seek(pb, start_pos, SEEK_SET);
t1 = avio_rl16(pb);
t2 = avio_rl16(pb);
nb_comments = avio_r8(pb);
- flags = avio_r8(pb);
+ avio_skip(pb, 1); /* flags */
avio_skip(pb, 4);
GET_SAUCE_META("encoder", 22);
int i;
int frame_size = 0;
int palchange = 0;
- int pos;
if (s->pb->eof_reached || smk->cur_frame >= smk->frames)
return AVERROR_EOF;
frame_size = smk->frm_size[smk->cur_frame] & (~3);
flags = smk->frm_flags[smk->cur_frame];
/* handle palette change event */
- pos = avio_tell(s->pb);
if(flags & SMACKER_PAL){
int size, sz, t, off, j, pos;
uint8_t *pal = smk->pal;
static int sol_read_header(AVFormatContext *s,
AVFormatParameters *ap)
{
- int size;
unsigned int magic,tag;
AVIOContext *pb = s->pb;
unsigned int id, channels, rate, type;
return -1;
rate = avio_rl16(pb);
type = avio_r8(pb);
- size = avio_rl32(pb);
+ avio_skip(pb, 4); /* size */
if (magic != 0x0B8D)
avio_r8(pb); /* newer SOLs contain padding byte */
AVPicture *picture;
int* first_pkt = s->priv_data;
int width, height, h_chroma_shift, v_chroma_shift;
- int i, m;
+ int i;
char buf2[Y4M_LINE_MAX+1];
char buf1[20];
uint8_t *ptr, *ptr1, *ptr2;
/* construct frame header */
- m = snprintf(buf1, sizeof(buf1), "%s\n", Y4M_FRAME_MAGIC);
+ snprintf(buf1, sizeof(buf1), "%s\n", Y4M_FRAME_MAGIC);
avio_write(pb, buf1, strlen(buf1));
width = st->codec->width;