assert((asf_st->packet_pos - s->data_offset) % asf->packet_size == 0);
pos= asf_st->packet_pos;
- av_add_index_entry(s->streams[i], pos, pts, pos - start_pos[i] + 1, AVINDEX_KEYFRAME);
+ av_add_index_entry(s->streams[i], pos, pkt->size, pts, pos - start_pos[i] + 1, AVINDEX_KEYFRAME);
start_pos[i]= asf_st->packet_pos + 1;
if(pkt->stream_index == stream_index)
int64_t pos;
int64_t timestamp;
#define AVINDEX_KEYFRAME 0x0001
-/* the following 2 flags indicate that the next/prev keyframe is known, and scanning for it isn't needed */
- int flags;
+ int flags:2;
+ int size:30; //trying to keep the size of this small to reduce memory requirements (it's 24 vs 32 bytes due to possible 8-byte alignment)
int min_distance; /* min distance between this and the previous keyframe, used to avoid unneeded searching */
} AVIndexEntry;
int av_find_default_stream_index(AVFormatContext *s);
int av_index_search_timestamp(AVStream *st, int64_t timestamp, int flags);
int av_add_index_entry(AVStream *st,
- int64_t pos, int64_t timestamp, int distance, int flags);
+ int64_t pos, int64_t timestamp, int size, int distance, int flags);
int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags);
/* media file output */
} AVIContext;
static int avi_load_index(AVFormatContext *s);
+static int guess_ni_flag(AVFormatContext *s);
#ifdef DEBUG
static void print_tag(const char *str, unsigned int tag, int size)
for(i=0; i<entries_in_use; i++){
if(index_type){
- int64_t pos= get_le32(pb) + base;
+ int64_t pos= get_le32(pb) + base - 8;
int len = get_le32(pb);
-
- av_add_index_entry(st, pos, ast->cum_len, 0, (len<0) ? 0 : AVINDEX_KEYFRAME);
+ int key= len >= 0;
len &= 0x7FFFFFFF;
+//av_log(s, AV_LOG_ERROR, "pos:%Ld, len:%X\n", pos, len);
+ av_add_index_entry(st, pos, ast->cum_len, len, 0, key ? AVINDEX_KEYFRAME : 0);
+
if(ast->sample_size)
ast->cum_len += len / ast->sample_size;
else
if(!avi->index_loaded)
avi_load_index(s);
avi->index_loaded = 1;
+ avi->non_interleaved |= guess_ni_flag(s);
return 0;
}
if(last_pos == pos)
avi->non_interleaved= 1;
else
- av_add_index_entry(st, pos, ast->cum_len, 0, (flags&AVIIF_INDEX) ? AVINDEX_KEYFRAME : 0);
+ av_add_index_entry(st, pos, ast->cum_len, len, 0, (flags&AVIIF_INDEX) ? AVINDEX_KEYFRAME : 0);
if(ast->sample_size)
ast->cum_len += len / ast->sample_size;
else
}
}
the_end:
- avi->non_interleaved |= guess_ni_flag(s);
url_fseek(pb, pos, SEEK_SET);
return 0;
}
int i;
for(i=0; i<s->nb_streams; i++){
if(startcode == s->streams[i]->id) {
- av_add_index_entry(s->streams[i], *ppos, dts, 0, AVINDEX_KEYFRAME /* FIXME keyframe? */);
+ av_add_index_entry(s->streams[i], *ppos, dts, 0, 0, AVINDEX_KEYFRAME /* FIXME keyframe? */);
}
}
}
s->streams[stream_id],
frame_start,
pts,
+ 0,
frame_start - nut->stream[stream_id].last_sync_pos,
AVINDEX_KEYFRAME);
nut->stream[stream_id].last_sync_pos= frame_start;
if(flags&2){
pkt->flags |= PKT_FLAG_KEY;
if((seq&0x7F) == 1)
- av_add_index_entry(st, pos, timestamp, 0, AVINDEX_KEYFRAME);
+ av_add_index_entry(st, pos, timestamp, 0, 0, AVINDEX_KEYFRAME);
}
}
if((flags&2) && (seq&0x7F) == 1){
// av_log(s, AV_LOG_DEBUG, "%d %d-%d %Ld %d\n", flags, stream_index2, stream_index, dts, seq);
- av_add_index_entry(st, pos, dts, 0, AVINDEX_KEYFRAME);
+ av_add_index_entry(st, pos, dts, 0, 0, AVINDEX_KEYFRAME);
if(stream_index2 == stream_index)
break;
}
* @param timestamp timestamp in the timebase of the given stream
*/
int av_add_index_entry(AVStream *st,
- int64_t pos, int64_t timestamp, int distance, int flags)
+ int64_t pos, int64_t timestamp, int size, int distance, int flags)
{
AVIndexEntry *entries, *ie;
int index;
ie->pos = pos;
ie->timestamp = timestamp;
ie->min_distance= distance;
+ ie->size= size;
ie->flags = flags;
return index;
if (pkt->stream_index == 0 && st->parser &&
(pkt->flags & PKT_FLAG_KEY)) {
av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
- 0, AVINDEX_KEYFRAME);
+ 0, 0, AVINDEX_KEYFRAME);
}
av_free_packet(pkt);
}