In particular, write a ctts atom only if there is ever a non-zero offset.
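In outline: composition-time entries are now always accumulated, and a
per-table flag records whether any offset was non-zero; serialization
consults that flag. A minimal sketch of the gating idea (hypothetical
names, glib types assumed; not the patch itself):

    #include <glib.h>

    /* stand-in for AtomCTTS; only the gating logic is shown */
    typedef struct
    {
      gboolean do_pts;          /* set on the first non-zero offset */
    } SketchCTTS;

    static void
    sketch_ctts_add_entry (SketchCTTS * ctts, gint64 offset)
    {
      /* entries are recorded unconditionally (elided here); the flag
       * only remembers whether a ctts atom is worth writing at all */
      if (offset != 0)
        ctts->do_pts = TRUE;
    }

    static gboolean
    sketch_ctts_needed (const SketchCTTS * ctts)
    {
      /* all offsets zero: the atom would carry no information */
      return ctts->do_pts;
    }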
atom_full_init (&ctts->header, FOURCC_ctts, 0, 0, 0, flags);
atom_array_init (&ctts->entries, 128);
+ ctts->do_pts = FALSE;
}
static AtomCTTS *
if (!atom_stsz_copy_data (&stbl->stsz, buffer, size, offset)) {
return 0;
}
- if (stbl->ctts) {
+ if (stbl->ctts && stbl->ctts->do_pts) {
if (!atom_ctts_copy_data (stbl->ctts, buffer, size, offset)) {
return 0;
}
nentry.samplecount = nsamples;
nentry.sampleoffset = offset;
atom_array_append (&ctts->entries, nentry, 256);
+ if (offset != 0)
+ ctts->do_pts = TRUE;
} else {
entry->samplecount += nsamples;
}
void
atom_stbl_add_samples (AtomSTBL * stbl, guint32 nsamples, guint32 delta,
- guint32 size, guint64 chunk_offset, gboolean sync,
- gboolean do_pts, gint64 pts_offset)
+ guint32 size, guint64 chunk_offset, gboolean sync, gint64 pts_offset)
{
atom_stts_add_entry (&stbl->stts, nsamples, delta);
atom_stsz_add_entry (&stbl->stsz, nsamples, size);
atom_stco64_add_entry (&stbl->stco64, chunk_offset);
atom_stsc_add_new_entry (&stbl->stsc,
    atom_stco64_get_entry_count (&stbl->stco64), nsamples);
if (sync)
atom_stbl_add_stss_entry (stbl);
- if (do_pts)
- atom_stbl_add_ctts_entry (stbl, nsamples, pts_offset);
+ /* always store an entry, so the ctts table covers every sample consistently */
+ atom_stbl_add_ctts_entry (stbl, nsamples, pts_offset);
}
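For callers, the old (sync, do_pts, pts_offset) triple collapses to
(sync, pts_offset): a pts_offset of 0 now stands in for do_pts == FALSE.
A hypothetical call under the new signature:

    /* no reordering on this sample, so pts_offset is 0; the ctts entry
     * is still stored, but no ctts atom is written unless some sample
     * ever carries a non-zero offset */
    atom_stbl_add_samples (stbl, nsamples, delta, size, chunk_offset,
        TRUE /* sync */, 0 /* pts_offset */);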
void
atom_trak_add_samples (AtomTRAK * trak, guint32 nsamples, guint32 delta,
- guint32 size, guint64 chunk_offset, gboolean sync,
- gboolean do_pts, gint64 pts_offset)
+ guint32 size, guint64 chunk_offset, gboolean sync, gint64 pts_offset)
{
AtomSTBL *stbl = &trak->mdia.minf.stbl;
atom_stbl_add_samples (stbl, nsamples, delta, size, chunk_offset, sync,
- do_pts, pts_offset);
+ pts_offset);
}
/* trak and moov molding */
static void
atom_trun_add_samples (AtomTRUN * trun, guint32 delta, guint32 size,
- guint32 flags, gboolean do_pts, gint64 pts_offset)
+ guint32 flags, gint64 pts_offset)
{
TRUNSampleEntry nentry;
- if (do_pts) {
+ if (pts_offset != 0)
trun->header.flags[1] |= TR_COMPOSITION_TIME_OFFSETS;
- }
nentry.sample_duration = delta;
nentry.sample_size = size;
nentry.sample_flags = flags;
- nentry.sample_composition_time_offset = do_pts ? pts_offset : 0;
+ nentry.sample_composition_time_offset = pts_offset;
atom_array_append (&trun->entries, nentry, 256);
trun->sample_count++;
}
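The fragmented path follows the same convention per trun: the first
non-zero pts_offset sets TR_COMPOSITION_TIME_OFFSETS, after which the
offset field of every entry (zero or not) is emitted. Illustrative
calls with hypothetical values:

    atom_trun_add_samples (trun, delta, size, flags, 0);   /* flag untouched */
    atom_trun_add_samples (trun, delta, size, flags, 40);  /* flag now set */
    atom_trun_add_samples (trun, delta, size, flags, 0);   /* offset 0 still stored */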
void
atom_traf_add_samples (AtomTRAF * traf, guint32 delta, guint32 size,
- gboolean sync, gboolean do_pts, gint64 pts_offset, gboolean sdtp_sync)
+ gboolean sync, gint64 pts_offset, gboolean sdtp_sync)
{
AtomTRUN *trun;
guint32 flags;
}
}
- atom_trun_add_samples (traf->truns->data, delta, size, flags, do_pts,
- pts_offset);
+ atom_trun_add_samples (traf->truns->data, delta, size, flags, pts_offset);
if (traf->sdtps)
atom_sdtp_add_samples (traf->sdtps->data, 0x10 | ((flags & 0xff) >> 4));
/* also entry count here */
ATOM_ARRAY (CTTSEntry) entries;
+ gboolean do_pts;
} AtomCTTS;
typedef struct _AtomSTBL
AtomTRAK* atom_trak_new (AtomsContext *context);
void atom_trak_add_samples (AtomTRAK * trak, guint32 nsamples, guint32 delta,
guint32 size, guint64 chunk_offset, gboolean sync,
- gboolean do_pts, gint64 pts_offset);
+ gint64 pts_offset);
void atom_trak_add_elst_entry (AtomTRAK * trak, guint32 duration,
guint32 media_time, guint32 rate);
guint32 atom_trak_get_timescale (AtomTRAK *trak);
void atom_stbl_add_samples (AtomSTBL * stbl, guint32 nsamples,
guint32 delta, guint32 size,
guint64 chunk_offset, gboolean sync,
- gboolean do_pts, gint64 pts_offset);
+ gint64 pts_offset);
AtomMOOV* atom_moov_new (AtomsContext *context);
void atom_moov_free (AtomMOOV *moov);
AtomTRAF * atom_traf_new (AtomsContext * context, guint32 track_ID);
void atom_traf_free (AtomTRAF * traf);
void atom_traf_add_samples (AtomTRAF * traf, guint32 delta,
- guint32 size, gboolean sync,
- gboolean do_pts, gint64 pts_offset,
+ guint32 size, gboolean sync, gint64 pts_offset,
gboolean sdtp_sync);
guint32 atom_traf_get_sample_num (AtomTRAF * traf);
void atom_moof_add_traf (AtomMOOF *moof, AtomTRAF *traf);
{
trak->duration += b->nsamples * b->delta;
atom_stbl_add_samples (&trak->stbl, b->nsamples, b->delta, b->size,
- b->chunk_offset, b->sync, b->do_pts, b->pts_offset);
+ b->chunk_offset, b->sync, b->pts_offset);
}
/**
static GstFlowReturn
gst_qt_mux_pad_fragment_add_buffer (GstQTMux * qtmux, GstQTPad * pad,
GstBuffer * buf, gboolean force, guint32 nsamples, gint64 dts,
- guint32 delta, guint32 size, gboolean sync, gboolean do_pts,
- gint64 pts_offset)
+ guint32 delta, guint32 size, gboolean sync, gint64 pts_offset)
{
GstFlowReturn ret = GST_FLOW_OK;
}
/* add buffer and metadata */
- atom_traf_add_samples (pad->traf, delta, size, sync, do_pts, pts_offset,
+ atom_traf_add_samples (pad->traf, delta, size, sync, pts_offset,
pad->sync && sync);
atom_array_append (&pad->fragment_buffers, buf, 256);
pad->fragment_duration -= delta;
/* note that a new chunk is started each time (not fancy but works) */
if (qtmux->moov_recov_file) {
if (!atoms_recov_write_trak_samples (qtmux->moov_recov_file, pad->trak,
- nsamples, scaled_duration, sample_size, chunk_offset, sync, do_pts,
- pts_offset)) {
+ nsamples, (gint32) scaled_duration, sample_size, chunk_offset, sync,
+ do_pts, pts_offset)) {
GST_WARNING_OBJECT (qtmux, "Failed to write sample information to "
"recovery file, disabling recovery");
fclose (qtmux->moov_recov_file);
/* ensure that samples are marked sync on streams where every sample is a sync point */
return gst_qt_mux_pad_fragment_add_buffer (qtmux, pad, last_buf,
buf == NULL, nsamples, last_dts, scaled_duration, sample_size,
- !pad->sync || sync, do_pts, pts_offset);
+ !pad->sync || sync, pts_offset);
} else {
- atom_trak_add_samples (pad->trak, nsamples, scaled_duration, sample_size,
- chunk_offset, sync, do_pts, pts_offset);
+ atom_trak_add_samples (pad->trak, nsamples, scaled_duration,
+ sample_size, chunk_offset, sync, pts_offset);
return gst_qt_mux_send_buffer (qtmux, last_buf, &qtmux->mdat_size, TRUE);
}