2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 #include "libavcodec/opt.h"
25 #include "libavutil/avstring.h"
35 * @file libavformat/utils.c
36 * various utility functions for use within FFmpeg
39 unsigned avformat_version(void)
41 return LIBAVFORMAT_VERSION_INT;
44 /* fraction handling */

/**
 * Initialize the fraction so that f = val + (num / den) + 0.5.
 * 'num' is normalized afterwards so that 0 <= num < den holds.
 *
 * @param f   fractional number to initialize
 * @param val integer value
 * @param num must be >= 0
 * @param den must be >= 1
 */
56 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
/**
 * Fractional addition to f: f = f + (incr / f->den).
 *
 * @param f    fractional number
 * @param incr increment, can be positive or negative
 */
74 static void av_frac_add(AVFrac *f, int64_t incr)
/* renormalization branch: fold a whole unit out of num when it reached den */
87 } else if (num >= den) {
94 /** head of registered input format linked list */
95 AVInputFormat *first_iformat = NULL;
96 /** head of registered output format linked list */
97 AVOutputFormat *first_oformat = NULL;
99 AVInputFormat *av_iformat_next(AVInputFormat *f)
101 if(f) return f->next;
102 else return first_iformat;
105 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
107 if(f) return f->next;
108 else return first_oformat;
/** Append @p format to the tail of the global input-format list. */
111 void av_register_input_format(AVInputFormat *format)
/* walk to the end of the singly linked list headed by first_iformat */
115 while (*p != NULL) p = &(*p)->next;
/** Append @p format to the tail of the global output-format list. */
120 void av_register_output_format(AVOutputFormat *format)
/* walk to the end of the singly linked list headed by first_oformat */
124 while (*p != NULL) p = &(*p)->next;
/**
 * Check whether the file name extension of @p filename matches one of the
 * entries in the comma-separated list @p extensions (case-insensitive).
 */
129 int match_ext(const char *filename, const char *extensions)
/* locate the last '.' to isolate the extension */
137 ext = strrchr(filename, '.');
/* copy one comma-separated token, bounded by the local buffer size */
143 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
146 if (!strcasecmp(ext1, ext))
/**
 * Return nonzero if @p name matches one entry of the comma-separated
 * list @p names (case-insensitive).
 */
156 static int match_format(const char *name, const char *names)
164 namelen = strlen(name);
165 while ((p = strchr(names, ','))) {
/* FFMAX prevents a prefix-only match when the list entry is shorter than name */
166 len = FFMAX(p - names, namelen);
167 if (!strncasecmp(name, names, len))
/* last (or only) entry in the list: plain comparison */
171 return !strcasecmp(name, names);
/**
 * Pick the best-matching registered output format, scoring each candidate
 * by short name, MIME type and file name extension.  Any argument may be
 * NULL; the highest-scoring format wins.
 */
174 AVOutputFormat *guess_format(const char *short_name, const char *filename,
175                             const char *mime_type)
177 AVOutputFormat *fmt, *fmt_found;
178 int score_max, score;
180 /* specific test for image sequences */
181 #if CONFIG_IMAGE2_MUXER
182 if (!short_name && filename &&
183 av_filename_number_test(filename) &&
184 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
185 return guess_format("image2", NULL, NULL);
188 /* Find the proper file type. */
192 while (fmt != NULL) {
/* exact short-name match scores highest */
194 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
196 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
/* extension match scores lowest of the three criteria */
198 if (filename && fmt->extensions &&
199 match_ext(filename, fmt->extensions)) {
202 if (score > score_max) {
/**
 * Like guess_format(), but prefer a streamable "<name>_stream" variant
 * of the guessed format when one is registered.
 */
211 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
212                                     const char *mime_type)
214 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
217 AVOutputFormat *stream_fmt;
218 char stream_format_name[64];
/* derive the streamable variant's registered name, e.g. "asf_stream" */
220 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
221 stream_fmt = guess_format(stream_format_name, NULL, NULL);
/**
 * Guess the default codec ID for the given output format and media type.
 * For video with the image2 muxers the file name extension selects the
 * image codec; otherwise the format's declared default codec is used.
 */
230 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
231                             const char *filename, const char *mime_type, enum CodecType type){
232 if(type == CODEC_TYPE_VIDEO){
233 enum CodecID codec_id= CODEC_ID_NONE;
235 #if CONFIG_IMAGE2_MUXER
236 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
237 codec_id= av_guess_image2_codec(filename);
/* fall back to the muxer's default video codec */
240 if(codec_id == CODEC_ID_NONE)
241 codec_id= fmt->video_codec;
243 }else if(type == CODEC_TYPE_AUDIO)
244 return fmt->audio_codec;
246 return CODEC_ID_NONE;
/** Find a registered input format whose name matches @p short_name. */
249 AVInputFormat *av_find_input_format(const char *short_name)
252 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
253 if (match_format(short_name, fmt->name))
259 /* memory handling */

/** Default packet destructor: frees the data buffer and clears the fields. */
261 void av_destruct_packet(AVPacket *pkt)
264 pkt->data = NULL; pkt->size = 0;
/** Initialize optional packet fields to their default ("unset") values. */
267 void av_init_packet(AVPacket *pkt)
269 pkt->pts = AV_NOPTS_VALUE;
270 pkt->dts = AV_NOPTS_VALUE;
273 pkt->convergence_duration = 0;
275 pkt->stream_index = 0;
/* by default the packet does not own its data buffer */
276 pkt->destruct= av_destruct_packet_nofree;
/**
 * Allocate the payload of a packet (plus zeroed input padding) and
 * initialize its fields.  Returns 0 on success, AVERROR(ENOMEM) on failure.
 */
279 int av_new_packet(AVPacket *pkt, int size)
/* guard against integer overflow when the padding is added */
282 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
283 return AVERROR(ENOMEM);
284 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
286 return AVERROR(ENOMEM);
/* zero the padding so damaged parsers cannot read uninitialized bytes */
287 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
/* the packet owns its buffer now */
292 pkt->destruct = av_destruct_packet;
/**
 * Allocate a packet of @p size bytes and fill it from the byte stream @p s.
 * Returns the number of bytes read, or a negative error code.
 */
296 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
298 int ret= av_new_packet(pkt, size);
/* record where in the stream this packet starts */
303 pkt->pos= url_ftell(s);
305 ret= get_buffer(s, pkt->data, size);
/**
 * If the packet does not own its data, duplicate the payload into a
 * freshly allocated, padded buffer so the packet becomes self-contained.
 * Returns 0 on success, AVERROR(ENOMEM) on failure.
 */
314 int av_dup_packet(AVPacket *pkt)
316 if (((pkt->destruct == av_destruct_packet_nofree) || (pkt->destruct == NULL)) && pkt->data) {
318 /* We duplicate the packet and don't forget to add the padding again. */
/* guard against integer overflow when the padding is added */
319 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
320 return AVERROR(ENOMEM);
321 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
323 return AVERROR(ENOMEM);
325 memcpy(data, pkt->data, pkt->size);
326 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
/* the copy is owned by the packet */
328 pkt->destruct = av_destruct_packet;
/**
 * Return 1 if @p filename contains a valid frame-number pattern
 * (i.e. av_get_frame_filename() can expand it), 0 otherwise.
 */
333 int av_filename_number_test(const char *filename)
336 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
/**
 * Probe all registered input formats against @p pd and return the one with
 * the highest score above *score_max; *score_max is updated with the winner's
 * score.  Formats requiring an open file are skipped when !is_opened.
 */
339 static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
341 AVInputFormat *fmt1, *fmt;
345 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
/* only consider formats whose file requirement matches is_opened */
346 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
349 if (fmt1->read_probe) {
350 score = fmt1->read_probe(pd);
351 } else if (fmt1->extensions) {
/* no probe function: fall back to extension matching */
352 if (match_ext(pd->filename, fmt1->extensions)) {
356 if (score > *score_max) {
/* a tie between two formats is ambiguous: keep neither */
359 }else if (score == *score_max)
/** Convenience wrapper around av_probe_input_format2() with a zero minimum score. */
365 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
367 return av_probe_input_format2(pd, is_opened, &score);
/**
 * Probe the buffered data of a CODEC_ID_PROBE stream and, if a known
 * elementary-stream format is recognized, set the stream's codec id/type.
 */
370 static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
373 fmt = av_probe_input_format2(pd, 1, &score);
/* map the recognized demuxer name onto a concrete codec id and media type */
376 if (!strcmp(fmt->name, "mp3")) {
377 st->codec->codec_id = CODEC_ID_MP3;
378 st->codec->codec_type = CODEC_TYPE_AUDIO;
379 } else if (!strcmp(fmt->name, "ac3")) {
380 st->codec->codec_id = CODEC_ID_AC3;
381 st->codec->codec_type = CODEC_TYPE_AUDIO;
382 } else if (!strcmp(fmt->name, "mpegvideo")) {
383 st->codec->codec_id = CODEC_ID_MPEG2VIDEO;
384 st->codec->codec_type = CODEC_TYPE_VIDEO;
385 } else if (!strcmp(fmt->name, "m4v")) {
386 st->codec->codec_id = CODEC_ID_MPEG4;
387 st->codec->codec_type = CODEC_TYPE_VIDEO;
388 } else if (!strcmp(fmt->name, "h264")) {
389 st->codec->codec_id = CODEC_ID_H264;
390 st->codec->codec_type = CODEC_TYPE_VIDEO;
396 /************************************************************/
397 /* input media file */

/**
 * Open a media file from an already-opened IO stream.
 * 'fmt' must be specified (no probing is done here); 'ap' may be NULL.
 */
402 int av_open_input_stream(AVFormatContext **ic_ptr,
403                          ByteIOContext *pb, const char *filename,
404                          AVInputFormat *fmt, AVFormatParameters *ap)
408 AVFormatParameters default_ap;
412 memset(ap, 0, sizeof(default_ap));
/* allocate a context unless the caller preallocated one */
415 if(!ap->prealloced_context)
416 ic = avformat_alloc_context();
420 err = AVERROR(ENOMEM);
425 ic->duration = AV_NOPTS_VALUE;
426 ic->start_time = AV_NOPTS_VALUE;
427 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
429 /* allocate private data */
430 if (fmt->priv_data_size > 0) {
431 ic->priv_data = av_mallocz(fmt->priv_data_size);
432 if (!ic->priv_data) {
433 err = AVERROR(ENOMEM);
437 ic->priv_data = NULL;
440 if (ic->iformat->read_header) {
441 err = ic->iformat->read_header(ic, ap);
/* remember where the payload starts if the demuxer did not set it */
446 if (pb && !ic->data_offset)
447 ic->data_offset = url_ftell(ic->pb);
449 #if LIBAVFORMAT_VERSION_MAJOR < 53
450 ff_metadata_demux_compat(ic);
/* error path: release everything allocated so far */
458 av_freep(&ic->priv_data);
459 for(i=0;i<ic->nb_streams;i++) {
460 AVStream *st = ic->streams[i];
462 av_free(st->priv_data);
463 av_free(st->codec->extradata);
473 /** size of probe buffer, for guessing file type from file contents */
474 #define PROBE_BUF_MIN 2048
475 #define PROBE_BUF_MAX (1<<20)
/**
 * Open a media file by name: probe the format (growing the probe buffer
 * up to PROBE_BUF_MAX if needed), then delegate to av_open_input_stream().
 */
477 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
480                        AVFormatParameters *ap)
483 AVProbeData probe_data, *pd = &probe_data;
484 ByteIOContext *pb = NULL;
488 pd->filename = filename;
493 /* guess format if no file can be opened */
494 fmt = av_probe_input_format(pd, 0);
497 /* Do not open file if the format does not need it. XXX: specific
498    hack needed to handle RTSP/TCP */
499 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
500 /* if no file needed do not try to open one */
501 if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
505 url_setbufsize(pb, buf_size);
/* retry probing with exponentially larger buffers until a format is found */
508 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
/* require a decent score unless this is already the maximum buffer */
509 int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
510 /* read probe data */
511 pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
512 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
513 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
/* rewind; if seeking back fails, reopen the file from scratch */
514 if (url_fseek(pb, 0, SEEK_SET) < 0) {
516 if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
522 /* guess file format */
523 fmt = av_probe_input_format2(pd, 1, &score);
528 /* if still no format found, error */
534 /* check filename in case an image number is expected */
535 if (fmt->flags & AVFMT_NEEDNUMBER) {
536 if (!av_filename_number_test(filename)) {
537 err = AVERROR_NUMEXPECTED;
541 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
554 /*******************************************************/

/**
 * Append @p pkt to the given packet list; *plast_pktl tracks the tail so
 * the append is O(1).  Returns the stored packet (inside the new node).
 */
556 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
557                                AVPacketList **plast_pktl){
558 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
563 (*plast_pktl)->next = pktl;
/* empty list: the new node becomes the head */
565 *packet_buffer = pktl;
567 /* add the packet in the buffered packet list */
/**
 * Read the next raw packet from the demuxer.  Packets for streams whose
 * codec is still CODEC_ID_PROBE are buffered and fed to the probe until
 * the codec can be identified.
 */
573 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
579 AVPacketList *pktl = s->raw_packet_buffer;
/* return a buffered packet once its stream's codec has been resolved */
583 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE){
584 s->raw_packet_buffer = pktl->next;
591 ret= s->iformat->read_packet(s, pkt);
594 st= s->streams[pkt->stream_index];
/* apply any caller-forced codec ids */
596 switch(st->codec->codec_type){
597 case CODEC_TYPE_VIDEO:
598 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
600 case CODEC_TYPE_AUDIO:
601 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
603 case CODEC_TYPE_SUBTITLE:
604 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
/* nothing buffered and no probing needed: hand the packet out directly */
608 if(!pktl && st->codec->codec_id!=CODEC_ID_PROBE)
611 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
613 if(st->codec->codec_id == CODEC_ID_PROBE){
614 AVProbeData *pd = &st->probe_data;
/* accumulate payload into the per-stream probe buffer (zero-padded) */
616 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
617 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
618 pd->buf_size += pkt->size;
619 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
/* re-probe only when the buffer size crosses a power-of-two boundary */
621 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
622 set_codec_from_probe_data(st, pd, 1);
623 if(st->codec->codec_id != CODEC_ID_PROBE){
632 /**********************************************************/

/**
 * Get the number of samples of an audio frame. Return -1 on error.
 */
637 static int get_audio_frame_size(AVCodecContext *enc, int size)
641 if(enc->codec_id == CODEC_ID_VORBIS)
/* frame_size <= 1 means the codec has no fixed frame size */
644 if (enc->frame_size <= 1) {
645 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
647 if (bits_per_sample) {
648 if (enc->channels == 0)
/* derive sample count from the packet size for constant-bit codecs */
650 frame_size = (size << 3) / (bits_per_sample * enc->channels);
652 /* used for example by ADPCM codecs */
653 if (enc->bit_rate == 0)
655 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
658 frame_size = enc->frame_size;
/**
 * Compute the frame duration as the fraction *pnum / *pden, in units of
 * the stream's time base.  Both outputs are left at 0 if unavailable.
 */
667 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
668                                    AVCodecParserContext *pc, AVPacket *pkt)
674 switch(st->codec->codec_type) {
675 case CODEC_TYPE_VIDEO:
/* prefer the container time base when it looks like a sane frame rate */
676 if(st->time_base.num*1000LL > st->time_base.den){
677 *pnum = st->time_base.num;
678 *pden = st->time_base.den;
679 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
680 *pnum = st->codec->time_base.num;
681 *pden = st->codec->time_base.den;
/* repeated fields/frames (e.g. pulldown) lengthen the duration */
682 if (pc && pc->repeat_pict) {
683 *pnum = (*pnum) * (1 + pc->repeat_pict);
687 case CODEC_TYPE_AUDIO:
688 frame_size = get_audio_frame_size(st->codec, pkt->size);
692 *pden = st->codec->sample_rate;
/**
 * Return nonzero if every frame of the codec is a keyframe
 * (all audio, plus the listed intra-only video codecs).
 */
699 static int is_intra_only(AVCodecContext *enc){
700 if(enc->codec_type == CODEC_TYPE_AUDIO){
702 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
703 switch(enc->codec_id){
705 case CODEC_ID_MJPEGB:
707 case CODEC_ID_RAWVIDEO:
708 case CODEC_ID_DVVIDEO:
709 case CODEC_ID_HUFFYUV:
710 case CODEC_ID_FFVHUFF:
715 case CODEC_ID_JPEG2000:
/**
 * Once the stream's first DTS becomes known, shift the timestamps of all
 * already-buffered packets of that stream by the derived offset and set
 * the stream start time.
 */
723 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
724                                       int64_t dts, int64_t pts)
726 AVStream *st= s->streams[stream_index];
727 AVPacketList *pktl= s->packet_buffer;
/* only act the first time a usable dts arrives */
729 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
732 st->first_dts= dts - st->cur_dts;
735 for(; pktl; pktl= pktl->next){
736 if(pktl->pkt.stream_index != stream_index)
738 //FIXME think more about this check
739 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
740 pktl->pkt.pts += st->first_dts;
742 if(pktl->pkt.dts != AV_NOPTS_VALUE)
743 pktl->pkt.dts += st->first_dts;
745 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
746 st->start_time= pktl->pkt.pts;
/* fall back to the triggering packet's pts as the stream start */
748 if (st->start_time == AV_NOPTS_VALUE)
749 st->start_time = pts;
/**
 * Backfill missing dts/pts/duration on already-buffered packets of the
 * stream, assuming a constant packet duration equal to pkt->duration.
 */
752 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
754 AVPacketList *pktl= s->packet_buffer;
757 if(st->first_dts != AV_NOPTS_VALUE){
758 cur_dts= st->first_dts;
/* walk backwards in time over untimed buffered packets to find the start */
759 for(; pktl; pktl= pktl->next){
760 if(pktl->pkt.stream_index == pkt->stream_index){
761 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
763 cur_dts -= pkt->duration;
766 pktl= s->packet_buffer;
767 st->first_dts = cur_dts;
768 }else if(st->cur_dts)
/* assign interpolated timestamps to every untimed buffered packet */
771 for(; pktl; pktl= pktl->next){
772 if(pktl->pkt.stream_index != pkt->stream_index)
774 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
775    && !pktl->pkt.duration){
776 pktl->pkt.dts= cur_dts;
/* pts can only mirror dts when the codec has no B-frame reordering */
777 if(!st->codec->has_b_frames)
778 pktl->pkt.pts= cur_dts;
779 cur_dts += pkt->duration;
780 pktl->pkt.duration= pkt->duration;
784 if(st->first_dts == AV_NOPTS_VALUE)
785 st->cur_dts= cur_dts;
/**
 * Fill in missing pts/dts/duration of @p pkt using the parser state,
 * the stream's frame duration, and the previously seen timestamps.
 * This is the central timestamp-interpolation routine of the demuxing path.
 */
788 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
789                                AVCodecParserContext *pc, AVPacket *pkt)
791 int num, den, presentation_delayed, delay, i;
794 /* do we have a video B-frame ? */
795 delay= st->codec->has_b_frames;
796 presentation_delayed = 0;
797 /* XXX: need has_b_frame, but cannot get it if the codec is
800        pc && pc->pict_type != FF_B_TYPE)
801 presentation_delayed = 1;
/* undo a timestamp wraparound when dts jumped above pts */
803 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
804 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
805 pkt->dts -= 1LL<<st->pts_wrap_bits;
808 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
809 // we take the conservative approach and discard both
810 // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
811 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
812 av_log(s, AV_LOG_WARNING, "invalid dts/pts combination\n");
813 pkt->dts= pkt->pts= AV_NOPTS_VALUE;
/* derive a duration from the stream's nominal frame rate if missing */
816 if (pkt->duration == 0) {
817 compute_frame_duration(&num, &den, st, pc, pkt);
819 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
821 if(pkt->duration != 0 && s->packet_buffer)
822 update_initial_durations(s, st, pkt);
826 /* correct timestamps with byte offset if demuxers only have timestamps
827    on packet boundaries */
828 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
829 /* this will estimate bitrate based on this frame's duration and size */
830 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
831 if(pkt->pts != AV_NOPTS_VALUE)
833 if(pkt->dts != AV_NOPTS_VALUE)
837 if (pc && pc->dts_sync_point >= 0) {
838 // we have synchronization info from the parser
839 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
841 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
842 if (pkt->dts != AV_NOPTS_VALUE) {
843 // got DTS from the stream, update reference timestamp
844 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
845 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
846 } else if (st->reference_dts != AV_NOPTS_VALUE) {
847 // compute DTS based on reference timestamp
848 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
849 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
851 if (pc->dts_sync_point > 0)
852 st->reference_dts = pkt->dts; // new reference
856 /* This may be redundant, but it should not hurt. */
857 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
858 presentation_delayed = 1;
860 //    av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
861 /* interpolate PTS and DTS if they are not present */
862 if(delay==0 || (delay==1 && pc)){
863 if (presentation_delayed) {
864 /* DTS = decompression timestamp */
865 /* PTS = presentation timestamp */
866 if (pkt->dts == AV_NOPTS_VALUE)
867 pkt->dts = st->last_IP_pts;
868 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
869 if (pkt->dts == AV_NOPTS_VALUE)
870 pkt->dts = st->cur_dts;
872 /* this is tricky: the dts must be incremented by the duration
873    of the frame we are displaying, i.e. the last I- or P-frame */
874 if (st->last_IP_duration == 0)
875 st->last_IP_duration = pkt->duration;
876 if(pkt->dts != AV_NOPTS_VALUE)
877 st->cur_dts = pkt->dts + st->last_IP_duration;
878 st->last_IP_duration = pkt->duration;
879 st->last_IP_pts= pkt->pts;
880 /* cannot compute PTS if not present (we can compute it only
881    by knowing the future */
882 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
/* heuristic: nudge pts by one duration if that matches cur_dts better */
883 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
884 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
885 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
886 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
887 pkt->pts += pkt->duration;
888 //            av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
892 /* presentation is not delayed : PTS and DTS are the same */
893 if(pkt->pts == AV_NOPTS_VALUE)
895 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
896 if(pkt->pts == AV_NOPTS_VALUE)
897 pkt->pts = st->cur_dts;
899 if(pkt->pts != AV_NOPTS_VALUE)
900 st->cur_dts = pkt->pts + pkt->duration;
/* B-frame reordering: derive dts as the minimum of recent pts values */
904 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
905 st->pts_buffer[0]= pkt->pts;
906 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
907 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
908 if(pkt->dts == AV_NOPTS_VALUE)
909 pkt->dts= st->pts_buffer[0];
911 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
913 if(pkt->dts > st->cur_dts)
914 st->cur_dts = pkt->dts;
917 //    av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
/* every frame of an intra-only codec is a keyframe */
920 if(is_intra_only(st->codec))
921 pkt->flags |= PKT_FLAG_KEY;
924 /* keyframe computation */
925 if (pc->key_frame == 1)
926 pkt->flags |= PKT_FLAG_KEY;
927 else if (pc->key_frame == -1 && pc->pict_type == FF_I_TYPE)
928 pkt->flags |= PKT_FLAG_KEY;
931 pkt->convergence_duration = pc->convergence_duration;
934 void av_destruct_packet_nofree(AVPacket *pkt)
936 pkt->data = NULL; pkt->size = 0;
/**
 * Produce the next complete frame: either pass a raw packet through, or run
 * the per-stream parser over buffered raw data until a frame is assembled.
 */
939 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
947 /* select current input stream component */
950 if (!st->need_parsing || !st->parser) {
951 /* no parsing needed: we just output the packet as is */
952 /* raw data support */
953 *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
954 compute_pkt_fields(s, st, NULL, pkt);
957 } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
/* feed the remaining raw bytes to the parser */
958 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
959                       st->cur_ptr, st->cur_len,
960                       st->cur_pkt.pts, st->cur_pkt.dts);
/* timestamps are consumed once: do not reuse them for the next parse call */
961 st->cur_pkt.pts = AV_NOPTS_VALUE;
962 st->cur_pkt.dts = AV_NOPTS_VALUE;
963 /* increment read pointer */
967 /* return packet if any */
969 pkt->pos = st->cur_pkt.pos;            // Isn't quite accurate but close.
972 pkt->stream_index = st->index;
973 pkt->pts = st->parser->pts;
974 pkt->dts = st->parser->dts;
975 pkt->destruct = av_destruct_packet_nofree;
976 compute_pkt_fields(s, st, st->parser, pkt);
978 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
979 ff_reduce_index(s, st->index);
980 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
981                    0, 0, AVINDEX_KEYFRAME);
/* current raw packet fully consumed: release it */
988 av_free_packet(&st->cur_pkt);
993 /* read next packet */
994 ret = av_read_packet(s, &cur_pkt);
996 if (ret == AVERROR(EAGAIN))
998 /* return the last frames, if any */
999 for(i = 0; i < s->nb_streams; i++) {
1001 if (st->parser && st->need_parsing) {
/* flush the parser by feeding it an empty buffer */
1002 av_parser_parse(st->parser, st->codec,
1003                 &pkt->data, &pkt->size,
1005                 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
1010 /* no more packets: really terminate parsing */
1013 st = s->streams[cur_pkt.stream_index];
1014 st->cur_pkt= cur_pkt;
/* sanity-check demuxer output: pts must never precede dts */
1016 if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
1017    st->cur_pkt.dts != AV_NOPTS_VALUE &&
1018    st->cur_pkt.pts < st->cur_pkt.dts){
1019 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1020        st->cur_pkt.stream_index,
1024 //      av_free_packet(&st->cur_pkt);
1028 if(s->debug & FF_FDEBUG_TS)
1029 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
1030        st->cur_pkt.stream_index,
1037 st->cur_ptr = st->cur_pkt.data;
1038 st->cur_len = st->cur_pkt.size;
/* lazily create the parser the first time this stream needs one */
1039 if (st->need_parsing && !st->parser) {
1040 st->parser = av_parser_init(st->codec->codec_id);
1042 /* no parser available: just output the raw packets */
1043 st->need_parsing = AVSTREAM_PARSE_NONE;
1044 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
1045 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1047 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
1048 st->parser->next_frame_offset=
1049 st->parser->cur_offset= st->cur_pkt.pos;
1054 if(s->debug & FF_FDEBUG_TS)
1055 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
/**
 * Public frame-reading entry point.  Serves packets from the reorder
 * buffer first; with AVFMT_FLAG_GENPTS it also fills in missing pts
 * values by looking ahead for the matching dts.
 */
1065 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1069 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1072 pktl = s->packet_buffer;
1074 AVPacket *next_pkt= &pktl->pkt;
1076 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
/* scan ahead for a later packet whose dts reveals this packet's pts */
1077 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1078 if(   pktl->pkt.stream_index == next_pkt->stream_index
1079    && next_pkt->dts < pktl->pkt.dts
1080    && pktl->pkt.pts != pktl->pkt.dts //not b frame
1081 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
1082 next_pkt->pts= pktl->pkt.dts;
1086 pktl = s->packet_buffer;
1089 if(   next_pkt->pts != AV_NOPTS_VALUE
1090    || next_pkt->dts == AV_NOPTS_VALUE
1092 /* read packet from packet buffer, if there is data */
1094 s->packet_buffer = pktl->next;
1100 int ret= av_read_frame_internal(s, pkt);
1102 if(pktl && ret != AVERROR(EAGAIN)){
/* genpts mode: keep buffering until pts can be generated */
1109 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1110                                &s->packet_buffer_end)) < 0)
1111 return AVERROR(ENOMEM);
1113 assert(!s->packet_buffer);
1114 return av_read_frame_internal(s, pkt);
1119 /* XXX: suppress the packet queue */
/** Free every packet buffered in the context's packet queue. */
1120 static void flush_packet_queue(AVFormatContext *s)
1125 pktl = s->packet_buffer;
1128 s->packet_buffer = pktl->next;
1129 av_free_packet(&pktl->pkt);
1134 /*******************************************************/

/**
 * Pick a default stream: the first video stream if one exists,
 * otherwise the first audio stream, otherwise stream 0.
 */
1137 int av_find_default_stream_index(AVFormatContext *s)
1139 int first_audio_index = -1;
1143 if (s->nb_streams <= 0)
1145 for(i = 0; i < s->nb_streams; i++) {
1147 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1150 if (first_audio_index < 0 && st->codec->codec_type == CODEC_TYPE_AUDIO)
1151 first_audio_index = i;
1153 return first_audio_index >= 0 ? first_audio_index : 0;
/**
 * Flush the frame reader: drop all buffered packets and reset
 * each stream's parser and timestamp bookkeeping (used on seek).
 */
1159 static void av_read_frame_flush(AVFormatContext *s)
1164 flush_packet_queue(s);
1168 /* for each stream, reset read state */
1169 for(i = 0; i < s->nb_streams; i++) {
1173 av_parser_close(st->parser);
1175 av_free_packet(&st->cur_pkt);
1177 st->last_IP_pts = AV_NOPTS_VALUE;
1178 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1179 st->reference_dts = AV_NOPTS_VALUE;
1186 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1189 for(i = 0; i < s->nb_streams; i++) {
1190 AVStream *st = s->streams[i];
1192 st->cur_dts = av_rescale(timestamp,
1193 st->time_base.den * (int64_t)ref_st->time_base.num,
1194 st->time_base.num * (int64_t)ref_st->time_base.den);
1198 void ff_reduce_index(AVFormatContext *s, int stream_index)
1200 AVStream *st= s->streams[stream_index];
1201 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1203 if((unsigned)st->nb_index_entries >= max_entries){
1205 for(i=0; 2*i<st->nb_index_entries; i++)
1206 st->index_entries[i]= st->index_entries[2*i];
1207 st->nb_index_entries= i;
/**
 * Insert an entry into the stream's seek index, keeping it sorted by
 * timestamp; an existing entry with the same timestamp is updated.
 * Returns the entry's index, or a negative value on allocation failure.
 */
1211 int av_add_index_entry(AVStream *st,
1212                        int64_t pos, int64_t timestamp, int size, int distance, int flags)
1214 AVIndexEntry *entries, *ie;
/* overflow guard before growing the array */
1217 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1220 entries = av_fast_realloc(st->index_entries,
1221                           &st->index_entries_allocated_size,
1222                           (st->nb_index_entries + 1) *
1223                           sizeof(AVIndexEntry));
1227 st->index_entries= entries;
/* find the insertion point; AVSEEK_FLAG_ANY matches non-keyframes too */
1229 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1232 index= st->nb_index_entries++;
1233 ie= &entries[index];
1234 assert(index==0 || ie[-1].timestamp < timestamp);
1236 ie= &entries[index];
1237 if(ie->timestamp != timestamp){
1238 if(ie->timestamp <= timestamp)
/* shift the tail up to make room for the new entry */
1240 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1241 st->nb_index_entries++;
1242 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1243 distance= ie->min_distance;
1247 ie->timestamp = timestamp;
1248 ie->min_distance= distance;
/**
 * Binary-search the stream's index for @p wanted_timestamp.
 * AVSEEK_FLAG_BACKWARD selects the entry at or before the timestamp;
 * without AVSEEK_FLAG_ANY the result is stepped to a keyframe entry.
 * Returns the entry index, or a negative value if none qualifies.
 */
1255 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1258 AVIndexEntry *entries= st->index_entries;
1259 int nb_entries= st->nb_index_entries;
1268 timestamp = entries[m].timestamp;
1269 if(timestamp >= wanted_timestamp)
1271 if(timestamp <= wanted_timestamp)
1274 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1276 if(!(flags & AVSEEK_FLAG_ANY)){
/* walk to the nearest keyframe entry in the requested direction */
1277 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1278 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/**
 * Seek via the demuxer's read_timestamp() using binary search, seeding
 * the search bounds from any cached index entries around target_ts.
 */
1289 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1290 AVInputFormat *avif= s->iformat;
1291 int64_t pos_min, pos_max, pos, pos_limit;
1292 int64_t ts_min, ts_max, ts;
1296 if (stream_index < 0)
1300 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1304 ts_min= AV_NOPTS_VALUE;
1305 pos_limit= -1; //gcc falsely says it may be uninitialized
1307 st= s->streams[stream_index];
1308 if(st->index_entries){
/* seed the lower bound from the closest index entry before target_ts */
1311 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1312 index= FFMAX(index, 0);
1313 e= &st->index_entries[index];
1315 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1317 ts_min= e->timestamp;
1319 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
/* seed the upper bound from the closest index entry after target_ts */
1326 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1327 assert(index < st->nb_index_entries);
1329 e= &st->index_entries[index];
1330 assert(e->timestamp >= target_ts);
1332 ts_max= e->timestamp;
1333 pos_limit= pos_max - e->min_distance;
1335 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1336        pos_max,pos_limit, ts_max);
1341 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1346 url_fseek(s->pb, pos, SEEK_SET);
1348 av_update_cur_dts(s, st, ts);
/**
 * Generic timestamp-based byte-position search: interpolation search with
 * bisection and linear-search fallbacks, using the demuxer-supplied
 * read_timestamp() callback.  Returns the chosen byte position and stores
 * the timestamp found there in *ts_ret.
 */
1353 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1355 int64_t start_pos, filesize;
1359 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
/* establish a lower bound if the caller supplied none */
1362 if(ts_min == AV_NOPTS_VALUE){
1363 pos_min = s->data_offset;
1364 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1365 if (ts_min == AV_NOPTS_VALUE)
/* establish an upper bound by probing backwards from the end of file */
1369 if(ts_max == AV_NOPTS_VALUE){
1371 filesize = url_fsize(s->pb);
1372 pos_max = filesize - 1;
1375 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1377 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1378 if (ts_max == AV_NOPTS_VALUE)
1382 int64_t tmp_pos= pos_max + 1;
1383 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1384 if(tmp_ts == AV_NOPTS_VALUE)
1388 if(tmp_pos >= filesize)
1394 if(ts_min > ts_max){
1396 }else if(ts_min == ts_max){
1401 while (pos_min < pos_limit) {
1403 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1407 assert(pos_limit <= pos_max);
1410 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1411 // interpolate position (better than dichotomy)
1412 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1413     + pos_min - approximate_keyframe_distance;
1414 }else if(no_change==1){
1415 // bisection, if interpolation failed to change min or max pos last time
1416 pos = (pos_min + pos_limit)>>1;
1418 /* linear search if bisection failed, can only happen if there
1419    are very few or no keyframes between min/max */
1424 else if(pos > pos_limit)
1428 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1434 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1436 if(ts == AV_NOPTS_VALUE){
1437 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1440 assert(ts != AV_NOPTS_VALUE);
/* tighten the search interval around the probed timestamp */
1441 if (target_ts <= ts) {
1442 pos_limit = start_pos - 1;
1446 if (target_ts >= ts) {
1452 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1453 ts  = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min  : ts_max;
1456 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1458 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1459 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1460        pos, ts_min, target_ts, ts_max);
/* Seek to an absolute byte position: clamp pos to [data_offset, filesize-1],
 * reposition the ByteIOContext, and refresh the stream's current DTS.
 * NOTE(review): this excerpt is line-sampled; several original lines
 * (declarations, the negative-stream_index branch body, the final return)
 * are missing from view. */
1466 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1467 int64_t pos_min, pos_max;
1471 if (stream_index < 0)
1474 st= s->streams[stream_index];
1477 pos_min = s->data_offset;
1478 pos_max = url_fsize(s->pb) - 1;
/* clamp the requested position into the valid byte range */
1480 if (pos < pos_min) pos= pos_min;
1481 else if(pos > pos_max) pos= pos_max;
1483 url_fseek(s->pb, pos, SEEK_SET);
1486 av_update_cur_dts(s, st, ts);
/* Generic seek fallback: look the target timestamp up in the stream's
 * index table; if it is beyond the indexed range, seek to the last index
 * entry (or file start) and read frames forward until the index grows to
 * cover the target, then seek to the matching entry.
 * NOTE(review): sampled excerpt — loop headers and several branch bodies
 * are missing between the visible lines. */
1491 static int av_seek_frame_generic(AVFormatContext *s,
1492 int stream_index, int64_t timestamp, int flags)
1498 st = s->streams[stream_index];
1500 index = av_index_search_timestamp(st, timestamp, flags);
1502 if(index < 0 || index==st->nb_index_entries-1){
1506 if(st->nb_index_entries){
1507 assert(st->index_entries);
/* resume reading from the last already-indexed position */
1508 ie= &st->index_entries[st->nb_index_entries-1];
1509 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1511 av_update_cur_dts(s, st, ie->timestamp);
1513 if ((ret = url_fseek(s->pb, 0, SEEK_SET)) < 0)
/* read frames (retrying on EAGAIN) until a keyframe past the target shows up */
1519 ret = av_read_frame(s, &pkt);
1520 }while(ret == AVERROR(EAGAIN));
1523 av_free_packet(&pkt);
1524 if(stream_index == pkt.stream_index){
1525 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1529 index = av_index_search_timestamp(st, timestamp, flags);
1534 av_read_frame_flush(s);
1535 if (s->iformat->read_seek){
1536 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1539 ie = &st->index_entries[index];
1540 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1542 av_update_cur_dts(s, st, ie->timestamp);
/* Public seek entry point. Flushes buffered frames, dispatches byte seeks,
 * resolves a default stream (rescaling the AV_TIME_BASE timestamp into that
 * stream's time base), then tries in order: the demuxer's read_seek, binary
 * search via read_timestamp, and finally the generic index-based seek.
 * NOTE(review): sampled excerpt; intermediate lines are missing. */
1547 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1552 av_read_frame_flush(s);
1554 if(flags & AVSEEK_FLAG_BYTE)
1555 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1557 if(stream_index < 0){
1558 stream_index= av_find_default_stream_index(s);
1559 if(stream_index < 0)
1562 st= s->streams[stream_index];
1563 /* timestamp for default must be expressed in AV_TIME_BASE units */
1564 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1567 /* first, we try the format specific seek */
1568 if (s->iformat->read_seek)
1569 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1576 if(s->iformat->read_timestamp)
1577 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1579 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1582 /*******************************************************/
1585 * Returns TRUE if the stream has accurate duration in any stream.
1587 * @return TRUE if the stream has accurate duration for at least one component.
/* Scan all streams; true as soon as one of them has a known duration.
 * NOTE(review): the return statements are among the lines lost in this
 * sampled excerpt. */
1589 static int av_has_duration(AVFormatContext *ic)
1594 for(i = 0;i < ic->nb_streams; i++) {
1595 st = ic->streams[i];
1596 if (st->duration != AV_NOPTS_VALUE)
1603 * Estimate the stream timings from the one of each components.
1605 * Also computes the global bitrate if possible.
/* Derive container-level start_time/duration from the per-stream values
 * (everything rescaled to AV_TIME_BASE), taking the earliest start and the
 * latest end/duration, and compute the global bitrate from file size and
 * duration when both are known.
 * NOTE(review): sampled excerpt; some closing braces/lines are missing. */
1607 static void av_update_stream_timings(AVFormatContext *ic)
1609 int64_t start_time, start_time1, end_time, end_time1;
1610 int64_t duration, duration1;
/* sentinels: updated only when at least one stream provides a value */
1614 start_time = INT64_MAX;
1615 end_time = INT64_MIN;
1616 duration = INT64_MIN;
1617 for(i = 0;i < ic->nb_streams; i++) {
1618 st = ic->streams[i];
1619 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1620 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1621 if (start_time1 < start_time)
1622 start_time = start_time1;
1623 if (st->duration != AV_NOPTS_VALUE) {
1624 end_time1 = start_time1
1625 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1626 if (end_time1 > end_time)
1627 end_time = end_time1;
1630 if (st->duration != AV_NOPTS_VALUE) {
1631 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1632 if (duration1 > duration)
1633 duration = duration1;
1636 if (start_time != INT64_MAX) {
1637 ic->start_time = start_time;
1638 if (end_time != INT64_MIN) {
/* span of all streams may exceed any single stream's duration */
1639 if (end_time - start_time > duration)
1640 duration = end_time - start_time;
1643 if (duration != INT64_MIN) {
1644 ic->duration = duration;
1645 if (ic->file_size > 0) {
1646 /* compute the bitrate */
1647 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1648 (double)ic->duration;
/* Propagate container-level start_time/duration down to any stream that
 * does not have its own, rescaling from AV_TIME_BASE into the stream's
 * time base. NOTE(review): sampled excerpt. */
1653 static void fill_all_stream_timings(AVFormatContext *ic)
1658 av_update_stream_timings(ic);
1659 for(i = 0;i < ic->nb_streams; i++) {
1660 st = ic->streams[i];
1661 if (st->start_time == AV_NOPTS_VALUE) {
1662 if(ic->start_time != AV_NOPTS_VALUE)
1663 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1664 if(ic->duration != AV_NOPTS_VALUE)
1665 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/* Fallback timing estimate: sum the per-stream codec bitrates into the
 * container bitrate if unset, then derive each stream's duration from
 * 8*filesize/bit_rate when no better value exists.
 * NOTE(review): sampled excerpt; some lines are missing between those shown. */
1670 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1672 int64_t filesize, duration;
1676 /* if bit_rate is already set, we believe it */
1677 if (ic->bit_rate == 0) {
1679 for(i=0;i<ic->nb_streams;i++) {
1680 st = ic->streams[i];
1681 bit_rate += st->codec->bit_rate;
1683 ic->bit_rate = bit_rate;
1686 /* if duration is already set, we believe it */
1687 if (ic->duration == AV_NOPTS_VALUE &&
1688 ic->bit_rate != 0 &&
1689 ic->file_size != 0) {
1690 filesize = ic->file_size;
1692 for(i = 0; i < ic->nb_streams; i++) {
1693 st = ic->streams[i];
/* duration = 8*filesize/bit_rate, expressed in the stream's time base */
1694 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1695 if (st->duration == AV_NOPTS_VALUE)
1696 st->duration = duration;
1702 #define DURATION_MAX_READ_SIZE 250000
1704 /* only usable for MPEG-PS streams */
/* Estimate start times and durations by reading real packets: scan up to
 * DURATION_MAX_READ_SIZE bytes from the file head for first PTS values,
 * then from near the file tail for last PTS values, and take the
 * difference as each stream's duration. Restores the original file
 * position afterwards. Comment above says: only usable for MPEG-PS.
 * NOTE(review): sampled excerpt — loop headers, 'break's and brace lines
 * are missing between the visible lines. */
1705 static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1707 AVPacket pkt1, *pkt = &pkt1;
1709 int read_size, i, ret;
1711 int64_t filesize, offset, duration;
1715 /* flush packet queue */
1716 flush_packet_queue(ic);
1718 for(i=0;i<ic->nb_streams;i++) {
1719 st = ic->streams[i];
1721 av_parser_close(st->parser);
1723 av_free_packet(&st->cur_pkt);
1727 /* we read the first packets to get the first PTS (not fully
1728 accurate, but it is enough now) */
1729 url_fseek(ic->pb, 0, SEEK_SET);
1732 if (read_size >= DURATION_MAX_READ_SIZE)
1734 /* if all info is available, we can stop */
1735 for(i = 0;i < ic->nb_streams; i++) {
1736 st = ic->streams[i];
1737 if (st->start_time == AV_NOPTS_VALUE)
1740 if (i == ic->nb_streams)
1744 ret = av_read_packet(ic, pkt);
1745 }while(ret == AVERROR(EAGAIN));
1748 read_size += pkt->size;
1749 st = ic->streams[pkt->stream_index];
1750 if (pkt->pts != AV_NOPTS_VALUE) {
1751 if (st->start_time == AV_NOPTS_VALUE)
1752 st->start_time = pkt->pts;
1754 av_free_packet(pkt);
1757 /* estimate the end time (duration) */
1758 /* XXX: may need to support wrapping */
1759 filesize = ic->file_size;
1760 offset = filesize - DURATION_MAX_READ_SIZE;
1764 url_fseek(ic->pb, offset, SEEK_SET);
1767 if (read_size >= DURATION_MAX_READ_SIZE)
1771 ret = av_read_packet(ic, pkt);
1772 }while(ret == AVERROR(EAGAIN));
1775 read_size += pkt->size;
1776 st = ic->streams[pkt->stream_index];
1777 if (pkt->pts != AV_NOPTS_VALUE &&
1778 st->start_time != AV_NOPTS_VALUE) {
/* duration = last seen PTS minus the stream's first PTS */
1779 end_time = pkt->pts;
1780 duration = end_time - st->start_time;
1782 if (st->duration == AV_NOPTS_VALUE ||
1783 st->duration < duration)
1784 st->duration = duration;
1787 av_free_packet(pkt);
1790 fill_all_stream_timings(ic);
/* undo the probing: restore file position and per-stream DTS state */
1792 url_fseek(ic->pb, old_offset, SEEK_SET);
1793 for(i=0; i<ic->nb_streams; i++){
1795 st->cur_dts= st->first_dts;
1796 st->last_IP_pts = AV_NOPTS_VALUE;
/* Choose a timing-estimation strategy: exact PTS scan for seekable
 * mpeg/mpegts files, per-stream propagation if any stream already has a
 * duration, else the bitrate-based estimate. The trailing printf section
 * dumps the result (appears to be debug-only; surrounding #ifdef lines
 * are missing from this sampled excerpt). */
1800 static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset)
1804 /* get the file size, if possible */
1805 if (ic->iformat->flags & AVFMT_NOFILE) {
1808 file_size = url_fsize(ic->pb);
1812 ic->file_size = file_size;
1814 if ((!strcmp(ic->iformat->name, "mpeg") ||
1815 !strcmp(ic->iformat->name, "mpegts")) &&
1816 file_size && !url_is_streamed(ic->pb)) {
1817 /* get accurate estimate from the PTSes */
1818 av_estimate_timings_from_pts(ic, old_offset);
1819 } else if (av_has_duration(ic)) {
1820 /* at least one component has timings - we use them for all
1822 fill_all_stream_timings(ic);
1824 /* less precise: use bitrate info */
1825 av_estimate_timings_from_bit_rate(ic);
1827 av_update_stream_timings(ic);
1833 for(i = 0;i < ic->nb_streams; i++) {
1834 st = ic->streams[i];
1835 printf("%d: start_time: %0.3f duration: %0.3f\n",
1836 i, (double)st->start_time / AV_TIME_BASE,
1837 (double)st->duration / AV_TIME_BASE);
1839 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1840 (double)ic->start_time / AV_TIME_BASE,
1841 (double)ic->duration / AV_TIME_BASE,
1842 ic->bit_rate / 1000);
/* True when the codec context carries enough parameters to be usable:
 * audio needs sample_rate/channels/sample_fmt (plus frame_size for
 * Vorbis/AAC, per the extra check), video needs width and pix_fmt; a
 * valid codec_id is required in all cases.
 * NOTE(review): sampled excerpt; break/default lines are missing. */
1847 static int has_codec_parameters(AVCodecContext *enc)
1850 switch(enc->codec_type) {
1851 case CODEC_TYPE_AUDIO:
1852 val = enc->sample_rate && enc->channels && enc->sample_fmt != SAMPLE_FMT_NONE;
1853 if(!enc->frame_size &&
1854 (enc->codec_id == CODEC_ID_VORBIS ||
1855 enc->codec_id == CODEC_ID_AAC))
1858 case CODEC_TYPE_VIDEO:
1859 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1865 return enc->codec_id != CODEC_ID_NONE && val != 0;
/* Open the stream's decoder if needed and decode one frame, solely to
 * fill in missing codec parameters (width, sample format, ...).
 * NOTE(review): sampled excerpt — error handling, the audio-buffer free
 * and the return path are missing from view. */
1868 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1872 int got_picture, data_size, ret=0;
1875 if(!st->codec->codec){
1876 codec = avcodec_find_decoder(st->codec->codec_id);
1879 ret = avcodec_open(st->codec, codec);
1884 if(!has_codec_parameters(st->codec)){
1885 switch(st->codec->codec_type) {
1886 case CODEC_TYPE_VIDEO:
1887 ret = avcodec_decode_video(st->codec, &picture,
1888 &got_picture, data, size);
1890 case CODEC_TYPE_AUDIO:
/* temporary PCM buffer sized for the worst case */
1891 data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1892 samples = av_malloc(data_size);
1895 ret = avcodec_decode_audio2(st->codec, samples,
1896 &data_size, data, size);
/* Look up the container tag (fourcc) for a codec id in a tag table.
 * NOTE(review): only the loop head survives in this sampled excerpt;
 * the match/return lines are missing. */
1907 unsigned int codec_get_tag(const AVCodecTag *tags, int id)
1909 while (tags->id != CODEC_ID_NONE) {
/* Reverse lookup: map a container tag to a codec id. First pass matches
 * the tag exactly; second pass retries case-insensitively byte-by-byte
 * (fourcc tags often differ only in case). */
1917 enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag)
1920 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1921 if(tag == tags[i].tag)
1924 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1925 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1926 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1927 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1928 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1931 return CODEC_ID_NONE;
/* Search a NULL-terminated list of tag tables for the first tag matching
 * the codec id. NOTE(review): the return lines are missing from this
 * sampled excerpt. */
1934 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
1937 for(i=0; tags && tags[i]; i++){
1938 int tag= codec_get_tag(tags[i], id);
/* Search a NULL-terminated list of tag tables for the first codec id
 * matching the given container tag. */
1944 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
1947 for(i=0; tags && tags[i]; i++){
1948 enum CodecID id= codec_get_id(tags[i], tag);
1949 if(id!=CODEC_ID_NONE) return id;
1951 return CODEC_ID_NONE;
/* Fill in missing chapter end times: each open-ended chapter ends where
 * the next one starts (chapters are asserted sorted and to share a time
 * base); the final chapter ends at container start_time + duration,
 * rescaled into the chapter's time base. */
1954 static void compute_chapters_end(AVFormatContext *s)
1958 for (i=0; i+1<s->nb_chapters; i++)
1959 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
1960 assert(s->chapters[i]->start <= s->chapters[i+1]->start);
1961 assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
1962 s->chapters[i]->end = s->chapters[i+1]->start;
1965 if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
1966 assert(s->start_time != AV_NOPTS_VALUE);
1967 assert(s->duration > 0);
1968 s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
1970 s->chapters[i]->time_base);
1974 /* absolute maximum size we read until we abort */
1975 #define MAX_READ_SIZE 5000000
1977 #define MAX_STD_TIMEBASES (60*12+5)
/**
 * Map an index onto the set of standard frame-rate "ticks" used by the
 * frame-rate guessing code.  Indices below 60*12 scale linearly by 1001
 * (covering fractional NTSC-style timebases in 1/12 fps steps); the
 * remaining five indices select the exact rates 24/30/60/12/15 fps,
 * scaled by 1000*12.
 */
static int get_std_framerate(int i)
{
    static const int exact_fps[] = { 24, 30, 60, 12, 15 };

    if (i < 60 * 12)
        return i * 1001;
    return exact_fps[i - 60 * 12] * 1000 * 12;
}
1984 * Is the time base unreliable.
1985 * This is a heuristic to balance between quick acceptance of the values in
1986 * the headers vs. some extra checks.
1987 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
1988 * MPEG-2 commonly misuses field repeat flags to store different framerates.
1989 * And there are "variable" fps files this needs to detect as well.
/* Heuristic (documented in the comment block above): a time base is
 * considered unreliable when it is implausibly coarse (>= 101x num) or
 * implausibly fine (< 5x num), or when the codec is MPEG-2/H.264 (which
 * commonly misreport frame rates).
 * NOTE(review): the closing ')' and return statements of this condition
 * are missing from this sampled excerpt. */
1991 static int tb_unreliable(AVCodecContext *c){
1992 if( c->time_base.den >= 101L*c->time_base.num
1993 || c->time_base.den < 5L*c->time_base.num
1994 /* || c->codec_tag == AV_RL32("DIVX")
1995 || c->codec_tag == AV_RL32("XVID")*/
1996 || c->codec_id == CODEC_ID_MPEG2VIDEO
1997 || c->codec_id == CODEC_ID_H264
/* Probe the input by reading packets (buffered for later delivery) until
 * every stream has usable codec parameters, a frame-rate estimate, and
 * extradata, or until MAX_READ_SIZE / max_analyze_duration is exceeded.
 * Afterwards: close probe decoders, derive r_frame_rate from the observed
 * inter-packet durations (GCD + least-squares fit against standard rates),
 * estimate timings, compute chapter ends, and fix up negative DTS for
 * B-frame streams.
 * NOTE(review): this is a line-sampled excerpt; many original lines
 * (loop heads, 'break's, braces) are missing between the lines shown. */
2003 int av_find_stream_info(AVFormatContext *ic)
2005 int i, count, ret, read_size, j;
2007 AVPacket pkt1, *pkt;
2008 int64_t last_dts[MAX_STREAMS];
2009 int64_t duration_gcd[MAX_STREAMS]={0};
2010 int duration_count[MAX_STREAMS]={0};
2011 double (*duration_error)[MAX_STD_TIMEBASES];
2012 int64_t old_offset = url_ftell(ic->pb);
2013 int64_t codec_info_duration[MAX_STREAMS]={0};
2014 int codec_info_nb_frames[MAX_STREAMS]={0};
/* heap-allocated: MAX_STREAMS*MAX_STD_TIMEBASES doubles would be large on the stack */
2016 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
2017 if (!duration_error) return AVERROR(ENOMEM);
2019 for(i=0;i<ic->nb_streams;i++) {
2020 st = ic->streams[i];
2021 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2022 /* if(!st->time_base.num)
2024 if(!st->codec->time_base.num)
2025 st->codec->time_base= st->time_base;
2027 //only for the split stuff
2029 st->parser = av_parser_init(st->codec->codec_id);
2030 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2031 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2036 for(i=0;i<MAX_STREAMS;i++){
2037 last_dts[i]= AV_NOPTS_VALUE;
2043 if(url_interrupt_cb()){
2044 ret= AVERROR(EINTR);
2048 /* check if one codec still needs to be handled */
2049 for(i=0;i<ic->nb_streams;i++) {
2050 st = ic->streams[i];
2051 if (!has_codec_parameters(st->codec))
2053 /* variable fps and no guess at the real fps */
2054 if( tb_unreliable(st->codec)
2055 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
2057 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2059 if(st->first_dts == AV_NOPTS_VALUE)
2062 if (i == ic->nb_streams) {
2063 /* NOTE: if the format has no header, then we need to read
2064 some packets to get most of the streams, so we cannot
2066 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2067 /* if we found the info for all the codecs, we can stop */
2072 /* we did not get all the codec info, but we read too much data */
2073 if (read_size >= MAX_READ_SIZE) {
2078 /* NOTE: a new stream can be added there if no header in file
2079 (AVFMTCTX_NOHEADER) */
2080 ret = av_read_frame_internal(ic, &pkt1);
2081 if(ret == AVERROR(EAGAIN))
2085 ret = -1; /* we could not have all the codec parameters before EOF */
2086 for(i=0;i<ic->nb_streams;i++) {
2087 st = ic->streams[i];
2088 if (!has_codec_parameters(st->codec)){
2090 avcodec_string(buf, sizeof(buf), st->codec, 0);
2091 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
/* keep the probed packet so av_read_frame() can deliver it later */
2099 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2100 if(av_dup_packet(pkt) < 0) {
2101 av_free(duration_error);
2102 return AVERROR(ENOMEM);
2105 read_size += pkt->size;
2107 st = ic->streams[pkt->stream_index];
2108 if(codec_info_nb_frames[st->index]>1)
2109 codec_info_duration[st->index] += pkt->duration;
2110 if (pkt->duration != 0)
2111 codec_info_nb_frames[st->index]++;
2114 int index= pkt->stream_index;
2115 int64_t last= last_dts[index];
2116 int64_t duration= pkt->dts - last;
2118 if (st->codec->ticks_per_frame == 2 &&
2120 st->parser->repeat_pict == 0)
2121 duration *= 2; // two fields are needed per frame
2123 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2124 double dur= duration * av_q2d(st->time_base);
2126 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2127 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2128 if(duration_count[index] < 2)
2129 memset(duration_error[index], 0, sizeof(*duration_error));
/* accumulate squared error of the observed duration against each standard rate */
2130 for(i=1; i<MAX_STD_TIMEBASES; i++){
2131 int framerate= get_std_framerate(i);
2132 int ticks= lrintf(dur*framerate/(1001*12));
2133 double error= dur - ticks*1001*12/(double)framerate;
2134 duration_error[index][i] += error*error;
2136 duration_count[index]++;
2137 // ignore the first 4 values, they might have some random jitter
2138 if (duration_count[index] > 3)
2139 duration_gcd[index] = av_gcd(duration_gcd[index], duration);
2141 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
2142 last_dts[pkt->stream_index]= pkt->dts;
2144 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2145 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2147 st->codec->extradata_size= i;
2148 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2149 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2150 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2154 /* if still no information, we try to open the codec and to
2155 decompress the frame. We try to avoid that in most cases as
2156 it takes longer and uses more memory. For MPEG-4, we need to
2157 decompress for QuickTime. */
2158 if (!has_codec_parameters(st->codec) /*&&
2159 (st->codec->codec_id == CODEC_ID_FLV1 ||
2160 st->codec->codec_id == CODEC_ID_H264 ||
2161 st->codec->codec_id == CODEC_ID_H263 ||
2162 st->codec->codec_id == CODEC_ID_H261 ||
2163 st->codec->codec_id == CODEC_ID_VORBIS ||
2164 st->codec->codec_id == CODEC_ID_MJPEG ||
2165 st->codec->codec_id == CODEC_ID_PNG ||
2166 st->codec->codec_id == CODEC_ID_PAM ||
2167 st->codec->codec_id == CODEC_ID_PGM ||
2168 st->codec->codec_id == CODEC_ID_PGMYUV ||
2169 st->codec->codec_id == CODEC_ID_PBM ||
2170 st->codec->codec_id == CODEC_ID_PPM ||
2171 st->codec->codec_id == CODEC_ID_SHORTEN ||
2172 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
2173 try_decode_frame(st, pkt->data, pkt->size);
2175 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2181 // close codecs which were opened in try_decode_frame()
2182 for(i=0;i<ic->nb_streams;i++) {
2183 st = ic->streams[i];
2184 if(st->codec->codec)
2185 avcodec_close(st->codec);
2187 for(i=0;i<ic->nb_streams;i++) {
2188 st = ic->streams[i];
2189 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2190 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample)
2191 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2193 // the check for tb_unreliable() is not completely correct, since this is not about handling
2194 // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2195 // ipmovie.c produces.
2196 if (tb_unreliable(st->codec) && duration_count[i] > 15 && duration_gcd[i] > 1)
2197 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * duration_gcd[i], INT_MAX);
2198 if(duration_count[i]
2199 && tb_unreliable(st->codec) /*&&
2200 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2201 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
2203 double best_error= 2*av_q2d(st->time_base);
2204 best_error= best_error*best_error*duration_count[i]*1000*12*30;
/* pick the standard rate with the lowest accumulated squared error */
2206 for(j=1; j<MAX_STD_TIMEBASES; j++){
2207 double error= duration_error[i][j] * get_std_framerate(j);
2208 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2209 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2210 if(error < best_error){
2212 num = get_std_framerate(j);
2215 // do not increase frame rate by more than 1 % in order to match a standard rate.
2216 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2217 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2220 if (!st->r_frame_rate.num){
2221 if( st->codec->time_base.den * (int64_t)st->time_base.num
2222 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2223 st->r_frame_rate.num = st->codec->time_base.den;
2224 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2226 st->r_frame_rate.num = st->time_base.den;
2227 st->r_frame_rate.den = st->time_base.num;
2230 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
2231 if(!st->codec->bits_per_coded_sample)
2232 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2236 av_estimate_timings(ic, old_offset);
2238 compute_chapters_end(ic);
2241 /* correct DTS for B-frame streams with no timestamps */
2242 for(i=0;i<ic->nb_streams;i++) {
2243 st = ic->streams[i];
2244 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2246 ppktl = &ic->packet_buffer;
2248 if(ppkt1->stream_index != i)
2250 if(ppkt1->pkt->dts < 0)
2252 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2254 ppkt1->pkt->dts -= delta;
2259 st->cur_dts -= delta;
2265 av_free(duration_error);
2270 /*******************************************************/
/* Resume playback: prefer the demuxer's read_play hook, otherwise unpause
 * the underlying protocol; ENOSYS when neither path applies.
 * NOTE(review): the #if/else lines around the fallback are missing from
 * this sampled excerpt. */
2272 int av_read_play(AVFormatContext *s)
2274 if (s->iformat->read_play)
2275 return s->iformat->read_play(s);
2277 return av_url_read_fpause(s->pb, 0);
2278 return AVERROR(ENOSYS);
/* Pause playback: mirror of av_read_play() with pause flag 1.
 * NOTE(review): the #if/else lines around the fallback are missing from
 * this sampled excerpt. */
2281 int av_read_pause(AVFormatContext *s)
2283 if (s->iformat->read_pause)
2284 return s->iformat->read_pause(s);
2286 return av_url_read_fpause(s->pb, 1);
2287 return AVERROR(ENOSYS);
/* Tear down a demuxing context without touching the I/O context: close
 * the demuxer, free every stream's parser/packet/index/metadata/codec
 * extradata, then programs, chapters, the queued packets, private data
 * and container metadata.
 * NOTE(review): sampled excerpt; some free calls and braces are missing
 * between the lines shown. */
2290 void av_close_input_stream(AVFormatContext *s)
2295 if (s->iformat->read_close)
2296 s->iformat->read_close(s);
2297 for(i=0;i<s->nb_streams;i++) {
2298 /* free all data in a stream component */
2301 av_parser_close(st->parser);
2302 av_free_packet(&st->cur_pkt);
2304 av_metadata_free(&st->metadata);
2305 av_free(st->index_entries);
2306 av_free(st->codec->extradata);
2308 av_free(st->filename);
2309 av_free(st->priv_data);
2312 for(i=s->nb_programs-1; i>=0; i--) {
2313 av_freep(&s->programs[i]->provider_name);
2314 av_freep(&s->programs[i]->name);
2315 av_metadata_free(&s->programs[i]->metadata);
2316 av_freep(&s->programs[i]->stream_index);
2317 av_freep(&s->programs[i]);
2319 av_freep(&s->programs);
2320 flush_packet_queue(s);
2321 av_freep(&s->priv_data);
2322 while(s->nb_chapters--) {
2323 av_free(s->chapters[s->nb_chapters]->title);
2324 av_metadata_free(&s->chapters[s->nb_chapters]->metadata);
2325 av_free(s->chapters[s->nb_chapters]);
2327 av_freep(&s->chapters);
2328 av_metadata_free(&s->metadata);
/* Full close: capture pb first (NULL for AVFMT_NOFILE formats) so the
 * byte I/O context can be closed after the stream context is freed.
 * NOTE(review): the url_fclose call is among the missing sampled lines. */
2334 void av_close_input_file(AVFormatContext *s)
2335 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2336 av_close_input_stream(s);
/* Allocate and register a new AVStream: zeroed codec context, NOPTS
 * timing fields, MPEG-like default pts (33 bits, 1/90000), cleared
 * reorder buffer. Fails when MAX_STREAMS is reached.
 * NOTE(review): sampled excerpt; NULL checks/returns are missing. */
2340 AVStream *av_new_stream(AVFormatContext *s, int id)
2345 if (s->nb_streams >= MAX_STREAMS)
2348 st = av_mallocz(sizeof(AVStream));
2352 st->codec= avcodec_alloc_context();
2354 /* no default bitrate if decoding */
2355 st->codec->bit_rate = 0;
2357 st->index = s->nb_streams;
2359 st->start_time = AV_NOPTS_VALUE;
2360 st->duration = AV_NOPTS_VALUE;
2361 /* we set the current DTS to 0 so that formats without any timestamps
2362 but durations get some timestamps, formats with some unknown
2363 timestamps have their first few packets buffered and the
2364 timestamps corrected before they are returned to the user */
2366 st->first_dts = AV_NOPTS_VALUE;
2368 /* default pts setting is MPEG-like */
2369 av_set_pts_info(st, 33, 1, 90000);
2370 st->last_IP_pts = AV_NOPTS_VALUE;
2371 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2372 st->pts_buffer[i]= AV_NOPTS_VALUE;
2373 st->reference_dts = AV_NOPTS_VALUE;
2375 st->sample_aspect_ratio = (AVRational){0,1};
2377 s->streams[s->nb_streams++] = st;
/* Return the program with the given id, creating and registering a new
 * one (discard = AVDISCARD_NONE) if no match exists.
 * NOTE(review): sampled excerpt; NULL check and id assignment/return are
 * missing from view. */
2381 AVProgram *av_new_program(AVFormatContext *ac, int id)
2383 AVProgram *program=NULL;
2387 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
2390 for(i=0; i<ac->nb_programs; i++)
2391 if(ac->programs[i]->id == id)
2392 program = ac->programs[i];
2395 program = av_mallocz(sizeof(AVProgram));
2398 dynarray_add(&ac->programs, &ac->nb_programs, program);
2399 program->discard = AVDISCARD_NONE;
/* Find-or-create a chapter by id and (re)set its title, time base and
 * start; the previous title is freed before the new copy is stored.
 * NOTE(review): sampled excerpt; NULL check, end assignment and return
 * are missing from view. */
2406 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2408 AVChapter *chapter = NULL;
2411 for(i=0; i<s->nb_chapters; i++)
2412 if(s->chapters[i]->id == id)
2413 chapter = s->chapters[i];
2416 chapter= av_mallocz(sizeof(AVChapter));
2419 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2421 av_free(chapter->title);
2422 chapter->title = av_strdup(title);
2424 chapter->time_base= time_base;
2425 chapter->start = start;
2431 /************************************************************/
2432 /* output media file */
/* Muxer setup: allocate the output format's private data (if any) and
 * forward the AVFormatParameters to its set_parameters hook.
 * NOTE(review): sampled excerpt; the final return is missing. */
2434 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2438 if (s->oformat->priv_data_size > 0) {
2439 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2441 return AVERROR(ENOMEM);
2443 s->priv_data = NULL;
2445 if (s->oformat->set_parameters) {
2446 ret = s->oformat->set_parameters(s, ap);
/* Validate stream parameters (sample rate, time base, dimensions, aspect
 * ratio, codec tags, global-header flags), allocate muxer private data
 * if still missing, call the muxer's write_header, then seed per-stream
 * fractional PTS generators.
 * NOTE(review): sampled excerpt; error returns, 'break's and braces are
 * missing between the lines shown. */
2453 int av_write_header(AVFormatContext *s)
2458 // some sanity checks
2459 for(i=0;i<s->nb_streams;i++) {
2462 switch (st->codec->codec_type) {
2463 case CODEC_TYPE_AUDIO:
2464 if(st->codec->sample_rate<=0){
2465 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2468 if(!st->codec->block_align)
2469 st->codec->block_align = st->codec->channels *
2470 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2472 case CODEC_TYPE_VIDEO:
2473 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2474 av_log(s, AV_LOG_ERROR, "time base not set\n");
2477 if(st->codec->width<=0 || st->codec->height<=0){
2478 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2481 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2482 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
2488 if(s->oformat->codec_tag){
2489 if(st->codec->codec_tag){
2491 //check that tag + id is in the table
2492 //if neither is in the table -> OK
2493 //if tag is in the table with another id -> FAIL
2494 //if id is in the table with another tag -> FAIL unless strict < ?
2496 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2499 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
2500 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
2501 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
2504 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2505 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2507 return AVERROR(ENOMEM);
2510 #if LIBAVFORMAT_VERSION_MAJOR < 53
2511 ff_metadata_mux_compat(s);
2514 if(s->oformat->write_header){
2515 ret = s->oformat->write_header(s);
2520 /* init PTS generation */
2521 for(i=0;i<s->nb_streams;i++) {
2522 int64_t den = AV_NOPTS_VALUE;
2525 switch (st->codec->codec_type) {
2526 case CODEC_TYPE_AUDIO:
2527 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2529 case CODEC_TYPE_VIDEO:
2530 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2535 if (den != AV_NOPTS_VALUE) {
2537 return AVERROR_INVALIDDATA;
2538 av_frac_init(&st->pts, 0, 0, den);
2544 //FIXME merge with compute_pkt_fields
/* Muxer-side timestamp bookkeeping: fill in missing duration/pts/dts,
 * reorder pts through the delay-sized pts_buffer to derive dts, enforce
 * monotone dts and pts >= dts, then advance the per-stream fractional
 * PTS counter by one frame (video) or frame_size samples (audio).
 * NOTE(review): sampled excerpt; several error returns and braces are
 * missing between the lines shown. */
2545 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2546 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2547 int num, den, frame_size, i;
2549 // av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2551 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2554 /* duration field */
2555 if (pkt->duration == 0) {
2556 compute_frame_duration(&num, &den, st, NULL, pkt);
2558 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
2562 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2565 //XXX/FIXME this is a temporary hack until all encoders output pts
2566 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2568 // pkt->pts= st->cur_dts;
2569 pkt->pts= st->pts.val;
2572 //calculate dts from pts
2573 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
2574 st->pts_buffer[0]= pkt->pts;
2575 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2576 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
2577 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2578 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2580 pkt->dts= st->pts_buffer[0];
2583 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2584 av_log(st->codec, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2587 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2588 av_log(st->codec, AV_LOG_ERROR, "error, pts < dts\n");
2592 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2593 st->cur_dts= pkt->dts;
2594 st->pts.val= pkt->dts;
2597 switch (st->codec->codec_type) {
2598 case CODEC_TYPE_AUDIO:
2599 frame_size = get_audio_frame_size(st->codec, pkt->size);
2601 /* HACK/FIXME, we skip the initial 0 size packets as they are most
2602 likely equal to the encoder delay, but it would be better if we
2603 had the real timestamps from the encoder */
2604 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2605 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2608 case CODEC_TYPE_VIDEO:
2609 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
/* Write one packet without interleaving: fix up its timestamp fields
 * (errors are fatal only for formats that require timestamps), pass it
 * to the muxer, and surface any byte-I/O error.
 * NOTE(review): sampled excerpt; some lines are missing. */
2617 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2619 int ret = compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2621 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2624 ret= s->oformat->write_packet(s, pkt);
2626 ret= url_ferror(s->pb);
/* Insert a packet into the sorted interleaving buffer at the position
 * chosen by 'compare'. Non-shared packets transfer ownership (destruct
 * cleared on the source); shared ones are duplicated.
 * NOTE(review): sampled excerpt; the loop head around the comparison is
 * missing. */
2630 void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
2631 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
2633 AVPacketList **next_point, *this_pktl;
2635 this_pktl = av_mallocz(sizeof(AVPacketList));
2636 this_pktl->pkt= *pkt;
2637 if(pkt->destruct == av_destruct_packet)
2638 pkt->destruct= NULL; // not shared -> must keep original from being freed
2640 av_dup_packet(&this_pktl->pkt); //shared -> must dup
/* walk the list until the first buffered packet that should come after pkt */
2642 next_point = &s->packet_buffer;
2644 if(compare(s, &(*next_point)->pkt, pkt))
2646 next_point= &(*next_point)->next;
2648 this_pktl->next= *next_point;
2649 *next_point= this_pktl;
/* DTS-order comparator for interleaving: cross-multiplies the two
 * streams' time bases so DTS values can be compared without rescaling
 * (the FIXME notes the multiplication can overflow). */
2652 int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
2654 AVStream *st = s->streams[ pkt ->stream_index];
2655 AVStream *st2= s->streams[ next->stream_index];
2656 int64_t left = st2->time_base.num * (int64_t)st ->time_base.den;
2657 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2659 if (pkt->dts == AV_NOPTS_VALUE)
2662 return next->dts * left > pkt->dts * right; //FIXME this can overflow
/* DTS-based interleaver: buffer the incoming packet in sorted order and
 * release the head of the buffer once every stream has contributed at
 * least one packet (or when flushing).
 * NOTE(review): sampled excerpt; the stream_count increment, the output
 * copy and returns are missing between the lines shown. */
2665 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2668 int streams[MAX_STREAMS];
2671 ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
2674 memset(streams, 0, sizeof(streams));
2675 pktl= s->packet_buffer;
2677 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
2678 if(streams[ pktl->pkt.stream_index ] == 0)
2680 streams[ pktl->pkt.stream_index ]++;
2684 if(stream_count && (s->nb_streams == stream_count || flush)){
2685 pktl= s->packet_buffer;
2688 s->packet_buffer= pktl->next;
2692 av_init_packet(out);
2698 * Interleaves an AVPacket correctly so it can be muxed.
2699 * @param out the interleaved packet will be output here
2700 * @param in the input packet
2701 * @param flush 1 if no further packets are available as input and all
2702 * remaining packets should be output
2703 * @return 1 if a packet was output, 0 if no packet could be output,
2704 * < 0 if an error occurred
/* Dispatch interleaving to the muxer's own hook when present, else use
 * the default per-DTS interleaver. */
2706 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2707 if(s->oformat->interleave_packet)
2708 return s->oformat->interleave_packet(s, out, in, flush);
2710 return av_interleave_packet_per_dts(s, out, in, flush);
/* Interleaving write path: drop zero-size audio packets, fix timestamp
 * fields, reject timestamp-less packets for formats that need them, then
 * drain the interleaver, writing each released packet and reporting any
 * byte-I/O error.
 * NOTE(review): sampled excerpt; the drain loop head and error returns
 * are missing between the lines shown. */
2713 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2714 AVStream *st= s->streams[ pkt->stream_index];
2716 //FIXME/XXX/HACK drop zero sized packets
2717 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2720 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2721 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2724 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2729 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2730 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2733 ret= s->oformat->write_packet(s, &opkt);
2735 av_free_packet(&opkt);
2740 if(url_ferror(s->pb))
2741 return url_ferror(s->pb);
/**
 * Finish muxing: drain all buffered interleaved packets, invoke the
 * muxer's write_trailer() if present, then free per-stream and
 * per-context private data.
 *
 * NOTE(review): several lines of this function are missing from this
 * excerpt (loop and error-path structure); the comments below describe
 * only the visible statements.
 */
2745 int av_write_trailer(AVFormatContext *s)
/* NULL input with flush=1 drains the interleaving buffer */
2751 ret= av_interleave_packet(s, &pkt, NULL, 1);
2752 if(ret<0) //FIXME cleanup needed for ret<0 ?
2757 ret= s->oformat->write_packet(s, &pkt);
2759 av_free_packet(&pkt);
2763 if(url_ferror(s->pb))
2767 if(s->oformat->write_trailer)
2768 ret = s->oformat->write_trailer(s);
/* report any pending I/O error on the output byte context */
2771 ret=url_ferror(s->pb);
/* free private data regardless of the trailer result */
2772 for(i=0;i<s->nb_streams;i++)
2773 av_freep(&s->streams[i]->priv_data);
2774 av_freep(&s->priv_data);
/**
 * Register stream index idx with the program whose id is progid,
 * growing that program's stream_index array by one entry.
 *
 * NOTE(review): lines are missing from this excerpt (declarations and
 * the continue/return paths of the loops); the comments below describe
 * only the visible statements.
 */
2778 void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
2781 AVProgram *program=NULL;
/* locate the program with a matching id */
2784 for(i=0; i<ac->nb_programs; i++){
2785 if(ac->programs[i]->id != progid)
2787 program = ac->programs[i];
/* if idx is already listed for this program there is nothing to add */
2788 for(j=0; j<program->nb_stream_indexes; j++)
2789 if(program->stream_index[j] == idx)
/* grow the index array by one entry; on success, append idx */
2792 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
2795 program->stream_index = tmp;
2796 program->stream_index[program->nb_stream_indexes++] = idx;
2801 static void print_fps(double d, const char *postfix){
2802 uint64_t v= lrintf(d*100);
2803 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
2804 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
2805 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
2808 /* "user interface" functions */
/**
 * Log a one-line description of stream i of ic: "Stream #index.i" plus
 * id, language, codec string, aspect-ratio and frame-rate information.
 *
 * @param is_output nonzero when ic is a muxing context (selects oformat flags)
 * NOTE(review): some lines are missing from this excerpt (the buffer
 * declaration, some call arguments, braces); the comments below describe
 * only the visible statements.
 */
2809 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
2812 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
2813 AVStream *st = ic->streams[i];
/* reduce the time base before printing it in debug output */
2814 int g = av_gcd(st->time_base.num, st->time_base.den);
2815 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2816 av_log(NULL, AV_LOG_INFO, "    Stream #%d.%d", index, i);
2817 /* the pid is an important information, so we display it */
2818 /* XXX: add a generic system */
2819 if (flags & AVFMT_SHOW_IDS)
2820 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2821 if (strlen(st->language) > 0)
2822 av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
2823 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2824 av_log(NULL, AV_LOG_INFO, ": %s", buf);
/* print PAR/DAR only when the stream-level SAR differs from the codec-level one */
2825 if (st->sample_aspect_ratio.num && // default
2826 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
2827 AVRational display_aspect_ratio;
2828 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
2829 st->codec->width*st->sample_aspect_ratio.num,
2830 st->codec->height*st->sample_aspect_ratio.den,
2832 av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d",
2833 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
2834 display_aspect_ratio.num, display_aspect_ratio.den);
/* tbr = real frame rate, tbn = stream time base, tbc = codec time base */
2836 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2837 if(st->r_frame_rate.den && st->r_frame_rate.num)
2838 print_fps(av_q2d(st->r_frame_rate), "tbr");
2839 if(st->time_base.den && st->time_base.num)
2840 print_fps(1/av_q2d(st->time_base), "tbn");
2841 if(st->codec->time_base.den && st->codec->time_base.num)
2842 print_fps(1/av_q2d(st->codec->time_base), "tbc");
2844 av_log(NULL, AV_LOG_INFO, "\n");
/**
 * Log a human-readable description of a (de)muxing context: header line,
 * duration, start time, bitrate, then programs and streams.
 *
 * NOTE(review): lines are missing from this excerpt (part of the
 * parameter list, local declarations, the h/m/s arithmetic, braces); the
 * comments below describe only the visible statements.
 */
2847 void dump_format(AVFormatContext *ic,
2854 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2855 is_output ? "Output" : "Input",
2857 is_output ? ic->oformat->name : ic->iformat->name,
2858 is_output ? "to" : "from", url);
2860 av_log(NULL, AV_LOG_INFO, "  Duration: ");
2861 if (ic->duration != AV_NOPTS_VALUE) {
2862 int hours, mins, secs, us;
/* split the AV_TIME_BASE-scaled duration into h:m:s.centiseconds */
2863 secs = ic->duration / AV_TIME_BASE;
2864 us = ic->duration % AV_TIME_BASE;
2869 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
2870 (100 * us) / AV_TIME_BASE);
2872 av_log(NULL, AV_LOG_INFO, "N/A");
2874 if (ic->start_time != AV_NOPTS_VALUE) {
2876 av_log(NULL, AV_LOG_INFO, ", start: ");
2877 secs = ic->start_time / AV_TIME_BASE;
2878 us = ic->start_time % AV_TIME_BASE;
/* start time printed as seconds.microseconds */
2879 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2880 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2882 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2884 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2886 av_log(NULL, AV_LOG_INFO, "N/A");
2888 av_log(NULL, AV_LOG_INFO, "\n");
/* list each program's streams, then all streams of the context */
2890 if(ic->nb_programs) {
2892 for(j=0; j<ic->nb_programs; j++) {
2893 av_log(NULL, AV_LOG_INFO, "  Program %d %s\n", ic->programs[j]->id,
2894 ic->programs[j]->name ? ic->programs[j]->name : "");
2895 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++)
2896 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
2899 for(i=0;i<ic->nb_streams;i++)
2900 dump_stream_format(ic, i, index, is_output);
2903 #if LIBAVFORMAT_VERSION_MAJOR < 53
2904 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2906 return av_parse_video_frame_size(width_ptr, height_ptr, str);
2909 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
2911 AVRational frame_rate;
2912 int ret = av_parse_video_frame_rate(&frame_rate, arg);
2913 *frame_rate_num= frame_rate.num;
2914 *frame_rate_den= frame_rate.den;
2919 int64_t av_gettime(void)
2922 gettimeofday(&tv,NULL);
2923 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
/**
 * Parse datestr into microseconds: either an absolute date/time
 * ("now", or [YYYY-MM-DD][T|t| ]HH:MM:SS[.m], with a 'z'/'Z' suffix
 * meaning UTC) or, when duration is nonzero, a duration
 * (HH:MM:SS[.m] or a plain number of seconds).
 *
 * NOTE(review): many lines of this function are missing from this
 * excerpt (declarations, braces, several branches); the comments below
 * describe only the visible statements.
 */
2926 int64_t parse_date(const char *datestr, int duration)
2932 static const char * const date_fmt[] = {
2936 static const char * const time_fmt[] = {
2946 time_t now = time(0);
2948 len = strlen(datestr);
2950 lastch = datestr[len - 1];
/* a trailing 'z'/'Z' marks the timestamp as UTC */
2953 is_utc = (lastch == 'z' || lastch == 'Z');
2955 memset(&dt, 0, sizeof(dt));
/* "now" short-circuits to the current time */
2960 if (!strncasecmp(datestr, "now", len))
2961 return (int64_t) now * 1000000;
2963 /* parse the year-month-day part */
2964 for (i = 0; i < FF_ARRAY_ELEMS(date_fmt); i++) {
2965 q = small_strptime(p, date_fmt[i], &dt);
2971 /* if the year-month-day part is missing, then take the
2972 * current year-month-day time */
2977 dt = *localtime(&now);
2979 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
/* optional separator between the date and time parts */
2984 if (*p == 'T' || *p == 't' || *p == ' ')
2987 /* parse the hour-minute-second part */
2988 for (i = 0; i < FF_ARRAY_ELEMS(time_fmt); i++) {
2989 q = small_strptime(p, time_fmt[i], &dt);
2995 /* parse datestr as a duration */
3000 /* parse datestr as HH:MM:SS */
3001 q = small_strptime(p, time_fmt[0], &dt);
3003 /* parse datestr as S+ */
3004 dt.tm_sec = strtol(p, (char **)&q, 10);
3006 /* the parsing didn't succeed */
3013 /* Now we have all the fields that we can get */
/* durations are converted straight from h/m/s to seconds */
3019 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
3021 dt.tm_isdst = -1; /* unknown */
3031 /* parse the .m... part */
/* accumulate up to 6 fractional digits as microseconds */
3035 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
3038 val += n * (*q - '0');
3042 return negative ? -t : t;
/**
 * Scan an "tag1=value&tag2=value..." info string for tag1 and, when
 * found, copy its value into arg (bounded by arg_size).
 *
 * NOTE(review): lines are missing from this excerpt (declarations,
 * loop/return structure); the comments below describe only the visible
 * statements.
 */
3045 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
/* copy the current tag name, stopping at '=', '&' or end of string */
3055 while (*p != '\0' && *p != '=' && *p != '&') {
3056 if ((q - tag) < sizeof(tag) - 1)
/* copy the value, stopping at '&' or end of string, bounded by arg_size */
3064 while (*p != '&' && *p != '\0') {
3065 if ((q - arg) < arg_size - 1) {
/* report success when the tag name matches tag1 */
3075 if (!strcmp(tag, tag1))
/**
 * Build a frame filename from a printf-like path template containing
 * exactly one "%d"-style sequence (optionally zero-padded, "%0Nd"),
 * substituting number into buf.
 *
 * NOTE(review): several lines of this function are missing from this
 * excerpt (loop/branch structure, returns); the comments below describe
 * only the visible statements.
 */
3084 int av_get_frame_filename(char *buf, int buf_size,
3085 const char *path, int number)
3088 char *q, buf1[20], c;
3089 int nd, len, percentd_found;
/* accumulate the zero-pad width digits that follow '%' */
3101 while (isdigit(*p)) {
3102 nd = nd * 10 + *p++ - '0';
3105 } while (isdigit(c));
/* render the number with the requested width and append it, bounds-checked */
3114 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3116 if ((q - buf + len) > buf_size - 1)
3118 memcpy(q, buf1, len);
/* ordinary characters are copied while room remains in buf */
3126 if ((q - buf) < buf_size - 1)
/* the template must contain exactly one %d sequence */
3130 if (!percentd_found)
/**
 * Dump size bytes of buf as hex plus a printable-ASCII column, 16 bytes
 * per line, either to FILE *f or (when f is NULL) through av_log on
 * avcl at the given level.
 *
 * NOTE(review): lines are missing from this excerpt (declarations,
 * line-length handling, braces); the comments below describe only the
 * visible statements.
 */
3139 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
3142 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
/* one output line per 16 input bytes */
3144 for(i=0;i<size;i+=16) {
3151 PRINT(" %02x", buf[i+j]);
/* ASCII column: replace non-printable bytes */
3156 for(j=0;j<len;j++) {
3158 if (c < ' ' || c > '~')
/** Hex dump size bytes of buf to the stdio stream f. */
3167 void av_hex_dump(FILE *f, uint8_t *buf, int size)
3169 hex_dump_internal(NULL, f, 0, buf, size);
/** Hex dump size bytes of buf through av_log on context avcl at level. */
3172 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3174 hex_dump_internal(avcl, NULL, level, buf, size);
3177 //FIXME needs to know the time_base
/**
 * Dump an AVPacket's fields — and optionally its payload as hex —
 * either to FILE *f or (when f is NULL) through av_log on avcl at
 * level. Timestamps are printed as seconds assuming AV_TIME_BASE units
 * (see the FIXME above).
 *
 * NOTE(review): lines are missing from this excerpt (the "dts="/"pts="
 * labels, N/A branches, braces); the comments below describe only the
 * visible statements.
 */
3178 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
3180 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3181 PRINT("stream #%d:\n", pkt->stream_index);
3182 PRINT("  keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
3183 PRINT("  duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
3184 /* DTS is _always_ valid after av_read_frame() */
3186 if (pkt->dts == AV_NOPTS_VALUE)
3189 PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
3190 /* PTS may not be known if B-frames are present. */
3192 if (pkt->pts == AV_NOPTS_VALUE)
3195 PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
3197 PRINT("  size=%d\n", pkt->size);
/* optionally dump the raw payload as hex */
3200 av_hex_dump(f, pkt->data, pkt->size);
/** Dump pkt (optionally with payload) to the stdio stream f. */
3203 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3205 pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
/** Dump pkt (optionally with payload) through av_log on avcl at level. */
3208 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3210 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
/**
 * Split url into protocol, authorization, hostname, port and path
 * components. Any output may be disabled by passing size 0 (or a NULL
 * port_ptr); missing components are returned empty / -1.
 *
 * NOTE(review): lines are missing from this excerpt (including at least
 * one parameter line — presumably "int *port_ptr," — and several
 * braces/returns); the comments below describe only the visible
 * statements.
 */
3213 void url_split(char *proto, int proto_size,
3214 char *authorization, int authorization_size,
3215 char *hostname, int hostname_size,
3217 char *path, int path_size,
3220 const char *p, *ls, *at, *col, *brk;
/* initialize every requested output to empty / -1 */
3222 if (port_ptr) *port_ptr = -1;
3223 if (proto_size > 0) proto[0] = 0;
3224 if (authorization_size > 0) authorization[0] = 0;
3225 if (hostname_size > 0) hostname[0] = 0;
3226 if (path_size > 0) path[0] = 0;
3228 /* parse protocol */
3229 if ((p = strchr(url, ':'))) {
3230 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3235 /* no protocol means plain filename */
3236 av_strlcpy(path, url, path_size);
3240 /* separate path from hostname */
3241 ls = strchr(p, '/');
3243 ls = strchr(p, '?');
3245 av_strlcpy(path, ls, path_size);
3247 ls = &p[strlen(p)]; // XXX
3249 /* the rest is hostname, use that to parse auth/port */
3251 /* authorization (user[:pass]@hostname) */
3252 if ((at = strchr(p, '@')) && at < ls) {
3253 av_strlcpy(authorization, p,
3254 FFMIN(authorization_size, at + 1 - p));
3255 p = at + 1; /* skip '@' */
/* bracketed IPv6 literal: [host]:port */
3258 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3260 av_strlcpy(hostname, p + 1,
3261 FFMIN(hostname_size, brk - p));
3262 if (brk[1] == ':' && port_ptr)
3263 *port_ptr = atoi(brk + 2);
3264 } else if ((col = strchr(p, ':')) && col < ls) {
3265 av_strlcpy(hostname, p,
3266 FFMIN(col + 1 - p, hostname_size));
3267 if (port_ptr) *port_ptr = atoi(col + 1);
/* no port: everything up to the path is the hostname */
3269 av_strlcpy(hostname, p,
3270 FFMIN(ls + 1 - p, hostname_size));
/**
 * Write the uppercase hexadecimal representation of the s bytes at src
 * into buff. Exactly 2*s characters are written; no terminating NUL is
 * added.
 *
 * @return buff
 * NOTE(review): the middle rows of the hex table and the trailing
 * "return buff;" were reconstructed from gaps in this excerpt — confirm
 * against the full source.
 */
char *ff_data_to_hex(char *buff, const uint8_t *src, int s)
{
    static const char hex_table[16] = { '0', '1', '2', '3',
                                        '4', '5', '6', '7',
                                        '8', '9', 'A', 'B',
                                        'C', 'D', 'E', 'F' };
    int i;

    for (i = 0; i < s; i++) {
        buff[2 * i]     = hex_table[src[i] >> 4];
        buff[2 * i + 1] = hex_table[src[i] & 0xF];
    }

    return buff;
}
3290 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3291 int pts_num, int pts_den)
3293 unsigned int gcd= av_gcd(pts_num, pts_den);
3294 s->pts_wrap_bits = pts_wrap_bits;
3295 s->time_base.num = pts_num/gcd;
3296 s->time_base.den = pts_den/gcd;
3299 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, gcd);