2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 #include "libavcodec/opt.h"
25 #include "libavutil/avstring.h"
35 * @file libavformat/utils.c
36 * various utility functions for use within FFmpeg
39 unsigned avformat_version(void)
41 return LIBAVFORMAT_VERSION_INT;
44 /* fraction handling */
47  * f = val + (num / den) + 0.5.
49  * 'num' is normalized so that it is such as 0 <= num < den.
51  * @param f fractional number
52  * @param val integer value
53  * @param num must be >= 0
54  * @param den must be >= 1
/* Initialize an AVFrac accumulator (integer part plus num/den remainder).
 * NOTE(review): the function body is missing from this excerpt — only the
 * documentation and signature survived; do not assume anything about the
 * rounding performed beyond the "+ 0.5" noted above. */
56 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
69  * Fractional addition to f: f = f + (incr / f->den).
71  * @param f fractional number
72  * @param incr increment, can be positive or negative
/* Add incr/f->den to the fraction in place.
 * NOTE(review): body largely missing from this excerpt; the surviving
 * "else if (num >= den)" line suggests a normalization step keeping
 * 0 <= num < den, matching av_frac_init's contract — confirm against the
 * full source. */
74 static void av_frac_add(AVFrac *f, int64_t incr)
87     } else if (num >= den) {
94 /** head of registered input format linked list */
/* Singly linked; av_register_input_format() appends at the tail (it walks
 * ->next until NULL). */
95 AVInputFormat *first_iformat = NULL;
96 /** head of registered output format linked list */
/* Singly linked; av_register_output_format() appends at the tail. */
97 AVOutputFormat *first_oformat = NULL;
99 AVInputFormat *av_iformat_next(AVInputFormat *f)
101 if(f) return f->next;
102 else return first_iformat;
105 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
107 if(f) return f->next;
108 else return first_oformat;
/* Append 'format' to the global input-format list (first_iformat).
 * NOTE(review): the pointer declaration and the tail assignment are missing
 * from this excerpt; only the tail-walk loop survived. */
111 void av_register_input_format(AVInputFormat *format)
115     while (*p != NULL) p = &(*p)->next;
/* Append 'format' to the global output-format list (first_oformat).
 * NOTE(review): declarations and the tail assignment are missing from this
 * excerpt; only the tail-walk loop survived. */
120 void av_register_output_format(AVOutputFormat *format)
124     while (*p != NULL) p = &(*p)->next;
/* Test whether the extension of 'filename' (text after the last '.')
 * case-insensitively matches one entry of the comma-separated 'extensions'
 * list.  NOTE(review): declarations of ext1/p/q and the surrounding loop
 * structure are missing from this excerpt. */
129 int match_ext(const char *filename, const char *extensions)
137     ext = strrchr(filename, '.');
143         while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
146             if (!strcasecmp(ext1, ext))
/* Case-insensitively match 'name' against a comma-separated 'names' list.
 * FFMAX(p - names, namelen) ensures "mp4" does not match "mp" and vice
 * versa (prefix-safe comparison).  NOTE(review): declarations and the
 * match-found return inside the loop are missing from this excerpt. */
156 static int match_format(const char *name, const char *names)
164     namelen = strlen(name);
165     while ((p = strchr(names, ','))) {
166         len = FFMAX(p - names, namelen);
167         if (!strncasecmp(name, names, len))
/* falls through to compare against the final (or only) list entry */
171     return !strcasecmp(name, names);
/* Pick the best-scoring registered output format for the given short name,
 * filename and/or MIME type.  Each criterion that matches raises 'score';
 * the highest-scoring format wins.  NOTE(review): the score assignments,
 * fmt_found update and return are missing from this excerpt. */
174 AVOutputFormat *guess_format(const char *short_name, const char *filename,
175                             const char *mime_type)
177     AVOutputFormat *fmt, *fmt_found;
178     int score_max, score;
180     /* specific test for image sequences */
181 #if CONFIG_IMAGE2_MUXER
/* Numbered filenames with a known image codec are routed to the image2
 * muxer before the generic scoring loop runs. */
182     if (!short_name && filename &&
183         av_filename_number_test(filename) &&
184         av_guess_image2_codec(filename) != CODEC_ID_NONE) {
185         return guess_format("image2", NULL, NULL);
188     /* Find the proper file type. */
192     while (fmt != NULL) {
194         if (fmt->name && short_name && !strcmp(fmt->name, short_name))
196         if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
198         if (filename && fmt->extensions &&
199             match_ext(filename, fmt->extensions)) {
202         if (score > score_max) {
/* Like guess_format(), but prefer the "<name>_stream" variant of the
 * guessed muxer when one is registered (e.g. for streamable outputs).
 * NOTE(review): the NULL check around 'fmt' and the returns are missing
 * from this excerpt. */
211 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
212                                     const char *mime_type)
214     AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
217         AVOutputFormat *stream_fmt;
218         char stream_format_name[64];
220         snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
221         stream_fmt = guess_format(stream_format_name, NULL, NULL);
/* Guess the default codec ID for a stream of the given type in muxer 'fmt'.
 * Video: for image2/image2pipe the codec is derived from the filename,
 * otherwise the muxer's default video codec is used.  Audio: the muxer's
 * default audio codec.  Anything else: CODEC_ID_NONE.
 * NOTE(review): some closing braces/returns are missing from this excerpt. */
230 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
231                             const char *filename, const char *mime_type, enum CodecType type){
232     if(type == CODEC_TYPE_VIDEO){
233         enum CodecID codec_id= CODEC_ID_NONE;
235 #if CONFIG_IMAGE2_MUXER
236         if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
237             codec_id= av_guess_image2_codec(filename);
240         if(codec_id == CODEC_ID_NONE)
241             codec_id= fmt->video_codec;
243     }else if(type == CODEC_TYPE_AUDIO)
244         return fmt->audio_codec;
246     return CODEC_ID_NONE;
/* Find a registered input format whose name matches 'short_name'
 * (comma-separated names supported via match_format()).
 * NOTE(review): the matching return and the not-found return are missing
 * from this excerpt. */
249 AVInputFormat *av_find_input_format(const char *short_name)
252     for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
253         if (match_format(short_name, fmt->name))
259 /* memory handling */
/* Default destructor for packets allocated by av_new_packet()/av_dup_packet()
 * (installed as pkt->destruct there).  NOTE(review): the line freeing
 * pkt->data is missing from this excerpt — only the pointer/size reset
 * survived; do not conclude the data is leaked. */
261 void av_destruct_packet(AVPacket *pkt)
264     pkt->data = NULL; pkt->size = 0;
/* Initialize an AVPacket's optional fields to defaults: timestamps unset,
 * stream 0, and the no-op destructor (payload not owned).
 * NOTE(review): initialization of pos/duration/flags is missing from this
 * excerpt. */
267 void av_init_packet(AVPacket *pkt)
269     pkt->pts = AV_NOPTS_VALUE;
270     pkt->dts = AV_NOPTS_VALUE;
273     pkt->convergence_duration = 0;
275     pkt->stream_index = 0;
276     pkt->destruct= av_destruct_packet_nofree;
/* Allocate a packet payload of 'size' bytes plus FF_INPUT_BUFFER_PADDING_SIZE
 * of zeroed padding (required by some decoders' over-reads).
 * The unsigned comparison rejects sizes where size + padding would overflow.
 * NOTE(review): the av_init_packet call and data/size assignments are
 * missing from this excerpt. */
279 int av_new_packet(AVPacket *pkt, int size)
282     if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
283         return AVERROR(ENOMEM);
284     data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
286         return AVERROR(ENOMEM);
287     memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
/* ownership: payload is freed by av_destruct_packet via pkt->destruct */
292     pkt->destruct = av_destruct_packet;
/* Allocate a packet and fill it with 'size' bytes read from 's'; records
 * the stream position in pkt->pos before reading.
 * NOTE(review): error handling for av_new_packet/get_buffer results is
 * missing from this excerpt. */
296 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
298     int ret= av_new_packet(pkt, size);
303     pkt->pos= url_ftell(s);
305     ret= get_buffer(s, pkt->data, size);
/* If the packet does not own its payload (nofree/NULL destructor), deep-copy
 * the data into a freshly allocated, padded buffer so the packet can outlive
 * the source.  The unsigned comparison rejects overflow of size + padding.
 * NOTE(review): the 'data' declaration, NULL check after av_malloc, and the
 * pkt->data assignment are missing from this excerpt. */
314 int av_dup_packet(AVPacket *pkt)
316     if (((pkt->destruct == av_destruct_packet_nofree) || (pkt->destruct == NULL)) && pkt->data) {
318         /* We duplicate the packet and don't forget to add the padding again. */
319         if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
320             return AVERROR(ENOMEM);
321         data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
323             return AVERROR(ENOMEM);
325         memcpy(data, pkt->data, pkt->size);
326         memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
/* the copy is now owned by the packet and freed by av_destruct_packet */
328         pkt->destruct = av_destruct_packet;
/* Return non-zero if 'filename' contains a frame-number pattern that
 * av_get_frame_filename() can expand (used for image sequences).
 * NOTE(review): the 'buf' declaration is missing from this excerpt. */
333 int av_filename_number_test(const char *filename)
336     return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
/* Probe all registered input formats against 'pd' and return the one with
 * the highest score above *score_max (updating *score_max).  Formats whose
 * AVFMT_NOFILE flag disagrees with 'is_opened' are skipped.  read_probe()
 * is preferred; extension match is the fallback scorer.
 * NOTE(review): the score assignment for extension matches, the 'fmt'
 * update and the tie warning are missing from this excerpt. */
339 static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
341     AVInputFormat *fmt1, *fmt;
345     for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
346         if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
349         if (fmt1->read_probe) {
350             score = fmt1->read_probe(pd);
351         } else if (fmt1->extensions) {
352             if (match_ext(pd->filename, fmt1->extensions)) {
356         if (score > *score_max) {
359         }else if (score == *score_max)
/* Public wrapper: probe with a locally initialized minimum score.
 * NOTE(review): the 'score' declaration/initialization is missing from
 * this excerpt. */
365 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
367     return av_probe_input_format2(pd, is_opened, &score);
/* Re-probe a CODEC_ID_PROBE stream's buffered data and, for a small set of
 * known elementary-stream demuxers, map the demuxer name to a concrete
 * codec id and type on st->codec.
 * NOTE(review): the 'fmt' declaration, NULL check and return are missing
 * from this excerpt. */
370 static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
373     fmt = av_probe_input_format2(pd, 1, &score);
376         if (!strcmp(fmt->name, "mp3")) {
377             st->codec->codec_id = CODEC_ID_MP3;
378             st->codec->codec_type = CODEC_TYPE_AUDIO;
379         } else if (!strcmp(fmt->name, "ac3")) {
380             st->codec->codec_id = CODEC_ID_AC3;
381             st->codec->codec_type = CODEC_TYPE_AUDIO;
382         } else if (!strcmp(fmt->name, "mpegvideo")) {
383             st->codec->codec_id = CODEC_ID_MPEG2VIDEO;
384             st->codec->codec_type = CODEC_TYPE_VIDEO;
385         } else if (!strcmp(fmt->name, "m4v")) {
386             st->codec->codec_id = CODEC_ID_MPEG4;
387             st->codec->codec_type = CODEC_TYPE_VIDEO;
388         } else if (!strcmp(fmt->name, "h264")) {
389             st->codec->codec_id = CODEC_ID_H264;
390             st->codec->codec_type = CODEC_TYPE_VIDEO;
396 /************************************************************/
397 /* input media file */
400  * Open a media file from an IO stream. 'fmt' must be specified.
/* Allocates (unless ap->prealloced_context) and initializes an
 * AVFormatContext, allocates the demuxer's private data, calls the
 * demuxer's read_header(), and records the data offset.  On failure the
 * cleanup path frees private data and per-stream allocations.
 * NOTE(review): many lines (ic declaration, success return, metadata-compat
 * #endif, error labels) are missing from this excerpt. */
402 int av_open_input_stream(AVFormatContext **ic_ptr,
403                          ByteIOContext *pb, const char *filename,
404                          AVInputFormat *fmt, AVFormatParameters *ap)
408     AVFormatParameters default_ap;
/* when the caller passes no parameters, a zeroed default set is used */
412         memset(ap, 0, sizeof(default_ap));
415     if(!ap->prealloced_context)
416         ic = avformat_alloc_context();
420         err = AVERROR(ENOMEM);
425     ic->duration = AV_NOPTS_VALUE;
426     ic->start_time = AV_NOPTS_VALUE;
427     av_strlcpy(ic->filename, filename, sizeof(ic->filename));
429     /* allocate private data */
430     if (fmt->priv_data_size > 0) {
431         ic->priv_data = av_mallocz(fmt->priv_data_size);
432         if (!ic->priv_data) {
433             err = AVERROR(ENOMEM);
437         ic->priv_data = NULL;
440     if (ic->iformat->read_header) {
441         err = ic->iformat->read_header(ic, ap);
446     if (pb && !ic->data_offset)
447         ic->data_offset = url_ftell(ic->pb);
449 #if LIBAVFORMAT_VERSION_MAJOR < 53
450     ff_metadata_demux_compat(ic);
/* error path: release everything allocated above */
458     av_freep(&ic->priv_data);
459     for(i=0;i<ic->nb_streams;i++) {
460         AVStream *st = ic->streams[i];
462             av_free(st->priv_data);
463             av_free(st->codec->extradata);
473 /** size of probe buffer, for guessing file type from file contents */
474 #define PROBE_BUF_MIN 2048
475 #define PROBE_BUF_MAX (1<<20)
/* Open a media file by name: probe the format (first without, then with
 * file contents, doubling the probe buffer from PROBE_BUF_MIN up to
 * PROBE_BUF_MAX), then hand off to av_open_input_stream().  The required
 * score drops to 0 only at the maximum probe size.
 * NOTE(review): many lines (fmt declaration, error returns/labels, probe
 * buffer free) are missing from this excerpt. */
477 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
480                        AVFormatParameters *ap)
483     AVProbeData probe_data, *pd = &probe_data;
484     ByteIOContext *pb = NULL;
488     pd->filename = filename;
493         /* guess format if no file can be opened */
494         fmt = av_probe_input_format(pd, 0);
497     /* Do not open file if the format does not need it. XXX: specific
498        hack needed to handle RTSP/TCP */
499     if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
500         /* if no file needed do not try to open one */
501         if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
505             url_setbufsize(pb, buf_size);
508         for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
509             int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
510             /* read probe data */
511             pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
512             pd->buf_size = get_buffer(pb, pd->buf, probe_size);
513             memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
/* rewind so the demuxer reads from the start; fall back to reopening if
 * the stream is not seekable */
514             if (url_fseek(pb, 0, SEEK_SET) < 0) {
516                 if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
522             /* guess file format */
523             fmt = av_probe_input_format2(pd, 1, &score);
528     /* if still no format found, error */
534     /* check filename in case an image number is expected */
535     if (fmt->flags & AVFMT_NEEDNUMBER) {
536         if (!av_filename_number_test(filename)) {
537             err = AVERROR_NUMEXPECTED;
541     err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
554 /*******************************************************/
/* Append a copy of 'pkt' to the packet list, maintaining both the head
 * (*packet_buffer) and tail (*plast_pktl) pointers for O(1) append.
 * NOTE(review): the NULL check for the allocation, the tail update and the
 * return are missing from this excerpt. */
556 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
557                                AVPacketList **plast_pktl){
558     AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
563         (*plast_pktl)->next = pktl;
565         *packet_buffer = pktl;
567     /* add the packet in the buffered packet list */
/* Read the next raw packet from the demuxer.  Packets for streams still in
 * CODEC_ID_PROBE are diverted into the raw_packet_buffer and their payload
 * accumulated in st->probe_data until probing can identify the codec;
 * buffered packets are drained first.
 * NOTE(review): several lines (loop structure, returns, error handling)
 * are missing from this excerpt. */
573 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
579         AVPacketList *pktl = s->raw_packet_buffer;
/* drain previously buffered packets once their stream's codec is known */
583             if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE){
584                 s->raw_packet_buffer = pktl->next;
591         ret= s->iformat->read_packet(s, pkt);
594         st= s->streams[pkt->stream_index];
/* honor forced codec ids requested by the caller */
596         switch(st->codec->codec_type){
597         case CODEC_TYPE_VIDEO:
598             if(s->video_codec_id)   st->codec->codec_id= s->video_codec_id;
600         case CODEC_TYPE_AUDIO:
601             if(s->audio_codec_id)   st->codec->codec_id= s->audio_codec_id;
603         case CODEC_TYPE_SUBTITLE:
604             if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
608         if(!pktl && st->codec->codec_id!=CODEC_ID_PROBE)
611         add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
613         if(st->codec->codec_id == CODEC_ID_PROBE){
614             AVProbeData *pd = &st->probe_data;
/* grow the probe buffer with this packet's payload plus zero padding */
616             pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
617             memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
618             pd->buf_size += pkt->size;
619             memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
/* re-probe only when the buffer size crosses a power-of-two boundary,
 * to bound the number of probe attempts */
621             if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
622                 set_codec_from_probe_data(st, pd, 1);
623                 if(st->codec->codec_id != CODEC_ID_PROBE){
632 /**********************************************************/
635  * Get the number of samples of an audio frame. Return -1 on error.
/* For codecs without a fixed frame size: derive samples from bits-per-sample
 * (PCM-like) or, failing that, from bitrate (e.g. ADPCM); otherwise use the
 * codec's fixed frame_size.  NOTE(review): several returns and the final
 * return are missing from this excerpt. */
637 static int get_audio_frame_size(AVCodecContext *enc, int size)
641     if(enc->codec_id == CODEC_ID_VORBIS)
644     if (enc->frame_size <= 1) {
645         int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
647         if (bits_per_sample) {
648             if (enc->channels == 0)
650             frame_size = (size << 3) / (bits_per_sample * enc->channels);
652             /* used for example by ADPCM codecs */
653             if (enc->bit_rate == 0)
655             frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
658         frame_size = enc->frame_size;
665  * Return the frame duration in seconds. Return 0 if not available.
/* Writes the duration as the rational *pnum / *pden.  Video: prefer the
 * stream time base when it is plausibly per-frame (num*1000 > den),
 * otherwise the codec time base, scaled by the parser's repeat_pict.
 * Audio: samples-per-frame over sample_rate.
 * NOTE(review): the default *pnum/*pden initialization and parts of the
 * audio branch are missing from this excerpt. */
667 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
668                                    AVCodecParserContext *pc, AVPacket *pkt)
674     switch(st->codec->codec_type) {
675     case CODEC_TYPE_VIDEO:
676         if(st->time_base.num*1000LL > st->time_base.den){
677             *pnum = st->time_base.num;
678             *pden = st->time_base.den;
679         }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
680             *pnum = st->codec->time_base.num;
681             *pden = st->codec->time_base.den;
682             if (pc && pc->repeat_pict) {
683                 // NOTE: repeat_pict can be also -1 for half-frame durations,
684                 // e.g., in H.264 interlaced field picture stream
686                 *pnum = (*pnum) * (2 + pc->repeat_pict);
690     case CODEC_TYPE_AUDIO:
691         frame_size = get_audio_frame_size(st->codec, pkt->size);
695         *pden = st->codec->sample_rate;
/* Return non-zero when every frame of the codec is a keyframe: all audio
 * codecs, plus a whitelist of intra-only video codecs.
 * NOTE(review): several case labels and the returns are missing from this
 * excerpt. */
702 static int is_intra_only(AVCodecContext *enc){
703     if(enc->codec_type == CODEC_TYPE_AUDIO){
705     }else if(enc->codec_type == CODEC_TYPE_VIDEO){
706         switch(enc->codec_id){
708         case CODEC_ID_MJPEGB:
710         case CODEC_ID_RAWVIDEO:
711         case CODEC_ID_DVVIDEO:
712         case CODEC_ID_HUFFYUV:
713         case CODEC_ID_FFVHUFF:
718         case CODEC_ID_JPEG2000:
/* Once the first usable dts for a stream is seen, compute first_dts as the
 * offset from cur_dts and shift the timestamps of all already-buffered
 * packets of that stream accordingly; also derive start_time.
 * NOTE(review): an early return and closing braces are missing from this
 * excerpt. */
726 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
727                                       int64_t dts, int64_t pts)
729     AVStream *st= s->streams[stream_index];
730     AVPacketList *pktl= s->packet_buffer;
/* only act the first time: first_dts unset, a valid dts, known cur_dts */
732     if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
735     st->first_dts= dts - st->cur_dts;
738     for(; pktl; pktl= pktl->next){
739         if(pktl->pkt.stream_index != stream_index)
741         //FIXME think more about this check
742         if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
743             pktl->pkt.pts += st->first_dts;
745         if(pktl->pkt.dts != AV_NOPTS_VALUE)
746             pktl->pkt.dts += st->first_dts;
748         if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
749             st->start_time= pktl->pkt.pts;
751     if (st->start_time == AV_NOPTS_VALUE)
752         st->start_time = pts;
/* Back-fill dts/pts/duration of buffered packets that lack timestamps,
 * using the duration of the current packet; works backwards from first_dts
 * when known, otherwise forwards from cur_dts.
 * NOTE(review): a few lines (early return, cur_dts fallback value) are
 * missing from this excerpt. */
755 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
757     AVPacketList *pktl= s->packet_buffer;
760     if(st->first_dts != AV_NOPTS_VALUE){
761         cur_dts= st->first_dts;
/* walk back: subtract one duration per leading timestamp-less packet */
762         for(; pktl; pktl= pktl->next){
763             if(pktl->pkt.stream_index == pkt->stream_index){
764                 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
766                 cur_dts -= pkt->duration;
769         pktl= s->packet_buffer;
770         st->first_dts = cur_dts;
771     }else if(st->cur_dts)
774     for(; pktl; pktl= pktl->next){
775         if(pktl->pkt.stream_index != pkt->stream_index)
777         if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
778            && !pktl->pkt.duration){
779             pktl->pkt.dts= cur_dts;
/* streams with B-frames cannot assume pts == dts */
780             if(!st->codec->has_b_frames)
781                 pktl->pkt.pts= cur_dts;
782             cur_dts += pkt->duration;
783             pktl->pkt.duration= pkt->duration;
787     if(st->first_dts == AV_NOPTS_VALUE)
788         st->cur_dts= cur_dts;
/* Fill in missing pts/dts/duration/flags on a demuxed packet: detect
 * wrap-around, estimate duration, interpolate timestamps from cur_dts,
 * reorder pts through a small buffer for B-frame delay, and set the
 * keyframe flag from the parser or intra-only codecs.
 * NOTE(review): this excerpt is fragmentary (several conditions, braces
 * and assignments are missing); the logic is intricate and order-dependent,
 * so the code is left untouched and only annotated. */
791 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
792                                AVCodecParserContext *pc, AVPacket *pkt)
794     int num, den, presentation_delayed, delay, i;
797     /* do we have a video B-frame ? */
798     delay= st->codec->has_b_frames;
799     presentation_delayed = 0;
800     /* XXX: need has_b_frame, but cannot get it if the codec is
803         pc && pc->pict_type != FF_B_TYPE)
804         presentation_delayed = 1;
/* dts > pts with a small pts_wrap_bits indicates a timestamp wrap */
806     if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
807        /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
808         pkt->dts -= 1LL<<st->pts_wrap_bits;
811     // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
812     // we take the conservative approach and discard both
813     // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
814     if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
815         av_log(s, AV_LOG_WARNING, "invalid dts/pts combination\n");
816         pkt->dts= pkt->pts= AV_NOPTS_VALUE;
819     if (pkt->duration == 0) {
820         compute_frame_duration(&num, &den, st, pc, pkt);
822             pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
824             if(pkt->duration != 0 && s->packet_buffer)
825                 update_initial_durations(s, st, pkt);
829     /* correct timestamps with byte offset if demuxers only have timestamps
830        on packet boundaries */
831     if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
832         /* this will estimate bitrate based on this frame's duration and size */
833         offset = av_rescale(pc->offset, pkt->duration, pkt->size);
834         if(pkt->pts != AV_NOPTS_VALUE)
836         if(pkt->dts != AV_NOPTS_VALUE)
840     /* This may be redundant, but it should not hurt. */
841     if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
842         presentation_delayed = 1;
844 //    av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
845     /* interpolate PTS and DTS if they are not present */
846     if(delay==0 || (delay==1 && pc)){
847         if (presentation_delayed) {
848             /* DTS = decompression timestamp */
849             /* PTS = presentation timestamp */
850             if (pkt->dts == AV_NOPTS_VALUE)
851                 pkt->dts = st->last_IP_pts;
852             update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
853             if (pkt->dts == AV_NOPTS_VALUE)
854                 pkt->dts = st->cur_dts;
856             /* this is tricky: the dts must be incremented by the duration
857                of the frame we are displaying, i.e. the last I- or P-frame */
858             if (st->last_IP_duration == 0)
859                 st->last_IP_duration = pkt->duration;
860             if(pkt->dts != AV_NOPTS_VALUE)
861                 st->cur_dts = pkt->dts + st->last_IP_duration;
862             st->last_IP_duration  = pkt->duration;
863             st->last_IP_pts= pkt->pts;
864             /* cannot compute PTS if not present (we can compute it only
865                by knowing the future */
866         } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
/* heuristic: if adding one duration brings pts much closer to cur_dts,
 * assume the pts was off by one frame */
867             if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
868                 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
869                 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
870                 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
871                     pkt->pts += pkt->duration;
872     //                av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
876             /* presentation is not delayed : PTS and DTS are the same */
877             if(pkt->pts == AV_NOPTS_VALUE)
879             update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
880             if(pkt->pts == AV_NOPTS_VALUE)
881                 pkt->pts = st->cur_dts;
883             if(pkt->pts != AV_NOPTS_VALUE)
884                 st->cur_dts = pkt->pts + pkt->duration;
/* B-frame delay > 0: recover dts as the minimum of recent pts values via
 * a small insertion-sorted buffer */
888     if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
889         st->pts_buffer[0]= pkt->pts;
890         for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
891             FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
892         if(pkt->dts == AV_NOPTS_VALUE)
893             pkt->dts= st->pts_buffer[0];
895             update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
897         if(pkt->dts > st->cur_dts)
898             st->cur_dts = pkt->dts;
901 //    av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
904     if(is_intra_only(st->codec))
905         pkt->flags |= PKT_FLAG_KEY;
908         /* keyframe computation */
909         if (pc->key_frame == 1)
910             pkt->flags |= PKT_FLAG_KEY;
911         else if (pc->key_frame == -1 && pc->pict_type == FF_I_TYPE)
912             pkt->flags |= PKT_FLAG_KEY;
915         pkt->convergence_duration = pc->convergence_duration;
918 void av_destruct_packet_nofree(AVPacket *pkt)
920 pkt->data = NULL; pkt->size = 0;
/* Core frame reader: pulls raw packets via av_read_packet(), feeds them
 * through the stream's parser when needed (splitting/merging into whole
 * frames), computes timestamp fields, and maintains the generic index for
 * keyframes.  Streams without a parser pass packets through unchanged.
 * NOTE(review): this excerpt is fragmentary (loop structure, several
 * returns and error paths are missing); left untouched, annotations only. */
923 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
931         /* select current input stream component */
934             if (!st->need_parsing || !st->parser) {
935                 /* no parsing needed: we just output the packet as is */
936                 /* raw data support */
937                 *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
938                 compute_pkt_fields(s, st, NULL, pkt);
941             } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
942                 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
943                                       st->cur_ptr, st->cur_len,
944                                       st->cur_pkt.pts, st->cur_pkt.dts);
/* timestamps are consumed by the parser; do not reuse them */
945                 st->cur_pkt.pts = AV_NOPTS_VALUE;
946                 st->cur_pkt.dts = AV_NOPTS_VALUE;
947                 /* increment read pointer */
951                 /* return packet if any */
953                     pkt->pos = st->cur_pkt.pos;             // Isn't quite accurate but close.
956                     pkt->stream_index = st->index;
957                     pkt->pts = st->parser->pts;
958                     pkt->dts = st->parser->dts;
959                     pkt->destruct = av_destruct_packet_nofree;
960                     compute_pkt_fields(s, st, st->parser, pkt);
962                     if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
963                         ff_reduce_index(s, st->index);
964                         av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
965                                            0, 0, AVINDEX_KEYFRAME);
972                 av_free_packet(&st->cur_pkt);
977         /* read next packet */
978         ret = av_read_packet(s, &cur_pkt);
980             if (ret == AVERROR(EAGAIN))
982             /* return the last frames, if any */
/* at EOF, flush each parser with a NULL input to drain buffered frames */
983             for(i = 0; i < s->nb_streams; i++) {
985                 if (st->parser && st->need_parsing) {
986                     av_parser_parse(st->parser, st->codec,
987                                     &pkt->data, &pkt->size,
989                                     AV_NOPTS_VALUE, AV_NOPTS_VALUE);
994             /* no more packets: really terminate parsing */
997         st = s->streams[cur_pkt.stream_index];
998         st->cur_pkt= cur_pkt;
1000         if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
1001            st->cur_pkt.dts != AV_NOPTS_VALUE &&
1002            st->cur_pkt.pts < st->cur_pkt.dts){
1003             av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1004                    st->cur_pkt.stream_index,
1008 //            av_free_packet(&st->cur_pkt);
1012         if(s->debug & FF_FDEBUG_TS)
1013             av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
1014                    st->cur_pkt.stream_index,
1021         st->cur_ptr = st->cur_pkt.data;
1022         st->cur_len = st->cur_pkt.size;
/* lazily create the parser the first time this stream needs one */
1023         if (st->need_parsing && !st->parser) {
1024             st->parser = av_parser_init(st->codec->codec_id);
1026                 /* no parser available: just output the raw packets */
1027                 st->need_parsing = AVSTREAM_PARSE_NONE;
1028             }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
1029                 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1031             if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
1032                 st->parser->next_frame_offset=
1033                 st->parser->cur_offset= st->cur_pkt.pos;
1038     if(s->debug & FF_FDEBUG_TS)
1039         av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
/* Public frame reader.  Without AVFMT_FLAG_GENPTS it forwards to
 * av_read_frame_internal(); with it, packets are buffered until a later
 * packet's dts can be used to synthesize a missing pts (non-B-frame rule),
 * then returned from the buffer in order.
 * NOTE(review): this excerpt is fragmentary (loop/advance statements and
 * buffered-return path are missing); left untouched, annotations only. */
1049 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1053     const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1056         pktl = s->packet_buffer;
1058             AVPacket *next_pkt= &pktl->pkt;
1060             if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
1061                 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
/* a later packet of the same stream with dts > this dts and pts != dts
 * (i.e. not a B-frame) supplies the missing pts */
1062                     if(   pktl->pkt.stream_index == next_pkt->stream_index
1063                        && next_pkt->dts < pktl->pkt.dts
1064                        && pktl->pkt.pts != pktl->pkt.dts //not b frame
1065                        /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
1066                         next_pkt->pts= pktl->pkt.dts;
1070                 pktl = s->packet_buffer;
1073             if(   next_pkt->pts != AV_NOPTS_VALUE
1074                || next_pkt->dts == AV_NOPTS_VALUE
1076                 /* read packet from packet buffer, if there is data */
1078                 s->packet_buffer = pktl->next;
1084                 int ret= av_read_frame_internal(s, pkt);
1086                     if(pktl && ret != AVERROR(EAGAIN)){
/* buffer the packet; duplicate so the buffered copy owns its payload */
1093                 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1094                                                &s->packet_buffer_end)) < 0)
1095                     return AVERROR(ENOMEM);
1097         assert(!s->packet_buffer);
1098         return av_read_frame_internal(s, pkt);
1103 /* XXX: suppress the packet queue */
/* Free every buffered packet and its list node.
 * NOTE(review): the loop structure, the node free and the raw_packet_buffer
 * handling are missing from this excerpt. */
1104 static void flush_packet_queue(AVFormatContext *s)
1109         pktl = s->packet_buffer;
1112         s->packet_buffer = pktl->next;
1113         av_free_packet(&pktl->pkt);
1118 /*******************************************************/
/* Choose a default stream for seeking/display: the first video stream if
 * any, otherwise the first audio stream, otherwise stream 0.
 * NOTE(review): the video-stream return and the no-streams return value
 * are missing from this excerpt. */
1121 int av_find_default_stream_index(AVFormatContext *s)
1123     int first_audio_index = -1;
1127     if (s->nb_streams <= 0)
1129     for(i = 0; i < s->nb_streams; i++) {
1131         if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1134         if (first_audio_index < 0 && st->codec->codec_type == CODEC_TYPE_AUDIO)
1135             first_audio_index = i;
1137     return first_audio_index >= 0 ? first_audio_index : 0;
1141  * Flush the frame reader.
/* Reset all demuxing state after a seek: drop buffered packets, close
 * per-stream parsers, free pending packets and reset timestamp tracking.
 * NOTE(review): several per-stream reset lines are missing from this
 * excerpt. */
1143 static void av_read_frame_flush(AVFormatContext *s)
1148     flush_packet_queue(s);
1152     /* for each stream, reset read state */
1153     for(i = 0; i < s->nb_streams; i++) {
1157             av_parser_close(st->parser);
1159             av_free_packet(&st->cur_pkt);
1161         st->last_IP_pts = AV_NOPTS_VALUE;
1162         st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
/* After a seek, set every stream's cur_dts to 'timestamp' (expressed in
 * ref_st's time base) rescaled into that stream's own time base. */
1169 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1172     for(i = 0; i < s->nb_streams; i++) {
1173         AVStream *st = s->streams[i];
1175         st->cur_dts = av_rescale(timestamp,
1176                                  st->time_base.den * (int64_t)ref_st->time_base.num,
1177                                  st->time_base.num * (int64_t)ref_st->time_base.den);
/* Keep the seek index within s->max_index_size bytes: when full, drop every
 * other entry (halving resolution rather than discarding history). */
1181 void ff_reduce_index(AVFormatContext *s, int stream_index)
1183     AVStream *st= s->streams[stream_index];
1184     unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1186     if((unsigned)st->nb_index_entries >= max_entries){
1188         for(i=0; 2*i<st->nb_index_entries; i++)
1189             st->index_entries[i]= st->index_entries[2*i];
1190         st->nb_index_entries= i;
/* Insert an index entry (pos/timestamp/size/distance/flags) keeping
 * st->index_entries sorted by timestamp; duplicate timestamps update the
 * existing entry (never reducing min_distance).  Grows the array with
 * av_fast_realloc, guarding against size_t overflow first.
 * NOTE(review): the realloc-failure return, remaining field assignments
 * and final return are missing from this excerpt. */
1194 int av_add_index_entry(AVStream *st,
1195                        int64_t pos, int64_t timestamp, int size, int distance, int flags)
1197     AVIndexEntry *entries, *ie;
1200     if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1203     entries = av_fast_realloc(st->index_entries,
1204                               &st->index_entries_allocated_size,
1205                               (st->nb_index_entries + 1) *
1206                               sizeof(AVIndexEntry));
1210     st->index_entries= entries;
1212     index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
/* timestamp is past the end: append */
1215         index= st->nb_index_entries++;
1216         ie= &entries[index];
1217         assert(index==0 || ie[-1].timestamp < timestamp);
1219         ie= &entries[index];
1220         if(ie->timestamp != timestamp){
1221             if(ie->timestamp <= timestamp)
/* shift the tail right to make room at 'index' */
1223             memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1224             st->nb_index_entries++;
1225         }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1226             distance= ie->min_distance;
1230     ie->timestamp = timestamp;
1231     ie->min_distance= distance;
/* Binary-search the sorted index for wanted_timestamp.  With
 * AVSEEK_FLAG_BACKWARD pick the entry at or before the target, otherwise
 * at or after.  Without AVSEEK_FLAG_ANY, walk to the nearest keyframe
 * entry in the seek direction.
 * NOTE(review): the a/b bound initialization, the bisection loop header
 * and the bound updates/returns are missing from this excerpt. */
1238 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1241     AVIndexEntry *entries= st->index_entries;
1242     int nb_entries= st->nb_index_entries;
1251         timestamp = entries[m].timestamp;
1252         if(timestamp >= wanted_timestamp)
1254         if(timestamp <= wanted_timestamp)
1257     m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1259     if(!(flags & AVSEEK_FLAG_ANY)){
1260         while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1261             m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/* Seek using the demuxer's read_timestamp() via av_gen_search().  Existing
 * index entries are used to pre-narrow the [pos_min, pos_max] search window
 * before the generic search; on success the file position is set and all
 * streams' cur_dts updated.
 * NOTE(review): several lines (ts_max init, pos_min/pos_max assignments,
 * error returns) are missing from this excerpt. */
1272 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1273     AVInputFormat *avif= s->iformat;
1274     int64_t pos_min, pos_max, pos, pos_limit;
1275     int64_t ts_min, ts_max, ts;
1279     if (stream_index < 0)
1283     av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1287     ts_min= AV_NOPTS_VALUE;
1288     pos_limit= -1; //gcc falsely says it may be uninitialized
1290     st= s->streams[stream_index];
1291     if(st->index_entries){
/* lower bound from the nearest index entry at or before the target */
1294         index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1295         index= FFMAX(index, 0);
1296         e= &st->index_entries[index];
1298         if(e->timestamp <= target_ts || e->pos == e->min_distance){
1300             ts_min= e->timestamp;
1302                 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
/* upper bound from the nearest index entry at or after the target */
1309         index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1310         assert(index < st->nb_index_entries);
1312             e= &st->index_entries[index];
1313             assert(e->timestamp >= target_ts);
1315             ts_max= e->timestamp;
1316             pos_limit= pos_max - e->min_distance;
1318                 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1319                        pos_max,pos_limit, ts_max);
1324     pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1329     url_fseek(s->pb, pos, SEEK_SET);
1331     av_update_cur_dts(s, st, ts);
/* Generic timestamp search over a byte range: establishes ts_min/ts_max by
 * reading timestamps at the file ends if unknown, then narrows with
 * interpolation, falling back to bisection and finally linear search when
 * bounds stop moving.  Returns the byte position to seek to and the
 * timestamp found via *ts_ret.
 * NOTE(review): many lines (step loop bounds, no_change updates,
 * pos_min/pos_max updates, returns) are missing from this excerpt; the
 * control flow is too order-dependent to restyle safely. */
1336 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1338     int64_t start_pos, filesize;
1342     av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1345     if(ts_min == AV_NOPTS_VALUE){
1346         pos_min = s->data_offset;
1347         ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1348         if (ts_min == AV_NOPTS_VALUE)
1352     if(ts_max == AV_NOPTS_VALUE){
1354         filesize = url_fsize(s->pb);
1355         pos_max = filesize - 1;
/* walk backwards from EOF in growing steps until a timestamp is found */
1358             ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1360         }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1361         if (ts_max == AV_NOPTS_VALUE)
1365             int64_t tmp_pos= pos_max + 1;
1366             int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1367             if(tmp_ts == AV_NOPTS_VALUE)
1371             if(tmp_pos >= filesize)
1377     if(ts_min > ts_max){
1379     }else if(ts_min == ts_max){
1384     while (pos_min < pos_limit) {
1386         av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1390         assert(pos_limit <= pos_max);
1393             int64_t approximate_keyframe_distance= pos_max - pos_limit;
1394             // interpolate position (better than dichotomy)
1395             pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1396                 + pos_min - approximate_keyframe_distance;
1397         }else if(no_change==1){
1398             // bisection, if interpolation failed to change min or max pos last time
1399             pos = (pos_min + pos_limit)>>1;
1401             /* linear search if bisection failed, can only happen if there
1402                are very few or no keyframes between min/max */
1407         else if(pos > pos_limit)
1411         ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1417         av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1419         if(ts == AV_NOPTS_VALUE){
1420             av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1423         assert(ts != AV_NOPTS_VALUE);
1424         if (target_ts <= ts) {
1425             pos_limit = start_pos - 1;
1429         if (target_ts >= ts) {
1435     pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1436     ts  = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min  : ts_max;
1439     ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1441     ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1442     av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1443            pos, ts_min, target_ts, ts_max);
/* Byte-position seek (AVSEEK_FLAG_BYTE): clamp 'pos' to
 * [data_offset, filesize-1], seek the IO context there and update cur_dts.
 * NOTE(review): the 'ts' computation and return are missing from this
 * excerpt. */
1449 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1450     int64_t pos_min, pos_max;
1454     if (stream_index < 0)
1457     st= s->streams[stream_index];
1460     pos_min = s->data_offset;
1461     pos_max = url_fsize(s->pb) - 1;
1463     if     (pos < pos_min) pos= pos_min;
1464     else if(pos > pos_max) pos= pos_max;
1466     url_fseek(s->pb, pos, SEEK_SET);
1469     av_update_cur_dts(s, st, ts);
/* Index-based seek fallback: look up the timestamp in the seek index; if it
 * is missing or at the index's end, read frames forward (building index
 * entries) until past the target, then retry the lookup and finally seek
 * to the chosen entry's byte position.
 * NOTE(review): loop boundaries, several returns and a not-found path are
 * missing from this excerpt; annotations only. */
1474 static int av_seek_frame_generic(AVFormatContext *s,
1475                                  int stream_index, int64_t timestamp, int flags)
1481     st = s->streams[stream_index];
1483     index = av_index_search_timestamp(st, timestamp, flags);
1485     if(index < 0 || index==st->nb_index_entries-1){
1489         if(st->nb_index_entries){
1490             assert(st->index_entries);
/* resume reading from the last indexed position */
1491             ie= &st->index_entries[st->nb_index_entries-1];
1492             if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1494             av_update_cur_dts(s, st, ie->timestamp);
1496             if ((ret = url_fseek(s->pb, 0, SEEK_SET)) < 0)
1502                 ret = av_read_frame(s, &pkt);
1503             }while(ret == AVERROR(EAGAIN));
1506             av_free_packet(&pkt);
1507             if(stream_index == pkt.stream_index){
1508                 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1512         index = av_index_search_timestamp(st, timestamp, flags);
1517     av_read_frame_flush(s);
/* let the demuxer's own seek handle it when available */
1518     if (s->iformat->read_seek){
1519         if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1522     ie = &st->index_entries[index];
1523     if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1525     av_update_cur_dts(s, st, ie->timestamp);
/* Public seek entry point: flush buffered packets, then dispatch to
 * byte-based, demuxer-specific, binary (read_timestamp-based), or generic
 * index-based seeking, in that order of preference.
 * NOTE(review): elided listing — declarations of st/ret and some returns
 * are missing between the numbered lines. */
1530 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1535 av_read_frame_flush(s);
1537 if(flags & AVSEEK_FLAG_BYTE)
1538 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1540 if(stream_index < 0){
1541 stream_index= av_find_default_stream_index(s);
1542 if(stream_index < 0)
1545 st= s->streams[stream_index];
/* convert from AV_TIME_BASE units to the default stream's time base */
1546 /* timestamp for default must be expressed in AV_TIME_BASE units */
1547 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1550 /* first, we try the format specific seek */
1551 if (s->iformat->read_seek)
1552 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1559 if(s->iformat->read_timestamp)
1560 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1562 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1565 /*******************************************************/
/* NOTE(review): elided listing — loop body lines (return statements,
 * closing braces) are missing between the numbered lines. */
1568 * Returns TRUE if the stream has accurate duration in any stream.
1570 * @return TRUE if the stream has accurate duration for at least one component.
/* Scan all streams; true as soon as one has st->duration set. */
1572 static int av_has_duration(AVFormatContext *ic)
1577 for(i = 0;i < ic->nb_streams; i++) {
1578 st = ic->streams[i];
1579 if (st->duration != AV_NOPTS_VALUE)
1586 * Estimate the stream timings from the one of each components.
1588 * Also computes the global bitrate if possible.
/* Derive the container-level start_time, duration and (when file_size is
 * known) bit_rate from the per-stream values, all converted to
 * AV_TIME_BASE units via av_rescale_q().
 * NOTE(review): elided listing — a few closing braces and the AVStream
 * declaration are missing between the numbered lines. */
1590 static void av_update_stream_timings(AVFormatContext *ic)
1592 int64_t start_time, start_time1, end_time, end_time1;
1593 int64_t duration, duration1;
/* sentinels: INT64_MAX/INT64_MIN mean "no value found yet" */
1597 start_time = INT64_MAX;
1598 end_time = INT64_MIN;
1599 duration = INT64_MIN;
1600 for(i = 0;i < ic->nb_streams; i++) {
1601 st = ic->streams[i];
1602 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1603 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1604 if (start_time1 < start_time)
1605 start_time = start_time1;
1606 if (st->duration != AV_NOPTS_VALUE) {
1607 end_time1 = start_time1
1608 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1609 if (end_time1 > end_time)
1610 end_time = end_time1;
/* streams without a start_time can still contribute a duration */
1613 if (st->duration != AV_NOPTS_VALUE) {
1614 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1615 if (duration1 > duration)
1616 duration = duration1;
1619 if (start_time != INT64_MAX) {
1620 ic->start_time = start_time;
1621 if (end_time != INT64_MIN) {
/* prefer the span end_time-start_time if it exceeds the max stream duration */
1622 if (end_time - start_time > duration)
1623 duration = end_time - start_time;
1626 if (duration != INT64_MIN) {
1627 ic->duration = duration;
1628 if (ic->file_size > 0) {
1629 /* compute the bitrate */
1630 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1631 (double)ic->duration;
/* Back-fill per-stream start_time/duration from the container-level values
 * (first refreshed via av_update_stream_timings), converting from
 * AV_TIME_BASE units into each stream's own time base. Only streams that
 * have no start_time of their own are touched.
 * NOTE(review): elided listing — declarations and closing braces missing. */
1636 static void fill_all_stream_timings(AVFormatContext *ic)
1641 av_update_stream_timings(ic);
1642 for(i = 0;i < ic->nb_streams; i++) {
1643 st = ic->streams[i];
1644 if (st->start_time == AV_NOPTS_VALUE) {
1645 if(ic->start_time != AV_NOPTS_VALUE)
1646 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1647 if(ic->duration != AV_NOPTS_VALUE)
1648 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/* Estimate the container bitrate by summing per-codec bitrates (when
 * ic->bit_rate is unset), then estimate each stream's duration from
 * file size / bitrate: duration = 8*filesize / bit_rate, rescaled into
 * the stream's time base. Existing values are never overwritten.
 * NOTE(review): elided listing — the bit_rate accumulator declaration and
 * several braces are missing between the numbered lines. */
1653 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1655 int64_t filesize, duration;
1659 /* if bit_rate is already set, we believe it */
1660 if (ic->bit_rate == 0) {
1662 for(i=0;i<ic->nb_streams;i++) {
1663 st = ic->streams[i];
1664 bit_rate += st->codec->bit_rate;
1666 ic->bit_rate = bit_rate;
1669 /* if duration is already set, we believe it */
1670 if (ic->duration == AV_NOPTS_VALUE &&
1671 ic->bit_rate != 0 &&
1672 ic->file_size != 0) {
1673 filesize = ic->file_size;
1675 for(i = 0; i < ic->nb_streams; i++) {
1676 st = ic->streams[i];
1677 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1678 if (st->duration == AV_NOPTS_VALUE)
1679 st->duration = duration;
1685 #define DURATION_MAX_READ_SIZE 250000
1687 /* only usable for MPEG-PS streams */
/* Estimate stream timings by reading actual PTS values: read up to
 * DURATION_MAX_READ_SIZE bytes of packets from the file start to find each
 * stream's first pts (start_time), then seek near the end of the file and
 * read packets to find the last pts, taking duration = end - start.
 * Finally propagate to all streams and restore the original file offset.
 * Caller context says this is only usable for MPEG-PS-like streams.
 * NOTE(review): elided listing — loop heads (for(;;)), break/return lines
 * and some declarations (end_time, st in the last loop) are missing. */
1688 static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1690 AVPacket pkt1, *pkt = &pkt1;
1692 int read_size, i, ret;
1694 int64_t filesize, offset, duration;
1698 /* flush packet queue */
1699 flush_packet_queue(ic);
/* reset per-stream parser state before re-reading from the start */
1701 for(i=0;i<ic->nb_streams;i++) {
1702 st = ic->streams[i];
1704 av_parser_close(st->parser);
1706 av_free_packet(&st->cur_pkt);
1710 /* we read the first packets to get the first PTS (not fully
1711 accurate, but it is enough now) */
1712 url_fseek(ic->pb, 0, SEEK_SET);
1715 if (read_size >= DURATION_MAX_READ_SIZE)
1717 /* if all info is available, we can stop */
1718 for(i = 0;i < ic->nb_streams; i++) {
1719 st = ic->streams[i];
1720 if (st->start_time == AV_NOPTS_VALUE)
1723 if (i == ic->nb_streams)
1727 ret = av_read_packet(ic, pkt);
1728 }while(ret == AVERROR(EAGAIN));
1731 read_size += pkt->size;
1732 st = ic->streams[pkt->stream_index];
1733 if (pkt->pts != AV_NOPTS_VALUE) {
1734 if (st->start_time == AV_NOPTS_VALUE)
1735 st->start_time = pkt->pts;
1737 av_free_packet(pkt);
1740 /* estimate the end time (duration) */
1741 /* XXX: may need to support wrapping */
1742 filesize = ic->file_size;
1743 offset = filesize - DURATION_MAX_READ_SIZE;
1747 url_fseek(ic->pb, offset, SEEK_SET);
1750 if (read_size >= DURATION_MAX_READ_SIZE)
1754 ret = av_read_packet(ic, pkt);
1755 }while(ret == AVERROR(EAGAIN));
1758 read_size += pkt->size;
1759 st = ic->streams[pkt->stream_index];
1760 if (pkt->pts != AV_NOPTS_VALUE &&
1761 st->start_time != AV_NOPTS_VALUE) {
1762 end_time = pkt->pts;
1763 duration = end_time - st->start_time;
/* keep the largest duration observed near EOF */
1765 if (st->duration == AV_NOPTS_VALUE ||
1766 st->duration < duration)
1767 st->duration = duration;
1770 av_free_packet(pkt);
1773 fill_all_stream_timings(ic);
/* restore the caller's read position and reset decode timestamps */
1775 url_fseek(ic->pb, old_offset, SEEK_SET);
1776 for(i=0; i<ic->nb_streams; i++){
1778 st->cur_dts= st->first_dts;
1779 st->last_IP_pts = AV_NOPTS_VALUE;
/* Top-level timing estimation dispatcher: record the file size, then pick
 * the most accurate available strategy — PTS scanning for seekable
 * mpeg/mpegts, per-stream timings if any stream has a duration, else the
 * bitrate heuristic — and finally refresh the container-level timings.
 * The trailing printf loop appears to be debug output (likely inside an
 * elided #ifdef — TODO confirm against the full source).
 * NOTE(review): elided listing — declarations (file_size, i, st) and some
 * braces are missing between the numbered lines. */
1783 static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset)
1787 /* get the file size, if possible */
1788 if (ic->iformat->flags & AVFMT_NOFILE) {
1791 file_size = url_fsize(ic->pb);
1795 ic->file_size = file_size;
1797 if ((!strcmp(ic->iformat->name, "mpeg") ||
1798 !strcmp(ic->iformat->name, "mpegts")) &&
1799 file_size && !url_is_streamed(ic->pb)) {
1800 /* get accurate estimate from the PTSes */
1801 av_estimate_timings_from_pts(ic, old_offset);
1802 } else if (av_has_duration(ic)) {
1803 /* at least one component has timings - we use them for all
1805 fill_all_stream_timings(ic);
1807 /* less precise: use bitrate info */
1808 av_estimate_timings_from_bit_rate(ic);
1810 av_update_stream_timings(ic);
1816 for(i = 0;i < ic->nb_streams; i++) {
1817 st = ic->streams[i];
1818 printf("%d: start_time: %0.3f duration: %0.3f\n",
1819 i, (double)st->start_time / AV_TIME_BASE,
1820 (double)st->duration / AV_TIME_BASE);
1822 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1823 (double)ic->start_time / AV_TIME_BASE,
1824 (double)ic->duration / AV_TIME_BASE,
1825 ic->bit_rate / 1000);
/* Return nonzero when 'enc' carries enough parameters to be usable:
 * audio needs sample_rate + channels + sample_fmt (and, for Vorbis/AAC,
 * a known frame_size); video needs width + pix_fmt; and in all cases a
 * codec_id must be set.
 * NOTE(review): elided listing — the 'val' declaration, break statements,
 * the default case and closing braces are missing between the lines. */
1830 static int has_codec_parameters(AVCodecContext *enc)
1833 switch(enc->codec_type) {
1834 case CODEC_TYPE_AUDIO:
1835 val = enc->sample_rate && enc->channels && enc->sample_fmt != SAMPLE_FMT_NONE;
/* Vorbis/AAC additionally require frame_size before they are considered ready */
1836 if(!enc->frame_size &&
1837 (enc->codec_id == CODEC_ID_VORBIS ||
1838 enc->codec_id == CODEC_ID_AAC))
1841 case CODEC_TYPE_VIDEO:
1842 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1848 return enc->codec_id != CODEC_ID_NONE && val != 0;
/* Probe a stream by actually decoding one frame: open the decoder if
 * needed, then (only when parameters are still missing) decode 'data' as
 * video or audio so the decoder fills in width/pix_fmt/sample_rate/etc.
 * NOTE(review): elided listing — declarations (codec, picture, samples),
 * error returns, the audio free/cleanup path and closing braces are
 * missing between the numbered lines. */
1851 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1855 int got_picture, data_size, ret=0;
1858 if(!st->codec->codec){
1859 codec = avcodec_find_decoder(st->codec->codec_id);
1862 ret = avcodec_open(st->codec, codec);
1867 if(!has_codec_parameters(st->codec)){
1868 switch(st->codec->codec_type) {
1869 case CODEC_TYPE_VIDEO:
1870 ret = avcodec_decode_video(st->codec, &picture,
1871 &got_picture, data, size);
1873 case CODEC_TYPE_AUDIO:
/* audio output buffer must hold at least AVCODEC_MAX_AUDIO_FRAME_SIZE */
1874 data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1875 samples = av_malloc(data_size);
1878 ret = avcodec_decode_audio2(st->codec, samples,
1879 &data_size, data, size);
/* Look up the container tag (fourcc) for a codec id in a
 * CODEC_ID_NONE-terminated AVCodecTag table.
 * NOTE(review): elided listing — the loop body (match test, tags++,
 * fallback return) is missing after these lines. */
1890 unsigned int codec_get_tag(const AVCodecTag *tags, int id)
1892 while (tags->id != CODEC_ID_NONE) {
/* Reverse lookup: map a container tag (fourcc) to a codec id. First pass
 * matches exactly; second pass retries case-insensitively byte-by-byte.
 * Returns CODEC_ID_NONE when no entry matches.
 * NOTE(review): elided listing — the 'int i' declaration, the return of
 * tags[i].id on match, and closing braces are missing. */
1900 enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag)
1903 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1904 if(tag == tags[i].tag)
1907 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
/* compare each of the four tag bytes ignoring case */
1908 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1909 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1910 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1911 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1914 return CODEC_ID_NONE;
/* Search a NULL-terminated array of AVCodecTag tables for the tag of 'id';
 * delegates to codec_get_tag() per table.
 * NOTE(review): elided listing — the return-on-hit and fallback return 0
 * are missing after these lines. */
1917 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
1920 for(i=0; tags && tags[i]; i++){
1921 int tag= codec_get_tag(tags[i], id);
/* Search a NULL-terminated array of AVCodecTag tables for the codec id of
 * 'tag'; first hit wins, CODEC_ID_NONE if nothing matches.
 * NOTE(review): elided listing — the 'int i' declaration and a closing
 * brace are missing between the numbered lines. */
1927 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
1930 for(i=0; tags && tags[i]; i++){
1931 enum CodecID id= codec_get_id(tags[i], tag);
1932 if(id!=CODEC_ID_NONE) return id;
1934 return CODEC_ID_NONE;
/* Fill in missing chapter end times: each open-ended chapter ends where the
 * next one starts (chapters are asserted sorted with equal time bases), and
 * the final chapter ends at container start_time + duration rescaled into
 * its own time base.
 * NOTE(review): elided listing — the 'int i' declaration, the AV_TIME_BASE_Q
 * argument line of av_rescale_q and the closing brace are missing. */
1937 static void compute_chapters_end(AVFormatContext *s)
1941 for (i=0; i+1<s->nb_chapters; i++)
1942 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
1943 assert(s->chapters[i]->start <= s->chapters[i+1]->start);
1944 assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
1945 s->chapters[i]->end = s->chapters[i+1]->start;
/* after the loop, i == nb_chapters-1: handle the last chapter */
1948 if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
1949 assert(s->start_time != AV_NOPTS_VALUE);
1950 assert(s->duration > 0);
1951 s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
1953 s->chapters[i]->time_base);
1957 /* absolute maximum size we read until we abort */
1958 #define MAX_READ_SIZE 5000000
1960 #define MAX_STD_TIMEBASES (60*12+5)
/* Map index i to a candidate standard frame rate, expressed in units of
 * 1/(12*1001) Hz: indices 0..60*12-1 cover i/1.001 fps in 1/12 steps;
 * the last 5 indices are the exact rates 24, 30, 60, 12 and 15 fps. */
1961 static int get_std_framerate(int i){
1962 if(i<60*12) return i*1001;
1963 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
1967 * Is the time base unreliable.
1968 * This is a heuristic to balance between quick acceptance of the values in
1969 * the headers vs. some extra checks.
1970 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
1971 * MPEG-2 commonly misuses field repeat flags to store different framerates.
1972 * And there are "variable" fps files this needs to detect as well.
/* Nonzero when the declared frame rate is outside ~5..100 fps or the codec
 * is MPEG-2 video (see the rationale above).
 * NOTE(review): elided listing — the return 1/return 0 lines are missing. */
1974 static int tb_unreliable(AVCodecContext *c){
1975 if( c->time_base.den >= 101L*c->time_base.num
1976 || c->time_base.den < 5L*c->time_base.num
1977 /* || c->codec_tag == AV_RL32("DIVX")
1978 || c->codec_tag == AV_RL32("XVID")*/
1979 || c->codec_id == CODEC_ID_MPEG2VIDEO)
/* Probe the input until every stream has usable codec parameters (or limits
 * are hit): read frames, measure inter-packet DTS deltas to estimate the
 * real frame rate against a table of standard rates, split out extradata
 * via the parser, decode a frame as a last resort, then estimate timings
 * and chapter ends. Heap-allocates duration_error (freed on all visible
 * exits) to keep the large per-stream error table off the stack.
 * NOTE(review): elided listing throughout — loop heads, break/continue
 * lines, several declarations (st, buf, ppkt1, ppktl, delta) and many
 * closing braces are missing between the numbered lines. */
1984 int av_find_stream_info(AVFormatContext *ic)
1986 int i, count, ret, read_size, j;
1988 AVPacket pkt1, *pkt;
1989 int64_t last_dts[MAX_STREAMS];
1990 int duration_count[MAX_STREAMS]={0};
1991 double (*duration_error)[MAX_STD_TIMEBASES];
1992 int64_t old_offset = url_ftell(ic->pb);
1993 int64_t codec_info_duration[MAX_STREAMS]={0};
1994 int codec_info_nb_frames[MAX_STREAMS]={0};
1996 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
1997 if (!duration_error) return AVERROR(ENOMEM);
/* initialize codec time bases and parsers for each stream */
1999 for(i=0;i<ic->nb_streams;i++) {
2000 st = ic->streams[i];
2001 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2002 /* if(!st->time_base.num)
2004 if(!st->codec->time_base.num)
2005 st->codec->time_base= st->time_base;
2007 //only for the split stuff
2009 st->parser = av_parser_init(st->codec->codec_id);
2010 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2011 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2016 for(i=0;i<MAX_STREAMS;i++){
2017 last_dts[i]= AV_NOPTS_VALUE;
2023 /* check if one codec still needs to be handled */
2024 for(i=0;i<ic->nb_streams;i++) {
2025 st = ic->streams[i];
2026 if (!has_codec_parameters(st->codec))
2028 /* variable fps and no guess at the real fps */
2029 if( tb_unreliable(st->codec)
2030 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
2032 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2034 if(st->first_dts == AV_NOPTS_VALUE)
2037 if (i == ic->nb_streams) {
2038 /* NOTE: if the format has no header, then we need to read
2039 some packets to get most of the streams, so we cannot
2041 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2042 /* if we found the info for all the codecs, we can stop */
2047 /* we did not get all the codec info, but we read too much data */
2048 if (read_size >= MAX_READ_SIZE) {
2053 /* NOTE: a new stream can be added there if no header in file
2054 (AVFMTCTX_NOHEADER) */
2055 ret = av_read_frame_internal(ic, &pkt1);
2056 if(ret == AVERROR(EAGAIN))
2060 ret = -1; /* we could not have all the codec parameters before EOF */
2061 for(i=0;i<ic->nb_streams;i++) {
2062 st = ic->streams[i];
2063 if (!has_codec_parameters(st->codec)){
2065 avcodec_string(buf, sizeof(buf), st->codec, 0);
2066 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
/* keep the packet buffered so the user still receives it later */
2074 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2075 if(av_dup_packet(pkt) < 0) {
2076 av_free(duration_error);
2077 return AVERROR(ENOMEM);
2080 read_size += pkt->size;
2082 st = ic->streams[pkt->stream_index];
2083 if(codec_info_nb_frames[st->index]>1)
2084 codec_info_duration[st->index] += pkt->duration;
2085 if (pkt->duration != 0)
2086 codec_info_nb_frames[st->index]++;
2089 int index= pkt->stream_index;
2090 int64_t last= last_dts[index];
2091 int64_t duration= pkt->dts - last;
2093 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2094 double dur= duration * av_q2d(st->time_base);
2096 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2097 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2098 if(duration_count[index] < 2)
2099 memset(duration_error[index], 0, sizeof(*duration_error));
/* accumulate squared error of this frame duration against each
 * candidate standard frame rate */
2100 for(i=1; i<MAX_STD_TIMEBASES; i++){
2101 int framerate= get_std_framerate(i);
2102 int ticks= lrintf(dur*framerate/(1001*12));
2103 double error= dur - ticks*1001*12/(double)framerate;
2104 duration_error[index][i] += error*error;
2106 duration_count[index]++;
2108 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
2109 last_dts[pkt->stream_index]= pkt->dts;
2111 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2112 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2114 st->codec->extradata_size= i;
2115 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2116 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2117 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2121 /* if still no information, we try to open the codec and to
2122 decompress the frame. We try to avoid that in most cases as
2123 it takes longer and uses more memory. For MPEG-4, we need to
2124 decompress for QuickTime. */
2125 if (!has_codec_parameters(st->codec) /*&&
2126 (st->codec->codec_id == CODEC_ID_FLV1 ||
2127 st->codec->codec_id == CODEC_ID_H264 ||
2128 st->codec->codec_id == CODEC_ID_H263 ||
2129 st->codec->codec_id == CODEC_ID_H261 ||
2130 st->codec->codec_id == CODEC_ID_VORBIS ||
2131 st->codec->codec_id == CODEC_ID_MJPEG ||
2132 st->codec->codec_id == CODEC_ID_PNG ||
2133 st->codec->codec_id == CODEC_ID_PAM ||
2134 st->codec->codec_id == CODEC_ID_PGM ||
2135 st->codec->codec_id == CODEC_ID_PGMYUV ||
2136 st->codec->codec_id == CODEC_ID_PBM ||
2137 st->codec->codec_id == CODEC_ID_PPM ||
2138 st->codec->codec_id == CODEC_ID_SHORTEN ||
2139 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
2140 try_decode_frame(st, pkt->data, pkt->size);
2142 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2148 // close codecs which were opened in try_decode_frame()
2149 for(i=0;i<ic->nb_streams;i++) {
2150 st = ic->streams[i];
2151 if(st->codec->codec)
2152 avcodec_close(st->codec);
2154 for(i=0;i<ic->nb_streams;i++) {
2155 st = ic->streams[i];
2156 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2157 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample)
2158 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
/* pick the standard frame rate with the lowest accumulated error */
2160 if(duration_count[i]
2161 && tb_unreliable(st->codec) /*&&
2162 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2163 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
2164 double best_error= 2*av_q2d(st->time_base);
2165 best_error= best_error*best_error*duration_count[i]*1000*12*30;
2167 for(j=1; j<MAX_STD_TIMEBASES; j++){
2168 double error= duration_error[i][j] * get_std_framerate(j);
2169 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2170 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2171 if(error < best_error){
2173 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, get_std_framerate(j), 12*1001, INT_MAX);
/* fallback: derive r_frame_rate from codec or stream time base */
2178 if (!st->r_frame_rate.num){
2179 if( st->codec->time_base.den * (int64_t)st->time_base.num
2180 <= st->codec->time_base.num * (int64_t)st->time_base.den){
2181 st->r_frame_rate.num = st->codec->time_base.den;
2182 st->r_frame_rate.den = st->codec->time_base.num;
2184 st->r_frame_rate.num = st->time_base.den;
2185 st->r_frame_rate.den = st->time_base.num;
2188 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
2189 if(!st->codec->bits_per_coded_sample)
2190 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2194 av_estimate_timings(ic, old_offset);
2196 compute_chapters_end(ic);
2199 /* correct DTS for B-frame streams with no timestamps */
2200 for(i=0;i<ic->nb_streams;i++) {
2201 st = ic->streams[i];
2202 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2204 ppktl = &ic->packet_buffer;
2206 if(ppkt1->stream_index != i)
2208 if(ppkt1->pkt->dts < 0)
2210 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2212 ppkt1->pkt->dts -= delta;
2217 st->cur_dts -= delta;
2223 av_free(duration_error);
2228 /*******************************************************/
/* Resume playback: prefer the demuxer's read_play callback, otherwise
 * unpause via the protocol layer; ENOSYS when neither is available.
 * NOTE(review): elided listing — the condition guarding the fpause call
 * (likely an s->pb check) is missing between lines 2233 and 2235. */
2230 int av_read_play(AVFormatContext *s)
2232 if (s->iformat->read_play)
2233 return s->iformat->read_play(s);
2235 return av_url_read_fpause(s->pb, 0);
2236 return AVERROR(ENOSYS);
/* Pause playback: mirror of av_read_play() with pause=1.
 * NOTE(review): elided listing — the condition guarding the fpause call
 * is missing between lines 2242 and 2244. */
2239 int av_read_pause(AVFormatContext *s)
2241 if (s->iformat->read_pause)
2242 return s->iformat->read_pause(s);
2244 return av_url_read_fpause(s->pb, 1);
2245 return AVERROR(ENOSYS);
2248 void av_close_input_stream(AVFormatContext *s)
2253 if (s->iformat->read_close)
2254 s->iformat->read_close(s);
2255 for(i=0;i<s->nb_streams;i++) {
2256 /* free all data in a stream component */
2259 av_parser_close(st->parser);
2260 av_free_packet(&st->cur_pkt);
2262 av_metadata_free(&st->metadata);
2263 av_free(st->index_entries);
2264 av_free(st->codec->extradata);
2266 av_free(st->filename);
2267 av_free(st->priv_data);
2270 for(i=s->nb_programs-1; i>=0; i--) {
2271 av_freep(&s->programs[i]->provider_name);
2272 av_freep(&s->programs[i]->name);
2273 av_metadata_free(&s->programs[i]->metadata);
2274 av_freep(&s->programs[i]->stream_index);
2275 av_freep(&s->programs[i]);
2277 av_freep(&s->programs);
2278 flush_packet_queue(s);
2279 av_freep(&s->priv_data);
2280 while(s->nb_chapters--) {
2281 av_free(s->chapters[s->nb_chapters]->title);
2282 av_metadata_free(&s->chapters[s->nb_chapters]->metadata);
2283 av_free(s->chapters[s->nb_chapters]);
2285 av_freep(&s->chapters);
2286 av_metadata_free(&s->metadata);
/* Fully close an input file: capture the ByteIOContext first (it is NULL
 * for AVFMT_NOFILE formats), close the stream-level state, then —
 * presumably in an elided line — url_fclose(pb); TODO confirm. */
2290 void av_close_input_file(AVFormatContext *s)
2292 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2293 av_close_input_stream(s);
/* Allocate and register a new AVStream on 's' with the given user id:
 * zero-initialized, fresh codec context, no bitrate, unknown start/duration,
 * MPEG-like 90kHz/33-bit default timestamp info, and an empty PTS reorder
 * buffer. Returns the stream, appended to s->streams.
 * NOTE(review): elided listing — NULL-checks after the allocations, the
 * st->id assignment, cur_dts initialization and return st are missing. */
2298 AVStream *av_new_stream(AVFormatContext *s, int id)
2303 if (s->nb_streams >= MAX_STREAMS)
2306 st = av_mallocz(sizeof(AVStream));
2310 st->codec= avcodec_alloc_context();
2312 /* no default bitrate if decoding */
2313 st->codec->bit_rate = 0;
2315 st->index = s->nb_streams;
2317 st->start_time = AV_NOPTS_VALUE;
2318 st->duration = AV_NOPTS_VALUE;
2319 /* we set the current DTS to 0 so that formats without any timestamps
2320 but durations get some timestamps, formats with some unknown
2321 timestamps have their first few packets buffered and the
2322 timestamps corrected before they are returned to the user */
2324 st->first_dts = AV_NOPTS_VALUE;
2326 /* default pts setting is MPEG-like */
2327 av_set_pts_info(st, 33, 1, 90000);
2328 st->last_IP_pts = AV_NOPTS_VALUE;
2329 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2330 st->pts_buffer[i]= AV_NOPTS_VALUE;
2332 st->sample_aspect_ratio = (AVRational){0,1};
2334 s->streams[s->nb_streams++] = st;
/* Find the AVProgram with the given id or create and register a new one
 * (discard defaulting to AVDISCARD_NONE). Returns the program.
 * NOTE(review): elided listing — the NULL-check after av_mallocz, the
 * program->id assignment and return are missing. */
2338 AVProgram *av_new_program(AVFormatContext *ac, int id)
2340 AVProgram *program=NULL;
2344 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
2347 for(i=0; i<ac->nb_programs; i++)
2348 if(ac->programs[i]->id == id)
2349 program = ac->programs[i];
2352 program = av_mallocz(sizeof(AVProgram));
2355 dynarray_add(&ac->programs, &ac->nb_programs, program);
2356 program->discard = AVDISCARD_NONE;
/* Find the AVChapter with the given id or create a new one, then (re)set
 * its title (previous title freed first), time base, start and — in an
 * elided line — end; returns the chapter.
 * NOTE(review): elided listing — allocation NULL-check, chapter->id and
 * chapter->end assignments and return are missing. */
2363 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2365 AVChapter *chapter = NULL;
2368 for(i=0; i<s->nb_chapters; i++)
2369 if(s->chapters[i]->id == id)
2370 chapter = s->chapters[i];
2373 chapter= av_mallocz(sizeof(AVChapter));
2376 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2378 av_free(chapter->title);
2379 chapter->title = av_strdup(title);
2381 chapter->time_base= time_base;
2382 chapter->start = start;
2388 /************************************************************/
2389 /* output media file */
/* Prepare an output context: allocate the muxer's priv_data (if it declares
 * a size) and forward 'ap' to the muxer's set_parameters callback when
 * present; ENOMEM on allocation failure.
 * NOTE(review): elided listing — the NULL-check of the allocation and the
 * final return are missing between the numbered lines. */
2391 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2395 if (s->oformat->priv_data_size > 0) {
2396 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2398 return AVERROR(ENOMEM);
2400 s->priv_data = NULL;
2402 if (s->oformat->set_parameters) {
2403 ret = s->oformat->set_parameters(s, ap);
/* Validate streams and write the container header: sanity-check audio
 * sample rate / video time base, dimensions and aspect ratio, reconcile
 * codec tags against the muxer's tag tables, allocate priv_data if still
 * missing, run legacy metadata conversion, call the muxer's write_header,
 * and initialize per-stream fractional PTS state.
 * NOTE(review): elided listing — error returns after each av_log, break
 * statements, the st declarations, the #endif for the version guard and
 * the den<=0 check before av_frac_init are missing. */
2410 int av_write_header(AVFormatContext *s)
2415 // some sanity checks
2416 for(i=0;i<s->nb_streams;i++) {
2419 switch (st->codec->codec_type) {
2420 case CODEC_TYPE_AUDIO:
2421 if(st->codec->sample_rate<=0){
2422 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2425 if(!st->codec->block_align)
2426 st->codec->block_align = st->codec->channels *
2427 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2429 case CODEC_TYPE_VIDEO:
2430 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2431 av_log(s, AV_LOG_ERROR, "time base not set\n");
2434 if(st->codec->width<=0 || st->codec->height<=0){
2435 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2438 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2439 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
2445 if(s->oformat->codec_tag){
2446 if(st->codec->codec_tag){
2448 //check that tag + id is in the table
2449 //if neither is in the table -> OK
2450 //if tag is in the table with another id -> FAIL
2451 //if id is in the table with another tag -> FAIL unless strict < ?
2453 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2456 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
2457 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
2458 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
2461 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2462 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2464 return AVERROR(ENOMEM);
2467 #if LIBAVFORMAT_VERSION_MAJOR < 53
2468 ff_metadata_mux_compat(s);
2471 if(s->oformat->write_header){
2472 ret = s->oformat->write_header(s);
2477 /* init PTS generation */
2478 for(i=0;i<s->nb_streams;i++) {
2479 int64_t den = AV_NOPTS_VALUE;
2482 switch (st->codec->codec_type) {
2483 case CODEC_TYPE_AUDIO:
/* pts denominator: ticks per second in stream time base units */
2484 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2486 case CODEC_TYPE_VIDEO:
2487 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2492 if (den != AV_NOPTS_VALUE) {
2494 return AVERROR_INVALIDDATA;
2495 av_frac_init(&st->pts, 0, 0, den);
2501 //FIXME merge with compute_pkt_fields
/* Muxing-side timestamp bookkeeping: fill pkt->duration from the frame
 * duration when unset, synthesize pts/dts for delay-free streams, derive
 * dts from pts through the reorder buffer for B-frame streams, reject
 * non-monotone dts and pts<dts, then advance st->cur_dts and the
 * fractional st->pts by one frame (audio: frame_size samples; video: one
 * codec time_base tick).
 * NOTE(review): elided listing — several returns (error paths), pkt->dts
 * assignments, and closing braces are missing between the numbered lines. */
2502 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2503 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2504 int num, den, frame_size, i;
2506 //    av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2508 /*    if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2511 /* duration field */
2512 if (pkt->duration == 0) {
2513 compute_frame_duration(&num, &den, st, NULL, pkt);
2515 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
2519 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2522 //XXX/FIXME this is a temporary hack until all encoders output pts
2523 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2525 //        pkt->pts= st->cur_dts;
2526 pkt->pts= st->pts.val;
2529 //calculate dts from pts
2530 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
2531 st->pts_buffer[0]= pkt->pts;
/* fill the reorder buffer with synthetic values, then bubble the new pts
 * into sorted position; the smallest entry becomes this packet's dts */
2532 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2533 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
2534 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2535 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2537 pkt->dts= st->pts_buffer[0];
2540 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2541 av_log(st->codec, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2544 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2545 av_log(st->codec, AV_LOG_ERROR, "error, pts < dts\n");
2549 //    av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2550 st->cur_dts= pkt->dts;
2551 st->pts.val= pkt->dts;
2554 switch (st->codec->codec_type) {
2555 case CODEC_TYPE_AUDIO:
2556 frame_size = get_audio_frame_size(st->codec, pkt->size);
2558 /* HACK/FIXME, we skip the initial 0 size packets as they are most
2559 likely equal to the encoder delay, but it would be better if we
2560 had the real timestamps from the encoder */
2561 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2562 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2565 case CODEC_TYPE_VIDEO:
2566 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
/* Write one packet directly (no interleaving): compute/validate timestamps
 * via compute_pkt_fields2 (failure is fatal unless the muxer declares
 * AVFMT_NOTIMESTAMPS), pass it to the muxer, then report any I/O error.
 * NOTE(review): elided listing — the error return after the check and the
 * final return of ret are missing between the numbered lines. */
2574 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2576 int ret = compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2578 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2581 ret= s->oformat->write_packet(s, pkt);
2583 ret= url_ferror(s->pb);
/* Insert 'pkt' into s->packet_buffer keeping it sorted per 'compare'
 * (insertion before the first buffered packet that compares after it).
 * Ownership: a non-shared packet's payload is moved into the list (the
 * source destruct is cleared); a shared one is duplicated.
 * NOTE(review): elided listing — the loop head walking next_point and a
 * closing brace are missing between the numbered lines. */
2587 void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
2588 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
2590 AVPacketList **next_point, *this_pktl;
2592 this_pktl = av_mallocz(sizeof(AVPacketList));
2593 this_pktl->pkt= *pkt;
2594 if(pkt->destruct == av_destruct_packet)
2595 pkt->destruct= NULL; // not shared -> must keep original from being freed
2597 av_dup_packet(&this_pktl->pkt); //shared -> must dup
2599 next_point = &s->packet_buffer;
2601 if(compare(s, &(*next_point)->pkt, pkt))
2603 next_point= &(*next_point)->next;
2605 this_pktl->next= *next_point;
2606 *next_point= this_pktl;
/* Ordering predicate for DTS interleaving: nonzero when 'next' should come
 * after 'pkt', comparing dts values cross-multiplied into a common time
 * base (the author's FIXME notes the product can overflow).
 * NOTE(review): elided listing — the branch taken when pkt->dts is
 * AV_NOPTS_VALUE is missing after line 2616. */
2609 int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
2611 AVStream *st = s->streams[ pkt ->stream_index];
2612 AVStream *st2= s->streams[ next->stream_index];
2613 int64_t left = st2->time_base.num * (int64_t)st ->time_base.den;
2614 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2616 if (pkt->dts == AV_NOPTS_VALUE)
2619 return next->dts * left > pkt->dts * right; //FIXME this can overflow
/* DTS-based interleaver: buffer 'pkt' in sorted order, count how many
 * distinct streams have buffered data, and emit the head packet once every
 * stream is represented (or on flush); otherwise output nothing.
 * NOTE(review): elided listing — declarations (pktl, stream_count), the
 * counting-loop head, *out assignment, list-node free and returns are
 * missing between the numbered lines. */
2622 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2625 int streams[MAX_STREAMS];
2628 ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
2631 memset(streams, 0, sizeof(streams));
2632 pktl= s->packet_buffer;
2634 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
2635 if(streams[ pktl->pkt.stream_index ] == 0)
2637 streams[ pktl->pkt.stream_index ]++;
2641 if(stream_count && (s->nb_streams == stream_count || flush)){
2642 pktl= s->packet_buffer;
2645 s->packet_buffer= pktl->next;
2649 av_init_packet(out);
2655 * Interleaves an AVPacket correctly so it can be muxed.
2656 * @param out the interleaved packet will be output here
2657 * @param in the input packet
2658 * @param flush 1 if no further packets are available as input and all
2659 * remaining packets should be output
2660 * @return 1 if a packet was output, 0 if no packet could be output,
2661 * < 0 if an error occurred
/* Dispatch to the muxer's own interleaver when provided, else use the
 * default per-DTS interleaver. */
2663 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2664 if(s->oformat->interleave_packet)
2665 return s->oformat->interleave_packet(s, out, in, flush);
2667 return av_interleave_packet_per_dts(s, out, in, flush);
/* Interleave and write one packet: drop zero-size audio packets (noted as
 * a hack), compute timestamp fields, require a dts unless the muxer is
 * AVFMT_NOTIMESTAMPS, then loop draining the interleaver into the muxer
 * until no packet is ready; finally report I/O errors.
 * NOTE(review): elided listing — the error returns, the for(;;) loop head,
 * the opkt declaration and closing braces are missing. */
2670 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2671 AVStream *st= s->streams[ pkt->stream_index];
2673 //FIXME/XXX/HACK drop zero sized packets
2674 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2677 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2678 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2681 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2686 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2687 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2690 ret= s->oformat->write_packet(s, &opkt);
2692 av_free_packet(&opkt);
2697 if(url_ferror(s->pb))
2698 return url_ferror(s->pb);
/* Finish the output file: flush the interleaver (flush=1) writing every
 * remaining packet, call the muxer's write_trailer, pick up any pending
 * I/O error, and free per-stream and context private data.
 * NOTE(review): elided listing — declarations (ret, i, pkt), the drain
 * loop head, break statements and the final return are missing. */
2702 int av_write_trailer(AVFormatContext *s)
2708 ret= av_interleave_packet(s, &pkt, NULL, 1);
2709 if(ret<0) //FIXME cleanup needed for ret<0 ?
2714 ret= s->oformat->write_packet(s, &pkt);
2716 av_free_packet(&pkt);
2720 if(url_ferror(s->pb))
2724 if(s->oformat->write_trailer)
2725 ret = s->oformat->write_trailer(s);
2728 ret=url_ferror(s->pb);
2729 for(i=0;i<s->nb_streams;i++)
2730 av_freep(&s->streams[i]->priv_data);
2731 av_freep(&s->priv_data);
/* Add stream index 'idx' to the program with id 'progid', growing its
 * stream_index array by one; no-op if the index is already present.
 * NOTE(review): elided listing — declarations (i, j, tmp), break/return
 * statements, the realloc NULL-check path and closing braces are missing. */
2735 void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
2738 AVProgram *program=NULL;
2741 for(i=0; i<ac->nb_programs; i++){
2742 if(ac->programs[i]->id != progid)
2744 program = ac->programs[i];
2745 for(j=0; j<program->nb_stream_indexes; j++)
2746 if(program->stream_index[j] == idx)
/* grow-by-one via av_realloc into tmp first, so the original array
 * survives an allocation failure */
2749 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
2752 program->stream_index = tmp;
2753 program->stream_index[program->nb_stream_indexes++] = idx;
2758 /* "user interface" functions */
/* Log a one-line human-readable description of stream i: "Stream #f.i",
 * optional id (AVFMT_SHOW_IDS), language, codec string, sample/display
 * aspect ratio when non-default, and the r_frame_rate / codec time base
 * for video streams.
 * NOTE(review): elided listing — the 'buf' declaration, the INT_MAX
 * argument of av_reduce, an else-if branch before the tb(c) line and
 * closing braces are missing between the numbered lines. */
2759 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
2762 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
2763 AVStream *st = ic->streams[i];
2764 int g = av_gcd(st->time_base.num, st->time_base.den);
2765 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2766 av_log(NULL, AV_LOG_INFO, "    Stream #%d.%d", index, i);
2767 /* the pid is an important information, so we display it */
2768 /* XXX: add a generic system */
2769 if (flags & AVFMT_SHOW_IDS)
2770 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2771 if (strlen(st->language) > 0)
2772 av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
2773 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2774 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2775 if (st->sample_aspect_ratio.num && // default
2776 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
2777 AVRational display_aspect_ratio;
2778 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
2779 st->codec->width*st->sample_aspect_ratio.num,
2780 st->codec->height*st->sample_aspect_ratio.den,
2782 av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d",
2783 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
2784 display_aspect_ratio.num, display_aspect_ratio.den);
2786 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2787 if(st->r_frame_rate.den && st->r_frame_rate.num)
2788 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(r)", av_q2d(st->r_frame_rate));
2789 /*      else if(st->time_base.den && st->time_base.num)
2790 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(m)", 1/av_q2d(st->time_base));*/
2792 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(c)", 1/av_q2d(st->codec->time_base));
2794 av_log(NULL, AV_LOG_INFO, "\n");
/**
 * Log a human-readable summary of a media file: container name, URL,
 * duration, start time, overall bitrate, program list, and a per-stream
 * line via dump_stream_format().
 *
 * @param ic        format context to describe
 * (remaining parameters: file index, URL string, and is_output flag —
 *  their declarations fall outside this view)
 */
2797 void dump_format(AVFormatContext *ic,
2804 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2805 is_output ? "Output" : "Input",
2807 is_output ? ic->oformat->name : ic->iformat->name,
2808 is_output ? "to" : "from", url);
2810 av_log(NULL, AV_LOG_INFO, " Duration: ");
/* Duration printed as HH:MM:SS.cc (centiseconds), or N/A if unknown. */
2811 if (ic->duration != AV_NOPTS_VALUE) {
2812 int hours, mins, secs, us;
2813 secs = ic->duration / AV_TIME_BASE;
2814 us = ic->duration % AV_TIME_BASE;
2819 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
2820 (100 * us) / AV_TIME_BASE);
2822 av_log(NULL, AV_LOG_INFO, "N/A");
/* Start time printed as seconds.microseconds when known. */
2824 if (ic->start_time != AV_NOPTS_VALUE) {
2826 av_log(NULL, AV_LOG_INFO, ", start: ");
2827 secs = ic->start_time / AV_TIME_BASE;
2828 us = ic->start_time % AV_TIME_BASE;
2829 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2830 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2832 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2834 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2836 av_log(NULL, AV_LOG_INFO, "N/A");
2838 av_log(NULL, AV_LOG_INFO, "\n");
/* When programs exist (e.g. MPEG-TS), group the stream lines under them. */
2840 if(ic->nb_programs) {
2842 for(j=0; j<ic->nb_programs; j++) {
2843 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
2844 ic->programs[j]->name ? ic->programs[j]->name : "");
2845 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++)
2846 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
2849 for(i=0;i<ic->nb_streams;i++)
2850 dump_stream_format(ic, i, index, is_output);
2853 #if LIBAVFORMAT_VERSION_MAJOR < 53
/**
 * Parse a frame size string (e.g. "640x480" or an abbreviation).
 * @deprecated thin compatibility wrapper, kept only while
 *             LIBAVFORMAT_VERSION_MAJOR < 53; use
 *             av_parse_video_frame_size() directly instead.
 */
2854 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2856 return av_parse_video_frame_size(width_ptr, height_ptr, str);
/**
 * Parse a frame rate string into separate numerator/denominator ints.
 * @deprecated thin compatibility wrapper around
 *             av_parse_video_frame_rate(); use that function directly.
 */
2859 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
2861 AVRational frame_rate;
2862 int ret = av_parse_video_frame_rate(&frame_rate, arg);
/* Unpack the rational into the caller's two int outputs. */
2863 *frame_rate_num= frame_rate.num;
2864 *frame_rate_den= frame_rate.den;
/**
 * Get the current wall-clock time in microseconds since the Unix epoch,
 * based on gettimeofday().
 */
2869 int64_t av_gettime(void)
2872 gettimeofday(&tv,NULL);
/* Widen tv_sec before multiplying to avoid 32-bit overflow. */
2873 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
/**
 * Parse datestr and return its value in microseconds.
 *
 * With duration == 0, datestr is an absolute date/time: "now", or a
 * date part ([YYYY-MM-DD]) optionally followed by a time part
 * (HH:MM:SS[.m...]); a trailing 'z'/'Z' selects UTC instead of local
 * time. With duration non-zero, datestr is a time span: HH:MM:SS or a
 * plain number of seconds, with an optional fractional part.
 *
 * @param datestr string to parse
 * @param duration non-zero to parse as a duration rather than a date
 * @return microseconds; negated when the input carries a leading minus
 *         (see the final return). NOTE(review): the error-return value
 *         for unparsable input is set on elided lines — confirm.
 */
2876 int64_t parse_date(const char *datestr, int duration)
2882 static const char * const date_fmt[] = {
2886 static const char * const time_fmt[] = {
2896 time_t now = time(0);
2898 len = strlen(datestr);
2900 lastch = datestr[len - 1];
/* A trailing 'z'/'Z' means the timestamp is expressed in UTC. */
2903 is_utc = (lastch == 'z' || lastch == 'Z');
2905 memset(&dt, 0, sizeof(dt));
/* "now" short-circuits to the current time in microseconds. */
2910 if (!strncasecmp(datestr, "now", len))
2911 return (int64_t) now * 1000000;
2913 /* parse the year-month-day part */
2914 for (i = 0; i < FF_ARRAY_ELEMS(date_fmt); i++) {
2915 q = small_strptime(p, date_fmt[i], &dt);
2921 /* if the year-month-day part is missing, then take the
2922 * current year-month-day time */
2927 dt = *localtime(&now);
2929 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
/* 'T', 't' or a space may separate the date part from the time part. */
2934 if (*p == 'T' || *p == 't' || *p == ' ')
2937 /* parse the hour-minute-second part */
2938 for (i = 0; i < FF_ARRAY_ELEMS(time_fmt); i++) {
2939 q = small_strptime(p, time_fmt[i], &dt);
2945 /* parse datestr as a duration */
2950 /* parse datestr as HH:MM:SS */
2951 q = small_strptime(p, time_fmt[0], &dt);
2953 /* parse datestr as S+ */
2954 dt.tm_sec = strtol(p, (char **)&q, 10);
2956 /* the parsing didn't succeed */
2963 /* Now we have all the fields that we can get */
/* Durations convert directly; absolute dates go through mktime/timegm
 * (conversion lines elided here). */
2969 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2971 dt.tm_isdst = -1; /* unknown */
2981 /* parse the .m... part */
/* Up to six fractional-second digits, scaled to microseconds. */
2985 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2988 val += n * (*q - '0');
2992 return negative ? -t : t;
/**
 * Look up tag1 in an "tag1=val1&tag2=val2"-style info string and copy
 * its value into arg (truncated to arg_size - 1 characters).
 *
 * @param arg      output buffer for the tag's value
 * @param arg_size size of arg in bytes
 * @param tag1     tag name to search for
 * @param info     '&'-separated tag=value string (may start with '?')
 * @return 1 if the tag was found, 0 otherwise
 */
2995 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
/* Scan the tag name up to '=', '&' or end of string, bounded by the
 * local tag buffer. */
3005 while (*p != '\0' && *p != '=' && *p != '&') {
3006 if ((q - tag) < sizeof(tag) - 1)
/* Copy the value up to the next '&' separator, bounded by arg_size. */
3014 while (*p != '&' && *p != '\0') {
3015 if ((q - arg) < arg_size - 1) {
/* Match found once the collected tag equals the requested one. */
3025 if (!strcmp(tag, tag1))
/**
 * Expand a frame-number pattern in path into buf: path must contain
 * exactly one "%d" (optionally zero-padded, e.g. "%05d") which is
 * replaced by number; "%%" produces a literal '%'.
 *
 * @param buf      output buffer for the expanded filename
 * @param buf_size size of buf in bytes
 * @param path     pattern string
 * @param number   frame number substituted into the pattern
 * @return 0 on success, negative on error (no/duplicate "%d", or buf
 *         too small — exact failure branches fall on elided lines)
 */
3034 int av_get_frame_filename(char *buf, int buf_size,
3035 const char *path, int number)
3038 char *q, buf1[20], c;
3039 int nd, len, percentd_found;
/* Accumulate the zero-pad width digits between '%' and the conversion. */
3051 while (isdigit(*p)) {
3052 nd = nd * 10 + *p++ - '0';
3055 } while (isdigit(c));
/* Render the number with the requested zero-padded width. */
3064 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3066 if ((q - buf + len) > buf_size - 1)
3068 memcpy(q, buf1, len);
3076 if ((q - buf) < buf_size - 1)
/* The pattern must have contained a %d conversion. */
3080 if (!percentd_found)
/**
 * Dump size bytes of buf as hex plus printable-ASCII, 16 bytes per line.
 * Output goes to av_log(avcl, level, ...) when f is NULL, otherwise to
 * the stdio stream f (shared sink for av_hex_dump/av_hex_dump_log).
 */
3089 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
3092 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3094 for(i=0;i<size;i+=16) {
3101 PRINT(" %02x", buf[i+j]);
/* ASCII column: non-printable bytes are replaced (replacement char on
 * an elided line). */
3106 for(j=0;j<len;j++) {
3108 if (c < ' ' || c > '~')
/** Hex-dump buf to the stdio stream f (wrapper over hex_dump_internal). */
3117 void av_hex_dump(FILE *f, uint8_t *buf, int size)
3119 hex_dump_internal(NULL, f, 0, buf, size);
/** Hex-dump buf through av_log() at the given level on context avcl. */
3122 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3124 hex_dump_internal(avcl, NULL, level, buf, size);
3127 //FIXME needs to know the time_base
/**
 * Dump an AVPacket's fields (stream index, keyframe flag, duration,
 * dts, pts, size) and optionally its payload as hex. Output goes to
 * av_log(avcl, level, ...) when f is NULL, otherwise to stream f.
 * Timestamps are printed in seconds assuming AV_TIME_BASE units.
 */
3127 //FIXME needs to know the time_base
3128 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
3130 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3131 PRINT("stream #%d:\n", pkt->stream_index);
3132 PRINT(" keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
3133 PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
3134 /* DTS is _always_ valid after av_read_frame() */
3136 if (pkt->dts == AV_NOPTS_VALUE)
3139 PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
3140 /* PTS may not be known if B-frames are present. */
3142 if (pkt->pts == AV_NOPTS_VALUE)
3145 PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
3147 PRINT(" size=%d\n", pkt->size);
/* Optional payload hex dump (NOTE(review): gated by dump_payload on an
 * elided line — always uses the stdio path via av_hex_dump). */
3150 av_hex_dump(f, pkt->data, pkt->size);
/** Dump a packet's fields (and optionally payload) to stdio stream f. */
3153 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3155 pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
/** Dump a packet through av_log() at the given level on context avcl. */
3158 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3160 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
/**
 * Split a URL of the form
 *   proto://authorization@hostname:port/path
 * into its components. Every output buffer may be zero-sized; missing
 * components yield empty strings, and *port_ptr is set to -1 when no
 * port is present. Bracketed IPv6 literals ("[::1]:80") are supported.
 *
 * @param proto/proto_size                 protocol name output
 * @param authorization/authorization_size user[:pass] output (no '@')
 * @param hostname/hostname_size           host output (brackets stripped)
 * @param port_ptr                         port number output, may be NULL
 * @param path/path_size                   path (incl. query) output
 */
3163 void url_split(char *proto, int proto_size,
3164 char *authorization, int authorization_size,
3165 char *hostname, int hostname_size,
3167 char *path, int path_size,
3170 const char *p, *ls, *at, *col, *brk;
/* Default every output to "empty / unknown". */
3172 if (port_ptr) *port_ptr = -1;
3173 if (proto_size > 0) proto[0] = 0;
3174 if (authorization_size > 0) authorization[0] = 0;
3175 if (hostname_size > 0) hostname[0] = 0;
3176 if (path_size > 0) path[0] = 0;
3178 /* parse protocol */
3179 if ((p = strchr(url, ':'))) {
3180 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3185 /* no protocol means plain filename */
3186 av_strlcpy(path, url, path_size);
3190 /* separate path from hostname */
/* The path starts at the first '/' — or '?' for query-only URLs. */
3191 ls = strchr(p, '/');
3193 ls = strchr(p, '?');
3195 av_strlcpy(path, ls, path_size);
3197 ls = &p[strlen(p)]; // XXX
3199 /* the rest is hostname, use that to parse auth/port */
3201 /* authorization (user[:pass]@hostname) */
3202 if ((at = strchr(p, '@')) && at < ls) {
3203 av_strlcpy(authorization, p,
3204 FFMIN(authorization_size, at + 1 - p));
3205 p = at + 1; /* skip '@' */
/* Bracketed IPv6 host: copy the part inside [ ], port after "]:". */
3208 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3210 av_strlcpy(hostname, p + 1,
3211 FFMIN(hostname_size, brk - p));
3212 if (brk[1] == ':' && port_ptr)
3213 *port_ptr = atoi(brk + 2);
/* Plain host with ":port" suffix. */
3214 } else if ((col = strchr(p, ':')) && col < ls) {
3215 av_strlcpy(hostname, p,
3216 FFMIN(col + 1 - p, hostname_size));
3217 if (port_ptr) *port_ptr = atoi(col + 1);
/* Host with no port. */
3219 av_strlcpy(hostname, p,
3220 FFMIN(ls + 1 - p, hostname_size));
/**
 * Write the s bytes of src into buff as uppercase hexadecimal, two
 * characters per input byte. buff must hold at least 2*s characters.
 * NOTE(review): the return statement and any NUL-termination are on
 * elided lines — presumably returns buff; confirm before relying on it.
 */
3224 char *ff_data_to_hex(char *buff, const uint8_t *src, int s)
3227 static const char hex_table[16] = { '0', '1', '2', '3',
3230 'C', 'D', 'E', 'F' };
/* High nibble first, then low nibble, for each input byte. */
3232 for(i = 0; i < s; i++) {
3233 buff[i * 2] = hex_table[src[i] >> 4];
3234 buff[i * 2 + 1] = hex_table[src[i] & 0xF];
3240 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3241 int pts_num, int pts_den)
3243 unsigned int gcd= av_gcd(pts_num, pts_den);
3244 s->pts_wrap_bits = pts_wrap_bits;
3245 s->time_base.num = pts_num/gcd;
3246 s->time_base.den = pts_den/gcd;
3249 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, gcd);