2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #include "libavcodec/opt.h"
23 #include "libavutil/avstring.h"
32 * @file libavformat/utils.c
33 * various utility functions for use within FFmpeg
/** Return the compile-time libavformat version as LIBAVFORMAT_VERSION_INT. */
unsigned avformat_version(void)
    return LIBAVFORMAT_VERSION_INT;
/* fraction handling */

/**
 * Initialize the rational accumulator:
 * f = val + (num / den) + 0.5.
 *
 * 'num' is normalized so that 0 <= num < den afterwards.
 *
 * @param f   fractional number to initialize
 * @param val integer value
 * @param num must be >= 0
 * @param den must be >= 1
 */
static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
/**
 * Fractional addition to f: f = f + (incr / f->den).
 *
 * @param f    fractional number
 * @param incr increment, can be positive or negative
 */
static void av_frac_add(AVFrac *f, int64_t incr)
    /* numerator overflowed the denominator: renormalize upward */
    } else if (num >= den) {
/** head of registered input format linked list (appended to by av_register_input_format) */
AVInputFormat *first_iformat = NULL;
/** head of registered output format linked list (appended to by av_register_output_format) */
AVOutputFormat *first_oformat = NULL;
/**
 * Iterate over the registered input formats.
 * Returns the list head when f is NULL, the successor of f otherwise.
 */
AVInputFormat *av_iformat_next(AVInputFormat *f)
    else return first_iformat;
102 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
104 if(f) return f->next;
105 else return first_oformat;
/** Append 'format' to the tail of the global input-format list. */
void av_register_input_format(AVInputFormat *format)
    /* walk the singly linked list to its tail */
    while (*p != NULL) p = &(*p)->next;
/** Append 'format' to the tail of the global output-format list. */
void av_register_output_format(AVOutputFormat *format)
    /* walk the singly linked list to its tail */
    while (*p != NULL) p = &(*p)->next;
/**
 * Return nonzero if the extension of 'filename' matches one of the
 * entries in 'extensions' (a comma-separated list), case-insensitively.
 */
int match_ext(const char *filename, const char *extensions)
    ext = strrchr(filename, '.');
    /* copy one comma-separated candidate into ext1, bounded by its size */
    while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
    if (!strcasecmp(ext1, ext))
/**
 * Pick the best-matching registered output format for the given short
 * name, filename and/or MIME type, scoring each candidate on how many
 * of the three criteria it matches.
 */
AVOutputFormat *guess_format(const char *short_name, const char *filename,
                             const char *mime_type)
    AVOutputFormat *fmt, *fmt_found;
    int score_max, score;

    /* specific test for image sequences: a numbered filename with a known
       image codec maps straight to the image2 muxer */
#ifdef CONFIG_IMAGE2_MUXER
    if (!short_name && filename &&
        av_filename_number_test(filename) &&
        av_guess_image2_codec(filename) != CODEC_ID_NONE) {
        return guess_format("image2", NULL, NULL);
    /* Find the proper file type. */
    while (fmt != NULL) {
        if (fmt->name && short_name && !strcmp(fmt->name, short_name))
        if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
        if (filename && fmt->extensions &&
            match_ext(filename, fmt->extensions)) {
        /* keep the highest-scoring format seen so far */
        if (score > score_max) {
/**
 * Like guess_format(), but prefer the "<name>_stream" variant of the
 * matched muxer when such a variant is registered.
 */
AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
                                    const char *mime_type)
    AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
        AVOutputFormat *stream_fmt;
        char stream_format_name[64];

        /* look up "<muxer>_stream" as a separate registered format */
        snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
        stream_fmt = guess_format(stream_format_name, NULL, NULL);
/**
 * Guess the codec ID for the given output format and media type.
 * For video with the image2 muxers the codec is derived from the
 * filename extension; otherwise the muxer's default codec is used.
 */
enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
                            const char *filename, const char *mime_type, enum CodecType type){
    if(type == CODEC_TYPE_VIDEO){
        enum CodecID codec_id= CODEC_ID_NONE;
#ifdef CONFIG_IMAGE2_MUXER
        if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
            codec_id= av_guess_image2_codec(filename);
        /* fall back to the muxer's default video codec */
        if(codec_id == CODEC_ID_NONE)
            codec_id= fmt->video_codec;
    }else if(type == CODEC_TYPE_AUDIO)
        return fmt->audio_codec;
    return CODEC_ID_NONE;
/** Find a registered input format by its exact short name. */
AVInputFormat *av_find_input_format(const char *short_name)
    for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
        if (!strcmp(fmt->name, short_name))
/* memory handling */

/** Default destructor for packets that own their payload; clears the references. */
void av_destruct_packet(AVPacket *pkt)
    pkt->data = NULL; pkt->size = 0;
/**
 * Initialize the optional fields of a packet to default values.
 * Note: does not touch pkt->data / pkt->size.
 */
void av_init_packet(AVPacket *pkt)
    pkt->pts = AV_NOPTS_VALUE;
    pkt->dts = AV_NOPTS_VALUE;
    pkt->stream_index = 0;
    /* by default the packet does not own its payload */
    pkt->destruct= av_destruct_packet_nofree;
/**
 * Allocate a payload of 'size' bytes (plus zeroed input padding) for pkt.
 * @return 0 on success, AVERROR(ENOMEM) on overflow or allocation failure
 */
int av_new_packet(AVPacket *pkt, int size)
    /* reject sizes that would wrap around once the padding is added */
    if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
        return AVERROR(ENOMEM);
    data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
        return AVERROR(ENOMEM);
    /* zero the padding so over-reading bitstream parsers stop cleanly */
    memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    pkt->destruct = av_destruct_packet;
/** Allocate a packet of 'size' bytes and fill its payload from the byte stream s. */
int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
    int ret= av_new_packet(pkt, size);
    /* record the byte position the payload was read from */
    pkt->pos= url_ftell(s);
    ret= get_buffer(s, pkt->data, size);
/**
 * If pkt does not own its payload (destruct != av_destruct_packet),
 * replace it with an independently owned copy, re-adding the padding.
 * @return 0 on success, AVERROR(ENOMEM) on overflow or allocation failure
 */
int av_dup_packet(AVPacket *pkt)
    if (pkt->destruct != av_destruct_packet) {
        /* We duplicate the packet and don't forget to add the padding again. */
        if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
            return AVERROR(ENOMEM);
        data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
            return AVERROR(ENOMEM);
        memcpy(data, pkt->data, pkt->size);
        memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
        /* the copy is now owned by the packet */
        pkt->destruct = av_destruct_packet;
/** Return nonzero if a frame number can be substituted into 'filename'. */
int av_filename_number_test(const char *filename)
    return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
/**
 * Probe all registered input formats against pd and return the best
 * match scoring above *score_max; *score_max is updated to the winner's
 * score.
 */
static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
    AVInputFormat *fmt1, *fmt;

    for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
        /* skip formats whose AVFMT_NOFILE flag does not match the open state */
        if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
        if (fmt1->read_probe) {
            score = fmt1->read_probe(pd);
        } else if (fmt1->extensions) {
            /* no probe function: fall back to a filename-extension match */
            if (match_ext(pd->filename, fmt1->extensions)) {
        if (score > *score_max) {
        }else if (score == *score_max)
/** Convenience wrapper around av_probe_input_format2() with a local score variable. */
AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
    return av_probe_input_format2(pd, is_opened, &score);
/**
 * Probe the buffered raw data of a stream still marked CODEC_ID_PROBE
 * and map a few known demuxer names to the matching codec IDs.
 */
static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
    fmt = av_probe_input_format2(pd, 1, &score);
        if (!strcmp(fmt->name, "mp3"))
            st->codec->codec_id = CODEC_ID_MP3;
        else if (!strcmp(fmt->name, "ac3"))
            st->codec->codec_id = CODEC_ID_AC3;
        else if (!strcmp(fmt->name, "mpegvideo"))
            st->codec->codec_id = CODEC_ID_MPEG2VIDEO;
        else if (!strcmp(fmt->name, "h264"))
            st->codec->codec_id = CODEC_ID_H264;
366 /************************************************************/
367 /* input media file */
370 * Open a media file from an IO stream. 'fmt' must be specified.
/** AVClass item_name callback: returns the demuxer or muxer name of the context. */
static const char* format_to_name(void* ptr)
    AVFormatContext* fc = (AVFormatContext*) ptr;
    if(fc->iformat) return fc->iformat->name;
    else if(fc->oformat) return fc->oformat->name;
/* byte offset of an AVFormatContext field, for the AVOption table below */
#define OFFSET(x) offsetof(AVFormatContext,x)
#define DEFAULT 0 //should be NAN but it does not work as it is not a constant in glibc as required by ANSI/ISO C
//these names are too long to be readable
#define E AV_OPT_FLAG_ENCODING_PARAM
#define D AV_OPT_FLAG_DECODING_PARAM
/* AVOption table for AVFormatContext; OFFSET/DEFAULT/E/D are defined above. */
static const AVOption options[]={
{"probesize", NULL, OFFSET(probesize), FF_OPT_TYPE_INT, 32000, 32, INT_MAX, D}, /* 32000 from mpegts.c: 1.0 second at 24Mbit/s */
{"muxrate", "set mux rate", OFFSET(mux_rate), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
{"packetsize", "set packet size", OFFSET(packet_size), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
{"fflags", NULL, OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, D|E, "fflags"},
{"ignidx", "ignore index", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_IGNIDX, INT_MIN, INT_MAX, D, "fflags"},
{"genpts", "generate pts", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_GENPTS, INT_MIN, INT_MAX, D, "fflags"},
/* NOTE(review): help string below starts with a stray leading space
   (" set the track number"); cosmetic only, but shows up in -h output. */
{"track", " set the track number", OFFSET(track), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
{"year", "set the year", OFFSET(year), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, E},
{"analyzeduration", "how many microseconds are analyzed to estimate duration", OFFSET(max_analyze_duration), FF_OPT_TYPE_INT, 3*AV_TIME_BASE, 0, INT_MAX, D},
{"cryptokey", "decryption key", OFFSET(key), FF_OPT_TYPE_BINARY, 0, 0, 0, D},
{"indexmem", "max memory used for timestamp index (per stream)", OFFSET(max_index_size), FF_OPT_TYPE_INT, 1<<20, 0, INT_MAX, D},
{"rtbufsize", "max memory used for buffering real-time frames", OFFSET(max_picture_buffer), FF_OPT_TYPE_INT, 3041280, 0, INT_MAX, D}, /* defaults to 1s of 15fps 352x288 YUYV422 video */
{"fdebug", "print specific debug info", OFFSET(debug), FF_OPT_TYPE_FLAGS, DEFAULT, 0, INT_MAX, E|D, "fdebug"},
{"ts", NULL, 0, FF_OPT_TYPE_CONST, FF_FDEBUG_TS, INT_MIN, INT_MAX, E|D, "fdebug"},
408 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name, options };
/** Zero the whole context, set its class, then apply the AVOption defaults. */
static void avformat_get_context_defaults(AVFormatContext *s)
    memset(s, 0, sizeof(AVFormatContext));

    s->av_class = &av_format_context_class;

    av_opt_set_defaults(s);
/** Allocate an AVFormatContext and initialize it with default option values. */
AVFormatContext *av_alloc_format_context(void)
    ic = av_malloc(sizeof(AVFormatContext));
    avformat_get_context_defaults(ic);
    ic->av_class = &av_format_context_class;
/**
 * Open a media file from an already-opened IO stream.
 * 'fmt' must be specified; ap may be NULL (defaults are used).
 * On failure the partially initialized context is torn down.
 */
int av_open_input_stream(AVFormatContext **ic_ptr,
                         ByteIOContext *pb, const char *filename,
                         AVInputFormat *fmt, AVFormatParameters *ap)
    AVFormatParameters default_ap;
        memset(ap, 0, sizeof(default_ap));
    /* allocate a context unless the caller pre-allocated one */
    if(!ap->prealloced_context)
        ic = av_alloc_format_context();
        err = AVERROR(ENOMEM);
    ic->duration = AV_NOPTS_VALUE;
    ic->start_time = AV_NOPTS_VALUE;
    av_strlcpy(ic->filename, filename, sizeof(ic->filename));

    /* allocate private data */
    if (fmt->priv_data_size > 0) {
        ic->priv_data = av_mallocz(fmt->priv_data_size);
        if (!ic->priv_data) {
            err = AVERROR(ENOMEM);
        ic->priv_data = NULL;

    /* let the demuxer parse the file header */
    if (ic->iformat->read_header) {
        err = ic->iformat->read_header(ic, ap);
    /* remember where the payload data starts */
    if (pb && !ic->data_offset)
        ic->data_offset = url_ftell(ic->pb);
    /* error path: free private data and per-stream allocations */
    av_freep(&ic->priv_data);
    for(i=0;i<ic->nb_streams;i++) {
        AVStream *st = ic->streams[i];
        av_free(st->priv_data);
        av_free(st->codec->extradata);
/** size of probe buffer, for guessing file type from file contents;
    the buffer doubles from MIN up to MAX until a format matches */
#define PROBE_BUF_MIN 2048
#define PROBE_BUF_MAX (1<<20)
/**
 * Open a media file by name: probe the format (first from the filename,
 * then from growing reads of the file contents) and hand off to
 * av_open_input_stream().
 */
int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
                       AVFormatParameters *ap)
    AVProbeData probe_data, *pd = &probe_data;
    ByteIOContext *pb = NULL;

    pd->filename = filename;
    /* guess format if no file can be opened */
    fmt = av_probe_input_format(pd, 0);

    /* Do not open file if the format does not need it. XXX: specific
       hack needed to handle RTSP/TCP */
    if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
        /* if no file needed do not try to open one */
        if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
            url_setbufsize(pb, buf_size);
        /* retry probing with exponentially larger buffers until a format matches */
        for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
            /* require a higher score for small buffers; accept anything at MAX */
            int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
            /* read probe data */
            /* NOTE(review): realloc result overwrites pd->buf directly — on
               allocation failure the old buffer would leak; verify error path */
            pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
            pd->buf_size = get_buffer(pb, pd->buf, probe_size);
            memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
            /* rewind; if the stream is not seekable, reopen the file instead */
            if (url_fseek(pb, 0, SEEK_SET) < 0) {
                if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
            /* guess file format */
            fmt = av_probe_input_format2(pd, 1, &score);
    /* if still no format found, error */
    /* check filename in case an image number is expected */
    if (fmt->flags & AVFMT_NEEDNUMBER) {
        if (!av_filename_number_test(filename)) {
            err = AVERROR_NUMEXPECTED;
    err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
577 /*******************************************************/
/** Append a copy of *pkt to the packet list, tracking the tail in *plast_pktl. */
static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
                               AVPacketList **plast_pktl){
    AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
        /* non-empty list: link after the current tail */
        (*plast_pktl)->next = pktl;
        /* empty list: the new node becomes the head */
        *packet_buffer = pktl;
    /* add the packet in the buffered packet list */
/**
 * Read a raw packet from the demuxer. Packets of streams still marked
 * CODEC_ID_PROBE are buffered and their data accumulated for codec
 * probing until the codec can be identified.
 */
int av_read_packet(AVFormatContext *s, AVPacket *pkt)
        /* serve buffered raw packets first, once their codec is known */
        AVPacketList *pktl = s->raw_packet_buffer;
            if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE){
                s->raw_packet_buffer = pktl->next;
        ret= s->iformat->read_packet(s, pkt);
        st= s->streams[pkt->stream_index];

        /* apply user-forced codec IDs per media type */
        switch(st->codec->codec_type){
        case CODEC_TYPE_VIDEO:
            if(s->video_codec_id)   st->codec->codec_id= s->video_codec_id;
        case CODEC_TYPE_AUDIO:
            if(s->audio_codec_id)   st->codec->codec_id= s->audio_codec_id;
        case CODEC_TYPE_SUBTITLE:
            if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
        if(!pktl && st->codec->codec_id!=CODEC_ID_PROBE)
        add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);

        if(st->codec->codec_id == CODEC_ID_PROBE){
            AVProbeData *pd = &st->probe_data;

            /* accumulate the packet payload into the probe buffer */
            pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
            memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
            pd->buf_size += pkt->size;
            memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);

            /* re-probe only when the buffer size crosses a power of two */
            if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
                set_codec_from_probe_data(st, pd, 1);
                if(st->codec->codec_id != CODEC_ID_PROBE){
655 /**********************************************************/
/**
 * Get the number of samples of an audio frame. Return -1 on error.
 */
static int get_audio_frame_size(AVCodecContext *enc, int size)
    if(enc->codec_id == CODEC_ID_VORBIS)

    if (enc->frame_size <= 1) {
        int bits_per_sample = av_get_bits_per_sample(enc->codec_id);

        if (bits_per_sample) {
            if (enc->channels == 0)
            /* constant bits per sample: derive sample count from byte size */
            frame_size = (size << 3) / (bits_per_sample * enc->channels);
            /* used for example by ADPCM codecs */
            if (enc->bit_rate == 0)
            frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
        /* codec with fixed frame size */
        frame_size = enc->frame_size;
/**
 * Compute the frame duration as the fraction *pnum / *pden of a second.
 * Both outputs are left at their caller-provided defaults if the
 * duration is not available.
 */
static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
                                   AVCodecParserContext *pc, AVPacket *pkt)
    switch(st->codec->codec_type) {
    case CODEC_TYPE_VIDEO:
        /* prefer the stream time base when it looks like a sane frame rate */
        if(st->time_base.num*1000LL > st->time_base.den){
            *pnum = st->time_base.num;
            *pden = st->time_base.den;
        }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
            *pnum = st->codec->time_base.num;
            *pden = st->codec->time_base.den;
            /* stretch the duration for repeated fields/frames (e.g. pulldown) */
            if (pc && pc->repeat_pict) {
                *pnum = (*pnum) * (2 + pc->repeat_pict);
    case CODEC_TYPE_AUDIO:
        frame_size = get_audio_frame_size(st->codec, pkt->size);
        *pden = st->codec->sample_rate;
/** Return nonzero if every frame of the codec is a keyframe (intra-only). */
static int is_intra_only(AVCodecContext *enc){
    /* all audio is treated as intra-only */
    if(enc->codec_type == CODEC_TYPE_AUDIO){
    }else if(enc->codec_type == CODEC_TYPE_VIDEO){
        switch(enc->codec_id){
        case CODEC_ID_MJPEGB:
        case CODEC_ID_RAWVIDEO:
        case CODEC_ID_DVVIDEO:
        case CODEC_ID_HUFFYUV:
        case CODEC_ID_FFVHUFF:
/**
 * Once the first valid dts of a stream is known, shift all already
 * buffered packets of that stream so timestamps start at first_dts,
 * and derive the stream start_time.
 */
static void update_initial_timestamps(AVFormatContext *s, int stream_index,
                                      int64_t dts, int64_t pts)
    AVStream *st= s->streams[stream_index];
    AVPacketList *pktl= s->packet_buffer;

    /* only act the first time a usable dts arrives */
    if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
    st->first_dts= dts - st->cur_dts;

    for(; pktl; pktl= pktl->next){
        if(pktl->pkt.stream_index != stream_index)
        //FIXME think more about this check
        if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
            pktl->pkt.pts += st->first_dts;

        if(pktl->pkt.dts != AV_NOPTS_VALUE)
            pktl->pkt.dts += st->first_dts;

        if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
            st->start_time= pktl->pkt.pts;
    if (st->start_time == AV_NOPTS_VALUE)
        st->start_time = pts;
/**
 * Fill in dts/pts/duration of buffered packets that have none, by
 * extrapolating backwards/forwards from the current packet's duration.
 */
static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
    AVPacketList *pktl= s->packet_buffer;

    if(st->first_dts != AV_NOPTS_VALUE){
        cur_dts= st->first_dts;
        /* walk back: count how many leading timestamp-less packets precede us */
        for(; pktl; pktl= pktl->next){
            if(pktl->pkt.stream_index == pkt->stream_index){
                if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
                cur_dts -= pkt->duration;
        pktl= s->packet_buffer;
        st->first_dts = cur_dts;
    }else if(st->cur_dts)

    /* forward pass: assign extrapolated timestamps and durations */
    for(; pktl; pktl= pktl->next){
        if(pktl->pkt.stream_index != pkt->stream_index)
        if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
           && !pktl->pkt.duration){
            pktl->pkt.dts= cur_dts;
            /* without B-frames pts follows dts directly */
            if(!st->codec->has_b_frames)
                pktl->pkt.pts= cur_dts;
            cur_dts += pkt->duration;
            pktl->pkt.duration= pkt->duration;
    if(st->first_dts == AV_NOPTS_VALUE)
        st->cur_dts= cur_dts;
/**
 * Fill in missing pts/dts/duration of a demuxed packet: undo timestamp
 * wrap, estimate duration from the frame rate/sample rate, interpolate
 * timestamps from the stream state, and set the keyframe flag for
 * intra-only codecs.
 */
static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
                               AVCodecParserContext *pc, AVPacket *pkt)
    int num, den, presentation_delayed, delay, i;
    /* undo a timestamp wrap-around: dts > pts can only mean dts wrapped */
    if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
       /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
        pkt->dts -= 1LL<<st->pts_wrap_bits;
    /* derive a duration from the frame rate / sample rate when missing */
    if (pkt->duration == 0) {
        compute_frame_duration(&num, &den, st, pc, pkt);
            pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
    if(pkt->duration != 0 && s->packet_buffer)
        update_initial_durations(s, st, pkt);

    /* correct timestamps with byte offset if demuxers only have timestamps
       on packet boundaries */
    if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
        /* this will estimate bitrate based on this frame's duration and size */
        offset = av_rescale(pc->offset, pkt->duration, pkt->size);
        if(pkt->pts != AV_NOPTS_VALUE)
        if(pkt->dts != AV_NOPTS_VALUE)

    /* do we have a video B-frame ? */
    delay= st->codec->has_b_frames;
    presentation_delayed = 0;
    /* XXX: need has_b_frame, but cannot get it if the codec is
        pc && pc->pict_type != FF_B_TYPE)
        presentation_delayed = 1;
    /* This may be redundant, but it should not hurt. */
    if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
        presentation_delayed = 1;
//    av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
    /* interpolate PTS and DTS if they are not present */
    if(delay==0 || (delay==1 && pc)){
        if (presentation_delayed) {
            /* DTS = decompression timestamp */
            /* PTS = presentation timestamp */
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = st->last_IP_pts;
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = st->cur_dts;
            /* this is tricky: the dts must be incremented by the duration
               of the frame we are displaying, i.e. the last I- or P-frame */
            if (st->last_IP_duration == 0)
                st->last_IP_duration = pkt->duration;
            if(pkt->dts != AV_NOPTS_VALUE)
                st->cur_dts = pkt->dts + st->last_IP_duration;
            st->last_IP_duration = pkt->duration;
            st->last_IP_pts= pkt->pts;
            /* cannot compute PTS if not present (we can compute it only
               by knowing the future */
        } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
            /* heuristic: nudge pts forward one duration if that lands closer
               to the expected cur_dts (fixes off-by-one-frame timestamps) */
            if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
                int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
                int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
                if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
                    pkt->pts += pkt->duration;
//    av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);

            /* presentation is not delayed : PTS and DTS are the same */
            if(pkt->pts == AV_NOPTS_VALUE)
            update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
            if(pkt->pts == AV_NOPTS_VALUE)
                pkt->pts = st->cur_dts;
            if(pkt->pts != AV_NOPTS_VALUE)
                st->cur_dts = pkt->pts + pkt->duration;

    /* B-frame case: reorder a sliding window of pts values to obtain dts */
    if(pkt->pts != AV_NOPTS_VALUE){
        st->pts_buffer[0]= pkt->pts;
        for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
            st->pts_buffer[i]= (i-delay-1) * pkt->duration;
        for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
            FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
        if(pkt->dts == AV_NOPTS_VALUE)
            pkt->dts= st->pts_buffer[0];
        update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
        if(pkt->dts > st->cur_dts)
            st->cur_dts = pkt->dts;
//    av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);

    /* every frame of an intra-only codec is a keyframe */
    if(is_intra_only(st->codec))
        pkt->flags |= PKT_FLAG_KEY;
        /* keyframe computation */
        if (pc->pict_type == FF_I_TYPE)
            pkt->flags |= PKT_FLAG_KEY;
926 void av_destruct_packet_nofree(AVPacket *pkt)
928 pkt->data = NULL; pkt->size = 0;
/**
 * Produce the next frame-sized packet: either forward a raw demuxed
 * packet directly, or feed demuxed data through the stream's parser
 * until a complete frame is assembled, filling in timestamps via
 * compute_pkt_fields().
 */
static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
    /* select current input stream component */
        if (!st->need_parsing || !st->parser) {
            /* no parsing needed: we just output the packet as is */
            /* raw data support */
            compute_pkt_fields(s, st, NULL, pkt);
        } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
            len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
                                  s->cur_ptr, s->cur_len,
                                  s->cur_pkt.pts, s->cur_pkt.dts);
            /* timestamps are consumed by the parser only once */
            s->cur_pkt.pts = AV_NOPTS_VALUE;
            s->cur_pkt.dts = AV_NOPTS_VALUE;
            /* increment read pointer */
            /* return packet if any */
                pkt->pos = s->cur_pkt.pos; // Isn't quite accurate but close.
                pkt->stream_index = st->index;
                pkt->pts = st->parser->pts;
                pkt->dts = st->parser->dts;
                /* parser output points into the parser's buffer: not owned */
                pkt->destruct = av_destruct_packet_nofree;
                compute_pkt_fields(s, st, st->parser, pkt);

                /* maintain a generic seek index on keyframes if requested */
                if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
                    ff_reduce_index(s, st->index);
                    av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
                                       0, 0, AVINDEX_KEYFRAME);
            /* current raw packet fully consumed */
            av_free_packet(&s->cur_pkt);
        /* read next packet */
        ret = av_read_packet(s, &s->cur_pkt);
            if (ret == AVERROR(EAGAIN))
            /* end of stream: flush the parsers to return the last frames, if any */
            for(i = 0; i < s->nb_streams; i++) {
                if (st->parser && st->need_parsing) {
                    av_parser_parse(st->parser, st->codec,
                                    &pkt->data, &pkt->size,
                                    AV_NOPTS_VALUE, AV_NOPTS_VALUE);
            /* no more packets: really terminate parsing */
        /* sanity check: pts must not precede dts */
        if(s->cur_pkt.pts != AV_NOPTS_VALUE &&
           s->cur_pkt.dts != AV_NOPTS_VALUE &&
           s->cur_pkt.pts < s->cur_pkt.dts){
            av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
                   s->cur_pkt.stream_index,
//            av_free_packet(&s->cur_pkt);
        st = s->streams[s->cur_pkt.stream_index];
        if(s->debug & FF_FDEBUG_TS)
            av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
                   s->cur_pkt.stream_index,
        s->cur_ptr = s->cur_pkt.data;
        s->cur_len = s->cur_pkt.size;
        /* lazily create the parser on the first packet of the stream */
        if (st->need_parsing && !st->parser) {
            st->parser = av_parser_init(st->codec->codec_id);
                /* no parser available: just output the raw packets */
                st->need_parsing = AVSTREAM_PARSE_NONE;
            }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
                st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
            if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
                st->parser->next_frame_offset=
                st->parser->cur_offset= s->cur_pkt.pos;
    if(s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
/**
 * Return the next frame of a stream. With AVFMT_FLAG_GENPTS set,
 * packets are buffered so missing pts values can be generated from the
 * dts of later packets of the same stream.
 */
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
    const int genpts= s->flags & AVFMT_FLAG_GENPTS;

        pktl = s->packet_buffer;
            AVPacket *next_pkt= &pktl->pkt;

            if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
                /* scan later buffered packets for a dts that bounds next_pkt's pts */
                while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
                    if(   pktl->pkt.stream_index == next_pkt->stream_index
                       && next_pkt->dts < pktl->pkt.dts
                       && pktl->pkt.pts != pktl->pkt.dts //not b frame
                       /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
                        next_pkt->pts= pktl->pkt.dts;
                pktl = s->packet_buffer;
            if(   next_pkt->pts != AV_NOPTS_VALUE
               || next_pkt->dts == AV_NOPTS_VALUE
                /* read packet from packet buffer, if there is data */
                s->packet_buffer = pktl->next;
            int ret= av_read_frame_internal(s, pkt);
                if(pktl && ret != AVERROR(EAGAIN)){
            /* buffer the packet; duplicate so the buffered copy owns its data */
            if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
                                           &s->packet_buffer_end)) < 0)
                return AVERROR(ENOMEM);
            /* fast path: no pts generation requested */
            assert(!s->packet_buffer);
            return av_read_frame_internal(s, pkt);
/* XXX: suppress the packet queue */
/** Free every packet buffered in the context's packet queue. */
static void flush_packet_queue(AVFormatContext *s)
    pktl = s->packet_buffer;
        /* unlink the head, then release its payload */
        s->packet_buffer = pktl->next;
        av_free_packet(&pktl->pkt);
1124 /*******************************************************/
/**
 * Return the index of the default stream: the first video stream if
 * any, else the first audio stream, else 0.
 */
int av_find_default_stream_index(AVFormatContext *s)
    int first_audio_index = -1;

    if (s->nb_streams <= 0)
    for(i = 0; i < s->nb_streams; i++) {
        /* video wins immediately; remember the first audio stream as fallback */
        if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
        if (first_audio_index < 0 && st->codec->codec_type == CODEC_TYPE_AUDIO)
            first_audio_index = i;
    return first_audio_index >= 0 ? first_audio_index : 0;
/**
 * Flush the frame reader: drop all buffered packets and reset the
 * per-stream parser and timestamp state (used around seeks).
 */
static void av_read_frame_flush(AVFormatContext *s)
    flush_packet_queue(s);

    /* free previous packet */
        if (s->cur_st->parser)
            av_free_packet(&s->cur_pkt);

    /* for each stream, reset read state */
    for(i = 0; i < s->nb_streams; i++) {
            av_parser_close(st->parser);
        st->last_IP_pts = AV_NOPTS_VALUE;
        st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
/**
 * After a seek, set cur_dts of every stream to 'timestamp' (given in
 * ref_st's time base), rescaled into each stream's own time base.
 */
void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
    for(i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];

        st->cur_dts = av_rescale(timestamp,
                                 st->time_base.den * (int64_t)ref_st->time_base.num,
                                 st->time_base.num * (int64_t)ref_st->time_base.den);
/**
 * Keep the seek index within the max_index_size memory budget by
 * dropping every second entry once the limit is reached.
 */
void ff_reduce_index(AVFormatContext *s, int stream_index)
    AVStream *st= s->streams[stream_index];
    unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);

    if((unsigned)st->nb_index_entries >= max_entries){
        /* compact in place: keep entries 0, 2, 4, ... */
        for(i=0; 2*i<st->nb_index_entries; i++)
            st->index_entries[i]= st->index_entries[2*i];
        st->nb_index_entries= i;
/**
 * Insert an entry into the stream's sorted seek index, updating an
 * existing entry with the same timestamp instead of duplicating it.
 */
int av_add_index_entry(AVStream *st,
                       int64_t pos, int64_t timestamp, int size, int distance, int flags)
    AVIndexEntry *entries, *ie;

    /* guard against (nb+1)*sizeof overflowing the allocation size */
    if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))

    entries = av_fast_realloc(st->index_entries,
                              &st->index_entries_allocated_size,
                              (st->nb_index_entries + 1) *
                              sizeof(AVIndexEntry));
    st->index_entries= entries;

    /* find the insertion point that keeps the index sorted by timestamp */
    index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
        index= st->nb_index_entries++;
        ie= &entries[index];
        assert(index==0 || ie[-1].timestamp < timestamp);
        ie= &entries[index];
        if(ie->timestamp != timestamp){
            if(ie->timestamp <= timestamp)
            /* shift the tail to make room for the new entry */
            memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
            st->nb_index_entries++;
        }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
            distance= ie->min_distance;

    ie->timestamp = timestamp;
    ie->min_distance= distance;
/**
 * Binary-search the seek index for wanted_timestamp; honors
 * AVSEEK_FLAG_BACKWARD (round down vs. up) and, unless AVSEEK_FLAG_ANY
 * is set, steps to the nearest keyframe entry.
 */
int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
    AVIndexEntry *entries= st->index_entries;
    int nb_entries= st->nb_index_entries;
        timestamp = entries[m].timestamp;
        if(timestamp >= wanted_timestamp)
        if(timestamp <= wanted_timestamp)
    /* pick the lower or upper bound depending on the seek direction */
    m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;

    if(!(flags & AVSEEK_FLAG_ANY)){
        /* walk toward a keyframe entry in the seek direction */
        while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
            m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/**
 * Seek to target_ts using binary search over read_timestamp(), seeding
 * the search bounds from cached index entries when available.
 */
int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
    AVInputFormat *avif= s->iformat;
    int64_t pos_min, pos_max, pos, pos_limit;
    int64_t ts_min, ts_max, ts;

    if (stream_index < 0)

    av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);

    ts_min= AV_NOPTS_VALUE;
    pos_limit= -1; //gcc falsely says it may be uninitialized

    st= s->streams[stream_index];
    if(st->index_entries){
        /* seed the lower bound from the index entry at or before target_ts */
        index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
        index= FFMAX(index, 0);
        e= &st->index_entries[index];

        if(e->timestamp <= target_ts || e->pos == e->min_distance){
            ts_min= e->timestamp;
            av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",

        /* seed the upper bound from the index entry at or after target_ts */
        index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
        assert(index < st->nb_index_entries);
            e= &st->index_entries[index];
            assert(e->timestamp >= target_ts);
            ts_max= e->timestamp;
            pos_limit= pos_max - e->min_distance;
            av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
                   pos_max,pos_limit, ts_max);

    pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);

    url_fseek(s->pb, pos, SEEK_SET);

    /* propagate the reached timestamp to all streams */
    av_update_cur_dts(s, st, ts);
/**
 * Generic timestamp-based search: locate the byte position whose
 * timestamp brackets target_ts, using interpolation first, then
 * bisection, then linear search. Returns the chosen position and
 * stores the reached timestamp in *ts_ret.
 */
int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
    int64_t start_pos, filesize;

    av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);

    /* establish the lower bound from the start of the data if not cached */
    if(ts_min == AV_NOPTS_VALUE){
        pos_min = s->data_offset;
        ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
        if (ts_min == AV_NOPTS_VALUE)

    /* establish the upper bound by scanning backwards from EOF */
    if(ts_max == AV_NOPTS_VALUE){
        filesize = url_fsize(s->pb);
        pos_max = filesize - 1;
            ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
        }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
        if (ts_max == AV_NOPTS_VALUE)

            /* extend ts_max to the last timestamp actually present */
            int64_t tmp_pos= pos_max + 1;
            int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
            if(tmp_ts == AV_NOPTS_VALUE)
            if(tmp_pos >= filesize)

    if(ts_min > ts_max){
    }else if(ts_min == ts_max){

    while (pos_min < pos_limit) {
        av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
        assert(pos_limit <= pos_max);

            int64_t approximate_keyframe_distance= pos_max - pos_limit;
            // interpolate position (better than dichotomy)
            pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
                + pos_min - approximate_keyframe_distance;
        }else if(no_change==1){
            // bisection, if interpolation failed to change min or max pos last time
            pos = (pos_min + pos_limit)>>1;
            /* linear search if bisection failed, can only happen if there
               are very few or no keyframes between min/max */
        else if(pos > pos_limit)

        ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
        av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
        if(ts == AV_NOPTS_VALUE){
            av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
        assert(ts != AV_NOPTS_VALUE);
        /* narrow the bracket around target_ts */
        if (target_ts <= ts) {
            pos_limit = start_pos - 1;
        if (target_ts >= ts) {

    /* choose which bound to land on depending on the seek direction */
    pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
    ts  = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min  : ts_max;
    ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
    /* note: pos_min was advanced above; this reads the following timestamp */
    ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
    av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
           pos, ts_min, target_ts, ts_max);
/**
 * Seek to an absolute byte position (AVSEEK_FLAG_BYTE path).
 * Clamps 'pos' to [data_offset, filesize-1], seeks the ByteIOContext there
 * and updates the current DTS. (Extract: some lines elided.)
 */
1459 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1460 int64_t pos_min, pos_max;
1464 if (stream_index < 0)
1467 st= s->streams[stream_index];
1470 pos_min = s->data_offset;
1471 pos_max = url_fsize(s->pb) - 1;
/* clamp the requested byte position into the valid file range */
1473 if (pos < pos_min) pos= pos_min;
1474 else if(pos > pos_max) pos= pos_max;
1476 url_fseek(s->pb, pos, SEEK_SET);
1479 av_update_cur_dts(s, st, ts);
/**
 * Index-based generic seek: look up 'timestamp' in the stream's index
 * entries; if the index does not cover it yet, read frames forward from the
 * last indexed position (building the index as a side effect) until a
 * keyframe past the target is seen, then retry the lookup and seek to the
 * matched entry's byte position. (Extract: some lines elided.)
 */
1484 static int av_seek_frame_generic(AVFormatContext *s,
1485 int stream_index, int64_t timestamp, int flags)
1491 st = s->streams[stream_index];
1493 index = av_index_search_timestamp(st, timestamp, flags);
/* target lies beyond the current index: scan forward to extend it */
1495 if(index < 0 || index==st->nb_index_entries-1){
1499 if(st->nb_index_entries){
1500 assert(st->index_entries);
/* resume reading from the last known index entry */
1501 ie= &st->index_entries[st->nb_index_entries-1];
1502 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1504 av_update_cur_dts(s, st, ie->timestamp);
1506 if ((ret = url_fseek(s->pb, 0, SEEK_SET)) < 0)
1510 int ret = av_read_frame(s, &pkt);
1513 av_free_packet(&pkt);
/* stop scanning once a keyframe past the requested timestamp appears */
1514 if(stream_index == pkt.stream_index){
1515 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1519 index = av_index_search_timestamp(st, timestamp, flags);
1524 av_read_frame_flush(s);
/* prefer the demuxer's own seek callback when available */
1525 if (s->iformat->read_seek){
1526 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
/* otherwise seek directly to the matched index entry */
1529 ie = &st->index_entries[index];
1530 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1532 av_update_cur_dts(s, st, ie->timestamp);
/**
 * Public seek entry point. Dispatch order visible here:
 *   1. AVSEEK_FLAG_BYTE  -> av_seek_frame_byte()
 *   2. demuxer read_seek()
 *   3. demuxer read_timestamp() -> av_seek_frame_binary()
 *   4. fallback          -> av_seek_frame_generic()
 * When stream_index < 0, a default stream is chosen and 'timestamp' is
 * rescaled from AV_TIME_BASE into that stream's time base.
 * (Extract: some lines elided.)
 */
1537 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1542 av_read_frame_flush(s);
1544 if(flags & AVSEEK_FLAG_BYTE)
1545 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1547 if(stream_index < 0){
1548 stream_index= av_find_default_stream_index(s);
1549 if(stream_index < 0)
1552 st= s->streams[stream_index];
1553 /* timestamp for default must be expressed in AV_TIME_BASE units */
1554 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1556 st= s->streams[stream_index];
1558 /* first, we try the format specific seek */
1559 if (s->iformat->read_seek)
1560 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1567 if(s->iformat->read_timestamp)
1568 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1570 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1573 /*******************************************************/
1576 * Returns TRUE if the stream has accurate duration in any stream.
1578 * @return TRUE if the stream has accurate duration for at least one component.
/* Scans all streams; the visible test is st->duration != AV_NOPTS_VALUE
 * (the return statements are in elided lines). */
1580 static int av_has_duration(AVFormatContext *ic)
1585 for(i = 0;i < ic->nb_streams; i++) {
1586 st = ic->streams[i];
1587 if (st->duration != AV_NOPTS_VALUE)
1594 * Estimate the stream timings from the one of each components.
1596 * Also computes the global bitrate if possible.
/* Aggregates per-stream start_time/duration (rescaled to AV_TIME_BASE via
 * av_rescale_q) into the container-level ic->start_time, ic->duration and,
 * when file_size is known, ic->bit_rate. (Extract: some lines elided.) */
1598 static void av_update_stream_timings(AVFormatContext *ic)
1600 int64_t start_time, start_time1, end_time, end_time1;
1601 int64_t duration, duration1;
/* sentinels: INT64_MAX/INT64_MIN mean "nothing found yet" */
1605 start_time = INT64_MAX;
1606 end_time = INT64_MIN;
1607 duration = INT64_MIN;
1608 for(i = 0;i < ic->nb_streams; i++) {
1609 st = ic->streams[i];
1610 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
/* earliest start time across all streams, in AV_TIME_BASE units */
1611 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1612 if (start_time1 < start_time)
1613 start_time = start_time1;
/* latest end time (start + duration) across all streams */
1614 if (st->duration != AV_NOPTS_VALUE) {
1615 end_time1 = start_time1
1616 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1617 if (end_time1 > end_time)
1618 end_time = end_time1;
/* longest individual stream duration */
1621 if (st->duration != AV_NOPTS_VALUE) {
1622 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1623 if (duration1 > duration)
1624 duration = duration1;
1627 if (start_time != INT64_MAX) {
1628 ic->start_time = start_time;
/* the overall span (end - start) wins over the longest single duration */
1629 if (end_time != INT64_MIN) {
1630 if (end_time - start_time > duration)
1631 duration = end_time - start_time;
1634 if (duration != INT64_MIN) {
1635 ic->duration = duration;
1636 if (ic->file_size > 0) {
1637 /* compute the bitrate */
1638 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1639 (double)ic->duration;
/**
 * Propagate the container-level timings down to streams that lack them:
 * first recompute ic->start_time/duration, then copy them (rescaled from
 * AV_TIME_BASE into each stream's time base) into streams whose start_time
 * is still unset. (Extract: some lines elided.)
 */
1644 static void fill_all_stream_timings(AVFormatContext *ic)
1649 av_update_stream_timings(ic);
1650 for(i = 0;i < ic->nb_streams; i++) {
1651 st = ic->streams[i];
1652 if (st->start_time == AV_NOPTS_VALUE) {
1653 if(ic->start_time != AV_NOPTS_VALUE)
1654 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1655 if(ic->duration != AV_NOPTS_VALUE)
1656 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/**
 * Least precise timing estimate: derive the total bitrate (sum of per-codec
 * bitrates when unset) and compute per-stream durations as
 * filesize*8 / bit_rate, rescaled into each stream's time base.
 * (Extract: some lines elided.)
 */
1661 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1663 int64_t filesize, duration;
1667 /* if bit_rate is already set, we believe it */
1668 if (ic->bit_rate == 0) {
1670 for(i=0;i<ic->nb_streams;i++) {
1671 st = ic->streams[i];
1672 bit_rate += st->codec->bit_rate;
1674 ic->bit_rate = bit_rate;
1677 /* if duration is already set, we believe it */
1678 if (ic->duration == AV_NOPTS_VALUE &&
1679 ic->bit_rate != 0 &&
1680 ic->file_size != 0) {
1681 filesize = ic->file_size;
1683 for(i = 0; i < ic->nb_streams; i++) {
1684 st = ic->streams[i];
/* duration = 8*filesize/bit_rate, expressed in st->time_base ticks */
1685 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1686 if (st->duration == AV_NOPTS_VALUE)
1687 st->duration = duration;
/* cap on how many bytes are scanned at each end of the file while probing
 * for start/end timestamps */
1693 #define DURATION_MAX_READ_SIZE 250000
1695 /* only usable for MPEG-PS streams */
/**
 * Estimate stream timings by actually reading packets: the first packets of
 * the file give each stream's start_time, then a window near EOF gives the
 * last PTS, and duration = end - start. Restores the read position to
 * old_offset afterwards. (Extract: some lines elided.)
 */
1696 static void av_estimate_timings_from_pts(AVFormatContext *ic, offset_t old_offset)
1698 AVPacket pkt1, *pkt = &pkt1;
1700 int read_size, i, ret;
1702 int64_t filesize, offset, duration;
1704 /* free previous packet */
1705 if (ic->cur_st && ic->cur_st->parser)
1706 av_free_packet(&ic->cur_pkt);
1709 /* flush packet queue */
1710 flush_packet_queue(ic);
/* close any per-stream parsers so the raw scan below is unfiltered */
1712 for(i=0;i<ic->nb_streams;i++) {
1713 st = ic->streams[i];
1715 av_parser_close(st->parser);
1720 /* we read the first packets to get the first PTS (not fully
1721 accurate, but it is enough now) */
1722 url_fseek(ic->pb, 0, SEEK_SET);
1725 if (read_size >= DURATION_MAX_READ_SIZE)
1727 /* if all info is available, we can stop */
1728 for(i = 0;i < ic->nb_streams; i++) {
1729 st = ic->streams[i];
1730 if (st->start_time == AV_NOPTS_VALUE)
1733 if (i == ic->nb_streams)
1736 ret = av_read_packet(ic, pkt);
1739 read_size += pkt->size;
1740 st = ic->streams[pkt->stream_index];
/* first PTS seen on a stream becomes its start_time */
1741 if (pkt->pts != AV_NOPTS_VALUE) {
1742 if (st->start_time == AV_NOPTS_VALUE)
1743 st->start_time = pkt->pts;
1745 av_free_packet(pkt);
1748 /* estimate the end time (duration) */
1749 /* XXX: may need to support wrapping */
1750 filesize = ic->file_size;
1751 offset = filesize - DURATION_MAX_READ_SIZE;
1755 url_fseek(ic->pb, offset, SEEK_SET);
1758 if (read_size >= DURATION_MAX_READ_SIZE)
1761 ret = av_read_packet(ic, pkt);
1764 read_size += pkt->size;
1765 st = ic->streams[pkt->stream_index];
/* keep the largest end-start span seen in the tail window */
1766 if (pkt->pts != AV_NOPTS_VALUE &&
1767 st->start_time != AV_NOPTS_VALUE) {
1768 end_time = pkt->pts;
1769 duration = end_time - st->start_time;
1771 if (st->duration == AV_NOPTS_VALUE ||
1772 st->duration < duration)
1773 st->duration = duration;
1776 av_free_packet(pkt);
1779 fill_all_stream_timings(ic);
/* restore the caller's file position and reset per-stream decode state */
1781 url_fseek(ic->pb, old_offset, SEEK_SET);
1782 for(i=0; i<ic->nb_streams; i++){
1784 st->cur_dts= st->first_dts;
1785 st->last_IP_pts = AV_NOPTS_VALUE;
/**
 * Timing-estimation dispatcher, from most to least accurate method:
 *   - MPEG-PS/TS on a seekable file -> scan real PTS values
 *   - any stream already has a duration -> propagate container timings
 *   - otherwise -> bitrate-based guess
 * The trailing printf block looks like debug output, presumably compiled
 * conditionally by an elided #ifdef (TODO confirm against full source).
 */
1789 static void av_estimate_timings(AVFormatContext *ic, offset_t old_offset)
1793 /* get the file size, if possible */
1794 if (ic->iformat->flags & AVFMT_NOFILE) {
1797 file_size = url_fsize(ic->pb);
1801 ic->file_size = file_size;
1803 if ((!strcmp(ic->iformat->name, "mpeg") ||
1804 !strcmp(ic->iformat->name, "mpegts")) &&
1805 file_size && !url_is_streamed(ic->pb)) {
1806 /* get accurate estimate from the PTSes */
1807 av_estimate_timings_from_pts(ic, old_offset);
1808 } else if (av_has_duration(ic)) {
1809 /* at least one component has timings - we use them for all
1811 fill_all_stream_timings(ic);
1813 /* less precise: use bitrate info */
1814 av_estimate_timings_from_bit_rate(ic);
1816 av_update_stream_timings(ic);
1822 for(i = 0;i < ic->nb_streams; i++) {
1823 st = ic->streams[i];
1824 printf("%d: start_time: %0.3f duration: %0.3f\n",
1825 i, (double)st->start_time / AV_TIME_BASE,
1826 (double)st->duration / AV_TIME_BASE);
1828 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1829 (double)ic->start_time / AV_TIME_BASE,
1830 (double)ic->duration / AV_TIME_BASE,
1831 ic->bit_rate / 1000);
/**
 * Return nonzero when enough codec parameters are known to consider the
 * stream fully probed: audio needs sample_rate+channels (plus frame_size
 * for Vorbis/AAC, per the extra check), video needs width and a pixel
 * format, and in all cases a codec_id. (Extract: some lines elided.)
 */
1836 static int has_codec_parameters(AVCodecContext *enc)
1839 switch(enc->codec_type) {
1840 case CODEC_TYPE_AUDIO:
1841 val = enc->sample_rate && enc->channels;
/* Vorbis/AAC additionally require a known frame_size */
1842 if(!enc->frame_size &&
1843 (enc->codec_id == CODEC_ID_VORBIS ||
1844 enc->codec_id == CODEC_ID_AAC))
1847 case CODEC_TYPE_VIDEO:
1848 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1854 return enc->codec_id != CODEC_ID_NONE && val != 0;
/**
 * Last-resort probing: open the stream's decoder (if not already open) and
 * decode one packet so that missing codec parameters (dimensions, pix_fmt,
 * sample format, ...) get filled in as a side effect.
 * (Extract: some lines elided.)
 */
1857 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1861 int got_picture, data_size, ret=0;
1864 if(!st->codec->codec){
1865 codec = avcodec_find_decoder(st->codec->codec_id);
1868 ret = avcodec_open(st->codec, codec);
/* only decode if parameters are still missing */
1873 if(!has_codec_parameters(st->codec)){
1874 switch(st->codec->codec_type) {
1875 case CODEC_TYPE_VIDEO:
1876 ret = avcodec_decode_video(st->codec, &picture,
1877 &got_picture, data, size);
1879 case CODEC_TYPE_AUDIO:
/* audio output buffer must hold at least one full decoded frame */
1880 data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1881 samples = av_malloc(data_size);
1884 ret = avcodec_decode_audio2(st->codec, samples,
1885 &data_size, data, size);
/* Linear scan of a CODEC_ID_NONE-terminated AVCodecTag table for 'id';
 * the match/return logic is in elided lines. */
1896 unsigned int codec_get_tag(const AVCodecTag *tags, int id)
1898 while (tags->id != CODEC_ID_NONE) {
/**
 * Reverse lookup: map a fourcc 'tag' to a CodecID. First pass matches the
 * tag exactly; second pass retries case-insensitively byte-per-byte, so
 * e.g. "divx" and "DIVX" resolve to the same id. Returns CODEC_ID_NONE on
 * no match. (Extract: some lines elided.)
 */
1906 enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag)
1909 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1910 if(tag == tags[i].tag)
/* case-insensitive fallback pass */
1913 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1914 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1915 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1916 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1917 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1920 return CODEC_ID_NONE;
/* Search up to four NULL-terminated tag tables for 'id' via codec_get_tag();
 * the return statements are in elided lines. */
1923 unsigned int av_codec_get_tag(const AVCodecTag *tags[4], enum CodecID id)
1926 for(i=0; tags && tags[i]; i++){
1927 int tag= codec_get_tag(tags[i], id);
/* Search up to four NULL-terminated tag tables for 'tag' via codec_get_id();
 * returns the first non-CODEC_ID_NONE hit, else CODEC_ID_NONE. */
1933 enum CodecID av_codec_get_id(const AVCodecTag *tags[4], unsigned int tag)
1936 for(i=0; tags && tags[i]; i++){
1937 enum CodecID id= codec_get_id(tags[i], tag);
1938 if(id!=CODEC_ID_NONE) return id;
1940 return CODEC_ID_NONE;
/**
 * Fill in missing chapter end times: each open-ended chapter ends where the
 * next one starts (chapters are asserted sorted and sharing a time base);
 * the final chapter ends at stream start_time + duration, rescaled into the
 * chapter's own time base. (Extract: some lines elided.)
 */
1943 static void compute_chapters_end(AVFormatContext *s)
1947 for (i=0; i+1<s->nb_chapters; i++)
1948 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
1949 assert(s->chapters[i]->start <= s->chapters[i+1]->start);
1950 assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
1951 s->chapters[i]->end = s->chapters[i+1]->start;
/* last chapter: close it at the end of the whole stream */
1954 if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
1955 assert(s->start_time != AV_NOPTS_VALUE);
1956 assert(s->duration > 0);
1957 s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
1959 s->chapters[i]->time_base);
1963 /* absolute maximum size we read until we abort */
1964 #define MAX_READ_SIZE 5000000
/* number of candidate timebases probed by the framerate-guessing code:
 * 1/1001 .. 720/1001 plus 5 extra standard rates */
1966 #define MAX_STD_TIMEBASES (60*12+5)
/* Map an index to a candidate framerate numerator (den is 12*1001):
 * indices below 60*12 give i/1.001 fps steps; the last five give the
 * common 24/30/60/12/15 fps values. */
1967 static int get_std_framerate(int i){
1968 if(i<60*12) return i*1001;
1969 else return ((int[]){24,30,60,12,15})[i-60*12]*1000*12;
1973 * Is the time base unreliable.
1974 * This is a heuristic to balance between quick acceptance of the values in
1975 * the headers vs. some extra checks.
1976 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
1977 * MPEG-2 commonly misuses field repeat flags to store different framerates.
1978 * And there are "variable" fps files this needs to detect as well.
/* Flags a codec time base as suspect when it implies >100 fps or <5 fps,
 * or when the codec is MPEG-2 video; the return statements are in elided
 * lines. */
1980 static int tb_unreliable(AVCodecContext *c){
1981 if( c->time_base.den >= 101L*c->time_base.num
1982 || c->time_base.den < 5L*c->time_base.num
1983 /* || c->codec_tag == ff_get_fourcc("DIVX")
1984 || c->codec_tag == ff_get_fourcc("XVID")*/
1985 || c->codec_id == CODEC_ID_MPEG2VIDEO)
/**
 * Probe the input by reading (and buffering) packets until every stream has
 * complete codec parameters, a reliable framerate guess, and extradata.
 * NOTE(review): this extract elides many intermediate lines; comments
 * describe only the visible code.
 *
 * Per-stream scratch arrays (indexed by stream index, MAX_STREAMS wide):
 *   last_dts / duration_count / duration_error - framerate statistics
 *   codec_info_duration / codec_info_nb_frames - analyze-duration tracking
 * Buffered packets stay in ic->packet_buffer so callers can still read them.
 */
1990 int av_find_stream_info(AVFormatContext *ic)
1992 int i, count, ret, read_size, j;
1994 AVPacket pkt1, *pkt;
1995 int64_t last_dts[MAX_STREAMS];
1996 int duration_count[MAX_STREAMS]={0};
1997 double (*duration_error)[MAX_STD_TIMEBASES];
1998 offset_t old_offset = url_ftell(ic->pb);
1999 int64_t codec_info_duration[MAX_STREAMS]={0};
2000 int codec_info_nb_frames[MAX_STREAMS]={0};
/* heap-allocated: MAX_STREAMS * MAX_STD_TIMEBASES doubles is too big for
 * the stack */
2002 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
2003 if (!duration_error) return AVERROR(ENOMEM);
/* per-stream setup: default codec time base, and a parser for extradata
 * splitting */
2005 for(i=0;i<ic->nb_streams;i++) {
2006 st = ic->streams[i];
2007 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2008 /* if(!st->time_base.num)
2010 if(!st->codec->time_base.num)
2011 st->codec->time_base= st->time_base;
2013 //only for the split stuff
2015 st->parser = av_parser_init(st->codec->codec_id);
2016 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2017 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2022 for(i=0;i<MAX_STREAMS;i++){
2023 last_dts[i]= AV_NOPTS_VALUE;
/* ---- main probing loop (loop header elided) ---- */
2029 /* check if one codec still needs to be handled */
2030 for(i=0;i<ic->nb_streams;i++) {
2031 st = ic->streams[i];
2032 if (!has_codec_parameters(st->codec))
2034 /* variable fps and no guess at the real fps */
2035 if( tb_unreliable(st->codec)
2036 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
2038 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2040 if(st->first_dts == AV_NOPTS_VALUE)
2043 if (i == ic->nb_streams) {
2044 /* NOTE: if the format has no header, then we need to read
2045 some packets to get most of the streams, so we cannot
2047 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2048 /* if we found the info for all the codecs, we can stop */
2053 /* we did not get all the codec info, but we read too much data */
2054 if (read_size >= MAX_READ_SIZE) {
2059 /* NOTE: a new stream can be added there if no header in file
2060 (AVFMTCTX_NOHEADER) */
2061 ret = av_read_frame_internal(ic, &pkt1);
/* EOF/error: report streams whose parameters never completed */
2064 ret = -1; /* we could not have all the codec parameters before EOF */
2065 for(i=0;i<ic->nb_streams;i++) {
2066 st = ic->streams[i];
2067 if (!has_codec_parameters(st->codec)){
2069 avcodec_string(buf, sizeof(buf), st->codec, 0);
2070 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
/* buffer the packet so it is not lost to the caller */
2078 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2079 if(av_dup_packet(pkt) < 0) {
2080 av_free(duration_error);
2081 return AVERROR(ENOMEM);
2084 read_size += pkt->size;
2086 st = ic->streams[pkt->stream_index];
/* skip the first frame's duration when accumulating analyze time */
2087 if(codec_info_nb_frames[st->index]>1)
2088 codec_info_duration[st->index] += pkt->duration;
2089 if (pkt->duration != 0)
2090 codec_info_nb_frames[st->index]++;
/* ---- framerate statistics from consecutive DTS deltas ---- */
2093 int index= pkt->stream_index;
2094 int64_t last= last_dts[index];
2095 int64_t duration= pkt->dts - last;
2097 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2098 double dur= duration * av_q2d(st->time_base);
2100 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2101 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2102 if(duration_count[index] < 2)
2103 memset(duration_error[index], 0, sizeof(*duration_error));
/* accumulate squared error of this delta against every standard rate */
2104 for(i=1; i<MAX_STD_TIMEBASES; i++){
2105 int framerate= get_std_framerate(i);
2106 int ticks= lrintf(dur*framerate/(1001*12));
2107 double error= dur - ticks*1001*12/(double)framerate;
2108 duration_error[index][i] += error*error;
2110 duration_count[index]++;
2112 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
2113 last_dts[pkt->stream_index]= pkt->dts;
/* extract extradata via the parser's split callback */
2115 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2116 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2118 st->codec->extradata_size= i;
2119 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2120 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2121 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2125 /* if still no information, we try to open the codec and to
2126 decompress the frame. We try to avoid that in most cases as
2127 it takes longer and uses more memory. For MPEG-4, we need to
2128 decompress for QuickTime. */
2129 if (!has_codec_parameters(st->codec) /*&&
2130 (st->codec->codec_id == CODEC_ID_FLV1 ||
2131 st->codec->codec_id == CODEC_ID_H264 ||
2132 st->codec->codec_id == CODEC_ID_H263 ||
2133 st->codec->codec_id == CODEC_ID_H261 ||
2134 st->codec->codec_id == CODEC_ID_VORBIS ||
2135 st->codec->codec_id == CODEC_ID_MJPEG ||
2136 st->codec->codec_id == CODEC_ID_PNG ||
2137 st->codec->codec_id == CODEC_ID_PAM ||
2138 st->codec->codec_id == CODEC_ID_PGM ||
2139 st->codec->codec_id == CODEC_ID_PGMYUV ||
2140 st->codec->codec_id == CODEC_ID_PBM ||
2141 st->codec->codec_id == CODEC_ID_PPM ||
2142 st->codec->codec_id == CODEC_ID_SHORTEN ||
2143 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
2144 try_decode_frame(st, pkt->data, pkt->size);
/* stop probing a stream once max_analyze_duration is exceeded */
2146 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2152 // close codecs which were opened in try_decode_frame()
2153 for(i=0;i<ic->nb_streams;i++) {
2154 st = ic->streams[i];
2155 if(st->codec->codec)
2156 avcodec_close(st->codec);
/* ---- post-processing: pick best framerate, fix raw-video tags ---- */
2158 for(i=0;i<ic->nb_streams;i++) {
2159 st = ic->streams[i];
2160 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2161 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_sample)
2162 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2164 if(duration_count[i]
2165 && tb_unreliable(st->codec) /*&&
2166 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2167 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
/* choose the standard rate with the smallest accumulated error */
2168 double best_error= 2*av_q2d(st->time_base);
2169 best_error= best_error*best_error*duration_count[i]*1000*12*30;
2171 for(j=1; j<MAX_STD_TIMEBASES; j++){
2172 double error= duration_error[i][j] * get_std_framerate(j);
2173 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2174 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2175 if(error < best_error){
2177 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, get_std_framerate(j), 12*1001, INT_MAX);
/* no statistics: fall back to whichever of codec/stream time base is
 * coarser */
2182 if (!st->r_frame_rate.num){
2183 if( st->codec->time_base.den * (int64_t)st->time_base.num
2184 <= st->codec->time_base.num * (int64_t)st->time_base.den){
2185 st->r_frame_rate.num = st->codec->time_base.den;
2186 st->r_frame_rate.den = st->codec->time_base.num;
2188 st->r_frame_rate.num = st->time_base.den;
2189 st->r_frame_rate.den = st->time_base.num;
2192 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
2193 if(!st->codec->bits_per_sample)
2194 st->codec->bits_per_sample= av_get_bits_per_sample(st->codec->codec_id);
2198 av_estimate_timings(ic, old_offset);
2200 compute_chapters_end(ic);
2203 /* correct DTS for B-frame streams with no timestamps */
2204 for(i=0;i<ic->nb_streams;i++) {
2205 st = ic->streams[i];
2206 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2208 ppktl = &ic->packet_buffer;
2210 if(ppkt1->stream_index != i)
2212 if(ppkt1->pkt->dts < 0)
2214 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
/* shift buffered DTS values so none are negative */
2216 ppkt1->pkt->dts -= delta;
2221 st->cur_dts -= delta;
2227 av_free(duration_error);
2232 /*******************************************************/
/* Resume a paused network stream: prefer the demuxer's read_play callback,
 * else (in an elided branch) fall back to the protocol-level pause API with
 * pause=0; ENOSYS when neither exists. */
2234 int av_read_play(AVFormatContext *s)
2236 if (s->iformat->read_play)
2237 return s->iformat->read_play(s);
2239 return av_url_read_fpause(s->pb, 0);
2240 return AVERROR(ENOSYS);
/* Pause a network stream: mirror of av_read_play() with pause=1. */
2243 int av_read_pause(AVFormatContext *s)
2245 if (s->iformat->read_pause)
2246 return s->iformat->read_pause(s);
2248 return av_url_read_fpause(s->pb, 1);
2249 return AVERROR(ENOSYS);
/**
 * Free everything owned by the demuxing context except the ByteIOContext:
 * pending packet, demuxer private data, every stream (parser, index,
 * extradata, filename, priv_data), programs, queued packets and chapters.
 * (Extract: some lines elided.)
 */
2252 void av_close_input_stream(AVFormatContext *s)
2257 /* free previous packet */
2258 if (s->cur_st && s->cur_st->parser)
2259 av_free_packet(&s->cur_pkt);
2261 if (s->iformat->read_close)
2262 s->iformat->read_close(s);
2263 for(i=0;i<s->nb_streams;i++) {
2264 /* free all data in a stream component */
2267 av_parser_close(st->parser);
2269 av_free(st->index_entries);
2270 av_free(st->codec->extradata);
2272 av_free(st->filename);
2273 av_free(st->priv_data);
/* free programs in reverse so the array can shrink safely */
2276 for(i=s->nb_programs-1; i>=0; i--) {
2277 av_freep(&s->programs[i]->provider_name);
2278 av_freep(&s->programs[i]->name);
2279 av_freep(&s->programs[i]->stream_index);
2280 av_freep(&s->programs[i]);
2282 av_freep(&s->programs);
2283 flush_packet_queue(s);
2284 av_freep(&s->priv_data);
2285 while(s->nb_chapters--) {
2286 av_free(s->chapters[s->nb_chapters]->title);
2287 av_free(s->chapters[s->nb_chapters]);
2289 av_freep(&s->chapters);
/* Close the demuxing context and, unless the format is AVFMT_NOFILE, the
 * underlying ByteIOContext (the url_fclose call is in an elided line). */
2293 void av_close_input_file(AVFormatContext *s)
2295 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2296 av_close_input_stream(s);
/**
 * Allocate a new AVStream, append it to s->streams and initialize its
 * defaults: fresh codec context, unknown start/duration, 90kHz 33-bit
 * MPEG-style PTS info, and AV_NOPTS_VALUE-filled reorder buffer.
 * (Extract: some lines elided.)
 */
2301 AVStream *av_new_stream(AVFormatContext *s, int id)
2306 if (s->nb_streams >= MAX_STREAMS)
2309 st = av_mallocz(sizeof(AVStream));
2313 st->codec= avcodec_alloc_context();
2315 /* no default bitrate if decoding */
2316 st->codec->bit_rate = 0;
2318 st->index = s->nb_streams;
2320 st->start_time = AV_NOPTS_VALUE;
2321 st->duration = AV_NOPTS_VALUE;
2322 /* we set the current DTS to 0 so that formats without any timestamps
2323 but durations get some timestamps, formats with some unknown
2324 timestamps have their first few packets buffered and the
2325 timestamps corrected before they are returned to the user */
2327 st->first_dts = AV_NOPTS_VALUE;
2329 /* default pts setting is MPEG-like */
2330 av_set_pts_info(st, 33, 1, 90000);
2331 st->last_IP_pts = AV_NOPTS_VALUE;
2332 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2333 st->pts_buffer[i]= AV_NOPTS_VALUE;
2335 s->streams[s->nb_streams++] = st;
/**
 * Find the program with the given id, or create and register a new one
 * (discard defaults to AVDISCARD_NONE). (Extract: some lines elided.)
 */
2339 AVProgram *av_new_program(AVFormatContext *ac, int id)
2341 AVProgram *program=NULL;
2345 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
/* reuse an existing program with the same id if present */
2348 for(i=0; i<ac->nb_programs; i++)
2349 if(ac->programs[i]->id == id)
2350 program = ac->programs[i];
2353 program = av_mallocz(sizeof(AVProgram));
2356 dynarray_add(&ac->programs, &ac->nb_programs, program);
2357 program->discard = AVDISCARD_NONE;
/* Replace a program's provider_name/name with copies of the given strings;
 * both must be set or both NULL (enforced by the assert). Previous values
 * are freed first. */
2364 void av_set_program_name(AVProgram *program, char *provider_name, char *name)
2366 assert(!provider_name == !name);
2368 av_free(program->provider_name);
2369 av_free(program-> name);
2370 program->provider_name = av_strdup(provider_name);
2371 program-> name = av_strdup( name);
/**
 * Find or create the chapter with the given id, then (re)set its title
 * (copied), time base and start time; existing chapters with the same id
 * are updated in place. (Extract: some lines elided.)
 */
2375 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2377 AVChapter *chapter = NULL;
2380 for(i=0; i<s->nb_chapters; i++)
2381 if(s->chapters[i]->id == id)
2382 chapter = s->chapters[i];
2385 chapter= av_mallocz(sizeof(AVChapter));
2388 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
/* replace any previous title with a fresh copy */
2390 av_free(chapter->title);
2391 chapter->title = av_strdup(title);
2393 chapter->time_base= time_base;
2394 chapter->start = start;
2400 /************************************************************/
2401 /* output media file */
/* Allocate the muxer's private-data block (if the format declares one) and
 * forward the AVFormatParameters to its set_parameters callback.
 * (Extract: some lines elided.) */
2403 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2407 if (s->oformat->priv_data_size > 0) {
2408 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2410 return AVERROR(ENOMEM);
2412 s->priv_data = NULL;
2414 if (s->oformat->set_parameters) {
2415 ret = s->oformat->set_parameters(s, ap);
/**
 * Validate stream parameters (sample rate, time base, dimensions), resolve
 * codec tags, make sure muxer private data exists, call the muxer's
 * write_header callback, and initialize per-stream fractional PTS state.
 * (Extract: some lines elided.)
 */
2422 int av_write_header(AVFormatContext *s)
2427 // some sanity checks
2428 for(i=0;i<s->nb_streams;i++) {
2431 switch (st->codec->codec_type) {
2432 case CODEC_TYPE_AUDIO:
2433 if(st->codec->sample_rate<=0){
2434 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2438 case CODEC_TYPE_VIDEO:
2439 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2440 av_log(s, AV_LOG_ERROR, "time base not set\n");
2443 if(st->codec->width<=0 || st->codec->height<=0){
2444 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2450 if(s->oformat->codec_tag){
2451 if(st->codec->codec_tag){
2453 //check that tag + id is in the table
2454 //if neither is in the table -> OK
2455 //if tag is in the table with another id -> FAIL
2456 //if id is in the table with another tag -> FAIL unless strict < ?
/* derive the tag from the codec id when the caller did not set one */
2458 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2462 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2463 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2465 return AVERROR(ENOMEM);
2468 if(s->oformat->write_header){
2469 ret = s->oformat->write_header(s);
2474 /* init PTS generation */
2475 for(i=0;i<s->nb_streams;i++) {
2476 int64_t den = AV_NOPTS_VALUE;
2479 switch (st->codec->codec_type) {
2480 case CODEC_TYPE_AUDIO:
/* audio ticks advance by sample count */
2481 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2483 case CODEC_TYPE_VIDEO:
/* video ticks advance by frame duration */
2484 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2489 if (den != AV_NOPTS_VALUE) {
2491 return AVERROR_INVALIDDATA;
2492 av_frac_init(&st->pts, 0, 0, den);
2498 //FIXME merge with compute_pkt_fields
/**
 * Fill in / validate packet timestamps on the muxing side: compute a
 * missing duration from the frame rate/sample rate, derive DTS from PTS
 * through the B-frame reorder buffer, reject non-monotone DTS and pts<dts,
 * and advance the stream's fractional PTS counter.
 * (Extract: some lines elided.)
 */
2499 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2500 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2501 int num, den, frame_size, i;
2503 //    av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2505 /*    if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2508 /* duration field */
2509 if (pkt->duration == 0) {
2510 compute_frame_duration(&num, &den, st, NULL, pkt);
2512 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
/* with no B-frame delay, DTS and PTS coincide */
2516 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2519 //XXX/FIXME this is a temporary hack until all encoders output pts
2520 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2522 //        pkt->pts= st->cur_dts;
2523 pkt->pts= st->pts.val;
2526 //calculate dts from pts
2527 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
/* fill the reorder buffer, seeding unseen slots with extrapolated values,
 * then bubble-sort the window so pts_buffer[0] is the smallest (= DTS) */
2528 st->pts_buffer[0]= pkt->pts;
2529 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2530 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
2531 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2532 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2534 pkt->dts= st->pts_buffer[0];
2537 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2538 av_log(st->codec, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2541 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2542 av_log(st->codec, AV_LOG_ERROR, "error, pts < dts\n");
2546 //    av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2547 st->cur_dts= pkt->dts;
2548 st->pts.val= pkt->dts;
/* advance the fractional PTS for the next packet */
2551 switch (st->codec->codec_type) {
2552 case CODEC_TYPE_AUDIO:
2553 frame_size = get_audio_frame_size(st->codec, pkt->size);
2555 /* HACK/FIXME, we skip the initial 0 size packets as they are most
2556 likely equal to the encoder delay, but it would be better if we
2557 had the real timestamps from the encoder */
2558 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2559 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2562 case CODEC_TYPE_VIDEO:
2563 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
/* Wrap PTS/DTS into the stream's pts_wrap_bits range by masking with
 * 2^pts_wrap_bits - 1 (AV_NOPTS_VALUE is left untouched). */
2571 static void truncate_ts(AVStream *st, AVPacket *pkt){
2572 int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
2575 //    pkt->dts= 0; //this happens for low_delay=0 and B-frames, FIXME, needs further investigation about what we should do here
2577 if (pkt->pts != AV_NOPTS_VALUE)
2578 pkt->pts &= pts_mask;
2579 if (pkt->dts != AV_NOPTS_VALUE)
2580 pkt->dts &= pts_mask;
/* Non-interleaved write path: validate/complete timestamps (errors are
 * fatal unless the muxer is AVFMT_NOTIMESTAMPS), wrap them, hand the packet
 * to the muxer and surface any pending I/O error.
 * (Extract: some lines elided.) */
2583 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2585 int ret = compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2587 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2590 truncate_ts(s->streams[pkt->stream_index], pkt);
2592 ret= s->oformat->write_packet(s, pkt);
2594 ret= url_ferror(s->pb);
/**
 * Default interleaving: buffer incoming packets in a DTS-sorted list
 * (comparing cross-stream via time-base cross-multiplication) and emit the
 * head packet once every stream has contributed at least one buffered
 * packet, or when flushing. (Extract: some lines elided.)
 */
2598 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2599 AVPacketList *pktl, **next_point, *this_pktl;
2601 int streams[MAX_STREAMS];
2604 AVStream *st= s->streams[ pkt->stream_index];
2606 //        assert(pkt->destruct != av_destruct_packet); //FIXME
2608 this_pktl = av_mallocz(sizeof(AVPacketList));
2609 this_pktl->pkt= *pkt;
/* take ownership: a self-owned packet keeps its buffer, a shared one is
 * duplicated so the caller's copy stays valid */
2610 if(pkt->destruct == av_destruct_packet)
2611 pkt->destruct= NULL; // not shared -> must keep original from being freed
2613 av_dup_packet(&this_pktl->pkt); //shared -> must dup
/* insert sorted by DTS, comparing across different time bases */
2615 next_point = &s->packet_buffer;
2617 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2618 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2619 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2620 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2622 next_point= &(*next_point)->next;
2624 this_pktl->next= *next_point;
2625 *next_point= this_pktl;
/* count how many distinct streams have buffered packets */
2628 memset(streams, 0, sizeof(streams));
2629 pktl= s->packet_buffer;
2631 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
2632 if(streams[ pktl->pkt.stream_index ] == 0)
2634 streams[ pktl->pkt.stream_index ]++;
/* pop the head packet when all streams are represented or when flushing */
2638 if(stream_count && (s->nb_streams == stream_count || flush)){
2639 pktl= s->packet_buffer;
2642 s->packet_buffer= pktl->next;
2646 av_init_packet(out);
2652 * Interleaves an AVPacket correctly so it can be muxed.
2653 * @param out the interleaved packet will be output here
2654 * @param in the input packet
2655 * @param flush 1 if no further packets are available as input and all
2656 * remaining packets should be output
2657 * @return 1 if a packet was output, 0 if no packet could be output,
2658 * < 0 if an error occurred
/* Delegates to the muxer's own interleave_packet callback when it has one,
 * else to the generic DTS-based interleaver above. */
2660 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2661 if(s->oformat->interleave_packet)
2662 return s->oformat->interleave_packet(s, out, in, flush);
2664 return av_interleave_packet_per_dts(s, out, in, flush);
/**
 * Interleaved write path: drop zero-size audio packets, complete the
 * timestamps, then drain the interleaver (visible loop body) writing each
 * emitted packet through the muxer. (Extract: some lines elided.)
 */
2667 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2668 AVStream *st= s->streams[ pkt->stream_index];
2670 //FIXME/XXX/HACK drop zero sized packets
2671 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2674 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2675 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2678 if(pkt->dts == AV_NOPTS_VALUE)
/* feed the packet to the interleaver and write whatever comes out */
2683 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2684 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2687 truncate_ts(s->streams[opkt.stream_index], &opkt);
2688 ret= s->oformat->write_packet(s, &opkt);
2690 av_free_packet(&opkt);
2695 if(url_ferror(s->pb))
2696 return url_ferror(s->pb);
/**
 * Finish muxing: flush all remaining interleaved packets (flush=1), call
 * the muxer's write_trailer, and free per-stream and muxer private data.
 * (Extract: some lines elided.)
 */
2700 int av_write_trailer(AVFormatContext *s)
2706 ret= av_interleave_packet(s, &pkt, NULL, 1);
2707 if(ret<0) //FIXME cleanup needed for ret<0 ?
2712 truncate_ts(s->streams[pkt.stream_index], &pkt);
2713 ret= s->oformat->write_packet(s, &pkt);
2715 av_free_packet(&pkt);
2719 if(url_ferror(s->pb))
2723 if(s->oformat->write_trailer)
2724 ret = s->oformat->write_trailer(s);
2727 ret=url_ferror(s->pb);
2728 for(i=0;i<s->nb_streams;i++)
2729 av_freep(&s->streams[i]->priv_data);
2730 av_freep(&s->priv_data);
/**
 * Add stream index 'idx' to the program with id 'progid', growing its
 * stream_index array by one; duplicates are skipped by the inner loop.
 * (Extract: some lines elided.)
 */
2734 void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
2737 AVProgram *program=NULL;
2740 for(i=0; i<ac->nb_programs; i++){
2741 if(ac->programs[i]->id != progid)
2743 program = ac->programs[i];
/* already registered? then nothing to do */
2744 for(j=0; j<program->nb_stream_indexes; j++)
2745 if(program->stream_index[j] == idx)
2748 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
2751 program->stream_index = tmp;
2752 program->stream_index[program->nb_stream_indexes++] = idx;
2757 /* "user interface" functions */
/**
 * Logs a one-line description of stream i of ic (id, language, codec
 * string, frame rates) at AV_LOG_INFO level.
 * @param index printed as the file index in "Stream #index.i"
 * @param is_output selects oformat vs. iformat flags and the codec
 *        string's encode/decode direction
 */
2758 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
2761 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
2762 AVStream *st = ic->streams[i];
/* Reduce the timebase by its gcd for compact display below. */
2763 int g = ff_gcd(st->time_base.num, st->time_base.den);
2764 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2765 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2766 /* the pid is an important information, so we display it */
2767 /* XXX: add a generic system */
2768 if (flags & AVFMT_SHOW_IDS)
2769 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2770 if (strlen(st->language) > 0)
2771 av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
/* The raw timebase is debugging detail, hence AV_LOG_DEBUG level. */
2772 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2773 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2774 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
/* tb(r): the "real" measured frame rate, when known. */
2775 if(st->r_frame_rate.den && st->r_frame_rate.num)
2776 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(r)", av_q2d(st->r_frame_rate));
2777 /* else if(st->time_base.den && st->time_base.num)
2778 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(m)", 1/av_q2d(st->time_base));*/
/* tb(c): frame rate implied by the codec timebase. */
2780 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(c)", 1/av_q2d(st->codec->time_base));
2782 av_log(NULL, AV_LOG_INFO, "\n");
/**
 * Logs a human-readable summary of the container ic: format name,
 * duration, start time, bitrate, programs, and one line per stream
 * (via dump_stream_format()).
 * NOTE(review): the parameter list is partially outside this excerpt;
 * index/url/is_output usage is inferred from the calls below.
 */
2785 void dump_format(AVFormatContext *ic,
2792 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2793 is_output ? "Output" : "Input",
2795 is_output ? ic->oformat->name : ic->iformat->name,
2796 is_output ? "to" : "from", url);
2798 av_log(NULL, AV_LOG_INFO, " Duration: ");
2799 if (ic->duration != AV_NOPTS_VALUE) {
2800 int hours, mins, secs, us;
/* Split the AV_TIME_BASE-scaled duration into h:m:s + centiseconds. */
2801 secs = ic->duration / AV_TIME_BASE;
2802 us = ic->duration % AV_TIME_BASE;
2807 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
2808 (100 * us) / AV_TIME_BASE);
2810 av_log(NULL, AV_LOG_INFO, "N/A");
2812 if (ic->start_time != AV_NOPTS_VALUE) {
2814 av_log(NULL, AV_LOG_INFO, ", start: ");
2815 secs = ic->start_time / AV_TIME_BASE;
2816 us = ic->start_time % AV_TIME_BASE;
/* Rescale the sub-second remainder to microseconds for %06d. */
2817 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2818 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2820 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2822 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2824 av_log(NULL, AV_LOG_INFO, "N/A");
2826 av_log(NULL, AV_LOG_INFO, "\n");
/* When programs are present, group the stream dumps per program. */
2828 if(ic->nb_programs) {
2830 for(j=0; j<ic->nb_programs; j++) {
2831 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
2832 ic->programs[j]->name ? ic->programs[j]->name : "");
2833 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++)
2834 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
2837 for(i=0;i<ic->nb_streams;i++)
2838 dump_stream_format(ic, i, index, is_output);
/**
 * Thin compatibility wrapper: parses a frame-size string into
 * *width_ptr / *height_ptr by delegating to av_parse_video_frame_size().
 * The accepted syntax and return convention are those of the callee.
 */
2841 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2843 return av_parse_video_frame_size(width_ptr, height_ptr, str);
/**
 * Compatibility wrapper around av_parse_video_frame_rate(): parses arg
 * into a rational frame rate and stores numerator and denominator
 * through the two output pointers.
 * NOTE(review): the return statement is outside this excerpt —
 * presumably it forwards ret.
 */
2846 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
2848 AVRational frame_rate;
2849 int ret = av_parse_video_frame_rate(&frame_rate, arg);
2850 *frame_rate_num= frame_rate.num;
2851 *frame_rate_den= frame_rate.den;
/**
2856 * Gets the current time in microseconds.
 */
2858 int64_t av_gettime(void)
/* Wall-clock time from gettimeofday(); tv_sec is widened to 64 bits
 * before the multiply to avoid 32-bit overflow. */
2861 gettimeofday(&tv,NULL);
2862 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
/**
 * Parses datestr and returns a time value in microseconds.
 * @param datestr an absolute date/time string (a trailing 'Z'/'z'
 *        selects UTC), or — when duration is non-zero — a duration in
 *        "HH:MM:SS[.m...]" or plain-seconds "S+[.m...]" form
 * @param duration non-zero to parse datestr as a duration rather than a
 *        calendar date
 * @return presumably microseconds since the epoch (or the duration in
 *         microseconds); error path not visible here — confirm
 */
2865 int64_t parse_date(const char *datestr, int duration)
2871 static const char *date_fmt[] = {
2875 static const char *time_fmt[] = {
2885 time_t now = time(0);
2887 len = strlen(datestr);
2889 lastch = datestr[len - 1];
/* A trailing 'Z'/'z' marks the string as UTC. */
2892 is_utc = (lastch == 'z' || lastch == 'Z');
2894 memset(&dt, 0, sizeof(dt));
2899 /* parse the year-month-day part */
2900 for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
2901 q = small_strptime(p, date_fmt[i], &dt);
2907 /* if the year-month-day part is missing, then take the
2908 * current year-month-day time */
2913 dt = *localtime(&now);
/* Only the date is inherited from "now"; the time starts at 00:00:00. */
2915 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
/* An optional 'T', 't' or space separates date and time parts. */
2920 if (*p == 'T' || *p == 't' || *p == ' ')
2923 /* parse the hour-minute-second part */
2924 for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
2925 q = small_strptime(p, time_fmt[i], &dt);
2931 /* parse datestr as a duration */
2936 /* parse datestr as HH:MM:SS */
2937 q = small_strptime(p, time_fmt[0], &dt);
2939 /* parse datestr as S+ */
2940 dt.tm_sec = strtol(p, (char **)&q, 10);
2942 /* the parsing didn't succeed */
2949 /* Now we have all the fields that we can get */
2955 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2957 dt.tm_isdst = -1; /* unknown */
2967 /* parse the .m... part */
/* Accumulate up to six fractional-second digits as microseconds
 * (n starts at 100000 = weight of the first digit after the dot). */
2971 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2974 val += n * (*q - '0');
2978 return negative ? -t : t;
/**
 * Scans the '&'-separated "tag=value" pairs in info for tag1 and, when
 * found, copies the matching value into arg (bounded by arg_size).
 * @return non-zero if tag1 was found — NOTE(review): the final return
 *         statements fall outside this excerpt; confirm.
 */
2981 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
/* Copy the tag name up to '=', '&' or end of string, bounded by tag[]. */
2991 while (*p != '\0' && *p != '=' && *p != '&') {
2992 if ((q - tag) < sizeof(tag) - 1)
/* Copy the value up to the next '&', bounded by arg_size. */
3000 while (*p != '&' && *p != '\0') {
3001 if ((q - arg) < arg_size - 1) {
3011 if (!strcmp(tag, tag1))
/**
 * Builds a frame filename in buf by expanding a "%d" / "%0Nd" pattern in
 * path with number.
 * @return 0 on success; negative on failure — the percentd_found check
 *         suggests a missing "%d" is an error, and the buf_size checks
 *         guard against truncation (elided branches — confirm).
 */
3020 int av_get_frame_filename(char *buf, int buf_size,
3021 const char *path, int number)
3024 char *q, buf1[20], c;
3025 int nd, len, percentd_found;
/* Read the optional zero-padding width digits after '%'. */
3037 while (isdigit(*p)) {
3038 nd = nd * 10 + *p++ - '0';
3041 } while (isdigit(c));
/* Render the number with the requested minimum width into buf1. */
3050 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
/* Bail out if appending the rendered number would overflow buf. */
3052 if ((q - buf + len) > buf_size - 1)
3054 memcpy(q, buf1, len);
3062 if ((q - buf) < buf_size - 1)
3066 if (!percentd_found)
/**
 * Hex-dumps size bytes of buf, 16 bytes per line followed by an ASCII
 * column, either via av_log (when f is NULL) or to the stdio stream f.
 */
3075 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
/* Route every line either to av_log or to fprintf, depending on f. */
3078 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3080 for(i=0;i<size;i+=16) {
3087 PRINT(" %02x", buf[i+j]);
3092 for(j=0;j<len;j++) {
/* Non-printable bytes are substituted in the ASCII column —
 * presumably with '.'; the replacement line is elided here. */
3094 if (c < ' ' || c > '~')
/** Public wrapper: hex-dumps buf to the stdio stream f. */
3103 void av_hex_dump(FILE *f, uint8_t *buf, int size)
3105 hex_dump_internal(NULL, f, 0, buf, size);
/** Public wrapper: hex-dumps buf through av_log on context avcl at the
 * given log level. */
3108 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3110 hex_dump_internal(avcl, NULL, level, buf, size);
3113 //FIXME needs to know the time_base
/**
 * Dumps an AVPacket's metadata (stream index, keyframe flag, duration,
 * dts, pts, size) and optionally its payload as a hex dump, either via
 * av_log (f == NULL) or to stream f. Timestamps are divided by
 * AV_TIME_BASE regardless of the stream's real timebase — see the FIXME
 * above.
 */
3114 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
3116 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3117 PRINT("stream #%d:\n", pkt->stream_index);
3118 PRINT(" keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
3119 PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
3120 /* DTS is _always_ valid after av_read_frame() */
3122 if (pkt->dts == AV_NOPTS_VALUE)
3125 PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
3126 /* PTS may not be known if B-frames are present. */
3128 if (pkt->pts == AV_NOPTS_VALUE)
3131 PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
3133 PRINT(" size=%d\n", pkt->size);
3136 av_hex_dump(f, pkt->data, pkt->size);
/** Public wrapper: dumps pkt (optionally with payload) to stream f. */
3139 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3141 pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
/** Public wrapper: dumps pkt (optionally with payload) through av_log
 * on context avcl at the given log level. */
3144 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3146 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
/**
 * Splits a URL of the general form proto://user[:pass]@host[:port]/path
 * into its components. Components that are absent come back as empty
 * strings (and port as -1). Any component can be skipped by passing a
 * size of 0 for its buffer (or NULL for port_ptr).
 * NOTE(review): the url parameter declaration is outside this excerpt.
 */
3149 void url_split(char *proto, int proto_size,
3150 char *authorization, int authorization_size,
3151 char *hostname, int hostname_size,
3153 char *path, int path_size,
3156 const char *p, *ls, *at, *col, *brk;
/* Initialize every output to its "missing" value first. */
3158 if (port_ptr) *port_ptr = -1;
3159 if (proto_size > 0) proto[0] = 0;
3160 if (authorization_size > 0) authorization[0] = 0;
3161 if (hostname_size > 0) hostname[0] = 0;
3162 if (path_size > 0) path[0] = 0;
3164 /* parse protocol */
3165 if ((p = strchr(url, ':'))) {
3166 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3171 /* no protocol means plain filename */
3172 av_strlcpy(path, url, path_size);
3176 /* separate path from hostname */
3177 ls = strchr(p, '/');
/* A '?' (query string with no path) also terminates the host part. */
3179 ls = strchr(p, '?');
3181 av_strlcpy(path, ls, path_size);
3183 ls = &p[strlen(p)]; // XXX
3185 /* the rest is hostname, use that to parse auth/port */
3187 /* authorization (user[:pass]@hostname) */
3188 if ((at = strchr(p, '@')) && at < ls) {
3189 av_strlcpy(authorization, p,
3190 FFMIN(authorization_size, at + 1 - p));
3191 p = at + 1; /* skip '@' */
/* Bracketed IPv6 literal: [addr] optionally followed by :port. */
3194 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3196 av_strlcpy(hostname, p + 1,
3197 FFMIN(hostname_size, brk - p));
3198 if (brk[1] == ':' && port_ptr)
3199 *port_ptr = atoi(brk + 2);
3200 } else if ((col = strchr(p, ':')) && col < ls) {
/* host:port — copy up to the colon, then parse the port. */
3201 av_strlcpy(hostname, p,
3202 FFMIN(col + 1 - p, hostname_size));
3203 if (port_ptr) *port_ptr = atoi(col + 1);
3205 av_strlcpy(hostname, p,
3206 FFMIN(ls + 1 - p, hostname_size));
3210 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3211 int pts_num, int pts_den)
3213 unsigned int gcd= ff_gcd(pts_num, pts_den);
3214 s->pts_wrap_bits = pts_wrap_bits;
3215 s->time_base.num = pts_num/gcd;
3216 s->time_base.den = pts_den/gcd;
3219 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, gcd);