2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #include "libavcodec/opt.h"
23 #include "libavutil/avstring.h"
33 * @file libavformat/utils.c
34 * various utility functions for use within FFmpeg
37 unsigned avformat_version(void)
39 return LIBAVFORMAT_VERSION_INT;
/* NOTE(review): garbled excerpt — original line numbers are fused into the
 * text and interior lines are missing; code kept byte-identical. */
/* av_frac_init: set the AVFrac f to val + num/den (rounded), with num
 * normalized into [0, den) per the doc comment below. Body not visible here. */
42 /* fraction handling */
45 * f = val + (num / den) + 0.5.
47 * 'num' is normalized so that it is such as 0 <= num < den.
49 * @param f fractional number
50 * @param val integer value
51 * @param num must be >= 0
52 * @param den must be >= 1
54 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical. */
/* av_frac_add: add incr/f->den to the fraction f, renormalizing num into
 * [0, den) (the visible `num >= den` branch handles overflow of num). */
67 * Fractional addition to f: f = f + (incr / f->den).
69 * @param f fractional number
70 * @param incr increment, can be positive or negative
72 static void av_frac_add(AVFrac *f, int64_t incr)
85 } else if (num >= den) {
92 /** head of registered input format linked list */
93 AVInputFormat *first_iformat = NULL;
94 /** head of registered output format linked list */
95 AVOutputFormat *first_oformat = NULL;
97 AVInputFormat *av_iformat_next(AVInputFormat *f)
100 else return first_iformat;
103 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
105 if(f) return f->next;
106 else return first_oformat;
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical. */
/* Append `format` at the tail of the first_iformat linked list; the visible
 * loop walks `p` to the terminating NULL next-pointer. */
109 void av_register_input_format(AVInputFormat *format)
113 while (*p != NULL) p = &(*p)->next;
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical. */
/* Append `format` at the tail of the first_oformat linked list, mirroring
 * av_register_input_format(). */
118 void av_register_output_format(AVOutputFormat *format)
122 while (*p != NULL) p = &(*p)->next;
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical. */
/* Test whether filename's extension (text after the last '.') matches one of
 * the comma-separated entries in `extensions`, case-insensitively.
 * The visible loop copies one entry into ext1, bounded by sizeof(ext1)-1. */
127 int match_ext(const char *filename, const char *extensions)
135 ext = strrchr(filename, '.');
141 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
144 if (!strcasecmp(ext1, ext))
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical. */
/* Pick the best-matching registered output format by scoring matches on
 * short name, MIME type, and filename extension; keeps the highest scorer.
 * Numbered filename sequences are redirected to the "image2" muxer when
 * CONFIG_IMAGE2_MUXER is enabled. */
154 AVOutputFormat *guess_format(const char *short_name, const char *filename,
155 const char *mime_type)
157 AVOutputFormat *fmt, *fmt_found;
158 int score_max, score;
160 /* specific test for image sequences */
161 #ifdef CONFIG_IMAGE2_MUXER
162 if (!short_name && filename &&
163 av_filename_number_test(filename) &&
164 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
165 return guess_format("image2", NULL, NULL);
168 /* Find the proper file type. */
172 while (fmt != NULL) {
174 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
176 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
178 if (filename && fmt->extensions &&
179 match_ext(filename, fmt->extensions)) {
182 if (score > score_max) {
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical. */
/* Like guess_format(), but when a format is found, look up its streaming
 * variant named "<name>_stream" and return that instead (fallback behavior
 * for the not-found case is outside this excerpt). */
191 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
192 const char *mime_type)
194 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
197 AVOutputFormat *stream_fmt;
198 char stream_format_name[64];
200 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
201 stream_fmt = guess_format(stream_format_name, NULL, NULL);
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical. */
/* Choose the default codec id for a stream of the given `type` in output
 * format `fmt`: the format's video_codec/audio_codec, except image2 muxers,
 * which derive the video codec from the filename. */
210 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
211 const char *filename, const char *mime_type, enum CodecType type){
212 if(type == CODEC_TYPE_VIDEO){
213 enum CodecID codec_id= CODEC_ID_NONE;
215 #ifdef CONFIG_IMAGE2_MUXER
216 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
217 codec_id= av_guess_image2_codec(filename);
220 if(codec_id == CODEC_ID_NONE)
221 codec_id= fmt->video_codec;
223 }else if(type == CODEC_TYPE_AUDIO)
224 return fmt->audio_codec;
226 return CODEC_ID_NONE;
229 AVInputFormat *av_find_input_format(const char *short_name)
232 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
233 if (!strcmp(fmt->name, short_name))
239 /* memory handling */
241 void av_destruct_packet(AVPacket *pkt)
244 pkt->data = NULL; pkt->size = 0;
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical. */
/* Reset the packet's metadata fields to defaults (no timestamps, stream 0,
 * non-freeing destructor). Does not touch data/size allocation. */
247 void av_init_packet(AVPacket *pkt)
249 pkt->pts = AV_NOPTS_VALUE;
250 pkt->dts = AV_NOPTS_VALUE;
254 pkt->stream_index = 0;
255 pkt->destruct= av_destruct_packet_nofree;
258 int av_new_packet(AVPacket *pkt, int size)
261 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
262 return AVERROR(ENOMEM);
263 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
265 return AVERROR(ENOMEM);
266 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
271 pkt->destruct = av_destruct_packet;
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical. */
/* Allocate a packet of `size` bytes, record the stream position, and fill
 * it with bytes read from s. Error handling of the short-read case is not
 * visible in this excerpt. */
275 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
277 int ret= av_new_packet(pkt, size);
282 pkt->pos= url_ftell(s);
284 ret= get_buffer(s, pkt->data, size);
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical. */
/* If the packet does not already own its buffer (destruct != the freeing
 * destructor), make a private padded copy of the payload so the packet can
 * outlive the demuxer's internal buffer. Same wraparound guard as
 * av_new_packet(). */
293 int av_dup_packet(AVPacket *pkt)
295 if (pkt->destruct != av_destruct_packet) {
297 /* We duplicate the packet and don't forget to add the padding again. */
298 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
299 return AVERROR(ENOMEM);
300 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
302 return AVERROR(ENOMEM);
304 memcpy(data, pkt->data, pkt->size);
305 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
307 pkt->destruct = av_destruct_packet;
/**
 * Check whether `filename` contains a frame-number pattern by attempting to
 * expand it for frame 1 with av_get_frame_filename().
 * @return 1 if the expansion succeeds, 0 otherwise (including NULL input)
 */
int av_filename_number_test(const char *filename)
{
    char buf[1024];
    return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1) >= 0);
}
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical. */
/* Score every registered demuxer against the probe data: use read_probe()
 * when available, otherwise fall back to extension matching. Demuxers whose
 * AVFMT_NOFILE flag disagrees with `is_opened` are skipped. Returns the
 * highest scorer above *score_max and updates *score_max. */
318 static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
320 AVInputFormat *fmt1, *fmt;
324 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
325 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
328 if (fmt1->read_probe) {
329 score = fmt1->read_probe(pd);
330 } else if (fmt1->extensions) {
331 if (match_ext(pd->filename, fmt1->extensions)) {
335 if (score > *score_max) {
338 }else if (score == *score_max)
344 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
346 return av_probe_input_format2(pd, is_opened, &score);
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical. */
/* Re-probe a stream's buffered data and translate the winning demuxer's
 * name into a concrete codec id (mp3/ac3/mpegvideo/h264) on the stream. */
349 static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
352 fmt = av_probe_input_format2(pd, 1, &score);
355 if (!strcmp(fmt->name, "mp3"))
356 st->codec->codec_id = CODEC_ID_MP3;
357 else if (!strcmp(fmt->name, "ac3"))
358 st->codec->codec_id = CODEC_ID_AC3;
359 else if (!strcmp(fmt->name, "mpegvideo"))
360 st->codec->codec_id = CODEC_ID_MPEG2VIDEO;
361 else if (!strcmp(fmt->name, "h264"))
362 st->codec->codec_id = CODEC_ID_H264;
367 /************************************************************/
368 /* input media file */
371 * Open a media file from an IO stream. 'fmt' must be specified.
373 static const char* format_to_name(void* ptr)
375 AVFormatContext* fc = (AVFormatContext*) ptr;
376 if(fc->iformat) return fc->iformat->name;
377 else if(fc->oformat) return fc->oformat->name;
381 #define OFFSET(x) offsetof(AVFormatContext,x)
382 #define DEFAULT 0 //should be NAN but it does not work as it is not a constant in glibc as required by ANSI/ISO C
383 //these names are too long to be readable
384 #define E AV_OPT_FLAG_ENCODING_PARAM
385 #define D AV_OPT_FLAG_DECODING_PARAM
387 static const AVOption options[]={
388 {"probesize", NULL, OFFSET(probesize), FF_OPT_TYPE_INT, 32000, 32, INT_MAX, D}, /* 32000 from mpegts.c: 1.0 second at 24Mbit/s */
389 {"muxrate", "set mux rate", OFFSET(mux_rate), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
390 {"packetsize", "set packet size", OFFSET(packet_size), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
391 {"fflags", NULL, OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, D|E, "fflags"},
392 {"ignidx", "ignore index", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_IGNIDX, INT_MIN, INT_MAX, D, "fflags"},
393 {"genpts", "generate pts", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_GENPTS, INT_MIN, INT_MAX, D, "fflags"},
394 {"track", " set the track number", OFFSET(track), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
395 {"year", "set the year", OFFSET(year), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, E},
396 {"analyzeduration", "how many microseconds are analyzed to estimate duration", OFFSET(max_analyze_duration), FF_OPT_TYPE_INT, 3*AV_TIME_BASE, 0, INT_MAX, D},
397 {"cryptokey", "decryption key", OFFSET(key), FF_OPT_TYPE_BINARY, 0, 0, 0, D},
398 {"indexmem", "max memory used for timestamp index (per stream)", OFFSET(max_index_size), FF_OPT_TYPE_INT, 1<<20, 0, INT_MAX, D},
399 {"rtbufsize", "max memory used for buffering real-time frames", OFFSET(max_picture_buffer), FF_OPT_TYPE_INT, 3041280, 0, INT_MAX, D}, /* defaults to 1s of 15fps 352x288 YUYV422 video */
400 {"fdebug", "print specific debug info", OFFSET(debug), FF_OPT_TYPE_FLAGS, DEFAULT, 0, INT_MAX, E|D, "fdebug"},
401 {"ts", NULL, 0, FF_OPT_TYPE_CONST, FF_FDEBUG_TS, INT_MIN, INT_MAX, E|D, "fdebug"},
409 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name, options };
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical. */
/* Zero the whole context, attach its AVClass, then apply the AVOption
 * default values declared in options[]. */
411 static void avformat_get_context_defaults(AVFormatContext *s)
413 memset(s, 0, sizeof(AVFormatContext));
415 s->av_class = &av_format_context_class;
417 av_opt_set_defaults(s);
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical. */
/* Allocate an AVFormatContext and install defaults. NOTE(review): av_class
 * appears to be assigned twice — once inside avformat_get_context_defaults()
 * and once here; looks redundant but harmless — confirm against full file. */
420 AVFormatContext *av_alloc_format_context(void)
423 ic = av_malloc(sizeof(AVFormatContext));
425 avformat_get_context_defaults(ic);
426 ic->av_class = &av_format_context_class;
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical. */
/* Open a media stream from an already-opened ByteIOContext with a known
 * input format: allocate the context (unless preallocated by the caller),
 * allocate the demuxer's private data, call its read_header(), and record
 * the data offset. The trailing av_freep/av_free lines are the error-unwind
 * path that releases per-stream state. */
430 int av_open_input_stream(AVFormatContext **ic_ptr,
431 ByteIOContext *pb, const char *filename,
432 AVInputFormat *fmt, AVFormatParameters *ap)
436 AVFormatParameters default_ap;
440 memset(ap, 0, sizeof(default_ap));
443 if(!ap->prealloced_context)
444 ic = av_alloc_format_context();
448 err = AVERROR(ENOMEM);
453 ic->duration = AV_NOPTS_VALUE;
454 ic->start_time = AV_NOPTS_VALUE;
455 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
457 /* allocate private data */
458 if (fmt->priv_data_size > 0) {
459 ic->priv_data = av_mallocz(fmt->priv_data_size);
460 if (!ic->priv_data) {
461 err = AVERROR(ENOMEM);
465 ic->priv_data = NULL;
468 if (ic->iformat->read_header) {
469 err = ic->iformat->read_header(ic, ap);
474 if (pb && !ic->data_offset)
475 ic->data_offset = url_ftell(ic->pb);
482 av_freep(&ic->priv_data);
483 for(i=0;i<ic->nb_streams;i++) {
484 AVStream *st = ic->streams[i];
486 av_free(st->priv_data);
487 av_free(st->codec->extradata);
497 /** size of probe buffer, for guessing file type from file contents */
498 #define PROBE_BUF_MIN 2048
499 #define PROBE_BUF_MAX (1<<20)
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical. */
/* Open and probe a media file: first try probing by filename only, then (for
 * formats that need a file) open it and probe its contents with buffers that
 * double from PROBE_BUF_MIN up to PROBE_BUF_MAX until a demuxer scores high
 * enough, then hand off to av_open_input_stream().
 * NOTE(review): `pd->buf= av_realloc(pd->buf, ...)` overwrites the pointer;
 * if realloc fails the old buffer leaks — classic realloc pitfall, confirm
 * and fix against the full file. */
501 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
504 AVFormatParameters *ap)
507 AVProbeData probe_data, *pd = &probe_data;
508 ByteIOContext *pb = NULL;
512 pd->filename = filename;
517 /* guess format if no file can be opened */
518 fmt = av_probe_input_format(pd, 0);
521 /* Do not open file if the format does not need it. XXX: specific
522 hack needed to handle RTSP/TCP */
523 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
524 /* if no file needed do not try to open one */
525 if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
529 url_setbufsize(pb, buf_size);
532 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
533 int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
534 /* read probe data */
535 pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
536 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
537 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
538 if (url_fseek(pb, 0, SEEK_SET) < 0) {
540 if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
546 /* guess file format */
547 fmt = av_probe_input_format2(pd, 1, &score);
552 /* if still no format found, error */
558 /* check filename in case an image number is expected */
559 if (fmt->flags & AVFMT_NEEDNUMBER) {
560 if (!av_filename_number_test(filename)) {
561 err = AVERROR_NUMEXPECTED;
565 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
578 /*******************************************************/
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical. */
/* Append pkt (wrapped in a newly allocated AVPacketList node) to a singly
 * linked packet list; *plast_pktl tracks the tail so appends are O(1). */
580 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
581 AVPacketList **plast_pktl){
582 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
587 (*plast_pktl)->next = pktl;
589 *packet_buffer = pktl;
591 /* add the packet in the buffered packet list */
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical. */
/* Read the next raw packet from the demuxer. Packets for streams whose codec
 * is still CODEC_ID_PROBE are buffered in raw_packet_buffer and their data is
 * accumulated into the stream's probe_data; once enough data is gathered
 * (probed at power-of-two growth via the av_log2 check), the codec id is
 * resolved with set_codec_from_probe_data(). Forced codec ids from the
 * context override the demuxer-reported ids per media type. */
597 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
603 AVPacketList *pktl = s->raw_packet_buffer;
607 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE){
608 s->raw_packet_buffer = pktl->next;
615 ret= s->iformat->read_packet(s, pkt);
618 st= s->streams[pkt->stream_index];
620 switch(st->codec->codec_type){
621 case CODEC_TYPE_VIDEO:
622 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
624 case CODEC_TYPE_AUDIO:
625 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
627 case CODEC_TYPE_SUBTITLE:
628 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
632 if(!pktl && st->codec->codec_id!=CODEC_ID_PROBE)
635 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
637 if(st->codec->codec_id == CODEC_ID_PROBE){
638 AVProbeData *pd = &st->probe_data;
640 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
641 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
642 pd->buf_size += pkt->size;
643 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
645 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
646 set_codec_from_probe_data(st, pd, 1);
647 if(st->codec->codec_id != CODEC_ID_PROBE){
656 /**********************************************************/
659 * Get the number of samples of an audio frame. Return -1 on error.
661 static int get_audio_frame_size(AVCodecContext *enc, int size)
665 if(enc->codec_id == CODEC_ID_VORBIS)
668 if (enc->frame_size <= 1) {
669 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
671 if (bits_per_sample) {
672 if (enc->channels == 0)
674 frame_size = (size << 3) / (bits_per_sample * enc->channels);
676 /* used for example by ADPCM codecs */
677 if (enc->bit_rate == 0)
679 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
682 frame_size = enc->frame_size;
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical. */
/* Return the frame duration as a rational *pnum / *pden (seconds): for video
 * the stream or codec time base (preferring whichever is plausible via the
 * *1000 sanity check), scaled by repeat_pict from the parser; for audio, the
 * sample count over the sample rate. */
689 * Return the frame duration in seconds. Return 0 if not available.
691 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
692 AVCodecParserContext *pc, AVPacket *pkt)
698 switch(st->codec->codec_type) {
699 case CODEC_TYPE_VIDEO:
700 if(st->time_base.num*1000LL > st->time_base.den){
701 *pnum = st->time_base.num;
702 *pden = st->time_base.den;
703 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
704 *pnum = st->codec->time_base.num;
705 *pden = st->codec->time_base.den;
706 if (pc && pc->repeat_pict) {
708 *pnum = (*pnum) * (2 + pc->repeat_pict);
712 case CODEC_TYPE_AUDIO:
713 frame_size = get_audio_frame_size(st->codec, pkt->size);
717 *pden = st->codec->sample_rate;
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical. */
/* Whether every frame of the codec is independently decodable: all audio
 * codecs qualify; for video, only the whitelisted intra-only codec ids. */
724 static int is_intra_only(AVCodecContext *enc){
725 if(enc->codec_type == CODEC_TYPE_AUDIO){
727 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
728 switch(enc->codec_id){
730 case CODEC_ID_MJPEGB:
732 case CODEC_ID_RAWVIDEO:
733 case CODEC_ID_DVVIDEO:
734 case CODEC_ID_HUFFYUV:
735 case CODEC_ID_FFVHUFF:
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical. */
/* Once the stream's first DTS becomes known, shift the timestamps of all
 * packets already buffered for this stream by first_dts, and initialize
 * start_time from the earliest available PTS. */
746 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
747 int64_t dts, int64_t pts)
749 AVStream *st= s->streams[stream_index];
750 AVPacketList *pktl= s->packet_buffer;
752 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
755 st->first_dts= dts - st->cur_dts;
758 for(; pktl; pktl= pktl->next){
759 if(pktl->pkt.stream_index != stream_index)
761 //FIXME think more about this check
762 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
763 pktl->pkt.pts += st->first_dts;
765 if(pktl->pkt.dts != AV_NOPTS_VALUE)
766 pktl->pkt.dts += st->first_dts;
768 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
769 st->start_time= pktl->pkt.pts;
771 if (st->start_time == AV_NOPTS_VALUE)
772 st->start_time = pts;
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical. */
/* Backfill DTS/PTS/duration of already-buffered packets of this stream that
 * have no timestamps at all, by stepping cur_dts in pkt->duration increments
 * (backwards from first_dts when known, forwards otherwise). */
775 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
777 AVPacketList *pktl= s->packet_buffer;
780 if(st->first_dts != AV_NOPTS_VALUE){
781 cur_dts= st->first_dts;
782 for(; pktl; pktl= pktl->next){
783 if(pktl->pkt.stream_index == pkt->stream_index){
784 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
786 cur_dts -= pkt->duration;
789 pktl= s->packet_buffer;
790 st->first_dts = cur_dts;
791 }else if(st->cur_dts)
794 for(; pktl; pktl= pktl->next){
795 if(pktl->pkt.stream_index != pkt->stream_index)
797 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
798 && !pktl->pkt.duration){
799 pktl->pkt.dts= cur_dts;
800 if(!st->codec->has_b_frames)
801 pktl->pkt.pts= cur_dts;
802 cur_dts += pkt->duration;
803 pktl->pkt.duration= pkt->duration;
807 if(st->first_dts == AV_NOPTS_VALUE)
808 st->cur_dts= cur_dts;
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical.
 * Logic is order-sensitive timestamp heuristics; left untouched. */
/* Fill in missing packet timestamp fields: undo pts_wrap_bits wraparound,
 * derive duration from compute_frame_duration(), detect B-frame presentation
 * delay, interpolate PTS/DTS from cur_dts / last_IP_*, reorder PTS through
 * pts_buffer to synthesize DTS, and mark keyframes (all frames of intra-only
 * codecs, or parser-reported I-frames). */
811 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
812 AVCodecParserContext *pc, AVPacket *pkt)
814 int num, den, presentation_delayed, delay, i;
817 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
818 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
819 pkt->dts -= 1LL<<st->pts_wrap_bits;
822 if (pkt->duration == 0) {
823 compute_frame_duration(&num, &den, st, pc, pkt);
825 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
827 if(pkt->duration != 0 && s->packet_buffer)
828 update_initial_durations(s, st, pkt);
832 /* correct timestamps with byte offset if demuxers only have timestamps
833 on packet boundaries */
834 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
835 /* this will estimate bitrate based on this frame's duration and size */
836 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
837 if(pkt->pts != AV_NOPTS_VALUE)
839 if(pkt->dts != AV_NOPTS_VALUE)
843 /* do we have a video B-frame ? */
844 delay= st->codec->has_b_frames;
845 presentation_delayed = 0;
846 /* XXX: need has_b_frame, but cannot get it if the codec is
849 pc && pc->pict_type != FF_B_TYPE)
850 presentation_delayed = 1;
851 /* This may be redundant, but it should not hurt. */
852 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
853 presentation_delayed = 1;
855 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
856 /* interpolate PTS and DTS if they are not present */
857 if(delay==0 || (delay==1 && pc)){
858 if (presentation_delayed) {
859 /* DTS = decompression timestamp */
860 /* PTS = presentation timestamp */
861 if (pkt->dts == AV_NOPTS_VALUE)
862 pkt->dts = st->last_IP_pts;
863 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
864 if (pkt->dts == AV_NOPTS_VALUE)
865 pkt->dts = st->cur_dts;
867 /* this is tricky: the dts must be incremented by the duration
868 of the frame we are displaying, i.e. the last I- or P-frame */
869 if (st->last_IP_duration == 0)
870 st->last_IP_duration = pkt->duration;
871 if(pkt->dts != AV_NOPTS_VALUE)
872 st->cur_dts = pkt->dts + st->last_IP_duration;
873 st->last_IP_duration = pkt->duration;
874 st->last_IP_pts= pkt->pts;
875 /* cannot compute PTS if not present (we can compute it only
876 by knowing the future */
877 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
878 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
879 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
880 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
881 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
882 pkt->pts += pkt->duration;
883 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
887 /* presentation is not delayed : PTS and DTS are the same */
888 if(pkt->pts == AV_NOPTS_VALUE)
890 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
891 if(pkt->pts == AV_NOPTS_VALUE)
892 pkt->pts = st->cur_dts;
894 if(pkt->pts != AV_NOPTS_VALUE)
895 st->cur_dts = pkt->pts + pkt->duration;
899 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
900 st->pts_buffer[0]= pkt->pts;
901 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
902 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
903 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
904 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
905 if(pkt->dts == AV_NOPTS_VALUE)
906 pkt->dts= st->pts_buffer[0];
908 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
910 if(pkt->dts > st->cur_dts)
911 st->cur_dts = pkt->dts;
914 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
917 if(is_intra_only(st->codec))
918 pkt->flags |= PKT_FLAG_KEY;
921 /* keyframe computation */
922 if (pc->pict_type == FF_I_TYPE)
923 pkt->flags |= PKT_FLAG_KEY;
927 void av_destruct_packet_nofree(AVPacket *pkt)
929 pkt->data = NULL; pkt->size = 0;
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical.
 * Parser/demuxer state machine; statement order matters, left untouched. */
/* Produce the next complete frame: either pass through a raw packet (no
 * parser needed), or feed cur_pkt through the stream's parser until a full
 * frame is assembled; timestamps are fixed up via compute_pkt_fields(), a
 * generic index entry is added for keyframes, and at EOF the parsers are
 * flushed for any pending frames. */
932 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
940 /* select current input stream component */
943 if (!st->need_parsing || !st->parser) {
944 /* no parsing needed: we just output the packet as is */
945 /* raw data support */
947 compute_pkt_fields(s, st, NULL, pkt);
950 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
951 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
952 s->cur_ptr, s->cur_len,
953 s->cur_pkt.pts, s->cur_pkt.dts);
954 s->cur_pkt.pts = AV_NOPTS_VALUE;
955 s->cur_pkt.dts = AV_NOPTS_VALUE;
956 /* increment read pointer */
960 /* return packet if any */
963 pkt->pos = s->cur_pkt.pos; // Isn't quite accurate but close.
965 pkt->stream_index = st->index;
966 pkt->pts = st->parser->pts;
967 pkt->dts = st->parser->dts;
968 pkt->destruct = av_destruct_packet_nofree;
969 compute_pkt_fields(s, st, st->parser, pkt);
971 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
972 ff_reduce_index(s, st->index);
973 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
974 0, 0, AVINDEX_KEYFRAME);
981 av_free_packet(&s->cur_pkt);
985 /* read next packet */
986 ret = av_read_packet(s, &s->cur_pkt);
988 if (ret == AVERROR(EAGAIN))
990 /* return the last frames, if any */
991 for(i = 0; i < s->nb_streams; i++) {
993 if (st->parser && st->need_parsing) {
994 av_parser_parse(st->parser, st->codec,
995 &pkt->data, &pkt->size,
997 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
1002 /* no more packets: really terminate parsing */
1006 if(s->cur_pkt.pts != AV_NOPTS_VALUE &&
1007 s->cur_pkt.dts != AV_NOPTS_VALUE &&
1008 s->cur_pkt.pts < s->cur_pkt.dts){
1009 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1010 s->cur_pkt.stream_index,
1014 // av_free_packet(&s->cur_pkt);
1018 st = s->streams[s->cur_pkt.stream_index];
1019 if(s->debug & FF_FDEBUG_TS)
1020 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
1021 s->cur_pkt.stream_index,
1028 s->cur_ptr = s->cur_pkt.data;
1029 s->cur_len = s->cur_pkt.size;
1030 if (st->need_parsing && !st->parser) {
1031 st->parser = av_parser_init(st->codec->codec_id);
1033 /* no parser available: just output the raw packets */
1034 st->need_parsing = AVSTREAM_PARSE_NONE;
1035 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
1036 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1038 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
1039 st->parser->next_frame_offset=
1040 st->parser->cur_offset= s->cur_pkt.pos;
1045 if(s->debug & FF_FDEBUG_TS)
1046 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical. */
/* Public frame reader. Without AVFMT_FLAG_GENPTS it drains packet_buffer or
 * calls av_read_frame_internal() directly. With GENPTS it scans the buffer
 * to synthesize a missing PTS from the next DTS of the same stream (non-B
 * frames), buffering duplicated packets until the PTS can be resolved. */
1056 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1060 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1063 pktl = s->packet_buffer;
1065 AVPacket *next_pkt= &pktl->pkt;
1067 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
1068 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1069 if( pktl->pkt.stream_index == next_pkt->stream_index
1070 && next_pkt->dts < pktl->pkt.dts
1071 && pktl->pkt.pts != pktl->pkt.dts //not b frame
1072 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
1073 next_pkt->pts= pktl->pkt.dts;
1077 pktl = s->packet_buffer;
1080 if( next_pkt->pts != AV_NOPTS_VALUE
1081 || next_pkt->dts == AV_NOPTS_VALUE
1083 /* read packet from packet buffer, if there is data */
1085 s->packet_buffer = pktl->next;
1091 int ret= av_read_frame_internal(s, pkt);
1093 if(pktl && ret != AVERROR(EAGAIN)){
1100 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1101 &s->packet_buffer_end)) < 0)
1102 return AVERROR(ENOMEM);
1104 assert(!s->packet_buffer);
1105 return av_read_frame_internal(s, pkt);
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical. */
/* Free every packet (and list node) queued in s->packet_buffer. */
1110 /* XXX: suppress the packet queue */
1111 static void flush_packet_queue(AVFormatContext *s)
1116 pktl = s->packet_buffer;
1119 s->packet_buffer = pktl->next;
1120 av_free_packet(&pktl->pkt);
1125 /*******************************************************/
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical. */
/* Pick a default stream: first video stream if any, else first audio
 * stream, else stream 0. Returns -1 when there are no streams (the early
 * return for nb_streams <= 0 is partly outside this excerpt). */
1128 int av_find_default_stream_index(AVFormatContext *s)
1130 int first_audio_index = -1;
1134 if (s->nb_streams <= 0)
1136 for(i = 0; i < s->nb_streams; i++) {
1138 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1141 if (first_audio_index < 0 && st->codec->codec_type == CODEC_TYPE_AUDIO)
1142 first_audio_index = i;
1144 return first_audio_index >= 0 ? first_audio_index : 0;
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical. */
/* Discard all buffered packets and per-stream parser/timestamp state so
 * reading can restart cleanly (used around seeks). */
1148 * Flush the frame reader.
1150 static void av_read_frame_flush(AVFormatContext *s)
1155 flush_packet_queue(s);
1157 /* free previous packet */
1159 if (s->cur_st->parser)
1160 av_free_packet(&s->cur_pkt);
1167 /* for each stream, reset read state */
1168 for(i = 0; i < s->nb_streams; i++) {
1172 av_parser_close(st->parser);
1175 st->last_IP_pts = AV_NOPTS_VALUE;
1176 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical. */
/* After a seek, set every stream's cur_dts to `timestamp` (expressed in
 * ref_st's time base), rescaled into each stream's own time base. */
1180 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1183 for(i = 0; i < s->nb_streams; i++) {
1184 AVStream *st = s->streams[i];
1186 st->cur_dts = av_rescale(timestamp,
1187 st->time_base.den * (int64_t)ref_st->time_base.num,
1188 st->time_base.num * (int64_t)ref_st->time_base.den);
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical. */
/* Keep the seek index within s->max_index_size bytes: when it grows past
 * the limit, drop every other entry (halving resolution, not coverage). */
1192 void ff_reduce_index(AVFormatContext *s, int stream_index)
1194 AVStream *st= s->streams[stream_index];
1195 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1197 if((unsigned)st->nb_index_entries >= max_entries){
1199 for(i=0; 2*i<st->nb_index_entries; i++)
1200 st->index_entries[i]= st->index_entries[2*i];
1201 st->nb_index_entries= i;
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical. */
/* Insert (pos, timestamp, size, distance, flags) into the stream's sorted
 * index: grow the array, binary-search the insertion point, shift entries
 * for a new timestamp, or update the existing entry — keeping the larger
 * min_distance when the position matches. Guards the allocation size
 * against unsigned overflow. */
1205 int av_add_index_entry(AVStream *st,
1206 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1208 AVIndexEntry *entries, *ie;
1211 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1214 entries = av_fast_realloc(st->index_entries,
1215 &st->index_entries_allocated_size,
1216 (st->nb_index_entries + 1) *
1217 sizeof(AVIndexEntry));
1221 st->index_entries= entries;
1223 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1226 index= st->nb_index_entries++;
1227 ie= &entries[index];
1228 assert(index==0 || ie[-1].timestamp < timestamp);
1230 ie= &entries[index];
1231 if(ie->timestamp != timestamp){
1232 if(ie->timestamp <= timestamp)
1234 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1235 st->nb_index_entries++;
1236 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1237 distance= ie->min_distance;
1241 ie->timestamp = timestamp;
1242 ie->min_distance= distance;
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical. */
/* Binary-search the sorted index for wanted_timestamp. AVSEEK_FLAG_BACKWARD
 * selects the entry at-or-before, otherwise at-or-after; without
 * AVSEEK_FLAG_ANY the result is then walked to the nearest keyframe entry. */
1249 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1252 AVIndexEntry *entries= st->index_entries;
1253 int nb_entries= st->nb_index_entries;
1262 timestamp = entries[m].timestamp;
1263 if(timestamp >= wanted_timestamp)
1265 if(timestamp <= wanted_timestamp)
1268 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1270 if(!(flags & AVSEEK_FLAG_ANY)){
1271 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1272 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical. */
/* Seek via the demuxer's read_timestamp(): seed the [pos_min, pos_max]
 * search window from cached index entries when available, run
 * av_gen_search() to locate target_ts, seek the IO context there, and
 * propagate the reached timestamp to all streams' cur_dts. */
1283 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1284 AVInputFormat *avif= s->iformat;
1285 int64_t pos_min, pos_max, pos, pos_limit;
1286 int64_t ts_min, ts_max, ts;
1290 if (stream_index < 0)
1294 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1298 ts_min= AV_NOPTS_VALUE;
1299 pos_limit= -1; //gcc falsely says it may be uninitialized
1301 st= s->streams[stream_index];
1302 if(st->index_entries){
1305 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1306 index= FFMAX(index, 0);
1307 e= &st->index_entries[index];
1309 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1311 ts_min= e->timestamp;
1313 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1320 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1321 assert(index < st->nb_index_entries);
1323 e= &st->index_entries[index];
1324 assert(e->timestamp >= target_ts);
1326 ts_max= e->timestamp;
1327 pos_limit= pos_max - e->min_distance;
1329 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1330 pos_max,pos_limit, ts_max);
1335 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1340 url_fseek(s->pb, pos, SEEK_SET);
1342 av_update_cur_dts(s, st, ts);
/* NOTE(review): garbled excerpt — interior lines missing; code kept byte-identical.
 * Interpolation/bisection seek loop; statement order is load-bearing. */
/* Generic timestamp search: establish ts_min/ts_max by probing the start
 * and end of the file with read_timestamp(), then narrow [pos_min, pos_limit]
 * using linear interpolation first, bisection when interpolation stalls, and
 * linear stepping as a last resort, until the position for target_ts is
 * bracketed; returns the chosen position and reached timestamp in *ts_ret. */
1347 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1349 int64_t start_pos, filesize;
1353 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1356 if(ts_min == AV_NOPTS_VALUE){
1357 pos_min = s->data_offset;
1358 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1359 if (ts_min == AV_NOPTS_VALUE)
1363 if(ts_max == AV_NOPTS_VALUE){
1365 filesize = url_fsize(s->pb);
1366 pos_max = filesize - 1;
1369 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1371 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1372 if (ts_max == AV_NOPTS_VALUE)
1376 int64_t tmp_pos= pos_max + 1;
1377 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1378 if(tmp_ts == AV_NOPTS_VALUE)
1382 if(tmp_pos >= filesize)
1388 if(ts_min > ts_max){
1390 }else if(ts_min == ts_max){
1395 while (pos_min < pos_limit) {
1397 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1401 assert(pos_limit <= pos_max);
1404 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1405 // interpolate position (better than dichotomy)
1406 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1407 + pos_min - approximate_keyframe_distance;
1408 }else if(no_change==1){
1409 // bisection, if interpolation failed to change min or max pos last time
1410 pos = (pos_min + pos_limit)>>1;
1412 /* linear search if bisection failed, can only happen if there
1413 are very few or no keyframes between min/max */
1418 else if(pos > pos_limit)
1422 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1428 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1430 if(ts == AV_NOPTS_VALUE){
1431 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1434 assert(ts != AV_NOPTS_VALUE);
1435 if (target_ts <= ts) {
1436 pos_limit = start_pos - 1;
1440 if (target_ts >= ts) {
1446 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1447 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1450 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1452 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1453 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1454 pos, ts_min, target_ts, ts_max);
/**
 * Seek by raw byte position: clamp pos into [data_offset, filesize-1],
 * seek the ByteIOContext there and update the stream's current DTS.
 * NOTE(review): elided excerpt; some original lines are not shown.
 */
1460 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1461 int64_t pos_min, pos_max;
1465 if (stream_index < 0)
1468 st= s->streams[stream_index];
1471 pos_min = s->data_offset;
1472 pos_max = url_fsize(s->pb) - 1;
/* clamp the requested position into the file's valid byte range */
1474 if (pos < pos_min) pos= pos_min;
1475 else if(pos > pos_max) pos= pos_max;
1477 url_fseek(s->pb, pos, SEEK_SET);
1480 av_update_cur_dts(s, st, ts);
/**
 * Index-based fallback seek: look the timestamp up in the stream's index;
 * if it is missing or refers to the last entry, read packets forward from
 * the best known position until the index covers the target, then seek to
 * the matching index entry.
 * NOTE(review): elided excerpt; some original lines are not shown.
 */
1485 static int av_seek_frame_generic(AVFormatContext *s,
1486 int stream_index, int64_t timestamp, int flags)
1492 st = s->streams[stream_index];
1494 index = av_index_search_timestamp(st, timestamp, flags);
/* target lies beyond the known index: resume at the last indexed position */
1496 if(index < 0 || index==st->nb_index_entries-1){
1500 if(st->nb_index_entries){
1501 assert(st->index_entries);
1502 ie= &st->index_entries[st->nb_index_entries-1];
1503 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1505 av_update_cur_dts(s, st, ie->timestamp);
1507 if ((ret = url_fseek(s->pb, 0, SEEK_SET)) < 0)
/* read packets until a keyframe past the target extends the index */
1511 int ret = av_read_frame(s, &pkt);
1514 av_free_packet(&pkt);
1515 if(stream_index == pkt.stream_index){
1516 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1520 index = av_index_search_timestamp(st, timestamp, flags);
1525 av_read_frame_flush(s);
1526 if (s->iformat->read_seek){
1527 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1530 ie = &st->index_entries[index];
1531 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1533 av_update_cur_dts(s, st, ie->timestamp);
/**
 * Public seeking entry point: dispatch to byte seek, the demuxer's own
 * read_seek(), binary timestamp search, or the generic index-based seek.
 * A negative stream_index selects a default stream; the timestamp is then
 * rescaled from AV_TIME_BASE units into that stream's time base.
 * NOTE(review): elided excerpt; some original lines are not shown.
 */
1538 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1543 av_read_frame_flush(s);
1545 if(flags & AVSEEK_FLAG_BYTE)
1546 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1548 if(stream_index < 0){
1549 stream_index= av_find_default_stream_index(s);
1550 if(stream_index < 0)
1553 st= s->streams[stream_index];
1554 /* timestamp for default must be expressed in AV_TIME_BASE units */
1555 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1557 st= s->streams[stream_index];
1559 /* first, we try the format specific seek */
1560 if (s->iformat->read_seek)
1561 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
/* otherwise fall back to binary search (needs read_timestamp) or the index */
1568 if(s->iformat->read_timestamp)
1569 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1571 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1574 /*******************************************************/
1577 * Returns TRUE if at least one stream has an accurate duration.
1579 * @return TRUE if the stream has accurate duration for at least one component.
1581 static int av_has_duration(AVFormatContext *ic)
1586 for(i = 0;i < ic->nb_streams; i++) {
1587 st = ic->streams[i];
1588 if (st->duration != AV_NOPTS_VALUE)
1595 * Estimate the global stream timings from those of the individual streams.
1597 * Also computes the global bitrate if possible.
/**
 * Derive the container-level start_time, duration and (when file_size is
 * known) bit_rate from the per-stream values, converting everything into
 * AV_TIME_BASE units.
 * NOTE(review): elided excerpt; some original lines are not shown.
 */
1599 static void av_update_stream_timings(AVFormatContext *ic)
1601 int64_t start_time, start_time1, end_time, end_time1;
1602 int64_t duration, duration1;
/* start from sentinel extremes so any real value replaces them */
1606 start_time = INT64_MAX;
1607 end_time = INT64_MIN;
1608 duration = INT64_MIN;
1609 for(i = 0;i < ic->nb_streams; i++) {
1610 st = ic->streams[i];
1611 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
/* rescale per-stream values into AV_TIME_BASE units before comparing */
1612 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1613 if (start_time1 < start_time)
1614 start_time = start_time1;
1615 if (st->duration != AV_NOPTS_VALUE) {
1616 end_time1 = start_time1
1617 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1618 if (end_time1 > end_time)
1619 end_time = end_time1;
1622 if (st->duration != AV_NOPTS_VALUE) {
1623 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1624 if (duration1 > duration)
1625 duration = duration1;
1628 if (start_time != INT64_MAX) {
1629 ic->start_time = start_time;
1630 if (end_time != INT64_MIN) {
/* the span across all streams may exceed the longest single stream */
1631 if (end_time - start_time > duration)
1632 duration = end_time - start_time;
1635 if (duration != INT64_MIN) {
1636 ic->duration = duration;
1637 if (ic->file_size > 0) {
1638 /* compute the bitrate */
1639 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1640 (double)ic->duration;
/**
 * Propagate the container-level start time and duration (recomputed via
 * av_update_stream_timings()) onto any stream lacking its own values,
 * rescaling from AV_TIME_BASE units into each stream's time base.
 * NOTE(review): elided excerpt; the closing lines are not shown.
 */
1645 static void fill_all_stream_timings(AVFormatContext *ic)
1650 av_update_stream_timings(ic);
1651 for(i = 0;i < ic->nb_streams; i++) {
1652 st = ic->streams[i];
1653 if (st->start_time == AV_NOPTS_VALUE) {
1654 if(ic->start_time != AV_NOPTS_VALUE)
1655 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1656 if(ic->duration != AV_NOPTS_VALUE)
1657 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/**
 * Fill in a missing global bitrate (sum of the stream bitrates) and, when
 * both file size and bitrate are known, derive per-stream durations from
 * 8 * filesize / bit_rate rescaled into each stream's time base.
 * NOTE(review): elided excerpt; some original lines are not shown.
 */
1662 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1664 int64_t filesize, duration;
1668 /* if bit_rate is already set, we believe it */
1669 if (ic->bit_rate == 0) {
1671 for(i=0;i<ic->nb_streams;i++) {
1672 st = ic->streams[i];
1673 bit_rate += st->codec->bit_rate;
1675 ic->bit_rate = bit_rate;
1678 /* if duration is already set, we believe it */
1679 if (ic->duration == AV_NOPTS_VALUE &&
1680 ic->bit_rate != 0 &&
1681 ic->file_size != 0) {
1682 filesize = ic->file_size;
1684 for(i = 0; i < ic->nb_streams; i++) {
1685 st = ic->streams[i];
/* duration = 8*filesize/bit_rate, expressed in the stream time base */
1686 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1687 if (st->duration == AV_NOPTS_VALUE)
1688 st->duration = duration;
1694 #define DURATION_MAX_READ_SIZE 250000
1696 /* only usable for MPEG-PS streams */
1697 static void av_estimate_timings_from_pts(AVFormatContext *ic, offset_t old_offset)
1699 AVPacket pkt1, *pkt = &pkt1;
1701 int read_size, i, ret;
1703 int64_t filesize, offset, duration;
1705 /* free previous packet */
1706 if (ic->cur_st && ic->cur_st->parser)
1707 av_free_packet(&ic->cur_pkt);
1710 /* flush packet queue */
1711 flush_packet_queue(ic);
1713 for(i=0;i<ic->nb_streams;i++) {
1714 st = ic->streams[i];
1716 av_parser_close(st->parser);
1721 /* we read the first packets to get the first PTS (not fully
1722 accurate, but it is enough now) */
1723 url_fseek(ic->pb, 0, SEEK_SET);
1726 if (read_size >= DURATION_MAX_READ_SIZE)
1728 /* if all info is available, we can stop */
1729 for(i = 0;i < ic->nb_streams; i++) {
1730 st = ic->streams[i];
1731 if (st->start_time == AV_NOPTS_VALUE)
1734 if (i == ic->nb_streams)
1737 ret = av_read_packet(ic, pkt);
1740 read_size += pkt->size;
1741 st = ic->streams[pkt->stream_index];
1742 if (pkt->pts != AV_NOPTS_VALUE) {
1743 if (st->start_time == AV_NOPTS_VALUE)
1744 st->start_time = pkt->pts;
1746 av_free_packet(pkt);
1749 /* estimate the end time (duration) */
1750 /* XXX: may need to support wrapping */
1751 filesize = ic->file_size;
1752 offset = filesize - DURATION_MAX_READ_SIZE;
1756 url_fseek(ic->pb, offset, SEEK_SET);
1759 if (read_size >= DURATION_MAX_READ_SIZE)
1762 ret = av_read_packet(ic, pkt);
1765 read_size += pkt->size;
1766 st = ic->streams[pkt->stream_index];
1767 if (pkt->pts != AV_NOPTS_VALUE &&
1768 st->start_time != AV_NOPTS_VALUE) {
1769 end_time = pkt->pts;
1770 duration = end_time - st->start_time;
1772 if (st->duration == AV_NOPTS_VALUE ||
1773 st->duration < duration)
1774 st->duration = duration;
1777 av_free_packet(pkt);
1780 fill_all_stream_timings(ic);
1782 url_fseek(ic->pb, old_offset, SEEK_SET);
1783 for(i=0; i<ic->nb_streams; i++){
1785 st->cur_dts= st->first_dts;
1786 st->last_IP_pts = AV_NOPTS_VALUE;
/**
 * Choose a timing-estimation strategy: exact PTS probing for seekable
 * MPEG-PS/TS files, per-stream propagation when at least one stream has
 * a duration, otherwise the bitrate-based guess.
 * NOTE(review): elided excerpt; the printf dump at the end looks like
 * debug-only code (probably guarded by #if 0 in the full file) -- confirm.
 */
1790 static void av_estimate_timings(AVFormatContext *ic, offset_t old_offset)
1794 /* get the file size, if possible */
1795 if (ic->iformat->flags & AVFMT_NOFILE) {
1798 file_size = url_fsize(ic->pb);
1802 ic->file_size = file_size;
1804 if ((!strcmp(ic->iformat->name, "mpeg") ||
1805 !strcmp(ic->iformat->name, "mpegts")) &&
1806 file_size && !url_is_streamed(ic->pb)) {
1807 /* get accurate estimate from the PTSes */
1808 av_estimate_timings_from_pts(ic, old_offset);
1809 } else if (av_has_duration(ic)) {
1810 /* at least one component has timings - we use them for all
1812 fill_all_stream_timings(ic);
1814 /* less precise: use bitrate info */
1815 av_estimate_timings_from_bit_rate(ic);
1817 av_update_stream_timings(ic);
1823 for(i = 0;i < ic->nb_streams; i++) {
1824 st = ic->streams[i];
1825 printf("%d: start_time: %0.3f duration: %0.3f\n",
1826 i, (double)st->start_time / AV_TIME_BASE,
1827 (double)st->duration / AV_TIME_BASE);
1829 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1830 (double)ic->start_time / AV_TIME_BASE,
1831 (double)ic->duration / AV_TIME_BASE,
1832 ic->bit_rate / 1000);
/**
 * Check whether enough codec parameters are known to consider the stream
 * fully probed: sample rate and channels for audio (plus frame_size for
 * Vorbis/AAC), width and pixel format for video, and a valid codec id.
 * NOTE(review): elided excerpt; break/default lines are not shown.
 */
1837 static int has_codec_parameters(AVCodecContext *enc)
1840 switch(enc->codec_type) {
1841 case CODEC_TYPE_AUDIO:
1842 val = enc->sample_rate && enc->channels;
/* these codecs only provide frame_size once decoding has started */
1843 if(!enc->frame_size &&
1844 (enc->codec_id == CODEC_ID_VORBIS ||
1845 enc->codec_id == CODEC_ID_AAC))
1848 case CODEC_TYPE_VIDEO:
1849 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1855 return enc->codec_id != CODEC_ID_NONE && val != 0;
/**
 * Open the stream's decoder (if not already open) and decode one frame so
 * that parameters missing from the container (dimensions, sample format,
 * ...) get filled in by the codec itself.
 * NOTE(review): elided excerpt; error paths and cleanup are not shown.
 */
1858 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1862 int got_picture, data_size, ret=0;
1865 if(!st->codec->codec){
1866 codec = avcodec_find_decoder(st->codec->codec_id);
1869 ret = avcodec_open(st->codec, codec);
1874 if(!has_codec_parameters(st->codec)){
1875 switch(st->codec->codec_type) {
1876 case CODEC_TYPE_VIDEO:
1877 ret = avcodec_decode_video(st->codec, &picture,
1878 &got_picture, data, size);
1880 case CODEC_TYPE_AUDIO:
/* allocate the worst-case output buffer for one decoded audio frame */
1881 data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1882 samples = av_malloc(data_size);
1885 ret = avcodec_decode_audio2(st->codec, samples,
1886 &data_size, data, size);
/**
 * Look up the container tag (fourcc) for a codec id in a tag table.
 * NOTE(review): elided excerpt; the match/return lines are not shown.
 */
1897 unsigned int codec_get_tag(const AVCodecTag *tags, int id)
1899 while (tags->id != CODEC_ID_NONE) {
/**
 * Look up the codec id for a container tag: first an exact match, then a
 * case-insensitive comparison of the four tag bytes.
 * NOTE(review): elided excerpt; the return-on-match lines are not shown.
 */
1907 enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag)
1910 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1911 if(tag == tags[i].tag)
/* second pass: tolerate case differences in the fourcc bytes */
1914 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1915 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1916 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1917 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1918 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1921 return CODEC_ID_NONE;
/**
 * Search a NULL-terminated list of tag tables for the tag of a codec id.
 * NOTE(review): elided excerpt; the return lines are not shown.
 */
1924 unsigned int av_codec_get_tag(const AVCodecTag *tags[4], enum CodecID id)
1927 for(i=0; tags && tags[i]; i++){
1928 int tag= codec_get_tag(tags[i], id);
1934 enum CodecID av_codec_get_id(const AVCodecTag *tags[4], unsigned int tag)
1937 for(i=0; tags && tags[i]; i++){
1938 enum CodecID id= codec_get_id(tags[i], tag);
1939 if(id!=CODEC_ID_NONE) return id;
1941 return CODEC_ID_NONE;
/**
 * Fill in missing chapter end times: each open-ended chapter ends where
 * the next one starts; the final chapter ends at start_time + duration.
 * NOTE(review): elided excerpt; some original lines are not shown.
 */
1944 static void compute_chapters_end(AVFormatContext *s)
1948 for (i=0; i+1<s->nb_chapters; i++)
1949 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
1950 assert(s->chapters[i]->start <= s->chapters[i+1]->start);
1951 assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
1952 s->chapters[i]->end = s->chapters[i+1]->start;
/* last chapter: clamp its end to the end of the whole file */
1955 if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
1956 assert(s->start_time != AV_NOPTS_VALUE);
1957 assert(s->duration > 0);
1958 s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
1960 s->chapters[i]->time_base);
1964 /* absolute maximum size we read until we abort */
1965 #define MAX_READ_SIZE 5000000
1967 #define MAX_STD_TIMEBASES (60*12+5)
/**
 * Return the i-th candidate "standard" frame rate used by the duration
 * analysis in av_find_stream_info(), expressed in a 1/(1000*12) second
 * scale.  Indices below 60*12 map linearly to i*1001; the final five
 * entries correspond to 24, 30, 60, 12 and 15 fps.
 */
static int get_std_framerate(int idx)
{
    static const int tail_fps[] = { 24, 30, 60, 12, 15 };

    if (idx < 60 * 12)
        return idx * 1001;
    return tail_fps[idx - 60 * 12] * 1000 * 12;
}
1974 * Is the time base unreliable.
1975 * This is a heuristic to balance between quick acceptance of the values in
1976 * the headers vs. some extra checks.
1977 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
1978 * MPEG-2 commonly misuses field repeat flags to store different framerates.
1979 * And there are "variable" fps files this needs to detect as well.
1981 static int tb_unreliable(AVCodecContext *c){
/* heuristic thresholds: den >= 101*num or den < 5*num, or MPEG-2
   (which commonly misuses field repeat flags -- see the doc above) */
1982 if( c->time_base.den >= 101L*c->time_base.num
1983 || c->time_base.den < 5L*c->time_base.num
1984 /* || c->codec_tag == ff_get_fourcc("DIVX")
1985 || c->codec_tag == ff_get_fourcc("XVID")*/
1986 || c->codec_id == CODEC_ID_MPEG2VIDEO)
/**
 * Probe the streams of an opened file: read packets (buffering them for
 * later demuxing) until every stream has usable codec parameters, a known
 * first DTS and -- for video with an unreliable time base -- enough
 * packet-duration samples to guess the real frame rate.  Decodes frames
 * via try_decode_frame() only as a last resort.  On return the stream
 * timings, r_frame_rate and chapter ends are filled in.
 * NOTE(review): this is an elided excerpt; many statements of the
 * original body are not visible, so comments describe only shown lines.
 */
1991 int av_find_stream_info(AVFormatContext *ic)
1993 int i, count, ret, read_size, j;
1995 AVPacket pkt1, *pkt;
1996 int64_t last_dts[MAX_STREAMS];
1997 int duration_count[MAX_STREAMS]={0};
1998 double (*duration_error)[MAX_STD_TIMEBASES];
1999 offset_t old_offset = url_ftell(ic->pb);
2000 int64_t codec_info_duration[MAX_STREAMS]={0};
2001 int codec_info_nb_frames[MAX_STREAMS]={0};
/* per-stream squared-error accumulators for the frame-rate guess; heap
   allocated because MAX_STREAMS * MAX_STD_TIMEBASES doubles is large */
2003 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
2004 if (!duration_error) return AVERROR(ENOMEM);
2006 for(i=0;i<ic->nb_streams;i++) {
2007 st = ic->streams[i];
2008 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2009 /* if(!st->time_base.num)
2011 if(!st->codec->time_base.num)
2012 st->codec->time_base= st->time_base;
2014 //only for the split stuff
2016 st->parser = av_parser_init(st->codec->codec_id);
2017 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2018 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2023 for(i=0;i<MAX_STREAMS;i++){
2024 last_dts[i]= AV_NOPTS_VALUE;
2030 /* check if one codec still needs to be handled */
2031 for(i=0;i<ic->nb_streams;i++) {
2032 st = ic->streams[i];
2033 if (!has_codec_parameters(st->codec))
2035 /* variable fps and no guess at the real fps */
2036 if( tb_unreliable(st->codec)
2037 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
2039 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2041 if(st->first_dts == AV_NOPTS_VALUE)
2044 if (i == ic->nb_streams) {
2045 /* NOTE: if the format has no header, then we need to read
2046 some packets to get most of the streams, so we cannot
2048 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2049 /* if we found the info for all the codecs, we can stop */
2054 /* we did not get all the codec info, but we read too much data */
2055 if (read_size >= MAX_READ_SIZE) {
2060 /* NOTE: a new stream can be added there if no header in file
2061 (AVFMTCTX_NOHEADER) */
2062 ret = av_read_frame_internal(ic, &pkt1);
2065 ret = -1; /* we could not have all the codec parameters before EOF */
2066 for(i=0;i<ic->nb_streams;i++) {
2067 st = ic->streams[i];
2068 if (!has_codec_parameters(st->codec)){
2070 avcodec_string(buf, sizeof(buf), st->codec, 0);
2071 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
/* buffer the packet so the user still receives it after probing */
2079 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2080 if(av_dup_packet(pkt) < 0) {
2081 av_free(duration_error);
2082 return AVERROR(ENOMEM);
2085 read_size += pkt->size;
2087 st = ic->streams[pkt->stream_index];
2088 if(codec_info_nb_frames[st->index]>1)
2089 codec_info_duration[st->index] += pkt->duration;
2090 if (pkt->duration != 0)
2091 codec_info_nb_frames[st->index]++;
/* accumulate DTS-delta statistics against each standard frame rate */
2094 int index= pkt->stream_index;
2095 int64_t last= last_dts[index];
2096 int64_t duration= pkt->dts - last;
2098 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2099 double dur= duration * av_q2d(st->time_base);
2101 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2102 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2103 if(duration_count[index] < 2)
2104 memset(duration_error[index], 0, sizeof(*duration_error));
2105 for(i=1; i<MAX_STD_TIMEBASES; i++){
2106 int framerate= get_std_framerate(i);
2107 int ticks= lrintf(dur*framerate/(1001*12));
2108 double error= dur - ticks*1001*12/(double)framerate;
2109 duration_error[index][i] += error*error;
2111 duration_count[index]++;
2113 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
2114 last_dts[pkt->stream_index]= pkt->dts;
/* let the parser extract extradata (e.g. headers) from the packet */
2116 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2117 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2119 st->codec->extradata_size= i;
2120 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2121 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2122 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2126 /* if still no information, we try to open the codec and to
2127 decompress the frame. We try to avoid that in most cases as
2128 it takes longer and uses more memory. For MPEG-4, we need to
2129 decompress for QuickTime. */
2130 if (!has_codec_parameters(st->codec) /*&&
2131 (st->codec->codec_id == CODEC_ID_FLV1 ||
2132 st->codec->codec_id == CODEC_ID_H264 ||
2133 st->codec->codec_id == CODEC_ID_H263 ||
2134 st->codec->codec_id == CODEC_ID_H261 ||
2135 st->codec->codec_id == CODEC_ID_VORBIS ||
2136 st->codec->codec_id == CODEC_ID_MJPEG ||
2137 st->codec->codec_id == CODEC_ID_PNG ||
2138 st->codec->codec_id == CODEC_ID_PAM ||
2139 st->codec->codec_id == CODEC_ID_PGM ||
2140 st->codec->codec_id == CODEC_ID_PGMYUV ||
2141 st->codec->codec_id == CODEC_ID_PBM ||
2142 st->codec->codec_id == CODEC_ID_PPM ||
2143 st->codec->codec_id == CODEC_ID_SHORTEN ||
2144 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
2145 try_decode_frame(st, pkt->data, pkt->size);
/* stop probing this stream once max_analyze_duration is exceeded */
2147 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2153 // close codecs which were opened in try_decode_frame()
2154 for(i=0;i<ic->nb_streams;i++) {
2155 st = ic->streams[i];
2156 if(st->codec->codec)
2157 avcodec_close(st->codec);
2159 for(i=0;i<ic->nb_streams;i++) {
2160 st = ic->streams[i];
2161 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2162 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_sample)
2163 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
/* pick the standard frame rate with the lowest accumulated error */
2165 if(duration_count[i]
2166 && tb_unreliable(st->codec) /*&&
2167 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2168 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
2169 double best_error= 2*av_q2d(st->time_base);
2170 best_error= best_error*best_error*duration_count[i]*1000*12*30;
2172 for(j=1; j<MAX_STD_TIMEBASES; j++){
2173 double error= duration_error[i][j] * get_std_framerate(j);
2174 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2175 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2176 if(error < best_error){
2178 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, get_std_framerate(j), 12*1001, INT_MAX);
/* fallback: derive r_frame_rate from codec or stream time base */
2183 if (!st->r_frame_rate.num){
2184 if( st->codec->time_base.den * (int64_t)st->time_base.num
2185 <= st->codec->time_base.num * (int64_t)st->time_base.den){
2186 st->r_frame_rate.num = st->codec->time_base.den;
2187 st->r_frame_rate.den = st->codec->time_base.num;
2189 st->r_frame_rate.num = st->time_base.den;
2190 st->r_frame_rate.den = st->time_base.num;
2193 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
2194 if(!st->codec->bits_per_sample)
2195 st->codec->bits_per_sample= av_get_bits_per_sample(st->codec->codec_id);
2199 av_estimate_timings(ic, old_offset);
2201 compute_chapters_end(ic);
2204 /* correct DTS for B-frame streams with no timestamps */
2205 for(i=0;i<ic->nb_streams;i++) {
2206 st = ic->streams[i];
2207 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2209 ppktl = &ic->packet_buffer;
2211 if(ppkt1->stream_index != i)
2213 if(ppkt1->pkt->dts < 0)
2215 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2217 ppkt1->pkt->dts -= delta;
2222 st->cur_dts -= delta;
2228 av_free(duration_error);
2233 /*******************************************************/
2235 int av_read_play(AVFormatContext *s)
2237 if (s->iformat->read_play)
2238 return s->iformat->read_play(s);
2240 return av_url_read_fpause(s->pb, 0);
2241 return AVERROR(ENOSYS);
2244 int av_read_pause(AVFormatContext *s)
2246 if (s->iformat->read_pause)
2247 return s->iformat->read_pause(s);
2249 return av_url_read_fpause(s->pb, 1);
2250 return AVERROR(ENOSYS);
/**
 * Tear down a demuxing context: release the pending parsed packet, call
 * the demuxer's read_close(), free every per-stream resource (parser,
 * index, extradata, ...), the programs, the queued packets, the private
 * data and the chapters.  Does NOT close the underlying ByteIOContext
 * (see av_close_input_file() for that).
 * NOTE(review): elided excerpt; some original lines are not shown.
 */
2253 void av_close_input_stream(AVFormatContext *s)
2258 /* free previous packet */
2259 if (s->cur_st && s->cur_st->parser)
2260 av_free_packet(&s->cur_pkt);
2262 if (s->iformat->read_close)
2263 s->iformat->read_close(s);
2264 for(i=0;i<s->nb_streams;i++) {
2265 /* free all data in a stream component */
2268 av_parser_close(st->parser);
2270 av_free(st->index_entries);
2271 av_free(st->codec->extradata);
2273 av_free(st->filename);
2274 av_free(st->priv_data);
2277 for(i=s->nb_programs-1; i>=0; i--) {
2278 av_freep(&s->programs[i]->provider_name);
2279 av_freep(&s->programs[i]->name);
2280 av_freep(&s->programs[i]->stream_index);
2281 av_freep(&s->programs[i]);
2283 av_freep(&s->programs);
2284 flush_packet_queue(s);
2285 av_freep(&s->priv_data);
2286 while(s->nb_chapters--) {
2287 av_free(s->chapters[s->nb_chapters]->title);
2288 av_free(s->chapters[s->nb_chapters]);
2290 av_freep(&s->chapters);
2294 void av_close_input_file(AVFormatContext *s)
2296 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2297 av_close_input_stream(s);
/**
 * Allocate and register a new AVStream on the context: allocates its
 * codec context, assigns index/id, initializes all timestamp fields to
 * AV_NOPTS_VALUE and installs the default MPEG-like 33-bit / 90 kHz PTS
 * configuration.
 * NOTE(review): elided excerpt; some original lines are not shown.
 */
2302 AVStream *av_new_stream(AVFormatContext *s, int id)
2307 if (s->nb_streams >= MAX_STREAMS)
2310 st = av_mallocz(sizeof(AVStream));
2314 st->codec= avcodec_alloc_context();
2316 /* no default bitrate if decoding */
2317 st->codec->bit_rate = 0;
2319 st->index = s->nb_streams;
2321 st->start_time = AV_NOPTS_VALUE;
2322 st->duration = AV_NOPTS_VALUE;
2323 /* we set the current DTS to 0 so that formats without any timestamps
2324 but durations get some timestamps, formats with some unknown
2325 timestamps have their first few packets buffered and the
2326 timestamps corrected before they are returned to the user */
2328 st->first_dts = AV_NOPTS_VALUE;
2330 /* default pts setting is MPEG-like */
2331 av_set_pts_info(st, 33, 1, 90000);
2332 st->last_IP_pts = AV_NOPTS_VALUE;
2333 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2334 st->pts_buffer[i]= AV_NOPTS_VALUE;
2336 s->streams[s->nb_streams++] = st;
/**
 * Return the program with the given id, creating and registering a new
 * one (with discard = AVDISCARD_NONE) if none exists yet.
 * NOTE(review): elided excerpt; some original lines are not shown.
 */
2340 AVProgram *av_new_program(AVFormatContext *ac, int id)
2342 AVProgram *program=NULL;
2346 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
/* reuse an existing program with the same id if present */
2349 for(i=0; i<ac->nb_programs; i++)
2350 if(ac->programs[i]->id == id)
2351 program = ac->programs[i];
2354 program = av_mallocz(sizeof(AVProgram));
2357 dynarray_add(&ac->programs, &ac->nb_programs, program);
2358 program->discard = AVDISCARD_NONE;
/**
 * Replace a program's provider and display names with duplicated copies.
 * Both names must be given together or both be NULL (enforced by the
 * assert); the old strings are freed first.
 * NOTE(review): elided excerpt; some original lines are not shown.
 */
2365 void av_set_program_name(AVProgram *program, char *provider_name, char *name)
2367 assert(!provider_name == !name);
2369 av_free(program->provider_name);
2370 av_free(program-> name);
2371 program->provider_name = av_strdup(provider_name);
2372 program-> name = av_strdup( name);
/**
 * Return the chapter with the given id, creating and registering it if
 * needed, then (re)set its title, time base and start time.
 * NOTE(review): elided excerpt; the end-time handling and return are not
 * shown.
 */
2376 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2378 AVChapter *chapter = NULL;
/* reuse an existing chapter with the same id if present */
2381 for(i=0; i<s->nb_chapters; i++)
2382 if(s->chapters[i]->id == id)
2383 chapter = s->chapters[i];
2386 chapter= av_mallocz(sizeof(AVChapter));
2389 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2391 av_free(chapter->title);
2392 chapter->title = av_strdup(title);
2394 chapter->time_base= time_base;
2395 chapter->start = start;
2401 /************************************************************/
2402 /* output media file */
/**
 * Allocate the muxer's private data block and forward the optional
 * AVFormatParameters to the output format's set_parameters() callback.
 * NOTE(review): elided excerpt; some original lines are not shown.
 */
2404 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2408 if (s->oformat->priv_data_size > 0) {
2409 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2411 return AVERROR(ENOMEM);
2413 s->priv_data = NULL;
2415 if (s->oformat->set_parameters) {
2416 ret = s->oformat->set_parameters(s, ap);
/**
 * Validate stream parameters (sample rate, time base, dimensions),
 * reconcile codec tags against the muxer's tag tables, allocate private
 * data if needed, call the muxer's write_header() and initialize the
 * fractional PTS generators for every stream.
 * NOTE(review): elided excerpt; some original lines are not shown.
 */
2423 int av_write_header(AVFormatContext *s)
2428 // some sanity checks
2429 for(i=0;i<s->nb_streams;i++) {
2432 switch (st->codec->codec_type) {
2433 case CODEC_TYPE_AUDIO:
2434 if(st->codec->sample_rate<=0){
2435 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2439 case CODEC_TYPE_VIDEO:
2440 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2441 av_log(s, AV_LOG_ERROR, "time base not set\n");
2444 if(st->codec->width<=0 || st->codec->height<=0){
2445 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2451 if(s->oformat->codec_tag){
2452 if(st->codec->codec_tag){
2454 //check that tag + id is in the table
2455 //if neither is in the table -> OK
2456 //if tag is in the table with another id -> FAIL
2457 //if id is in the table with another tag -> FAIL unless strict < ?
2459 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2463 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2464 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2466 return AVERROR(ENOMEM);
2469 if(s->oformat->write_header){
2470 ret = s->oformat->write_header(s);
2475 /* init PTS generation */
2476 for(i=0;i<s->nb_streams;i++) {
2477 int64_t den = AV_NOPTS_VALUE;
2480 switch (st->codec->codec_type) {
/* the fraction denominator encodes "ticks per second" in stream units */
2481 case CODEC_TYPE_AUDIO:
2482 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2484 case CODEC_TYPE_VIDEO:
2485 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2490 if (den != AV_NOPTS_VALUE) {
2492 return AVERROR_INVALIDDATA;
2493 av_frac_init(&st->pts, 0, 0, den);
2499 //FIXME merge with compute_pkt_fields
/**
 * Fill in missing packet fields before muxing: compute a duration when
 * absent, synthesize pts/dts for streams without B-frame delay, reorder
 * pts into dts via the stream's pts_buffer, validate monotonicity and
 * pts >= dts, and advance the stream's fractional PTS generator.
 * NOTE(review): elided excerpt; some original lines are not shown.
 */
2500 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2501 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2502 int num, den, frame_size, i;
2504 // av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2506 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2509 /* duration field */
2510 if (pkt->duration == 0) {
2511 compute_frame_duration(&num, &den, st, NULL, pkt);
2513 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
/* without reordering delay, dts and pts coincide */
2517 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2520 //XXX/FIXME this is a temporary hack until all encoders output pts
2521 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2523 // pkt->pts= st->cur_dts;
2524 pkt->pts= st->pts.val;
2527 //calculate dts from pts
2528 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
2529 st->pts_buffer[0]= pkt->pts;
2530 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2531 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
/* single bubble pass keeps the buffer sorted; its minimum is the dts */
2532 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2533 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2535 pkt->dts= st->pts_buffer[0];
2538 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2539 av_log(st->codec, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2542 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2543 av_log(st->codec, AV_LOG_ERROR, "error, pts < dts\n");
2547 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2548 st->cur_dts= pkt->dts;
2549 st->pts.val= pkt->dts;
/* update expected pts of the next packet */
2552 switch (st->codec->codec_type) {
2553 case CODEC_TYPE_AUDIO:
2554 frame_size = get_audio_frame_size(st->codec, pkt->size);
2556 /* HACK/FIXME, we skip the initial 0 size packets as they are most
2557 likely equal to the encoder delay, but it would be better if we
2558 had the real timestamps from the encoder */
2559 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2560 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2563 case CODEC_TYPE_VIDEO:
2564 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
/**
 * Wrap pts/dts into the stream's pts_wrap_bits-wide range by masking,
 * leaving AV_NOPTS_VALUE untouched.
 * NOTE(review): elided excerpt; the closing lines are not shown.
 */
2572 static void truncate_ts(AVStream *st, AVPacket *pkt){
2573 int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
2576 // pkt->dts= 0; //this happens for low_delay=0 and B-frames, FIXME, needs further investigation about what we should do here
2578 if (pkt->pts != AV_NOPTS_VALUE)
2579 pkt->pts &= pts_mask;
2580 if (pkt->dts != AV_NOPTS_VALUE)
2581 pkt->dts &= pts_mask;
/**
 * Write a packet straight to the muxer (no interleaving): compute missing
 * timestamp fields, truncate timestamps to the stream's wrap range and
 * pass the packet to the output format's write_packet().
 * NOTE(review): elided excerpt; some original lines are not shown.
 */
2584 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2586 int ret = compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
/* timestamp problems are fatal unless the muxer ignores timestamps */
2588 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2591 truncate_ts(s->streams[pkt->stream_index], pkt);
2593 ret= s->oformat->write_packet(s, pkt);
2595 ret= url_ferror(s->pb);
/**
 * Default interleaving: buffer incoming packets in a list kept sorted by
 * dts (compared across time bases) and emit the head packet once every
 * stream has contributed at least one buffered packet, or when flushing.
 * NOTE(review): elided excerpt; some original lines are not shown.
 */
2599 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2600 AVPacketList *pktl, **next_point, *this_pktl;
2602 int streams[MAX_STREAMS];
2605 AVStream *st= s->streams[ pkt->stream_index];
2607 // assert(pkt->destruct != av_destruct_packet); //FIXME
2609 this_pktl = av_mallocz(sizeof(AVPacketList));
2610 this_pktl->pkt= *pkt;
/* take ownership: prevent the caller's copy from freeing shared data */
2611 if(pkt->destruct == av_destruct_packet)
2612 pkt->destruct= NULL; // not shared -> must keep original from being freed
2614 av_dup_packet(&this_pktl->pkt); //shared -> must dup
/* insertion sort by dts, comparing across differing time bases */
2616 next_point = &s->packet_buffer;
2618 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2619 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2620 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2621 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2623 next_point= &(*next_point)->next;
2625 this_pktl->next= *next_point;
2626 *next_point= this_pktl;
/* count how many distinct streams currently have buffered packets */
2629 memset(streams, 0, sizeof(streams));
2630 pktl= s->packet_buffer;
2632 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
2633 if(streams[ pktl->pkt.stream_index ] == 0)
2635 streams[ pktl->pkt.stream_index ]++;
/* emit the head once all streams are represented or when flushing */
2639 if(stream_count && (s->nb_streams == stream_count || flush)){
2640 pktl= s->packet_buffer;
2643 s->packet_buffer= pktl->next;
2647 av_init_packet(out);
2653 * Interleaves an AVPacket correctly so it can be muxed.
2654 * @param out the interleaved packet will be output here
2655 * @param in the input packet
2656 * @param flush 1 if no further packets are available as input and all
2657 * remaining packets should be output
2658 * @return 1 if a packet was output, 0 if no packet could be output,
2659 * < 0 if an error occurred
2661 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2662 if(s->oformat->interleave_packet)
2663 return s->oformat->interleave_packet(s, out, in, flush);
2665 return av_interleave_packet_per_dts(s, out, in, flush);
/**
 * Buffered write path: compute packet fields, push the packet through the
 * interleaver and write out every packet the interleaver releases.
 * NOTE(review): elided excerpt; some original lines are not shown.
 */
2668 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2669 AVStream *st= s->streams[ pkt->stream_index];
2671 //FIXME/XXX/HACK drop zero sized packets
2672 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2675 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2676 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2679 if(pkt->dts == AV_NOPTS_VALUE)
/* drain the interleaver: write every packet it is ready to release */
2684 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2685 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2688 truncate_ts(s->streams[opkt.stream_index], &opkt);
2689 ret= s->oformat->write_packet(s, &opkt);
2691 av_free_packet(&opkt);
2696 if(url_ferror(s->pb))
2697 return url_ferror(s->pb);
/**
 * Finish muxing: flush every remaining packet out of the interleaver,
 * call the muxer's write_trailer() and free per-stream and muxer private
 * data.
 * NOTE(review): elided excerpt; some original lines are not shown.
 */
2701 int av_write_trailer(AVFormatContext *s)
/* flush mode: keep draining until the interleaver has nothing left */
2707 ret= av_interleave_packet(s, &pkt, NULL, 1);
2708 if(ret<0) //FIXME cleanup needed for ret<0 ?
2713 truncate_ts(s->streams[pkt.stream_index], &pkt);
2714 ret= s->oformat->write_packet(s, &pkt);
2716 av_free_packet(&pkt);
2720 if(url_ferror(s->pb))
2724 if(s->oformat->write_trailer)
2725 ret = s->oformat->write_trailer(s);
2728 ret=url_ferror(s->pb);
2729 for(i=0;i<s->nb_streams;i++)
2730 av_freep(&s->streams[i]->priv_data);
2731 av_freep(&s->priv_data);
/**
 * Adds stream index idx to the program with id progid inside ac.
 * NOTE(review): fragmentary numbered listing — declarations of i/j/tmp,
 * the continue/early-return lines and closing braces are not visible.
 */
2735 void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
2738 AVProgram *program=NULL;
/* locate the program whose id matches progid */
2741 for(i=0; i<ac->nb_programs; i++){
2742 if(ac->programs[i]->id != progid)
2744 program = ac->programs[i];
/* skip insertion when idx is already registered for this program */
2745 for(j=0; j<program->nb_stream_indexes; j++)
2746 if(program->stream_index[j] == idx)
/* grow the index array via a temporary so the original pointer survives
 * a failed av_realloc() (failure branch not visible here) */
2749 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
2752 program->stream_index = tmp;
2753 program->stream_index[program->nb_stream_indexes++] = idx;
2758 /* "user interface" functions */
/**
 * Logs a one-line description of stream i of ic at AV_LOG_INFO level
 * (codec string, optional id, language, and frame-rate/timebase info).
 * NOTE(review): fragmentary numbered listing — the declaration of buf
 * and some braces are not visible between the numbered lines below.
 */
2759 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
2762 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
2763 AVStream *st = ic->streams[i];
/* reduce the timebase fraction for the debug-level num/den print below */
2764 int g = ff_gcd(st->time_base.num, st->time_base.den);
2765 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2766 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2767 /* the pid is an important information, so we display it */
2768 /* XXX: add a generic system */
/* stream id is printed only for formats that advertise AVFMT_SHOW_IDS */
2769 if (flags & AVFMT_SHOW_IDS)
2770 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2771 if (strlen(st->language) > 0)
2772 av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
2773 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2774 av_log(NULL, AV_LOG_INFO, ": %s", buf);
/* video streams additionally report the real frame rate and codec timebase */
2775 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2776 if(st->r_frame_rate.den && st->r_frame_rate.num)
2777 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(r)", av_q2d(st->r_frame_rate));
2778 /* else if(st->time_base.den && st->time_base.num)
2779 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(m)", 1/av_q2d(st->time_base));*/
2781 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(c)", 1/av_q2d(st->codec->time_base));
2783 av_log(NULL, AV_LOG_INFO, "\n");
/**
 * Logs a human-readable description of ic at AV_LOG_INFO level:
 * format name, duration, start time, bitrate, programs and streams.
 * NOTE(review): fragmentary numbered listing — the tail of the
 * parameter list, local declarations and several braces/else branches
 * are not visible between the numbered lines below.
 */
2786 void dump_format(AVFormatContext *ic,
2793 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2794 is_output ? "Output" : "Input",
2796 is_output ? ic->oformat->name : ic->iformat->name,
2797 is_output ? "to" : "from", url);
2799 av_log(NULL, AV_LOG_INFO, " Duration: ");
2800 if (ic->duration != AV_NOPTS_VALUE) {
2801 int hours, mins, secs, us;
/* split total duration into whole seconds plus fractional microseconds
 * (the lines deriving hours/mins are not visible here) */
2802 secs = ic->duration / AV_TIME_BASE;
2803 us = ic->duration % AV_TIME_BASE;
2808 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
2809 (100 * us) / AV_TIME_BASE);
2811 av_log(NULL, AV_LOG_INFO, "N/A");
2813 if (ic->start_time != AV_NOPTS_VALUE) {
2815 av_log(NULL, AV_LOG_INFO, ", start: ");
2816 secs = ic->start_time / AV_TIME_BASE;
2817 us = ic->start_time % AV_TIME_BASE;
/* start time printed as seconds.microseconds */
2818 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2819 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2821 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2823 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2825 av_log(NULL, AV_LOG_INFO, "N/A");
2827 av_log(NULL, AV_LOG_INFO, "\n");
/* list each program and the streams it references */
2829 if(ic->nb_programs) {
2831 for(j=0; j<ic->nb_programs; j++) {
2832 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
2833 ic->programs[j]->name ? ic->programs[j]->name : "");
2834 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++)
2835 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
/* presumably an else branch: without programs, dump every stream directly */
2838 for(i=0;i<ic->nb_streams;i++)
2839 dump_stream_format(ic, i, index, is_output);
/**
 * Parses str into a width and height.
 * Thin wrapper kept for the old public API; all work is delegated to
 * av_parse_video_frame_size().
 * @param width_ptr receives the parsed width
 * @param height_ptr receives the parsed height
 * @param str the string to parse
 * @return the return value of av_parse_video_frame_size()
 */
int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
{
    return av_parse_video_frame_size(width_ptr, height_ptr, str);
}
2847 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
2849 AVRational frame_rate;
2850 int ret = av_parse_video_frame_rate(&frame_rate, arg);
2851 *frame_rate_num= frame_rate.num;
2852 *frame_rate_den= frame_rate.den;
2857 * Gets the current time in microseconds.
2859 int64_t av_gettime(void)
2862 gettimeofday(&tv,NULL);
2863 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
/**
 * Parses datestr either as an absolute date/time (UTC when it ends in
 * 'z'/'Z') or, when duration is non-zero, as a duration; a trailing
 * fractional part is also handled.
 * @return the parsed time, negated when a leading minus was seen
 * NOTE(review): fragmentary numbered listing — the format tables'
 * contents, several branches, and the scaling of t to microseconds are
 * not visible between the numbered lines below.
 */
2866 int64_t parse_date(const char *datestr, int duration)
2872 static const char *date_fmt[] = {
2876 static const char *time_fmt[] = {
2886 time_t now = time(0);
2888 len = strlen(datestr);
/* a trailing 'z'/'Z' marks the timestamp as UTC */
2890 lastch = datestr[len - 1];
2893 is_utc = (lastch == 'z' || lastch == 'Z');
2895 memset(&dt, 0, sizeof(dt));
2900 /* parse the year-month-day part */
2901 for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
2902 q = small_strptime(p, date_fmt[i], &dt);
2908 /* if the year-month-day part is missing, then take the
2909 * current year-month-day time */
2914 dt = *localtime(&now);
2916 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
/* optional date/time separator: 'T', 't' or a space */
2921 if (*p == 'T' || *p == 't' || *p == ' ')
2924 /* parse the hour-minute-second part */
2925 for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
2926 q = small_strptime(p, time_fmt[i], &dt);
2932 /* parse datestr as a duration */
2937 /* parse datestr as HH:MM:SS */
2938 q = small_strptime(p, time_fmt[0], &dt);
2940 /* parse datestr as S+ */
2941 dt.tm_sec = strtol(p, (char **)&q, 10);
2943 /* the parsing didn't succeed */
2950 /* Now we have all the fields that we can get */
2956 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2958 dt.tm_isdst = -1; /* unknown */
2968 /* parse the .m... part */
/* accumulate up to 6 fractional digits as microseconds */
2972 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2975 val += n * (*q - '0');
2979 return negative ? -t : t;
/**
 * Searches the URL-style parameter string info for tag1 and, when
 * found, copies the associated value into arg (bounded by arg_size).
 * NOTE(review): fragmentary numbered listing — local declarations, the
 * outer scan loop and the return statements are not visible below.
 */
2982 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
/* copy the current tag name up to '=' or '&', bounded by sizeof(tag) */
2992 while (*p != '\0' && *p != '=' && *p != '&') {
2993 if ((q - tag) < sizeof(tag) - 1)
/* copy the tag's value up to the next '&', bounded by arg_size */
3001 while (*p != '&' && *p != '\0') {
3002 if ((q - arg) < arg_size - 1) {
/* found when the scanned tag name matches tag1 exactly */
3012 if (!strcmp(tag, tag1))
/**
 * Expands a single %d-style specifier (with optional zero-padded
 * width) in path with number, writing the result into buf.
 * NOTE(review): fragmentary numbered listing — the main scan loop,
 * error paths and the success return are not visible below; the
 * overall contract (fail when no %d is present) is inferred from the
 * percentd_found check — confirm against the full source.
 */
3021 int av_get_frame_filename(char *buf, int buf_size,
3022 const char *path, int number)
3025 char *q, buf1[20], c;
3026 int nd, len, percentd_found;
/* accumulate the optional field-width digits following '%' */
3038 while (isdigit(*p)) {
3039 nd = nd * 10 + *p++ - '0';
3042 } while (isdigit(c));
/* render the frame number zero-padded to width nd */
3051 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
/* bail out if the expansion would overflow buf */
3053 if ((q - buf + len) > buf_size - 1)
3055 memcpy(q, buf1, len);
3063 if ((q - buf) < buf_size - 1)
/* a path with no %d specifier is an error */
3067 if (!percentd_found)
/**
 * Shared worker for av_hex_dump()/av_hex_dump_log(): dumps buf 16
 * bytes per row, as hex values followed by their printable characters.
 * When f is NULL, output goes through av_log(avcl, level, ...);
 * otherwise it is fprintf'd to f.
 * NOTE(review): fragmentary numbered listing — the len computation and
 * the substitution for non-printable characters are not visible below.
 */
3076 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
/* route each formatted chunk to av_log() or to the stdio stream */
3079 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3081 for(i=0;i<size;i+=16) {
3088 PRINT(" %02x", buf[i+j]);
/* second pass over the row: printable-character column */
3093 for(j=0;j<len;j++) {
3095 if (c < ' ' || c > '~')
/**
 * Hex-dumps buf to the stdio stream f.
 * Wrapper around hex_dump_internal() with stream output selected.
 * @param f stream to dump to
 * @param buf buffer to dump
 * @param size number of bytes to dump
 */
void av_hex_dump(FILE *f, uint8_t *buf, int size)
{
    hex_dump_internal(NULL, f, 0, buf, size);
}
3109 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3111 hex_dump_internal(avcl, NULL, level, buf, size);
3114 //FIXME needs to know the time_base
/**
 * Shared worker for av_pkt_dump()/av_pkt_dump_log(): prints pkt's
 * stream index, keyframe flag, duration, dts, pts and size, and
 * optionally hex-dumps the payload. When f is NULL output goes through
 * av_log(avcl, level, ...); otherwise it is fprintf'd to f.
 * NOTE(review): fragmentary numbered listing — the label prints for
 * dts/pts, the "N/A" branches and the dump_payload check are not
 * visible between the numbered lines below.
 */
3115 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
3117 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3118 PRINT("stream #%d:\n", pkt->stream_index);
3119 PRINT(" keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
3120 PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
3121 /* DTS is _always_ valid after av_read_frame() */
3123 if (pkt->dts == AV_NOPTS_VALUE)
3126 PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
3127 /* PTS may not be known if B-frames are present. */
3129 if (pkt->pts == AV_NOPTS_VALUE)
3132 PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
3134 PRINT(" size=%d\n", pkt->size);
/* payload dump (guard with dump_payload not visible here) */
3137 av_hex_dump(f, pkt->data, pkt->size);
3140 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3142 pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
3145 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3147 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
/**
 * Splits url into protocol, authorization (user[:pass]), hostname,
 * port and path components; each output buffer is bounded by its size
 * argument and zeroed/defaulted up front so callers always get a
 * defined result.
 * NOTE(review): fragmentary numbered listing — the port_ptr parameter
 * declaration, a few else/brace lines and the function's closing brace
 * are not visible between the numbered lines below.
 */
3150 void url_split(char *proto, int proto_size,
3151 char *authorization, int authorization_size,
3152 char *hostname, int hostname_size,
3154 char *path, int path_size,
3157 const char *p, *ls, *at, *col, *brk;
/* initialize every output so partial parses still leave sane values */
3159 if (port_ptr) *port_ptr = -1;
3160 if (proto_size > 0) proto[0] = 0;
3161 if (authorization_size > 0) authorization[0] = 0;
3162 if (hostname_size > 0) hostname[0] = 0;
3163 if (path_size > 0) path[0] = 0;
3165 /* parse protocol */
3166 if ((p = strchr(url, ':'))) {
3167 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3172 /* no protocol means plain filename */
3173 av_strlcpy(path, url, path_size);
3177 /* separate path from hostname */
/* the path starts at the first '/' or, failing that, the first '?' */
3178 ls = strchr(p, '/');
3180 ls = strchr(p, '?');
3182 av_strlcpy(path, ls, path_size);
3184 ls = &p[strlen(p)]; // XXX
3186 /* the rest is hostname, use that to parse auth/port */
3188 /* authorization (user[:pass]@hostname) */
3189 if ((at = strchr(p, '@')) && at < ls) {
3190 av_strlcpy(authorization, p,
3191 FFMIN(authorization_size, at + 1 - p));
3192 p = at + 1; /* skip '@' */
/* bracketed form handles IPv6 literals: [host]:port */
3195 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3197 av_strlcpy(hostname, p + 1,
3198 FFMIN(hostname_size, brk - p));
3199 if (brk[1] == ':' && port_ptr)
3200 *port_ptr = atoi(brk + 2);
/* plain host:port form */
3201 } else if ((col = strchr(p, ':')) && col < ls) {
3202 av_strlcpy(hostname, p,
3203 FFMIN(col + 1 - p, hostname_size));
3204 if (port_ptr) *port_ptr = atoi(col + 1);
/* presumably the else branch: hostname with no port */
3206 av_strlcpy(hostname, p,
3207 FFMIN(ls + 1 - p, hostname_size));
3211 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3212 int pts_num, int pts_den)
3214 unsigned int gcd= ff_gcd(pts_num, pts_den);
3215 s->pts_wrap_bits = pts_wrap_bits;
3216 s->time_base.num = pts_num/gcd;
3217 s->time_base.den = pts_den/gcd;
3220 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, gcd);