/*
 * various utility functions for use within FFmpeg
 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
22 #include "libavcodec/opt.h"
23 #include "libavutil/avstring.h"
/**
 * @file libavformat/utils.c
 * various utility functions for use within FFmpeg
 */
37 unsigned avformat_version(void)
39 return LIBAVFORMAT_VERSION_INT;
42 /* fraction handling */
45 * f = val + (num / den) + 0.5.
47 * 'num' is normalized so that it is such as 0 <= num < den.
49 * @param f fractional number
50 * @param val integer value
51 * @param num must be >= 0
52 * @param den must be >= 1
54 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
67 * Fractional addition to f: f = f + (incr / f->den).
69 * @param f fractional number
70 * @param incr increment, can be positive or negative
72 static void av_frac_add(AVFrac *f, int64_t incr)
85 } else if (num >= den) {
92 /** head of registered input format linked list */
93 AVInputFormat *first_iformat = NULL;
94 /** head of registered output format linked list */
95 AVOutputFormat *first_oformat = NULL;
97 AVInputFormat *av_iformat_next(AVInputFormat *f)
100 else return first_iformat;
103 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
105 if(f) return f->next;
106 else return first_oformat;
109 void av_register_input_format(AVInputFormat *format)
113 while (*p != NULL) p = &(*p)->next;
118 void av_register_output_format(AVOutputFormat *format)
122 while (*p != NULL) p = &(*p)->next;
/**
 * Check whether the extension of 'filename' matches one entry of the
 * comma-separated list 'extensions'. Comparison is case-insensitive.
 *
 * @param filename   file name, may be NULL (returns 0)
 * @param extensions comma-separated list, e.g. "mp2,mp3,m4a"
 * @return 1 on match, 0 otherwise
 */
int match_ext(const char *filename, const char *extensions)
{
    const char *ext, *p;
    char ext1[32], *q;

    if(!filename)
        return 0;

    ext = strrchr(filename, '.');
    if (ext) {
        ext++;
        p = extensions;
        for(;;) {
            /* copy the next list entry, truncated to the local buffer */
            q = ext1;
            while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
                *q++ = *p++;
            *q = '\0';
            if (!strcasecmp(ext1, ext))
                return 1;
            if (*p == '\0')
                break;
            p++;
        }
    }
    return 0;
}
154 AVOutputFormat *guess_format(const char *short_name, const char *filename,
155 const char *mime_type)
157 AVOutputFormat *fmt, *fmt_found;
158 int score_max, score;
160 /* specific test for image sequences */
161 #ifdef CONFIG_IMAGE2_MUXER
162 if (!short_name && filename &&
163 av_filename_number_test(filename) &&
164 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
165 return guess_format("image2", NULL, NULL);
168 /* Find the proper file type. */
172 while (fmt != NULL) {
174 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
176 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
178 if (filename && fmt->extensions &&
179 match_ext(filename, fmt->extensions)) {
182 if (score > score_max) {
191 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
192 const char *mime_type)
194 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
197 AVOutputFormat *stream_fmt;
198 char stream_format_name[64];
200 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
201 stream_fmt = guess_format(stream_format_name, NULL, NULL);
210 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
211 const char *filename, const char *mime_type, enum CodecType type){
212 if(type == CODEC_TYPE_VIDEO){
213 enum CodecID codec_id= CODEC_ID_NONE;
215 #ifdef CONFIG_IMAGE2_MUXER
216 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
217 codec_id= av_guess_image2_codec(filename);
220 if(codec_id == CODEC_ID_NONE)
221 codec_id= fmt->video_codec;
223 }else if(type == CODEC_TYPE_AUDIO)
224 return fmt->audio_codec;
226 return CODEC_ID_NONE;
229 AVInputFormat *av_find_input_format(const char *short_name)
232 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
233 if (!strcmp(fmt->name, short_name))
239 /* memory handling */
241 void av_destruct_packet(AVPacket *pkt)
244 pkt->data = NULL; pkt->size = 0;
247 void av_init_packet(AVPacket *pkt)
249 pkt->pts = AV_NOPTS_VALUE;
250 pkt->dts = AV_NOPTS_VALUE;
254 pkt->stream_index = 0;
255 pkt->destruct= av_destruct_packet_nofree;
258 int av_new_packet(AVPacket *pkt, int size)
261 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
262 return AVERROR(ENOMEM);
263 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
265 return AVERROR(ENOMEM);
266 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
271 pkt->destruct = av_destruct_packet;
275 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
277 int ret= av_new_packet(pkt, size);
282 pkt->pos= url_ftell(s);
284 ret= get_buffer(s, pkt->data, size);
293 int av_dup_packet(AVPacket *pkt)
295 if (pkt->destruct != av_destruct_packet) {
297 /* We duplicate the packet and don't forget to add the padding again. */
298 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
299 return AVERROR(ENOMEM);
300 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
302 return AVERROR(ENOMEM);
304 memcpy(data, pkt->data, pkt->size);
305 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
307 pkt->destruct = av_destruct_packet;
/**
 * Check whether 'filename' contains a valid frame-number pattern
 * (e.g. "%d") by attempting to expand it for frame 1.
 * @return 1 if the pattern is valid, 0 otherwise (including NULL input)
 */
int av_filename_number_test(const char *filename)
{
    char buf[20];
    return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
}
318 static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
320 AVInputFormat *fmt1, *fmt;
324 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
325 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
328 if (fmt1->read_probe) {
329 score = fmt1->read_probe(pd);
330 } else if (fmt1->extensions) {
331 if (match_ext(pd->filename, fmt1->extensions)) {
335 if (score > *score_max) {
338 }else if (score == *score_max)
344 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
346 return av_probe_input_format2(pd, is_opened, &score);
349 static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
352 fmt = av_probe_input_format2(pd, 1, &score);
355 if (!strcmp(fmt->name, "mp3"))
356 st->codec->codec_id = CODEC_ID_MP3;
357 else if (!strcmp(fmt->name, "ac3"))
358 st->codec->codec_id = CODEC_ID_AC3;
359 else if (!strcmp(fmt->name, "mpegvideo"))
360 st->codec->codec_id = CODEC_ID_MPEG2VIDEO;
361 else if (!strcmp(fmt->name, "h264"))
362 st->codec->codec_id = CODEC_ID_H264;
367 /************************************************************/
368 /* input media file */
371 * Open a media file from an IO stream. 'fmt' must be specified.
373 static const char* format_to_name(void* ptr)
375 AVFormatContext* fc = (AVFormatContext*) ptr;
376 if(fc->iformat) return fc->iformat->name;
377 else if(fc->oformat) return fc->oformat->name;
381 #define OFFSET(x) offsetof(AVFormatContext,x)
382 #define DEFAULT 0 //should be NAN but it does not work as it is not a constant in glibc as required by ANSI/ISO C
383 //these names are too long to be readable
384 #define E AV_OPT_FLAG_ENCODING_PARAM
385 #define D AV_OPT_FLAG_DECODING_PARAM
387 static const AVOption options[]={
388 {"probesize", NULL, OFFSET(probesize), FF_OPT_TYPE_INT, 32000, 32, INT_MAX, D}, /* 32000 from mpegts.c: 1.0 second at 24Mbit/s */
389 {"muxrate", "set mux rate", OFFSET(mux_rate), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
390 {"packetsize", "set packet size", OFFSET(packet_size), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
391 {"fflags", NULL, OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, D|E, "fflags"},
392 {"ignidx", "ignore index", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_IGNIDX, INT_MIN, INT_MAX, D, "fflags"},
393 {"genpts", "generate pts", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_GENPTS, INT_MIN, INT_MAX, D, "fflags"},
394 {"track", " set the track number", OFFSET(track), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
395 {"year", "set the year", OFFSET(year), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, E},
396 {"analyzeduration", "how many microseconds are analyzed to estimate duration", OFFSET(max_analyze_duration), FF_OPT_TYPE_INT, 3*AV_TIME_BASE, 0, INT_MAX, D},
397 {"cryptokey", "decryption key", OFFSET(key), FF_OPT_TYPE_BINARY, 0, 0, 0, D},
398 {"indexmem", "max memory used for timestamp index (per stream)", OFFSET(max_index_size), FF_OPT_TYPE_INT, 1<<20, 0, INT_MAX, D},
399 {"rtbufsize", "max memory used for buffering real-time frames", OFFSET(max_picture_buffer), FF_OPT_TYPE_INT, 3041280, 0, INT_MAX, D}, /* defaults to 1s of 15fps 352x288 YUYV422 video */
400 {"fdebug", "print specific debug info", OFFSET(debug), FF_OPT_TYPE_FLAGS, DEFAULT, 0, INT_MAX, E|D, "fdebug"},
401 {"ts", NULL, 0, FF_OPT_TYPE_CONST, FF_FDEBUG_TS, INT_MIN, INT_MAX, E|D, "fdebug"},
409 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name, options };
411 static void avformat_get_context_defaults(AVFormatContext *s)
413 memset(s, 0, sizeof(AVFormatContext));
415 s->av_class = &av_format_context_class;
417 av_opt_set_defaults(s);
420 AVFormatContext *av_alloc_format_context(void)
423 ic = av_malloc(sizeof(AVFormatContext));
425 avformat_get_context_defaults(ic);
426 ic->av_class = &av_format_context_class;
430 int av_open_input_stream(AVFormatContext **ic_ptr,
431 ByteIOContext *pb, const char *filename,
432 AVInputFormat *fmt, AVFormatParameters *ap)
436 AVFormatParameters default_ap;
440 memset(ap, 0, sizeof(default_ap));
443 if(!ap->prealloced_context)
444 ic = av_alloc_format_context();
448 err = AVERROR(ENOMEM);
453 ic->duration = AV_NOPTS_VALUE;
454 ic->start_time = AV_NOPTS_VALUE;
455 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
457 /* allocate private data */
458 if (fmt->priv_data_size > 0) {
459 ic->priv_data = av_mallocz(fmt->priv_data_size);
460 if (!ic->priv_data) {
461 err = AVERROR(ENOMEM);
465 ic->priv_data = NULL;
468 if (ic->iformat->read_header) {
469 err = ic->iformat->read_header(ic, ap);
474 if (pb && !ic->data_offset)
475 ic->data_offset = url_ftell(ic->pb);
482 av_freep(&ic->priv_data);
483 for(i=0;i<ic->nb_streams;i++) {
484 AVStream *st = ic->streams[i];
486 av_free(st->priv_data);
487 av_free(st->codec->extradata);
/** size of probe buffer, for guessing file type from file contents */
#define PROBE_BUF_MIN 2048
#define PROBE_BUF_MAX (1<<20)
/*
 * NOTE(review): truncated fragment of av_open_input_file() — interior lines
 * (declarations, error paths, closing braces) are missing from this extract.
 * Visible flow: probe by filename, open the file unless AVFMT_NOFILE, probe
 * contents with geometrically growing buffers, then delegate to
 * av_open_input_stream(). Restore from the canonical libavformat/utils.c
 * before building.
 */
501 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
504 AVFormatParameters *ap)
507 AVProbeData probe_data, *pd = &probe_data;
508 ByteIOContext *pb = NULL;
512 pd->filename = filename;
517 /* guess format if no file can be opened */
518 fmt = av_probe_input_format(pd, 0);
521 /* Do not open file if the format does not need it. XXX: specific
522 hack needed to handle RTSP/TCP */
523 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
524 /* if no file needed do not try to open one */
525 if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
529 url_setbufsize(pb, buf_size);
/* probe loop: doubles the buffer until a format is identified or the cap
   is reached; the final pass accepts any score (score threshold 0) */
532 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
533 int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
534 /* read probe data */
535 pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
536 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
537 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
538 if (url_fseek(pb, 0, SEEK_SET) < 0) {
540 if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
546 /* guess file format */
547 fmt = av_probe_input_format2(pd, 1, &score);
552 /* if still no format found, error */
558 /* check filename in case an image number is expected */
559 if (fmt->flags & AVFMT_NEEDNUMBER) {
560 if (!av_filename_number_test(filename)) {
561 err = AVERROR_NUMEXPECTED;
565 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
578 /*******************************************************/
580 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
581 AVPacketList **plast_pktl){
582 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
587 (*plast_pktl)->next = pktl;
589 *packet_buffer = pktl;
591 /* add the packet in the buffered packet list */
/*
 * NOTE(review): truncated fragment of av_read_packet() — error handling,
 * loop structure and several statements are missing from this extract.
 * Visible flow: drain the raw packet buffer first; otherwise read from the
 * demuxer, apply forced codec IDs, buffer CODEC_ID_PROBE packets and feed
 * their payload to set_codec_from_probe_data() at power-of-two sizes.
 * Restore from the canonical libavformat/utils.c before building.
 */
597 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
603 AVPacketList *pktl = s->raw_packet_buffer;
/* packets already identified leave the raw buffer directly */
607 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE){
608 s->raw_packet_buffer = pktl->next;
615 ret= s->iformat->read_packet(s, pkt);
618 st= s->streams[pkt->stream_index];
/* honor user-forced codec IDs per media type */
620 switch(st->codec->codec_type){
621 case CODEC_TYPE_VIDEO:
622 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
624 case CODEC_TYPE_AUDIO:
625 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
627 case CODEC_TYPE_SUBTITLE:
628 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
632 if(!pktl && st->codec->codec_id!=CODEC_ID_PROBE)
635 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
637 if(st->codec->codec_id == CODEC_ID_PROBE){
638 AVProbeData *pd = &st->probe_data;
640 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
641 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
642 pd->buf_size += pkt->size;
643 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
/* re-probe only when the accumulated size crosses a power of two */
645 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
646 set_codec_from_probe_data(st, pd, 1);
647 if(st->codec->codec_id != CODEC_ID_PROBE){
656 /**********************************************************/
659 * Get the number of samples of an audio frame. Return -1 on error.
661 static int get_audio_frame_size(AVCodecContext *enc, int size)
665 if(enc->codec_id == CODEC_ID_VORBIS)
668 if (enc->frame_size <= 1) {
669 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
671 if (bits_per_sample) {
672 if (enc->channels == 0)
674 frame_size = (size << 3) / (bits_per_sample * enc->channels);
676 /* used for example by ADPCM codecs */
677 if (enc->bit_rate == 0)
679 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
682 frame_size = enc->frame_size;
689 * Return the frame duration in seconds. Return 0 if not available.
691 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
692 AVCodecParserContext *pc, AVPacket *pkt)
698 switch(st->codec->codec_type) {
699 case CODEC_TYPE_VIDEO:
700 if(st->time_base.num*1000LL > st->time_base.den){
701 *pnum = st->time_base.num;
702 *pden = st->time_base.den;
703 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
704 *pnum = st->codec->time_base.num;
705 *pden = st->codec->time_base.den;
706 if (pc && pc->repeat_pict) {
708 *pnum = (*pnum) * (2 + pc->repeat_pict);
712 case CODEC_TYPE_AUDIO:
713 frame_size = get_audio_frame_size(st->codec, pkt->size);
717 *pden = st->codec->sample_rate;
724 static int is_intra_only(AVCodecContext *enc){
725 if(enc->codec_type == CODEC_TYPE_AUDIO){
727 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
728 switch(enc->codec_id){
730 case CODEC_ID_MJPEGB:
732 case CODEC_ID_RAWVIDEO:
733 case CODEC_ID_DVVIDEO:
734 case CODEC_ID_HUFFYUV:
735 case CODEC_ID_FFVHUFF:
746 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
747 int64_t dts, int64_t pts)
749 AVStream *st= s->streams[stream_index];
750 AVPacketList *pktl= s->packet_buffer;
752 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
755 st->first_dts= dts - st->cur_dts;
758 for(; pktl; pktl= pktl->next){
759 if(pktl->pkt.stream_index != stream_index)
761 //FIXME think more about this check
762 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
763 pktl->pkt.pts += st->first_dts;
765 if(pktl->pkt.dts != AV_NOPTS_VALUE)
766 pktl->pkt.dts += st->first_dts;
768 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
769 st->start_time= pktl->pkt.pts;
771 if (st->start_time == AV_NOPTS_VALUE)
772 st->start_time = pts;
775 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
777 AVPacketList *pktl= s->packet_buffer;
780 if(st->first_dts != AV_NOPTS_VALUE){
781 cur_dts= st->first_dts;
782 for(; pktl; pktl= pktl->next){
783 if(pktl->pkt.stream_index == pkt->stream_index){
784 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
786 cur_dts -= pkt->duration;
789 pktl= s->packet_buffer;
790 st->first_dts = cur_dts;
791 }else if(st->cur_dts)
794 for(; pktl; pktl= pktl->next){
795 if(pktl->pkt.stream_index != pkt->stream_index)
797 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
798 && !pktl->pkt.duration){
799 pktl->pkt.dts= cur_dts;
800 if(!st->codec->has_b_frames)
801 pktl->pkt.pts= cur_dts;
802 cur_dts += pkt->duration;
803 pktl->pkt.duration= pkt->duration;
807 if(st->first_dts == AV_NOPTS_VALUE)
808 st->cur_dts= cur_dts;
/*
 * NOTE(review): truncated fragment of compute_pkt_fields() — numerous
 * interior lines (wrap handling, parser-offset corrections, brace closures)
 * are missing from this extract. Visible responsibilities: undo pts_wrap
 * on dts>pts, fill in pkt->duration from compute_frame_duration(),
 * interpolate missing pts/dts (delayed vs non-delayed streams), maintain
 * st->cur_dts / last_IP_*, reorder the pts_buffer to derive dts, and set
 * PKT_FLAG_KEY for intra-only codecs or parser-reported I-frames.
 * Restore from the canonical libavformat/utils.c before building.
 */
811 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
812 AVCodecParserContext *pc, AVPacket *pkt)
814 int num, den, presentation_delayed, delay, i;
817 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
818 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
819 pkt->dts -= 1LL<<st->pts_wrap_bits;
822 if (pkt->duration == 0) {
823 compute_frame_duration(&num, &den, st, pc, pkt);
825 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
827 if(pkt->duration != 0 && s->packet_buffer)
828 update_initial_durations(s, st, pkt);
832 /* correct timestamps with byte offset if demuxers only have timestamps
833 on packet boundaries */
834 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
835 /* this will estimate bitrate based on this frame's duration and size */
836 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
837 if(pkt->pts != AV_NOPTS_VALUE)
839 if(pkt->dts != AV_NOPTS_VALUE)
843 /* do we have a video B-frame ? */
844 delay= st->codec->has_b_frames;
845 presentation_delayed = 0;
846 /* XXX: need has_b_frame, but cannot get it if the codec is
849 pc && pc->pict_type != FF_B_TYPE)
850 presentation_delayed = 1;
851 /* This may be redundant, but it should not hurt. */
852 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
853 presentation_delayed = 1;
855 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
856 /* interpolate PTS and DTS if they are not present */
857 if(delay==0 || (delay==1 && pc)){
858 if (presentation_delayed) {
859 /* DTS = decompression timestamp */
860 /* PTS = presentation timestamp */
861 if (pkt->dts == AV_NOPTS_VALUE)
862 pkt->dts = st->last_IP_pts;
863 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
864 if (pkt->dts == AV_NOPTS_VALUE)
865 pkt->dts = st->cur_dts;
867 /* this is tricky: the dts must be incremented by the duration
868 of the frame we are displaying, i.e. the last I- or P-frame */
869 if (st->last_IP_duration == 0)
870 st->last_IP_duration = pkt->duration;
871 if(pkt->dts != AV_NOPTS_VALUE)
872 st->cur_dts = pkt->dts + st->last_IP_duration;
873 st->last_IP_duration = pkt->duration;
874 st->last_IP_pts= pkt->pts;
875 /* cannot compute PTS if not present (we can compute it only
876 by knowing the future */
877 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
878 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
879 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
880 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
881 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
882 pkt->pts += pkt->duration;
883 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
887 /* presentation is not delayed : PTS and DTS are the same */
888 if(pkt->pts == AV_NOPTS_VALUE)
890 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
891 if(pkt->pts == AV_NOPTS_VALUE)
892 pkt->pts = st->cur_dts;
894 if(pkt->pts != AV_NOPTS_VALUE)
895 st->cur_dts = pkt->pts + pkt->duration;
899 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
900 st->pts_buffer[0]= pkt->pts;
901 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
902 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
903 if(pkt->dts == AV_NOPTS_VALUE)
904 pkt->dts= st->pts_buffer[0];
906 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
908 if(pkt->dts > st->cur_dts)
909 st->cur_dts = pkt->dts;
912 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
915 if(is_intra_only(st->codec))
916 pkt->flags |= PKT_FLAG_KEY;
919 /* keyframe computation */
920 if (pc->pict_type == FF_I_TYPE)
921 pkt->flags |= PKT_FLAG_KEY;
925 void av_destruct_packet_nofree(AVPacket *pkt)
927 pkt->data = NULL; pkt->size = 0;
/*
 * NOTE(review): truncated fragment of av_read_frame_internal() — the outer
 * loop, declarations, several branches and closing braces are missing from
 * this extract. Visible responsibilities: emit raw packets directly when no
 * parser is needed; otherwise feed s->cur_pkt through av_parser_parse(),
 * fill pkt fields from the parser, index keyframes for AVFMT_GENERIC_INDEX,
 * lazily create parsers, and flush parsers at EOF.
 * Restore from the canonical libavformat/utils.c before building.
 */
930 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
938 /* select current input stream component */
941 if (!st->need_parsing || !st->parser) {
942 /* no parsing needed: we just output the packet as is */
943 /* raw data support */
945 compute_pkt_fields(s, st, NULL, pkt);
948 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
949 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
950 s->cur_ptr, s->cur_len,
951 s->cur_pkt.pts, s->cur_pkt.dts);
/* timestamps are consumed by the parser on first use */
952 s->cur_pkt.pts = AV_NOPTS_VALUE;
953 s->cur_pkt.dts = AV_NOPTS_VALUE;
954 /* increment read pointer */
958 /* return packet if any */
961 pkt->pos = s->cur_pkt.pos; // Isn't quite accurate but close.
963 pkt->stream_index = st->index;
964 pkt->pts = st->parser->pts;
965 pkt->dts = st->parser->dts;
966 pkt->destruct = av_destruct_packet_nofree;
967 compute_pkt_fields(s, st, st->parser, pkt);
969 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
970 ff_reduce_index(s, st->index);
971 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
972 0, 0, AVINDEX_KEYFRAME);
979 av_free_packet(&s->cur_pkt);
983 /* read next packet */
984 ret = av_read_packet(s, &s->cur_pkt);
986 if (ret == AVERROR(EAGAIN))
988 /* return the last frames, if any */
989 for(i = 0; i < s->nb_streams; i++) {
991 if (st->parser && st->need_parsing) {
/* flush the parser with a NULL/empty input at EOF */
992 av_parser_parse(st->parser, st->codec,
993 &pkt->data, &pkt->size,
995 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
1000 /* no more packets: really terminate parsing */
1004 if(s->cur_pkt.pts != AV_NOPTS_VALUE &&
1005 s->cur_pkt.dts != AV_NOPTS_VALUE &&
1006 s->cur_pkt.pts < s->cur_pkt.dts){
1007 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1008 s->cur_pkt.stream_index,
1012 // av_free_packet(&s->cur_pkt);
1016 st = s->streams[s->cur_pkt.stream_index];
1017 if(s->debug & FF_FDEBUG_TS)
1018 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
1019 s->cur_pkt.stream_index,
1026 s->cur_ptr = s->cur_pkt.data;
1027 s->cur_len = s->cur_pkt.size;
1028 if (st->need_parsing && !st->parser) {
1029 st->parser = av_parser_init(st->codec->codec_id);
1031 /* no parser available: just output the raw packets */
1032 st->need_parsing = AVSTREAM_PARSE_NONE;
1033 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
1034 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1036 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
1037 st->parser->next_frame_offset=
1038 st->parser->cur_offset= s->cur_pkt.pos;
1043 if(s->debug & FF_FDEBUG_TS)
1044 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
/*
 * NOTE(review): truncated fragment of av_read_frame() — the surrounding
 * loop, declarations and closing braces are missing from this extract.
 * Visible responsibilities: with AVFMT_FLAG_GENPTS, derive missing pts of
 * buffered packets from later dts values before returning them; otherwise
 * either pop from the packet buffer or call av_read_frame_internal(),
 * buffering results (duplicated) when genpts needs lookahead.
 * Restore from the canonical libavformat/utils.c before building.
 */
1054 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1058 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1061 pktl = s->packet_buffer;
1063 AVPacket *next_pkt= &pktl->pkt;
1065 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
1066 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1067 if( pktl->pkt.stream_index == next_pkt->stream_index
1068 && next_pkt->dts < pktl->pkt.dts
1069 && pktl->pkt.pts != pktl->pkt.dts //not b frame
1070 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
1071 next_pkt->pts= pktl->pkt.dts;
1075 pktl = s->packet_buffer;
1078 if( next_pkt->pts != AV_NOPTS_VALUE
1079 || next_pkt->dts == AV_NOPTS_VALUE
1081 /* read packet from packet buffer, if there is data */
1083 s->packet_buffer = pktl->next;
1089 int ret= av_read_frame_internal(s, pkt);
1091 if(pktl && ret != AVERROR(EAGAIN)){
/* buffered copies must own their payload */
1098 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1099 &s->packet_buffer_end)) < 0)
1100 return AVERROR(ENOMEM);
1102 assert(!s->packet_buffer);
1103 return av_read_frame_internal(s, pkt);
1108 /* XXX: suppress the packet queue */
1109 static void flush_packet_queue(AVFormatContext *s)
1114 pktl = s->packet_buffer;
1117 s->packet_buffer = pktl->next;
1118 av_free_packet(&pktl->pkt);
1123 /*******************************************************/
1126 int av_find_default_stream_index(AVFormatContext *s)
1128 int first_audio_index = -1;
1132 if (s->nb_streams <= 0)
1134 for(i = 0; i < s->nb_streams; i++) {
1136 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1139 if (first_audio_index < 0 && st->codec->codec_type == CODEC_TYPE_AUDIO)
1140 first_audio_index = i;
1142 return first_audio_index >= 0 ? first_audio_index : 0;
1146 * Flush the frame reader.
1148 static void av_read_frame_flush(AVFormatContext *s)
1153 flush_packet_queue(s);
1155 /* free previous packet */
1157 if (s->cur_st->parser)
1158 av_free_packet(&s->cur_pkt);
1165 /* for each stream, reset read state */
1166 for(i = 0; i < s->nb_streams; i++) {
1170 av_parser_close(st->parser);
1173 st->last_IP_pts = AV_NOPTS_VALUE;
1174 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1178 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1181 for(i = 0; i < s->nb_streams; i++) {
1182 AVStream *st = s->streams[i];
1184 st->cur_dts = av_rescale(timestamp,
1185 st->time_base.den * (int64_t)ref_st->time_base.num,
1186 st->time_base.num * (int64_t)ref_st->time_base.den);
1190 void ff_reduce_index(AVFormatContext *s, int stream_index)
1192 AVStream *st= s->streams[stream_index];
1193 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1195 if((unsigned)st->nb_index_entries >= max_entries){
1197 for(i=0; 2*i<st->nb_index_entries; i++)
1198 st->index_entries[i]= st->index_entries[2*i];
1199 st->nb_index_entries= i;
1203 int av_add_index_entry(AVStream *st,
1204 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1206 AVIndexEntry *entries, *ie;
1209 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1212 entries = av_fast_realloc(st->index_entries,
1213 &st->index_entries_allocated_size,
1214 (st->nb_index_entries + 1) *
1215 sizeof(AVIndexEntry));
1219 st->index_entries= entries;
1221 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1224 index= st->nb_index_entries++;
1225 ie= &entries[index];
1226 assert(index==0 || ie[-1].timestamp < timestamp);
1228 ie= &entries[index];
1229 if(ie->timestamp != timestamp){
1230 if(ie->timestamp <= timestamp)
1232 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1233 st->nb_index_entries++;
1234 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1235 distance= ie->min_distance;
1239 ie->timestamp = timestamp;
1240 ie->min_distance= distance;
1247 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1250 AVIndexEntry *entries= st->index_entries;
1251 int nb_entries= st->nb_index_entries;
1260 timestamp = entries[m].timestamp;
1261 if(timestamp >= wanted_timestamp)
1263 if(timestamp <= wanted_timestamp)
1266 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1268 if(!(flags & AVSEEK_FLAG_ANY)){
1269 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1270 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/*
 * NOTE(review): truncated fragment of av_seek_frame_binary() — declarations
 * (st, e, index), several statements and closing braces are missing from
 * this extract. Visible flow: seed pos/ts bounds from cached index entries
 * when available, then delegate to av_gen_search() and finally seek the IO
 * context and update per-stream cur_dts.
 * Restore from the canonical libavformat/utils.c before building.
 */
1281 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1282 AVInputFormat *avif= s->iformat;
1283 int64_t pos_min, pos_max, pos, pos_limit;
1284 int64_t ts_min, ts_max, ts;
1288 if (stream_index < 0)
1292 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1296 ts_min= AV_NOPTS_VALUE;
1297 pos_limit= -1; //gcc falsely says it may be uninitialized
1299 st= s->streams[stream_index];
1300 if(st->index_entries){
/* lower bound: last keyframe entry at or before the target */
1303 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1304 index= FFMAX(index, 0);
1305 e= &st->index_entries[index];
1307 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1309 ts_min= e->timestamp;
1311 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
/* upper bound: first entry at or after the target */
1318 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1319 assert(index < st->nb_index_entries);
1321 e= &st->index_entries[index];
1322 assert(e->timestamp >= target_ts);
1324 ts_max= e->timestamp;
1325 pos_limit= pos_max - e->min_distance;
1327 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1328 pos_max,pos_limit, ts_max);
1333 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1338 url_fseek(s->pb, pos, SEEK_SET);
1340 av_update_cur_dts(s, st, ts);
/*
 * NOTE(review): truncated fragment of av_gen_search() — declarations
 * (step, no_change), loop bodies and closing braces are missing from this
 * extract. Visible flow: establish ts/pos bounds by reading timestamps at
 * file start/end, then narrow [pos_min, pos_limit] via interpolation,
 * bisection and linear search until the target timestamp is bracketed;
 * returns the chosen position and the timestamp via *ts_ret.
 * Restore from the canonical libavformat/utils.c before building.
 */
1345 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1347 int64_t start_pos, filesize;
1351 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1354 if(ts_min == AV_NOPTS_VALUE){
1355 pos_min = s->data_offset;
1356 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1357 if (ts_min == AV_NOPTS_VALUE)
1361 if(ts_max == AV_NOPTS_VALUE){
1363 filesize = url_fsize(s->pb);
1364 pos_max = filesize - 1;
/* walk backwards from EOF until a timestamp is found */
1367 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1369 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1370 if (ts_max == AV_NOPTS_VALUE)
1374 int64_t tmp_pos= pos_max + 1;
1375 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1376 if(tmp_ts == AV_NOPTS_VALUE)
1380 if(tmp_pos >= filesize)
1386 if(ts_min > ts_max){
1388 }else if(ts_min == ts_max){
1393 while (pos_min < pos_limit) {
1395 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1399 assert(pos_limit <= pos_max);
1402 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1403 // interpolate position (better than dichotomy)
1404 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1405 + pos_min - approximate_keyframe_distance;
1406 }else if(no_change==1){
1407 // bisection, if interpolation failed to change min or max pos last time
1408 pos = (pos_min + pos_limit)>>1;
1410 /* linear search if bisection failed, can only happen if there
1411 are very few or no keyframes between min/max */
1416 else if(pos > pos_limit)
1420 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1426 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1428 if(ts == AV_NOPTS_VALUE){
1429 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1432 assert(ts != AV_NOPTS_VALUE);
1433 if (target_ts <= ts) {
1434 pos_limit = start_pos - 1;
1438 if (target_ts >= ts) {
1444 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1445 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1448 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
/* NOTE(review): reads &pos_min here as well — looks like it should probe
   pos_max for the upper bound; confirm against the canonical source */
1450 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1451 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1452 pos, ts_min, target_ts, ts_max);
/*
 * Seek to an absolute byte position, clamped to [data_offset, filesize-1],
 * and update the stream's current DTS accordingly.
 * NOTE(review): fragment — some original lines are elided here.
 */
1458 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1459 int64_t pos_min, pos_max;
1463 if (stream_index < 0)
1466 st= s->streams[stream_index];
/* Clamp the requested position to the payload area of the file. */
1469 pos_min = s->data_offset;
1470 pos_max = url_fsize(s->pb) - 1;
1472 if (pos < pos_min) pos= pos_min;
1473 else if(pos > pos_max) pos= pos_max;
1475 url_fseek(s->pb, pos, SEEK_SET);
1478 av_update_cur_dts(s, st, ts);
/*
 * Index-based fallback seek: look the timestamp up in the stream's index;
 * if it is not (yet) indexed, read packets forward from the last indexed
 * position until the index covers the target, then seek to the entry found.
 * NOTE(review): fragment — some original lines are elided here.
 */
1483 static int av_seek_frame_generic(AVFormatContext *s,
1484 int stream_index, int64_t timestamp, int flags)
1490 st = s->streams[stream_index];
1492 index = av_index_search_timestamp(st, timestamp, flags);
/* Target not in the index (or at its very end): extend the index by reading. */
1494 if(index < 0 || index==st->nb_index_entries-1){
1498 if(st->nb_index_entries){
1499 assert(st->index_entries);
/* Resume reading from the last indexed position. */
1500 ie= &st->index_entries[st->nb_index_entries-1];
1501 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1503 av_update_cur_dts(s, st, ie->timestamp);
1505 if ((ret = url_fseek(s->pb, 0, SEEK_SET)) < 0)
1509 int ret = av_read_frame(s, &pkt);
1512 av_free_packet(&pkt);
/* Stop once a keyframe past the target timestamp has been indexed. */
1513 if(stream_index == pkt.stream_index){
1514 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1518 index = av_index_search_timestamp(st, timestamp, flags);
1523 av_read_frame_flush(s);
/* Prefer the demuxer's own seek callback when one exists. */
1524 if (s->iformat->read_seek){
1525 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1528 ie = &st->index_entries[index];
1529 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1531 av_update_cur_dts(s, st, ie->timestamp);
/*
 * Public seek entry point.  Dispatches, in order, to: byte seeking
 * (AVSEEK_FLAG_BYTE), the demuxer's read_seek callback, binary search via
 * read_timestamp, and finally the generic index-based seek.
 * NOTE(review): fragment — some original lines are elided here.
 */
1536 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1541 av_read_frame_flush(s);
1543 if(flags & AVSEEK_FLAG_BYTE)
1544 return av_seek_frame_byte(s, stream_index, timestamp, flags);
/* No stream given: pick a default one and convert the timestamp into
 * that stream's time base (callers pass AV_TIME_BASE units in this case). */
1546 if(stream_index < 0){
1547 stream_index= av_find_default_stream_index(s);
1548 if(stream_index < 0)
1551 st= s->streams[stream_index];
1552 /* timestamp for default must be expressed in AV_TIME_BASE units */
1553 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1555 st= s->streams[stream_index];
1557 /* first, we try the format specific seek */
1558 if (s->iformat->read_seek)
1559 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1566 if(s->iformat->read_timestamp)
1567 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1569 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1572 /*******************************************************/
1575 * Returns TRUE if at least one stream has an accurate duration.
1577 * @return TRUE if at least one component stream has a known duration.
1579 static int av_has_duration(AVFormatContext *ic)
/* Scan all streams for one whose duration is set. */
1584 for(i = 0;i < ic->nb_streams; i++) {
1585 st = ic->streams[i];
1586 if (st->duration != AV_NOPTS_VALUE)
1593 * Estimate the stream timings from the one of each components.
1595 * Also computes the global bitrate if possible.
1597 static void av_update_stream_timings(AVFormatContext *ic)
1599 int64_t start_time, start_time1, end_time, end_time1;
1600 int64_t duration, duration1;
/* Accumulate min start time / max end time / max duration over all streams,
 * converted to AV_TIME_BASE units. */
1604 start_time = INT64_MAX;
1605 end_time = INT64_MIN;
1606 duration = INT64_MIN;
1607 for(i = 0;i < ic->nb_streams; i++) {
1608 st = ic->streams[i];
1609 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1610 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1611 if (start_time1 < start_time)
1612 start_time = start_time1;
1613 if (st->duration != AV_NOPTS_VALUE) {
1614 end_time1 = start_time1
1615 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1616 if (end_time1 > end_time)
1617 end_time = end_time1;
1620 if (st->duration != AV_NOPTS_VALUE) {
1621 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1622 if (duration1 > duration)
1623 duration = duration1;
/* Publish the aggregated values on the format context. */
1626 if (start_time != INT64_MAX) {
1627 ic->start_time = start_time;
1628 if (end_time != INT64_MIN) {
/* Prefer the end-start span when it exceeds the longest stream duration. */
1629 if (end_time - start_time > duration)
1630 duration = end_time - start_time;
1633 if (duration != INT64_MIN) {
1634 ic->duration = duration;
1635 if (ic->file_size > 0) {
1636 /* compute the bitrate */
1637 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1638 (double)ic->duration;
/*
 * Propagate the container-level start_time/duration down to any stream
 * that has no start_time of its own, rescaling into each stream's time base.
 */
1643 static void fill_all_stream_timings(AVFormatContext *ic)
1648 av_update_stream_timings(ic);
1649 for(i = 0;i < ic->nb_streams; i++) {
1650 st = ic->streams[i];
1651 if (st->start_time == AV_NOPTS_VALUE) {
1652 if(ic->start_time != AV_NOPTS_VALUE)
1653 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1654 if(ic->duration != AV_NOPTS_VALUE)
1655 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/*
 * Derive a duration estimate from file size and total bitrate:
 * duration ~= 8 * filesize / bit_rate, rescaled per stream time base.
 * The total bitrate is summed from the per-stream codec bitrates when the
 * container did not provide one.
 */
1660 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1662 int64_t filesize, duration;
1666 /* if bit_rate is already set, we believe it */
1667 if (ic->bit_rate == 0) {
1669 for(i=0;i<ic->nb_streams;i++) {
1670 st = ic->streams[i];
1671 bit_rate += st->codec->bit_rate;
1673 ic->bit_rate = bit_rate;
1676 /* if duration is already set, we believe it */
1677 if (ic->duration == AV_NOPTS_VALUE &&
1678 ic->bit_rate != 0 &&
1679 ic->file_size != 0) {
1680 filesize = ic->file_size;
1682 for(i = 0; i < ic->nb_streams; i++) {
1683 st = ic->streams[i];
1684 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1685 if (st->duration == AV_NOPTS_VALUE)
1686 st->duration = duration;
1692 #define DURATION_MAX_READ_SIZE 250000
1694 /* only usable for MPEG-PS streams */
/*
 * Estimate per-stream start times and durations by reading packets: first
 * packets from the file start (for start_time), then up to
 * DURATION_MAX_READ_SIZE bytes near the end (for end_time/duration).
 * Restores the original file position and per-stream DTS state afterwards.
 * NOTE(review): fragment — some original lines are elided here.
 */
1695 static void av_estimate_timings_from_pts(AVFormatContext *ic, offset_t old_offset)
1697 AVPacket pkt1, *pkt = &pkt1;
1699 int read_size, i, ret;
1701 int64_t filesize, offset, duration;
1703 /* free previous packet */
1704 if (ic->cur_st && ic->cur_st->parser)
1705 av_free_packet(&ic->cur_pkt);
1708 /* flush packet queue */
1709 flush_packet_queue(ic);
/* Tear down any per-stream parsers before re-reading from the start. */
1711 for(i=0;i<ic->nb_streams;i++) {
1712 st = ic->streams[i];
1714 av_parser_close(st->parser);
1719 /* we read the first packets to get the first PTS (not fully
1720 accurate, but it is enough now) */
1721 url_fseek(ic->pb, 0, SEEK_SET);
1724 if (read_size >= DURATION_MAX_READ_SIZE)
1726 /* if all info is available, we can stop */
1727 for(i = 0;i < ic->nb_streams; i++) {
1728 st = ic->streams[i];
1729 if (st->start_time == AV_NOPTS_VALUE)
1732 if (i == ic->nb_streams)
1735 ret = av_read_packet(ic, pkt);
1738 read_size += pkt->size;
1739 st = ic->streams[pkt->stream_index];
/* First PTS seen for a stream becomes its start_time. */
1740 if (pkt->pts != AV_NOPTS_VALUE) {
1741 if (st->start_time == AV_NOPTS_VALUE)
1742 st->start_time = pkt->pts;
1744 av_free_packet(pkt);
1747 /* estimate the end time (duration) */
1748 /* XXX: may need to support wrapping */
1749 filesize = ic->file_size;
1750 offset = filesize - DURATION_MAX_READ_SIZE;
1754 url_fseek(ic->pb, offset, SEEK_SET);
1757 if (read_size >= DURATION_MAX_READ_SIZE)
1760 ret = av_read_packet(ic, pkt);
1763 read_size += pkt->size;
1764 st = ic->streams[pkt->stream_index];
/* Track the largest (end_time - start_time) span as the duration. */
1765 if (pkt->pts != AV_NOPTS_VALUE &&
1766 st->start_time != AV_NOPTS_VALUE) {
1767 end_time = pkt->pts;
1768 duration = end_time - st->start_time;
1770 if (st->duration == AV_NOPTS_VALUE ||
1771 st->duration < duration)
1772 st->duration = duration;
1775 av_free_packet(pkt);
1778 fill_all_stream_timings(ic);
/* Restore the caller's file position and reset DTS bookkeeping. */
1780 url_fseek(ic->pb, old_offset, SEEK_SET);
1781 for(i=0; i<ic->nb_streams; i++){
1783 st->cur_dts= st->first_dts;
1784 st->last_IP_pts = AV_NOPTS_VALUE;
/*
 * Top-level timing estimation dispatcher: record the file size, then pick
 * the best available strategy — accurate PTS scanning for seekable
 * MPEG-PS/TS, per-stream timings when any stream has them, else bitrate math.
 * NOTE(review): the trailing printf section appears to be debug output;
 * the surrounding #ifdef (if any) is elided in this fragment.
 */
1788 static void av_estimate_timings(AVFormatContext *ic, offset_t old_offset)
1792 /* get the file size, if possible */
1793 if (ic->iformat->flags & AVFMT_NOFILE) {
1796 file_size = url_fsize(ic->pb);
1800 ic->file_size = file_size;
1802 if ((!strcmp(ic->iformat->name, "mpeg") ||
1803 !strcmp(ic->iformat->name, "mpegts")) &&
1804 file_size && !url_is_streamed(ic->pb)) {
1805 /* get accurate estimate from the PTSes */
1806 av_estimate_timings_from_pts(ic, old_offset);
1807 } else if (av_has_duration(ic)) {
1808 /* at least one component has timings - we use them for all
1810 fill_all_stream_timings(ic);
1812 /* less precise: use bitrate info */
1813 av_estimate_timings_from_bit_rate(ic);
1815 av_update_stream_timings(ic);
/* Debug dump of the computed per-stream and global timings. */
1821 for(i = 0;i < ic->nb_streams; i++) {
1822 st = ic->streams[i];
1823 printf("%d: start_time: %0.3f duration: %0.3f\n",
1824 i, (double)st->start_time / AV_TIME_BASE,
1825 (double)st->duration / AV_TIME_BASE);
1827 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1828 (double)ic->start_time / AV_TIME_BASE,
1829 (double)ic->duration / AV_TIME_BASE,
1830 ic->bit_rate / 1000);
/*
 * Return nonzero when enough codec parameters are known to use the stream:
 * sample rate + channels for audio (frame_size also needed except for
 * Vorbis/AAC), width + pixel format for video, plus a known codec_id.
 */
1835 static int has_codec_parameters(AVCodecContext *enc)
1838 switch(enc->codec_type) {
1839 case CODEC_TYPE_AUDIO:
1840 val = enc->sample_rate && enc->channels;
/* Vorbis/AAC have variable frame sizes, so frame_size is not required. */
1841 if(!enc->frame_size &&
1842 (enc->codec_id == CODEC_ID_VORBIS ||
1843 enc->codec_id == CODEC_ID_AAC))
1846 case CODEC_TYPE_VIDEO:
1847 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1853 return enc->codec_id != CODEC_ID_NONE && val != 0;
/*
 * Open the stream's decoder (if not already open) and decode one packet,
 * hoping the decoder fills in the missing codec parameters
 * (width/pix_fmt/sample_rate/...).  Used by av_find_stream_info().
 * NOTE(review): fragment — error paths and cleanup lines are elided here.
 */
1856 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1860 int got_picture, data_size, ret=0;
1863 if(!st->codec->codec){
1864 codec = avcodec_find_decoder(st->codec->codec_id);
1867 ret = avcodec_open(st->codec, codec);
/* Only decode while parameters are still missing. */
1872 if(!has_codec_parameters(st->codec)){
1873 switch(st->codec->codec_type) {
1874 case CODEC_TYPE_VIDEO:
1875 ret = avcodec_decode_video(st->codec, &picture,
1876 &got_picture, data, size);
1878 case CODEC_TYPE_AUDIO:
1879 data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1880 samples = av_malloc(data_size);
1883 ret = avcodec_decode_audio2(st->codec, samples,
1884 &data_size, data, size);
/*
 * Look up the container tag (fourcc) for a codec id in a tag table.
 * NOTE(review): only the signature and loop head are visible in this
 * fragment; the lookup/return lines are elided.
 */
1895 unsigned int codec_get_tag(const AVCodecTag *tags, int id)
1897 while (tags->id != CODEC_ID_NONE) {
/*
 * Reverse lookup: map a container tag (fourcc) to a codec id.  First pass
 * matches exactly; second pass retries case-insensitively byte by byte.
 */
1905 enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag)
1908 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1909 if(tag == tags[i].tag)
/* Case-insensitive fallback: compare each of the four tag bytes uppercased. */
1912 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1913 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1914 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1915 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1916 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1919 return CODEC_ID_NONE;
/*
 * Search up to four tag tables for the tag matching a codec id; delegates
 * to codec_get_tag() per table.
 * NOTE(review): the success/failure return lines are elided in this fragment.
 */
1922 unsigned int av_codec_get_tag(const AVCodecTag *tags[4], enum CodecID id)
1925 for(i=0; tags && tags[i]; i++){
1926 int tag= codec_get_tag(tags[i], id);
/*
 * Search up to four tag tables for the codec id matching a container tag;
 * returns the first hit, or CODEC_ID_NONE when no table matches.
 */
1932 enum CodecID av_codec_get_id(const AVCodecTag *tags[4], unsigned int tag)
1935 for(i=0; tags && tags[i]; i++){
1936 enum CodecID id= codec_get_id(tags[i], tag);
1937 if(id!=CODEC_ID_NONE) return id;
1939 return CODEC_ID_NONE;
/*
 * Fill in missing chapter end times: each open-ended chapter ends where the
 * next one starts; the final chapter ends at (start_time + duration),
 * rescaled into the chapter's own time base.
 */
1942 static void compute_chapters_end(AVFormatContext *s)
1946 for (i=0; i+1<s->nb_chapters; i++)
1947 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
/* Chapters are assumed ordered and to share a time base with their successor. */
1948 assert(s->chapters[i]->start <= s->chapters[i+1]->start);
1949 assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
1950 s->chapters[i]->end = s->chapters[i+1]->start;
1953 if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
1954 assert(s->start_time != AV_NOPTS_VALUE);
1955 assert(s->duration > 0);
1956 s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
1958 s->chapters[i]->time_base);
1962 /* absolute maximum size we read until we abort */
1963 #define MAX_READ_SIZE 5000000
1965 #define MAX_STD_TIMEBASES (60*12+5)
/* Enumerate candidate frame rates for fps detection: indices below 60*12
 * map to i*1001 (covering n/1.001 rates up to 60 fps at 1/12 granularity);
 * the last 5 indices are the exact rates 24/30/60/12/15 scaled by 1000*12. */
1966 static int get_std_framerate(int i){
1967 if(i<60*12) return i*1001;
1968 else return ((int[]){24,30,60,12,15})[i-60*12]*1000*12;
1972 * Is the time base unreliable.
1973 * This is a heuristic to balance between quick acceptance of the values in
1974 * the headers vs. some extra checks.
1975 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
1976 * MPEG-2 commonly misuses field repeat flags to store different framerates.
1977 * And there are "variable" fps files this needs to detect as well.
/* Flags a time base as suspect when it implies >100 fps or <5 fps, or when
 * the codec is MPEG-2 (field-repeat framerate misuse; see comment above). */
1979 static int tb_unreliable(AVCodecContext *c){
1980 if( c->time_base.den >= 101L*c->time_base.num
1981 || c->time_base.den < 5L*c->time_base.num
1982 /* || c->codec_tag == ff_get_fourcc("DIVX")
1983 || c->codec_tag == ff_get_fourcc("XVID")*/
1984 || c->codec_id == CODEC_ID_MPEG2VIDEO)
/*
 * Probe the input by reading packets until every stream's codec parameters
 * are known (or limits are hit): initializes parsers, buffers the probed
 * packets for later demuxing, collects inter-packet DTS deltas to guess the
 * real frame rate against a table of standard rates, extracts extradata via
 * parser split, and decodes frames as a last resort.  Finishes by estimating
 * timings, computing chapter ends, and correcting B-frame DTS.
 * NOTE(review): this chunk is a fragmentary extraction — many original lines
 * (loop headers, braces, early exits) are elided below.
 */
1989 int av_find_stream_info(AVFormatContext *ic)
1991 int i, count, ret, read_size, j;
1993 AVPacket pkt1, *pkt;
1994 int64_t last_dts[MAX_STREAMS];
1995 int duration_count[MAX_STREAMS]={0};
1996 double (*duration_error)[MAX_STD_TIMEBASES];
1997 offset_t old_offset = url_ftell(ic->pb);
1998 int64_t codec_info_duration[MAX_STREAMS]={0};
1999 int codec_info_nb_frames[MAX_STREAMS]={0};
/* Per-stream, per-candidate-framerate accumulated squared timing error. */
2001 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
2002 if (!duration_error) return AVERROR(ENOMEM);
/* Set up parsers and default codec time bases per stream. */
2004 for(i=0;i<ic->nb_streams;i++) {
2005 st = ic->streams[i];
2006 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2007 /* if(!st->time_base.num)
2009 if(!st->codec->time_base.num)
2010 st->codec->time_base= st->time_base;
2012 //only for the split stuff
2014 st->parser = av_parser_init(st->codec->codec_id);
2015 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2016 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2021 for(i=0;i<MAX_STREAMS;i++){
2022 last_dts[i]= AV_NOPTS_VALUE;
2028 /* check if one codec still needs to be handled */
2029 for(i=0;i<ic->nb_streams;i++) {
2030 st = ic->streams[i];
2031 if (!has_codec_parameters(st->codec))
2033 /* variable fps and no guess at the real fps */
2034 if( tb_unreliable(st->codec)
2035 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
2037 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2039 if(st->first_dts == AV_NOPTS_VALUE)
2042 if (i == ic->nb_streams) {
2043 /* NOTE: if the format has no header, then we need to read
2044 some packets to get most of the streams, so we cannot
2046 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2047 /* if we found the info for all the codecs, we can stop */
2052 /* we did not get all the codec info, but we read too much data */
2053 if (read_size >= MAX_READ_SIZE) {
2058 /* NOTE: a new stream can be added there if no header in file
2059 (AVFMTCTX_NOHEADER) */
2060 ret = av_read_frame_internal(ic, &pkt1);
2063 ret = -1; /* we could not have all the codec parameters before EOF */
2064 for(i=0;i<ic->nb_streams;i++) {
2065 st = ic->streams[i];
2066 if (!has_codec_parameters(st->codec)){
2068 avcodec_string(buf, sizeof(buf), st->codec, 0);
2069 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
/* Buffer the probed packet so later av_read_frame() calls still see it. */
2077 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2078 if(av_dup_packet(pkt) < 0) {
2079 av_free(duration_error);
2080 return AVERROR(ENOMEM);
2083 read_size += pkt->size;
2085 st = ic->streams[pkt->stream_index];
2086 if(codec_info_nb_frames[st->index]>1)
2087 codec_info_duration[st->index] += pkt->duration;
2088 if (pkt->duration != 0)
2089 codec_info_nb_frames[st->index]++;
/* Accumulate DTS-delta error against each candidate standard frame rate. */
2092 int index= pkt->stream_index;
2093 int64_t last= last_dts[index];
2094 int64_t duration= pkt->dts - last;
2096 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2097 double dur= duration * av_q2d(st->time_base);
2099 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2100 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2101 if(duration_count[index] < 2)
2102 memset(duration_error[index], 0, sizeof(*duration_error));
2103 for(i=1; i<MAX_STD_TIMEBASES; i++){
2104 int framerate= get_std_framerate(i);
2105 int ticks= lrintf(dur*framerate/(1001*12));
2106 double error= dur - ticks*1001*12/(double)framerate;
2107 duration_error[index][i] += error*error;
2109 duration_count[index]++;
2111 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
2112 last_dts[pkt->stream_index]= pkt->dts;
/* Let the parser extract extradata (e.g. codec headers) from the packet. */
2114 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2115 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2117 st->codec->extradata_size= i;
2118 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2119 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2120 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2124 /* if still no information, we try to open the codec and to
2125 decompress the frame. We try to avoid that in most cases as
2126 it takes longer and uses more memory. For MPEG-4, we need to
2127 decompress for QuickTime. */
2128 if (!has_codec_parameters(st->codec) /*&&
2129 (st->codec->codec_id == CODEC_ID_FLV1 ||
2130 st->codec->codec_id == CODEC_ID_H264 ||
2131 st->codec->codec_id == CODEC_ID_H263 ||
2132 st->codec->codec_id == CODEC_ID_H261 ||
2133 st->codec->codec_id == CODEC_ID_VORBIS ||
2134 st->codec->codec_id == CODEC_ID_MJPEG ||
2135 st->codec->codec_id == CODEC_ID_PNG ||
2136 st->codec->codec_id == CODEC_ID_PAM ||
2137 st->codec->codec_id == CODEC_ID_PGM ||
2138 st->codec->codec_id == CODEC_ID_PGMYUV ||
2139 st->codec->codec_id == CODEC_ID_PBM ||
2140 st->codec->codec_id == CODEC_ID_PPM ||
2141 st->codec->codec_id == CODEC_ID_SHORTEN ||
2142 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
2143 try_decode_frame(st, pkt->data, pkt->size);
/* Stop probing once the analyzed duration exceeds max_analyze_duration. */
2145 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2151 // close codecs which were opened in try_decode_frame()
2152 for(i=0;i<ic->nb_streams;i++) {
2153 st = ic->streams[i];
2154 if(st->codec->codec)
2155 avcodec_close(st->codec);
/* Post-processing: pick the best-fitting standard frame rate per stream. */
2157 for(i=0;i<ic->nb_streams;i++) {
2158 st = ic->streams[i];
2159 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2160 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_sample)
2161 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2163 if(duration_count[i]
2164 && tb_unreliable(st->codec) /*&&
2165 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2166 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
2167 double best_error= 2*av_q2d(st->time_base);
2168 best_error= best_error*best_error*duration_count[i]*1000*12*30;
2170 for(j=1; j<MAX_STD_TIMEBASES; j++){
2171 double error= duration_error[i][j] * get_std_framerate(j);
2172 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2173 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2174 if(error < best_error){
2176 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, get_std_framerate(j), 12*1001, INT_MAX);
/* Fall back to whichever of codec/stream time base implies the lower rate. */
2181 if (!st->r_frame_rate.num){
2182 if( st->codec->time_base.den * (int64_t)st->time_base.num
2183 <= st->codec->time_base.num * (int64_t)st->time_base.den){
2184 st->r_frame_rate.num = st->codec->time_base.den;
2185 st->r_frame_rate.den = st->codec->time_base.num;
2187 st->r_frame_rate.num = st->time_base.den;
2188 st->r_frame_rate.den = st->time_base.num;
2191 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
2192 if(!st->codec->bits_per_sample)
2193 st->codec->bits_per_sample= av_get_bits_per_sample(st->codec->codec_id);
2197 av_estimate_timings(ic, old_offset);
2199 compute_chapters_end(ic);
2202 /* correct DTS for B-frame streams with no timestamps */
2203 for(i=0;i<ic->nb_streams;i++) {
2204 st = ic->streams[i];
2205 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2207 ppktl = &ic->packet_buffer;
2209 if(ppkt1->stream_index != i)
2211 if(ppkt1->pkt->dts < 0)
2213 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2215 ppkt1->pkt->dts -= delta;
2220 st->cur_dts -= delta;
2226 av_free(duration_error);
2231 /*******************************************************/
/*
 * Resume a paused network stream: prefer the demuxer's read_play callback,
 * else fall back to un-pausing the underlying protocol.
 */
2233 int av_read_play(AVFormatContext *s)
2235 if (s->iformat->read_play)
2236 return s->iformat->read_play(s);
2238 return av_url_read_fpause(s->pb, 0);
2239 return AVERROR(ENOSYS);
/*
 * Pause a network stream: prefer the demuxer's read_pause callback,
 * else fall back to pausing the underlying protocol.
 */
2242 int av_read_pause(AVFormatContext *s)
2244 if (s->iformat->read_pause)
2245 return s->iformat->read_pause(s);
2247 return av_url_read_fpause(s->pb, 1);
2248 return AVERROR(ENOSYS);
/*
 * Release everything owned by an input AVFormatContext except the I/O
 * context: pending packet, demuxer private data, per-stream resources
 * (parser, index, extradata, names), programs, queued packets and chapters.
 * NOTE(review): fragment — some free calls/braces are elided here.
 */
2251 void av_close_input_stream(AVFormatContext *s)
2256 /* free previous packet */
2257 if (s->cur_st && s->cur_st->parser)
2258 av_free_packet(&s->cur_pkt);
2260 if (s->iformat->read_close)
2261 s->iformat->read_close(s);
2262 for(i=0;i<s->nb_streams;i++) {
2263 /* free all data in a stream component */
2266 av_parser_close(st->parser);
2268 av_free(st->index_entries);
2269 av_free(st->codec->extradata);
2271 av_free(st->filename);
2272 av_free(st->priv_data);
/* Free programs in reverse so the array can be released afterwards. */
2275 for(i=s->nb_programs-1; i>=0; i--) {
2276 av_freep(&s->programs[i]->provider_name);
2277 av_freep(&s->programs[i]->name);
2278 av_freep(&s->programs[i]->stream_index);
2279 av_freep(&s->programs[i]);
2281 av_freep(&s->programs);
2282 flush_packet_queue(s);
2283 av_freep(&s->priv_data);
2284 while(s->nb_chapters--) {
2285 av_free(s->chapters[s->nb_chapters]->title);
2286 av_free(s->chapters[s->nb_chapters]);
2288 av_freep(&s->chapters);
/*
 * Close an input file: capture the I/O context first (NULL for AVFMT_NOFILE
 * formats, which own no file) so it can be closed after the stream teardown.
 */
2292 void av_close_input_file(AVFormatContext *s)
2294 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2295 av_close_input_stream(s);
/*
 * Allocate and register a new AVStream on the context with the given id:
 * fresh codec context, unset timing fields, MPEG-like default PTS info
 * (33 bits, 1/90000), and an empty PTS reorder buffer.
 * NOTE(review): fragment — NULL-check/return lines are elided here.
 */
2300 AVStream *av_new_stream(AVFormatContext *s, int id)
2305 if (s->nb_streams >= MAX_STREAMS)
2308 st = av_mallocz(sizeof(AVStream));
2312 st->codec= avcodec_alloc_context();
2314 /* no default bitrate if decoding */
2315 st->codec->bit_rate = 0;
2317 st->index = s->nb_streams;
2319 st->start_time = AV_NOPTS_VALUE;
2320 st->duration = AV_NOPTS_VALUE;
2321 /* we set the current DTS to 0 so that formats without any timestamps
2322 but durations get some timestamps, formats with some unknown
2323 timestamps have their first few packets buffered and the
2324 timestamps corrected before they are returned to the user */
2326 st->first_dts = AV_NOPTS_VALUE;
2328 /* default pts setting is MPEG-like */
2329 av_set_pts_info(st, 33, 1, 90000);
2330 st->last_IP_pts = AV_NOPTS_VALUE;
2331 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2332 st->pts_buffer[i]= AV_NOPTS_VALUE;
2334 s->streams[s->nb_streams++] = st;
/*
 * Return the program with the given id, creating and registering a new one
 * (discard defaulting to AVDISCARD_NONE) when no existing program matches.
 */
2338 AVProgram *av_new_program(AVFormatContext *ac, int id)
2340 AVProgram *program=NULL;
2344 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
/* Reuse an existing program with the same id, if any. */
2347 for(i=0; i<ac->nb_programs; i++)
2348 if(ac->programs[i]->id == id)
2349 program = ac->programs[i];
2352 program = av_mallocz(sizeof(AVProgram));
2355 dynarray_add(&ac->programs, &ac->nb_programs, program);
2356 program->discard = AVDISCARD_NONE;
/*
 * Replace a program's provider and display names with copies of the given
 * strings.  Both must be set or both NULL (asserted); old names are freed.
 */
2363 void av_set_program_name(AVProgram *program, char *provider_name, char *name)
2365 assert(!provider_name == !name);
2367 av_free(program->provider_name);
2368 av_free(program-> name);
2369 program->provider_name = av_strdup(provider_name);
2370 program-> name = av_strdup( name);
/*
 * Return the chapter with the given id, creating it if absent, then set its
 * title (copied), time base, start and end.  Existing chapters with the same
 * id are updated in place.
 * NOTE(review): fragment — NULL-check and the end-assignment lines are elided.
 */
2374 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2376 AVChapter *chapter = NULL;
2379 for(i=0; i<s->nb_chapters; i++)
2380 if(s->chapters[i]->id == id)
2381 chapter = s->chapters[i];
2384 chapter= av_mallocz(sizeof(AVChapter));
2387 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
/* Free any previous title before installing the copy. */
2389 av_free(chapter->title);
2390 chapter->title = av_strdup(title);
2392 chapter->time_base= time_base;
2393 chapter->start = start;
2399 /************************************************************/
2400 /* output media file */
/*
 * Prepare an output context: allocate the muxer's private data (if the
 * format declares any) and forward the user parameters to the muxer's
 * set_parameters callback when present.
 */
2402 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2406 if (s->oformat->priv_data_size > 0) {
2407 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2409 return AVERROR(ENOMEM);
2411 s->priv_data = NULL;
2413 if (s->oformat->set_parameters) {
2414 ret = s->oformat->set_parameters(s, ap);
/*
 * Validate stream parameters (sample rate, time base, dimensions), resolve
 * codec tags against the muxer's tag tables, allocate private data if still
 * missing, call the muxer's write_header, and initialize per-stream PTS
 * generation (fractional PTS denominator per codec type).
 * NOTE(review): fragment — error returns and some braces are elided here.
 */
2421 int av_write_header(AVFormatContext *s)
2426 // some sanity checks
2427 for(i=0;i<s->nb_streams;i++) {
2430 switch (st->codec->codec_type) {
2431 case CODEC_TYPE_AUDIO:
2432 if(st->codec->sample_rate<=0){
2433 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2437 case CODEC_TYPE_VIDEO:
2438 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2439 av_log(s, AV_LOG_ERROR, "time base not set\n");
2442 if(st->codec->width<=0 || st->codec->height<=0){
2443 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2449 if(s->oformat->codec_tag){
2450 if(st->codec->codec_tag){
2452 //check that tag + id is in the table
2453 //if neither is in the table -> OK
2454 //if tag is in the table with another id -> FAIL
2455 //if id is in the table with another tag -> FAIL unless strict < ?
2457 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2461 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2462 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2464 return AVERROR(ENOMEM);
2467 if(s->oformat->write_header){
2468 ret = s->oformat->write_header(s);
2473 /* init PTS generation */
2474 for(i=0;i<s->nb_streams;i++) {
2475 int64_t den = AV_NOPTS_VALUE;
2478 switch (st->codec->codec_type) {
2479 case CODEC_TYPE_AUDIO:
2480 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2482 case CODEC_TYPE_VIDEO:
2483 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2488 if (den != AV_NOPTS_VALUE) {
2490 return AVERROR_INVALIDDATA;
2491 av_frac_init(&st->pts, 0, 0, den);
2497 //FIXME merge with compute_pkt_fields
/*
 * Fill in missing packet fields before muxing: duration (from the frame
 * duration), PTS/DTS (including B-frame reordering via pts_buffer), then
 * validate monotonicity and advance the stream's fractional PTS counter.
 * NOTE(review): fragment — some error-return lines are elided here.
 */
2498 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
/* delay > 0 means the codec reorders frames (B-frames). */
2499 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2500 int num, den, frame_size, i;
2502 // av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2504 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2507 /* duration field */
2508 if (pkt->duration == 0) {
2509 compute_frame_duration(&num, &den, st, NULL, pkt);
2511 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
/* Without reordering, a known DTS can serve directly as the PTS. */
2515 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2518 //XXX/FIXME this is a temporary hack until all encoders output pts
2519 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2521 // pkt->pts= st->cur_dts;
2522 pkt->pts= st->pts.val;
2525 //calculate dts from pts
2526 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
/* Keep a sorted window of the last delay+1 PTS values; its minimum is the
 * DTS for the current packet. */
2527 st->pts_buffer[0]= pkt->pts;
2528 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2529 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
2530 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2531 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2533 pkt->dts= st->pts_buffer[0];
2536 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2537 av_log(st->codec, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2540 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2541 av_log(st->codec, AV_LOG_ERROR, "error, pts < dts\n");
2545 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2546 st->cur_dts= pkt->dts;
2547 st->pts.val= pkt->dts;
/* Advance the fractional PTS counter by one frame's worth of time. */
2550 switch (st->codec->codec_type) {
2551 case CODEC_TYPE_AUDIO:
2552 frame_size = get_audio_frame_size(st->codec, pkt->size);
2554 /* HACK/FIXME, we skip the initial 0 size packets as they are most
2555 likely equal to the encoder delay, but it would be better if we
2556 had the real timestamps from the encoder */
2557 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2558 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2561 case CODEC_TYPE_VIDEO:
2562 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
/*
 * Wrap a packet's PTS/DTS into the stream's pts_wrap_bits range by masking
 * with (2^pts_wrap_bits - 1); AV_NOPTS_VALUE is left untouched.
 */
2570 static void truncate_ts(AVStream *st, AVPacket *pkt){
2571 int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
2574 // pkt->dts= 0; //this happens for low_delay=0 and B-frames, FIXME, needs further investigation about what we should do here
2576 if (pkt->pts != AV_NOPTS_VALUE)
2577 pkt->pts &= pts_mask;
2578 if (pkt->dts != AV_NOPTS_VALUE)
2579 pkt->dts &= pts_mask;
/*
 * Write one packet without interleaving: compute/validate its timestamp
 * fields, wrap them to the stream's PTS range, hand the packet to the
 * muxer, and report any I/O error from the output context.
 */
2582 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2584 int ret = compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
/* Timestamp problems are fatal only for muxers that need timestamps. */
2586 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2589 truncate_ts(s->streams[pkt->stream_index], pkt);
2591 ret= s->oformat->write_packet(s, pkt);
2593 ret= url_ferror(s->pb);
/*
 * Default interleaving by DTS: insert the incoming packet into the buffered
 * list sorted by DTS (cross-multiplying time bases to compare), then emit
 * the head packet once every stream has at least one packet buffered (or
 * unconditionally when flushing).
 * NOTE(review): fragment — several loop/brace lines are elided here.
 */
2597 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2598 AVPacketList *pktl, **next_point, *this_pktl;
2600 int streams[MAX_STREAMS];
2603 AVStream *st= s->streams[ pkt->stream_index];
2605 // assert(pkt->destruct != av_destruct_packet); //FIXME
2607 this_pktl = av_mallocz(sizeof(AVPacketList));
2608 this_pktl->pkt= *pkt;
2609 if(pkt->destruct == av_destruct_packet)
2610 pkt->destruct= NULL; // not shared -> must keep original from being freed
2612 av_dup_packet(&this_pktl->pkt); //shared -> must dup
/* Walk the buffered list to find the DTS-ordered insertion point. */
2614 next_point = &s->packet_buffer;
2616 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2617 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2618 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2619 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2621 next_point= &(*next_point)->next;
2623 this_pktl->next= *next_point;
2624 *next_point= this_pktl;
/* Count how many distinct streams currently have buffered packets. */
2627 memset(streams, 0, sizeof(streams));
2628 pktl= s->packet_buffer;
2630 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
2631 if(streams[ pktl->pkt.stream_index ] == 0)
2633 streams[ pktl->pkt.stream_index ]++;
/* Emit the head packet when all streams are represented, or when flushing. */
2637 if(stream_count && (s->nb_streams == stream_count || flush)){
2638 pktl= s->packet_buffer;
2641 s->packet_buffer= pktl->next;
2645 av_init_packet(out);
2651 * Interleaves an AVPacket correctly so it can be muxed.
2652 * @param out the interleaved packet will be output here
2653 * @param in the input packet
2654 * @param flush 1 if no further packets are available as input and all
2655 * remaining packets should be output
2656 * @return 1 if a packet was output, 0 if no packet could be output,
2657 * < 0 if an error occurred
/* Dispatch to the muxer's own interleave callback when it has one,
 * else use the generic DTS-ordered interleaving. */
2659 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2660 if(s->oformat->interleave_packet)
2661 return s->oformat->interleave_packet(s, out, in, flush);
2663 return av_interleave_packet_per_dts(s, out, in, flush);
/*
 * Write a packet with interleaving: compute its fields, feed it to the
 * interleaver, and write out every packet the interleaver releases.
 * Zero-sized audio packets are dropped up front.
 * NOTE(review): fragment — the drain-loop header/braces are elided here.
 */
2666 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2667 AVStream *st= s->streams[ pkt->stream_index];
2669 //FIXME/XXX/HACK drop zero sized packets
2670 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2673 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2674 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2677 if(pkt->dts == AV_NOPTS_VALUE)
/* Drain whatever the interleaver is ready to release. */
2682 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2683 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2686 truncate_ts(s->streams[opkt.stream_index], &opkt);
2687 ret= s->oformat->write_packet(s, &opkt);
2689 av_free_packet(&opkt);
2694 if(url_ferror(s->pb))
2695 return url_ferror(s->pb);
/*
 * Finish the output file: flush all packets still held by the interleaver,
 * call the muxer's write_trailer, check for I/O errors, and free the
 * per-stream and muxer private data.
 * NOTE(review): fragment — the flush-loop header/braces are elided here.
 */
2699 int av_write_trailer(AVFormatContext *s)
/* Flush the interleaver (flush=1) until it has nothing left to emit. */
2705 ret= av_interleave_packet(s, &pkt, NULL, 1);
2706 if(ret<0) //FIXME cleanup needed for ret<0 ?
2711 truncate_ts(s->streams[pkt.stream_index], &pkt);
2712 ret= s->oformat->write_packet(s, &pkt);
2714 av_free_packet(&pkt);
2718 if(url_ferror(s->pb))
2722 if(s->oformat->write_trailer)
2723 ret = s->oformat->write_trailer(s);
2726 ret=url_ferror(s->pb);
2727 for(i=0;i<s->nb_streams;i++)
2728 av_freep(&s->streams[i]->priv_data);
2729 av_freep(&s->priv_data);
/*
 * Add a stream index to the program with the given id, skipping the append
 * when the index is already present; the index array is grown via realloc.
 * NOTE(review): fragment — realloc-failure handling lines are elided here.
 */
2733 void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
2736 AVProgram *program=NULL;
2739 for(i=0; i<ac->nb_programs; i++){
2740 if(ac->programs[i]->id != progid)
2742 program = ac->programs[i];
/* Already registered? Nothing to do. */
2743 for(j=0; j<program->nb_stream_indexes; j++)
2744 if(program->stream_index[j] == idx)
2747 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
2750 program->stream_index = tmp;
2751 program->stream_index[program->nb_stream_indexes++] = idx;
2756 /* "user interface" functions */
/**
 * Logs a one-line description of stream i of ic ("Stream #index.i ...")
 * at AV_LOG_INFO level; the reduced time base goes to AV_LOG_DEBUG only.
 *
 * @param ic        the (de)muxer context
 * @param i         stream index within ic
 * @param index     file index printed before the stream number
 * @param is_output non-zero when ic is an output (muxer) context
 */
static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)

    int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
    AVStream *st = ic->streams[i];
    /* reduce the time base by its gcd before printing */
    int g = ff_gcd(st->time_base.num, st->time_base.den);
    avcodec_string(buf, sizeof(buf), st->codec, is_output);
    av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
    /* the pid is an important information, so we display it */
    /* XXX: add a generic system */
    if (flags & AVFMT_SHOW_IDS)
        av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
    if (strlen(st->language) > 0)
        av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
    av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
    av_log(NULL, AV_LOG_INFO, ": %s", buf);
    if(st->codec->codec_type == CODEC_TYPE_VIDEO){
        /* prefer the measured ("real") frame rate when available */
        if(st->r_frame_rate.den && st->r_frame_rate.num)
            av_log(NULL, AV_LOG_INFO, ", %5.2f tb(r)", av_q2d(st->r_frame_rate));
/*      else if(st->time_base.den && st->time_base.num)
            av_log(NULL, AV_LOG_INFO, ", %5.2f tb(m)", 1/av_q2d(st->time_base));*/

            av_log(NULL, AV_LOG_INFO, ", %5.2f tb(c)", 1/av_q2d(st->codec->time_base));

    av_log(NULL, AV_LOG_INFO, "\n");
/**
 * Logs a human-readable description of the (de)muxer context: format
 * name, URL, duration, start time, bitrate, programs and streams.
 *
 * NOTE(review): the remaining parameters and several statements (braces,
 * else branches, local declarations) are elided in this view; comments
 * describe only the visible statements.
 */
void dump_format(AVFormatContext *ic,

    av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
        is_output ? "Output" : "Input",
        is_output ? ic->oformat->name : ic->iformat->name,
        is_output ? "to" : "from", url);

    av_log(NULL, AV_LOG_INFO, " Duration: ");
    if (ic->duration != AV_NOPTS_VALUE) {
        int hours, mins, secs, us;
        /* split the AV_TIME_BASE-scaled duration into h:m:s + remainder */
        secs = ic->duration / AV_TIME_BASE;
        us = ic->duration % AV_TIME_BASE;

        av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
               (100 * us) / AV_TIME_BASE);

        av_log(NULL, AV_LOG_INFO, "N/A");

    if (ic->start_time != AV_NOPTS_VALUE) {

        av_log(NULL, AV_LOG_INFO, ", start: ");
        secs = ic->start_time / AV_TIME_BASE;
        us = ic->start_time % AV_TIME_BASE;
        /* rescale the sub-second remainder to microseconds for printing */
        av_log(NULL, AV_LOG_INFO, "%d.%06d",
               secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));

    av_log(NULL, AV_LOG_INFO, ", bitrate: ");

        av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);

        av_log(NULL, AV_LOG_INFO, "N/A");

    av_log(NULL, AV_LOG_INFO, "\n");

    /* When the container carries programs, dump each program's streams
     * (presumably instead of the flat per-stream dump below — the else
     * branch is on elided lines). */
    if(ic->nb_programs) {

        for(j=0; j<ic->nb_programs; j++) {
            av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
                   ic->programs[j]->name ? ic->programs[j]->name : "");
            for(k=0; k<ic->programs[j]->nb_stream_indexes; k++)
                dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);

    for(i=0;i<ic->nb_streams;i++)
        dump_stream_format(ic, i, index, is_output);
/**
 * Parses a frame-size string (e.g. "640x480" or a size abbreviation)
 * into width and height. Thin wrapper around av_parse_video_frame_size().
 *
 * @param width_ptr  receives the parsed width
 * @param height_ptr receives the parsed height
 * @param str        the string to parse
 * @return the return value of av_parse_video_frame_size()
 */
int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
{
    return av_parse_video_frame_size(width_ptr, height_ptr, str);
}
2845 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
2847 AVRational frame_rate;
2848 int ret = av_parse_video_frame_rate(&frame_rate, arg);
2849 *frame_rate_num= frame_rate.num;
2850 *frame_rate_den= frame_rate.den;
2855 * Gets the current time in microseconds.
2857 int64_t av_gettime(void)
2860 gettimeofday(&tv,NULL);
2861 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
/**
 * Parses datestr either as an absolute date/time (year-month-day part,
 * optional 'T'/'t'/' ' separator, hour-minute-second part, optional
 * trailing 'Z'/'z' meaning UTC) or, in duration mode, as "HH:MM:SS" or a
 * plain number of seconds, with an optional fractional ".mmm..." part.
 *
 * @param datestr  the string to parse
 * @param duration non-zero to interpret datestr as a duration
 * @return the parsed value; NOTE(review): the scaling of t and the
 *         failure return value are on elided lines — confirm against the
 *         full source before relying on them.
 */
int64_t parse_date(const char *datestr, int duration)

    static const char *date_fmt[] = {

    static const char *time_fmt[] = {

    time_t now = time(0);

    len = strlen(datestr);

        lastch = datestr[len - 1];

    /* a trailing 'z'/'Z' marks the timestamp as UTC */
    is_utc = (lastch == 'z' || lastch == 'Z');

    memset(&dt, 0, sizeof(dt));

        /* parse the year-month-day part */
        for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
            q = small_strptime(p, date_fmt[i], &dt);

        /* if the year-month-day part is missing, then take the
         * current year-month-day time */

                dt = *localtime(&now);

            dt.tm_hour = dt.tm_min = dt.tm_sec = 0;

        /* skip the optional date/time separator */
        if (*p == 'T' || *p == 't' || *p == ' ')

        /* parse the hour-minute-second part */
        for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
            q = small_strptime(p, time_fmt[i], &dt);

        /* parse datestr as a duration */

            /* parse datestr as HH:MM:SS */
            q = small_strptime(p, time_fmt[0], &dt);

                /* parse datestr as S+ */
                dt.tm_sec = strtol(p, (char **)&q, 10);

                /* the parsing didn't succeed */

    /* Now we have all the fields that we can get */

        t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;

        dt.tm_isdst = -1; /* unknown */

        /* parse the .m... part */

            /* accumulate up to six fractional digits (microseconds) */
            for (val = 0, n = 100000; n >= 1; n /= 10, q++) {

                val += n * (*q - '0');

    return negative ? -t : t;
/**
 * Searches an '&'-separated "tag=value" info string for tag1 and, on a
 * match, copies the tag's value (bounded) into arg.
 *
 * @param arg      buffer receiving the value of the matching tag
 * @param arg_size size of arg in bytes
 * @param tag1     tag name to search for
 * @param info     '&'-separated "tag=value" string
 * NOTE(review): most of the copy logic and the return statements are on
 * elided lines; comments describe only the visible statements.
 */
int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)

        /* scan the tag name up to '=', '&' or end of string */
        while (*p != '\0' && *p != '=' && *p != '&') {
            /* bounded copy: leave room for the terminating NUL */
            if ((q - tag) < sizeof(tag) - 1)

            /* collect the value up to the next '&' or end of string */
            while (*p != '&' && *p != '\0') {
                if ((q - arg) < arg_size - 1) {

        if (!strcmp(tag, tag1))
/**
 * Expands a "%d"-style sequence (optionally zero-padded, e.g. "%05d") in
 * path with number, writing the result into buf — e.g. "img%03d.png"
 * with number 7 becomes "img007.png".
 *
 * @param buf      output buffer for the expanded filename
 * @param buf_size size of buf in bytes
 * @param path     template containing a %d-style sequence
 * @param number   value substituted for the sequence
 * NOTE(review): the return statements are on elided lines; presumably
 * an error is returned when no %d is found or buf is too small — confirm
 * against the full source.
 */
int av_get_frame_filename(char *buf, int buf_size,
                          const char *path, int number)

    char *q, buf1[20], c;
    int nd, len, percentd_found;

        /* accumulate the zero-pad width digits following '%' */
        while (isdigit(*p)) {
            nd = nd * 10 + *p++ - '0';

        } while (isdigit(c));

            /* render the number with the requested zero padding */
            snprintf(buf1, sizeof(buf1), "%0*d", nd, number);

            /* bounds check before appending the rendered number */
            if ((q - buf + len) > buf_size - 1)

            memcpy(q, buf1, len);

        /* copy a literal character, keeping room for the NUL */
        if ((q - buf) < buf_size - 1)

    if (!percentd_found)
/**
 * Dumps buf as hex plus printable-ASCII columns, 16 bytes per line,
 * either to stream f or, when f is NULL, through av_log(avcl, level,...).
 */
static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)

/* route output to av_log() when no stdio stream was supplied */
#define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)

    for(i=0;i<size;i+=16) {

            PRINT(" %02x", buf[i+j]);

        /* second pass over the same 16 bytes: printable-character column */
        for(j=0;j<len;j++) {

            /* replace non-printable bytes in the ASCII column */
            if (c < ' ' || c > '~')
/**
 * Hex-dumps a buffer to the given stdio stream, 16 bytes per line.
 *
 * @param f    stream to write to
 * @param buf  buffer to dump
 * @param size number of bytes to dump
 */
void av_hex_dump(FILE *f, uint8_t *buf, int size)
{
    hex_dump_internal(NULL, f, 0, buf, size);
}
3107 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3109 hex_dump_internal(avcl, NULL, level, buf, size);
//FIXME needs to know the time_base
/**
 * Dumps an AVPacket's metadata (stream index, keyframe flag, duration,
 * dts, pts, size) and optionally its payload, either to stream f or,
 * when f is NULL, through av_log(avcl, level, ...).
 */
static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)

/* route output to av_log() when no stdio stream was supplied */
#define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
    PRINT("stream #%d:\n", pkt->stream_index);
    PRINT(" keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
    PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
    /* DTS is _always_ valid after av_read_frame() */

    if (pkt->dts == AV_NOPTS_VALUE)

        PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
    /* PTS may not be known if B-frames are present. */

    if (pkt->pts == AV_NOPTS_VALUE)

        PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);

    PRINT(" size=%d\n", pkt->size);

        /* payload goes through the stdio-based dumper */
        av_hex_dump(f, pkt->data, pkt->size);
3138 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3140 pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
3143 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3145 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
/**
 * Splits a URL of the form proto://user[:pass]@host[:port]/path into its
 * components. Missing components yield empty strings; a missing port
 * yields -1. Any output pointer may receive a truncated (but always
 * NUL-terminated) copy, bounded by its *_size argument.
 *
 * NOTE(review): the port_ptr parameter, some else branches and closing
 * braces are on elided lines; comments describe the visible statements.
 */
void url_split(char *proto, int proto_size,
               char *authorization, int authorization_size,
               char *hostname, int hostname_size,
               char *path, int path_size,

    const char *p, *ls, *at, *col, *brk;

    /* initialize all outputs to "empty" defaults */
    if (port_ptr) *port_ptr = -1;
    if (proto_size > 0) proto[0] = 0;
    if (authorization_size > 0) authorization[0] = 0;
    if (hostname_size > 0) hostname[0] = 0;
    if (path_size > 0) path[0] = 0;

    /* parse protocol */
    if ((p = strchr(url, ':'))) {
        av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));

        /* no protocol means plain filename */
        av_strlcpy(path, url, path_size);

    /* separate path from hostname */
    ls = strchr(p, '/');

        /* no '/': a '?' query also terminates the hostname */
        ls = strchr(p, '?');

        av_strlcpy(path, ls, path_size);

        /* neither '/' nor '?': hostname runs to end of string */
        ls = &p[strlen(p)]; // XXX

    /* the rest is hostname, use that to parse auth/port */

        /* authorization (user[:pass]@hostname) */
        if ((at = strchr(p, '@')) && at < ls) {
            av_strlcpy(authorization, p,
                       FFMIN(authorization_size, at + 1 - p));
            p = at + 1; /* skip '@' */

        /* bracketed IPv6-style host: "[host]:port" */
        if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {

            av_strlcpy(hostname, p + 1,
                       FFMIN(hostname_size, brk - p));
            if (brk[1] == ':' && port_ptr)
                *port_ptr = atoi(brk + 2);
        } else if ((col = strchr(p, ':')) && col < ls) {
            /* "host:port" */
            av_strlcpy(hostname, p,
                       FFMIN(col + 1 - p, hostname_size));
            if (port_ptr) *port_ptr = atoi(col + 1);

            /* bare hostname, no port */
            av_strlcpy(hostname, p,
                       FFMIN(ls + 1 - p, hostname_size));
/**
 * Sets the stream's time base to pts_num/pts_den reduced by their gcd,
 * and records pts_wrap_bits on the stream.
 *
 * @param s             stream to configure
 * @param pts_wrap_bits timestamp wrap value stored in s->pts_wrap_bits
 * @param pts_num       time base numerator (before reduction)
 * @param pts_den       time base denominator (before reduction)
 */
void av_set_pts_info(AVStream *s, int pts_wrap_bits,
                     int pts_num, int pts_den)

    unsigned int gcd= ff_gcd(pts_num, pts_den);
    s->pts_wrap_bits = pts_wrap_bits;
    /* store the fraction in lowest terms */
    s->time_base.num = pts_num/gcd;
    s->time_base.den = pts_den/gcd;

        /* NOTE(review): the enclosing condition (presumably gcd > 1) is
         * on an elided line */
        av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, gcd);