2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 #include "libavcodec/opt.h"
25 #include "libavutil/avstring.h"
35 * @file libavformat/utils.c
36 * various utility functions for use within FFmpeg
/**
 * Return the LIBAVFORMAT_VERSION_INT the library was compiled with.
 * Callers compare this against the version they were built against.
 */
39 unsigned avformat_version(void)
41 return LIBAVFORMAT_VERSION_INT;
44 /* fraction handling */
/**
 * Initialize an AVFrac so that its real value is
47  * f = val + (num / den) + 0.5.
49  * 'num' is normalized so that it is such as 0 <= num < den.
51  * @param f fractional number
52  * @param val integer value
53  * @param num must be >= 0
54  * @param den must be >= 1
 */
56 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
/* NOTE(review): function body (assignments to f->val/f->num/f->den and the
 * num normalization loop) is elided in this extract. */
/**
69  * Fractional addition to f: f = f + (incr / f->den).
71  * @param f fractional number
72  * @param incr increment, can be positive or negative
 */
74 static void av_frac_add(AVFrac *f, int64_t incr)
/* NOTE(review): most of the body is elided; only the positive-overflow
 * normalization branch below is visible. */
87 } else if (num >= den) {
/* Singly linked registries of all (de)muxers known to this libavformat
 * instance; populated by av_register_(in|out)put_format() below. */
94 /** head of registered input format linked list */
95 AVInputFormat *first_iformat = NULL;
96 /** head of registered output format linked list */
97 AVOutputFormat *first_oformat = NULL;
/**
 * Iterate over registered input formats.
 * @param f NULL to get the first format, otherwise the previous result
 * @return the next registered input format, or the list head if f is NULL
 */
99 AVInputFormat *av_iformat_next(AVInputFormat *f)
101 if(f) return f->next;
102 else return first_iformat;
/**
 * Iterate over registered output formats.
 * @param f NULL to get the first format, otherwise the previous result
 * @return the next registered output format, or the list head if f is NULL
 */
105 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
107 if(f) return f->next;
108 else return first_oformat;
/**
 * Append a demuxer to the global input-format list.
 * Walks to the tail of first_iformat and links 'format' there.
 * NOTE(review): declaration of the walker pointer and the final link
 * statements are elided in this extract.
 */
111 void av_register_input_format(AVInputFormat *format)
115 while (*p != NULL) p = &(*p)->next;
/**
 * Append a muxer to the global output-format list.
 * Same tail-append pattern as av_register_input_format().
 */
120 void av_register_output_format(AVOutputFormat *format)
124 while (*p != NULL) p = &(*p)->next;
/**
 * Check whether the extension of 'filename' appears in the
 * comma-separated list 'extensions' (case-insensitive).
 * @return nonzero on a match — presumably 0 otherwise; the remaining
 *         control flow is elided in this extract.
 */
129 int match_ext(const char *filename, const char *extensions)
137 ext = strrchr(filename, '.');
/* copy one candidate extension into ext1, bounded to avoid overflow */
143 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
146 if (!strcasecmp(ext1, ext))
/**
 * Guess the output format from any of: short name, filename
 * (extension match), or MIME type, keeping the highest-scoring match.
 */
156 AVOutputFormat *guess_format(const char *short_name, const char *filename,
157 const char *mime_type)
159 AVOutputFormat *fmt, *fmt_found;
160 int score_max, score;
162 /* specific test for image sequences */
163 #ifdef CONFIG_IMAGE2_MUXER
/* a numbered filename with a known image codec means an image2 sequence */
164 if (!short_name && filename &&
165 av_filename_number_test(filename) &&
166 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
167 return guess_format("image2", NULL, NULL);
170 /* Find the proper file type. */
/* NOTE(review): the per-criterion score increments between the tests
 * below are elided in this extract. */
174 while (fmt != NULL) {
176 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
178 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
180 if (filename && fmt->extensions &&
181 match_ext(filename, fmt->extensions)) {
184 if (score > score_max) {
/**
 * Like guess_format(), but then look up the streaming variant of the
 * guessed muxer by probing for a "<name>_stream" format.
 */
193 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
194 const char *mime_type)
196 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
199 AVOutputFormat *stream_fmt;
200 char stream_format_name[64];
202 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
203 stream_fmt = guess_format(stream_format_name, NULL, NULL);
/**
 * Pick the default codec id for a stream of the given type in 'fmt'.
 * Video: image2/image2pipe muxers get a per-filename image codec,
 * otherwise the muxer's default video codec. Audio: the muxer default.
 * Any other type: CODEC_ID_NONE.
 */
212 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
213 const char *filename, const char *mime_type, enum CodecType type){
214 if(type == CODEC_TYPE_VIDEO){
215 enum CodecID codec_id= CODEC_ID_NONE;
217 #ifdef CONFIG_IMAGE2_MUXER
218 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
219 codec_id= av_guess_image2_codec(filename);
/* fall back to the muxer's default when no image codec was found */
222 if(codec_id == CODEC_ID_NONE)
223 codec_id= fmt->video_codec;
225 }else if(type == CODEC_TYPE_AUDIO)
226 return fmt->audio_codec;
228 return CODEC_ID_NONE;
/**
 * Find a registered demuxer by its exact short name
 * (linear scan of the first_iformat list).
 */
231 AVInputFormat *av_find_input_format(const char *short_name)
234 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
235 if (!strcmp(fmt->name, short_name))
241 /* memory handling */
/**
 * Default packet destructor for packets that own their buffer.
 * NOTE(review): the av_free(pkt->data) call is elided in this extract;
 * only the pointer/size reset is visible.
 */
243 void av_destruct_packet(AVPacket *pkt)
246 pkt->data = NULL; pkt->size = 0;
/**
 * Initialize the optional fields of a packet to safe defaults
 * (timestamps unset, stream 0, non-owning destructor). The data/size
 * fields are deliberately left untouched by this function.
 */
249 void av_init_packet(AVPacket *pkt)
251 pkt->pts = AV_NOPTS_VALUE;
252 pkt->dts = AV_NOPTS_VALUE;
255 pkt->convergence_duration = 0;
257 pkt->stream_index = 0;
/* nofree destructor: packet does not own its data until someone
 * allocates for it (see av_new_packet / av_dup_packet) */
258 pkt->destruct= av_destruct_packet_nofree;
/**
 * Allocate the payload of a packet: size bytes plus
 * FF_INPUT_BUFFER_PADDING_SIZE of zeroed padding (required by
 * bitstream readers that over-read).
 * @return 0 on success, AVERROR(ENOMEM) on overflow or OOM
 */
261 int av_new_packet(AVPacket *pkt, int size)
/* unsigned wraparound test: rejects sizes where adding padding overflows */
264 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
265 return AVERROR(ENOMEM);
266 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
268 return AVERROR(ENOMEM);
269 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
/* packet now owns the buffer */
274 pkt->destruct = av_destruct_packet;
/**
 * Allocate a packet of 'size' bytes and fill it by reading from the
 * byte stream 's', recording the stream position in pkt->pos.
 * NOTE(review): handling of short reads / get_buffer errors is elided
 * in this extract.
 */
278 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
280 int ret= av_new_packet(pkt, size);
285 pkt->pos= url_ftell(s);
287 ret= get_buffer(s, pkt->data, size);
/**
 * If the packet does not own its data (destructor is not
 * av_destruct_packet), copy the payload into a freshly allocated,
 * padded buffer so the packet can outlive its source.
 * @return 0 on success, AVERROR(ENOMEM) on overflow or OOM
 */
296 int av_dup_packet(AVPacket *pkt)
298 if (pkt->destruct != av_destruct_packet) {
300 /* We duplicate the packet and don't forget to add the padding again. */
/* same unsigned wraparound guard as av_new_packet() */
301 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
302 return AVERROR(ENOMEM);
303 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
305 return AVERROR(ENOMEM);
307 memcpy(data, pkt->data, pkt->size);
308 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
310 pkt->destruct = av_destruct_packet;
/**
 * Return 1 if 'filename' contains a %d-style frame-number pattern that
 * av_get_frame_filename() can expand, 0 otherwise (including NULL input).
 */
315 int av_filename_number_test(const char *filename)
318 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
/**
 * Probe all registered demuxers against 'pd' and return the one with
 * the highest score above *score_max, updating *score_max in place.
 * Formats flagged AVFMT_NOFILE are only considered when no file has
 * been opened (and vice versa).
 */
321 static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
323 AVInputFormat *fmt1, *fmt;
327 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
/* skip formats whose NOFILE flag does not match the open state */
328 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
331 if (fmt1->read_probe) {
332 score = fmt1->read_probe(pd);
333 } else if (fmt1->extensions) {
/* no probe function: fall back to an extension match */
334 if (match_ext(pd->filename, fmt1->extensions)) {
338 if (score > *score_max) {
/* NOTE(review): a tie apparently demotes the result — the branch body
 * is elided in this extract. */
341 }else if (score == *score_max)
/**
 * Convenience wrapper around av_probe_input_format2() with a local
 * score variable (initial threshold elided in this extract).
 */
347 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
349 return av_probe_input_format2(pd, is_opened, &score);
/**
 * Re-probe a stream's buffered data and, for a small whitelist of
 * elementary-stream demuxers, set the stream's codec id/type directly
 * from the probed format name.
 */
352 static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
355 fmt = av_probe_input_format2(pd, 1, &score);
/* map probed demuxer name -> codec id/type */
358 if (!strcmp(fmt->name, "mp3")) {
359 st->codec->codec_id = CODEC_ID_MP3;
360 st->codec->codec_type = CODEC_TYPE_AUDIO;
361 } else if (!strcmp(fmt->name, "ac3")) {
362 st->codec->codec_id = CODEC_ID_AC3;
363 st->codec->codec_type = CODEC_TYPE_AUDIO;
364 } else if (!strcmp(fmt->name, "mpegvideo")) {
365 st->codec->codec_id = CODEC_ID_MPEG2VIDEO;
366 st->codec->codec_type = CODEC_TYPE_VIDEO;
367 } else if (!strcmp(fmt->name, "m4v")) {
368 st->codec->codec_id = CODEC_ID_MPEG4;
369 st->codec->codec_type = CODEC_TYPE_VIDEO;
370 } else if (!strcmp(fmt->name, "h264")) {
371 st->codec->codec_id = CODEC_ID_H264;
372 st->codec->codec_type = CODEC_TYPE_VIDEO;
378 /************************************************************/
379 /* input media file */
382 * Open a media file from an IO stream. 'fmt' must be specified.
/* NOTE(review): the doc comment above is misplaced — it describes
 * av_open_input_stream(), not this function. format_to_name() is the
 * AVClass item_name callback: it returns the (de)muxer name of an
 * AVFormatContext, preferring iformat over oformat. */
384 static const char* format_to_name(void* ptr)
386 AVFormatContext* fc = (AVFormatContext*) ptr;
387 if(fc->iformat) return fc->iformat->name;
388 else if(fc->oformat) return fc->oformat->name;
/* AVOption table describing the user-settable AVFormatContext fields.
 * Each row: name, help, field offset, type, default, min, max, flags
 * (and an optional named-constant unit for FLAGS options). */
392 #define OFFSET(x) offsetof(AVFormatContext,x)
393 #define DEFAULT 0 //should be NAN but it does not work as it is not a constant in glibc as required by ANSI/ISO C
394 //these names are too long to be readable
395 #define E AV_OPT_FLAG_ENCODING_PARAM
396 #define D AV_OPT_FLAG_DECODING_PARAM
398 static const AVOption options[]={
399 {"probesize", NULL, OFFSET(probesize), FF_OPT_TYPE_INT, 32000, 32, INT_MAX, D}, /* 32000 from mpegts.c: 1.0 second at 24Mbit/s */
400 {"muxrate", "set mux rate", OFFSET(mux_rate), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
401 {"packetsize", "set packet size", OFFSET(packet_size), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
402 {"fflags", NULL, OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, D|E, "fflags"},
403 {"ignidx", "ignore index", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_IGNIDX, INT_MIN, INT_MAX, D, "fflags"},
404 {"genpts", "generate pts", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_GENPTS, INT_MIN, INT_MAX, D, "fflags"},
/* NOTE(review): the help string below has a stray leading space
 * (" set the track number") — user-visible typo; a string literal, so
 * not fixable in a comments-only pass. */
405 {"track", " set the track number", OFFSET(track), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
406 {"year", "set the year", OFFSET(year), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, E},
407 {"analyzeduration", "how many microseconds are analyzed to estimate duration", OFFSET(max_analyze_duration), FF_OPT_TYPE_INT, 3*AV_TIME_BASE, 0, INT_MAX, D},
408 {"cryptokey", "decryption key", OFFSET(key), FF_OPT_TYPE_BINARY, 0, 0, 0, D},
409 {"indexmem", "max memory used for timestamp index (per stream)", OFFSET(max_index_size), FF_OPT_TYPE_INT, 1<<20, 0, INT_MAX, D},
410 {"rtbufsize", "max memory used for buffering real-time frames", OFFSET(max_picture_buffer), FF_OPT_TYPE_INT, 3041280, 0, INT_MAX, D}, /* defaults to 1s of 15fps 352x288 YUYV422 video */
411 {"fdebug", "print specific debug info", OFFSET(debug), FF_OPT_TYPE_FLAGS, DEFAULT, 0, INT_MAX, E|D, "fdebug"},
412 {"ts", NULL, 0, FF_OPT_TYPE_CONST, FF_FDEBUG_TS, INT_MIN, INT_MAX, E|D, "fdebug"},
/* AVClass binding the option table and name callback to AVFormatContext */
420 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name, options };
/**
 * Zero an AVFormatContext, attach its AVClass, and apply the option
 * table defaults via av_opt_set_defaults().
 */
422 static void avformat_get_context_defaults(AVFormatContext *s)
424 memset(s, 0, sizeof(AVFormatContext));
426 s->av_class = &av_format_context_class;
428 av_opt_set_defaults(s);
/**
 * Allocate an AVFormatContext and initialize it with the defaults
 * above. NOTE(review): the NULL check on av_malloc's result is elided
 * in this extract — presumably present between lines 434 and 436.
 */
431 AVFormatContext *av_alloc_format_context(void)
434 ic = av_malloc(sizeof(AVFormatContext));
436 avformat_get_context_defaults(ic);
/* redundant with avformat_get_context_defaults(), but harmless */
437 ic->av_class = &av_format_context_class;
/**
 * Open a media "file" from an already-opened IO stream. 'fmt' must be
 * specified. Allocates the context (unless ap->prealloced_context),
 * allocates demuxer private data, calls the demuxer's read_header(),
 * and records the post-header data offset. On failure, frees all
 * per-stream state accumulated so far.
 */
441 int av_open_input_stream(AVFormatContext **ic_ptr,
442 ByteIOContext *pb, const char *filename,
443 AVInputFormat *fmt, AVFormatParameters *ap)
447 AVFormatParameters default_ap;
/* use a zeroed default parameter block when the caller passed none */
451 memset(ap, 0, sizeof(default_ap));
454 if(!ap->prealloced_context)
455 ic = av_alloc_format_context();
459 err = AVERROR(ENOMEM);
464 ic->duration = AV_NOPTS_VALUE;
465 ic->start_time = AV_NOPTS_VALUE;
466 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
468 /* allocate private data */
469 if (fmt->priv_data_size > 0) {
470 ic->priv_data = av_mallocz(fmt->priv_data_size);
471 if (!ic->priv_data) {
472 err = AVERROR(ENOMEM);
476 ic->priv_data = NULL;
479 if (ic->iformat->read_header) {
480 err = ic->iformat->read_header(ic, ap);
/* remember where the payload starts, for seeking back later */
485 if (pb && !ic->data_offset)
486 ic->data_offset = url_ftell(ic->pb);
/* error path (label elided): unwind everything allocated above */
493 av_freep(&ic->priv_data);
494 for(i=0;i<ic->nb_streams;i++) {
495 AVStream *st = ic->streams[i];
497 av_free(st->priv_data);
498 av_free(st->codec->extradata);
508 /** size of probe buffer, for guessing file type from file contents */
509 #define PROBE_BUF_MIN 2048
510 #define PROBE_BUF_MAX (1<<20)
/**
 * Open a media file by name: probe the format (first by filename alone,
 * then by reading progressively larger chunks of the file, doubling the
 * probe buffer up to PROBE_BUF_MAX), then hand off to
 * av_open_input_stream().
 */
512 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
515 AVFormatParameters *ap)
518 AVProbeData probe_data, *pd = &probe_data;
519 ByteIOContext *pb = NULL;
523 pd->filename = filename;
528 /* guess format if no file can be opened */
529 fmt = av_probe_input_format(pd, 0);
532 /* Do not open file if the format does not need it. XXX: specific
533 hack needed to handle RTSP/TCP */
534 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
535 /* if no file needed do not try to open one */
536 if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
540 url_setbufsize(pb, buf_size);
/* retry with ever larger probe buffers; only the final, largest pass
 * accepts a zero minimum score */
543 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
544 int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
545 /* read probe data */
546 pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
547 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
548 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
/* rewind; if the stream is not seekable, reopen from scratch */
549 if (url_fseek(pb, 0, SEEK_SET) < 0) {
551 if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
557 /* guess file format */
558 fmt = av_probe_input_format2(pd, 1, &score);
563 /* if still no format found, error */
569 /* check filename in case an image number is expected */
570 if (fmt->flags & AVFMT_NEEDNUMBER) {
571 if (!av_filename_number_test(filename)) {
572 err = AVERROR_NUMEXPECTED;
576 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
589 /*******************************************************/
/**
 * Append a packet to a singly linked packet list, using *plast_pktl as
 * a cached tail pointer for O(1) append. Returns the stored packet
 * (return statement elided in this extract).
 */
591 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
592 AVPacketList **plast_pktl){
593 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
598 (*plast_pktl)->next = pktl;
/* empty list: new node becomes the head */
600 *packet_buffer = pktl;
602 /* add the packet in the buffered packet list */
/**
 * Read the next raw packet from the demuxer. Packets for streams whose
 * codec is still CODEC_ID_PROBE are buffered in raw_packet_buffer and
 * their payload accumulated into st->probe_data until the content probe
 * (set_codec_from_probe_data) identifies the codec.
 */
608 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
/* first drain any packet already buffered for a now-identified stream */
614 AVPacketList *pktl = s->raw_packet_buffer;
618 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE){
619 s->raw_packet_buffer = pktl->next;
626 ret= s->iformat->read_packet(s, pkt);
629 st= s->streams[pkt->stream_index];
/* honor user-forced codec ids per media type */
631 switch(st->codec->codec_type){
632 case CODEC_TYPE_VIDEO:
633 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
635 case CODEC_TYPE_AUDIO:
636 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
638 case CODEC_TYPE_SUBTITLE:
639 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
/* fast path: nothing buffered and codec already known */
643 if(!pktl && st->codec->codec_id!=CODEC_ID_PROBE)
646 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
648 if(st->codec->codec_id == CODEC_ID_PROBE){
649 AVProbeData *pd = &st->probe_data;
651 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
652 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
653 pd->buf_size += pkt->size;
654 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
/* re-probe only when the buffer size crosses a power-of-two boundary,
 * to keep probing cost logarithmic in data read */
656 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
657 set_codec_from_probe_data(st, pd, 1);
658 if(st->codec->codec_id != CODEC_ID_PROBE){
667 /**********************************************************/
/**
670  * Get the number of samples of an audio frame. Return -1 on error.
 * For codecs without a fixed frame size, derives the count from
 * bits-per-sample (PCM-style) or from the bit rate (e.g. ADPCM).
 */
672 static int get_audio_frame_size(AVCodecContext *enc, int size)
/* Vorbis frame size is not knowable here — handled specially
 * (branch body elided in this extract) */
676 if(enc->codec_id == CODEC_ID_VORBIS)
679 if (enc->frame_size <= 1) {
680 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
682 if (bits_per_sample) {
683 if (enc->channels == 0)
685 frame_size = (size << 3) / (bits_per_sample * enc->channels);
687 /* used for example by ADPCM codecs */
688 if (enc->bit_rate == 0)
690 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
693 frame_size = enc->frame_size;
/**
700  * Return the frame duration in seconds. Return 0 if not available.
 * NOTE(review): despite the wording above, this returns the duration
 * as a rational *pnum / *pden (in units of the stream time base), not
 * in seconds.
 */
702 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
703 AVCodecParserContext *pc, AVPacket *pkt)
709 switch(st->codec->codec_type) {
710 case CODEC_TYPE_VIDEO:
/* prefer the container time base if it is plausible (den/num < 1000) */
711 if(st->time_base.num*1000LL > st->time_base.den){
712 *pnum = st->time_base.num;
713 *pden = st->time_base.den;
714 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
715 *pnum = st->codec->time_base.num;
716 *pden = st->codec->time_base.den;
/* interlaced repeat-field / pulldown lengthens the frame */
717 if (pc && pc->repeat_pict) {
719 *pnum = (*pnum) * (2 + pc->repeat_pict);
723 case CODEC_TYPE_AUDIO:
724 frame_size = get_audio_frame_size(st->codec, pkt->size);
728 *pden = st->codec->sample_rate;
/**
 * Return nonzero if every frame of the codec is a keyframe:
 * all audio codecs, plus a whitelist of intra-only video codecs
 * (list partially elided in this extract).
 */
735 static int is_intra_only(AVCodecContext *enc){
736 if(enc->codec_type == CODEC_TYPE_AUDIO){
738 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
739 switch(enc->codec_id){
741 case CODEC_ID_MJPEGB:
743 case CODEC_ID_RAWVIDEO:
744 case CODEC_ID_DVVIDEO:
745 case CODEC_ID_HUFFYUV:
746 case CODEC_ID_FFVHUFF:
/**
 * Once the first valid DTS for a stream is known, compute the offset
 * st->first_dts and retroactively shift the timestamps of all packets
 * already sitting in the packet buffer, so the stream starts at its
 * nominal origin. Also latches st->start_time from the first PTS.
 */
758 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
759 int64_t dts, int64_t pts)
761 AVStream *st= s->streams[stream_index];
762 AVPacketList *pktl= s->packet_buffer;
/* only run once per stream, and only when we have both a dts and a
 * reference cur_dts to offset against */
764 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
767 st->first_dts= dts - st->cur_dts;
770 for(; pktl; pktl= pktl->next){
771 if(pktl->pkt.stream_index != stream_index)
773 //FIXME think more about this check
/* only shift pts when it equals dts (i.e. no B-frame reordering) */
774 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
775 pktl->pkt.pts += st->first_dts;
777 if(pktl->pkt.dts != AV_NOPTS_VALUE)
778 pktl->pkt.dts += st->first_dts;
780 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
781 st->start_time= pktl->pkt.pts;
783 if (st->start_time == AV_NOPTS_VALUE)
784 st->start_time = pts;
/**
 * For streams whose buffered packets carry no timestamps and no
 * duration, synthesize dts (and pts, for non-B-frame streams) by
 * stepping a running clock in increments of pkt->duration, anchored at
 * st->first_dts when known.
 */
787 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
789 AVPacketList *pktl= s->packet_buffer;
792 if(st->first_dts != AV_NOPTS_VALUE){
793 cur_dts= st->first_dts;
/* walk back from first_dts over the run of timestamp-less packets to
 * find where the clock should start */
794 for(; pktl; pktl= pktl->next){
795 if(pktl->pkt.stream_index == pkt->stream_index){
796 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
798 cur_dts -= pkt->duration;
801 pktl= s->packet_buffer;
802 st->first_dts = cur_dts;
803 }else if(st->cur_dts)
/* fill in timestamps for every buffered, timestamp-less packet */
806 for(; pktl; pktl= pktl->next){
807 if(pktl->pkt.stream_index != pkt->stream_index)
809 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
810 && !pktl->pkt.duration){
811 pktl->pkt.dts= cur_dts;
812 if(!st->codec->has_b_frames)
813 pktl->pkt.pts= cur_dts;
814 cur_dts += pkt->duration;
815 pktl->pkt.duration= pkt->duration;
819 if(st->first_dts == AV_NOPTS_VALUE)
820 st->cur_dts= cur_dts;
/**
 * Fill in missing AVPacket fields (pts, dts, duration, key flag) after
 * demuxing/parsing: detects B-frame presentation delay, repairs
 * wrapped/contradictory timestamps, interpolates pts/dts from cur_dts,
 * and maintains the per-stream pts reorder buffer.
 * NOTE(review): several statements are elided in this extract; the
 * comments below describe only the visible logic.
 */
823 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
824 AVCodecParserContext *pc, AVPacket *pkt)
826 int num, den, presentation_delayed, delay, i;
829 /* do we have a video B-frame ? */
830 delay= st->codec->has_b_frames;
831 presentation_delayed = 0;
832 /* XXX: need has_b_frame, but cannot get it if the codec is
835 pc && pc->pict_type != FF_B_TYPE)
836 presentation_delayed = 1;
/* undo a pts/dts wraparound: dts ahead of pts by a full wrap period */
838 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
839 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
840 pkt->dts -= 1LL<<st->pts_wrap_bits;
843 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
844 // we take the conservative approach and discard both
845 // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
846 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
847 av_log(s, AV_LOG_ERROR, "invalid dts/pts combination\n");
848 pkt->dts= pkt->pts= AV_NOPTS_VALUE;
/* synthesize a duration from the frame rate / sample rate if absent */
851 if (pkt->duration == 0) {
852 compute_frame_duration(&num, &den, st, pc, pkt);
854 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
856 if(pkt->duration != 0 && s->packet_buffer)
857 update_initial_durations(s, st, pkt);
861 /* correct timestamps with byte offset if demuxers only have timestamps
862 on packet boundaries */
863 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
864 /* this will estimate bitrate based on this frame's duration and size */
865 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
866 if(pkt->pts != AV_NOPTS_VALUE)
868 if(pkt->dts != AV_NOPTS_VALUE)
872 /* This may be redundant, but it should not hurt. */
873 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
874 presentation_delayed = 1;
876 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
877 /* interpolate PTS and DTS if they are not present */
878 if(delay==0 || (delay==1 && pc)){
879 if (presentation_delayed) {
880 /* DTS = decompression timestamp */
881 /* PTS = presentation timestamp */
882 if (pkt->dts == AV_NOPTS_VALUE)
883 pkt->dts = st->last_IP_pts;
884 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
885 if (pkt->dts == AV_NOPTS_VALUE)
886 pkt->dts = st->cur_dts;
888 /* this is tricky: the dts must be incremented by the duration
889 of the frame we are displaying, i.e. the last I- or P-frame */
890 if (st->last_IP_duration == 0)
891 st->last_IP_duration = pkt->duration;
892 if(pkt->dts != AV_NOPTS_VALUE)
893 st->cur_dts = pkt->dts + st->last_IP_duration;
894 st->last_IP_duration = pkt->duration;
895 st->last_IP_pts= pkt->pts;
896 /* cannot compute PTS if not present (we can compute it only
897 by knowing the future */
898 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
/* heuristic: if cur_dts suggests the pts is one duration behind,
 * nudge it forward */
899 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
900 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
901 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
902 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
903 pkt->pts += pkt->duration;
904 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
908 /* presentation is not delayed : PTS and DTS are the same */
909 if(pkt->pts == AV_NOPTS_VALUE)
911 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
912 if(pkt->pts == AV_NOPTS_VALUE)
913 pkt->pts = st->cur_dts;
915 if(pkt->pts != AV_NOPTS_VALUE)
916 st->cur_dts = pkt->pts + pkt->duration;
/* B-frame reorder buffer: the smallest pts seen within the delay
 * window is the dts of the current packet */
920 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
921 st->pts_buffer[0]= pkt->pts;
922 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
923 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
924 if(pkt->dts == AV_NOPTS_VALUE)
925 pkt->dts= st->pts_buffer[0];
927 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
929 if(pkt->dts > st->cur_dts)
930 st->cur_dts = pkt->dts;
933 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
/* intra-only codecs: every packet is a keyframe */
936 if(is_intra_only(st->codec))
937 pkt->flags |= PKT_FLAG_KEY;
940 /* keyframe computation */
941 if (pc->pict_type == FF_I_TYPE)
942 pkt->flags |= PKT_FLAG_KEY;
/**
 * Destructor for packets that do NOT own their buffer: just detach the
 * pointer and size without freeing.
 */
946 void av_destruct_packet_nofree(AVPacket *pkt)
948 pkt->data = NULL; pkt->size = 0;
/**
 * Core read loop: fetch raw packets via av_read_packet(), feed them
 * through the stream's parser when one is needed, and emit complete
 * frames with timestamps fixed up by compute_pkt_fields(). At EOF,
 * flushes the parsers to return any final buffered frames.
 * NOTE(review): loop structure and several statements are elided in
 * this extract.
 */
951 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
959 /* select current input stream component */
962 if (!st->need_parsing || !st->parser) {
963 /* no parsing needed: we just output the packet as is */
964 /* raw data support */
966 compute_pkt_fields(s, st, NULL, pkt);
/* parser path: consume the current packet's remaining bytes */
969 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
970 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
971 s->cur_ptr, s->cur_len,
972 s->cur_pkt.pts, s->cur_pkt.dts);
/* the timestamps belong to the first frame parsed from this packet;
 * clear them so later frames don't reuse them */
973 s->cur_pkt.pts = AV_NOPTS_VALUE;
974 s->cur_pkt.dts = AV_NOPTS_VALUE;
975 /* increment read pointer */
979 /* return packet if any */
982 pkt->pos = s->cur_pkt.pos; // Isn't quite accurate but close.
984 pkt->stream_index = st->index;
985 pkt->pts = st->parser->pts;
986 pkt->dts = st->parser->dts;
/* parser output points into the parser's buffer — not owned */
987 pkt->destruct = av_destruct_packet_nofree;
988 compute_pkt_fields(s, st, st->parser, pkt);
990 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
991 ff_reduce_index(s, st->index);
992 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
993 0, 0, AVINDEX_KEYFRAME);
/* current packet fully consumed by the parser */
1000 av_free_packet(&s->cur_pkt);
1004 /* read next packet */
1005 ret = av_read_packet(s, &s->cur_pkt);
1007 if (ret == AVERROR(EAGAIN))
1009 /* return the last frames, if any */
1010 for(i = 0; i < s->nb_streams; i++) {
1012 if (st->parser && st->need_parsing) {
/* NULL data flushes the parser */
1013 av_parser_parse(st->parser, st->codec,
1014 &pkt->data, &pkt->size,
1016 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
1021 /* no more packets: really terminate parsing */
/* sanity check: pts must never precede dts */
1025 if(s->cur_pkt.pts != AV_NOPTS_VALUE &&
1026 s->cur_pkt.dts != AV_NOPTS_VALUE &&
1027 s->cur_pkt.pts < s->cur_pkt.dts){
1028 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1029 s->cur_pkt.stream_index,
1033 // av_free_packet(&s->cur_pkt);
1037 st = s->streams[s->cur_pkt.stream_index];
1038 if(s->debug & FF_FDEBUG_TS)
1039 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
1040 s->cur_pkt.stream_index,
1047 s->cur_ptr = s->cur_pkt.data;
1048 s->cur_len = s->cur_pkt.size;
/* lazily create the parser on first use for this stream */
1049 if (st->need_parsing && !st->parser) {
1050 st->parser = av_parser_init(st->codec->codec_id);
1052 /* no parser available: just output the raw packets */
1053 st->need_parsing = AVSTREAM_PARSE_NONE;
1054 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
1055 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1057 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
1058 st->parser->next_frame_offset=
1059 st->parser->cur_offset= s->cur_pkt.pos;
1064 if(s->debug & FF_FDEBUG_TS)
1065 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
/**
 * Public frame reader. Without AVFMT_FLAG_GENPTS it simply forwards
 * av_read_frame_internal(). With it, frames are buffered until a
 * missing pts can be filled in from the dts of a later packet of the
 * same stream (B-frame pts reconstruction).
 */
1075 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1079 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1082 pktl = s->packet_buffer;
1084 AVPacket *next_pkt= &pktl->pkt;
1086 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
/* scan ahead for a later packet whose dts bounds this pts */
1087 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1088 if( pktl->pkt.stream_index == next_pkt->stream_index
1089 && next_pkt->dts < pktl->pkt.dts
1090 && pktl->pkt.pts != pktl->pkt.dts //not b frame
1091 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
1092 next_pkt->pts= pktl->pkt.dts;
1096 pktl = s->packet_buffer;
/* emit the head packet once its pts is known (or unknowable) */
1099 if( next_pkt->pts != AV_NOPTS_VALUE
1100 || next_pkt->dts == AV_NOPTS_VALUE
1102 /* read packet from packet buffer, if there is data */
1104 s->packet_buffer = pktl->next;
1110 int ret= av_read_frame_internal(s, pkt);
1112 if(pktl && ret != AVERROR(EAGAIN)){
/* buffer the new frame; must own its data since it is kept */
1119 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1120 &s->packet_buffer_end)) < 0)
1121 return AVERROR(ENOMEM);
1123 assert(!s->packet_buffer);
1124 return av_read_frame_internal(s, pkt);
1129 /* XXX: suppress the packet queue */
/**
 * Free every packet buffered in s->packet_buffer (list-node frees are
 * elided in this extract).
 */
1130 static void flush_packet_queue(AVFormatContext *s)
1135 pktl = s->packet_buffer;
1138 s->packet_buffer = pktl->next;
1139 av_free_packet(&pktl->pkt);
1144 /*******************************************************/
1147 int av_find_default_stream_index(AVFormatContext *s)
1149 int first_audio_index = -1;
1153 if (s->nb_streams <= 0)
1155 for(i = 0; i < s->nb_streams; i++) {
1157 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1160 if (first_audio_index < 0 && st->codec->codec_type == CODEC_TYPE_AUDIO)
1161 first_audio_index = i;
1163 return first_audio_index >= 0 ? first_audio_index : 0;
/**
1167  * Flush the frame reader.
 * Drops all buffered packets, frees the in-flight parser packet, closes
 * every stream's parser and resets per-stream timestamp state — used
 * before/after seeking.
 */
1169 static void av_read_frame_flush(AVFormatContext *s)
1174 flush_packet_queue(s);
1176 /* free previous packet */
1178 if (s->cur_st->parser)
1179 av_free_packet(&s->cur_pkt);
1186 /* for each stream, reset read state */
1187 for(i = 0; i < s->nb_streams; i++) {
1191 av_parser_close(st->parser);
1194 st->last_IP_pts = AV_NOPTS_VALUE;
1195 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
/**
 * After a seek on 'ref_st' to 'timestamp', set cur_dts of every stream
 * to that instant rescaled into each stream's own time base.
 */
1199 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1202 for(i = 0; i < s->nb_streams; i++) {
1203 AVStream *st = s->streams[i];
/* timestamp * (ref time base) / (stream time base), in 64-bit */
1205 st->cur_dts = av_rescale(timestamp,
1206 st->time_base.den * (int64_t)ref_st->time_base.num,
1207 st->time_base.num * (int64_t)ref_st->time_base.den);
/**
 * Bound the memory used by a stream's seek index: when the entry count
 * reaches max_index_size bytes worth, keep every second entry
 * (halving resolution rather than dropping the tail).
 */
1211 void ff_reduce_index(AVFormatContext *s, int stream_index)
1213 AVStream *st= s->streams[stream_index];
1214 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1216 if((unsigned)st->nb_index_entries >= max_entries){
1218 for(i=0; 2*i<st->nb_index_entries; i++)
1219 st->index_entries[i]= st->index_entries[2*i];
1220 st->nb_index_entries= i;
/**
 * Insert a (pos, timestamp) entry into the stream's sorted seek index,
 * growing the array as needed. An existing entry with the same
 * timestamp is updated in place; otherwise the new entry is inserted
 * in timestamp order.
 */
1224 int av_add_index_entry(AVStream *st,
1225 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1227 AVIndexEntry *entries, *ie;
/* overflow guard before computing the new allocation size */
1230 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1233 entries = av_fast_realloc(st->index_entries,
1234 &st->index_entries_allocated_size,
1235 (st->nb_index_entries + 1) *
1236 sizeof(AVIndexEntry));
1240 st->index_entries= entries;
1242 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
/* not found: append at the end (entries arrive mostly in order) */
1245 index= st->nb_index_entries++;
1246 ie= &entries[index];
1247 assert(index==0 || ie[-1].timestamp < timestamp);
1249 ie= &entries[index];
1250 if(ie->timestamp != timestamp){
1251 if(ie->timestamp <= timestamp)
/* shift the tail up to make room at 'index' */
1253 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1254 st->nb_index_entries++;
1255 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1256 distance= ie->min_distance;
1260 ie->timestamp = timestamp;
1261 ie->min_distance= distance;
/**
 * Binary-search the seek index for 'wanted_timestamp'. With
 * AVSEEK_FLAG_BACKWARD the entry at or before it is chosen, otherwise
 * at or after; unless AVSEEK_FLAG_ANY is set, the result is then walked
 * to the nearest keyframe entry in the seek direction.
 */
1268 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1271 AVIndexEntry *entries= st->index_entries;
1272 int nb_entries= st->nb_index_entries;
/* bisection body (loop header and a/b updates elided in this extract) */
1281 timestamp = entries[m].timestamp;
1282 if(timestamp >= wanted_timestamp)
1284 if(timestamp <= wanted_timestamp)
1287 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1289 if(!(flags & AVSEEK_FLAG_ANY)){
1290 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1291 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/**
 * Seek using the demuxer's read_timestamp() callback: seeds the search
 * bounds (pos/ts min and max) from the seek index when available, then
 * delegates to av_gen_search() and repositions the byte stream to the
 * returned offset.
 */
1302 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1303 AVInputFormat *avif= s->iformat;
1304 int64_t pos_min, pos_max, pos, pos_limit;
1305 int64_t ts_min, ts_max, ts;
1309 if (stream_index < 0)
1313 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1317 ts_min= AV_NOPTS_VALUE;
1318 pos_limit= -1; //gcc falsely says it may be uninitialized
1320 st= s->streams[stream_index];
1321 if(st->index_entries){
/* lower bound from the cached index entry at or before target_ts */
1324 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1325 index= FFMAX(index, 0);
1326 e= &st->index_entries[index];
1328 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1330 ts_min= e->timestamp;
1332 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
/* upper bound from the index entry at or after target_ts */
1339 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1340 assert(index < st->nb_index_entries);
1342 e= &st->index_entries[index];
1343 assert(e->timestamp >= target_ts);
1345 ts_max= e->timestamp;
1346 pos_limit= pos_max - e->min_distance;
1348 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1349 pos_max,pos_limit, ts_max);
1354 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1359 url_fseek(s->pb, pos, SEEK_SET);
1361 av_update_cur_dts(s, st, ts);
/**
 * Generic timestamp-based binary search over a byte stream. Given
 * optional [pos_min,pos_max]/[ts_min,ts_max] bounds (AV_NOPTS_VALUE =
 * derive from the file), narrow in on target_ts using a mix of
 * interpolation, bisection and linear search, reading timestamps via
 * the demuxer-supplied read_timestamp() callback. The found timestamp
 * is stored in *ts_ret and its byte position returned.
 */
1366 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1368 int64_t start_pos, filesize;
1372 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
/* derive the lower bound from the first timestamp in the file */
1375 if(ts_min == AV_NOPTS_VALUE){
1376 pos_min = s->data_offset;
1377 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1378 if (ts_min == AV_NOPTS_VALUE)
/* derive the upper bound by stepping back from EOF until a timestamp
 * is found */
1382 if(ts_max == AV_NOPTS_VALUE){
1384 filesize = url_fsize(s->pb);
1385 pos_max = filesize - 1;
1388 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1390 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1391 if (ts_max == AV_NOPTS_VALUE)
/* extend ts_max to cover timestamps up to the true last frame */
1395 int64_t tmp_pos= pos_max + 1;
1396 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1397 if(tmp_ts == AV_NOPTS_VALUE)
1401 if(tmp_pos >= filesize)
1407 if(ts_min > ts_max){
1409 }else if(ts_min == ts_max){
1414 while (pos_min < pos_limit) {
1416 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1420 assert(pos_limit <= pos_max);
1423 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1424 // interpolate position (better than dichotomy)
1425 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1426 + pos_min - approximate_keyframe_distance;
1427 }else if(no_change==1){
1428 // bisection, if interpolation failed to change min or max pos last time
1429 pos = (pos_min + pos_limit)>>1;
1431 /* linear search if bisection failed, can only happen if there
1432 are very few or no keyframes between min/max */
/* clamp pos into (pos_min, pos_limit] */
1437 else if(pos > pos_limit)
1441 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1447 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1449 if(ts == AV_NOPTS_VALUE){
1450 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1453 assert(ts != AV_NOPTS_VALUE);
/* shrink the interval toward target_ts */
1454 if (target_ts <= ts) {
1455 pos_limit = start_pos - 1;
1459 if (target_ts >= ts) {
1465 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1466 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
/* debug re-read of the surrounding timestamps */
1469 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1471 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1472 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1473 pos, ts_min, target_ts, ts_max);
/*
 * Byte-position seek (AVSEEK_FLAG_BYTE): clamp the requested byte offset
 * to [data_offset, file size - 1] and reposition the I/O context there.
 */
1479 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1480 int64_t pos_min, pos_max;
1484 if (stream_index < 0)
1487 st= s->streams[stream_index];
1490 pos_min = s->data_offset;
1491 pos_max = url_fsize(s->pb) - 1;
/* clamp the target into the valid byte range */
1493 if (pos < pos_min) pos= pos_min;
1494 else if(pos > pos_max) pos= pos_max;
1496 url_fseek(s->pb, pos, SEEK_SET);
1499 av_update_cur_dts(s, st, ts);
/*
 * Generic timestamp seek using the stream's index entries. If the index
 * does not cover the target timestamp, read frames forward (building the
 * index as a side effect) until a suitable keyframe is found, then seek
 * to the matching index entry.
 */
1504 static int av_seek_frame_generic(AVFormatContext *s,
1505 int stream_index, int64_t timestamp, int flags)
1511 st = s->streams[stream_index];
1513 index = av_index_search_timestamp(st, timestamp, flags);
/* target beyond (or at the end of) the current index: scan forward */
1515 if(index < 0 || index==st->nb_index_entries-1){
1519 if(st->nb_index_entries){
1520 assert(st->index_entries);
/* resume reading from the last indexed position to extend the index */
1521 ie= &st->index_entries[st->nb_index_entries-1];
1522 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1524 av_update_cur_dts(s, st, ie->timestamp);
1526 if ((ret = url_fseek(s->pb, 0, SEEK_SET)) < 0)
1530 int ret = av_read_frame(s, &pkt);
1533 av_free_packet(&pkt);
1534 if(stream_index == pkt.stream_index){
1535 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1539 index = av_index_search_timestamp(st, timestamp, flags);
1544 av_read_frame_flush(s);
/* prefer the demuxer's own seek callback when available */
1545 if (s->iformat->read_seek){
1546 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1549 ie = &st->index_entries[index];
1550 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1552 av_update_cur_dts(s, st, ie->timestamp);
/*
 * Public seek entry point. Dispatches to byte seek, the demuxer's own
 * read_seek(), timestamp binary search, or the generic index-based seek,
 * in that order of preference.
 */
1557 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1562 av_read_frame_flush(s);
1564 if(flags & AVSEEK_FLAG_BYTE)
1565 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1567 if(stream_index < 0){
1568 stream_index= av_find_default_stream_index(s);
1569 if(stream_index < 0)
1572 st= s->streams[stream_index];
1573 /* timestamp for default must be expressed in AV_TIME_BASE units */
1574 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1577 /* first, we try the format specific seek */
1578 if (s->iformat->read_seek)
1579 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
/* fall back to binary search if the demuxer provides read_timestamp() */
1586 if(s->iformat->read_timestamp)
1587 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1589 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1592 /*******************************************************/
1595 * Returns TRUE if the stream has accurate duration in any stream.
1597 * @return TRUE if the stream has accurate duration for at least one component.
/* Returns nonzero if at least one stream has a known (non-AV_NOPTS_VALUE)
 * duration. Return statements are elided in this extract. */
1599 static int av_has_duration(AVFormatContext *ic)
1604 for(i = 0;i < ic->nb_streams; i++) {
1605 st = ic->streams[i];
1606 if (st->duration != AV_NOPTS_VALUE)
1613 * Estimate the stream timings from the one of each components.
1615 * Also computes the global bitrate if possible.
/*
 * Derive the container's global start_time, duration and (when file_size
 * is known) bit_rate from the per-stream timings, all converted to
 * AV_TIME_BASE units.
 */
1617 static void av_update_stream_timings(AVFormatContext *ic)
1619 int64_t start_time, start_time1, end_time, end_time1;
1620 int64_t duration, duration1;
/* sentinels: INT64_MAX/INT64_MIN mean "not found yet" */
1624 start_time = INT64_MAX;
1625 end_time = INT64_MIN;
1626 duration = INT64_MIN;
1627 for(i = 0;i < ic->nb_streams; i++) {
1628 st = ic->streams[i];
1629 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1630 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1631 if (start_time1 < start_time)
1632 start_time = start_time1;
1633 if (st->duration != AV_NOPTS_VALUE) {
1634 end_time1 = start_time1
1635 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1636 if (end_time1 > end_time)
1637 end_time = end_time1;
1640 if (st->duration != AV_NOPTS_VALUE) {
1641 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1642 if (duration1 > duration)
1643 duration = duration1;
1646 if (start_time != INT64_MAX) {
1647 ic->start_time = start_time;
1648 if (end_time != INT64_MIN) {
/* global span (end - start) wins over the longest single-stream duration */
1649 if (end_time - start_time > duration)
1650 duration = end_time - start_time;
1653 if (duration != INT64_MIN) {
1654 ic->duration = duration;
1655 if (ic->file_size > 0) {
1656 /* compute the bitrate */
1657 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1658 (double)ic->duration;
/*
 * Propagate the container-level start_time/duration down to any stream
 * that has no start_time of its own, rescaling into the stream time base.
 */
1663 static void fill_all_stream_timings(AVFormatContext *ic)
1668 av_update_stream_timings(ic);
1669 for(i = 0;i < ic->nb_streams; i++) {
1670 st = ic->streams[i];
1671 if (st->start_time == AV_NOPTS_VALUE) {
1672 if(ic->start_time != AV_NOPTS_VALUE)
1673 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1674 if(ic->duration != AV_NOPTS_VALUE)
1675 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/*
 * Estimate the total duration from file size and bitrate:
 * duration = 8 * file_size / bit_rate (rescaled per stream time base).
 * The global bit_rate is summed from the streams when not already set.
 */
1680 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1682 int64_t filesize, duration;
1686 /* if bit_rate is already set, we believe it */
1687 if (ic->bit_rate == 0) {
1689 for(i=0;i<ic->nb_streams;i++) {
1690 st = ic->streams[i];
1691 bit_rate += st->codec->bit_rate;
1693 ic->bit_rate = bit_rate;
1696 /* if duration is already set, we believe it */
1697 if (ic->duration == AV_NOPTS_VALUE &&
1698 ic->bit_rate != 0 &&
1699 ic->file_size != 0) {
1700 filesize = ic->file_size;
1702 for(i = 0; i < ic->nb_streams; i++) {
1703 st = ic->streams[i];
1704 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1705 if (st->duration == AV_NOPTS_VALUE)
1706 st->duration = duration;
1712 #define DURATION_MAX_READ_SIZE 250000
1714 /* only usable for MPEG-PS streams */
/*
 * Estimate start time and duration by reading real packet PTS values:
 * a bounded read at the start of the file yields each stream's first PTS,
 * and a bounded read near EOF yields the last PTS. Restores the original
 * file position before returning. (Caller restricts this to MPEG-PS/TS.)
 */
1715 static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1717 AVPacket pkt1, *pkt = &pkt1;
1719 int read_size, i, ret;
1721 int64_t filesize, offset, duration;
1723 /* free previous packet */
1724 if (ic->cur_st && ic->cur_st->parser)
1725 av_free_packet(&ic->cur_pkt);
1728 /* flush packet queue */
1729 flush_packet_queue(ic);
1731 for(i=0;i<ic->nb_streams;i++) {
1732 st = ic->streams[i];
1734 av_parser_close(st->parser);
1739 /* we read the first packets to get the first PTS (not fully
1740 accurate, but it is enough now) */
1741 url_fseek(ic->pb, 0, SEEK_SET);
1744 if (read_size >= DURATION_MAX_READ_SIZE)
1746 /* if all info is available, we can stop */
1747 for(i = 0;i < ic->nb_streams; i++) {
1748 st = ic->streams[i];
1749 if (st->start_time == AV_NOPTS_VALUE)
1752 if (i == ic->nb_streams)
1755 ret = av_read_packet(ic, pkt);
1758 read_size += pkt->size;
1759 st = ic->streams[pkt->stream_index];
1760 if (pkt->pts != AV_NOPTS_VALUE) {
/* first PTS seen on this stream becomes its start_time */
1761 if (st->start_time == AV_NOPTS_VALUE)
1762 st->start_time = pkt->pts;
1764 av_free_packet(pkt);
1767 /* estimate the end time (duration) */
1768 /* XXX: may need to support wrapping */
1769 filesize = ic->file_size;
1770 offset = filesize - DURATION_MAX_READ_SIZE;
1774 url_fseek(ic->pb, offset, SEEK_SET);
1777 if (read_size >= DURATION_MAX_READ_SIZE)
1780 ret = av_read_packet(ic, pkt);
1783 read_size += pkt->size;
1784 st = ic->streams[pkt->stream_index];
1785 if (pkt->pts != AV_NOPTS_VALUE &&
1786 st->start_time != AV_NOPTS_VALUE) {
/* keep the largest (last PTS - start) seen as the stream duration */
1787 end_time = pkt->pts;
1788 duration = end_time - st->start_time;
1790 if (st->duration == AV_NOPTS_VALUE ||
1791 st->duration < duration)
1792 st->duration = duration;
1795 av_free_packet(pkt);
1798 fill_all_stream_timings(ic);
/* restore the caller's file position and reset decode state */
1800 url_fseek(ic->pb, old_offset, SEEK_SET);
1801 for(i=0; i<ic->nb_streams; i++){
1803 st->cur_dts= st->first_dts;
1804 st->last_IP_pts = AV_NOPTS_VALUE;
/*
 * Top-level timing estimation dispatcher: prefer accurate PTS scanning for
 * seekable MPEG-PS/TS, otherwise propagate known per-stream timings,
 * otherwise fall back to the bitrate-based estimate. The trailing printf
 * block appears to be debug output (likely compiled conditionally).
 */
1808 static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset)
1812 /* get the file size, if possible */
1813 if (ic->iformat->flags & AVFMT_NOFILE) {
1816 file_size = url_fsize(ic->pb);
1820 ic->file_size = file_size;
1822 if ((!strcmp(ic->iformat->name, "mpeg") ||
1823 !strcmp(ic->iformat->name, "mpegts")) &&
1824 file_size && !url_is_streamed(ic->pb)) {
1825 /* get accurate estimate from the PTSes */
1826 av_estimate_timings_from_pts(ic, old_offset);
1827 } else if (av_has_duration(ic)) {
1828 /* at least one component has timings - we use them for all
1830 fill_all_stream_timings(ic);
1832 /* less precise: use bitrate info */
1833 av_estimate_timings_from_bit_rate(ic);
1835 av_update_stream_timings(ic);
1841 for(i = 0;i < ic->nb_streams; i++) {
1842 st = ic->streams[i];
1843 printf("%d: start_time: %0.3f duration: %0.3f\n",
1844 i, (double)st->start_time / AV_TIME_BASE,
1845 (double)st->duration / AV_TIME_BASE);
1847 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1848 (double)ic->start_time / AV_TIME_BASE,
1849 (double)ic->duration / AV_TIME_BASE,
1850 ic->bit_rate / 1000);
/*
 * Returns nonzero when the codec context carries enough parameters to be
 * usable: audio needs sample_rate/channels/sample_fmt (and, for Vorbis/AAC,
 * a known frame_size); video needs width and a pixel format; and in all
 * cases a valid codec_id.
 */
1855 static int has_codec_parameters(AVCodecContext *enc)
1858 switch(enc->codec_type) {
1859 case CODEC_TYPE_AUDIO:
1860 val = enc->sample_rate && enc->channels && enc->sample_fmt != SAMPLE_FMT_NONE;
1861 if(!enc->frame_size &&
1862 (enc->codec_id == CODEC_ID_VORBIS ||
1863 enc->codec_id == CODEC_ID_AAC))
1866 case CODEC_TYPE_VIDEO:
1867 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1873 return enc->codec_id != CODEC_ID_NONE && val != 0;
/*
 * Last-resort probing: open the stream's decoder (if needed) and decode one
 * frame so missing codec parameters (dimensions, sample format, ...) get
 * filled in by the decoder itself.
 */
1876 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1880 int got_picture, data_size, ret=0;
1883 if(!st->codec->codec){
1884 codec = avcodec_find_decoder(st->codec->codec_id);
1887 ret = avcodec_open(st->codec, codec);
1892 if(!has_codec_parameters(st->codec)){
1893 switch(st->codec->codec_type) {
1894 case CODEC_TYPE_VIDEO:
1895 ret = avcodec_decode_video(st->codec, &picture,
1896 &got_picture, data, size);
1898 case CODEC_TYPE_AUDIO:
/* decoded audio needs a buffer at least AVCODEC_MAX_AUDIO_FRAME_SIZE large */
1899 data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1900 samples = av_malloc(data_size);
1903 ret = avcodec_decode_audio2(st->codec, samples,
1904 &data_size, data, size);
/* Look up the container tag (fourcc) for a codec id in a CODEC_ID_NONE-
 * terminated tag table. Body largely elided in this extract. */
1915 unsigned int codec_get_tag(const AVCodecTag *tags, int id)
1917 while (tags->id != CODEC_ID_NONE) {
/*
 * Map a container tag (fourcc) to a codec id: first an exact match pass,
 * then a case-insensitive pass comparing each of the four tag bytes.
 */
1925 enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag)
1928 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1929 if(tag == tags[i].tag)
1932 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1933 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1934 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1935 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1936 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1939 return CODEC_ID_NONE;
/* Search a NULL-terminated list of tag tables for the tag of a codec id. */
1942 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
1945 for(i=0; tags && tags[i]; i++){
1946 int tag= codec_get_tag(tags[i], id);
/* Search a NULL-terminated list of tag tables for the codec id of a tag. */
1952 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
1955 for(i=0; tags && tags[i]; i++){
1956 enum CodecID id= codec_get_id(tags[i], tag);
1957 if(id!=CODEC_ID_NONE) return id;
1959 return CODEC_ID_NONE;
/*
 * Fill in missing chapter end times: each open-ended chapter ends where the
 * next one starts, and the final chapter ends at start_time + duration.
 */
1962 static void compute_chapters_end(AVFormatContext *s)
1966 for (i=0; i+1<s->nb_chapters; i++)
1967 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
1968 assert(s->chapters[i]->start <= s->chapters[i+1]->start);
1969 assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
1970 s->chapters[i]->end = s->chapters[i+1]->start;
/* last chapter: clamp to the end of the whole stream */
1973 if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
1974 assert(s->start_time != AV_NOPTS_VALUE);
1975 assert(s->duration > 0);
1976 s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
1978 s->chapters[i]->time_base);
1982 /* absolute maximum size we read until we abort */
1983 #define MAX_READ_SIZE 5000000
1985 #define MAX_STD_TIMEBASES (60*12+5)
/* Map index i to a candidate standard framerate (in units of 1/(12*1001) Hz):
 * indices below 60*12 enumerate i*1001; the last five are 24/30/60/12/15 fps
 * expressed as *1000*12. Used by the fps-guessing code in av_find_stream_info. */
1986 static int get_std_framerate(int i){
1987 if(i<60*12) return i*1001;
1988 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
1992 * Is the time base unreliable.
1993 * This is a heuristic to balance between quick acceptance of the values in
1994 * the headers vs. some extra checks.
1995 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
1996 * MPEG-2 commonly misuses field repeat flags to store different framerates.
1997 * And there are "variable" fps files this needs to detect as well.
/* Heuristic: nonzero when the codec time base cannot be trusted as the real
 * framerate (absurdly low/high fps, or MPEG-2 with its field-repeat misuse). */
1999 static int tb_unreliable(AVCodecContext *c){
2000 if( c->time_base.den >= 101L*c->time_base.num
2001 || c->time_base.den < 5L*c->time_base.num
2002 /* || c->codec_tag == ff_get_fourcc("DIVX")
2003 || c->codec_tag == ff_get_fourcc("XVID")*/
2004 || c->codec_id == CODEC_ID_MPEG2VIDEO)
/*
 * Probe the input: read packets (buffering them for later consumption)
 * until every stream has usable codec parameters, a reliable framerate
 * guess, and a first DTS — or until MAX_READ_SIZE bytes / the analyze
 * duration limit is hit. Also opens decoders as a last resort
 * (try_decode_frame), estimates timings, and computes chapter ends.
 * NOTE(review): this listing is a partial extract — interior lines
 * (loop headers, braces, some error paths) are elided; verify against
 * the complete source before relying on control-flow details.
 */
2009 int av_find_stream_info(AVFormatContext *ic)
2011 int i, count, ret, read_size, j;
2013 AVPacket pkt1, *pkt;
2014 int64_t last_dts[MAX_STREAMS];
2015 int duration_count[MAX_STREAMS]={0};
/* per-stream table of squared timing errors, one slot per candidate fps */
2016 double (*duration_error)[MAX_STD_TIMEBASES];
2017 int64_t old_offset = url_ftell(ic->pb);
2018 int64_t codec_info_duration[MAX_STREAMS]={0};
2019 int codec_info_nb_frames[MAX_STREAMS]={0};
2021 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
2022 if (!duration_error) return AVERROR(ENOMEM);
2024 for(i=0;i<ic->nb_streams;i++) {
2025 st = ic->streams[i];
2026 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2027 /* if(!st->time_base.num)
2029 if(!st->codec->time_base.num)
2030 st->codec->time_base= st->time_base;
2032 //only for the split stuff
2034 st->parser = av_parser_init(st->codec->codec_id);
2035 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2036 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2041 for(i=0;i<MAX_STREAMS;i++){
2042 last_dts[i]= AV_NOPTS_VALUE;
2048 /* check if one codec still needs to be handled */
2049 for(i=0;i<ic->nb_streams;i++) {
2050 st = ic->streams[i];
2051 if (!has_codec_parameters(st->codec))
2053 /* variable fps and no guess at the real fps */
2054 if( tb_unreliable(st->codec)
2055 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
2057 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2059 if(st->first_dts == AV_NOPTS_VALUE)
2062 if (i == ic->nb_streams) {
2063 /* NOTE: if the format has no header, then we need to read
2064 some packets to get most of the streams, so we cannot
2066 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2067 /* if we found the info for all the codecs, we can stop */
2072 /* we did not get all the codec info, but we read too much data */
2073 if (read_size >= MAX_READ_SIZE) {
2078 /* NOTE: a new stream can be added there if no header in file
2079 (AVFMTCTX_NOHEADER) */
2080 ret = av_read_frame_internal(ic, &pkt1);
2083 ret = -1; /* we could not have all the codec parameters before EOF */
2084 for(i=0;i<ic->nb_streams;i++) {
2085 st = ic->streams[i];
2086 if (!has_codec_parameters(st->codec)){
2088 avcodec_string(buf, sizeof(buf), st->codec, 0);
2089 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
/* buffer the packet so av_read_frame() can return it later */
2097 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2098 if(av_dup_packet(pkt) < 0) {
2099 av_free(duration_error);
2100 return AVERROR(ENOMEM);
2103 read_size += pkt->size;
2105 st = ic->streams[pkt->stream_index];
2106 if(codec_info_nb_frames[st->index]>1)
2107 codec_info_duration[st->index] += pkt->duration;
2108 if (pkt->duration != 0)
2109 codec_info_nb_frames[st->index]++;
2112 int index= pkt->stream_index;
2113 int64_t last= last_dts[index];
2114 int64_t duration= pkt->dts - last;
2116 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2117 double dur= duration * av_q2d(st->time_base);
2119 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2120 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2121 if(duration_count[index] < 2)
2122 memset(duration_error[index], 0, sizeof(*duration_error));
/* accumulate squared error of the observed frame duration against each
   standard framerate candidate; smallest total error wins later */
2123 for(i=1; i<MAX_STD_TIMEBASES; i++){
2124 int framerate= get_std_framerate(i);
2125 int ticks= lrintf(dur*framerate/(1001*12));
2126 double error= dur - ticks*1001*12/(double)framerate;
2127 duration_error[index][i] += error*error;
2129 duration_count[index]++;
2131 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
2132 last_dts[pkt->stream_index]= pkt->dts;
2134 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2135 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2137 st->codec->extradata_size= i;
/* NOTE(review): this av_malloc result is used without a NULL check */
2138 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2139 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2140 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2144 /* if still no information, we try to open the codec and to
2145 decompress the frame. We try to avoid that in most cases as
2146 it takes longer and uses more memory. For MPEG-4, we need to
2147 decompress for QuickTime. */
2148 if (!has_codec_parameters(st->codec) /*&&
2149 (st->codec->codec_id == CODEC_ID_FLV1 ||
2150 st->codec->codec_id == CODEC_ID_H264 ||
2151 st->codec->codec_id == CODEC_ID_H263 ||
2152 st->codec->codec_id == CODEC_ID_H261 ||
2153 st->codec->codec_id == CODEC_ID_VORBIS ||
2154 st->codec->codec_id == CODEC_ID_MJPEG ||
2155 st->codec->codec_id == CODEC_ID_PNG ||
2156 st->codec->codec_id == CODEC_ID_PAM ||
2157 st->codec->codec_id == CODEC_ID_PGM ||
2158 st->codec->codec_id == CODEC_ID_PGMYUV ||
2159 st->codec->codec_id == CODEC_ID_PBM ||
2160 st->codec->codec_id == CODEC_ID_PPM ||
2161 st->codec->codec_id == CODEC_ID_SHORTEN ||
2162 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
2163 try_decode_frame(st, pkt->data, pkt->size);
2165 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2171 // close codecs which were opened in try_decode_frame()
2172 for(i=0;i<ic->nb_streams;i++) {
2173 st = ic->streams[i];
2174 if(st->codec->codec)
2175 avcodec_close(st->codec);
2177 for(i=0;i<ic->nb_streams;i++) {
2178 st = ic->streams[i];
2179 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2180 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample)
2181 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2183 if(duration_count[i]
2184 && tb_unreliable(st->codec) /*&&
2185 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2186 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
/* pick the standard framerate with the lowest accumulated error */
2187 double best_error= 2*av_q2d(st->time_base);
2188 best_error= best_error*best_error*duration_count[i]*1000*12*30;
2190 for(j=1; j<MAX_STD_TIMEBASES; j++){
2191 double error= duration_error[i][j] * get_std_framerate(j);
2192 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2193 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2194 if(error < best_error){
2196 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, get_std_framerate(j), 12*1001, INT_MAX);
2201 if (!st->r_frame_rate.num){
/* fall back to whichever of codec/stream time base implies the lower fps */
2202 if( st->codec->time_base.den * (int64_t)st->time_base.num
2203 <= st->codec->time_base.num * (int64_t)st->time_base.den){
2204 st->r_frame_rate.num = st->codec->time_base.den;
2205 st->r_frame_rate.den = st->codec->time_base.num;
2207 st->r_frame_rate.num = st->time_base.den;
2208 st->r_frame_rate.den = st->time_base.num;
2211 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
2212 if(!st->codec->bits_per_coded_sample)
2213 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2217 av_estimate_timings(ic, old_offset);
2219 compute_chapters_end(ic);
2222 /* correct DTS for B-frame streams with no timestamps */
2223 for(i=0;i<ic->nb_streams;i++) {
2224 st = ic->streams[i];
2225 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2227 ppktl = &ic->packet_buffer;
2229 if(ppkt1->stream_index != i)
2231 if(ppkt1->pkt->dts < 0)
2233 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2235 ppkt1->pkt->dts -= delta;
2240 st->cur_dts -= delta;
2246 av_free(duration_error);
/* Resume playback: prefer the demuxer's read_play() callback, otherwise
 * unpause the underlying protocol; ENOSYS if neither is available. */
2253 int av_read_play(AVFormatContext *s)
2255 if (s->iformat->read_play)
2256 return s->iformat->read_play(s);
2258 return av_url_read_fpause(s->pb, 0);
2259 return AVERROR(ENOSYS);
/* Pause playback: mirror of av_read_play() with pause=1. */
2262 int av_read_pause(AVFormatContext *s)
2264 if (s->iformat->read_pause)
2265 return s->iformat->read_pause(s);
2267 return av_url_read_fpause(s->pb, 1);
2268 return AVERROR(ENOSYS);
/*
 * Tear down an input context without touching its ByteIOContext: call the
 * demuxer's read_close(), then free every per-stream, per-program and
 * per-chapter allocation, the packet queue, metadata, and priv_data.
 */
2271 void av_close_input_stream(AVFormatContext *s)
2276 /* free previous packet */
2277 if (s->cur_st && s->cur_st->parser)
2278 av_free_packet(&s->cur_pkt);
2280 if (s->iformat->read_close)
2281 s->iformat->read_close(s);
2282 for(i=0;i<s->nb_streams;i++) {
2283 /* free all data in a stream component */
2286 av_parser_close(st->parser);
2288 av_free(st->index_entries);
2289 av_free(st->codec->extradata);
2291 av_free(st->filename);
2292 av_free(st->priv_data);
2295 for(i=s->nb_programs-1; i>=0; i--) {
2296 av_freep(&s->programs[i]->provider_name);
2297 av_freep(&s->programs[i]->name);
2298 av_freep(&s->programs[i]->stream_index);
2299 av_freep(&s->programs[i]);
2301 av_freep(&s->programs);
2302 flush_packet_queue(s);
2303 av_freep(&s->priv_data);
2304 while(s->nb_chapters--) {
2305 av_free(s->chapters[s->nb_chapters]->title);
2306 av_free(s->chapters[s->nb_chapters]);
2308 av_freep(&s->chapters);
2310 while(s->meta_data->count--){
2311 av_freep(&s->meta_data->elems[s->meta_data->count].key);
2312 av_freep(&s->meta_data->elems[s->meta_data->count].value);
2314 av_freep(&s->meta_data->elems);
2316 av_freep(&s->meta_data);
/* Close an input file: release the context, then (in elided lines) close the
 * I/O context unless the format is AVFMT_NOFILE. */
2320 void av_close_input_file(AVFormatContext *s)
2322 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2323 av_close_input_stream(s);
/*
 * Allocate and register a new AVStream on the context with sensible
 * defaults: MPEG-like 90kHz pts info, unknown start/duration, empty
 * pts reorder buffer. Returns the stream (NULL paths elided here).
 */
2328 AVStream *av_new_stream(AVFormatContext *s, int id)
2333 if (s->nb_streams >= MAX_STREAMS)
2336 st = av_mallocz(sizeof(AVStream));
2340 st->codec= avcodec_alloc_context();
2342 /* no default bitrate if decoding */
2343 st->codec->bit_rate = 0;
2345 st->index = s->nb_streams;
2347 st->start_time = AV_NOPTS_VALUE;
2348 st->duration = AV_NOPTS_VALUE;
2349 /* we set the current DTS to 0 so that formats without any timestamps
2350 but durations get some timestamps, formats with some unknown
2351 timestamps have their first few packets buffered and the
2352 timestamps corrected before they are returned to the user */
2354 st->first_dts = AV_NOPTS_VALUE;
2356 /* default pts setting is MPEG-like */
2357 av_set_pts_info(st, 33, 1, 90000);
2358 st->last_IP_pts = AV_NOPTS_VALUE;
2359 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2360 st->pts_buffer[i]= AV_NOPTS_VALUE;
2362 st->sample_aspect_ratio = (AVRational){0,1};
2364 s->streams[s->nb_streams++] = st;
/*
 * Return the program with the given id, creating and registering a new one
 * (discard = AVDISCARD_NONE) if it does not exist yet.
 */
2368 AVProgram *av_new_program(AVFormatContext *ac, int id)
2370 AVProgram *program=NULL;
2374 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
2377 for(i=0; i<ac->nb_programs; i++)
2378 if(ac->programs[i]->id == id)
2379 program = ac->programs[i];
2382 program = av_mallocz(sizeof(AVProgram));
2385 dynarray_add(&ac->programs, &ac->nb_programs, program);
2386 program->discard = AVDISCARD_NONE;
/* Replace a program's provider_name and name with fresh strdup'd copies;
 * both must be NULL or both non-NULL (enforced by the assert). */
2393 void av_set_program_name(AVProgram *program, char *provider_name, char *name)
2395 assert(!provider_name == !name);
2397 av_free(program->provider_name);
2398 av_free(program-> name);
2399 program->provider_name = av_strdup(provider_name);
2400 program-> name = av_strdup( name);
/*
 * Find the chapter with the given id (creating and registering it if
 * missing) and update its title, time base and start time.
 */
2404 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2406 AVChapter *chapter = NULL;
2409 for(i=0; i<s->nb_chapters; i++)
2410 if(s->chapters[i]->id == id)
2411 chapter = s->chapters[i];
2414 chapter= av_mallocz(sizeof(AVChapter));
2417 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
/* replace any previous title with a fresh copy */
2419 av_free(chapter->title);
2420 chapter->title = av_strdup(title);
2422 chapter->time_base= time_base;
2423 chapter->start = start;
2429 /************************************************************/
2430 /* output media file */
/*
 * Muxer setup: allocate the output format's priv_data (if any) and forward
 * the AVFormatParameters to the muxer's set_parameters() callback.
 */
2432 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2436 if (s->oformat->priv_data_size > 0) {
2437 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2439 return AVERROR(ENOMEM);
2441 s->priv_data = NULL;
2443 if (s->oformat->set_parameters) {
2444 ret = s->oformat->set_parameters(s, ap);
/*
 * Validate per-stream parameters (sample rate, time base, dimensions,
 * aspect ratio, codec tags), allocate muxer priv_data if needed, call the
 * muxer's write_header(), and initialize per-stream PTS generation.
 */
2451 int av_write_header(AVFormatContext *s)
2456 // some sanity checks
2457 for(i=0;i<s->nb_streams;i++) {
2460 switch (st->codec->codec_type) {
2461 case CODEC_TYPE_AUDIO:
2462 if(st->codec->sample_rate<=0){
2463 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2466 if(!st->codec->block_align)
2467 st->codec->block_align = st->codec->channels *
2468 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2470 case CODEC_TYPE_VIDEO:
2471 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2472 av_log(s, AV_LOG_ERROR, "time base not set\n");
2475 if(st->codec->width<=0 || st->codec->height<=0){
2476 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2479 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2480 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
2486 if(s->oformat->codec_tag){
2487 if(st->codec->codec_tag){
2489 //check that tag + id is in the table
2490 //if neither is in the table -> OK
2491 //if tag is in the table with another id -> FAIL
2492 //if id is in the table with another tag -> FAIL unless strict < ?
2494 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2498 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2499 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2501 return AVERROR(ENOMEM);
2504 if(s->oformat->write_header){
2505 ret = s->oformat->write_header(s);
2510 /* init PTS generation */
2511 for(i=0;i<s->nb_streams;i++) {
2512 int64_t den = AV_NOPTS_VALUE;
2515 switch (st->codec->codec_type) {
2516 case CODEC_TYPE_AUDIO:
2517 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2519 case CODEC_TYPE_VIDEO:
2520 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2525 if (den != AV_NOPTS_VALUE) {
2527 return AVERROR_INVALIDDATA;
2528 av_frac_init(&st->pts, 0, 0, den);
2534 //FIXME merge with compute_pkt_fields
/*
 * Muxing-side timestamp fixup: fill in missing duration/pts/dts for an
 * outgoing packet, derive dts from pts through the B-frame reorder buffer,
 * reject non-monotone dts and pts < dts, and advance the per-stream
 * fractional pts counter (st->pts) by one frame.
 */
2535 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2536 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2537 int num, den, frame_size, i;
2539 // av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2541 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2544 /* duration field */
2545 if (pkt->duration == 0) {
2546 compute_frame_duration(&num, &den, st, NULL, pkt);
2548 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
/* no B-frames: dts can stand in for a missing pts */
2552 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2555 //XXX/FIXME this is a temporary hack until all encoders output pts
2556 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2558 // pkt->pts= st->cur_dts;
2559 pkt->pts= st->pts.val;
2562 //calculate dts from pts
2563 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
2564 st->pts_buffer[0]= pkt->pts;
2565 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2566 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
/* keep the reorder buffer sorted; its minimum is the next dts */
2567 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2568 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2570 pkt->dts= st->pts_buffer[0];
2573 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2574 av_log(st->codec, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2577 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2578 av_log(st->codec, AV_LOG_ERROR, "error, pts < dts\n");
2582 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2583 st->cur_dts= pkt->dts;
2584 st->pts.val= pkt->dts;
2587 switch (st->codec->codec_type) {
2588 case CODEC_TYPE_AUDIO:
2589 frame_size = get_audio_frame_size(st->codec, pkt->size);
2591 /* HACK/FIXME, we skip the initial 0 size packets as they are most
2592 likely equal to the encoder delay, but it would be better if we
2593 had the real timestamps from the encoder */
2594 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2595 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2598 case CODEC_TYPE_VIDEO:
2599 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
/* Write one packet without interleaving: fix up its timestamp fields, then
 * hand it directly to the muxer's write_packet() and report I/O errors. */
2607 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2609 int ret = compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2611 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2614 ret= s->oformat->write_packet(s, pkt);
2616 ret= url_ferror(s->pb);
/*
 * Default interleaver: insert the incoming packet into the dts-sorted
 * packet buffer (cross-stream comparison via time-base cross products),
 * then emit the head packet once every stream has buffered at least one
 * packet, or when flushing.
 */
2620 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2621 AVPacketList *pktl, **next_point, *this_pktl;
2623 int streams[MAX_STREAMS];
2626 AVStream *st= s->streams[ pkt->stream_index];
2628 // assert(pkt->destruct != av_destruct_packet); //FIXME
2630 this_pktl = av_mallocz(sizeof(AVPacketList));
2631 this_pktl->pkt= *pkt;
2632 if(pkt->destruct == av_destruct_packet)
2633 pkt->destruct= NULL; // not shared -> must keep original from being freed
2635 av_dup_packet(&this_pktl->pkt); //shared -> must dup
/* find the sorted insertion point by comparing dts across time bases */
2637 next_point = &s->packet_buffer;
2639 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2640 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2641 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2642 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2644 next_point= &(*next_point)->next;
2646 this_pktl->next= *next_point;
2647 *next_point= this_pktl;
/* count how many distinct streams have packets queued */
2650 memset(streams, 0, sizeof(streams));
2651 pktl= s->packet_buffer;
2653 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
2654 if(streams[ pktl->pkt.stream_index ] == 0)
2656 streams[ pktl->pkt.stream_index ]++;
2660 if(stream_count && (s->nb_streams == stream_count || flush)){
2661 pktl= s->packet_buffer;
2664 s->packet_buffer= pktl->next;
2668 av_init_packet(out);
2674 * Interleaves an AVPacket correctly so it can be muxed.
2675 * @param out the interleaved packet will be output here
2676 * @param in the input packet
2677 * @param flush 1 if no further packets are available as input and all
2678 * remaining packets should be output
2679 * @return 1 if a packet was output, 0 if no packet could be output,
2680 * < 0 if an error occurred
/* Dispatch interleaving to the muxer's own callback when present, otherwise
 * use the default per-dts interleaver. */
2682 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2683 if(s->oformat->interleave_packet)
2684 return s->oformat->interleave_packet(s, out, in, flush);
2686 return av_interleave_packet_per_dts(s, out, in, flush);
/*
 * Write a packet with correct interleaving: fix up timestamps, queue the
 * packet through av_interleave_packet(), and write out every packet the
 * interleaver releases.
 */
2689 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2690 AVStream *st= s->streams[ pkt->stream_index];
2692 //FIXME/XXX/HACK drop zero sized packets
2693 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2696 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2697 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2700 if(pkt->dts == AV_NOPTS_VALUE)
2705 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2706 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2709 ret= s->oformat->write_packet(s, &opkt);
2711 av_free_packet(&opkt);
2716 if(url_ferror(s->pb))
2717 return url_ferror(s->pb);
/* Finish muxing: flush every packet still buffered in the interleaving
 * layer (flush=1), write the container trailer if the muxer defines one,
 * and free per-stream and muxer private data.
 * Returns 0 on success, otherwise the first error encountered (including
 * any pending byte-stream error from url_ferror()).
 * NOTE(review): the enclosing flush loop and several returns are elided. */
2721 int av_write_trailer(AVFormatContext *s)
/* flush=1: force all remaining buffered packets out */
2727 ret= av_interleave_packet(s, &pkt, NULL, 1);
2728 if(ret<0) //FIXME cleanup needed for ret<0 ?
2733 ret= s->oformat->write_packet(s, &pkt);
2735 av_free_packet(&pkt);
2739 if(url_ferror(s->pb))
2743 if(s->oformat->write_trailer)
2744 ret = s->oformat->write_trailer(s);
2747 ret=url_ferror(s->pb);
/* release per-stream muxer state, then the muxer's own private context */
2748 for(i=0;i<s->nb_streams;i++)
2749 av_freep(&s->streams[i]->priv_data);
2750 av_freep(&s->priv_data);
/* Register stream index 'idx' as belonging to the program with id 'progid'.
 * If the stream is already listed in that program this is a no-op; otherwise
 * the program's stream_index array is grown by one entry.
 * NOTE(review): the realloc-failure check on 'tmp' is elided in this excerpt
 * but presumably precedes the assignment — verify in the full source. */
2754 void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
2757 AVProgram *program=NULL;
/* find the program matching progid */
2760 for(i=0; i<ac->nb_programs; i++){
2761 if(ac->programs[i]->id != progid)
2763 program = ac->programs[i];
/* skip if idx is already a member of this program */
2764 for(j=0; j<program->nb_stream_indexes; j++)
2765 if(program->stream_index[j] == idx)
/* grow via a temporary so the original array survives realloc failure */
2768 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
2771 program->stream_index = tmp;
2772 program->stream_index[program->nb_stream_indexes++] = idx;
2777 /* "user interface" functions */
/* Print a one-line human-readable description of stream i of 'ic' to the
 * log: "Stream #index.i[0xid](lang): codec-string, frame rate".
 * 'index' is the file index shown before the stream number; 'is_output'
 * selects whether oformat or iformat flags are consulted. */
2778 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
2781 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
2782 AVStream *st = ic->streams[i];
/* reduce the time base to lowest terms for the debug printout below */
2783 int g = ff_gcd(st->time_base.num, st->time_base.den);
2784 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2785 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2786 /* the pid is important information, so we display it */
2787 /* XXX: add a generic system */
2788 if (flags & AVFMT_SHOW_IDS)
2789 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2790 if (strlen(st->language) > 0)
2791 av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
/* time base only at DEBUG level; the rest of the line is INFO */
2792 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2793 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2794 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2795 if(st->r_frame_rate.den && st->r_frame_rate.num)
2796 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(r)", av_q2d(st->r_frame_rate));
2797 /* else if(st->time_base.den && st->time_base.num)
2798 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(m)", 1/av_q2d(st->time_base));*/
2800 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(c)", 1/av_q2d(st->codec->time_base));
2802 av_log(NULL, AV_LOG_INFO, "\n");
/* Print a human-readable summary of the whole format context to the log:
 * header line (format name, url), duration, start time, bitrate, program
 * list, and one line per stream via dump_stream_format().
 * NOTE(review): parameter list and several computation lines (hours/mins
 * split, bit_rate check, loop variable declarations) are elided here. */
2805 void dump_format(AVFormatContext *ic,
2812 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2813 is_output ? "Output" : "Input",
2815 is_output ? ic->oformat->name : ic->iformat->name,
2816 is_output ? "to" : "from", url);
2818 av_log(NULL, AV_LOG_INFO, " Duration: ");
2819 if (ic->duration != AV_NOPTS_VALUE) {
2820 int hours, mins, secs, us;
/* split the AV_TIME_BASE-scaled duration into whole seconds + remainder */
2821 secs = ic->duration / AV_TIME_BASE;
2822 us = ic->duration % AV_TIME_BASE;
2827 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
2828 (100 * us) / AV_TIME_BASE);
2830 av_log(NULL, AV_LOG_INFO, "N/A");
2832 if (ic->start_time != AV_NOPTS_VALUE) {
2834 av_log(NULL, AV_LOG_INFO, ", start: ");
2835 secs = ic->start_time / AV_TIME_BASE;
2836 us = ic->start_time % AV_TIME_BASE;
2837 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2838 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2840 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2842 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2844 av_log(NULL, AV_LOG_INFO, "N/A");
2846 av_log(NULL, AV_LOG_INFO, "\n");
/* list each program and the streams it contains */
2848 if(ic->nb_programs) {
2850 for(j=0; j<ic->nb_programs; j++) {
2851 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
2852 ic->programs[j]->name ? ic->programs[j]->name : "");
2853 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++)
2854 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
/* presumably the else branch: no programs -> dump every stream directly */
2857 for(i=0;i<ic->nb_streams;i++)
2858 dump_stream_format(ic, i, index, is_output);
2861 #if LIBAVFORMAT_VERSION_MAJOR < 53
/* Deprecated pre-53 wrapper kept for API compatibility; forwards directly
 * to av_parse_video_frame_size(). */
2862 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2864 return av_parse_video_frame_size(width_ptr, height_ptr, str);
/* Deprecated pre-53 wrapper: parse 'arg' as a frame rate via
 * av_parse_video_frame_rate() and split the AVRational result into
 * separate numerator/denominator out-parameters.
 * NOTE(review): the 'return ret;' line is elided in this excerpt. */
2867 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
2869 AVRational frame_rate;
2870 int ret = av_parse_video_frame_rate(&frame_rate, arg);
2871 *frame_rate_num= frame_rate.num;
2872 *frame_rate_den= frame_rate.den;
/* Current wall-clock time in microseconds since the Unix epoch,
 * built from gettimeofday()'s seconds + microseconds fields. */
2877 int64_t av_gettime(void)
2880 gettimeofday(&tv,NULL);
2881 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
/* Parse 'datestr' either as an absolute date/time (duration == 0), returned
 * in microseconds since the Unix epoch, or as a time span (duration != 0),
 * returned in microseconds.  Formats are matched against the date_fmt[] /
 * time_fmt[] strptime-style tables via small_strptime(); a trailing
 * 'z'/'Z' selects UTC interpretation.
 * NOTE(review): the format tables' contents, the error-return paths, the
 * local-time conversion and the 'negative' handling are elided here —
 * consult the full source before relying on exact semantics. */
2884 int64_t parse_date(const char *datestr, int duration)
2890 static const char * const date_fmt[] = {
2894 static const char * const time_fmt[] = {
2904 time_t now = time(0);
2906 len = strlen(datestr);
2908 lastch = datestr[len - 1];
/* a trailing 'z'/'Z' marks the timestamp as UTC */
2911 is_utc = (lastch == 'z' || lastch == 'Z');
2913 memset(&dt, 0, sizeof(dt));
2918 /* parse the year-month-day part */
2919 for (i = 0; i < FF_ARRAY_ELEMS(date_fmt); i++) {
2920 q = small_strptime(p, date_fmt[i], &dt);
2926 /* if the year-month-day part is missing, then take the
2927 * current year-month-day time */
2932 dt = *localtime(&now);
2934 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
/* an optional 'T', 't' or space separates date and time parts */
2939 if (*p == 'T' || *p == 't' || *p == ' ')
2942 /* parse the hour-minute-second part */
2943 for (i = 0; i < FF_ARRAY_ELEMS(time_fmt); i++) {
2944 q = small_strptime(p, time_fmt[i], &dt);
2950 /* parse datestr as a duration */
2955 /* parse datestr as HH:MM:SS */
2956 q = small_strptime(p, time_fmt[0], &dt);
2958 /* parse datestr as S+ */
2959 dt.tm_sec = strtol(p, (char **)&q, 10);
2961 /* the parsing didn't succeed */
2968 /* Now we have all the fields that we can get */
/* durations: plain seconds count, no calendar conversion */
2974 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2976 dt.tm_isdst = -1; /* unknown */
2986 /* parse the .m... part */
/* accumulate up to 6 fractional-second digits as microseconds */
2990 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2993 val += n * (*q - '0');
2997 return negative ? -t : t;
/* Search an "tag1=value&tag2=value" style info string for 'tag1' and, when
 * found, copy its value into 'arg' (bounded by arg_size, NUL-terminated).
 * NOTE(review): the return statements and the '?' prefix handling are
 * elided in this excerpt; presumably returns 1 when found, 0 otherwise. */
3000 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
/* scan the tag name up to '=', '&' or end of string */
3010 while (*p != '\0' && *p != '=' && *p != '&') {
3011 if ((q - tag) < sizeof(tag) - 1)
/* copy the value up to the next '&' separator, bounded by arg_size */
3019 while (*p != '&' && *p != '\0') {
3020 if ((q - arg) < arg_size - 1) {
3030 if (!strcmp(tag, tag1))
/* Expand a printf-like "%d" / "%0Nd" sequence in 'path' with 'number' and
 * write the result into 'buf' (size buf_size).  Exactly one %d sequence is
 * expected: the elided error paths presumably fail when the buffer is too
 * small or when no %d is present (and !percentd_found is checked below). */
3039 int av_get_frame_filename(char *buf, int buf_size,
3040 const char *path, int number)
3043 char *q, buf1[20], c;
3044 int nd, len, percentd_found;
/* parse an optional zero-padded width, e.g. the "04" in "%04d" */
3056 while (isdigit(*p)) {
3057 nd = nd * 10 + *p++ - '0';
3060 } while (isdigit(c));
/* render the number with the requested zero padding */
3069 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
/* bail out if the expansion would overflow buf */
3071 if ((q - buf + len) > buf_size - 1)
3073 memcpy(q, buf1, len);
3081 if ((q - buf) < buf_size - 1)
3085 if (!percentd_found)
/* Hex-dump 'size' bytes of 'buf', 16 per row, with an ASCII column.
 * Output goes to FILE *f when non-NULL, otherwise to av_log(avcl, level).
 * Non-printable characters in the ASCII column are substituted (the
 * replacement line is elided in this excerpt). */
3094 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
3097 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3099 for(i=0;i<size;i+=16) {
3106 PRINT(" %02x", buf[i+j]);
/* ASCII column: printable chars as-is, others replaced */
3111 for(j=0;j<len;j++) {
3113 if (c < ' ' || c > '~')
/* Public wrapper: hex dump to a stdio stream (level is unused when f != NULL). */
3122 void av_hex_dump(FILE *f, uint8_t *buf, int size)
3124 hex_dump_internal(NULL, f, 0, buf, size);
/* Public wrapper: hex dump through av_log() at the given level. */
3127 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3129 hex_dump_internal(avcl, NULL, level, buf, size);
3132 //FIXME needs to know the time_base
/* Dump an AVPacket's metadata (stream index, keyframe flag, duration,
 * dts/pts in seconds assuming AV_TIME_BASE units — see FIXME above) and,
 * when dump_payload is set, its payload as hex.  Output goes to FILE *f
 * when non-NULL, otherwise through av_log(avcl, level). */
3133 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
3135 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3136 PRINT("stream #%d:\n", pkt->stream_index);
3137 PRINT(" keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
3138 PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
3139 /* DTS is _always_ valid after av_read_frame() */
3141 if (pkt->dts == AV_NOPTS_VALUE)
3144 PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
3145 /* PTS may not be known if B-frames are present. */
3147 if (pkt->pts == AV_NOPTS_VALUE)
3150 PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
3152 PRINT(" size=%d\n", pkt->size);
3155 av_hex_dump(f, pkt->data, pkt->size);
/* Public wrapper: dump a packet to a stdio stream. */
3158 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3160 pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
/* Public wrapper: dump a packet through av_log() at the given level. */
3163 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3165 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
/* Split 'url' into protocol, authorization (user[:pass]), hostname, port
 * number and path.  Each output may be disabled by passing size 0 (or NULL
 * for port_ptr); missing components come back as empty strings and the
 * port as -1.  Bracketed IPv6 hosts "[addr]:port" are handled.
 * NOTE(review): a few lines (e.g. the port parameter of the signature and
 * some closing braces) are elided in this excerpt. */
3168 void url_split(char *proto, int proto_size,
3169 char *authorization, int authorization_size,
3170 char *hostname, int hostname_size,
3172 char *path, int path_size,
3175 const char *p, *ls, *at, *col, *brk;
/* initialize all outputs to "not present" */
3177 if (port_ptr) *port_ptr = -1;
3178 if (proto_size > 0) proto[0] = 0;
3179 if (authorization_size > 0) authorization[0] = 0;
3180 if (hostname_size > 0) hostname[0] = 0;
3181 if (path_size > 0) path[0] = 0;
3183 /* parse protocol */
3184 if ((p = strchr(url, ':'))) {
3185 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3190 /* no protocol means plain filename */
3191 av_strlcpy(path, url, path_size);
3195 /* separate path from hostname */
3196 ls = strchr(p, '/');
3198 ls = strchr(p, '?');
3200 av_strlcpy(path, ls, path_size);
3202 ls = &p[strlen(p)]; // XXX
3204 /* the rest is hostname, use that to parse auth/port */
3206 /* authorization (user[:pass]@hostname) */
3207 if ((at = strchr(p, '@')) && at < ls) {
3208 av_strlcpy(authorization, p,
3209 FFMIN(authorization_size, at + 1 - p));
3210 p = at + 1; /* skip '@' */
/* bracketed IPv6 host: "[addr]" optionally followed by ":port" */
3213 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3215 av_strlcpy(hostname, p + 1,
3216 FFMIN(hostname_size, brk - p));
3217 if (brk[1] == ':' && port_ptr)
3218 *port_ptr = atoi(brk + 2);
/* plain "host:port" form */
3219 } else if ((col = strchr(p, ':')) && col < ls) {
3220 av_strlcpy(hostname, p,
3221 FFMIN(col + 1 - p, hostname_size));
3222 if (port_ptr) *port_ptr = atoi(col + 1);
/* hostname only, no port */
3224 av_strlcpy(hostname, p,
3225 FFMIN(ls + 1 - p, hostname_size));
/* Encode the 's' bytes of 'src' as 2*s uppercase hexadecimal characters
 * into 'buff' (caller-provided, must hold at least 2*s chars).
 * NOTE(review): the NUL termination and return statement are elided in
 * this excerpt; presumably returns buff. */
3229 char *ff_data_to_hex(char *buff, const uint8_t *src, int s)
3232 static const char hex_table[16] = { '0', '1', '2', '3',
3235 'C', 'D', 'E', 'F' };
/* two hex digits per byte: high nibble first, then low nibble */
3237 for(i = 0; i < s; i++) {
3238 buff[i * 2] = hex_table[src[i] >> 4];
3239 buff[i * 2 + 1] = hex_table[src[i] & 0xF];
3245 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3246 int pts_num, int pts_den)
3248 unsigned int gcd= ff_gcd(pts_num, pts_den);
3249 s->pts_wrap_bits = pts_wrap_bits;
3250 s->time_base.num = pts_num/gcd;
3251 s->time_base.den = pts_den/gcd;
3254 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, gcd);