2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 #include "libavcodec/opt.h"
25 #include "libavutil/avstring.h"
35 * @file libavformat/utils.c
36 * various utility functions for use within FFmpeg
/** Return the LIBAVFORMAT_VERSION_INT this library was compiled against. */
39 unsigned avformat_version(void)
41 return LIBAVFORMAT_VERSION_INT;
44 /* fraction handling */
47 * f = val + (num / den) + 0.5.
49 * 'num' is normalized so that it is such as 0 <= num < den.
51 * @param f fractional number
52 * @param val integer value
53 * @param num must be >= 0
54 * @param den must be >= 1
/* Initialize the AVFrac rational accumulator per the contract above.
 * NOTE(review): the function body is not visible in this chunk. */
56 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
69 * Fractional addition to f: f = f + (incr / f->den).
71 * @param f fractional number
72 * @param incr increment, can be positive or negative
/* NOTE(review): body only partially visible; the else-if fragment below
 * handles the num >= den case (presumably renormalizing — confirm). */
74 static void av_frac_add(AVFrac *f, int64_t incr)
87 } else if (num >= den) {
/* Global demuxer/muxer registries; appended to by
 * av_register_input_format()/av_register_output_format() below. */
94 /** head of registered input format linked list */
95 AVInputFormat *first_iformat = NULL;
96 /** head of registered output format linked list */
97 AVOutputFormat *first_oformat = NULL;
/** Iterate over registered input formats; pass NULL to get the list head. */
99 AVInputFormat *av_iformat_next(AVInputFormat *f)
101 if(f) return f->next;
102 else return first_iformat;
/** Iterate over registered output formats; pass NULL to get the list head. */
105 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
107 if(f) return f->next;
108 else return first_oformat;
/* Append 'format' to the tail of the global input-format list
 * (walks the next pointers until the end). Not thread-safe. */
111 void av_register_input_format(AVInputFormat *format)
115 while (*p != NULL) p = &(*p)->next;
/* Append 'format' to the tail of the global output-format list
 * (walks the next pointers until the end). Not thread-safe. */
120 void av_register_output_format(AVOutputFormat *format)
124 while (*p != NULL) p = &(*p)->next;
/* Return nonzero if filename's extension (text after the last '.')
 * case-insensitively matches one entry of the comma-separated
 * 'extensions' list. The copy loop below bounds each candidate to
 * sizeof(ext1)-1 bytes. */
129 int match_ext(const char *filename, const char *extensions)
137 ext = strrchr(filename, '.');
143 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
146 if (!strcasecmp(ext1, ext))
/* Case-insensitively match 'name' against the comma-separated list in
 * 'names'. FFMAX(p - names, namelen) below prevents a shorter list entry
 * from matching as a mere prefix of 'name'. Returns nonzero on match. */
156 static int match_format(const char *name, const char *names)
164 namelen = strlen(name);
165 while ((p = strchr(names, ','))) {
166 len = FFMAX(p - names, namelen);
167 if (!strncasecmp(name, names, len))
/* last (or only) entry: exact case-insensitive comparison */
171 return !strcasecmp(name, names);
/* Pick the best-matching registered output format given any of: a format
 * short name, a filename (matched by extension), and/or a MIME type.
 * Scores each candidate and keeps the highest scorer. Returns NULL if
 * nothing matches (NOTE(review): return path not visible in this chunk). */
174 AVOutputFormat *guess_format(const char *short_name, const char *filename,
175 const char *mime_type)
177 AVOutputFormat *fmt, *fmt_found;
178 int score_max, score;
180 /* specific test for image sequences */
181 #if CONFIG_IMAGE2_MUXER
/* numbered image sequence (e.g. img%03d.png) -> force the image2 muxer */
182 if (!short_name && filename &&
183 av_filename_number_test(filename) &&
184 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
185 return guess_format("image2", NULL, NULL);
188 /* Find the proper file type. */
192 while (fmt != NULL) {
194 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
196 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
198 if (filename && fmt->extensions &&
199 match_ext(filename, fmt->extensions)) {
202 if (score > score_max) {
/* Like guess_format(), but prefer a streamable variant of the guessed
 * muxer by probing for a registered "<name>_stream" format. */
211 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
212 const char *mime_type)
214 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
217 AVOutputFormat *stream_fmt;
218 char stream_format_name[64];
220 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
221 stream_fmt = guess_format(stream_format_name, NULL, NULL);
/* Guess the default codec id for a stream of the given type in muxer
 * 'fmt': video falls back to fmt->video_codec (with an image2 special
 * case keyed off the filename), audio returns fmt->audio_codec, anything
 * else CODEC_ID_NONE. */
230 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
231 const char *filename, const char *mime_type, enum CodecType type){
232 if(type == CODEC_TYPE_VIDEO){
233 enum CodecID codec_id= CODEC_ID_NONE;
235 #if CONFIG_IMAGE2_MUXER
236 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
237 codec_id= av_guess_image2_codec(filename);
240 if(codec_id == CODEC_ID_NONE)
241 codec_id= fmt->video_codec;
243 }else if(type == CODEC_TYPE_AUDIO)
244 return fmt->audio_codec;
246 return CODEC_ID_NONE;
/* Find a registered demuxer whose name matches 'short_name'
 * (comma-separated aliases handled by match_format()). */
249 AVInputFormat *av_find_input_format(const char *short_name)
252 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
253 if (match_format(short_name, fmt->name))
259 /* memory handling */
/* Allocate a packet of 'size' bytes, record the current byte position,
 * read up to 'size' bytes from 's' into it, and shrink the packet to the
 * number of bytes actually read. NOTE(review): error paths between the
 * visible lines are not shown here. */
262 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
264 int ret= av_new_packet(pkt, size);
269 pkt->pos= url_ftell(s);
271 ret= get_buffer(s, pkt->data, size);
275 av_shrink_packet(pkt, ret);
/* Return 1 if 'filename' contains a valid frame-number pattern, i.e.
 * av_get_frame_filename() can expand it for frame 1. NULL-safe. */
281 int av_filename_number_test(const char *filename)
284 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
/* Probe all registered demuxers against 'pd' and return the highest
 * scorer above *score_max (updating *score_max). Formats whose
 * AVFMT_NOFILE flag disagrees with 'is_opened' are skipped. Formats
 * without read_probe() fall back to extension matching. */
287 static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
289 AVInputFormat *fmt1, *fmt;
293 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
294 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
297 if (fmt1->read_probe) {
298 score = fmt1->read_probe(pd);
299 } else if (fmt1->extensions) {
300 if (match_ext(pd->filename, fmt1->extensions)) {
304 if (score > *score_max) {
/* tie between two formats: result ambiguous (handling not visible here) */
307 }else if (score == *score_max)
/* Convenience wrapper around av_probe_input_format2() with a local
 * score threshold variable. */
313 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
315 return av_probe_input_format2(pd, is_opened, &score);
/* Re-probe buffered data for a stream whose codec is still CODEC_ID_PROBE
 * and, if a known raw format is detected, set the stream's codec id and
 * type accordingly. Only a fixed set of format names is mapped. */
318 static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
321 fmt = av_probe_input_format2(pd, 1, &score);
324 if (!strcmp(fmt->name, "mp3")) {
325 st->codec->codec_id = CODEC_ID_MP3;
326 st->codec->codec_type = CODEC_TYPE_AUDIO;
327 } else if (!strcmp(fmt->name, "ac3")) {
328 st->codec->codec_id = CODEC_ID_AC3;
329 st->codec->codec_type = CODEC_TYPE_AUDIO;
330 } else if (!strcmp(fmt->name, "mpegvideo")) {
331 st->codec->codec_id = CODEC_ID_MPEG2VIDEO;
332 st->codec->codec_type = CODEC_TYPE_VIDEO;
333 } else if (!strcmp(fmt->name, "m4v")) {
334 st->codec->codec_id = CODEC_ID_MPEG4;
335 st->codec->codec_type = CODEC_TYPE_VIDEO;
336 } else if (!strcmp(fmt->name, "h264")) {
337 st->codec->codec_id = CODEC_ID_H264;
338 st->codec->codec_type = CODEC_TYPE_VIDEO;
344 /************************************************************/
345 /* input media file */
348 * Open a media file from an IO stream. 'fmt' must be specified.
350 int av_open_input_stream(AVFormatContext **ic_ptr,
351 ByteIOContext *pb, const char *filename,
352 AVInputFormat *fmt, AVFormatParameters *ap)
356 AVFormatParameters default_ap;
/* default parameters when the caller passed ap == NULL (presumably) */
360 memset(ap, 0, sizeof(default_ap));
363 if(!ap->prealloced_context)
364 ic = avformat_alloc_context();
368 err = AVERROR(ENOMEM);
373 ic->duration = AV_NOPTS_VALUE;
374 ic->start_time = AV_NOPTS_VALUE;
375 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
377 /* allocate private data */
378 if (fmt->priv_data_size > 0) {
379 ic->priv_data = av_mallocz(fmt->priv_data_size);
380 if (!ic->priv_data) {
381 err = AVERROR(ENOMEM);
385 ic->priv_data = NULL;
/* let the demuxer parse the container header, then remember where the
 * payload data starts so seeks can return to it */
388 if (ic->iformat->read_header) {
389 err = ic->iformat->read_header(ic, ap);
394 if (pb && !ic->data_offset)
395 ic->data_offset = url_ftell(ic->pb);
397 #if LIBAVFORMAT_VERSION_MAJOR < 53
398 ff_metadata_demux_compat(ic);
/* error path: free everything allocated so far (label not visible here) */
406 av_freep(&ic->priv_data);
407 for(i=0;i<ic->nb_streams;i++) {
408 AVStream *st = ic->streams[i];
410 av_free(st->priv_data);
411 av_free(st->codec->extradata);
421 /** size of probe buffer, for guessing file type from file contents */
422 #define PROBE_BUF_MIN 2048
423 #define PROBE_BUF_MAX (1<<20)
/* Open a media file by name: probe the input format (first by name only,
 * then by reading progressively larger buffers, doubling from
 * PROBE_BUF_MIN up to PROBE_BUF_MAX) and hand off to
 * av_open_input_stream(). AVFMT_NOFILE formats skip opening the file. */
425 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
428 AVFormatParameters *ap)
431 AVProbeData probe_data, *pd = &probe_data;
432 ByteIOContext *pb = NULL;
436 pd->filename = filename;
441 /* guess format if no file can be opened */
442 fmt = av_probe_input_format(pd, 0);
445 /* Do not open file if the format does not need it. XXX: specific
446 hack needed to handle RTSP/TCP */
447 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
448 /* if no file needed do not try to open one */
449 if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
453 url_setbufsize(pb, buf_size);
/* the score threshold drops to 0 on the final, largest probe pass */
456 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
457 int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
458 /* read probe data */
459 pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
460 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
461 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
/* rewind for the actual demux; if the stream is unseekable, reopen it */
462 if (url_fseek(pb, 0, SEEK_SET) < 0) {
464 if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
470 /* guess file format */
471 fmt = av_probe_input_format2(pd, 1, &score);
476 /* if still no format found, error */
482 /* check filename in case an image number is expected */
483 if (fmt->flags & AVFMT_NEEDNUMBER) {
484 if (!av_filename_number_test(filename)) {
485 err = AVERROR_NUMEXPECTED;
489 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
502 /*******************************************************/
/* Append a copy of 'pkt' to the singly linked packet list, maintaining
 * the head (*packet_buffer) and tail (*plast_pktl) pointers. Returns a
 * pointer to the stored packet (NULL on allocation failure, presumably —
 * the early-return lines are not visible here). */
504 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
505 AVPacketList **plast_pktl){
506 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
511 (*plast_pktl)->next = pktl;
513 *packet_buffer = pktl;
515 /* add the packet in the buffered packet list */
/* Read the next raw (unparsed) packet. Packets for streams whose codec
 * is still CODEC_ID_PROBE are held in raw_packet_buffer and their
 * payloads accumulated into st->probe_data until probing identifies the
 * codec; identified packets are returned (first from the buffer, then
 * straight from the demuxer). Forced codec ids from the context override
 * the demuxer's choice. */
521 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
527 AVPacketList *pktl = s->raw_packet_buffer;
/* drain previously buffered packets once their stream's codec is known */
531 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE){
532 s->raw_packet_buffer = pktl->next;
539 ret= s->iformat->read_packet(s, pkt);
542 st= s->streams[pkt->stream_index];
544 switch(st->codec->codec_type){
545 case CODEC_TYPE_VIDEO:
546 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
548 case CODEC_TYPE_AUDIO:
549 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
551 case CODEC_TYPE_SUBTITLE:
552 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
556 if(!pktl && st->codec->codec_id!=CODEC_ID_PROBE)
559 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
561 if(st->codec->codec_id == CODEC_ID_PROBE){
562 AVProbeData *pd = &st->probe_data;
563 /* append this packet's bytes to the per-stream probe buffer */
564 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
565 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
566 pd->buf_size += pkt->size;
567 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
/* re-run probing only when the buffer size crosses a power of two */
569 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
570 set_codec_from_probe_data(st, pd, 1);
571 if(st->codec->codec_id != CODEC_ID_PROBE){
580 /**********************************************************/
583 * Get the number of samples of an audio frame. Return -1 on error.
585 static int get_audio_frame_size(AVCodecContext *enc, int size)
/* Vorbis frame sizes are variable; handled specially (body not visible) */
589 if(enc->codec_id == CODEC_ID_VORBIS)
592 if (enc->frame_size <= 1) {
593 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
595 if (bits_per_sample) {
596 if (enc->channels == 0)
/* PCM-style codec: derive sample count from the byte size */
598 frame_size = (size << 3) / (bits_per_sample * enc->channels);
600 /* used for example by ADPCM codecs */
601 if (enc->bit_rate == 0)
603 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
606 frame_size = enc->frame_size;
613 * Return the frame duration in seconds. Return 0 if not available.
/* Outputs the duration as the rational *pnum / *pden. For video it uses
 * the stream or codec time base (whichever looks sane, i.e. num*1000 >
 * den), scaled by the parser's repeat_pict; for audio it converts the
 * packet size to samples over the sample rate. */
615 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
616 AVCodecParserContext *pc, AVPacket *pkt)
622 switch(st->codec->codec_type) {
623 case CODEC_TYPE_VIDEO:
624 if(st->time_base.num*1000LL > st->time_base.den){
625 *pnum = st->time_base.num;
626 *pden = st->time_base.den;
627 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
628 *pnum = st->codec->time_base.num;
629 *pden = st->codec->time_base.den;
630 if (pc && pc->repeat_pict) {
631 *pnum = (*pnum) * (1 + pc->repeat_pict);
635 case CODEC_TYPE_AUDIO:
636 frame_size = get_audio_frame_size(st->codec, pkt->size);
640 *pden = st->codec->sample_rate;
/* Return nonzero if every frame of this codec is a keyframe: all audio
 * codecs, plus a whitelist of intra-only video codecs. Used to force
 * PKT_FLAG_KEY in compute_pkt_fields(). */
647 static int is_intra_only(AVCodecContext *enc){
648 if(enc->codec_type == CODEC_TYPE_AUDIO){
650 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
651 switch(enc->codec_id){
653 case CODEC_ID_MJPEGB:
655 case CODEC_ID_RAWVIDEO:
656 case CODEC_ID_DVVIDEO:
657 case CODEC_ID_HUFFYUV:
658 case CODEC_ID_FFVHUFF:
663 case CODEC_ID_JPEG2000:
/* Once the first real DTS for a stream is known, record it as
 * st->first_dts and shift the timestamps of all packets already buffered
 * for that stream so they become absolute. Also initializes
 * st->start_time from the first available PTS. No-op if first_dts is
 * already set or the inputs are unusable. */
671 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
672 int64_t dts, int64_t pts)
674 AVStream *st= s->streams[stream_index];
675 AVPacketList *pktl= s->packet_buffer;
677 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
680 st->first_dts= dts - st->cur_dts;
683 for(; pktl; pktl= pktl->next){
684 if(pktl->pkt.stream_index != stream_index)
686 //FIXME think more about this check
687 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
688 pktl->pkt.pts += st->first_dts;
690 if(pktl->pkt.dts != AV_NOPTS_VALUE)
691 pktl->pkt.dts += st->first_dts;
693 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
694 st->start_time= pktl->pkt.pts;
696 if (st->start_time == AV_NOPTS_VALUE)
697 st->start_time = pts;
/* Backfill dts/pts/duration on already-buffered packets of pkt's stream
 * that have no timestamps at all, by stepping cur_dts forward in units
 * of pkt->duration. The first pass (when first_dts is known) rewinds
 * cur_dts to the start of the untimed run; the second pass assigns the
 * values. */
700 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
702 AVPacketList *pktl= s->packet_buffer;
705 if(st->first_dts != AV_NOPTS_VALUE){
706 cur_dts= st->first_dts;
/* walk back: stop at the first packet that already has timing info */
707 for(; pktl; pktl= pktl->next){
708 if(pktl->pkt.stream_index == pkt->stream_index){
709 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
711 cur_dts -= pkt->duration;
714 pktl= s->packet_buffer;
715 st->first_dts = cur_dts;
716 }else if(st->cur_dts)
719 for(; pktl; pktl= pktl->next){
720 if(pktl->pkt.stream_index != pkt->stream_index)
722 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
723 && !pktl->pkt.duration){
724 pktl->pkt.dts= cur_dts;
725 if(!st->codec->has_b_frames)
726 pktl->pkt.pts= cur_dts;
727 cur_dts += pkt->duration;
728 pktl->pkt.duration= pkt->duration;
732 if(st->first_dts == AV_NOPTS_VALUE)
733 st->cur_dts= cur_dts;
/* Fill in missing timestamp-related fields of 'pkt' (pts, dts, duration,
 * key flag) for stream 'st', using the parser context 'pc' when present.
 * Handles timestamp wrap, parser-provided sync points, B-frame
 * presentation delay, and interpolation from st->cur_dts. This function
 * is deliberately kept byte-identical: its correctness depends on exact
 * statement order. */
736 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
737 AVCodecParserContext *pc, AVPacket *pkt)
739 int num, den, presentation_delayed, delay, i;
742 /* do we have a video B-frame ? */
743 delay= st->codec->has_b_frames;
744 presentation_delayed = 0;
745 /* XXX: need has_b_frame, but cannot get it if the codec is
748 pc && pc->pict_type != FF_B_TYPE)
749 presentation_delayed = 1;
/* dts > pts can only mean the dts wrapped around pts_wrap_bits earlier */
751 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
752 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
753 pkt->dts -= 1LL<<st->pts_wrap_bits;
756 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
757 // we take the conservative approach and discard both
758 // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
759 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
760 av_log(s, AV_LOG_WARNING, "invalid dts/pts combination\n");
761 pkt->dts= pkt->pts= AV_NOPTS_VALUE;
764 if (pkt->duration == 0) {
765 compute_frame_duration(&num, &den, st, pc, pkt);
767 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
769 if(pkt->duration != 0 && s->packet_buffer)
770 update_initial_durations(s, st, pkt);
774 /* correct timestamps with byte offset if demuxers only have timestamps
775 on packet boundaries */
776 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
777 /* this will estimate bitrate based on this frame's duration and size */
778 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
779 if(pkt->pts != AV_NOPTS_VALUE)
781 if(pkt->dts != AV_NOPTS_VALUE)
/* parser supplied explicit dts sync information (e.g. DV, MPEG-TS) */
785 if (pc && pc->dts_sync_point >= 0) {
786 // we have synchronization info from the parser
787 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
789 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
790 if (pkt->dts != AV_NOPTS_VALUE) {
791 // got DTS from the stream, update reference timestamp
792 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
793 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
794 } else if (st->reference_dts != AV_NOPTS_VALUE) {
795 // compute DTS based on reference timestamp
796 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
797 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
799 if (pc->dts_sync_point > 0)
800 st->reference_dts = pkt->dts; // new reference
804 /* This may be redundant, but it should not hurt. */
805 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
806 presentation_delayed = 1;
808 //    av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
809 /* interpolate PTS and DTS if they are not present */
810 //We skip H264 currently because delay and has_b_frames are not reliably set
811 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
812 if (presentation_delayed) {
813 /* DTS = decompression timestamp */
814 /* PTS = presentation timestamp */
815 if (pkt->dts == AV_NOPTS_VALUE)
816 pkt->dts = st->last_IP_pts;
817 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
818 if (pkt->dts == AV_NOPTS_VALUE)
819 pkt->dts = st->cur_dts;
821 /* this is tricky: the dts must be incremented by the duration
822 of the frame we are displaying, i.e. the last I- or P-frame */
823 if (st->last_IP_duration == 0)
824 st->last_IP_duration = pkt->duration;
825 if(pkt->dts != AV_NOPTS_VALUE)
826 st->cur_dts = pkt->dts + st->last_IP_duration;
827 st->last_IP_duration = pkt->duration;
828 st->last_IP_pts= pkt->pts;
829 /* cannot compute PTS if not present (we can compute it only
830 by knowing the future */
831 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
/* heuristic: detect a pts that is one frame too early and nudge it */
832 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
833 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
834 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
835 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
836 pkt->pts += pkt->duration;
837 //                av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
841 /* presentation is not delayed : PTS and DTS are the same */
842 if(pkt->pts == AV_NOPTS_VALUE)
844 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
845 if(pkt->pts == AV_NOPTS_VALUE)
846 pkt->pts = st->cur_dts;
848 if(pkt->pts != AV_NOPTS_VALUE)
849 st->cur_dts = pkt->pts + pkt->duration;
/* reorder the pts ring buffer to recover a monotonic dts estimate */
853 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
854 st->pts_buffer[0]= pkt->pts;
855 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
856 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
857 if(pkt->dts == AV_NOPTS_VALUE)
858 pkt->dts= st->pts_buffer[0];
859 if(st->codec->codec_id == CODEC_ID_H264){ //we skiped it above so we try here
860 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
862 if(pkt->dts > st->cur_dts)
863 st->cur_dts = pkt->dts;
866 //    av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
869 if(is_intra_only(st->codec))
870 pkt->flags |= PKT_FLAG_KEY;
873 /* keyframe computation */
874 if (pc->key_frame == 1)
875 pkt->flags |= PKT_FLAG_KEY;
876 else if (pc->key_frame == -1 && pc->pict_type == FF_I_TYPE)
877 pkt->flags |= PKT_FLAG_KEY;
880 pkt->convergence_duration = pc->convergence_duration;
/* Core demux loop: produce the next parsed frame. Either pass the
 * current raw packet straight through (no parser needed), feed it through
 * av_parser_parse2() to assemble complete frames, or pull a new raw
 * packet via av_read_packet() and set up parsing state for its stream.
 * Also maintains the generic index for AVFMT_GENERIC_INDEX formats.
 * Kept byte-identical: state transitions between the visible fragments
 * are order-sensitive. */
884 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
892 /* select current input stream component */
895 if (!st->need_parsing || !st->parser) {
896 /* no parsing needed: we just output the packet as is */
897 /* raw data support */
898 *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
899 compute_pkt_fields(s, st, NULL, pkt);
901 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
902 (pkt->flags & PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
903 ff_reduce_index(s, st->index);
904 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
907 } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
908 len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
909 st->cur_ptr, st->cur_len,
910 st->cur_pkt.pts, st->cur_pkt.dts,
/* timestamps are consumed by the parser on first use */
912 st->cur_pkt.pts = AV_NOPTS_VALUE;
913 st->cur_pkt.dts = AV_NOPTS_VALUE;
914 /* increment read pointer */
918 /* return packet if any */
922 pkt->stream_index = st->index;
923 pkt->pts = st->parser->pts;
924 pkt->dts = st->parser->dts;
925 pkt->pos = st->parser->pos;
926 pkt->destruct = NULL;
927 compute_pkt_fields(s, st, st->parser, pkt);
929 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
930 ff_reduce_index(s, st->index);
931 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
932 0, 0, AVINDEX_KEYFRAME);
/* current packet fully consumed by the parser */
939 av_free_packet(&st->cur_pkt);
944 /* read next packet */
945 ret = av_read_packet(s, &cur_pkt);
947 if (ret == AVERROR(EAGAIN))
949 /* return the last frames, if any */
950 for(i = 0; i < s->nb_streams; i++) {
952 if (st->parser && st->need_parsing) {
/* EOF: flush the parser with a NULL/zero input */
953 av_parser_parse2(st->parser, st->codec,
954 &pkt->data, &pkt->size,
956 AV_NOPTS_VALUE, AV_NOPTS_VALUE,
962 /* no more packets: really terminate parsing */
965 st = s->streams[cur_pkt.stream_index];
966 st->cur_pkt= cur_pkt;
968 if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
969 st->cur_pkt.dts != AV_NOPTS_VALUE &&
970 st->cur_pkt.pts < st->cur_pkt.dts){
971 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
972 st->cur_pkt.stream_index,
976 //            av_free_packet(&st->cur_pkt);
980 if(s->debug & FF_FDEBUG_TS)
981 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
982 st->cur_pkt.stream_index,
/* set up parse state for the newly read packet */
989 st->cur_ptr = st->cur_pkt.data;
990 st->cur_len = st->cur_pkt.size;
991 if (st->need_parsing && !st->parser) {
992 st->parser = av_parser_init(st->codec->codec_id);
994 /* no parser available: just output the raw packets */
995 st->need_parsing = AVSTREAM_PARSE_NONE;
996 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
997 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
999 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
1000 st->parser->next_frame_offset=
1001 st->parser->cur_offset= st->cur_pkt.pos;
1006 if(s->debug & FF_FDEBUG_TS)
1007 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
/* Public frame reader. Without AVFMT_FLAG_GENPTS it serves packets from
 * the buffer (or directly from av_read_frame_internal()). With genpts it
 * delays packets whose pts is unknown, scanning later buffered packets
 * of the same stream to infer the pts from a following dts, buffering
 * newly read packets until the pts can be resolved. */
1017 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1021 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1024 pktl = s->packet_buffer;
1026 AVPacket *next_pkt= &pktl->pkt;
1028 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
1029 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1030 if( pktl->pkt.stream_index == next_pkt->stream_index
1031 && next_pkt->dts < pktl->pkt.dts
1032 && pktl->pkt.pts != pktl->pkt.dts //not b frame
1033 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
1034 next_pkt->pts= pktl->pkt.dts;
1038 pktl = s->packet_buffer;
/* hand out the head packet once its pts is known (or unknowable) */
1041 if( next_pkt->pts != AV_NOPTS_VALUE
1042 || next_pkt->dts == AV_NOPTS_VALUE
1044 /* read packet from packet buffer, if there is data */
1046 s->packet_buffer = pktl->next;
1052 int ret= av_read_frame_internal(s, pkt);
1054 if(pktl && ret != AVERROR(EAGAIN)){
/* genpts path: stash the new packet on the buffer for later resolution */
1061 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1062 &s->packet_buffer_end)) < 0)
1063 return AVERROR(ENOMEM);
1065 assert(!s->packet_buffer);
1066 return av_read_frame_internal(s, pkt);
1071 /* XXX: suppress the packet queue */
/* Free every packet in s->packet_buffer and reset the list head. */
1072 static void flush_packet_queue(AVFormatContext *s)
1077 pktl = s->packet_buffer;
1080 s->packet_buffer = pktl->next;
1081 av_free_packet(&pktl->pkt);
1086 /*******************************************************/
/* Pick the default stream for seeking/sync: the first video stream if
 * any, otherwise the first audio stream, otherwise stream 0. Returns a
 * negative value when there are no streams at all. */
1089 int av_find_default_stream_index(AVFormatContext *s)
1091 int first_audio_index = -1;
1095 if (s->nb_streams <= 0)
1097 for(i = 0; i < s->nb_streams; i++) {
1099 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1102 if (first_audio_index < 0 && st->codec->codec_type == CODEC_TYPE_AUDIO)
1103 first_audio_index = i;
1105 return first_audio_index >= 0 ? first_audio_index : 0;
1109 * Flush the frame reader.
/* Drop all buffered packets and reset per-stream parse/timestamp state
 * so reading can restart cleanly after a seek. */
1111 static void av_read_frame_flush(AVFormatContext *s)
1116 flush_packet_queue(s);
1120 /* for each stream, reset read state */
1121 for(i = 0; i < s->nb_streams; i++) {
1125 av_parser_close(st->parser);
1127 av_free_packet(&st->cur_pkt);
1129 st->last_IP_pts = AV_NOPTS_VALUE;
1130 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1131 st->reference_dts = AV_NOPTS_VALUE;
/* After a seek, set every stream's cur_dts to 'timestamp' (expressed in
 * ref_st's time base), rescaled into each stream's own time base. */
1138 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1141 for(i = 0; i < s->nb_streams; i++) {
1142 AVStream *st = s->streams[i];
1144 st->cur_dts = av_rescale(timestamp,
1145 st->time_base.den * (int64_t)ref_st->time_base.num,
1146 st->time_base.num * (int64_t)ref_st->time_base.den);
/* Keep the index within s->max_index_size bytes: when the entry count
 * reaches the limit, halve the index by keeping every second entry. */
1150 void ff_reduce_index(AVFormatContext *s, int stream_index)
1152 AVStream *st= s->streams[stream_index];
1153 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1155 if((unsigned)st->nb_index_entries >= max_entries){
1157 for(i=0; 2*i<st->nb_index_entries; i++)
1158 st->index_entries[i]= st->index_entries[2*i];
1159 st->nb_index_entries= i;
/* Insert (or update) an index entry keeping st->index_entries sorted by
 * timestamp. Grows the array with av_fast_realloc(), appends when the
 * new timestamp is the largest, otherwise shifts entries to make room.
 * An existing entry at the same timestamp is updated in place (its
 * min_distance is never reduced). Returns the entry's index
 * (NOTE(review): return statement not visible in this chunk). */
1163 int av_add_index_entry(AVStream *st,
1164 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1166 AVIndexEntry *entries, *ie;
/* guard against overflow of the allocation size computation */
1169 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1172 entries = av_fast_realloc(st->index_entries,
1173 &st->index_entries_allocated_size,
1174 (st->nb_index_entries + 1) *
1175 sizeof(AVIndexEntry));
1179 st->index_entries= entries;
1181 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1184 index= st->nb_index_entries++;
1185 ie= &entries[index];
1186 assert(index==0 || ie[-1].timestamp < timestamp);
1188 ie= &entries[index];
1189 if(ie->timestamp != timestamp){
1190 if(ie->timestamp <= timestamp)
1192 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1193 st->nb_index_entries++;
1194 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1195 distance= ie->min_distance;
1199 ie->timestamp = timestamp;
1200 ie->min_distance= distance;
/* Binary-search the sorted index for 'wanted_timestamp'. Direction is
 * controlled by AVSEEK_FLAG_BACKWARD; unless AVSEEK_FLAG_ANY is set, the
 * result is then walked to the nearest keyframe entry in that direction.
 * Returns the entry index, or negative if none qualifies (return path
 * not fully visible here). */
1207 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1210 AVIndexEntry *entries= st->index_entries;
1211 int nb_entries= st->nb_index_entries;
1220 timestamp = entries[m].timestamp;
1221 if(timestamp >= wanted_timestamp)
1223 if(timestamp <= wanted_timestamp)
1226 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1228 if(!(flags & AVSEEK_FLAG_ANY)){
1229 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1230 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/* Seek using the demuxer's read_timestamp() via binary search
 * (av_gen_search). The stream's existing index is consulted first to
 * narrow [pos_min, pos_max] / [ts_min, ts_max] before searching; on
 * success the file position is set and all streams' cur_dts updated. */
1241 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1242 AVInputFormat *avif= s->iformat;
1243 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1244 int64_t ts_min, ts_max, ts;
1248 if (stream_index < 0)
1252 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1256 ts_min= AV_NOPTS_VALUE;
1257 pos_limit= -1; //gcc falsely says it may be uninitialized
1259 st= s->streams[stream_index];
1260 if(st->index_entries){
1263 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1264 index= FFMAX(index, 0);
1265 e= &st->index_entries[index];
/* cached lower bound from the index */
1267 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1269 ts_min= e->timestamp;
1271 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
/* cached upper bound from the index */
1278 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1279 assert(index < st->nb_index_entries);
1281 e= &st->index_entries[index];
1282 assert(e->timestamp >= target_ts);
1284 ts_max= e->timestamp;
1285 pos_limit= pos_max - e->min_distance;
1287 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1288 pos_max,pos_limit, ts_max);
1293 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1298 url_fseek(s->pb, pos, SEEK_SET);
1300 av_update_cur_dts(s, st, ts);
/* Generic timestamp binary search over a byte range. Establishes initial
 * [ts_min, ts_max] bounds by reading timestamps at the file start and
 * end, then iterates: interpolated guess first, bisection if that stops
 * making progress, linear stepping as the last resort. Returns the byte
 * position to seek to and stores the found timestamp in *ts_ret. Kept
 * byte-identical: the convergence logic is order-sensitive. */
1305 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1307 int64_t start_pos, filesize;
1311 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1314 if(ts_min == AV_NOPTS_VALUE){
1315 pos_min = s->data_offset;
1316 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1317 if (ts_min == AV_NOPTS_VALUE)
1321 if(ts_max == AV_NOPTS_VALUE){
1323 filesize = url_fsize(s->pb);
1324 pos_max = filesize - 1;
/* walk backwards from EOF until a timestamp is found */
1327 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1329 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1330 if (ts_max == AV_NOPTS_VALUE)
/* extend pos_max to cover the last packet with this timestamp */
1334 int64_t tmp_pos= pos_max + 1;
1335 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1336 if(tmp_ts == AV_NOPTS_VALUE)
1340 if(tmp_pos >= filesize)
1346 if(ts_min > ts_max){
1348 }else if(ts_min == ts_max){
1353 while (pos_min < pos_limit) {
1355 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1359 assert(pos_limit <= pos_max);
1362 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1363 // interpolate position (better than dichotomy)
1364 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1365 + pos_min - approximate_keyframe_distance;
1366 }else if(no_change==1){
1367 // bisection, if interpolation failed to change min or max pos last time
1368 pos = (pos_min + pos_limit)>>1;
1370 /* linear search if bisection failed, can only happen if there
1371 are very few or no keyframes between min/max */
1376 else if(pos > pos_limit)
1380 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1386 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1387 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit,
1388 start_pos, no_change);
1390 if(ts == AV_NOPTS_VALUE){
1391 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1394 assert(ts != AV_NOPTS_VALUE);
1395 if (target_ts <= ts) {
1396 pos_limit = start_pos - 1;
1400 if (target_ts >= ts) {
1406 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1407 ts  = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min  : ts_max;
1410 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1412 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1413 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1414 pos, ts_min, target_ts, ts_max);
/* Seek to an absolute byte position (AVSEEK_FLAG_BYTE), clamped to
 * [data_offset, filesize-1], then update all streams' cur_dts. */
1420 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1421 int64_t pos_min, pos_max;
1425 if (stream_index < 0)
1428 st= s->streams[stream_index];
1431 pos_min = s->data_offset;
1432 pos_max = url_fsize(s->pb) - 1;
1434 if     (pos < pos_min) pos= pos_min;
1435 else if(pos > pos_max) pos= pos_max;
1437 url_fseek(s->pb, pos, SEEK_SET);
1440 av_update_cur_dts(s, st, ts);
/* Index-based generic seek. If the target lies beyond the current index,
 * seek to the last indexed position (or data_offset) and read frames
 * forward — extending the index as keyframes pass — until the target
 * timestamp is covered, then jump to the matching index entry. */
1445 static int av_seek_frame_generic(AVFormatContext *s,
1446 int stream_index, int64_t timestamp, int flags)
1452 st = s->streams[stream_index];
1454 index = av_index_search_timestamp(st, timestamp, flags);
1456 if(index < 0 || index==st->nb_index_entries-1){
1460 if(st->nb_index_entries){
1461 assert(st->index_entries);
1462 ie= &st->index_entries[st->nb_index_entries-1];
1463 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1465 av_update_cur_dts(s, st, ie->timestamp);
1467 if ((ret = url_fseek(s->pb, s->data_offset, SEEK_SET)) < 0)
/* read forward until the index reaches the requested timestamp */
1473 ret = av_read_frame(s, &pkt);
1474 }while(ret == AVERROR(EAGAIN));
1477 av_free_packet(&pkt);
1478 if(stream_index == pkt.stream_index){
1479 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1483 index = av_index_search_timestamp(st, timestamp, flags);
1488 av_read_frame_flush(s);
1489 if (s->iformat->read_seek){
1490 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1493 ie = &st->index_entries[index];
1494 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1496 av_update_cur_dts(s, st, ie->timestamp);
/**
 * Public seek entry point.  Flushes buffered packets, then dispatches to:
 * byte seek (AVSEEK_FLAG_BYTE), the demuxer's own read_seek(), a binary
 * search over read_timestamp(), or the generic index-based seek — in that
 * order of preference.  A negative stream_index selects a default stream
 * and interprets the timestamp in AV_TIME_BASE units.
 */
1501 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1506 av_read_frame_flush(s);
1508 if(flags & AVSEEK_FLAG_BYTE)
1509 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1511 if(stream_index < 0){
1512 stream_index= av_find_default_stream_index(s);
1513 if(stream_index < 0)
1516 st= s->streams[stream_index];
1517 /* timestamp for default must be expressed in AV_TIME_BASE units */
1518 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1521 /* first, we try the format specific seek */
1522 if (s->iformat->read_seek)
1523 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
/* fall back to a binary search if the demuxer can report timestamps */
1530 if(s->iformat->read_timestamp)
1531 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1533 return av_seek_frame_generic(s, stream_index, timestamp, flags);
/**
 * New-style seek API: seek to the timestamp ts within [min_ts, max_ts].
 * Prefers the demuxer's read_seek2(); otherwise falls back to the old
 * av_seek_frame() API, choosing AVSEEK_FLAG_BACKWARD when ts is closer
 * to max_ts than to min_ts.
 */
1536 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
/* reject an inconsistent interval */
1538 if(min_ts > ts || max_ts < ts)
1541 av_read_frame_flush(s);
1543 if (s->iformat->read_seek2)
1544 return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1546 if(s->iformat->read_timestamp){
1547 //try to seek via read_timestamp()
1550 //Fall back to the old API if the new one is not implemented but the old is
1551 //Note the old API has somewhat different semantics
1552 if(s->iformat->read_seek || 1)
1553 return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));
1555 // try some generic seek like av_seek_frame_generic() but with new ts semantics
1558 /*******************************************************/
1561 * Returns TRUE if any stream in the file has an accurate duration.
1563 * @return TRUE if at least one stream has an accurate duration.
/* Check whether at least one stream carries a known (non-AV_NOPTS_VALUE)
 * duration. */
1565 static int av_has_duration(AVFormatContext *ic)
1570 for(i = 0;i < ic->nb_streams; i++) {
1571 st = ic->streams[i];
1572 if (st->duration != AV_NOPTS_VALUE)
1579 * Estimate the stream timings from the one of each components.
1581 * Also computes the global bitrate if possible.
/**
 * Derive the global start_time, duration and (when file_size is known)
 * bit_rate of the format context from the per-stream values, rescaling
 * each stream's timestamps to AV_TIME_BASE units.
 */
1583 static void av_update_stream_timings(AVFormatContext *ic)
1585 int64_t start_time, start_time1, end_time, end_time1;
1586 int64_t duration, duration1;
1590 start_time = INT64_MAX;
1591 end_time = INT64_MIN;
1592 duration = INT64_MIN;
1593 for(i = 0;i < ic->nb_streams; i++) {
1594 st = ic->streams[i];
/* track the earliest start and latest end over all streams */
1595 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1596 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1597 if (start_time1 < start_time)
1598 start_time = start_time1;
1599 if (st->duration != AV_NOPTS_VALUE) {
1600 end_time1 = start_time1
1601 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1602 if (end_time1 > end_time)
1603 end_time = end_time1;
/* also keep the largest raw stream duration as a lower bound */
1606 if (st->duration != AV_NOPTS_VALUE) {
1607 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1608 if (duration1 > duration)
1609 duration = duration1;
1612 if (start_time != INT64_MAX) {
1613 ic->start_time = start_time;
1614 if (end_time != INT64_MIN) {
1615 if (end_time - start_time > duration)
1616 duration = end_time - start_time;
1619 if (duration != INT64_MIN) {
1620 ic->duration = duration;
1621 if (ic->file_size > 0) {
1622 /* compute the bitrate from file size and total duration */
1623 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1624 (double)ic->duration;
/**
 * Recompute the global timings, then copy them (converted into each
 * stream's own time base) onto every stream that has no start_time yet.
 */
1629 static void fill_all_stream_timings(AVFormatContext *ic)
1634 av_update_stream_timings(ic);
1635 for(i = 0;i < ic->nb_streams; i++) {
1636 st = ic->streams[i];
1637 if (st->start_time == AV_NOPTS_VALUE) {
1638 if(ic->start_time != AV_NOPTS_VALUE)
1639 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1640 if(ic->duration != AV_NOPTS_VALUE)
1641 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/**
 * Estimate stream durations from the total bit rate and file size.
 * The global bit rate is taken as the sum of the per-stream codec bit
 * rates when not already set; durations are then file_size*8/bit_rate,
 * rescaled into each stream's time base.
 */
1646 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1648 int64_t filesize, duration;
1652 /* if bit_rate is already set, we believe it */
1653 if (ic->bit_rate == 0) {
1655 for(i=0;i<ic->nb_streams;i++) {
1656 st = ic->streams[i];
1657 bit_rate += st->codec->bit_rate;
1659 ic->bit_rate = bit_rate;
1662 /* if duration is already set, we believe it */
1663 if (ic->duration == AV_NOPTS_VALUE &&
1664 ic->bit_rate != 0 &&
1665 ic->file_size != 0) {
1666 filesize = ic->file_size;
1668 for(i = 0; i < ic->nb_streams; i++) {
1669 st = ic->streams[i];
1670 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1671 if (st->duration == AV_NOPTS_VALUE)
1672 st->duration = duration;
1678 #define DURATION_MAX_READ_SIZE 250000
1680 /* only usable for MPEG-PS streams */
/**
 * Estimate start times and durations by reading real PTS values: a bounded
 * amount of data (DURATION_MAX_READ_SIZE) from the head of the file for
 * the start times, and from near the tail for the end times.  Restores the
 * original file position afterwards.  Only used for MPEG-PS/TS input.
 *
 * @param old_offset file position to seek back to when done
 */
1681 static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1683 AVPacket pkt1, *pkt = &pkt1;
1685 int read_size, i, ret;
1687 int64_t filesize, offset, duration;
1691 /* flush packet queue */
1692 flush_packet_queue(ic);
1694 for(i=0;i<ic->nb_streams;i++) {
1695 st = ic->streams[i];
1697 av_parser_close(st->parser);
1699 av_free_packet(&st->cur_pkt);
1703 /* we read the first packets to get the first PTS (not fully
1704 accurate, but it is enough now) */
1705 url_fseek(ic->pb, 0, SEEK_SET);
1708 if (read_size >= DURATION_MAX_READ_SIZE)
1710 /* if all info is available, we can stop */
1711 for(i = 0;i < ic->nb_streams; i++) {
1712 st = ic->streams[i];
1713 if (st->start_time == AV_NOPTS_VALUE)
1716 if (i == ic->nb_streams)
1720 ret = av_read_packet(ic, pkt);
1721 }while(ret == AVERROR(EAGAIN));
1724 read_size += pkt->size;
1725 st = ic->streams[pkt->stream_index];
/* first PTS seen on a stream becomes its start_time */
1726 if (pkt->pts != AV_NOPTS_VALUE) {
1727 if (st->start_time == AV_NOPTS_VALUE)
1728 st->start_time = pkt->pts;
1730 av_free_packet(pkt);
1733 /* estimate the end time (duration) */
1734 /* XXX: may need to support wrapping */
1735 filesize = ic->file_size;
1736 offset = filesize - DURATION_MAX_READ_SIZE;
1740 url_fseek(ic->pb, offset, SEEK_SET);
1743 if (read_size >= DURATION_MAX_READ_SIZE)
1747 ret = av_read_packet(ic, pkt);
1748 }while(ret == AVERROR(EAGAIN));
1751 read_size += pkt->size;
1752 st = ic->streams[pkt->stream_index];
/* keep the largest end_time - start_time seen near the tail */
1753 if (pkt->pts != AV_NOPTS_VALUE &&
1754 st->start_time != AV_NOPTS_VALUE) {
1755 end_time = pkt->pts;
1756 duration = end_time - st->start_time;
1758 if (st->duration == AV_NOPTS_VALUE ||
1759 st->duration < duration)
1760 st->duration = duration;
1763 av_free_packet(pkt);
1766 fill_all_stream_timings(ic);
/* rewind and reset per-stream decode state disturbed by the probing */
1768 url_fseek(ic->pb, old_offset, SEEK_SET);
1769 for(i=0; i<ic->nb_streams; i++){
1771 st->cur_dts= st->first_dts;
1772 st->last_IP_pts = AV_NOPTS_VALUE;
/**
 * Choose the best available strategy to estimate timings: accurate PTS
 * probing for seekable MPEG-PS/TS, per-stream durations when any stream
 * has them, otherwise the bit-rate based estimate.
 */
1776 static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset)
1780 /* get the file size, if possible */
1781 if (ic->iformat->flags & AVFMT_NOFILE) {
1784 file_size = url_fsize(ic->pb);
1788 ic->file_size = file_size;
1790 if ((!strcmp(ic->iformat->name, "mpeg") ||
1791 !strcmp(ic->iformat->name, "mpegts")) &&
1792 file_size && !url_is_streamed(ic->pb)) {
1793 /* get accurate estimate from the PTSes */
1794 av_estimate_timings_from_pts(ic, old_offset);
1795 } else if (av_has_duration(ic)) {
1796 /* at least one component has timings - we use them for all
1798 fill_all_stream_timings(ic);
1800 /* less precise: use bitrate info */
1801 av_estimate_timings_from_bit_rate(ic);
1803 av_update_stream_timings(ic);
/* debug dump of the resulting timings (block appears conditionally
 * compiled; surrounding #if lines are elided in this excerpt) */
1809 for(i = 0;i < ic->nb_streams; i++) {
1810 st = ic->streams[i];
1811 printf("%d: start_time: %0.3f duration: %0.3f\n",
1812 i, (double)st->start_time / AV_TIME_BASE,
1813 (double)st->duration / AV_TIME_BASE);
1815 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1816 (double)ic->start_time / AV_TIME_BASE,
1817 (double)ic->duration / AV_TIME_BASE,
1818 ic->bit_rate / 1000);
/**
 * Check whether a codec context holds enough parameters to be usable:
 * sample rate/channels/sample format for audio (plus frame_size for
 * Vorbis and AAC), width and pixel format for video, and a known codec id.
 */
1823 static int has_codec_parameters(AVCodecContext *enc)
1826 switch(enc->codec_type) {
1827 case CODEC_TYPE_AUDIO:
1828 val = enc->sample_rate && enc->channels && enc->sample_fmt != SAMPLE_FMT_NONE;
/* these codecs additionally need a known frame size */
1829 if(!enc->frame_size &&
1830 (enc->codec_id == CODEC_ID_VORBIS ||
1831 enc->codec_id == CODEC_ID_AAC))
1834 case CODEC_TYPE_VIDEO:
1835 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1841 return enc->codec_id != CODEC_ID_NONE && val != 0;
/**
 * Open the stream's decoder if needed and decode one packet, in the hope
 * of filling in codec parameters that the demuxer could not provide
 * (e.g. dimensions, sample format).
 */
1844 static int try_decode_frame(AVStream *st, AVPacket *avpkt)
1848 int got_picture, data_size, ret=0;
1851 if(!st->codec->codec){
1852 codec = avcodec_find_decoder(st->codec->codec_id);
1855 ret = avcodec_open(st->codec, codec);
/* only decode while parameters are still missing */
1860 if(!has_codec_parameters(st->codec)){
1861 switch(st->codec->codec_type) {
1862 case CODEC_TYPE_VIDEO:
1863 avcodec_get_frame_defaults(&picture);
1864 ret = avcodec_decode_video2(st->codec, &picture,
1865 &got_picture, avpkt);
1867 case CODEC_TYPE_AUDIO:
1868 data_size = FFMAX(avpkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1869 samples = av_malloc(data_size);
1872 ret = avcodec_decode_audio3(st->codec, samples,
/* Look up the container tag for a codec id by scanning a CODEC_ID_NONE
 * terminated tag table. */
1884 unsigned int codec_get_tag(const AVCodecTag *tags, int id)
1886 while (tags->id != CODEC_ID_NONE) {
/**
 * Look up the codec id for a container tag in a tag table: first an exact
 * match, then a case-insensitive byte-by-byte comparison of the fourcc.
 */
1894 enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag)
1897 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1898 if(tag == tags[i].tag)
/* second pass: compare each of the four tag bytes ignoring case */
1901 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1902 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1903 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1904 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1905 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1908 return CODEC_ID_NONE;
/* Search a NULL-terminated list of tag tables for the tag of a codec id. */
1911 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
1914 for(i=0; tags && tags[i]; i++){
1915 int tag= codec_get_tag(tags[i], id);
/* Search a NULL-terminated list of tag tables for the codec id of a tag. */
1921 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
1924 for(i=0; tags && tags[i]; i++){
1925 enum CodecID id= codec_get_id(tags[i], tag);
1926 if(id!=CODEC_ID_NONE) return id;
1928 return CODEC_ID_NONE;
/**
 * Fill in missing chapter end times: each open-ended chapter ends where
 * the next one starts; the last chapter ends at start_time + duration of
 * the whole file (rescaled to the chapter's time base).
 */
1931 static void compute_chapters_end(AVFormatContext *s)
1935 for (i=0; i+1<s->nb_chapters; i++)
1936 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
1937 assert(s->chapters[i]->start <= s->chapters[i+1]->start);
1938 assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
1939 s->chapters[i]->end = s->chapters[i+1]->start;
/* close the final chapter against the overall file duration */
1942 if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
1943 assert(s->start_time != AV_NOPTS_VALUE);
1944 assert(s->duration > 0);
1945 s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
1947 s->chapters[i]->time_base);
1951 /* absolute maximum size we read until we abort */
1952 #define MAX_READ_SIZE 5000000
1954 #define MAX_STD_TIMEBASES (60*12+5)
/* Return the i-th candidate frame rate, scaled by 12*1001 (the unit used
 * by the duration_error fitting in av_find_stream_info): i/12 fps for
 * i < 60*12, then the NTSC-style standards 24000/1001, 30000/1001, etc. */
1955 static int get_std_framerate(int i){
1956 if(i<60*12) return i*1001;
1957 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
1961 * Is the time base unreliable.
1962 * This is a heuristic to balance between quick acceptance of the values in
1963 * the headers vs. some extra checks.
1964 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
1965 * MPEG-2 commonly misuses field repeat flags to store different framerates.
1966 * And there are "variable" fps files this needs to detect as well.
/* Heuristic: the codec time base is deemed unreliable when it implies an
 * implausible frame rate (>= 101x or < 5x the num) or belongs to codecs
 * known to misreport it (MPEG-2 field repeats, H.264). */
1968 static int tb_unreliable(AVCodecContext *c){
1969 if( c->time_base.den >= 101L*c->time_base.num
1970 || c->time_base.den < 5L*c->time_base.num
1971 /* || c->codec_tag == AV_RL32("DIVX")
1972 || c->codec_tag == AV_RL32("XVID")*/
1973 || c->codec_id == CODEC_ID_MPEG2VIDEO
1974 || c->codec_id == CODEC_ID_H264
/**
 * Probe the input by reading packets until every stream has usable codec
 * parameters, a first DTS, extradata (when the parser can split it) and —
 * for video with an unreliable time base — enough duration samples to fit
 * a frame rate.  All consumed packets are buffered so av_read_frame()
 * returns them again.  Returns a negative error code on failure.
 */
1980 int av_find_stream_info(AVFormatContext *ic)
1982 int i, count, ret, read_size, j;
1984 AVPacket pkt1, *pkt;
1985 int64_t last_dts[MAX_STREAMS];
1986 int64_t duration_gcd[MAX_STREAMS]={0};
1987 int duration_count[MAX_STREAMS]={0};
1988 double (*duration_error)[MAX_STD_TIMEBASES];
1989 int64_t old_offset = url_ftell(ic->pb);
1990 int64_t codec_info_duration[MAX_STREAMS]={0};
1991 int codec_info_nb_frames[MAX_STREAMS]={0};
1993 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
1994 if (!duration_error) return AVERROR(ENOMEM);
/* set up per-stream time bases and parsers needed for extradata split */
1996 for(i=0;i<ic->nb_streams;i++) {
1997 st = ic->streams[i];
1998 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
1999 /* if(!st->time_base.num)
2001 if(!st->codec->time_base.num)
2002 st->codec->time_base= st->time_base;
2004 //only for the split stuff
2006 st->parser = av_parser_init(st->codec->codec_id);
2007 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2008 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2013 for(i=0;i<MAX_STREAMS;i++){
2014 last_dts[i]= AV_NOPTS_VALUE;
/* main probe loop: read until all streams are fully described or limits
 * are hit */
2020 if(url_interrupt_cb()){
2021 ret= AVERROR(EINTR);
2025 /* check if one codec still needs to be handled */
2026 for(i=0;i<ic->nb_streams;i++) {
2027 st = ic->streams[i];
2028 if (!has_codec_parameters(st->codec))
2030 /* variable fps and no guess at the real fps */
2031 if( tb_unreliable(st->codec)
2032 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
2034 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2036 if(st->first_dts == AV_NOPTS_VALUE)
2039 if (i == ic->nb_streams) {
2040 /* NOTE: if the format has no header, then we need to read
2041 some packets to get most of the streams, so we cannot
2043 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2044 /* if we found the info for all the codecs, we can stop */
2049 /* we did not get all the codec info, but we read too much data */
2050 if (read_size >= MAX_READ_SIZE) {
2055 /* NOTE: a new stream can be added there if no header in file
2056 (AVFMTCTX_NOHEADER) */
2057 ret = av_read_frame_internal(ic, &pkt1);
2058 if(ret == AVERROR(EAGAIN))
2062 ret = -1; /* we could not have all the codec parameters before EOF */
2063 for(i=0;i<ic->nb_streams;i++) {
2064 st = ic->streams[i];
2065 if (!has_codec_parameters(st->codec)){
2067 avcodec_string(buf, sizeof(buf), st->codec, 0);
2068 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
/* buffer the packet so it is delivered again by av_read_frame() */
2076 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2077 if(av_dup_packet(pkt) < 0) {
2078 av_free(duration_error);
2079 return AVERROR(ENOMEM);
2082 read_size += pkt->size;
2084 st = ic->streams[pkt->stream_index];
2085 if(codec_info_nb_frames[st->index]>1) {
2086 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration)
2088 codec_info_duration[st->index] += pkt->duration;
2090 if (pkt->duration != 0)
2091 codec_info_nb_frames[st->index]++;
/* collect DTS deltas to estimate the real frame rate */
2094 int index= pkt->stream_index;
2095 int64_t last= last_dts[index];
2096 int64_t duration= pkt->dts - last;
2098 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2099 double dur= duration * av_q2d(st->time_base);
2101 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2102 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2103 if(duration_count[index] < 2)
2104 memset(duration_error[index], 0, sizeof(*duration_error));
/* accumulate the squared error against every standard frame rate */
2105 for(i=1; i<MAX_STD_TIMEBASES; i++){
2106 int framerate= get_std_framerate(i);
2107 int ticks= lrintf(dur*framerate/(1001*12));
2108 double error= dur - ticks*1001*12/(double)framerate;
2109 duration_error[index][i] += error*error;
2111 duration_count[index]++;
2112 // ignore the first 4 values, they might have some random jitter
2113 if (duration_count[index] > 3)
2114 duration_gcd[index] = av_gcd(duration_gcd[index], duration);
2116 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
2117 last_dts[pkt->stream_index]= pkt->dts;
/* extract extradata via the parser's split callback when available */
2119 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2120 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2122 st->codec->extradata_size= i;
2123 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2124 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2125 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2129 /* if still no information, we try to open the codec and to
2130 decompress the frame. We try to avoid that in most cases as
2131 it takes longer and uses more memory. For MPEG-4, we need to
2132 decompress for QuickTime. */
2133 if (!has_codec_parameters(st->codec) /*&&
2134 (st->codec->codec_id == CODEC_ID_FLV1 ||
2135 st->codec->codec_id == CODEC_ID_H264 ||
2136 st->codec->codec_id == CODEC_ID_H263 ||
2137 st->codec->codec_id == CODEC_ID_H261 ||
2138 st->codec->codec_id == CODEC_ID_VORBIS ||
2139 st->codec->codec_id == CODEC_ID_MJPEG ||
2140 st->codec->codec_id == CODEC_ID_PNG ||
2141 st->codec->codec_id == CODEC_ID_PAM ||
2142 st->codec->codec_id == CODEC_ID_PGM ||
2143 st->codec->codec_id == CODEC_ID_PGMYUV ||
2144 st->codec->codec_id == CODEC_ID_PBM ||
2145 st->codec->codec_id == CODEC_ID_PPM ||
2146 st->codec->codec_id == CODEC_ID_SHORTEN ||
2147 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
2148 try_decode_frame(st, pkt);
2153 // close codecs which were opened in try_decode_frame()
2154 for(i=0;i<ic->nb_streams;i++) {
2155 st = ic->streams[i];
2156 if(st->codec->codec)
2157 avcodec_close(st->codec);
/* post-processing: derive r_frame_rate and fill audio defaults */
2159 for(i=0;i<ic->nb_streams;i++) {
2160 st = ic->streams[i];
2161 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2162 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample)
2163 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2165 // the check for tb_unreliable() is not completely correct, since this is not about handling
2166 // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2167 // ipmovie.c produces.
2168 if (tb_unreliable(st->codec) && duration_count[i] > 15 && duration_gcd[i] > 1)
2169 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * duration_gcd[i], INT_MAX);
2170 if(duration_count[i]
2171 && tb_unreliable(st->codec) /*&&
2172 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2173 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
2175 double best_error= 2*av_q2d(st->time_base);
2176 best_error= best_error*best_error*duration_count[i]*1000*12*30;
/* pick the standard frame rate with the smallest accumulated error */
2178 for(j=1; j<MAX_STD_TIMEBASES; j++){
2179 double error= duration_error[i][j] * get_std_framerate(j);
2180 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2181 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2182 if(error < best_error){
2184 num = get_std_framerate(j);
2187 // do not increase frame rate by more than 1 % in order to match a standard rate.
2188 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2189 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2192 if (!st->r_frame_rate.num){
2193 if( st->codec->time_base.den * (int64_t)st->time_base.num
2194 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2195 st->r_frame_rate.num = st->codec->time_base.den;
2196 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2198 st->r_frame_rate.num = st->time_base.den;
2199 st->r_frame_rate.den = st->time_base.num;
2202 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
2203 if(!st->codec->bits_per_coded_sample)
2204 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2208 av_estimate_timings(ic, old_offset);
2210 compute_chapters_end(ic);
2213 /* correct DTS for B-frame streams with no timestamps */
2214 for(i=0;i<ic->nb_streams;i++) {
2215 st = ic->streams[i];
2216 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2218 ppktl = &ic->packet_buffer;
2220 if(ppkt1->stream_index != i)
2222 if(ppkt1->pkt->dts < 0)
2224 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2226 ppkt1->pkt->dts -= delta;
2231 st->cur_dts -= delta;
2237 av_free(duration_error);
2242 /*******************************************************/
/**
 * Start (resume) playing a network-based stream: delegate to the
 * demuxer's read_play() when it has one, otherwise un-pause the
 * underlying protocol; ENOSYS when neither is possible.
 */
2244 int av_read_play(AVFormatContext *s)
2246 if (s->iformat->read_play)
2247 return s->iformat->read_play(s);
2249 return av_url_read_fpause(s->pb, 0);
2250 return AVERROR(ENOSYS);
/**
 * Pause a network-based stream: delegate to the demuxer's read_pause()
 * when it has one, otherwise pause the underlying protocol; ENOSYS when
 * neither is possible.
 */
2253 int av_read_pause(AVFormatContext *s)
2255 if (s->iformat->read_pause)
2256 return s->iformat->read_pause(s);
2258 return av_url_read_fpause(s->pb, 1);
2259 return AVERROR(ENOSYS);
/**
 * Free everything owned by a demuxing context except the I/O layer:
 * demuxer private data, all streams (parser, pending packet, metadata,
 * index, extradata), programs, the packet queue and chapters.
 */
2262 void av_close_input_stream(AVFormatContext *s)
2267 if (s->iformat->read_close)
2268 s->iformat->read_close(s);
2269 for(i=0;i<s->nb_streams;i++) {
2270 /* free all data in a stream component */
2273 av_parser_close(st->parser);
2274 av_free_packet(&st->cur_pkt);
2276 av_metadata_free(&st->metadata);
2277 av_free(st->index_entries);
2278 av_free(st->codec->extradata);
/* legacy (pre-metadata API) fields, kept for ABI compatibility */
2280 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2281 av_free(st->filename);
2283 av_free(st->priv_data);
2286 for(i=s->nb_programs-1; i>=0; i--) {
2287 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2288 av_freep(&s->programs[i]->provider_name);
2289 av_freep(&s->programs[i]->name);
2291 av_metadata_free(&s->programs[i]->metadata);
2292 av_freep(&s->programs[i]->stream_index);
2293 av_freep(&s->programs[i]);
2295 av_freep(&s->programs);
2296 flush_packet_queue(s);
2297 av_freep(&s->priv_data);
2298 while(s->nb_chapters--) {
2299 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2300 av_free(s->chapters[s->nb_chapters]->title);
2302 av_metadata_free(&s->chapters[s->nb_chapters]->metadata);
2303 av_free(s->chapters[s->nb_chapters]);
2305 av_freep(&s->chapters);
2306 av_metadata_free(&s->metadata);
/* Close the demuxing context and, unless the format is AVFMT_NOFILE,
 * the underlying ByteIOContext saved before the stream is torn down. */
2310 void av_close_input_file(AVFormatContext *s)
2312 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2313 av_close_input_stream(s);
/**
 * Allocate a new AVStream, append it to the context and initialize its
 * timing state (90 kHz MPEG-like PTS base, all timestamps unset).
 * Returns NULL when MAX_STREAMS is reached or allocation fails.
 *
 * @param id format-specific stream id stored on the new stream
 */
2318 AVStream *av_new_stream(AVFormatContext *s, int id)
2323 if (s->nb_streams >= MAX_STREAMS)
2326 st = av_mallocz(sizeof(AVStream));
2330 st->codec= avcodec_alloc_context();
2332 /* no default bitrate if decoding */
2333 st->codec->bit_rate = 0;
2335 st->index = s->nb_streams;
2337 st->start_time = AV_NOPTS_VALUE;
2338 st->duration = AV_NOPTS_VALUE;
2339 /* we set the current DTS to 0 so that formats without any timestamps
2340 but durations get some timestamps, formats with some unknown
2341 timestamps have their first few packets buffered and the
2342 timestamps corrected before they are returned to the user */
2344 st->first_dts = AV_NOPTS_VALUE;
2346 /* default pts setting is MPEG-like */
2347 av_set_pts_info(st, 33, 1, 90000);
2348 st->last_IP_pts = AV_NOPTS_VALUE;
2349 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2350 st->pts_buffer[i]= AV_NOPTS_VALUE;
2351 st->reference_dts = AV_NOPTS_VALUE;
2353 st->sample_aspect_ratio = (AVRational){0,1};
2355 s->streams[s->nb_streams++] = st;
/**
 * Return the program with the given id, creating and registering a new
 * one (discard = AVDISCARD_NONE) when it does not exist yet.
 */
2359 AVProgram *av_new_program(AVFormatContext *ac, int id)
2361 AVProgram *program=NULL;
2365 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
/* reuse an existing program with the same id */
2368 for(i=0; i<ac->nb_programs; i++)
2369 if(ac->programs[i]->id == id)
2370 program = ac->programs[i];
2373 program = av_mallocz(sizeof(AVProgram));
2376 dynarray_add(&ac->programs, &ac->nb_programs, program);
2377 program->discard = AVDISCARD_NONE;
/**
 * Return the chapter with the given id, creating it when absent, and set
 * its title (via metadata), time base and start/end.
 */
2384 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2386 AVChapter *chapter = NULL;
/* reuse an existing chapter with the same id */
2389 for(i=0; i<s->nb_chapters; i++)
2390 if(s->chapters[i]->id == id)
2391 chapter = s->chapters[i];
2394 chapter= av_mallocz(sizeof(AVChapter));
2397 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2399 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2400 av_free(chapter->title);
2402 av_metadata_set(&chapter->metadata, "title", title);
2404 chapter->time_base= time_base;
2405 chapter->start = start;
2411 /************************************************************/
2412 /* output media file */
/**
 * Allocate the muxer's private data and forward the user-supplied format
 * parameters to the muxer's set_parameters() callback when present.
 */
2414 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2418 if (s->oformat->priv_data_size > 0) {
2419 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2421 return AVERROR(ENOMEM);
2423 s->priv_data = NULL;
2425 if (s->oformat->set_parameters) {
2426 ret = s->oformat->set_parameters(s, ap);
/**
 * Validate per-stream codec parameters (sample rate, time base,
 * dimensions, aspect ratio, codec tag), allocate muxer private data if
 * still missing, call the muxer's write_header() and initialize per-stream
 * fractional PTS generation.
 */
2433 int av_write_header(AVFormatContext *s)
2438 // some sanity checks
2439 for(i=0;i<s->nb_streams;i++) {
2442 switch (st->codec->codec_type) {
2443 case CODEC_TYPE_AUDIO:
2444 if(st->codec->sample_rate<=0){
2445 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
/* derive block_align for fixed-size PCM-style codecs */
2448 if(!st->codec->block_align)
2449 st->codec->block_align = st->codec->channels *
2450 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2452 case CODEC_TYPE_VIDEO:
2453 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2454 av_log(s, AV_LOG_ERROR, "time base not set\n");
2457 if(st->codec->width<=0 || st->codec->height<=0){
2458 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2461 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2462 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
2468 if(s->oformat->codec_tag){
2469 if(st->codec->codec_tag){
2471 //check that tag + id is in the table
2472 //if neither is in the table -> OK
2473 //if tag is in the table with another id -> FAIL
2474 //if id is in the table with another tag -> FAIL unless strict < ?
2476 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2479 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
2480 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
2481 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
2484 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2485 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2487 return AVERROR(ENOMEM);
2490 #if LIBAVFORMAT_VERSION_MAJOR < 53
2491 ff_metadata_mux_compat(s);
2494 if(s->oformat->write_header){
2495 ret = s->oformat->write_header(s);
2500 /* init PTS generation */
2501 for(i=0;i<s->nb_streams;i++) {
2502 int64_t den = AV_NOPTS_VALUE;
2505 switch (st->codec->codec_type) {
2506 case CODEC_TYPE_AUDIO:
2507 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2509 case CODEC_TYPE_VIDEO:
2510 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2515 if (den != AV_NOPTS_VALUE) {
2517 return AVERROR_INVALIDDATA;
2518 av_frac_init(&st->pts, 0, 0, den);
2524 //FIXME merge with compute_pkt_fields
/**
 * Fill in missing packet fields (duration, pts, dts) before muxing,
 * derive dts from pts through the reorder buffer when B-frames delay
 * output, reject non-monotone timestamps, and advance the stream's
 * fractional PTS counter.
 */
2525 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2526 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2527 int num, den, frame_size, i;
2529 // av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2531 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2534 /* duration field */
2535 if (pkt->duration == 0) {
2536 compute_frame_duration(&num, &den, st, NULL, pkt);
2538 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
/* with no B-frame delay, dts can stand in for a missing pts */
2542 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2545 //XXX/FIXME this is a temporary hack until all encoders output pts
2546 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2548 // pkt->pts= st->cur_dts;
2549 pkt->pts= st->pts.val;
2552 //calculate dts from pts
2553 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
2554 st->pts_buffer[0]= pkt->pts;
2555 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2556 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
/* keep the reorder buffer sorted; its minimum is the next dts */
2557 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2558 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2560 pkt->dts= st->pts_buffer[0];
2563 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2564 av_log(st->codec, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2567 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2568 av_log(st->codec, AV_LOG_ERROR, "error, pts < dts\n");
2572 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2573 st->cur_dts= pkt->dts;
2574 st->pts.val= pkt->dts;
/* advance the fractional pts counter for the next packet */
2577 switch (st->codec->codec_type) {
2578 case CODEC_TYPE_AUDIO:
2579 frame_size = get_audio_frame_size(st->codec, pkt->size);
2581 /* HACK/FIXME, we skip the initial 0 size packets as they are most
2582 likely equal to the encoder delay, but it would be better if we
2583 had the real timestamps from the encoder */
2584 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2585 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2588 case CODEC_TYPE_VIDEO:
2589 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
/**
 * Write a packet to the output without interleaving: fill in missing
 * timestamp fields, then pass it straight to the muxer's write_packet();
 * finally report any pending I/O error on the output.
 */
2597 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2599 int ret = compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2601 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2604 ret= s->oformat->write_packet(s, pkt);
2606 ret= url_ferror(s->pb);
/**
 * Insert a packet into the interleaving queue at the position chosen by
 * the comparator, taking ownership of a duplicated copy so the caller's
 * packet data is not freed from under the queue.
 *
 * @param compare returns nonzero when the queued packet should come first
 */
2610 void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
2611 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
2613 AVPacketList **next_point, *this_pktl;
2615 this_pktl = av_mallocz(sizeof(AVPacketList));
2616 this_pktl->pkt= *pkt;
2617 pkt->destruct= NULL; // do not free original but only the copy
2618 av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-alloced memory
/* walk the sorted list to find the insertion point */
2620 next_point = &s->packet_buffer;
2622 if(compare(s, &(*next_point)->pkt, pkt))
2624 next_point= &(*next_point)->next;
2626 this_pktl->next= *next_point;
2627 *next_point= this_pktl;
/**
 * DTS comparator for interleaving: cross-multiply with each stream's time
 * base so the two DTS values are compared on a common scale.
 */
2630 int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
2632 AVStream *st = s->streams[ pkt ->stream_index];
2633 AVStream *st2= s->streams[ next->stream_index];
2634 int64_t left = st2->time_base.num * (int64_t)st ->time_base.den;
2635 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2637 if (pkt->dts == AV_NOPTS_VALUE)
2640 return next->dts * left > pkt->dts * right; //FIXME this can overflow
/**
 * Default interleaver: queue the incoming packet sorted by DTS, and emit
 * the head of the queue once every stream has at least one queued packet
 * (or unconditionally when flushing).
 */
2643 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2646 int streams[MAX_STREAMS];
2649 ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
/* count how many distinct streams currently have queued packets */
2652 memset(streams, 0, sizeof(streams));
2653 pktl= s->packet_buffer;
2655 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
2656 if(streams[ pktl->pkt.stream_index ] == 0)
2658 streams[ pktl->pkt.stream_index ]++;
2662 if(stream_count && (s->nb_streams == stream_count || flush)){
2663 pktl= s->packet_buffer;
2666 s->packet_buffer= pktl->next;
2670 av_init_packet(out);
2676 * Interleaves an AVPacket correctly so it can be muxed.
2677 * @param out the interleaved packet will be output here
2678 * @param in the input packet
2679 * @param flush 1 if no further packets are available as input and all
2680 * remaining packets should be output
2681 * @return 1 if a packet was output, 0 if no packet could be output,
2682 * < 0 if an error occurred
/* Dispatch to the muxer's own interleaver when it has one, otherwise use
 * the DTS-based default. */
2684 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2685 if(s->oformat->interleave_packet)
2686 return s->oformat->interleave_packet(s, out, in, flush);
2688 return av_interleave_packet_per_dts(s, out, in, flush);
/**
 * Write a packet with interleaving: drop zero-sized audio packets, fill
 * in timestamp fields, push the packet through the interleaver and write
 * every packet it releases; report any pending output I/O error.
 */
2691 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2692 AVStream *st= s->streams[ pkt->stream_index];
2694 //FIXME/XXX/HACK drop zero sized packets
2695 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2698 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2699 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2702 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
/* drain packets released by the interleaver and hand them to the muxer */
2707 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2708 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2711 ret= s->oformat->write_packet(s, &opkt);
2713 av_free_packet(&opkt);
2718 if(url_ferror(s->pb))
2719 return url_ferror(s->pb);
/* Finalize the output file: flush all packets still held by the interleaving
 * buffer, call the muxer's write_trailer() hook, and free per-stream and
 * muxer private data.
 * NOTE(review): loop structure, error-exit paths and braces are missing from
 * this extract. */
2723 int av_write_trailer(AVFormatContext *s)
/* flush==1 drains the interleaving buffer even without new input */
2729 ret= av_interleave_packet(s, &pkt, NULL, 1);
2730 if(ret<0) //FIXME cleanup needed for ret<0 ?
2735 ret= s->oformat->write_packet(s, &pkt);
2737 av_free_packet(&pkt);
2741 if(url_ferror(s->pb))
2745 if(s->oformat->write_trailer)
2746 ret = s->oformat->write_trailer(s);
/* an I/O error on the byte stream overrides the muxer's status */
2749 ret=url_ferror(s->pb);
/* release per-stream muxer state, then the muxer's own private data */
2750 for(i=0;i<s->nb_streams;i++)
2751 av_freep(&s->streams[i]->priv_data);
2752 av_freep(&s->priv_data);
/* Register stream index 'idx' with the program whose id is 'progid'.
 * Looks the program up by id, skips the add if the index is already present,
 * then grows the stream_index array by one slot.
 * NOTE(review): loop bodies, the realloc-failure check and braces are missing
 * from this extract. */
2756 void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
2759 AVProgram *program=NULL;
/* find the program with the matching id */
2762 for(i=0; i<ac->nb_programs; i++){
2763 if(ac->programs[i]->id != progid)
2765 program = ac->programs[i];
/* de-duplicate: do nothing if idx is already listed for this program */
2766 for(j=0; j<program->nb_stream_indexes; j++)
2767 if(program->stream_index[j] == idx)
/* grow the index array by one; presumably a failed realloc aborts the add —
 * the check is not visible in this extract */
2770 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
2773 program->stream_index = tmp;
2774 program->stream_index[program->nb_stream_indexes++] = idx;
2779 static void print_fps(double d, const char *postfix){
2780 uint64_t v= lrintf(d*100);
2781 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
2782 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
2783 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
2786 /* "user interface" functions */
/* Print a one-line human-readable description of stream 'i' of context 'ic'
 * ("Stream #index.i[0xid](lang): codec string, PAR/DAR, rates").
 * NOTE(review): local declarations (e.g. the codec-string buffer), several
 * conditionals and all braces are missing from this extract. */
2787 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
2790 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
2791 AVStream *st = ic->streams[i];
/* reduce the time base for the debug print further down */
2792 int g = av_gcd(st->time_base.num, st->time_base.den);
2793 AVMetadataTag *lang = av_metadata_get(st->metadata, "language", NULL, 0);
/* fills the (not visible here) local buffer with the codec description */
2794 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2795 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2796 /* the pid is an important information, so we display it */
2797 /* XXX: add a generic system */
2798 if (flags & AVFMT_SHOW_IDS)
2799 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2801 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
/* reduced time base is only shown at debug verbosity */
2802 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2803 av_log(NULL, AV_LOG_INFO, ": %s", buf);
/* only print aspect ratios when a sample aspect ratio is set and differs
 * from the codec-level one */
2804 if (st->sample_aspect_ratio.num && // default
2805 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
2806 AVRational display_aspect_ratio;
2807 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
2808 st->codec->width*st->sample_aspect_ratio.num,
2809 st->codec->height*st->sample_aspect_ratio.den,
2811 av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d",
2812 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
2813 display_aspect_ratio.num, display_aspect_ratio.den);
/* for video, print the three rates: tbr (real frame rate), tbn (stream
 * time base) and tbc (codec time base) */
2815 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2816 if(st->r_frame_rate.den && st->r_frame_rate.num)
2817 print_fps(av_q2d(st->r_frame_rate), "tbr");
2818 if(st->time_base.den && st->time_base.num)
2819 print_fps(1/av_q2d(st->time_base), "tbn");
2820 if(st->codec->time_base.den && st->codec->time_base.num)
2821 print_fps(1/av_q2d(st->codec->time_base), "tbc");
2823 av_log(NULL, AV_LOG_INFO, "\n");
/* Print a human-readable summary of a format context: header line, duration,
 * start time, bitrate, program list and every stream.
 * NOTE(review): the full parameter list, local declarations, several
 * computations (e.g. hours/mins from secs) and braces are missing from this
 * extract. */
2826 void dump_format(AVFormatContext *ic,
2833 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2834 is_output ? "Output" : "Input",
2836 is_output ? ic->oformat->name : ic->iformat->name,
2837 is_output ? "to" : "from", url);
2839 av_log(NULL, AV_LOG_INFO, " Duration: ");
2840 if (ic->duration != AV_NOPTS_VALUE) {
2841 int hours, mins, secs, us;
/* split the AV_TIME_BASE-denominated duration into seconds + remainder */
2842 secs = ic->duration / AV_TIME_BASE;
2843 us = ic->duration % AV_TIME_BASE;
2848 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
2849 (100 * us) / AV_TIME_BASE);
2851 av_log(NULL, AV_LOG_INFO, "N/A");
2853 if (ic->start_time != AV_NOPTS_VALUE) {
2855 av_log(NULL, AV_LOG_INFO, ", start: ");
2856 secs = ic->start_time / AV_TIME_BASE;
2857 us = ic->start_time % AV_TIME_BASE;
/* print start time with microsecond precision */
2858 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2859 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2861 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2863 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2865 av_log(NULL, AV_LOG_INFO, "N/A");
2867 av_log(NULL, AV_LOG_INFO, "\n");
/* when programs exist, list streams grouped per program */
2869 if(ic->nb_programs) {
2871 for(j=0; j<ic->nb_programs; j++) {
2872 AVMetadataTag *name = av_metadata_get(ic->programs[j]->metadata,
2874 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
2875 name ? name->value : "");
2876 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++)
2877 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
/* otherwise (or additionally — control flow not fully visible here),
 * dump every stream in order */
2880 for(i=0;i<ic->nb_streams;i++)
2881 dump_stream_format(ic, i, index, is_output);
/* Deprecated pre-lavf-53 compatibility shims: both simply forward to the
 * av_parse_video_* replacements.
 * NOTE(review): braces and the trailing return/#endif are missing from this
 * extract. */
2884 #if LIBAVFORMAT_VERSION_MAJOR < 53
2885 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2887 return av_parse_video_frame_size(width_ptr, height_ptr, str);
2890 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
2892 AVRational frame_rate;
2893 int ret = av_parse_video_frame_rate(&frame_rate, arg);
/* unpack the rational into the caller's separate num/den outputs */
2894 *frame_rate_num= frame_rate.num;
2895 *frame_rate_den= frame_rate.den;
2900 int64_t av_gettime(void)
2903 gettimeofday(&tv,NULL);
2904 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
/* Parse 'datestr' either as an absolute date/time (duration==0) or as a
 * duration (duration!=0), returning microseconds. Accepts "now", several
 * date and time layouts (via small_strptime), an optional fractional part
 * and a leading sign for durations.
 * NOTE(review): local declarations, loop/branch bodies and braces are
 * missing from this extract; comments describe only the visible lines. */
2907 int64_t parse_date(const char *datestr, int duration)
/* candidate layouts tried in order for the date and time portions */
2913 static const char * const date_fmt[] = {
2917 static const char * const time_fmt[] = {
2927 time_t now = time(0);
2929 len = strlen(datestr);
2931 lastch = datestr[len - 1];
/* a trailing 'z'/'Z' marks the input as UTC */
2934 is_utc = (lastch == 'z' || lastch == 'Z');
2936 memset(&dt, 0, sizeof(dt));
/* literal "now" short-circuits to the current time */
2941 if (!strncasecmp(datestr, "now", len))
2942 return (int64_t) now * 1000000;
2944 /* parse the year-month-day part */
2945 for (i = 0; i < FF_ARRAY_ELEMS(date_fmt); i++) {
2946 q = small_strptime(p, date_fmt[i], &dt);
2952 /* if the year-month-day part is missing, then take the
2953 * current year-month-day time */
2958 dt = *localtime(&now);
2960 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
/* optional 'T'/'t'/space separator between date and time */
2965 if (*p == 'T' || *p == 't' || *p == ' ')
2968 /* parse the hour-minute-second part */
2969 for (i = 0; i < FF_ARRAY_ELEMS(time_fmt); i++) {
2970 q = small_strptime(p, time_fmt[i], &dt);
2976 /* parse datestr as a duration */
2981 /* parse datestr as HH:MM:SS */
2982 q = small_strptime(p, time_fmt[0], &dt);
2984 /* parse datestr as S+ */
2985 dt.tm_sec = strtol(p, (char **)&q, 10);
2987 /* the parsing didn't succeed */
2994 /* Now we have all the fields that we can get */
/* durations are a plain seconds total; absolute dates go through mktime
 * (not visible in this extract) */
3000 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
3002 dt.tm_isdst = -1; /* unknown */
3012 /* parse the .m... part */
/* accumulate up to six fractional-second digits as microseconds */
3016 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
3019 val += n * (*q - '0');
/* durations may carry a leading '-' recorded in 'negative' */
3023 return negative ? -t : t;
/* Scan an URL-style "tag1=val&tag2=val" string 'info' for tag 'tag1'; when
 * found, copy its value (bounded by arg_size) into 'arg' and return nonzero.
 * NOTE(review): local declarations, the outer scan loop, unescaping logic
 * and return statements are missing from this extract. */
3026 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
/* collect the tag name up to '=', '&' or end of string */
3036 while (*p != '\0' && *p != '=' && *p != '&') {
/* bounded copy into the local 'tag' buffer, leaving room for the NUL */
3037 if ((q - tag) < sizeof(tag) - 1)
/* collect the value up to the next '&' or end of string */
3045 while (*p != '&' && *p != '\0') {
3046 if ((q - arg) < arg_size - 1) {
/* report a match when the collected tag equals the requested one */
3056 if (!strcmp(tag, tag1))
/* Expand a single %d (optionally %0Nd) in 'path' with 'number', writing the
 * result into buf (buf_size bytes). Errors if no %d is present or the result
 * would not fit.
 * NOTE(review): the scanning loop structure, '%%' handling and returns are
 * missing from this extract. */
3065 int av_get_frame_filename(char *buf, int buf_size,
3066 const char *path, int number)
3069 char *q, buf1[20], c;
3070 int nd, len, percentd_found;
/* accumulate the zero-pad width digits between '%' and 'd'
 * NOTE(review): isdigit() on a plain char is UB for negative values —
 * consider casting to unsigned char; left untouched here */
3082 while (isdigit(*p)) {
3083 nd = nd * 10 + *p++ - '0';
3086 } while (isdigit(c));
/* render the number with the requested zero padding */
3095 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
/* fail if the expansion would overflow the output buffer */
3097 if ((q - buf + len) > buf_size - 1)
3099 memcpy(q, buf1, len);
3107 if ((q - buf) < buf_size - 1)
/* a path without any %d directive is an error */
3111 if (!percentd_found)
/* Shared hex-dump worker: 16 bytes per row as hex followed by a printable
 * ASCII column. Output goes to av_log(avcl, level, ...) when f is NULL,
 * otherwise to the stdio stream f.
 * NOTE(review): row-length computation, padding and the #undef are missing
 * from this extract. */
3120 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
/* route each formatted fragment to av_log or fprintf depending on f */
3123 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3125 for(i=0;i<size;i+=16) {
3132 PRINT(" %02x", buf[i+j]);
/* ASCII column: non-printable bytes are substituted (replacement char not
 * visible in this extract) */
3137 for(j=0;j<len;j++) {
3139 if (c < ' ' || c > '~')
/**
 * Hex-dump a buffer to the given stdio stream (public wrapper around the
 * shared worker; level 0 is unused for the FILE* path).
 */
void av_hex_dump(FILE *f, uint8_t *buf, int size)
{
    hex_dump_internal(NULL, f, 0, buf, size);
}
3153 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3155 hex_dump_internal(avcl, NULL, level, buf, size);
3158 //FIXME needs to know the time_base
/* Shared packet-dump worker: prints stream index, keyframe flag, duration,
 * dts/pts and size, then optionally hex-dumps the payload. Output goes to
 * av_log when f is NULL, otherwise to the stdio stream f.
 * NOTE(review): the "NOPTS" branches' print statements, the dump_payload
 * condition and the #undef are missing from this extract. */
3159 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
3161 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3162 PRINT("stream #%d:\n", pkt->stream_index);
3163 PRINT(" keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
/* NOTE(review): these prints assume AV_TIME_BASE units; see the FIXME above
 * the function — the actual stream time base is not known here */
3164 PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
3165 /* DTS is _always_ valid after av_read_frame() */
3167 if (pkt->dts == AV_NOPTS_VALUE)
3170 PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
3171 /* PTS may not be known if B-frames are present. */
3173 if (pkt->pts == AV_NOPTS_VALUE)
3176 PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
3178 PRINT(" size=%d\n", pkt->size);
3181 av_hex_dump(f, pkt->data, pkt->size);
3184 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3186 pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
3189 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3191 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
/* Split an URL of the form proto://user:pass@host:port/path into its
 * components. Every output buffer is always NUL-initialized; any component
 * absent from the URL is left empty, and *port_ptr stays -1 when no port is
 * given.
 * NOTE(review): some parameters (port_ptr, url), several else/early-return
 * lines and braces are missing from this extract. */
3194 void url_split(char *proto, int proto_size,
3195 char *authorization, int authorization_size,
3196 char *hostname, int hostname_size,
3198 char *path, int path_size,
3201 const char *p, *ls, *at, *col, *brk;
/* default every output so callers can rely on initialized values */
3203 if (port_ptr) *port_ptr = -1;
3204 if (proto_size > 0) proto[0] = 0;
3205 if (authorization_size > 0) authorization[0] = 0;
3206 if (hostname_size > 0) hostname[0] = 0;
3207 if (path_size > 0) path[0] = 0;
3209 /* parse protocol */
3210 if ((p = strchr(url, ':'))) {
/* FFMIN bound copies exactly the scheme characters before ':' */
3211 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3216 /* no protocol means plain filename */
3217 av_strlcpy(path, url, path_size);
3221 /* separate path from hostname */
3222 ls = strchr(p, '/');
/* a '?' (query string with no path) also ends the hostname part */
3224 ls = strchr(p, '?');
3226 av_strlcpy(path, ls, path_size);
3228 ls = &p[strlen(p)]; // XXX
3230 /* the rest is hostname, use that to parse auth/port */
3232 /* authorization (user[:pass]@hostname) */
3233 if ((at = strchr(p, '@')) && at < ls) {
3234 av_strlcpy(authorization, p,
3235 FFMIN(authorization_size, at + 1 - p));
3236 p = at + 1; /* skip '@' */
/* bracketed IPv6 literal: [addr] optionally followed by :port */
3239 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3241 av_strlcpy(hostname, p + 1,
3242 FFMIN(hostname_size, brk - p));
3243 if (brk[1] == ':' && port_ptr)
3244 *port_ptr = atoi(brk + 2);
/* plain host:port */
3245 } else if ((col = strchr(p, ':')) && col < ls) {
3246 av_strlcpy(hostname, p,
3247 FFMIN(col + 1 - p, hostname_size));
3248 if (port_ptr) *port_ptr = atoi(col + 1);
/* bare hostname, no port */
3250 av_strlcpy(hostname, p,
3251 FFMIN(ls + 1 - p, hostname_size));
/**
 * Writes the bytes of src as uppercase hexadecimal into buff.
 *
 * Exactly 2*s characters are written; the buffer is NOT NUL-terminated,
 * so callers must size buff for at least 2*s bytes.
 *
 * @param buff destination character buffer
 * @param src  source bytes
 * @param s    number of source bytes to convert
 * @return buff, for call chaining
 */
char *ff_data_to_hex(char *buff, const uint8_t *src, int s)
{
    static const char digits[] = "0123456789ABCDEF";
    char *out = buff;
    int i;

    for (i = 0; i < s; i++) {
        *out++ = digits[src[i] >> 4];
        *out++ = digits[src[i] & 0xF];
    }

    return buff;
}
/* Set a stream's timestamp wrap-bit count and time base, reducing num/den by
 * their GCD first.
 * NOTE(review): the function's closing lines (including the condition
 * guarding the debug log) run past the end of this extract. */
3271 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3272 int pts_num, int pts_den)
/* reduce the fraction so the stored time base is in lowest terms */
3274 unsigned int gcd= av_gcd(pts_num, pts_den);
3275 s->pts_wrap_bits = pts_wrap_bits;
3276 s->time_base.num = pts_num/gcd;
3277 s->time_base.den = pts_den/gcd;
3280 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, gcd);