2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 #include "libavcodec/opt.h"
25 #include "libavutil/avstring.h"
35 * @file libavformat/utils.c
36 * various utility functions for use within FFmpeg
/** Return the libavformat version this library was built with (LIBAVFORMAT_VERSION_INT). */
39 unsigned avformat_version(void)
41 return LIBAVFORMAT_VERSION_INT;
44 /* fraction handling */
47 * f = val + (num / den) + 0.5.
49 * 'num' is normalized so that it is such as 0 <= num < den.
51 * @param f fractional number
52 * @param val integer value
53 * @param num must be >= 0
54 * @param den must be >= 1
/* NOTE(review): function body is not visible in this excerpt. */
56 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
69 * Fractional addition to f: f = f + (incr / f->den).
71 * @param f fractional number
72 * @param incr increment, can be positive or negative
74 static void av_frac_add(AVFrac *f, int64_t incr)
/* NOTE(review): body largely elided; the branch below presumably renormalizes
 * num back into [0, den) after the addition — confirm against the full source. */
87 } else if (num >= den) {
94 /** head of registered input format linked list */
95 AVInputFormat *first_iformat = NULL;
96 /** head of registered output format linked list */
97 AVOutputFormat *first_oformat = NULL;
/** Iterate over registered input formats: a NULL argument returns the list head,
 *  otherwise the element following f. */
99 AVInputFormat *av_iformat_next(AVInputFormat *f)
101 if(f) return f->next;
102 else return first_iformat;
/** Iterate over registered output formats: a NULL argument returns the list head,
 *  otherwise the element following f. */
105 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
107 if(f) return f->next;
108 else return first_oformat;
/** Register an input format; walks to the tail of the singly linked list
 *  (the actual append is in lines elided from this excerpt). */
111 void av_register_input_format(AVInputFormat *format)
115 while (*p != NULL) p = &(*p)->next;
/** Register an output format; walks to the tail of the singly linked list
 *  (the actual append is in lines elided from this excerpt). */
120 void av_register_output_format(AVOutputFormat *format)
124 while (*p != NULL) p = &(*p)->next;
/** Case-insensitively match the extension of 'filename' against the
 *  comma-separated list 'extensions'. Interior lines are elided here. */
129 int match_ext(const char *filename, const char *extensions)
/* Locate the last '.' in the filename. */
137 ext = strrchr(filename, '.');
/* Copy one comma-separated candidate into ext1, bounded by its buffer. */
143 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
146 if (!strcasecmp(ext1, ext))
/** Return nonzero if 'name' case-insensitively matches one entry of the
 *  comma-separated list 'names'. */
156 static int match_format(const char *name, const char *names)
164 namelen = strlen(name);
/* Check each comma-terminated entry in turn. */
165 while ((p = strchr(names, ','))) {
166 len = FFMAX(p - names, namelen);
167 if (!strncasecmp(name, names, len))
/* Last (or only) entry: compare the remaining string as a whole. */
171 return !strcasecmp(name, names);
/** Guess an output format from short name, filename and/or MIME type by
 *  scoring every registered output format and keeping the best match.
 *  NOTE(review): scoring increments and the return are elided in this excerpt. */
174 AVOutputFormat *guess_format(const char *short_name, const char *filename,
175 const char *mime_type)
177 AVOutputFormat *fmt, *fmt_found;
178 int score_max, score;
180 /* specific test for image sequences */
181 #if CONFIG_IMAGE2_MUXER
182 if (!short_name && filename &&
183 av_filename_number_test(filename) &&
184 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
185 return guess_format("image2", NULL, NULL);
188 /* Find the proper file type. */
192 while (fmt != NULL) {
194 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
196 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
198 if (filename && fmt->extensions &&
199 match_ext(filename, fmt->extensions)) {
202 if (score > score_max) {
/** Like guess_format(), but prefers the "<name>_stream" variant of the
 *  guessed format when one is registered. */
211 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
212 const char *mime_type)
214 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
217 AVOutputFormat *stream_fmt;
218 char stream_format_name[64];
220 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
221 stream_fmt = guess_format(stream_format_name, NULL, NULL);
/** Guess the codec id for the given muxer and media type; falls back to the
 *  format's default video/audio codec. */
230 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
231 const char *filename, const char *mime_type, enum CodecType type){
232 if(type == CODEC_TYPE_VIDEO){
233 enum CodecID codec_id= CODEC_ID_NONE;
235 #if CONFIG_IMAGE2_MUXER
/* For image2 muxers the codec is derived from the filename's extension. */
236 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
237 codec_id= av_guess_image2_codec(filename);
240 if(codec_id == CODEC_ID_NONE)
241 codec_id= fmt->video_codec;
243 }else if(type == CODEC_TYPE_AUDIO)
244 return fmt->audio_codec;
246 return CODEC_ID_NONE;
/** Find a registered input format whose name matches 'short_name' (comma lists allowed). */
249 AVInputFormat *av_find_input_format(const char *short_name)
252 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
253 if (match_format(short_name, fmt->name))
259 /* memory handling */
/** Default packet destructor: frees pkt->data (free itself elided in this excerpt)
 *  and clears the data/size fields. */
261 void av_destruct_packet(AVPacket *pkt)
264 pkt->data = NULL; pkt->size = 0;
/** Initialize optional AVPacket fields to safe defaults; does not touch data/size. */
267 void av_init_packet(AVPacket *pkt)
269 pkt->pts = AV_NOPTS_VALUE;
270 pkt->dts = AV_NOPTS_VALUE;
273 pkt->convergence_duration = 0;
275 pkt->stream_index = 0;
/* Non-freeing destructor by default: caller owns any buffer assigned later. */
276 pkt->destruct= av_destruct_packet_nofree;
/** Allocate a packet payload of 'size' bytes plus zeroed input padding.
 *  @return 0 on success, AVERROR(ENOMEM) on failure or size overflow. */
279 int av_new_packet(AVPacket *pkt, int size)
/* Guard against integer overflow when adding the padding. */
282 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
283 return AVERROR(ENOMEM);
284 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
286 return AVERROR(ENOMEM);
/* Zero the padding so parsers reading past the end see deterministic bytes. */
287 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
292 pkt->destruct = av_destruct_packet;
/** Allocate a packet and fill it with up to 'size' bytes read from 's';
 *  records the read position in pkt->pos. */
296 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
298 int ret= av_new_packet(pkt, size);
303 pkt->pos= url_ftell(s);
305 ret= get_buffer(s, pkt->data, size);
/** Make a packet's payload independently owned: if its data is not currently
 *  freed by the destructor, copy it into a freshly allocated padded buffer. */
314 int av_dup_packet(AVPacket *pkt)
316 if (((pkt->destruct == av_destruct_packet_nofree) || (pkt->destruct == NULL)) && pkt->data) {
318 /* We duplicate the packet and don't forget to add the padding again. */
/* Overflow guard, same as av_new_packet(). */
319 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
320 return AVERROR(ENOMEM);
321 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
323 return AVERROR(ENOMEM);
325 memcpy(data, pkt->data, pkt->size);
326 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
328 pkt->destruct = av_destruct_packet;
/** Return 1 if 'filename' contains a frame-number pattern usable by
 *  av_get_frame_filename(), 0 otherwise (also 0 for NULL). */
333 int av_filename_number_test(const char *filename)
336 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
/** Probe all registered input formats against 'pd' and return the one with the
 *  highest score above *score_max; *score_max is updated with the winning score. */
339 static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
341 AVInputFormat *fmt1, *fmt;
345 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
/* Skip formats whose NOFILE flag does not match whether a file is opened. */
346 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
349 if (fmt1->read_probe) {
350 score = fmt1->read_probe(pd);
351 } else if (fmt1->extensions) {
/* No probe function: fall back to an extension match. */
352 if (match_ext(pd->filename, fmt1->extensions)) {
356 if (score > *score_max) {
/* NOTE(review): tie handling below is truncated in this excerpt. */
359 }else if (score == *score_max)
/** Convenience wrapper around av_probe_input_format2() with a local score variable. */
365 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
367 return av_probe_input_format2(pd, is_opened, &score);
/** Probe the buffered data of a CODEC_ID_PROBE stream and, for a handful of
 *  known elementary-stream formats, set the stream's codec id and type. */
370 static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
373 fmt = av_probe_input_format2(pd, 1, &score);
376 if (!strcmp(fmt->name, "mp3")) {
377 st->codec->codec_id = CODEC_ID_MP3;
378 st->codec->codec_type = CODEC_TYPE_AUDIO;
379 } else if (!strcmp(fmt->name, "ac3")) {
380 st->codec->codec_id = CODEC_ID_AC3;
381 st->codec->codec_type = CODEC_TYPE_AUDIO;
382 } else if (!strcmp(fmt->name, "mpegvideo")) {
383 st->codec->codec_id = CODEC_ID_MPEG2VIDEO;
384 st->codec->codec_type = CODEC_TYPE_VIDEO;
385 } else if (!strcmp(fmt->name, "m4v")) {
386 st->codec->codec_id = CODEC_ID_MPEG4;
387 st->codec->codec_type = CODEC_TYPE_VIDEO;
388 } else if (!strcmp(fmt->name, "h264")) {
389 st->codec->codec_id = CODEC_ID_H264;
390 st->codec->codec_type = CODEC_TYPE_VIDEO;
396 /************************************************************/
397 /* input media file */
400 * Open a media file from an IO stream. 'fmt' must be specified.
/* Allocates (or reuses a preallocated) AVFormatContext, sets up format private
 * data, calls the demuxer's read_header, and on failure unwinds all allocations.
 * NOTE(review): several statements are elided in this excerpt. */
402 int av_open_input_stream(AVFormatContext **ic_ptr,
403 ByteIOContext *pb, const char *filename,
404 AVInputFormat *fmt, AVFormatParameters *ap)
408 AVFormatParameters default_ap;
/* No parameters supplied: fall back to a zeroed default set. */
412 memset(ap, 0, sizeof(default_ap));
415 if(!ap->prealloced_context)
416 ic = av_alloc_format_context();
420 err = AVERROR(ENOMEM);
425 ic->duration = AV_NOPTS_VALUE;
426 ic->start_time = AV_NOPTS_VALUE;
427 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
429 /* allocate private data */
430 if (fmt->priv_data_size > 0) {
431 ic->priv_data = av_mallocz(fmt->priv_data_size);
432 if (!ic->priv_data) {
433 err = AVERROR(ENOMEM);
437 ic->priv_data = NULL;
440 if (ic->iformat->read_header) {
441 err = ic->iformat->read_header(ic, ap);
/* Remember where the payload starts unless the demuxer already set it. */
446 if (pb && !ic->data_offset)
447 ic->data_offset = url_ftell(ic->pb);
449 #if LIBAVFORMAT_VERSION_MAJOR < 53
450 ff_metadata_demux_compat(ic);
/* Error path: free context private data and each stream's allocations. */
458 av_freep(&ic->priv_data);
459 for(i=0;i<ic->nb_streams;i++) {
460 AVStream *st = ic->streams[i];
462 av_free(st->priv_data);
463 av_free(st->codec->extradata);
473 /** size of probe buffer, for guessing file type from file contents */
474 #define PROBE_BUF_MIN 2048
475 #define PROBE_BUF_MAX (1<<20)
/* Open a media file by name: probe the format (first from the filename, then
 * from growing reads of the file contents), then delegate to
 * av_open_input_stream(). NOTE(review): several statements are elided here. */
477 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
480 AVFormatParameters *ap)
483 AVProbeData probe_data, *pd = &probe_data;
484 ByteIOContext *pb = NULL;
488 pd->filename = filename;
493 /* guess format if no file can be opened */
494 fmt = av_probe_input_format(pd, 0);
497 /* Do not open file if the format does not need it. XXX: specific
498 hack needed to handle RTSP/TCP */
499 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
500 /* if no file needed do not try to open one */
501 if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
505 url_setbufsize(pb, buf_size);
/* Retry probing with exponentially larger buffers until a format is found;
 * the required score drops to 0 only at the maximum probe size. */
508 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
509 int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
510 /* read probe data */
511 pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
512 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
513 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
/* Rewind for the real open; if the stream is not seekable, reopen it. */
514 if (url_fseek(pb, 0, SEEK_SET) < 0) {
516 if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
522 /* guess file format */
523 fmt = av_probe_input_format2(pd, 1, &score);
528 /* if still no format found, error */
534 /* check filename in case an image number is expected */
535 if (fmt->flags & AVFMT_NEEDNUMBER) {
536 if (!av_filename_number_test(filename)) {
537 err = AVERROR_NUMEXPECTED;
541 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
554 /*******************************************************/
/* Append 'pkt' to the given packet list, maintaining a tail pointer for O(1)
 * appends; returns the stored AVPacket (storage elided in this excerpt). */
556 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
557 AVPacketList **plast_pktl){
558 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
563 (*plast_pktl)->next = pktl;
565 *packet_buffer = pktl;
567 /* add the packet in the buffered packet list */
/* Read a raw packet from the demuxer. Packets of streams still being probed
 * (CODEC_ID_PROBE) are buffered and their payload accumulated into the
 * stream's probe_data until the codec can be identified.
 * NOTE(review): many statements are elided in this excerpt. */
573 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
579 AVPacketList *pktl = s->raw_packet_buffer;
/* Serve buffered packets first, once their stream's codec is known. */
583 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE){
584 s->raw_packet_buffer = pktl->next;
591 ret= s->iformat->read_packet(s, pkt);
594 st= s->streams[pkt->stream_index];
/* Apply any caller-forced codec ids per media type. */
596 switch(st->codec->codec_type){
597 case CODEC_TYPE_VIDEO:
598 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
600 case CODEC_TYPE_AUDIO:
601 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
603 case CODEC_TYPE_SUBTITLE:
604 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
608 if(!pktl && st->codec->codec_id!=CODEC_ID_PROBE)
611 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
613 if(st->codec->codec_id == CODEC_ID_PROBE){
614 AVProbeData *pd = &st->probe_data;
/* Grow the probe buffer with this packet's payload plus zeroed padding. */
616 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
617 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
618 pd->buf_size += pkt->size;
619 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
/* Re-probe only when the buffer size crosses a power-of-two boundary. */
621 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
622 set_codec_from_probe_data(st, pd, 1);
623 if(st->codec->codec_id != CODEC_ID_PROBE){
632 /**********************************************************/
635 * Get the number of samples of an audio frame. Return -1 on error.
637 static int get_audio_frame_size(AVCodecContext *enc, int size)
/* Vorbis is handled specially (details elided in this excerpt). */
641 if(enc->codec_id == CODEC_ID_VORBIS)
644 if (enc->frame_size <= 1) {
645 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
647 if (bits_per_sample) {
648 if (enc->channels == 0)
/* Constant bits-per-sample codecs: derive sample count from byte size. */
650 frame_size = (size << 3) / (bits_per_sample * enc->channels);
652 /* used for example by ADPCM codecs */
653 if (enc->bit_rate == 0)
655 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
/* Fixed frame size codecs (e.g. most transform codecs). */
658 frame_size = enc->frame_size;
665 * Return the frame duration in seconds. Return 0 if not available.
667 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
668 AVCodecParserContext *pc, AVPacket *pkt)
674 switch(st->codec->codec_type) {
675 case CODEC_TYPE_VIDEO:
/* Prefer the stream time base when it is plausibly a frame duration
 * (den/num < 1000), otherwise fall back to the codec time base. */
676 if(st->time_base.num*1000LL > st->time_base.den){
677 *pnum = st->time_base.num;
678 *pden = st->time_base.den;
679 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
680 *pnum = st->codec->time_base.num;
681 *pden = st->codec->time_base.den;
/* Stretch the duration for repeated fields/frames reported by the parser. */
682 if (pc && pc->repeat_pict) {
684 *pnum = (*pnum) * (2 + pc->repeat_pict);
688 case CODEC_TYPE_AUDIO:
689 frame_size = get_audio_frame_size(st->codec, pkt->size);
693 *pden = st->codec->sample_rate;
/* Return nonzero if the codec produces only intra (key) frames; audio is
 * always treated as intra-only. The case list is truncated in this excerpt. */
700 static int is_intra_only(AVCodecContext *enc){
701 if(enc->codec_type == CODEC_TYPE_AUDIO){
703 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
704 switch(enc->codec_id){
706 case CODEC_ID_MJPEGB:
708 case CODEC_ID_RAWVIDEO:
709 case CODEC_ID_DVVIDEO:
710 case CODEC_ID_HUFFYUV:
711 case CODEC_ID_FFVHUFF:
716 case CODEC_ID_JPEG2000:
/* Once the first DTS of a stream is known, shift the timestamps of all
 * already-buffered packets of that stream by the derived first_dts offset,
 * and record the stream start time. */
724 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
725 int64_t dts, int64_t pts)
727 AVStream *st= s->streams[stream_index];
728 AVPacketList *pktl= s->packet_buffer;
/* Nothing to do if first_dts is already set or we lack reference timestamps. */
730 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
733 st->first_dts= dts - st->cur_dts;
736 for(; pktl; pktl= pktl->next){
737 if(pktl->pkt.stream_index != stream_index)
739 //FIXME think more about this check
740 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
741 pktl->pkt.pts += st->first_dts;
743 if(pktl->pkt.dts != AV_NOPTS_VALUE)
744 pktl->pkt.dts += st->first_dts;
746 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
747 st->start_time= pktl->pkt.pts;
749 if (st->start_time == AV_NOPTS_VALUE)
750 st->start_time = pts;
/* Fill in missing DTS/PTS/duration for buffered packets of a stream once a
 * packet with a known duration arrives; works backwards to derive first_dts
 * when needed. NOTE(review): control flow is partially elided in this excerpt. */
753 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
755 AVPacketList *pktl= s->packet_buffer;
758 if(st->first_dts != AV_NOPTS_VALUE){
759 cur_dts= st->first_dts;
/* Walk back from first_dts over leading timestamp-less packets. */
760 for(; pktl; pktl= pktl->next){
761 if(pktl->pkt.stream_index == pkt->stream_index){
762 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
764 cur_dts -= pkt->duration;
767 pktl= s->packet_buffer;
768 st->first_dts = cur_dts;
769 }else if(st->cur_dts)
/* Assign interpolated timestamps to buffered packets lacking them. */
772 for(; pktl; pktl= pktl->next){
773 if(pktl->pkt.stream_index != pkt->stream_index)
775 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
776 && !pktl->pkt.duration){
777 pktl->pkt.dts= cur_dts;
778 if(!st->codec->has_b_frames)
779 pktl->pkt.pts= cur_dts;
780 cur_dts += pkt->duration;
781 pktl->pkt.duration= pkt->duration;
785 if(st->first_dts == AV_NOPTS_VALUE)
786 st->cur_dts= cur_dts;
/* Fill in missing pts/dts/duration/key-frame flag of a demuxed packet using
 * the stream state, the parser (if any), and simple interpolation rules.
 * NOTE(review): numerous statements are elided in this excerpt; comments
 * below describe only the visible lines. */
789 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
790 AVCodecParserContext *pc, AVPacket *pkt)
792 int num, den, presentation_delayed, delay, i;
795 /* do we have a video B-frame ? */
796 delay= st->codec->has_b_frames;
797 presentation_delayed = 0;
798 /* XXX: need has_b_frame, but cannot get it if the codec is
801 pc && pc->pict_type != FF_B_TYPE)
802 presentation_delayed = 1;
/* Undo timestamp wrap-around when dts jumped above pts. */
804 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
805 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
806 pkt->dts -= 1LL<<st->pts_wrap_bits;
809 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
810 // we take the conservative approach and discard both
811 // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
812 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
813 av_log(s, AV_LOG_ERROR, "invalid dts/pts combination\n");
814 pkt->dts= pkt->pts= AV_NOPTS_VALUE;
/* Derive a duration from the frame rate / sample rate if the demuxer gave none. */
817 if (pkt->duration == 0) {
818 compute_frame_duration(&num, &den, st, pc, pkt);
820 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
822 if(pkt->duration != 0 && s->packet_buffer)
823 update_initial_durations(s, st, pkt);
827 /* correct timestamps with byte offset if demuxers only have timestamps
828 on packet boundaries */
829 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
830 /* this will estimate bitrate based on this frame's duration and size */
831 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
832 if(pkt->pts != AV_NOPTS_VALUE)
834 if(pkt->dts != AV_NOPTS_VALUE)
838 /* This may be redundant, but it should not hurt. */
839 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
840 presentation_delayed = 1;
842 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
843 /* interpolate PTS and DTS if they are not present */
844 if(delay==0 || (delay==1 && pc)){
845 if (presentation_delayed) {
846 /* DTS = decompression timestamp */
847 /* PTS = presentation timestamp */
848 if (pkt->dts == AV_NOPTS_VALUE)
849 pkt->dts = st->last_IP_pts;
850 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
851 if (pkt->dts == AV_NOPTS_VALUE)
852 pkt->dts = st->cur_dts;
854 /* this is tricky: the dts must be incremented by the duration
855 of the frame we are displaying, i.e. the last I- or P-frame */
856 if (st->last_IP_duration == 0)
857 st->last_IP_duration = pkt->duration;
858 if(pkt->dts != AV_NOPTS_VALUE)
859 st->cur_dts = pkt->dts + st->last_IP_duration;
860 st->last_IP_duration = pkt->duration;
861 st->last_IP_pts= pkt->pts;
862 /* cannot compute PTS if not present (we can compute it only
863 by knowing the future */
864 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
/* Heuristic: nudge pts forward by one duration if that better matches cur_dts. */
865 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
866 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
867 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
868 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
869 pkt->pts += pkt->duration;
870 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
874 /* presentation is not delayed : PTS and DTS are the same */
875 if(pkt->pts == AV_NOPTS_VALUE)
877 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
878 if(pkt->pts == AV_NOPTS_VALUE)
879 pkt->pts = st->cur_dts;
881 if(pkt->pts != AV_NOPTS_VALUE)
882 st->cur_dts = pkt->pts + pkt->duration;
/* B-frame reorder case: keep a sliding sorted window of recent pts values
 * and use its minimum as the dts when none was supplied. */
886 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
887 st->pts_buffer[0]= pkt->pts;
888 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
889 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
890 if(pkt->dts == AV_NOPTS_VALUE)
891 pkt->dts= st->pts_buffer[0];
893 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
895 if(pkt->dts > st->cur_dts)
896 st->cur_dts = pkt->dts;
899 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
/* Key frame flagging: intra-only codecs are always keyframes; otherwise use
 * the parser's picture type when available. */
902 if(is_intra_only(st->codec))
903 pkt->flags |= PKT_FLAG_KEY;
906 /* keyframe computation */
907 if (pc->pict_type == FF_I_TYPE)
908 pkt->flags |= PKT_FLAG_KEY;
/** Packet destructor that does NOT free the payload; only clears data/size. */
912 void av_destruct_packet_nofree(AVPacket *pkt)
914 pkt->data = NULL; pkt->size = 0;
/* Read the next frame: pull raw packets via av_read_packet(), feed them
 * through the per-stream parser when one is needed, and emit fully assembled
 * frames with computed timestamp fields.
 * NOTE(review): many statements are elided in this excerpt. */
917 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
925 /* select current input stream component */
928 if (!st->need_parsing || !st->parser) {
929 /* no parsing needed: we just output the packet as is */
930 /* raw data support */
932 compute_pkt_fields(s, st, NULL, pkt);
935 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
936 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
937 s->cur_ptr, s->cur_len,
938 s->cur_pkt.pts, s->cur_pkt.dts);
/* Timestamps are consumed by the parser; avoid reusing them. */
939 s->cur_pkt.pts = AV_NOPTS_VALUE;
940 s->cur_pkt.dts = AV_NOPTS_VALUE;
941 /* increment read pointer */
945 /* return packet if any */
948 pkt->pos = s->cur_pkt.pos; // Isn't quite accurate but close.
950 pkt->stream_index = st->index;
951 pkt->pts = st->parser->pts;
952 pkt->dts = st->parser->dts;
953 pkt->destruct = av_destruct_packet_nofree;
954 compute_pkt_fields(s, st, st->parser, pkt);
/* Optionally build a generic seek index from parsed keyframes. */
956 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
957 ff_reduce_index(s, st->index);
958 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
959 0, 0, AVINDEX_KEYFRAME);
966 av_free_packet(&s->cur_pkt);
970 /* read next packet */
971 ret = av_read_packet(s, &s->cur_pkt);
973 if (ret == AVERROR(EAGAIN))
975 /* return the last frames, if any */
/* EOF: flush each parser so it emits any frame it is still holding. */
976 for(i = 0; i < s->nb_streams; i++) {
978 if (st->parser && st->need_parsing) {
979 av_parser_parse(st->parser, st->codec,
980 &pkt->data, &pkt->size,
982 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
987 /* no more packets: really terminate parsing */
991 if(s->cur_pkt.pts != AV_NOPTS_VALUE &&
992 s->cur_pkt.dts != AV_NOPTS_VALUE &&
993 s->cur_pkt.pts < s->cur_pkt.dts){
994 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
995 s->cur_pkt.stream_index,
999 // av_free_packet(&s->cur_pkt);
1003 st = s->streams[s->cur_pkt.stream_index];
1004 if(s->debug & FF_FDEBUG_TS)
1005 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
1006 s->cur_pkt.stream_index,
1013 s->cur_ptr = s->cur_pkt.data;
1014 s->cur_len = s->cur_pkt.size;
/* Lazily create the parser for streams that need one. */
1015 if (st->need_parsing && !st->parser) {
1016 st->parser = av_parser_init(st->codec->codec_id);
1018 /* no parser available: just output the raw packets */
1019 st->need_parsing = AVSTREAM_PARSE_NONE;
1020 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
1021 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1023 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
1024 st->parser->next_frame_offset=
1025 st->parser->cur_offset= s->cur_pkt.pos;
1030 if(s->debug & FF_FDEBUG_TS)
1031 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
/* Public frame reader: serves packets from the reorder buffer first; with
 * AVFMT_FLAG_GENPTS set it generates missing pts values from later dts
 * before releasing a packet. NOTE(review): parts are elided in this excerpt. */
1041 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1045 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1048 pktl = s->packet_buffer;
1050 AVPacket *next_pkt= &pktl->pkt;
1052 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
/* Scan forward for a later packet of the same stream whose dts bounds
 * this packet's missing pts. */
1053 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1054 if( pktl->pkt.stream_index == next_pkt->stream_index
1055 && next_pkt->dts < pktl->pkt.dts
1056 && pktl->pkt.pts != pktl->pkt.dts //not b frame
1057 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
1058 next_pkt->pts= pktl->pkt.dts;
1062 pktl = s->packet_buffer;
1065 if( next_pkt->pts != AV_NOPTS_VALUE
1066 || next_pkt->dts == AV_NOPTS_VALUE
1068 /* read packet from packet buffer, if there is data */
1070 s->packet_buffer = pktl->next;
1076 int ret= av_read_frame_internal(s, pkt);
1078 if(pktl && ret != AVERROR(EAGAIN)){
/* Buffer the freshly read packet (duplicated so we own the payload). */
1085 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1086 &s->packet_buffer_end)) < 0)
1087 return AVERROR(ENOMEM);
1089 assert(!s->packet_buffer);
1090 return av_read_frame_internal(s, pkt);
1095 /* XXX: suppress the packet queue */
/* Free every buffered packet and its list node (node free elided here). */
1096 static void flush_packet_queue(AVFormatContext *s)
1101 pktl = s->packet_buffer;
1104 s->packet_buffer = pktl->next;
1105 av_free_packet(&pktl->pkt);
1110 /*******************************************************/
/** Pick a default stream: the first video stream if any, else the first audio
 *  stream, else stream 0. */
1113 int av_find_default_stream_index(AVFormatContext *s)
1115 int first_audio_index = -1;
1119 if (s->nb_streams <= 0)
1121 for(i = 0; i < s->nb_streams; i++) {
1123 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1126 if (first_audio_index < 0 && st->codec->codec_type == CODEC_TYPE_AUDIO)
1127 first_audio_index = i;
1129 return first_audio_index >= 0 ? first_audio_index : 0;
1133 * Flush the frame reader.
/* Drops all buffered packets and resets per-stream parser and timestamp state
 * (used around seeks). NOTE(review): some statements are elided here. */
1135 static void av_read_frame_flush(AVFormatContext *s)
1140 flush_packet_queue(s);
1142 /* free previous packet */
1144 if (s->cur_st->parser)
1145 av_free_packet(&s->cur_pkt);
1152 /* for each stream, reset read state */
1153 for(i = 0; i < s->nb_streams; i++) {
1157 av_parser_close(st->parser);
1160 st->last_IP_pts = AV_NOPTS_VALUE;
1161 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
/** After a seek, set every stream's cur_dts to 'timestamp' (expressed in
 *  ref_st's time base), rescaled into each stream's own time base. */
1165 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1168 for(i = 0; i < s->nb_streams; i++) {
1169 AVStream *st = s->streams[i];
1171 st->cur_dts = av_rescale(timestamp,
1172 st->time_base.den * (int64_t)ref_st->time_base.num,
1173 st->time_base.num * (int64_t)ref_st->time_base.den);
/** Keep the seek index within s->max_index_size bytes by discarding every
 *  second entry once the limit is reached. */
1177 void ff_reduce_index(AVFormatContext *s, int stream_index)
1179 AVStream *st= s->streams[stream_index];
1180 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1182 if((unsigned)st->nb_index_entries >= max_entries){
1184 for(i=0; 2*i<st->nb_index_entries; i++)
1185 st->index_entries[i]= st->index_entries[2*i];
1186 st->nb_index_entries= i;
/* Insert (or update) a seek-index entry, keeping the entries array sorted by
 * timestamp. NOTE(review): some statements are elided in this excerpt. */
1190 int av_add_index_entry(AVStream *st,
1191 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1193 AVIndexEntry *entries, *ie;
/* Overflow guard on the reallocated array size. */
1196 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1199 entries = av_fast_realloc(st->index_entries,
1200 &st->index_entries_allocated_size,
1201 (st->nb_index_entries + 1) *
1202 sizeof(AVIndexEntry));
1206 st->index_entries= entries;
1208 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
/* Not found: append at the end. */
1211 index= st->nb_index_entries++;
1212 ie= &entries[index];
1213 assert(index==0 || ie[-1].timestamp < timestamp);
1215 ie= &entries[index];
/* Distinct timestamp: shift later entries right and insert. */
1216 if(ie->timestamp != timestamp){
1217 if(ie->timestamp <= timestamp)
1219 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1220 st->nb_index_entries++;
1221 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1222 distance= ie->min_distance;
1226 ie->timestamp = timestamp;
1227 ie->min_distance= distance;
/* Binary-search the seek index for 'wanted_timestamp'; direction controlled by
 * AVSEEK_FLAG_BACKWARD, and unless AVSEEK_FLAG_ANY is set, step to the nearest
 * keyframe entry. NOTE(review): loop setup is elided in this excerpt. */
1234 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1237 AVIndexEntry *entries= st->index_entries;
1238 int nb_entries= st->nb_index_entries;
1247 timestamp = entries[m].timestamp;
1248 if(timestamp >= wanted_timestamp)
1250 if(timestamp <= wanted_timestamp)
1253 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1255 if(!(flags & AVSEEK_FLAG_ANY)){
1256 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1257 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/* Seek using the demuxer's read_timestamp() via binary search; bootstraps the
 * search bounds from cached index entries when available.
 * NOTE(review): some statements are elided in this excerpt. */
1268 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1269 AVInputFormat *avif= s->iformat;
1270 int64_t pos_min, pos_max, pos, pos_limit;
1271 int64_t ts_min, ts_max, ts;
1275 if (stream_index < 0)
1279 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1283 ts_min= AV_NOPTS_VALUE;
1284 pos_limit= -1; //gcc falsely says it may be uninitialized
1286 st= s->streams[stream_index];
/* Use index entries to seed lower/upper bounds for the search. */
1287 if(st->index_entries){
1290 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1291 index= FFMAX(index, 0);
1292 e= &st->index_entries[index];
1294 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1296 ts_min= e->timestamp;
1298 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1305 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1306 assert(index < st->nb_index_entries);
1308 e= &st->index_entries[index];
1309 assert(e->timestamp >= target_ts);
1311 ts_max= e->timestamp;
1312 pos_limit= pos_max - e->min_distance;
1314 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1315 pos_max,pos_limit, ts_max);
1320 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
/* Do the real seek and propagate the found timestamp to all streams. */
1325 url_fseek(s->pb, pos, SEEK_SET);
1327 av_update_cur_dts(s, st, ts);
/* Generic timestamp search: establishes [pos_min,pos_max]/[ts_min,ts_max]
 * bounds (probing file start/end when unknown), then narrows by interpolation,
 * bisection and finally linear search until the target is bracketed.
 * NOTE(review): several statements are elided in this excerpt. */
1332 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1334 int64_t start_pos, filesize;
1338 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
/* Unknown lower bound: first timestamp after the data offset. */
1341 if(ts_min == AV_NOPTS_VALUE){
1342 pos_min = s->data_offset;
1343 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1344 if (ts_min == AV_NOPTS_VALUE)
/* Unknown upper bound: scan backwards from EOF until a timestamp is found. */
1348 if(ts_max == AV_NOPTS_VALUE){
1350 filesize = url_fsize(s->pb);
1351 pos_max = filesize - 1;
1354 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1356 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1357 if (ts_max == AV_NOPTS_VALUE)
1361 int64_t tmp_pos= pos_max + 1;
1362 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1363 if(tmp_ts == AV_NOPTS_VALUE)
1367 if(tmp_pos >= filesize)
1373 if(ts_min > ts_max){
1375 }else if(ts_min == ts_max){
1380 while (pos_min < pos_limit) {
1382 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1386 assert(pos_limit <= pos_max);
1389 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1390 // interpolate position (better than dichotomy)
1391 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1392 + pos_min - approximate_keyframe_distance;
1393 }else if(no_change==1){
1394 // bisection, if interpolation failed to change min or max pos last time
1395 pos = (pos_min + pos_limit)>>1;
1397 /* linear search if bisection failed, can only happen if there
1398 are very few or no keyframes between min/max */
1403 else if(pos > pos_limit)
1407 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1413 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1415 if(ts == AV_NOPTS_VALUE){
1416 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1419 assert(ts != AV_NOPTS_VALUE);
/* Narrow the bracket around target_ts. */
1420 if (target_ts <= ts) {
1421 pos_limit = start_pos - 1;
1425 if (target_ts >= ts) {
1431 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1432 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1435 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1437 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1438 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1439 pos, ts_min, target_ts, ts_max);
/* Seek by absolute byte position, clamped to [data_offset, filesize-1]. */
1445 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1446 int64_t pos_min, pos_max;
1450 if (stream_index < 0)
1453 st= s->streams[stream_index];
1456 pos_min = s->data_offset;
1457 pos_max = url_fsize(s->pb) - 1;
1459 if (pos < pos_min) pos= pos_min;
1460 else if(pos > pos_max) pos= pos_max;
1462 url_fseek(s->pb, pos, SEEK_SET);
1465 av_update_cur_dts(s, st, ts);
/* Index-based seek fallback: if the target is past the known index, read
 * frames forward to extend the index, then seek to the matching index entry.
 * NOTE(review): some statements are elided in this excerpt. */
1470 static int av_seek_frame_generic(AVFormatContext *s,
1471 int stream_index, int64_t timestamp, int flags)
1477 st = s->streams[stream_index];
1479 index = av_index_search_timestamp(st, timestamp, flags);
1481 if(index < 0 || index==st->nb_index_entries-1){
/* Resume reading from the last indexed position (or file start). */
1485 if(st->nb_index_entries){
1486 assert(st->index_entries);
1487 ie= &st->index_entries[st->nb_index_entries-1];
1488 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1490 av_update_cur_dts(s, st, ie->timestamp);
1492 if ((ret = url_fseek(s->pb, 0, SEEK_SET)) < 0)
/* Read frames until the index covers the requested timestamp. */
1498 ret = av_read_frame(s, &pkt);
1499 }while(ret == AVERROR(EAGAIN));
1502 av_free_packet(&pkt);
1503 if(stream_index == pkt.stream_index){
1504 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1508 index = av_index_search_timestamp(st, timestamp, flags);
1513 av_read_frame_flush(s);
1514 if (s->iformat->read_seek){
1515 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1518 ie = &st->index_entries[index];
1519 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1521 av_update_cur_dts(s, st, ie->timestamp);
/* Public seek entry point: dispatches to byte seek, the demuxer's own
 * read_seek, binary search (when read_timestamp exists), or the generic
 * index-based seek, in that order.
 * NOTE(review): decimated excerpt — intermediate original lines elided. */
1526 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1531 av_read_frame_flush(s);
1533 if(flags & AVSEEK_FLAG_BYTE)
1534 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1536 if(stream_index < 0){
1537 stream_index= av_find_default_stream_index(s);
1538 if(stream_index < 0)
1541 st= s->streams[stream_index];
/* convert AV_TIME_BASE units to the default stream's own time base */
1542 /* timestamp for default must be expressed in AV_TIME_BASE units */
1543 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1546 /* first, we try the format specific seek */
1547 if (s->iformat->read_seek)
1548 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1555 if(s->iformat->read_timestamp)
1556 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1558 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1561 /*******************************************************/
/* NOTE(review): decimated excerpt — comment delimiters and some statements
 * from the original are elided in this copy. */
1564 * Returns TRUE if the stream has accurate duration in any stream.
1566 * @return TRUE if the stream has accurate duration for at least one component.
1568 static int av_has_duration(AVFormatContext *ic)
/* scan all streams; a single stream with a known duration suffices */
1573 for(i = 0;i < ic->nb_streams; i++) {
1574 st = ic->streams[i];
1575 if (st->duration != AV_NOPTS_VALUE)
/* NOTE(review): decimated excerpt — intermediate original lines elided. */
1582 * Estimate the stream timings from the one of each components.
1584 * Also computes the global bitrate if possible.
1586 static void av_update_stream_timings(AVFormatContext *ic)
1588 int64_t start_time, start_time1, end_time, end_time1;
1589 int64_t duration, duration1;
/* sentinels: min over start times, max over end times / durations */
1593 start_time = INT64_MAX;
1594 end_time = INT64_MIN;
1595 duration = INT64_MIN;
1596 for(i = 0;i < ic->nb_streams; i++) {
1597 st = ic->streams[i];
1598 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
/* rescale per-stream values into AV_TIME_BASE units before comparing */
1599 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1600 if (start_time1 < start_time)
1601 start_time = start_time1;
1602 if (st->duration != AV_NOPTS_VALUE) {
1603 end_time1 = start_time1
1604 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1605 if (end_time1 > end_time)
1606 end_time = end_time1;
1609 if (st->duration != AV_NOPTS_VALUE) {
1610 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1611 if (duration1 > duration)
1612 duration = duration1;
1615 if (start_time != INT64_MAX) {
1616 ic->start_time = start_time;
1617 if (end_time != INT64_MIN) {
/* global span may exceed the longest single stream duration */
1618 if (end_time - start_time > duration)
1619 duration = end_time - start_time;
1622 if (duration != INT64_MIN) {
1623 ic->duration = duration;
1624 if (ic->file_size > 0) {
1625 /* compute the bitrate */
1626 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1627 (double)ic->duration;
/* Propagate the container-level start_time/duration down to every stream
 * that has no timing of its own, rescaled into each stream's time base.
 * NOTE(review): decimated excerpt — intermediate original lines elided. */
1632 static void fill_all_stream_timings(AVFormatContext *ic)
1637 av_update_stream_timings(ic);
1638 for(i = 0;i < ic->nb_streams; i++) {
1639 st = ic->streams[i];
1640 if (st->start_time == AV_NOPTS_VALUE) {
1641 if(ic->start_time != AV_NOPTS_VALUE)
1642 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1643 if(ic->duration != AV_NOPTS_VALUE)
1644 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/* Derive a total bitrate (sum of per-codec bitrates) when unset, then use
 * file_size * 8 / bit_rate to fill in missing per-stream durations.
 * NOTE(review): decimated excerpt — intermediate original lines elided. */
1649 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1651 int64_t filesize, duration;
1655 /* if bit_rate is already set, we believe it */
1656 if (ic->bit_rate == 0) {
1658 for(i=0;i<ic->nb_streams;i++) {
1659 st = ic->streams[i];
1660 bit_rate += st->codec->bit_rate;
1662 ic->bit_rate = bit_rate;
1665 /* if duration is already set, we believe it */
1666 if (ic->duration == AV_NOPTS_VALUE &&
1667 ic->bit_rate != 0 &&
1668 ic->file_size != 0) {
1669 filesize = ic->file_size;
1671 for(i = 0; i < ic->nb_streams; i++) {
1672 st = ic->streams[i];
/* duration = 8*filesize/bit_rate, expressed in the stream's time base */
1673 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1674 if (st->duration == AV_NOPTS_VALUE)
1675 st->duration = duration;
1681 #define DURATION_MAX_READ_SIZE 250000
/* Estimate start times by reading packets from the file head and durations
 * by reading packets near the tail (last DURATION_MAX_READ_SIZE bytes),
 * then restore the original file position and per-stream DTS state.
 * NOTE(review): decimated excerpt — intermediate original lines elided. */
1683 /* only usable for MPEG-PS streams */
1684 static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1686 AVPacket pkt1, *pkt = &pkt1;
1688 int read_size, i, ret;
1690 int64_t filesize, offset, duration;
1692 /* free previous packet */
1693 if (ic->cur_st && ic->cur_st->parser)
1694 av_free_packet(&ic->cur_pkt);
1697 /* flush packet queue */
1698 flush_packet_queue(ic);
1700 for(i=0;i<ic->nb_streams;i++) {
1701 st = ic->streams[i];
1703 av_parser_close(st->parser);
1708 /* we read the first packets to get the first PTS (not fully
1709 accurate, but it is enough now) */
1710 url_fseek(ic->pb, 0, SEEK_SET);
1713 if (read_size >= DURATION_MAX_READ_SIZE)
1715 /* if all info is available, we can stop */
1716 for(i = 0;i < ic->nb_streams; i++) {
1717 st = ic->streams[i];
1718 if (st->start_time == AV_NOPTS_VALUE)
1721 if (i == ic->nb_streams)
1725 ret = av_read_packet(ic, pkt);
1726 }while(ret == AVERROR(EAGAIN));
1729 read_size += pkt->size;
1730 st = ic->streams[pkt->stream_index];
/* first PTS seen on a stream becomes its start_time */
1731 if (pkt->pts != AV_NOPTS_VALUE) {
1732 if (st->start_time == AV_NOPTS_VALUE)
1733 st->start_time = pkt->pts;
1735 av_free_packet(pkt);
1738 /* estimate the end time (duration) */
1739 /* XXX: may need to support wrapping */
1740 filesize = ic->file_size;
1741 offset = filesize - DURATION_MAX_READ_SIZE;
1745 url_fseek(ic->pb, offset, SEEK_SET);
1748 if (read_size >= DURATION_MAX_READ_SIZE)
1752 ret = av_read_packet(ic, pkt);
1753 }while(ret == AVERROR(EAGAIN));
1756 read_size += pkt->size;
1757 st = ic->streams[pkt->stream_index];
1758 if (pkt->pts != AV_NOPTS_VALUE &&
1759 st->start_time != AV_NOPTS_VALUE) {
/* keep the largest end_time - start_time seen near the tail */
1760 end_time = pkt->pts;
1761 duration = end_time - st->start_time;
1763 if (st->duration == AV_NOPTS_VALUE ||
1764 st->duration < duration)
1765 st->duration = duration;
1768 av_free_packet(pkt);
1771 fill_all_stream_timings(ic);
/* leave the file position where the caller expects it */
1773 url_fseek(ic->pb, old_offset, SEEK_SET);
1774 for(i=0; i<ic->nb_streams; i++){
1776 st->cur_dts= st->first_dts;
1777 st->last_IP_pts = AV_NOPTS_VALUE;
/* Top-level timing estimator: picks the PTS-scan strategy for seekable
 * MPEG-PS/TS files, otherwise propagates known stream timings, otherwise
 * falls back to bitrate-based estimation. The trailing printf loop appears
 * to be debug output (presumably inside a disabled #if block in the full
 * source — the guards are elided here).
 * NOTE(review): decimated excerpt — intermediate original lines elided. */
1781 static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset)
1785 /* get the file size, if possible */
1786 if (ic->iformat->flags & AVFMT_NOFILE) {
1789 file_size = url_fsize(ic->pb);
1793 ic->file_size = file_size;
1795 if ((!strcmp(ic->iformat->name, "mpeg") ||
1796 !strcmp(ic->iformat->name, "mpegts")) &&
1797 file_size && !url_is_streamed(ic->pb)) {
1798 /* get accurate estimate from the PTSes */
1799 av_estimate_timings_from_pts(ic, old_offset);
1800 } else if (av_has_duration(ic)) {
1801 /* at least one component has timings - we use them for all
1803 fill_all_stream_timings(ic);
1805 /* less precise: use bitrate info */
1806 av_estimate_timings_from_bit_rate(ic);
1808 av_update_stream_timings(ic);
1814 for(i = 0;i < ic->nb_streams; i++) {
1815 st = ic->streams[i];
1816 printf("%d: start_time: %0.3f duration: %0.3f\n",
1817 i, (double)st->start_time / AV_TIME_BASE,
1818 (double)st->duration / AV_TIME_BASE);
1820 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1821 (double)ic->start_time / AV_TIME_BASE,
1822 (double)ic->duration / AV_TIME_BASE,
1823 ic->bit_rate / 1000);
/* Return nonzero when enough codec parameters are known to use the stream:
 * audio needs sample_rate/channels/sample_fmt (and frame_size for Vorbis
 * and AAC), video needs width and pix_fmt; a codec_id is always required.
 * NOTE(review): decimated excerpt — intermediate original lines elided. */
1828 static int has_codec_parameters(AVCodecContext *enc)
1831 switch(enc->codec_type) {
1832 case CODEC_TYPE_AUDIO:
1833 val = enc->sample_rate && enc->channels && enc->sample_fmt != SAMPLE_FMT_NONE;
1834 if(!enc->frame_size &&
1835 (enc->codec_id == CODEC_ID_VORBIS ||
1836 enc->codec_id == CODEC_ID_AAC))
1839 case CODEC_TYPE_VIDEO:
1840 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1846 return enc->codec_id != CODEC_ID_NONE && val != 0;
/* Open a decoder for the stream (if not already open) and decode one frame
 * so that missing codec parameters get filled in from the bitstream.
 * NOTE(review): decimated excerpt — intermediate original lines elided. */
1849 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1853 int got_picture, data_size, ret=0;
1856 if(!st->codec->codec){
1857 codec = avcodec_find_decoder(st->codec->codec_id);
1860 ret = avcodec_open(st->codec, codec);
/* only decode when parameters are still missing — decoding is expensive */
1865 if(!has_codec_parameters(st->codec)){
1866 switch(st->codec->codec_type) {
1867 case CODEC_TYPE_VIDEO:
1868 ret = avcodec_decode_video(st->codec, &picture,
1869 &got_picture, data, size);
1871 case CODEC_TYPE_AUDIO:
1872 data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1873 samples = av_malloc(data_size);
1876 ret = avcodec_decode_audio2(st->codec, samples,
1877 &data_size, data, size);
/* Look up the container tag (fourcc) for a codec id in a tag table.
 * NOTE(review): decimated excerpt — the loop body and return are elided. */
1888 unsigned int codec_get_tag(const AVCodecTag *tags, int id)
1890 while (tags->id != CODEC_ID_NONE) {
/* Reverse lookup: map a container tag to a codec id; a second pass compares
 * the four tag bytes case-insensitively before giving up.
 * NOTE(review): decimated excerpt — intermediate original lines elided. */
1898 enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag)
1901 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1902 if(tag == tags[i].tag)
1905 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1906 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1907 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1908 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1909 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1912 return CODEC_ID_NONE;
/* Search a NULL-terminated list of tag tables for the tag of a codec id.
 * NOTE(review): decimated excerpt — the return statements are elided. */
1915 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
1918 for(i=0; tags && tags[i]; i++){
1919 int tag= codec_get_tag(tags[i], id);
/* Search a NULL-terminated list of tag tables for the codec id of a tag.
 * NOTE(review): decimated excerpt — intermediate original lines elided. */
1925 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
1928 for(i=0; tags && tags[i]; i++){
1929 enum CodecID id= codec_get_id(tags[i], tag);
1930 if(id!=CODEC_ID_NONE) return id;
1932 return CODEC_ID_NONE;
/* Fill missing chapter end times: each open-ended chapter ends where the
 * next one starts; the last one ends at start_time + duration, rescaled
 * into the chapter's own time base.
 * NOTE(review): decimated excerpt — intermediate original lines elided. */
1935 static void compute_chapters_end(AVFormatContext *s)
1939 for (i=0; i+1<s->nb_chapters; i++)
1940 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
1941 assert(s->chapters[i]->start <= s->chapters[i+1]->start);
1942 assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
1943 s->chapters[i]->end = s->chapters[i+1]->start;
1946 if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
1947 assert(s->start_time != AV_NOPTS_VALUE);
1948 assert(s->duration > 0);
1949 s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
1951 s->chapters[i]->time_base);
1955 /* absolute maximum size we read until we abort */
1956 #define MAX_READ_SIZE 5000000
1958 #define MAX_STD_TIMEBASES (60*12+5)
/* Map index i to a standard framerate expressed in units of 1/(1001*12):
 * i < 720 covers i fps in 1001-based steps; the last five entries cover
 * 24/30/60/12/15 fps exactly.
 * NOTE(review): decimated excerpt — the closing brace is elided. */
1959 static int get_std_framerate(int i){
1960 if(i<60*12) return i*1001;
1961 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
/* NOTE(review): decimated excerpt — comment delimiters and the return
 * statements from the original are elided in this copy. */
1965 * Is the time base unreliable.
1966 * This is a heuristic to balance between quick acceptance of the values in
1967 * the headers vs. some extra checks.
1968 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
1969 * MPEG-2 commonly misuses field repeat flags to store different framerates.
1970 * And there are "variable" fps files this needs to detect as well.
1972 static int tb_unreliable(AVCodecContext *c){
1973 if( c->time_base.den >= 101L*c->time_base.num
1974 || c->time_base.den < 5L*c->time_base.num
1975 /* || c->codec_tag == AV_RL32("DIVX")
1976 || c->codec_tag == AV_RL32("XVID")*/
1977 || c->codec_id == CODEC_ID_MPEG2VIDEO)
/* Probe the input by reading packets until every stream has usable codec
 * parameters (or limits are hit). Collects per-stream inter-packet DTS
 * deltas to vote on a standard framerate, extracts extradata via parser
 * split, optionally decodes a frame to fill missing parameters, then runs
 * the timing estimators and fixes up B-frame DTS.
 * NOTE(review): decimated excerpt — many statements between the embedded
 * original line numbers are missing from this copy. */
1982 int av_find_stream_info(AVFormatContext *ic)
1984 int i, count, ret, read_size, j;
1986 AVPacket pkt1, *pkt;
1987 int64_t last_dts[MAX_STREAMS];
1988 int duration_count[MAX_STREAMS]={0};
1989 double (*duration_error)[MAX_STD_TIMEBASES];
1990 int64_t old_offset = url_ftell(ic->pb);
1991 int64_t codec_info_duration[MAX_STREAMS]={0};
1992 int codec_info_nb_frames[MAX_STREAMS]={0};
/* heap-allocate the per-stream framerate error table (large) */
1994 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
1995 if (!duration_error) return AVERROR(ENOMEM);
1997 for(i=0;i<ic->nb_streams;i++) {
1998 st = ic->streams[i];
1999 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2000 /* if(!st->time_base.num)
2002 if(!st->codec->time_base.num)
2003 st->codec->time_base= st->time_base;
2005 //only for the split stuff
2007 st->parser = av_parser_init(st->codec->codec_id);
2008 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2009 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2014 for(i=0;i<MAX_STREAMS;i++){
2015 last_dts[i]= AV_NOPTS_VALUE;
2021 /* check if one codec still needs to be handled */
2022 for(i=0;i<ic->nb_streams;i++) {
2023 st = ic->streams[i];
2024 if (!has_codec_parameters(st->codec))
2026 /* variable fps and no guess at the real fps */
2027 if( tb_unreliable(st->codec)
2028 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
2030 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2032 if(st->first_dts == AV_NOPTS_VALUE)
2035 if (i == ic->nb_streams) {
2036 /* NOTE: if the format has no header, then we need to read
2037 some packets to get most of the streams, so we cannot
2039 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2040 /* if we found the info for all the codecs, we can stop */
2045 /* we did not get all the codec info, but we read too much data */
2046 if (read_size >= MAX_READ_SIZE) {
2051 /* NOTE: a new stream can be added there if no header in file
2052 (AVFMTCTX_NOHEADER) */
2053 ret = av_read_frame_internal(ic, &pkt1);
2054 if(ret == AVERROR(EAGAIN))
2058 ret = -1; /* we could not have all the codec parameters before EOF */
2059 for(i=0;i<ic->nb_streams;i++) {
2060 st = ic->streams[i];
2061 if (!has_codec_parameters(st->codec)){
2063 avcodec_string(buf, sizeof(buf), st->codec, 0);
2064 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
/* buffer the probed packet so it is replayed to the user later */
2072 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2073 if(av_dup_packet(pkt) < 0) {
2074 av_free(duration_error);
2075 return AVERROR(ENOMEM);
2078 read_size += pkt->size;
2080 st = ic->streams[pkt->stream_index];
2081 if(codec_info_nb_frames[st->index]>1)
2082 codec_info_duration[st->index] += pkt->duration;
2083 if (pkt->duration != 0)
2084 codec_info_nb_frames[st->index]++;
2087 int index= pkt->stream_index;
2088 int64_t last= last_dts[index];
2089 int64_t duration= pkt->dts - last;
/* accumulate squared error against each candidate standard framerate */
2091 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2092 double dur= duration * av_q2d(st->time_base);
2094 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2095 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2096 if(duration_count[index] < 2)
2097 memset(duration_error[index], 0, sizeof(*duration_error));
2098 for(i=1; i<MAX_STD_TIMEBASES; i++){
2099 int framerate= get_std_framerate(i);
2100 int ticks= lrintf(dur*framerate/(1001*12));
2101 double error= dur - ticks*1001*12/(double)framerate;
2102 duration_error[index][i] += error*error;
2104 duration_count[index]++;
2106 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
2107 last_dts[pkt->stream_index]= pkt->dts;
/* let the parser extract extradata (e.g. codec headers) from the packet */
2109 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2110 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2112 st->codec->extradata_size= i;
2113 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2114 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2115 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2119 /* if still no information, we try to open the codec and to
2120 decompress the frame. We try to avoid that in most cases as
2121 it takes longer and uses more memory. For MPEG-4, we need to
2122 decompress for QuickTime. */
2123 if (!has_codec_parameters(st->codec) /*&&
2124 (st->codec->codec_id == CODEC_ID_FLV1 ||
2125 st->codec->codec_id == CODEC_ID_H264 ||
2126 st->codec->codec_id == CODEC_ID_H263 ||
2127 st->codec->codec_id == CODEC_ID_H261 ||
2128 st->codec->codec_id == CODEC_ID_VORBIS ||
2129 st->codec->codec_id == CODEC_ID_MJPEG ||
2130 st->codec->codec_id == CODEC_ID_PNG ||
2131 st->codec->codec_id == CODEC_ID_PAM ||
2132 st->codec->codec_id == CODEC_ID_PGM ||
2133 st->codec->codec_id == CODEC_ID_PGMYUV ||
2134 st->codec->codec_id == CODEC_ID_PBM ||
2135 st->codec->codec_id == CODEC_ID_PPM ||
2136 st->codec->codec_id == CODEC_ID_SHORTEN ||
2137 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
2138 try_decode_frame(st, pkt->data, pkt->size);
2140 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2146 // close codecs which were opened in try_decode_frame()
2147 for(i=0;i<ic->nb_streams;i++) {
2148 st = ic->streams[i];
2149 if(st->codec->codec)
2150 avcodec_close(st->codec);
2152 for(i=0;i<ic->nb_streams;i++) {
2153 st = ic->streams[i];
2154 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2155 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample)
2156 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2158 if(duration_count[i]
2159 && tb_unreliable(st->codec) /*&&
2160 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2161 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
/* pick the candidate framerate with the smallest accumulated error */
2162 double best_error= 2*av_q2d(st->time_base);
2163 best_error= best_error*best_error*duration_count[i]*1000*12*30;
2165 for(j=1; j<MAX_STD_TIMEBASES; j++){
2166 double error= duration_error[i][j] * get_std_framerate(j);
2167 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2168 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2169 if(error < best_error){
2171 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, get_std_framerate(j), 12*1001, INT_MAX);
2176 if (!st->r_frame_rate.num){
/* fall back to the coarser of codec and stream time bases */
2177 if( st->codec->time_base.den * (int64_t)st->time_base.num
2178 <= st->codec->time_base.num * (int64_t)st->time_base.den){
2179 st->r_frame_rate.num = st->codec->time_base.den;
2180 st->r_frame_rate.den = st->codec->time_base.num;
2182 st->r_frame_rate.num = st->time_base.den;
2183 st->r_frame_rate.den = st->time_base.num;
2186 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
2187 if(!st->codec->bits_per_coded_sample)
2188 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2192 av_estimate_timings(ic, old_offset);
2194 compute_chapters_end(ic);
2197 /* correct DTS for B-frame streams with no timestamps */
2198 for(i=0;i<ic->nb_streams;i++) {
2199 st = ic->streams[i];
2200 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2202 ppktl = &ic->packet_buffer;
2204 if(ppkt1->stream_index != i)
2206 if(ppkt1->pkt->dts < 0)
2208 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2210 ppkt1->pkt->dts -= delta;
2215 st->cur_dts -= delta;
2221 av_free(duration_error);
2226 /*******************************************************/
/* Resume playback: prefer the demuxer's read_play hook, otherwise unpause
 * the underlying protocol. (ENOSYS path presumably guarded by an elided
 * preprocessor conditional — verify against the full source.) */
2228 int av_read_play(AVFormatContext *s)
2230 if (s->iformat->read_play)
2231 return s->iformat->read_play(s);
2233 return av_url_read_fpause(s->pb, 0);
2234 return AVERROR(ENOSYS);
/* Pause playback: prefer the demuxer's read_pause hook, otherwise pause
 * the underlying protocol. (ENOSYS path presumably guarded by an elided
 * preprocessor conditional — verify against the full source.) */
2237 int av_read_pause(AVFormatContext *s)
2239 if (s->iformat->read_pause)
2240 return s->iformat->read_pause(s);
2242 return av_url_read_fpause(s->pb, 1);
2243 return AVERROR(ENOSYS);
/* Tear down an input context: close the demuxer, then free every stream's
 * parser/index/extradata/metadata, all programs, buffered packets, chapters
 * and context metadata.
 * NOTE(review): decimated excerpt — intermediate original lines elided. */
2246 void av_close_input_stream(AVFormatContext *s)
2251 /* free previous packet */
2252 if (s->cur_st && s->cur_st->parser)
2253 av_free_packet(&s->cur_pkt);
2255 if (s->iformat->read_close)
2256 s->iformat->read_close(s);
2257 for(i=0;i<s->nb_streams;i++) {
2258 /* free all data in a stream component */
2261 av_parser_close(st->parser);
2263 av_metadata_free(&st->metadata);
2264 av_free(st->index_entries);
2265 av_free(st->codec->extradata);
2267 av_free(st->filename);
2268 av_free(st->priv_data);
2271 for(i=s->nb_programs-1; i>=0; i--) {
2272 av_freep(&s->programs[i]->provider_name);
2273 av_freep(&s->programs[i]->name);
2274 av_metadata_free(&s->programs[i]->metadata);
2275 av_freep(&s->programs[i]->stream_index);
2276 av_freep(&s->programs[i]);
2278 av_freep(&s->programs);
2279 flush_packet_queue(s);
2280 av_freep(&s->priv_data);
2281 while(s->nb_chapters--) {
2282 av_free(s->chapters[s->nb_chapters]->title);
2283 av_metadata_free(&s->chapters[s->nb_chapters]->metadata);
2284 av_free(s->chapters[s->nb_chapters]);
2286 av_freep(&s->chapters);
2287 av_metadata_free(&s->metadata);
/* Close an input file: capture the ByteIOContext first (NULL for AVFMT_NOFILE
 * formats) since av_close_input_stream frees the context; the pb close itself
 * is elided in this copy. */
2291 void av_close_input_file(AVFormatContext *s)
2293 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2294 av_close_input_stream(s);
/* Allocate and register a new AVStream on the context with sane defaults:
 * fresh codec context, unknown start/duration, MPEG-like 90kHz pts base,
 * empty reorder buffer.
 * NOTE(review): decimated excerpt — intermediate original lines elided. */
2299 AVStream *av_new_stream(AVFormatContext *s, int id)
2304 if (s->nb_streams >= MAX_STREAMS)
2307 st = av_mallocz(sizeof(AVStream));
2311 st->codec= avcodec_alloc_context();
2313 /* no default bitrate if decoding */
2314 st->codec->bit_rate = 0;
2316 st->index = s->nb_streams;
2318 st->start_time = AV_NOPTS_VALUE;
2319 st->duration = AV_NOPTS_VALUE;
2320 /* we set the current DTS to 0 so that formats without any timestamps
2321 but durations get some timestamps, formats with some unknown
2322 timestamps have their first few packets buffered and the
2323 timestamps corrected before they are returned to the user */
2325 st->first_dts = AV_NOPTS_VALUE;
2327 /* default pts setting is MPEG-like */
2328 av_set_pts_info(st, 33, 1, 90000);
2329 st->last_IP_pts = AV_NOPTS_VALUE;
2330 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2331 st->pts_buffer[i]= AV_NOPTS_VALUE;
2333 st->sample_aspect_ratio = (AVRational){0,1};
2335 s->streams[s->nb_streams++] = st;
/* Find an existing program by id or allocate and register a new one
 * (discard defaults to AVDISCARD_NONE).
 * NOTE(review): decimated excerpt — intermediate original lines elided. */
2339 AVProgram *av_new_program(AVFormatContext *ac, int id)
2341 AVProgram *program=NULL;
2345 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
2348 for(i=0; i<ac->nb_programs; i++)
2349 if(ac->programs[i]->id == id)
2350 program = ac->programs[i];
2353 program = av_mallocz(sizeof(AVProgram));
2356 dynarray_add(&ac->programs, &ac->nb_programs, program);
2357 program->discard = AVDISCARD_NONE;
/* Replace a program's provider/name strings with duplicated copies; both
 * must be set or both NULL (enforced by the assert). */
2364 void av_set_program_name(AVProgram *program, char *provider_name, char *name)
2366 assert(!provider_name == !name);
2368 av_free(program->provider_name);
2369 av_free(program-> name);
2370 program->provider_name = av_strdup(provider_name);
2371 program-> name = av_strdup( name);
/* Find a chapter by id or allocate and register a new one, then (re)set its
 * title, time base and start. (The end assignment is elided in this copy.)
 * NOTE(review): decimated excerpt — intermediate original lines elided. */
2375 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2377 AVChapter *chapter = NULL;
2380 for(i=0; i<s->nb_chapters; i++)
2381 if(s->chapters[i]->id == id)
2382 chapter = s->chapters[i];
2385 chapter= av_mallocz(sizeof(AVChapter));
2388 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2390 av_free(chapter->title);
2391 chapter->title = av_strdup(title);
2393 chapter->time_base= time_base;
2394 chapter->start = start;
2400 /************************************************************/
2401 /* output media file */
/* Prepare a muxer context: allocate the muxer's private data (if any) and
 * forward the AVFormatParameters to the muxer's set_parameters hook.
 * NOTE(review): decimated excerpt — intermediate original lines elided. */
2403 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2407 if (s->oformat->priv_data_size > 0) {
2408 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2410 return AVERROR(ENOMEM);
2412 s->priv_data = NULL;
2414 if (s->oformat->set_parameters) {
2415 ret = s->oformat->set_parameters(s, ap);
/* Validate per-stream parameters (sample rate, time base, dimensions,
 * aspect ratio, codec tag), allocate muxer private data if needed, call the
 * muxer's write_header, and initialize each stream's fractional pts.
 * NOTE(review): decimated excerpt — intermediate original lines elided. */
2422 int av_write_header(AVFormatContext *s)
2427 // some sanity checks
2428 for(i=0;i<s->nb_streams;i++) {
2431 switch (st->codec->codec_type) {
2432 case CODEC_TYPE_AUDIO:
2433 if(st->codec->sample_rate<=0){
2434 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2437 if(!st->codec->block_align)
2438 st->codec->block_align = st->codec->channels *
2439 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2441 case CODEC_TYPE_VIDEO:
2442 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2443 av_log(s, AV_LOG_ERROR, "time base not set\n");
2446 if(st->codec->width<=0 || st->codec->height<=0){
2447 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2450 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2451 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
2457 if(s->oformat->codec_tag){
2458 if(st->codec->codec_tag){
2460 //check that tag + id is in the table
2461 //if neither is in the table -> OK
2462 //if tag is in the table with another id -> FAIL
2463 //if id is in the table with another tag -> FAIL unless strict < ?
2465 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2469 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2470 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2472 return AVERROR(ENOMEM);
2475 #if LIBAVFORMAT_VERSION_MAJOR < 53
2476 ff_metadata_mux_compat(s);
2479 if(s->oformat->write_header){
2480 ret = s->oformat->write_header(s);
2485 /* init PTS generation */
2486 for(i=0;i<s->nb_streams;i++) {
2487 int64_t den = AV_NOPTS_VALUE;
2490 switch (st->codec->codec_type) {
2491 case CODEC_TYPE_AUDIO:
2492 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2494 case CODEC_TYPE_VIDEO:
2495 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2500 if (den != AV_NOPTS_VALUE) {
2502 return AVERROR_INVALIDDATA;
2503 av_frac_init(&st->pts, 0, 0, den);
/* Muxing-side timestamp fixup: fill missing duration/pts/dts, derive dts
 * from pts through the reorder buffer (for B-frame delay), enforce
 * monotone dts and pts >= dts, then advance the stream's fractional pts.
 * NOTE(review): decimated excerpt — intermediate original lines elided. */
2509 //FIXME merge with compute_pkt_fields
2510 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2511 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2512 int num, den, frame_size, i;
2514 // av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2516 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2519 /* duration field */
2520 if (pkt->duration == 0) {
2521 compute_frame_duration(&num, &den, st, NULL, pkt);
2523 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
/* no B-frames -> dts and pts coincide */
2527 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2530 //XXX/FIXME this is a temporary hack until all encoders output pts
2531 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2533 // pkt->pts= st->cur_dts;
2534 pkt->pts= st->pts.val;
2537 //calculate dts from pts
2538 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
2539 st->pts_buffer[0]= pkt->pts;
2540 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2541 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
/* partial insertion sort keeps the smallest pts at index 0 */
2542 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2543 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2545 pkt->dts= st->pts_buffer[0];
2548 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2549 av_log(st->codec, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2552 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2553 av_log(st->codec, AV_LOG_ERROR, "error, pts < dts\n");
2557 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2558 st->cur_dts= pkt->dts;
2559 st->pts.val= pkt->dts;
2562 switch (st->codec->codec_type) {
2563 case CODEC_TYPE_AUDIO:
2564 frame_size = get_audio_frame_size(st->codec, pkt->size);
2566 /* HACK/FIXME, we skip the initial 0 size packets as they are most
2567 likely equal to the encoder delay, but it would be better if we
2568 had the real timestamps from the encoder */
2569 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2570 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2573 case CODEC_TYPE_VIDEO:
2574 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
/* Non-interleaved write path: fix up packet timestamps, call the muxer's
 * write_packet, and surface any ByteIOContext error.
 * NOTE(review): decimated excerpt — intermediate original lines elided. */
2582 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2584 int ret = compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2586 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2589 ret= s->oformat->write_packet(s, pkt);
2591 ret= url_ferror(s->pb);
/* Default interleaver: insert the packet into the dts-sorted buffer
 * (cross-stream dts compared via time-base cross-multiplication), then
 * output the head packet once every stream has contributed or flush is set.
 * NOTE(review): decimated excerpt — intermediate original lines elided. */
2595 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2596 AVPacketList *pktl, **next_point, *this_pktl;
2598 int streams[MAX_STREAMS];
2601 AVStream *st= s->streams[ pkt->stream_index];
2603 // assert(pkt->destruct != av_destruct_packet); //FIXME
2605 this_pktl = av_mallocz(sizeof(AVPacketList));
2606 this_pktl->pkt= *pkt;
/* take ownership: keep the caller from freeing a non-shared payload */
2607 if(pkt->destruct == av_destruct_packet)
2608 pkt->destruct= NULL; // not shared -> must keep original from being freed
2610 av_dup_packet(&this_pktl->pkt); //shared -> must dup
2612 next_point = &s->packet_buffer;
2614 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2615 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2616 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2617 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2619 next_point= &(*next_point)->next;
2621 this_pktl->next= *next_point;
2622 *next_point= this_pktl;
2625 memset(streams, 0, sizeof(streams));
2626 pktl= s->packet_buffer;
2628 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
2629 if(streams[ pktl->pkt.stream_index ] == 0)
2631 streams[ pktl->pkt.stream_index ]++;
2635 if(stream_count && (s->nb_streams == stream_count || flush)){
2636 pktl= s->packet_buffer;
2639 s->packet_buffer= pktl->next;
2643 av_init_packet(out);
/* NOTE(review): decimated excerpt — the comment delimiters of the doxygen
 * block below are elided in this copy. */
2649 * Interleaves an AVPacket correctly so it can be muxed.
2650 * @param out the interleaved packet will be output here
2651 * @param in the input packet
2652 * @param flush 1 if no further packets are available as input and all
2653 * remaining packets should be output
2654 * @return 1 if a packet was output, 0 if no packet could be output,
2655 * < 0 if an error occurred
2657 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2658 if(s->oformat->interleave_packet)
2659 return s->oformat->interleave_packet(s, out, in, flush);
2661 return av_interleave_packet_per_dts(s, out, in, flush);
/* Interleaved write path: drop empty audio packets, fix up timestamps, then
 * pull interleaved packets from the buffer and hand them to the muxer until
 * none are ready; surfaces ByteIOContext errors.
 * NOTE(review): decimated excerpt — intermediate original lines elided. */
2664 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2665 AVStream *st= s->streams[ pkt->stream_index];
2667 //FIXME/XXX/HACK drop zero sized packets
2668 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2671 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2672 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2675 if(pkt->dts == AV_NOPTS_VALUE)
2680 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2681 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2684 ret= s->oformat->write_packet(s, &opkt);
2686 av_free_packet(&opkt);
2691 if(url_ferror(s->pb))
2692 return url_ferror(s->pb);
/* Flush all remaining interleaved packets, call the muxer's write_trailer,
 * and free per-stream and context private data.
 * NOTE(review): decimated excerpt — intermediate original lines elided. */
2696 int av_write_trailer(AVFormatContext *s)
2702 ret= av_interleave_packet(s, &pkt, NULL, 1);
2703 if(ret<0) //FIXME cleanup needed for ret<0 ?
2708 ret= s->oformat->write_packet(s, &pkt);
2710 av_free_packet(&pkt);
2714 if(url_ferror(s->pb))
2718 if(s->oformat->write_trailer)
2719 ret = s->oformat->write_trailer(s);
2722 ret=url_ferror(s->pb);
2723 for(i=0;i<s->nb_streams;i++)
2724 av_freep(&s->streams[i]->priv_data);
2725 av_freep(&s->priv_data);
/* Append a stream index to the program with the given id, skipping the add
 * when the index is already present; grows the index array with realloc.
 * NOTE(review): decimated excerpt — intermediate original lines elided. */
2729 void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
2732 AVProgram *program=NULL;
2735 for(i=0; i<ac->nb_programs; i++){
2736 if(ac->programs[i]->id != progid)
2738 program = ac->programs[i];
2739 for(j=0; j<program->nb_stream_indexes; j++)
2740 if(program->stream_index[j] == idx)
/* realloc into tmp first so the original array survives failure */
2743 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
2746 program->stream_index = tmp;
2747 program->stream_index[program->nb_stream_indexes++] = idx;
2752 /* "user interface" functions */
/**
 * Log a one-line human-readable description of a single stream
 * ("Stream #x.y[0xid](lang): codec string, fps ...") at AV_LOG_INFO level.
 *
 * @param ic        format context containing the stream
 * @param i         index of the stream within ic->streams
 * @param index     index of the format context itself (the "#x" part)
 * @param is_output nonzero when dumping an output (muxer) context
 */
2753 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
2756 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
2757 AVStream *st = ic->streams[i];
/* reduce the time base by its gcd for the debug-level print below */
2758 int g = av_gcd(st->time_base.num, st->time_base.den);
2759 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2760 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2761 /* the pid is an important information, so we display it */
2762 /* XXX: add a generic system */
2763 if (flags & AVFMT_SHOW_IDS)
2764 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2765 if (strlen(st->language) > 0)
2766 av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
/* raw time base only shown at debug verbosity */
2767 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2768 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2769 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
/* tb(r): rate derived from r_frame_rate, when known */
2770 if(st->r_frame_rate.den && st->r_frame_rate.num)
2771 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(r)", av_q2d(st->r_frame_rate));
2772 /* else if(st->time_base.den && st->time_base.num)
2773 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(m)", 1/av_q2d(st->time_base));*/
/* tb(c): rate derived from the codec time base */
2775 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(c)", 1/av_q2d(st->codec->time_base));
2777 av_log(NULL, AV_LOG_INFO, "\n");
/**
 * Log a human-readable summary of a media container: format name, URL,
 * duration, start time, bitrate, programs, and a line per stream
 * (via dump_stream_format()).
 *
 * NOTE(review): parameter lines for 'index', 'url' and 'is_output' are
 * elided in this excerpt; their use below matches the usual signature
 * dump_format(ic, index, url, is_output) — confirm against the full file.
 */
2780 void dump_format(AVFormatContext *ic,
2787 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2788 is_output ? "Output" : "Input",
2790 is_output ? ic->oformat->name : ic->iformat->name,
2791 is_output ? "to" : "from", url);
2793 av_log(NULL, AV_LOG_INFO, " Duration: ");
2794 if (ic->duration != AV_NOPTS_VALUE) {
2795 int hours, mins, secs, us;
/* split the AV_TIME_BASE-scaled duration into wall-clock components */
2796 secs = ic->duration / AV_TIME_BASE;
2797 us = ic->duration % AV_TIME_BASE;
2802 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
2803 (100 * us) / AV_TIME_BASE);
2805 av_log(NULL, AV_LOG_INFO, "N/A");
2807 if (ic->start_time != AV_NOPTS_VALUE) {
2809 av_log(NULL, AV_LOG_INFO, ", start: ");
2810 secs = ic->start_time / AV_TIME_BASE;
2811 us = ic->start_time % AV_TIME_BASE;
/* rescale the fractional part to microseconds for display */
2812 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2813 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2815 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2817 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2819 av_log(NULL, AV_LOG_INFO, "N/A");
2821 av_log(NULL, AV_LOG_INFO, "\n");
/* when programs exist, group the per-stream lines under each program */
2823 if(ic->nb_programs) {
2825 for(j=0; j<ic->nb_programs; j++) {
2826 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
2827 ic->programs[j]->name ? ic->programs[j]->name : "");
2828 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++)
2829 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
/* otherwise dump every stream flat */
2832 for(i=0;i<ic->nb_streams;i++)
2833 dump_stream_format(ic, i, index, is_output);
/* Deprecated compatibility shims, kept only for ABI versions before 53. */
2836 #if LIBAVFORMAT_VERSION_MAJOR < 53
/**
 * Deprecated wrapper: parse "WxH" (or an abbreviation) into width/height.
 * Simply forwards to av_parse_video_frame_size().
 */
2837 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2839 return av_parse_video_frame_size(width_ptr, height_ptr, str);
/**
 * Deprecated wrapper: parse a frame-rate string into separate num/den
 * ints by forwarding to av_parse_video_frame_rate() and unpacking the
 * resulting AVRational.
 *
 * NOTE(review): the return statement is elided in this excerpt;
 * presumably it propagates 'ret' — confirm against the full file.
 */
2842 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
2844 AVRational frame_rate;
2845 int ret = av_parse_video_frame_rate(&frame_rate, arg);
2846 *frame_rate_num= frame_rate.num;
2847 *frame_rate_den= frame_rate.den;
/**
 * Return the current wall-clock time in microseconds since the Unix
 * epoch, built from gettimeofday()'s seconds + microseconds fields.
 */
2852 int64_t av_gettime(void)
2855 gettimeofday(&tv,NULL);
/* widen tv_sec before multiplying to avoid 32-bit overflow */
2856 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
/**
 * Parse a date/time string into a value in microseconds.
 *
 * @param datestr string to parse; either an absolute date-time
 *        ("[YYYY-MM-DD][T ]HH:MM:SS[.m...][zZ]") or, when 'duration'
 *        is nonzero, a duration ("HH:MM:SS[.m...]" or a plain number
 *        of seconds)
 * @param duration nonzero to interpret datestr as a duration rather
 *        than an absolute timestamp
 * @return the parsed value in microseconds (negated when the input was
 *         marked negative)
 *
 * NOTE(review): many physical lines are elided from this excerpt
 * (format-table contents, error returns, timezone handling); the
 * comments below describe only the visible steps.
 */
2859 int64_t parse_date(const char *datestr, int duration)
/* accepted strptime-style layouts for the date and time portions */
2865 static const char * const date_fmt[] = {
2869 static const char * const time_fmt[] = {
2879 time_t now = time(0);
2881 len = strlen(datestr);
2883 lastch = datestr[len - 1];
/* a trailing 'z'/'Z' marks the timestamp as UTC */
2886 is_utc = (lastch == 'z' || lastch == 'Z');
2888 memset(&dt, 0, sizeof(dt));
2893 /* parse the year-month-day part */
2894 for (i = 0; i < FF_ARRAY_ELEMS(date_fmt); i++) {
2895 q = small_strptime(p, date_fmt[i], &dt);
2901 /* if the year-month-day part is missing, then take the
2902 * current year-month-day time */
2907 dt = *localtime(&now);
2909 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
/* skip the date/time separator ('T', 't' or space) */
2914 if (*p == 'T' || *p == 't' || *p == ' ')
2917 /* parse the hour-minute-second part */
2918 for (i = 0; i < FF_ARRAY_ELEMS(time_fmt); i++) {
2919 q = small_strptime(p, time_fmt[i], &dt);
2925 /* parse datestr as a duration */
2930 /* parse datestr as HH:MM:SS */
2931 q = small_strptime(p, time_fmt[0], &dt);
2933 /* parse datestr as S+ */
2934 dt.tm_sec = strtol(p, (char **)&q, 10);
2936 /* the parsing didn't succeed */
2943 /* Now we have all the fields that we can get */
/* durations collapse to a plain second count; no calendar conversion */
2949 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2951 dt.tm_isdst = -1; /* unknown */
2961 /* parse the .m... part */
/* accumulate up to 6 fractional digits as microseconds */
2965 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2968 val += n * (*q - '0');
2972 return negative ? -t : t;
/**
 * Look up 'tag1' in an HTTP-style "tag1=val1&tag2=val2&..." info string
 * and, when found, copy its (bounds-checked) value into 'arg'.
 *
 * @param arg      destination buffer for the tag's value
 * @param arg_size size of 'arg' in bytes
 * @param tag1     tag name to search for
 * @param info     the key=value&... string to scan
 * @return nonzero if the tag was found (per the visible strcmp match) —
 *         the actual return statements are elided from this excerpt
 */
2975 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
/* copy the tag name up to '=', '&' or end of string */
2985 while (*p != '\0' && *p != '=' && *p != '&') {
2986 if ((q - tag) < sizeof(tag) - 1)
/* copy the value up to the next '&' or end of string */
2994 while (*p != '&' && *p != '\0') {
2995 if ((q - arg) < arg_size - 1) {
2996 if (!strcmp(tag, tag1))
/**
 * Expand a "%d"/"%0Nd" pattern in 'path' with 'number' to build a frame
 * filename in 'buf' (e.g. "img%03d.png" + 7 -> "img007.png").
 *
 * @param buf      destination buffer
 * @param buf_size size of 'buf' in bytes
 * @param path     template containing at most one %d-style directive
 * @param number   frame number substituted for the directive
 * @return 0 on success; the failure paths (overflow, missing/duplicate
 *         %d) are elided from this excerpt — confirm in the full file
 */
3014 int av_get_frame_filename(char *buf, int buf_size,
3015 const char *path, int number)
3018 char *q, buf1[20], c;
3019 int nd, len, percentd_found;
/* read the optional zero-pad width between '%' and 'd' */
3031 while (isdigit(*p)) {
3032 nd = nd * 10 + *p++ - '0';
3035 } while (isdigit(c));
/* render the number with the requested zero-padded width */
3044 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3046 if ((q - buf + len) > buf_size - 1)
3048 memcpy(q, buf1, len);
/* copy ordinary characters through, bounds-checked */
3056 if ((q - buf) < buf_size - 1)
3060 if (!percentd_found)
/**
 * Shared implementation for av_hex_dump()/av_hex_dump_log(): write a
 * 16-bytes-per-row hex + printable-ASCII dump of 'buf' either to a
 * FILE* (when f != NULL) or to av_log() at the given level.
 */
3069 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
3072 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
/* one row of output per 16 input bytes */
3074 for(i=0;i<size;i+=16) {
3081 PRINT(" %02x", buf[i+j]);
/* ASCII column: non-printable bytes are replaced (replacement char elided here) */
3086 for(j=0;j<len;j++) {
3088 if (c < ' ' || c > '~')
/** Hex-dump 'buf' to the stdio stream 'f' (thin wrapper, level unused). */
3097 void av_hex_dump(FILE *f, uint8_t *buf, int size)
3099 hex_dump_internal(NULL, f, 0, buf, size);
/** Hex-dump 'buf' through av_log() on context 'avcl' at 'level'. */
3102 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3104 hex_dump_internal(avcl, NULL, level, buf, size);
3107 //FIXME needs to know the time_base
/**
 * Shared implementation for av_pkt_dump()/av_pkt_dump_log(): print an
 * AVPacket's stream index, keyframe flag, duration, dts, pts and size —
 * to a FILE* when f != NULL, otherwise through av_log() at 'level'.
 * Timestamps are printed in seconds assuming AV_TIME_BASE units
 * (hence the FIXME above). Optionally hex-dumps the payload.
 */
3108 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
3110 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3111 PRINT("stream #%d:\n", pkt->stream_index);
3112 PRINT(" keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
3113 PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
3114 /* DTS is _always_ valid after av_read_frame() */
3116 if (pkt->dts == AV_NOPTS_VALUE)
3119 PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
3120 /* PTS may not be known if B-frames are present. */
3122 if (pkt->pts == AV_NOPTS_VALUE)
3125 PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
3127 PRINT(" size=%d\n", pkt->size);
/* payload dump is gated by dump_payload (check elided in this excerpt) */
3130 av_hex_dump(f, pkt->data, pkt->size);
/** Dump packet metadata (and optionally payload) to the stdio stream 'f'. */
3133 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3135 pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
/** Dump packet metadata through av_log() on context 'avcl' at 'level'. */
3138 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3140 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
/**
 * Split a URL of the form
 *   proto://user[:pass]@hostname[:port]/path  (or a plain filename)
 * into its components. Any output buffer may be sized 0 to skip that
 * component; port_ptr may be NULL. Missing components are returned as
 * empty strings, and the port as -1.
 *
 * NOTE(review): the url parameter line and a few statements are elided
 * from this excerpt; comments below cover only the visible logic.
 */
3143 void url_split(char *proto, int proto_size,
3144 char *authorization, int authorization_size,
3145 char *hostname, int hostname_size,
3147 char *path, int path_size,
/* cursor plus markers: ls=path start, at='@', col=':', brk=']' */
3150 const char *p, *ls, *at, *col, *brk;
/* initialize all outputs to "not present" */
3152 if (port_ptr) *port_ptr = -1;
3153 if (proto_size > 0) proto[0] = 0;
3154 if (authorization_size > 0) authorization[0] = 0;
3155 if (hostname_size > 0) hostname[0] = 0;
3156 if (path_size > 0) path[0] = 0;
3158 /* parse protocol */
3159 if ((p = strchr(url, ':'))) {
3160 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3165 /* no protocol means plain filename */
3166 av_strlcpy(path, url, path_size);
3170 /* separate path from hostname */
3171 ls = strchr(p, '/');
/* a query string with no '/' also starts the "path" portion */
3173 ls = strchr(p, '?');
3175 av_strlcpy(path, ls, path_size);
3177 ls = &p[strlen(p)]; // XXX
3179 /* the rest is hostname, use that to parse auth/port */
3181 /* authorization (user[:pass]@hostname) */
3182 if ((at = strchr(p, '@')) && at < ls) {
3183 av_strlcpy(authorization, p,
3184 FFMIN(authorization_size, at + 1 - p));
3185 p = at + 1; /* skip '@' */
/* bracketed IPv6 literal: [addr]:port */
3188 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3190 av_strlcpy(hostname, p + 1,
3191 FFMIN(hostname_size, brk - p));
3192 if (brk[1] == ':' && port_ptr)
3193 *port_ptr = atoi(brk + 2);
/* plain hostname:port */
3194 } else if ((col = strchr(p, ':')) && col < ls) {
3195 av_strlcpy(hostname, p,
3196 FFMIN(col + 1 - p, hostname_size));
3197 if (port_ptr) *port_ptr = atoi(col + 1);
/* hostname with no port */
3199 av_strlcpy(hostname, p,
3200 FFMIN(ls + 1 - p, hostname_size));
/**
 * Encode 's' bytes of 'src' into uppercase hexadecimal ASCII in 'buff'
 * (two output characters per input byte; 'buff' must hold at least 2*s
 * bytes). The return statement is elided from this excerpt.
 */
3204 char *ff_data_to_hex(char *buff, const uint8_t *src, int s)
/* lookup table for one nibble -> one hex digit (middle rows elided) */
3207 static const char hex_table[16] = { '0', '1', '2', '3',
3210 'C', 'D', 'E', 'F' };
3212 for(i = 0; i < s; i++) {
/* high nibble first, then low nibble */
3213 buff[i * 2] = hex_table[src[i] >> 4];
3214 buff[i * 2 + 1] = hex_table[src[i] & 0xF];
3220 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3221 int pts_num, int pts_den)
3223 unsigned int gcd= av_gcd(pts_num, pts_den);
3224 s->pts_wrap_bits = pts_wrap_bits;
3225 s->time_base.num = pts_num/gcd;
3226 s->time_base.den = pts_den/gcd;
3229 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, gcd);