2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 #include "libavcodec/opt.h"
25 #include "libavutil/avstring.h"
35 * @file libavformat/utils.c
36 * various utility functions for use within FFmpeg
/* Return the libavformat version the library was compiled with, so callers
 * can compare against the LIBAVFORMAT_VERSION_INT of their headers.
 * NOTE(review): interior lines (braces) are missing from this view. */
39 unsigned avformat_version(void)
41 return LIBAVFORMAT_VERSION_INT;
44 /* fraction handling */
47 * f = val + (num / den) + 0.5.
49 * 'num' is normalized so that it is such as 0 <= num < den.
51 * @param f fractional number
52 * @param val integer value
53 * @param num must be >= 0
54 * @param den must be >= 1
/* Initialize the exact rational accumulator used for sample-accurate PTS
 * generation; the body that performs the normalization is not visible in
 * this view. */
56 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
69 * Fractional addition to f: f = f + (incr / f->den).
71 * @param f fractional number
72 * @param incr increment, can be positive or negative
/* Adds incr/den to the fraction; the visible `num >= den` branch is part of
 * the renormalization keeping 0 <= num < den after the addition. */
74 static void av_frac_add(AVFrac *f, int64_t incr)
87 } else if (num >= den) {
/* Global registry of demuxers/muxers, maintained as singly linked lists;
 * av_register_{input,output}_format append at the tail. */
94 /** head of registered input format linked list */
95 AVInputFormat *first_iformat = NULL;
96 /** head of registered output format linked list */
97 AVOutputFormat *first_oformat = NULL;
/* Iterate over registered input formats: pass NULL to get the list head,
 * or a format to get its successor (NULL at end of list). */
99 AVInputFormat *av_iformat_next(AVInputFormat *f)
101 if(f) return f->next;
102 else return first_iformat;
/* Iterate over registered output formats; same protocol as
 * av_iformat_next(): NULL starts at the head. */
105 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
107 if(f) return f->next;
108 else return first_oformat;
/* Register a demuxer: walk to the end of the first_iformat list and append
 * (the pointer-to-pointer walk keeps the head case uniform). Not
 * thread-safe; registration is expected at startup. */
111 void av_register_input_format(AVInputFormat *format)
115 while (*p != NULL) p = &(*p)->next;
/* Register a muxer: append at the tail of the first_oformat list, mirroring
 * av_register_input_format(). */
120 void av_register_output_format(AVOutputFormat *format)
124 while (*p != NULL) p = &(*p)->next;
/* Return nonzero if the extension of 'filename' (text after the last '.')
 * case-insensitively matches one entry of the comma-separated 'extensions'
 * list. The loop copies one list entry into a bounded local buffer (ext1)
 * before comparing. */
129 int match_ext(const char *filename, const char *extensions)
137 ext = strrchr(filename, '.');
143 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
146 if (!strcasecmp(ext1, ext))
/* Case-insensitively match 'name' against a comma-separated list of format
 * names. FFMAX(p - names, namelen) makes the comparison length cover the
 * longer of entry and name, so "mp3" does not match "mp30". The final
 * strcasecmp handles the last (comma-less) entry. */
156 static int match_format(const char *name, const char *names)
164 namelen = strlen(name);
165 while ((p = strchr(names, ','))) {
166 len = FFMAX(p - names, namelen);
167 if (!strncasecmp(name, names, len))
171 return !strcasecmp(name, names);
/* Pick the best output format given an optional short name, filename and
 * MIME type: every registered muxer is scored (name match, MIME match,
 * extension match — exact weights are in lines not visible here) and the
 * highest scorer is returned. The image2 special case routes numbered
 * image sequences to the image2 muxer. */
174 AVOutputFormat *guess_format(const char *short_name, const char *filename,
175 const char *mime_type)
177 AVOutputFormat *fmt, *fmt_found;
178 int score_max, score;
180 /* specific test for image sequences */
181 #if CONFIG_IMAGE2_MUXER
182 if (!short_name && filename &&
183 av_filename_number_test(filename) &&
184 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
185 return guess_format("image2", NULL, NULL);
188 /* Find the proper file type. */
192 while (fmt != NULL) {
194 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
196 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
198 if (filename && fmt->extensions &&
199 match_ext(filename, fmt->extensions)) {
202 if (score > score_max) {
/* Like guess_format(), but prefer a streamable variant of the guessed
 * muxer when one is registered under the name "<fmt>_stream". */
211 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
212 const char *mime_type)
214 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
217 AVOutputFormat *stream_fmt;
218 char stream_format_name[64];
220 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
221 stream_fmt = guess_format(stream_format_name, NULL, NULL);
/* Choose the default codec id for a stream of the given type in 'fmt':
 * for video, the image2/image2pipe muxers derive the codec from the
 * filename extension, otherwise the muxer's default video codec is used;
 * for audio the muxer's default audio codec; anything else yields
 * CODEC_ID_NONE. */
230 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
231 const char *filename, const char *mime_type, enum CodecType type){
232 if(type == CODEC_TYPE_VIDEO){
233 enum CodecID codec_id= CODEC_ID_NONE;
235 #if CONFIG_IMAGE2_MUXER
236 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
237 codec_id= av_guess_image2_codec(filename);
240 if(codec_id == CODEC_ID_NONE)
241 codec_id= fmt->video_codec;
243 }else if(type == CODEC_TYPE_AUDIO)
244 return fmt->audio_codec;
246 return CODEC_ID_NONE;
/* Linear search of the registered demuxer list for one whose (possibly
 * comma-separated) name list matches 'short_name'. */
249 AVInputFormat *av_find_input_format(const char *short_name)
252 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
253 if (match_format(short_name, fmt->name))
259 /* memory handling */
/* Default destructor for packets that own their buffer: the (not visible
 * here) free of pkt->data precedes clearing the fields so the packet can
 * be safely reused. */
261 void av_destruct_packet(AVPacket *pkt)
264 pkt->data = NULL; pkt->size = 0;
/* Reset a packet to safe defaults: unknown timestamps, stream 0, and the
 * no-free destructor (the packet does not own any buffer yet). */
267 void av_init_packet(AVPacket *pkt)
269 pkt->pts = AV_NOPTS_VALUE;
270 pkt->dts = AV_NOPTS_VALUE;
273 pkt->convergence_duration = 0;
275 pkt->stream_index = 0;
276 pkt->destruct= av_destruct_packet_nofree;
/* Allocate a packet of 'size' bytes plus FF_INPUT_BUFFER_PADDING_SIZE of
 * zeroed padding (decoders may over-read). The unsigned-wraparound test on
 * size + padding rejects sizes that would overflow the allocation. */
279 int av_new_packet(AVPacket *pkt, int size)
282 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
283 return AVERROR(ENOMEM);
284 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
286 return AVERROR(ENOMEM);
287 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
292 pkt->destruct = av_destruct_packet;
/* Allocate a packet and fill it with up to 'size' bytes read from the IO
 * context, recording the file position in pkt->pos before the read. */
296 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
298 int ret= av_new_packet(pkt, size);
303 pkt->pos= url_ftell(s);
305 ret= get_buffer(s, pkt->data, size);
/* Ensure the packet owns its data: if the destructor indicates borrowed
 * memory (nofree or NULL), copy the payload into a fresh buffer with
 * zeroed padding and switch to the owning destructor. The unsigned
 * wraparound check mirrors av_new_packet()'s overflow guard. */
316 int av_dup_packet(AVPacket *pkt)
318 if (((pkt->destruct == av_destruct_packet_nofree) || (pkt->destruct == NULL)) && pkt->data) {
318 /* We duplicate the packet and don't forget to add the padding again. */
319 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
320 return AVERROR(ENOMEM);
321 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
323 return AVERROR(ENOMEM);
325 memcpy(data, pkt->data, pkt->size);
326 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
328 pkt->destruct = av_destruct_packet;
/* Return nonzero if 'filename' contains a valid frame-number pattern
 * (e.g. "%d"), by attempting to expand it for frame 1. */
333 int av_filename_number_test(const char *filename)
336 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
/* Probe all registered demuxers against 'pd' and return the best match
 * whose score exceeds *score_max (updated in place). Demuxers whose
 * AVFMT_NOFILE flag disagrees with 'is_opened' are skipped; formats
 * without a read_probe() fall back to an extension match. */
339 static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
341 AVInputFormat *fmt1, *fmt;
345 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
346 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
349 if (fmt1->read_probe) {
350 score = fmt1->read_probe(pd);
351 } else if (fmt1->extensions) {
352 if (match_ext(pd->filename, fmt1->extensions)) {
356 if (score > *score_max) {
359 }else if (score == *score_max)
/* Public probe entry point: same as av_probe_input_format2() with a local
 * minimum-score threshold (initialized in a line not visible here). */
365 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
367 return av_probe_input_format2(pd, is_opened, &score);
/* Late codec identification for streams created with CODEC_ID_PROBE:
 * probe the accumulated packet data and translate the matched demuxer
 * name into a concrete codec id + type on the stream. Only the handful
 * of raw formats listed below are recognized. */
370 static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
373 fmt = av_probe_input_format2(pd, 1, &score);
376 if (!strcmp(fmt->name, "mp3")) {
377 st->codec->codec_id = CODEC_ID_MP3;
378 st->codec->codec_type = CODEC_TYPE_AUDIO;
379 } else if (!strcmp(fmt->name, "ac3")) {
380 st->codec->codec_id = CODEC_ID_AC3;
381 st->codec->codec_type = CODEC_TYPE_AUDIO;
382 } else if (!strcmp(fmt->name, "mpegvideo")) {
383 st->codec->codec_id = CODEC_ID_MPEG2VIDEO;
384 st->codec->codec_type = CODEC_TYPE_VIDEO;
385 } else if (!strcmp(fmt->name, "m4v")) {
386 st->codec->codec_id = CODEC_ID_MPEG4;
387 st->codec->codec_type = CODEC_TYPE_VIDEO;
388 } else if (!strcmp(fmt->name, "h264")) {
389 st->codec->codec_id = CODEC_ID_H264;
390 st->codec->codec_type = CODEC_TYPE_VIDEO;
396 /************************************************************/
397 /* input media file */
400 * Open a media file from an IO stream. 'fmt' must be specified.
/* Allocates (or reuses, when ap->prealloced_context is set) the
 * AVFormatContext, allocates the demuxer's private data, calls the
 * demuxer's read_header(), and records the post-header data offset.
 * The trailing lines are the error path: private data and per-stream
 * allocations are released before returning the error code. */
402 int av_open_input_stream(AVFormatContext **ic_ptr,
403 ByteIOContext *pb, const char *filename,
404 AVInputFormat *fmt, AVFormatParameters *ap)
408 AVFormatParameters default_ap;
412 memset(ap, 0, sizeof(default_ap));
415 if(!ap->prealloced_context)
416 ic = avformat_alloc_context();
420 err = AVERROR(ENOMEM);
425 ic->duration = AV_NOPTS_VALUE;
426 ic->start_time = AV_NOPTS_VALUE;
427 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
429 /* allocate private data */
430 if (fmt->priv_data_size > 0) {
431 ic->priv_data = av_mallocz(fmt->priv_data_size);
432 if (!ic->priv_data) {
433 err = AVERROR(ENOMEM);
437 ic->priv_data = NULL;
440 if (ic->iformat->read_header) {
441 err = ic->iformat->read_header(ic, ap);
446 if (pb && !ic->data_offset)
447 ic->data_offset = url_ftell(ic->pb);
449 #if LIBAVFORMAT_VERSION_MAJOR < 53
450 ff_metadata_demux_compat(ic);
/* error path (label not visible in this view): undo partial setup */
458 av_freep(&ic->priv_data);
459 for(i=0;i<ic->nb_streams;i++) {
460 AVStream *st = ic->streams[i];
462 av_free(st->priv_data);
463 av_free(st->codec->extradata);
473 /** size of probe buffer, for guessing file type from file contents */
474 #define PROBE_BUF_MIN 2048
/* upper bound of the doubling probe loop in av_open_input_file(): 1 MiB */
475 #define PROBE_BUF_MAX (1<<20)
/* Open 'filename' for demuxing: first probe by filename only (for
 * AVFMT_NOFILE formats such as RTSP), otherwise open the file and probe
 * its contents with an exponentially growing buffer — each iteration
 * demands a lower score until, at PROBE_BUF_MAX, any positive score is
 * accepted. Finishes by delegating to av_open_input_stream(). */
477 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
480 AVFormatParameters *ap)
483 AVProbeData probe_data, *pd = &probe_data;
484 ByteIOContext *pb = NULL;
488 pd->filename = filename;
493 /* guess format if no file can be opened */
494 fmt = av_probe_input_format(pd, 0);
497 /* Do not open file if the format does not need it. XXX: specific
498 hack needed to handle RTSP/TCP */
499 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
500 /* if no file needed do not try to open one */
501 if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
505 url_setbufsize(pb, buf_size);
508 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
509 int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
510 /* read probe data */
511 pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
512 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
513 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
/* rewind for the real open; if seeking fails, re-open from scratch */
514 if (url_fseek(pb, 0, SEEK_SET) < 0) {
516 if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
522 /* guess file format */
523 fmt = av_probe_input_format2(pd, 1, &score);
528 /* if still no format found, error */
534 /* check filename in case an image number is expected */
535 if (fmt->flags & AVFMT_NEEDNUMBER) {
536 if (!av_filename_number_test(filename)) {
537 err = AVERROR_NUMEXPECTED;
541 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
554 /*******************************************************/
/* Append 'pkt' to a packet list in O(1) using the cached tail pointer
 * *plast_pktl; returns the stored packet (NULL on allocation failure, per
 * the lines not visible here). */
556 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
557 AVPacketList **plast_pktl){
558 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
563 (*plast_pktl)->next = pktl;
565 *packet_buffer = pktl;
567 /* add the packet in the buffered packet list */
/* Read the next raw packet from the demuxer. Packets belonging to streams
 * still marked CODEC_ID_PROBE are held back in raw_packet_buffer and their
 * payload accumulated into st->probe_data; once enough data is gathered
 * (the av_log2 test fires each time buf_size crosses a power of two),
 * set_codec_from_probe_data() resolves the real codec and the buffered
 * packets are released. Forced codec ids from the context override the
 * demuxer's choice per media type. */
573 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
579 AVPacketList *pktl = s->raw_packet_buffer;
/* drain buffered packets whose stream has been identified */
583 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE){
584 s->raw_packet_buffer = pktl->next;
591 ret= s->iformat->read_packet(s, pkt);
594 st= s->streams[pkt->stream_index];
596 switch(st->codec->codec_type){
597 case CODEC_TYPE_VIDEO:
598 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
600 case CODEC_TYPE_AUDIO:
601 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
603 case CODEC_TYPE_SUBTITLE:
604 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
608 if(!pktl && st->codec->codec_id!=CODEC_ID_PROBE)
611 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
613 if(st->codec->codec_id == CODEC_ID_PROBE){
614 AVProbeData *pd = &st->probe_data;
616 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
617 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
618 pd->buf_size += pkt->size;
619 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
/* re-probe only when the buffer size crosses a power-of-two boundary */
621 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
622 set_codec_from_probe_data(st, pd, 1);
623 if(st->codec->codec_id != CODEC_ID_PROBE){
632 /**********************************************************/
635 * Get the number of samples of an audio frame. Return -1 on error.
/* Three strategies: fixed-size codecs use enc->frame_size; PCM-like codecs
 * (frame_size <= 1) derive it from bits per sample and channel count; for
 * the rest (e.g. ADPCM) it is estimated from bit_rate and sample_rate.
 * Vorbis is special-cased in a line not visible here. */
637 static int get_audio_frame_size(AVCodecContext *enc, int size)
641 if(enc->codec_id == CODEC_ID_VORBIS)
644 if (enc->frame_size <= 1) {
645 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
647 if (bits_per_sample) {
648 if (enc->channels == 0)
650 frame_size = (size << 3) / (bits_per_sample * enc->channels);
652 /* used for example by ADPCM codecs */
653 if (enc->bit_rate == 0)
655 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
658 frame_size = enc->frame_size;
665 * Return the frame duration in seconds. Return 0 if not available.
/* Output is the rational *pnum / *pden in stream time-base units.
 * Video: prefer the stream time base when it is plausibly per-frame
 * (num*1000 > den heuristic), else the codec time base, scaled up for
 * repeated fields/frames reported by the parser. Audio: derived from the
 * decoded sample count and sample rate. */
667 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
668 AVCodecParserContext *pc, AVPacket *pkt)
674 switch(st->codec->codec_type) {
675 case CODEC_TYPE_VIDEO:
676 if(st->time_base.num*1000LL > st->time_base.den){
677 *pnum = st->time_base.num;
678 *pden = st->time_base.den;
679 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
680 *pnum = st->codec->time_base.num;
681 *pden = st->codec->time_base.den;
682 if (pc && pc->repeat_pict) {
684 *pnum = (*pnum) * (2 + pc->repeat_pict);
688 case CODEC_TYPE_AUDIO:
689 frame_size = get_audio_frame_size(st->codec, pkt->size);
693 *pden = st->codec->sample_rate;
/* Return nonzero for codecs where every frame is a keyframe: all audio
 * codecs, and the intra-only video codecs enumerated in the switch
 * (several case labels fall in lines not visible here). Used to force
 * PKT_FLAG_KEY in compute_pkt_fields(). */
700 static int is_intra_only(AVCodecContext *enc){
701 if(enc->codec_type == CODEC_TYPE_AUDIO){
703 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
704 switch(enc->codec_id){
706 case CODEC_ID_MJPEGB:
708 case CODEC_ID_RAWVIDEO:
709 case CODEC_ID_DVVIDEO:
710 case CODEC_ID_HUFFYUV:
711 case CODEC_ID_FFVHUFF:
716 case CODEC_ID_JPEG2000:
/* Once the first DTS of a stream becomes known, shift every already
 * buffered packet of that stream by the derived first_dts offset so all
 * timestamps share the same origin, and initialize st->start_time.
 * Runs only once per stream (guarded by first_dts != AV_NOPTS_VALUE). */
724 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
725 int64_t dts, int64_t pts)
727 AVStream *st= s->streams[stream_index];
728 AVPacketList *pktl= s->packet_buffer;
730 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
733 st->first_dts= dts - st->cur_dts;
736 for(; pktl; pktl= pktl->next){
737 if(pktl->pkt.stream_index != stream_index)
739 //FIXME think more about this check
740 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
741 pktl->pkt.pts += st->first_dts;
743 if(pktl->pkt.dts != AV_NOPTS_VALUE)
744 pktl->pkt.dts += st->first_dts;
746 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
747 st->start_time= pktl->pkt.pts;
749 if (st->start_time == AV_NOPTS_VALUE)
750 st->start_time = pts;
/* Backfill timestamps for buffered packets that carried no pts/dts/duration
 * (common for raw streams): first walk back from first_dts (or forward from
 * cur_dts) to find the dts of the oldest such packet, then assign each one
 * a dts (and pts, when no B-frames reorder output) spaced pkt->duration
 * apart. */
753 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
755 AVPacketList *pktl= s->packet_buffer;
758 if(st->first_dts != AV_NOPTS_VALUE){
759 cur_dts= st->first_dts;
760 for(; pktl; pktl= pktl->next){
761 if(pktl->pkt.stream_index == pkt->stream_index){
762 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
764 cur_dts -= pkt->duration;
767 pktl= s->packet_buffer;
768 st->first_dts = cur_dts;
769 }else if(st->cur_dts)
772 for(; pktl; pktl= pktl->next){
773 if(pktl->pkt.stream_index != pkt->stream_index)
775 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
776 && !pktl->pkt.duration){
777 pktl->pkt.dts= cur_dts;
778 if(!st->codec->has_b_frames)
779 pktl->pkt.pts= cur_dts;
780 cur_dts += pkt->duration;
781 pktl->pkt.duration= pkt->duration;
785 if(st->first_dts == AV_NOPTS_VALUE)
786 st->cur_dts= cur_dts;
/* Central timestamp fix-up applied to every demuxed packet: estimates a
 * missing duration, detects B-frame presentation delay, interpolates
 * missing pts/dts from st->cur_dts, corrects pts-wrap and parser-offset
 * artifacts, reorders a small pts window into dts for delayed codecs,
 * and finally derives the keyframe flag. The exact control flow depends
 * on lines not visible in this view; comments below annotate only what
 * the visible lines establish. */
789 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
790 AVCodecParserContext *pc, AVPacket *pkt)
792 int num, den, presentation_delayed, delay, i;
795 /* do we have a video B-frame ? */
796 delay= st->codec->has_b_frames;
797 presentation_delayed = 0;
798 /* XXX: need has_b_frame, but cannot get it if the codec is
801 pc && pc->pict_type != FF_B_TYPE)
802 presentation_delayed = 1;
/* undo pts-counter wraparound when dts ended up above pts */
804 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
805 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
806 pkt->dts -= 1LL<<st->pts_wrap_bits;
809 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
810 // we take the conservative approach and discard both
811 // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
812 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
813 av_log(s, AV_LOG_ERROR, "invalid dts/pts combination\n");
814 pkt->dts= pkt->pts= AV_NOPTS_VALUE;
817 if (pkt->duration == 0) {
818 compute_frame_duration(&num, &den, st, pc, pkt);
820 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
822 if(pkt->duration != 0 && s->packet_buffer)
823 update_initial_durations(s, st, pkt);
827 /* correct timestamps with byte offset if demuxers only have timestamps
828 on packet boundaries */
829 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
830 /* this will estimate bitrate based on this frame's duration and size */
831 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
832 if(pkt->pts != AV_NOPTS_VALUE)
834 if(pkt->dts != AV_NOPTS_VALUE)
838 /* This may be redundant, but it should not hurt. */
839 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
840 presentation_delayed = 1;
842 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
843 /* interpolate PTS and DTS if they are not present */
844 if(delay==0 || (delay==1 && pc)){
845 if (presentation_delayed) {
846 /* DTS = decompression timestamp */
847 /* PTS = presentation timestamp */
848 if (pkt->dts == AV_NOPTS_VALUE)
849 pkt->dts = st->last_IP_pts;
850 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
851 if (pkt->dts == AV_NOPTS_VALUE)
852 pkt->dts = st->cur_dts;
854 /* this is tricky: the dts must be incremented by the duration
855 of the frame we are displaying, i.e. the last I- or P-frame */
856 if (st->last_IP_duration == 0)
857 st->last_IP_duration = pkt->duration;
858 if(pkt->dts != AV_NOPTS_VALUE)
859 st->cur_dts = pkt->dts + st->last_IP_duration;
860 st->last_IP_duration = pkt->duration;
861 st->last_IP_pts= pkt->pts;
862 /* cannot compute PTS if not present (we can compute it only
863 by knowing the future */
864 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
/* heuristic: snap pts forward by one duration when that lands closer
 * to the expected cur_dts (handles off-by-one-frame pts) */
865 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
866 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
867 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
868 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
869 pkt->pts += pkt->duration;
870 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
874 /* presentation is not delayed : PTS and DTS are the same */
875 if(pkt->pts == AV_NOPTS_VALUE)
877 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
878 if(pkt->pts == AV_NOPTS_VALUE)
879 pkt->pts = st->cur_dts;
881 if(pkt->pts != AV_NOPTS_VALUE)
882 st->cur_dts = pkt->pts + pkt->duration;
/* delayed codecs: derive dts as the minimum of the last 'delay'+1 pts
 * values using a small insertion sort over st->pts_buffer */
886 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
887 st->pts_buffer[0]= pkt->pts;
888 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
889 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
890 if(pkt->dts == AV_NOPTS_VALUE)
891 pkt->dts= st->pts_buffer[0];
893 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
895 if(pkt->dts > st->cur_dts)
896 st->cur_dts = pkt->dts;
899 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
/* keyframe flag: intra-only codecs are always keyframes; otherwise trust
 * the parser's key_frame, falling back to I-picture type when unknown */
902 if(is_intra_only(st->codec))
903 pkt->flags |= PKT_FLAG_KEY;
906 /* keyframe computation */
907 if (pc->key_frame == 1)
908 pkt->flags |= PKT_FLAG_KEY;
909 else if (pc->key_frame == -1 && pc->pict_type == FF_I_TYPE)
910 pkt->flags |= PKT_FLAG_KEY;
/* Destructor for packets that do NOT own their buffer: just detach the
 * pointer without freeing (the data belongs to someone else). */
914 void av_destruct_packet_nofree(AVPacket *pkt)
916 pkt->data = NULL; pkt->size = 0;
/* Produce one parsed frame: either pass the current raw packet through
 * unchanged (no parser needed), feed its remaining bytes to the stream's
 * parser until a whole frame is assembled, or fetch the next raw packet
 * via av_read_packet() when the current one is exhausted — flushing the
 * parsers for trailing frames at EOF. Lazily creates parsers per stream
 * and validates pts<dts anomalies with a warning. Several branch
 * boundaries fall in lines not visible here. */
919 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
927 /* select current input stream component */
930 if (!st->need_parsing || !st->parser) {
931 /* no parsing needed: we just output the packet as is */
932 /* raw data support */
933 *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
934 compute_pkt_fields(s, st, NULL, pkt);
937 } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
938 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
939 st->cur_ptr, st->cur_len,
940 st->cur_pkt.pts, st->cur_pkt.dts);
941 st->cur_pkt.pts = AV_NOPTS_VALUE;
942 st->cur_pkt.dts = AV_NOPTS_VALUE;
943 /* increment read pointer */
947 /* return packet if any */
949 pkt->pos = st->cur_pkt.pos; // Isn't quite accurate but close.
952 pkt->stream_index = st->index;
953 pkt->pts = st->parser->pts;
954 pkt->dts = st->parser->dts;
955 pkt->destruct = av_destruct_packet_nofree;
956 compute_pkt_fields(s, st, st->parser, pkt);
958 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
959 ff_reduce_index(s, st->index);
960 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
961 0, 0, AVINDEX_KEYFRAME);
/* current packet fully consumed by the parser */
968 av_free_packet(&st->cur_pkt);
973 /* read next packet */
974 ret = av_read_packet(s, &cur_pkt);
976 if (ret == AVERROR(EAGAIN))
978 /* return the last frames, if any */
979 for(i = 0; i < s->nb_streams; i++) {
981 if (st->parser && st->need_parsing) {
982 av_parser_parse(st->parser, st->codec,
983 &pkt->data, &pkt->size,
985 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
990 /* no more packets: really terminate parsing */
993 st = s->streams[cur_pkt.stream_index];
994 st->cur_pkt= cur_pkt;
996 if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
997 st->cur_pkt.dts != AV_NOPTS_VALUE &&
998 st->cur_pkt.pts < st->cur_pkt.dts){
999 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1000 st->cur_pkt.stream_index,
1004 // av_free_packet(&st->cur_pkt);
1008 if(s->debug & FF_FDEBUG_TS)
1009 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
1010 st->cur_pkt.stream_index,
1017 st->cur_ptr = st->cur_pkt.data;
1018 st->cur_len = st->cur_pkt.size;
/* lazily create the parser the first time this stream needs one */
1019 if (st->need_parsing && !st->parser) {
1020 st->parser = av_parser_init(st->codec->codec_id);
1022 /* no parser available: just output the raw packets */
1023 st->need_parsing = AVSTREAM_PARSE_NONE;
1024 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
1025 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1027 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
1028 st->parser->next_frame_offset=
1029 st->parser->cur_offset= st->cur_pkt.pos;
1034 if(s->debug & FF_FDEBUG_TS)
1035 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
/* Public frame reader. With AVFMT_FLAG_GENPTS set, frames are buffered
 * until a missing pts can be inferred from a later packet's dts (B-frame
 * reordering); otherwise frames come straight from the packet buffer or
 * av_read_frame_internal(). Packets that must be buffered are duplicated
 * so they own their data. */
1045 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1049 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1052 pktl = s->packet_buffer;
1054 AVPacket *next_pkt= &pktl->pkt;
1056 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
/* look ahead for a non-B packet of the same stream whose dts gives
 * this packet its presentation time */
1057 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1058 if( pktl->pkt.stream_index == next_pkt->stream_index
1059 && next_pkt->dts < pktl->pkt.dts
1060 && pktl->pkt.pts != pktl->pkt.dts //not b frame
1061 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
1062 next_pkt->pts= pktl->pkt.dts;
1066 pktl = s->packet_buffer;
1069 if( next_pkt->pts != AV_NOPTS_VALUE
1070 || next_pkt->dts == AV_NOPTS_VALUE
1072 /* read packet from packet buffer, if there is data */
1074 s->packet_buffer = pktl->next;
1080 int ret= av_read_frame_internal(s, pkt);
1082 if(pktl && ret != AVERROR(EAGAIN)){
1089 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1090 &s->packet_buffer_end)) < 0)
1091 return AVERROR(ENOMEM);
/* non-genpts path: no buffering should be active */
1093 assert(!s->packet_buffer);
1094 return av_read_frame_internal(s, pkt);
1099 /* XXX: suppress the packet queue */
/* Free every buffered packet and its list node (loop structure partially
 * outside this view). Called when seeking invalidates buffered data. */
1100 static void flush_packet_queue(AVFormatContext *s)
1105 pktl = s->packet_buffer;
1108 s->packet_buffer = pktl->next;
1109 av_free_packet(&pktl->pkt);
1114 /*******************************************************/
/* Pick the stream to seek on when the caller gives none: the first video
 * stream if any, else the first audio stream, else stream 0. Returns a
 * negative value when the context has no streams (early-return line not
 * visible here). */
1117 int av_find_default_stream_index(AVFormatContext *s)
1119 int first_audio_index = -1;
1123 if (s->nb_streams <= 0)
1125 for(i = 0; i < s->nb_streams; i++) {
1127 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1130 if (first_audio_index < 0 && st->codec->codec_type == CODEC_TYPE_AUDIO)
1131 first_audio_index = i;
1133 return first_audio_index >= 0 ? first_audio_index : 0;
1137 * Flush the frame reader.
/* Drop all buffered packets and reset per-stream parser/timestamp state so
 * reading can restart cleanly after a seek. */
1139 static void av_read_frame_flush(AVFormatContext *s)
1144 flush_packet_queue(s);
1148 /* for each stream, reset read state */
1149 for(i = 0; i < s->nb_streams; i++) {
1153 av_parser_close(st->parser);
1155 av_free_packet(&st->cur_pkt);
1157 st->last_IP_pts = AV_NOPTS_VALUE;
1158 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
/* After a seek, propagate 'timestamp' (expressed in ref_st's time base) to
 * every stream's cur_dts, rescaled into each stream's own time base. */
1165 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1168 for(i = 0; i < s->nb_streams; i++) {
1169 AVStream *st = s->streams[i];
1171 st->cur_dts = av_rescale(timestamp,
1172 st->time_base.den * (int64_t)ref_st->time_base.num,
1173 st->time_base.num * (int64_t)ref_st->time_base.den);
/* Keep the seek index within s->max_index_size bytes: when the entry count
 * hits the limit, halve it by keeping every second entry. */
1177 void ff_reduce_index(AVFormatContext *s, int stream_index)
1179 AVStream *st= s->streams[stream_index];
1180 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1182 if((unsigned)st->nb_index_entries >= max_entries){
1184 for(i=0; 2*i<st->nb_index_entries; i++)
1185 st->index_entries[i]= st->index_entries[2*i];
1186 st->nb_index_entries= i;
/* Insert a (pos, timestamp) entry into the stream's sorted seek index,
 * growing the array with av_fast_realloc (with an overflow guard on the
 * entry count). Appends when the timestamp is newest; otherwise memmoves
 * to keep order. A duplicate timestamp at the same pos only widens
 * min_distance rather than adding an entry. */
1190 int av_add_index_entry(AVStream *st,
1191 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1193 AVIndexEntry *entries, *ie;
1196 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1199 entries = av_fast_realloc(st->index_entries,
1200 &st->index_entries_allocated_size,
1201 (st->nb_index_entries + 1) *
1202 sizeof(AVIndexEntry));
1206 st->index_entries= entries;
1208 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1211 index= st->nb_index_entries++;
1212 ie= &entries[index];
1213 assert(index==0 || ie[-1].timestamp < timestamp);
1215 ie= &entries[index];
1216 if(ie->timestamp != timestamp){
1217 if(ie->timestamp <= timestamp)
1219 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1220 st->nb_index_entries++;
1221 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1222 distance= ie->min_distance;
1226 ie->timestamp = timestamp;
1227 ie->min_distance= distance;
/* Binary-search the sorted index for wanted_timestamp. AVSEEK_FLAG_BACKWARD
 * selects the entry at or before the target, otherwise at or after.
 * Without AVSEEK_FLAG_ANY, the result is walked to the nearest keyframe
 * entry in the seek direction. Returns -1 when out of range (bounds
 * handling partly in lines not visible here). */
1234 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1237 AVIndexEntry *entries= st->index_entries;
1238 int nb_entries= st->nb_index_entries;
1247 timestamp = entries[m].timestamp;
1248 if(timestamp >= wanted_timestamp)
1250 if(timestamp <= wanted_timestamp)
1253 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1255 if(!(flags & AVSEEK_FLAG_ANY)){
1256 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1257 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/* Seek using the demuxer's read_timestamp(): seed the [pos_min, pos_max]
 * search window from cached index entries around target_ts when available,
 * run av_gen_search() to locate the byte position, seek the IO layer
 * there, and synchronize all streams' cur_dts to the found timestamp. */
1268 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1269 AVInputFormat *avif= s->iformat;
1270 int64_t pos_min, pos_max, pos, pos_limit;
1271 int64_t ts_min, ts_max, ts;
1275 if (stream_index < 0)
1279 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1283 ts_min= AV_NOPTS_VALUE;
1284 pos_limit= -1; //gcc falsely says it may be uninitialized
1286 st= s->streams[stream_index];
1287 if(st->index_entries){
/* lower bound: nearest index entry at or before the target */
1290 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1291 index= FFMAX(index, 0);
1292 e= &st->index_entries[index];
1294 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1296 ts_min= e->timestamp;
1298 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
/* upper bound: nearest index entry at or after the target */
1305 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1306 assert(index < st->nb_index_entries);
1308 e= &st->index_entries[index];
1309 assert(e->timestamp >= target_ts);
1311 ts_max= e->timestamp;
1312 pos_limit= pos_max - e->min_distance;
1314 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1315 pos_max,pos_limit, ts_max);
1320 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1325 url_fseek(s->pb, pos, SEEK_SET);
1327 av_update_cur_dts(s, st, ts);
/* Generic timestamp-to-byte-position search. Unknown bounds are resolved
 * first: ts_min by reading at data_offset, ts_max by stepping back from
 * the file end until a timestamp is found, then verifying it is the last
 * one. The main loop then narrows [pos_min, pos_limit] by, in order of
 * preference, linear interpolation on the timestamp range, bisection when
 * interpolation stalls, and linear stepping as a last resort. Returns the
 * chosen position and stores the matched timestamp in *ts_ret; several
 * assignments sit in lines not visible here. */
1332 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1334 int64_t start_pos, filesize;
1338 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1341 if(ts_min == AV_NOPTS_VALUE){
1342 pos_min = s->data_offset;
1343 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1344 if (ts_min == AV_NOPTS_VALUE)
1348 if(ts_max == AV_NOPTS_VALUE){
1350 filesize = url_fsize(s->pb);
1351 pos_max = filesize - 1;
1354 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1356 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1357 if (ts_max == AV_NOPTS_VALUE)
/* confirm ts_max is the last timestamp in the file */
1361 int64_t tmp_pos= pos_max + 1;
1362 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1363 if(tmp_ts == AV_NOPTS_VALUE)
1367 if(tmp_pos >= filesize)
1373 if(ts_min > ts_max){
1375 }else if(ts_min == ts_max){
1380 while (pos_min < pos_limit) {
1382 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1386 assert(pos_limit <= pos_max);
1389 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1390 // interpolate position (better than dichotomy)
1391 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1392 + pos_min - approximate_keyframe_distance;
1393 }else if(no_change==1){
1394 // bisection, if interpolation failed to change min or max pos last time
1395 pos = (pos_min + pos_limit)>>1;
1397 /* linear search if bisection failed, can only happen if there
1398 are very few or no keyframes between min/max */
1403 else if(pos > pos_limit)
1407 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1413 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1415 if(ts == AV_NOPTS_VALUE){
1416 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1419 assert(ts != AV_NOPTS_VALUE);
1420 if (target_ts <= ts) {
1421 pos_limit = start_pos - 1;
1425 if (target_ts >= ts) {
/* pick the bound on the requested side of the target */
1431 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1432 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1435 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1437 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1438 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1439 pos, ts_min, target_ts, ts_max);
/* Byte-position seek (AVSEEK_FLAG_BYTE): clamp 'pos' to
 * [data_offset, filesize-1], seek the IO layer, and reset stream DTS
 * state via av_update_cur_dts(). */
1445 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1446 int64_t pos_min, pos_max;
1450 if (stream_index < 0)
1453 st= s->streams[stream_index];
1456 pos_min = s->data_offset;
1457 pos_max = url_fsize(s->pb) - 1;
1459 if (pos < pos_min) pos= pos_min;
1460 else if(pos > pos_max) pos= pos_max;
1462 url_fseek(s->pb, pos, SEEK_SET);
1465 av_update_cur_dts(s, st, ts);
/* Index-based fallback seek. If the target lies beyond the current index,
 * the index is extended by seeking to its last known entry (or the file
 * start) and reading frames until a keyframe past 'timestamp' appears.
 * Then either the demuxer's own read_seek is tried or the file position
 * is set from the located index entry. */
1470 static int av_seek_frame_generic(AVFormatContext *s,
1471 int stream_index, int64_t timestamp, int flags)
1477 st = s->streams[stream_index];
1479 index = av_index_search_timestamp(st, timestamp, flags);
1481 if(index < 0 || index==st->nb_index_entries-1){
1485 if(st->nb_index_entries){
1486 assert(st->index_entries);
1487 ie= &st->index_entries[st->nb_index_entries-1];
1488 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1490 av_update_cur_dts(s, st, ie->timestamp);
1492 if ((ret = url_fseek(s->pb, 0, SEEK_SET)) < 0)
/* read ahead to populate the index up to the target timestamp */
1498 ret = av_read_frame(s, &pkt);
1499 }while(ret == AVERROR(EAGAIN));
1502 av_free_packet(&pkt);
1503 if(stream_index == pkt.stream_index){
1504 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1508 index = av_index_search_timestamp(st, timestamp, flags);
1513 av_read_frame_flush(s);
1514 if (s->iformat->read_seek){
1515 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1518 ie = &st->index_entries[index];
1519 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1521 av_update_cur_dts(s, st, ie->timestamp);
/* Public seek entry point. Dispatch order: byte seek, demuxer-specific
 * read_seek, binary search over read_timestamp, generic index seek. */
1526 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
/* drop any buffered packets before repositioning */
1531 av_read_frame_flush(s);
1533 if(flags & AVSEEK_FLAG_BYTE)
1534 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1536 if(stream_index < 0){
1537 stream_index= av_find_default_stream_index(s);
1538 if(stream_index < 0)
1541 st= s->streams[stream_index];
1542 /* timestamp for default must be expressed in AV_TIME_BASE units */
1543 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1546 /* first, we try the format specific seek */
1547 if (s->iformat->read_seek)
1548 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
/* fall back on timestamp binary search, then on the generic seek */
1555 if(s->iformat->read_timestamp)
1556 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1558 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1561 /*******************************************************/
1564 * Returns TRUE if the stream has accurate duration in any stream.
1566 * @return TRUE if the stream has accurate duration for at least one component.
1568 static int av_has_duration(AVFormatContext *ic)
1573 for(i = 0;i < ic->nb_streams; i++) {
1574 st = ic->streams[i];
/* one stream with a known duration is sufficient */
1575 if (st->duration != AV_NOPTS_VALUE)
1582 * Estimate the stream timings from the one of each components.
1584 * Also computes the global bitrate if possible.
1586 static void av_update_stream_timings(AVFormatContext *ic)
1588 int64_t start_time, start_time1, end_time, end_time1;
1589 int64_t duration, duration1;
/* scan every stream, converting its timings to AV_TIME_BASE units,
 * and track the earliest start, latest end and longest duration */
1593 start_time = INT64_MAX;
1594 end_time = INT64_MIN;
1595 duration = INT64_MIN;
1596 for(i = 0;i < ic->nb_streams; i++) {
1597 st = ic->streams[i];
1598 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1599 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1600 if (start_time1 < start_time)
1601 start_time = start_time1;
1602 if (st->duration != AV_NOPTS_VALUE) {
1603 end_time1 = start_time1
1604 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1605 if (end_time1 > end_time)
1606 end_time = end_time1;
1609 if (st->duration != AV_NOPTS_VALUE) {
1610 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1611 if (duration1 > duration)
1612 duration = duration1;
1615 if (start_time != INT64_MAX) {
1616 ic->start_time = start_time;
1617 if (end_time != INT64_MIN) {
/* prefer the end-start span when it exceeds the longest stream duration */
1618 if (end_time - start_time > duration)
1619 duration = end_time - start_time;
1622 if (duration != INT64_MIN) {
1623 ic->duration = duration;
1624 if (ic->file_size > 0) {
1625 /* compute the bitrate */
1626 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1627 (double)ic->duration;
/* Propagate the container-level start time and duration (computed by
 * av_update_stream_timings) to any stream still missing its own,
 * rescaled into that stream's time base. */
1632 static void fill_all_stream_timings(AVFormatContext *ic)
1637 av_update_stream_timings(ic);
1638 for(i = 0;i < ic->nb_streams; i++) {
1639 st = ic->streams[i];
1640 if (st->start_time == AV_NOPTS_VALUE) {
1641 if(ic->start_time != AV_NOPTS_VALUE)
1642 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1643 if(ic->duration != AV_NOPTS_VALUE)
1644 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/* Derive missing per-stream durations from total bitrate and file size:
 * duration = 8 * filesize / bit_rate. */
1649 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1651 int64_t filesize, duration;
1655 /* if bit_rate is already set, we believe it */
1656 if (ic->bit_rate == 0) {
/* otherwise sum the per-stream codec bitrates */
1658 for(i=0;i<ic->nb_streams;i++) {
1659 st = ic->streams[i];
1660 bit_rate += st->codec->bit_rate;
1662 ic->bit_rate = bit_rate;
1665 /* if duration is already set, we believe it */
1666 if (ic->duration == AV_NOPTS_VALUE &&
1667 ic->bit_rate != 0 &&
1668 ic->file_size != 0) {
1669 filesize = ic->file_size;
1671 for(i = 0; i < ic->nb_streams; i++) {
1672 st = ic->streams[i];
/* rescale into the stream's own time base */
1673 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1674 if (st->duration == AV_NOPTS_VALUE)
1675 st->duration = duration;
/* maximum number of bytes scanned at each end of the file when
 * estimating timings from PTS values */
1681 #define DURATION_MAX_READ_SIZE 250000
1683 /* only usable for MPEG-PS streams */
1684 static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1686 AVPacket pkt1, *pkt = &pkt1;
1688 int read_size, i, ret;
1690 int64_t filesize, offset, duration;
1694 /* flush packet queue */
1695 flush_packet_queue(ic);
/* reset any per-stream parser/packet state before scanning */
1697 for(i=0;i<ic->nb_streams;i++) {
1698 st = ic->streams[i];
1700 av_parser_close(st->parser);
1702 av_free_packet(&st->cur_pkt);
1706 /* we read the first packets to get the first PTS (not fully
1707 accurate, but it is enough now) */
1708 url_fseek(ic->pb, 0, SEEK_SET);
1711 if (read_size >= DURATION_MAX_READ_SIZE)
1713 /* if all info is available, we can stop */
1714 for(i = 0;i < ic->nb_streams; i++) {
1715 st = ic->streams[i];
1716 if (st->start_time == AV_NOPTS_VALUE)
1719 if (i == ic->nb_streams)
/* EAGAIN is transient; retry the packet read */
1723 ret = av_read_packet(ic, pkt);
1724 }while(ret == AVERROR(EAGAIN));
1727 read_size += pkt->size;
1728 st = ic->streams[pkt->stream_index];
/* first PTS seen on a stream becomes its start time */
1729 if (pkt->pts != AV_NOPTS_VALUE) {
1730 if (st->start_time == AV_NOPTS_VALUE)
1731 st->start_time = pkt->pts;
1733 av_free_packet(pkt);
1736 /* estimate the end time (duration) */
1737 /* XXX: may need to support wrapping */
1738 filesize = ic->file_size;
1739 offset = filesize - DURATION_MAX_READ_SIZE;
1743 url_fseek(ic->pb, offset, SEEK_SET);
1746 if (read_size >= DURATION_MAX_READ_SIZE)
1750 ret = av_read_packet(ic, pkt);
1751 }while(ret == AVERROR(EAGAIN));
1754 read_size += pkt->size;
1755 st = ic->streams[pkt->stream_index];
/* last PTS seen near EOF gives the longest duration estimate */
1756 if (pkt->pts != AV_NOPTS_VALUE &&
1757 st->start_time != AV_NOPTS_VALUE) {
1758 end_time = pkt->pts;
1759 duration = end_time - st->start_time;
1761 if (st->duration == AV_NOPTS_VALUE ||
1762 st->duration < duration)
1763 st->duration = duration;
1766 av_free_packet(pkt);
/* fill gaps, then restore the original file position and DTS state */
1769 fill_all_stream_timings(ic);
1771 url_fseek(ic->pb, old_offset, SEEK_SET);
1772 for(i=0; i<ic->nb_streams; i++){
1774 st->cur_dts= st->first_dts;
1775 st->last_IP_pts = AV_NOPTS_VALUE;
/* Pick the best available timing-estimation strategy for this file:
 * PTS scan for MPEG-PS/TS, per-stream timings if present, else bitrate. */
1779 static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset)
1783 /* get the file size, if possible */
1784 if (ic->iformat->flags & AVFMT_NOFILE) {
1787 file_size = url_fsize(ic->pb);
1791 ic->file_size = file_size;
/* MPEG-PS/TS carry no global duration header; scan PTS at both ends */
1793 if ((!strcmp(ic->iformat->name, "mpeg") ||
1794 !strcmp(ic->iformat->name, "mpegts")) &&
1795 file_size && !url_is_streamed(ic->pb)) {
1796 /* get accurate estimate from the PTSes */
1797 av_estimate_timings_from_pts(ic, old_offset);
1798 } else if (av_has_duration(ic)) {
1799 /* at least one component has timings - we use them for all
1801 fill_all_stream_timings(ic);
1803 /* less precise: use bitrate info */
1804 av_estimate_timings_from_bit_rate(ic);
1806 av_update_stream_timings(ic);
/* NOTE(review): this dump is likely inside a debug-only #if block
 * (elided here) -- confirm before assuming it runs in release builds */
1812 for(i = 0;i < ic->nb_streams; i++) {
1813 st = ic->streams[i];
1814 printf("%d: start_time: %0.3f duration: %0.3f\n",
1815 i, (double)st->start_time / AV_TIME_BASE,
1816 (double)st->duration / AV_TIME_BASE);
1818 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1819 (double)ic->start_time / AV_TIME_BASE,
1820 (double)ic->duration / AV_TIME_BASE,
1821 ic->bit_rate / 1000);
/* Return nonzero when 'enc' carries enough parameters to be usable
 * (sample rate/channels/format for audio, width/pix_fmt for video,
 * and a valid codec id in all cases). */
1826 static int has_codec_parameters(AVCodecContext *enc)
1829 switch(enc->codec_type) {
1830 case CODEC_TYPE_AUDIO:
1831 val = enc->sample_rate && enc->channels && enc->sample_fmt != SAMPLE_FMT_NONE;
/* Vorbis/AAC additionally need a known frame size */
1832 if(!enc->frame_size &&
1833 (enc->codec_id == CODEC_ID_VORBIS ||
1834 enc->codec_id == CODEC_ID_AAC))
1837 case CODEC_TYPE_VIDEO:
1838 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1844 return enc->codec_id != CODEC_ID_NONE && val != 0;
/* Open a decoder if needed and decode one frame so that missing codec
 * parameters (dimensions, pixel/sample format, ...) get filled in. */
1847 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1851 int got_picture, data_size, ret=0;
1854 if(!st->codec->codec){
1855 codec = avcodec_find_decoder(st->codec->codec_id);
1858 ret = avcodec_open(st->codec, codec);
/* decode only while parameters are still missing */
1863 if(!has_codec_parameters(st->codec)){
1864 switch(st->codec->codec_type) {
1865 case CODEC_TYPE_VIDEO:
1866 ret = avcodec_decode_video(st->codec, &picture,
1867 &got_picture, data, size);
1869 case CODEC_TYPE_AUDIO:
/* output buffer must be at least the max audio frame size */
1870 data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1871 samples = av_malloc(data_size);
1874 ret = avcodec_decode_audio2(st->codec, samples,
1875 &data_size, data, size);
/* Look up the container tag for a codec id in a tag table
 * (table is terminated by an entry with id == CODEC_ID_NONE). */
1886 unsigned int codec_get_tag(const AVCodecTag *tags, int id)
1888 while (tags->id != CODEC_ID_NONE) {
/* Reverse lookup: map a container tag to a codec id. First pass is an
 * exact match; second pass compares the four tag bytes case-insensitively
 * (FourCCs often differ only in case). */
1896 enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag)
1899 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1900 if(tag == tags[i].tag)
1903 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1904 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1905 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1906 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1907 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1910 return CODEC_ID_NONE;
/* Search a NULL-terminated list of tag tables for the given codec id. */
1913 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
1916 for(i=0; tags && tags[i]; i++){
1917 int tag= codec_get_tag(tags[i], id);
/* Search a NULL-terminated list of tag tables for the given tag;
 * return the first matching codec id, or CODEC_ID_NONE. */
1923 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
1926 for(i=0; tags && tags[i]; i++){
1927 enum CodecID id= codec_get_id(tags[i], tag);
1928 if(id!=CODEC_ID_NONE) return id;
1930 return CODEC_ID_NONE;
/* Give every chapter with an unknown end time the start of the next
 * chapter; the final chapter ends at start_time + duration. */
1933 static void compute_chapters_end(AVFormatContext *s)
1937 for (i=0; i+1<s->nb_chapters; i++)
1938 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
/* chapters must be sorted and share a time base for this to be valid */
1939 assert(s->chapters[i]->start <= s->chapters[i+1]->start);
1940 assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
1941 s->chapters[i]->end = s->chapters[i+1]->start;
1944 if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
1945 assert(s->start_time != AV_NOPTS_VALUE);
1946 assert(s->duration > 0);
/* rescale container end time into the chapter's own time base */
1947 s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
1949 s->chapters[i]->time_base);
1953 /* absolute maximum size we read until we abort */
1954 #define MAX_READ_SIZE 5000000
/* 60*12 NTSC-style candidate rates (i*1001) plus 5 common exact rates */
1956 #define MAX_STD_TIMEBASES (60*12+5)
/* Map a candidate index to a frame rate expressed in 1/(12*1001) units. */
1957 static int get_std_framerate(int i){
1958 if(i<60*12) return i*1001;
1959 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
1963 * Is the time base unreliable.
1964 * This is a heuristic to balance between quick acceptance of the values in
1965 * the headers vs. some extra checks.
1966 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
1967 * MPEG-2 commonly misuses field repeat flags to store different framerates.
1968 * And there are "variable" fps files this needs to detect as well.
1970 static int tb_unreliable(AVCodecContext *c){
/* suspicious when the implied fps is >= 101 or < 5, or for MPEG-2 */
1971 if( c->time_base.den >= 101L*c->time_base.num
1972 || c->time_base.den < 5L*c->time_base.num
1973 /* || c->codec_tag == AV_RL32("DIVX")
1974 || c->codec_tag == AV_RL32("XVID")*/
1975 || c->codec_id == CODEC_ID_MPEG2VIDEO)
/* Read packets from the input until every stream's codec parameters are
 * known (or limits are hit), estimate frame rates from inter-packet DTS
 * deltas, then compute timings and chapter ends. Read packets are kept
 * in ic->packet_buffer so no data is lost for the caller. */
1980 int av_find_stream_info(AVFormatContext *ic)
1982 int i, count, ret, read_size, j;
1984 AVPacket pkt1, *pkt;
/* per-stream DTS tracking used for the frame-rate guessing below */
1985 int64_t last_dts[MAX_STREAMS];
1986 int duration_count[MAX_STREAMS]={0};
1987 double (*duration_error)[MAX_STD_TIMEBASES];
1988 int64_t old_offset = url_ftell(ic->pb);
1989 int64_t codec_info_duration[MAX_STREAMS]={0};
1990 int codec_info_nb_frames[MAX_STREAMS]={0};
1992 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
1993 if (!duration_error) return AVERROR(ENOMEM);
1995 for(i=0;i<ic->nb_streams;i++) {
1996 st = ic->streams[i];
1997 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
1998 /* if(!st->time_base.num)
2000 if(!st->codec->time_base.num)
2001 st->codec->time_base= st->time_base;
2003 //only for the split stuff
2005 st->parser = av_parser_init(st->codec->codec_id);
2006 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2007 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2012 for(i=0;i<MAX_STREAMS;i++){
2013 last_dts[i]= AV_NOPTS_VALUE;
2019 /* check if one codec still needs to be handled */
2020 for(i=0;i<ic->nb_streams;i++) {
2021 st = ic->streams[i];
2022 if (!has_codec_parameters(st->codec))
2024 /* variable fps and no guess at the real fps */
2025 if( tb_unreliable(st->codec)
2026 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
2028 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2030 if(st->first_dts == AV_NOPTS_VALUE)
2033 if (i == ic->nb_streams) {
2034 /* NOTE: if the format has no header, then we need to read
2035 some packets to get most of the streams, so we cannot
2037 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2038 /* if we found the info for all the codecs, we can stop */
2043 /* we did not get all the codec info, but we read too much data */
2044 if (read_size >= MAX_READ_SIZE) {
2049 /* NOTE: a new stream can be added there if no header in file
2050 (AVFMTCTX_NOHEADER) */
2051 ret = av_read_frame_internal(ic, &pkt1);
2052 if(ret == AVERROR(EAGAIN))
/* EOF or error: report which stream is still missing parameters */
2056 ret = -1; /* we could not have all the codec parameters before EOF */
2057 for(i=0;i<ic->nb_streams;i++) {
2058 st = ic->streams[i];
2059 if (!has_codec_parameters(st->codec)){
2061 avcodec_string(buf, sizeof(buf), st->codec, 0);
2062 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
/* keep the packet buffered so the caller can still demux it later */
2070 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2071 if(av_dup_packet(pkt) < 0) {
2072 av_free(duration_error);
2073 return AVERROR(ENOMEM);
2076 read_size += pkt->size;
2078 st = ic->streams[pkt->stream_index];
2079 if(codec_info_nb_frames[st->index]>1)
2080 codec_info_duration[st->index] += pkt->duration;
2081 if (pkt->duration != 0)
2082 codec_info_nb_frames[st->index]++;
/* accumulate squared error of the DTS delta against each candidate
 * standard frame rate; the best-fitting one is chosen afterwards */
2085 int index= pkt->stream_index;
2086 int64_t last= last_dts[index];
2087 int64_t duration= pkt->dts - last;
2089 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2090 double dur= duration * av_q2d(st->time_base);
2092 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2093 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2094 if(duration_count[index] < 2)
2095 memset(duration_error[index], 0, sizeof(*duration_error));
2096 for(i=1; i<MAX_STD_TIMEBASES; i++){
2097 int framerate= get_std_framerate(i);
2098 int ticks= lrintf(dur*framerate/(1001*12));
2099 double error= dur - ticks*1001*12/(double)framerate;
2100 duration_error[index][i] += error*error;
2102 duration_count[index]++;
2104 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
2105 last_dts[pkt->stream_index]= pkt->dts;
/* extract extradata (e.g. codec headers) via the parser's split hook */
2107 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2108 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2110 st->codec->extradata_size= i;
2111 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2112 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2113 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2117 /* if still no information, we try to open the codec and to
2118 decompress the frame. We try to avoid that in most cases as
2119 it takes longer and uses more memory. For MPEG-4, we need to
2120 decompress for QuickTime. */
2121 if (!has_codec_parameters(st->codec) /*&&
2122 (st->codec->codec_id == CODEC_ID_FLV1 ||
2123 st->codec->codec_id == CODEC_ID_H264 ||
2124 st->codec->codec_id == CODEC_ID_H263 ||
2125 st->codec->codec_id == CODEC_ID_H261 ||
2126 st->codec->codec_id == CODEC_ID_VORBIS ||
2127 st->codec->codec_id == CODEC_ID_MJPEG ||
2128 st->codec->codec_id == CODEC_ID_PNG ||
2129 st->codec->codec_id == CODEC_ID_PAM ||
2130 st->codec->codec_id == CODEC_ID_PGM ||
2131 st->codec->codec_id == CODEC_ID_PGMYUV ||
2132 st->codec->codec_id == CODEC_ID_PBM ||
2133 st->codec->codec_id == CODEC_ID_PPM ||
2134 st->codec->codec_id == CODEC_ID_SHORTEN ||
2135 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
2136 try_decode_frame(st, pkt->data, pkt->size);
/* stop analyzing a stream once max_analyze_duration is reached */
2138 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2144 // close codecs which were opened in try_decode_frame()
2145 for(i=0;i<ic->nb_streams;i++) {
2146 st = ic->streams[i];
2147 if(st->codec->codec)
2148 avcodec_close(st->codec);
2150 for(i=0;i<ic->nb_streams;i++) {
2151 st = ic->streams[i];
2152 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2153 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample)
2154 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
/* pick the standard frame rate with the least accumulated error */
2156 if(duration_count[i]
2157 && tb_unreliable(st->codec) /*&&
2158 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2159 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
2160 double best_error= 2*av_q2d(st->time_base);
2161 best_error= best_error*best_error*duration_count[i]*1000*12*30;
2163 for(j=1; j<MAX_STD_TIMEBASES; j++){
2164 double error= duration_error[i][j] * get_std_framerate(j);
2165 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2166 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2167 if(error < best_error){
2169 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, get_std_framerate(j), 12*1001, INT_MAX);
/* fall back to codec or stream time base for r_frame_rate */
2174 if (!st->r_frame_rate.num){
2175 if( st->codec->time_base.den * (int64_t)st->time_base.num
2176 <= st->codec->time_base.num * (int64_t)st->time_base.den){
2177 st->r_frame_rate.num = st->codec->time_base.den;
2178 st->r_frame_rate.den = st->codec->time_base.num;
2180 st->r_frame_rate.num = st->time_base.den;
2181 st->r_frame_rate.den = st->time_base.num;
2184 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
2185 if(!st->codec->bits_per_coded_sample)
2186 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2190 av_estimate_timings(ic, old_offset);
2192 compute_chapters_end(ic);
2195 /* correct DTS for B-frame streams with no timestamps */
2196 for(i=0;i<ic->nb_streams;i++) {
2197 st = ic->streams[i];
2198 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2200 ppktl = &ic->packet_buffer;
2202 if(ppkt1->stream_index != i)
2204 if(ppkt1->pkt->dts < 0)
2206 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
/* shift buffered DTS values by 'delta' (computed on elided lines) */
2208 ppkt1->pkt->dts -= delta;
2213 st->cur_dts -= delta;
2219 av_free(duration_error);
2224 /*******************************************************/
/* Resume playback: prefer the demuxer's read_play hook, otherwise
 * un-pause at the protocol level. */
2226 int av_read_play(AVFormatContext *s)
2228 if (s->iformat->read_play)
2229 return s->iformat->read_play(s);
/* NOTE(review): an elided condition (likely 'if (s->pb)') selects
 * between the two returns below -- confirm against the full source. */
2231 return av_url_read_fpause(s->pb, 0);
2232 return AVERROR(ENOSYS);
/* Pause playback: prefer the demuxer's read_pause hook, otherwise
 * pause at the protocol level. */
2235 int av_read_pause(AVFormatContext *s)
2237 if (s->iformat->read_pause)
2238 return s->iformat->read_pause(s);
/* NOTE(review): an elided condition (likely 'if (s->pb)') selects
 * between the two returns below -- confirm against the full source. */
2240 return av_url_read_fpause(s->pb, 1);
2241 return AVERROR(ENOSYS);
/* Free all state owned by the context: per-stream data, programs,
 * chapters, queued packets, metadata and demuxer private data.
 * The underlying ByteIOContext is NOT closed here. */
2244 void av_close_input_stream(AVFormatContext *s)
2249 if (s->iformat->read_close)
2250 s->iformat->read_close(s);
2251 for(i=0;i<s->nb_streams;i++) {
2252 /* free all data in a stream component */
2255 av_parser_close(st->parser);
2256 av_free_packet(&st->cur_pkt);
2258 av_metadata_free(&st->metadata);
2259 av_free(st->index_entries);
2260 av_free(st->codec->extradata);
2262 av_free(st->filename);
2263 av_free(st->priv_data);
/* iterate backwards since entries are freed in place */
2266 for(i=s->nb_programs-1; i>=0; i--) {
2267 av_freep(&s->programs[i]->provider_name);
2268 av_freep(&s->programs[i]->name);
2269 av_metadata_free(&s->programs[i]->metadata);
2270 av_freep(&s->programs[i]->stream_index);
2271 av_freep(&s->programs[i]);
2273 av_freep(&s->programs);
2274 flush_packet_queue(s);
2275 av_freep(&s->priv_data);
2276 while(s->nb_chapters--) {
2277 av_free(s->chapters[s->nb_chapters]->title);
2278 av_metadata_free(&s->chapters[s->nb_chapters]->metadata);
2279 av_free(s->chapters[s->nb_chapters]);
2281 av_freep(&s->chapters);
2282 av_metadata_free(&s->metadata);
/* Close an input file: free the context, then close its I/O context
 * unless the format does its own I/O (AVFMT_NOFILE). */
2286 void av_close_input_file(AVFormatContext *s)
/* grab pb before the stream close frees the context's contents */
2288 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2289 av_close_input_stream(s);
/* Allocate a new AVStream, attach it to 's' with the given id, and
 * initialize its codec context and timestamp state to defaults. */
2294 AVStream *av_new_stream(AVFormatContext *s, int id)
2299 if (s->nb_streams >= MAX_STREAMS)
2302 st = av_mallocz(sizeof(AVStream));
2306 st->codec= avcodec_alloc_context();
2308 /* no default bitrate if decoding */
2309 st->codec->bit_rate = 0;
2311 st->index = s->nb_streams;
2313 st->start_time = AV_NOPTS_VALUE;
2314 st->duration = AV_NOPTS_VALUE;
2315 /* we set the current DTS to 0 so that formats without any timestamps
2316 but durations get some timestamps, formats with some unknown
2317 timestamps have their first few packets buffered and the
2318 timestamps corrected before they are returned to the user */
2320 st->first_dts = AV_NOPTS_VALUE;
2322 /* default pts setting is MPEG-like */
2323 av_set_pts_info(st, 33, 1, 90000);
2324 st->last_IP_pts = AV_NOPTS_VALUE;
2325 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2326 st->pts_buffer[i]= AV_NOPTS_VALUE;
2328 st->sample_aspect_ratio = (AVRational){0,1};
2330 s->streams[s->nb_streams++] = st;
/* Return the program with the given id, creating (and registering)
 * a new one if it does not exist yet. */
2334 AVProgram *av_new_program(AVFormatContext *ac, int id)
2336 AVProgram *program=NULL;
2340 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
/* reuse an existing program with a matching id */
2343 for(i=0; i<ac->nb_programs; i++)
2344 if(ac->programs[i]->id == id)
2345 program = ac->programs[i];
2348 program = av_mallocz(sizeof(AVProgram));
2351 dynarray_add(&ac->programs, &ac->nb_programs, program);
2352 program->discard = AVDISCARD_NONE;
/* Return the chapter with the given id, creating it if needed, and
 * (re)set its title, time base and start/end times. */
2359 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2361 AVChapter *chapter = NULL;
/* reuse an existing chapter with a matching id */
2364 for(i=0; i<s->nb_chapters; i++)
2365 if(s->chapters[i]->id == id)
2366 chapter = s->chapters[i];
2369 chapter= av_mallocz(sizeof(AVChapter));
2372 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
/* replace any previous title with a private copy */
2374 av_free(chapter->title);
2375 chapter->title = av_strdup(title);
2377 chapter->time_base= time_base;
2378 chapter->start = start;
2384 /************************************************************/
2385 /* output media file */
/* Allocate the muxer's private data and apply caller-supplied format
 * parameters via the muxer's set_parameters hook, if any. */
2387 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2391 if (s->oformat->priv_data_size > 0) {
2392 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2394 return AVERROR(ENOMEM);
2396 s->priv_data = NULL;
2398 if (s->oformat->set_parameters) {
2399 ret = s->oformat->set_parameters(s, ap);
/* Validate per-stream parameters, fix up codec tags, write the
 * container header and initialize per-stream PTS generation. */
2406 int av_write_header(AVFormatContext *s)
2411 // some sanity checks
2412 for(i=0;i<s->nb_streams;i++) {
2415 switch (st->codec->codec_type) {
2416 case CODEC_TYPE_AUDIO:
2417 if(st->codec->sample_rate<=0){
2418 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
/* derive block_align from bits-per-sample for PCM-style codecs */
2421 if(!st->codec->block_align)
2422 st->codec->block_align = st->codec->channels *
2423 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2425 case CODEC_TYPE_VIDEO:
2426 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2427 av_log(s, AV_LOG_ERROR, "time base not set\n");
2430 if(st->codec->width<=0 || st->codec->height<=0){
2431 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2434 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2435 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
2441 if(s->oformat->codec_tag){
2442 if(st->codec->codec_tag){
2444 //check that tag + id is in the table
2445 //if neither is in the table -> OK
2446 //if tag is in the table with another id -> FAIL
2447 //if id is in the table with another tag -> FAIL unless strict < ?
2449 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2452 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
2453 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
2454 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
/* priv_data may already exist if av_set_parameters() allocated it */
2457 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2458 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2460 return AVERROR(ENOMEM);
2463 #if LIBAVFORMAT_VERSION_MAJOR < 53
2464 ff_metadata_mux_compat(s);
2467 if(s->oformat->write_header){
2468 ret = s->oformat->write_header(s);
2473 /* init PTS generation */
2474 for(i=0;i<s->nb_streams;i++) {
2475 int64_t den = AV_NOPTS_VALUE;
2478 switch (st->codec->codec_type) {
2479 case CODEC_TYPE_AUDIO:
/* PTS advances one tick per audio sample */
2480 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2482 case CODEC_TYPE_VIDEO:
/* PTS advances one tick per video frame */
2483 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2488 if (den != AV_NOPTS_VALUE) {
2490 return AVERROR_INVALIDDATA;
2491 av_frac_init(&st->pts, 0, 0, den);
2497 //FIXME merge with compute_pkt_fields
/* Fill in missing duration/pts/dts on an outgoing packet and advance
 * the stream's fractional PTS counter; returns < 0 on invalid
 * (non-monotone or pts<dts) timestamps. */
2498 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2499 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2500 int num, den, frame_size, i;
2502 // av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2504 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2507 /* duration field */
2508 if (pkt->duration == 0) {
2509 compute_frame_duration(&num, &den, st, NULL, pkt);
2511 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
/* without B-frames, dts and pts are identical */
2515 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2518 //XXX/FIXME this is a temporary hack until all encoders output pts
2519 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2521 // pkt->pts= st->cur_dts;
2522 pkt->pts= st->pts.val;
2525 //calculate dts from pts
2526 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
/* run the pts through a small reorder buffer: the minimum of the
 * last delay+1 pts values is a valid dts */
2527 st->pts_buffer[0]= pkt->pts;
2528 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2529 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
2530 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2531 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2533 pkt->dts= st->pts_buffer[0];
2536 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2537 av_log(st->codec, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2540 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2541 av_log(st->codec, AV_LOG_ERROR, "error, pts < dts\n");
2545 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2546 st->cur_dts= pkt->dts;
2547 st->pts.val= pkt->dts;
/* update the fractional PTS counter for the next packet */
2550 switch (st->codec->codec_type) {
2551 case CODEC_TYPE_AUDIO:
2552 frame_size = get_audio_frame_size(st->codec, pkt->size);
2554 /* HACK/FIXME, we skip the initial 0 size packets as they are most
2555 likely equal to the encoder delay, but it would be better if we
2556 had the real timestamps from the encoder */
2557 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2558 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2561 case CODEC_TYPE_VIDEO:
2562 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
/* Write a packet straight to the muxer (no interleaving); timestamp
 * errors are fatal unless the muxer declares AVFMT_NOTIMESTAMPS. */
2570 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2572 int ret = compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2574 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2577 ret= s->oformat->write_packet(s, pkt);
/* surface any I/O error recorded on the output context */
2579 ret= url_ferror(s->pb);
/* Insert a packet into the interleaving queue at the position chosen
 * by 'compare' (insertion keeps the queue sorted). The queue takes
 * ownership of the packet's data. */
2583 void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
2584 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
2586 AVPacketList **next_point, *this_pktl;
2588 this_pktl = av_mallocz(sizeof(AVPacketList));
2589 this_pktl->pkt= *pkt;
2590 if(pkt->destruct == av_destruct_packet)
2591 pkt->destruct= NULL; // not shared -> must keep original from being freed
2593 av_dup_packet(&this_pktl->pkt); //shared -> must dup
/* walk the sorted list until 'pkt' should come before the cursor */
2595 next_point = &s->packet_buffer;
2597 if(compare(s, &(*next_point)->pkt, pkt))
2599 next_point= &(*next_point)->next;
2601 this_pktl->next= *next_point;
2602 *next_point= this_pktl;
/* Ordering predicate for DTS interleaving: nonzero when 'next' should
 * be muxed after 'pkt'. DTS values are compared after cross-multiplying
 * by the two streams' time bases. */
2605 int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
2607 AVStream *st = s->streams[ pkt ->stream_index];
2608 AVStream *st2= s->streams[ next->stream_index];
2609 int64_t left = st2->time_base.num * (int64_t)st ->time_base.den;
2610 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2612 if (pkt->dts == AV_NOPTS_VALUE)
2615 return next->dts * left > pkt->dts * right; //FIXME this can overflow
/* DTS-based interleaving: buffer incoming packets sorted by DTS and
 * release the head once every stream has contributed at least one
 * packet (or unconditionally when flushing). */
2618 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2621 int streams[MAX_STREAMS];
2624 ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
/* count how many distinct streams have buffered packets */
2627 memset(streams, 0, sizeof(streams));
2628 pktl= s->packet_buffer;
2630 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
2631 if(streams[ pktl->pkt.stream_index ] == 0)
2633 streams[ pktl->pkt.stream_index ]++;
/* pop the earliest packet once every stream is represented, or on flush */
2637 if(stream_count && (s->nb_streams == stream_count || flush)){
2638 pktl= s->packet_buffer;
2641 s->packet_buffer= pktl->next;
2645 av_init_packet(out);
2651 * Interleaves an AVPacket correctly so it can be muxed.
2652 * @param out the interleaved packet will be output here
2653 * @param in the input packet
2654 * @param flush 1 if no further packets are available as input and all
2655 * remaining packets should be output
2656 * @return 1 if a packet was output, 0 if no packet could be output,
2657 * < 0 if an error occurred
2659 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
/* muxer-specific interleaving takes precedence over the DTS default */
2660 if(s->oformat->interleave_packet)
2661 return s->oformat->interleave_packet(s, out, in, flush);
2663 return av_interleave_packet_per_dts(s, out, in, flush);
/* Buffer a packet for interleaving and write out whatever packets the
 * interleaver releases; the preferred write path for multi-stream files. */
2666 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2667 AVStream *st= s->streams[ pkt->stream_index];
2669 //FIXME/XXX/HACK drop zero sized packets
2670 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2673 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2674 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2677 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
/* drain everything the interleaver is ready to release */
2682 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2683 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2686 ret= s->oformat->write_packet(s, &opkt);
2688 av_free_packet(&opkt);
2693 if(url_ferror(s->pb))
2694 return url_ferror(s->pb);
/* Flush all interleaved packets still buffered, write the container
 * trailer, and free per-stream and muxer private data. */
2698 int av_write_trailer(AVFormatContext *s)
/* flush=1 forces the interleaver to release every buffered packet */
2704 ret= av_interleave_packet(s, &pkt, NULL, 1);
2705 if(ret<0) //FIXME cleanup needed for ret<0 ?
2710 ret= s->oformat->write_packet(s, &pkt);
2712 av_free_packet(&pkt);
2716 if(url_ferror(s->pb))
2720 if(s->oformat->write_trailer)
2721 ret = s->oformat->write_trailer(s);
2724 ret=url_ferror(s->pb);
2725 for(i=0;i<s->nb_streams;i++)
2726 av_freep(&s->streams[i]->priv_data);
2727 av_freep(&s->priv_data);
/* Register stream index 'idx' with the program 'progid', growing the
 * program's stream_index array; no-op if already present. */
2731 void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
2734 AVProgram *program=NULL;
2737 for(i=0; i<ac->nb_programs; i++){
2738 if(ac->programs[i]->id != progid)
2740 program = ac->programs[i];
/* skip if this stream index is already attached */
2741 for(j=0; j<program->nb_stream_indexes; j++)
2742 if(program->stream_index[j] == idx)
/* grow by one entry; original array stays valid on realloc failure */
2745 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
2748 program->stream_index = tmp;
2749 program->stream_index[program->nb_stream_indexes++] = idx;
2754 /* "user interface" functions */
/* Log a one-line human-readable description of stream 'i':
 * id, language, codec string, aspect ratio and frame rate. */
2755 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
2758 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
2759 AVStream *st = ic->streams[i];
/* reduce the time base before printing it */
2760 int g = av_gcd(st->time_base.num, st->time_base.den);
2761 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2762 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2763 /* the pid is an important information, so we display it */
2764 /* XXX: add a generic system */
2765 if (flags & AVFMT_SHOW_IDS)
2766 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2767 if (strlen(st->language) > 0)
2768 av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
2769 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2770 av_log(NULL, AV_LOG_INFO, ": %s", buf);
/* only print aspect info when it differs from the codec-level value */
2771 if (st->sample_aspect_ratio.num && // default
2772 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
2773 AVRational display_aspect_ratio;
2774 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
2775 st->codec->width*st->sample_aspect_ratio.num,
2776 st->codec->height*st->sample_aspect_ratio.den,
2778 av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d",
2779 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
2780 display_aspect_ratio.num, display_aspect_ratio.den);
2782 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2783 if(st->r_frame_rate.den && st->r_frame_rate.num)
2784 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(r)", av_q2d(st->r_frame_rate));
2785 /* else if(st->time_base.den && st->time_base.num)
2786 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(m)", 1/av_q2d(st->time_base));*/
2788 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(c)", 1/av_q2d(st->codec->time_base));
2790 av_log(NULL, AV_LOG_INFO, "\n");
/**
 * Print detailed human-readable information about the (de)muxer
 * context: container name, URL, duration, start time, bitrate,
 * programs and one line per stream (via dump_stream_format()).
 *
 * NOTE(review): excerpt is elided (non-contiguous original lines);
 * the full parameter list of this function is not visible here.
 */
2793 void dump_format(AVFormatContext *ic,
     /* header line: "Input #0, <format>, from '<url>':" or the Output variant */
2800     av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2801            is_output ? "Output" : "Input",
2803            is_output ? ic->oformat->name : ic->iformat->name,
2804            is_output ? "to" : "from", url);
2806     av_log(NULL, AV_LOG_INFO, " Duration: ");
2807     if (ic->duration != AV_NOPTS_VALUE) {
2808         int hours, mins, secs, us;
     /* split AV_TIME_BASE-scaled duration into whole seconds + remainder */
2809         secs = ic->duration / AV_TIME_BASE;
2810         us = ic->duration % AV_TIME_BASE;
     /* hundredths of a second derived from the microsecond remainder */
2815         av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
2816                (100 * us) / AV_TIME_BASE);
2818         av_log(NULL, AV_LOG_INFO, "N/A");
2820     if (ic->start_time != AV_NOPTS_VALUE) {
2822         av_log(NULL, AV_LOG_INFO, ", start: ");
2823         secs = ic->start_time / AV_TIME_BASE;
2824         us = ic->start_time % AV_TIME_BASE;
2825         av_log(NULL, AV_LOG_INFO, "%d.%06d",
2826                secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2828     av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2830         av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2832         av_log(NULL, AV_LOG_INFO, "N/A");
2834     av_log(NULL, AV_LOG_INFO, "\n");
     /* when programs exist, group the stream dump per program */
2836     if(ic->nb_programs) {
2838         for(j=0; j<ic->nb_programs; j++) {
2839             av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
2840                    ic->programs[j]->name ? ic->programs[j]->name : "");
2841             for(k=0; k<ic->programs[j]->nb_stream_indexes; k++)
2842                 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
     /* otherwise (or additionally — elided control flow) dump all streams flat */
2845     for(i=0;i<ic->nb_streams;i++)
2846         dump_stream_format(ic, i, index, is_output);
2849 #if LIBAVFORMAT_VERSION_MAJOR < 53
/**
 * Deprecated wrapper: forwards to av_parse_video_frame_size().
 * Kept only under the LIBAVFORMAT_VERSION_MAJOR < 53 ABI guard.
 */
2850 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2852     return av_parse_video_frame_size(width_ptr, height_ptr, str);
/**
 * Deprecated wrapper around av_parse_video_frame_rate(): parses arg
 * into an AVRational and copies num/den into the caller's ints.
 * NOTE(review): the return statement is elided from this excerpt;
 * presumably the wrapped call's result is returned — confirm upstream.
 */
2855 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
2857     AVRational frame_rate;
2858     int ret = av_parse_video_frame_rate(&frame_rate, arg);
2859     *frame_rate_num= frame_rate.num;
2860     *frame_rate_den= frame_rate.den;
/**
 * Get the current wall-clock time in microseconds, built from
 * gettimeofday()'s seconds + microseconds fields.
 */
2865 int64_t av_gettime(void)
2868     gettimeofday(&tv,NULL);
     /* widen tv_sec before multiplying to avoid 32-bit overflow */
2869     return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
/**
 * Parse datestr into a microsecond value: either an absolute date/time
 * (converted from struct tm, with optional trailing 'Z'/'z' meaning UTC,
 * and "now" as a shortcut for the current time) or, when duration is
 * nonzero, a time span (HH:MM:SS or plain seconds), with an optional
 * fractional ".mmm..." suffix and leading '-' for negative durations.
 *
 * NOTE(review): excerpt is elided (non-contiguous original lines);
 * comments describe only the visible statements.
 *
 * @return the time in microseconds; negative only for durations
 */
2872 int64_t parse_date(const char *datestr, int duration)
     /* candidate formats for small_strptime() */
2878     static const char * const date_fmt[] = {
2882     static const char * const time_fmt[] = {
2892     time_t now = time(0);
2894     len = strlen(datestr);
     /* trailing 'Z'/'z' selects UTC interpretation */
2896         lastch = datestr[len - 1];
2899     is_utc = (lastch == 'z' || lastch == 'Z');
2901     memset(&dt, 0, sizeof(dt));
     /* "now" short-circuits to the current wall-clock time */
2906         if (!strncasecmp(datestr, "now", len))
2907             return (int64_t) now * 1000000;
2909         /* parse the year-month-day part */
2910         for (i = 0; i < FF_ARRAY_ELEMS(date_fmt); i++) {
2911             q = small_strptime(p, date_fmt[i], &dt);
2917         /* if the year-month-day part is missing, then take the
2918          * current year-month-day time */
2923                 dt = *localtime(&now);
2925             dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
     /* optional date/time separator: 'T', 't' or a space */
2930         if (*p == 'T' || *p == 't' || *p == ' ')
2933         /* parse the hour-minute-second part */
2934         for (i = 0; i < FF_ARRAY_ELEMS(time_fmt); i++) {
2935             q = small_strptime(p, time_fmt[i], &dt);
2941         /* parse datestr as a duration */
2946         /* parse datestr as HH:MM:SS */
2947         q = small_strptime(p, time_fmt[0], &dt);
2949             /* parse datestr as S+ */
2950             dt.tm_sec = strtol(p, (char **)&q, 10);
2952                 /* the parsing didn't succeed */
2959     /* Now we have all the fields that we can get */
     /* duration case: convert H/M/S fields to plain seconds */
2965         t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2967         dt.tm_isdst = -1; /* unknown */
2977     /* parse the .m... part */
     /* accumulate up to 6 fractional digits as microseconds */
2981         for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2984                 val += n * (*q - '0');
2988     return negative ? -t : t;
/**
 * Scan an URL-style "tag1=value&tag2=value..." info string for tag1
 * and, when found, copy its value into arg (bounded by arg_size).
 *
 * NOTE(review): excerpt is elided (non-contiguous original lines);
 * the return value convention (found/not found) is not visible here.
 */
2991 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
     /* copy the tag name, stopping at '=', '&' or end of string */
3001         while (*p != '\0' && *p != '=' && *p != '&') {
3002             if ((q - tag) < sizeof(tag) - 1)
     /* copy the value, stopping at the next '&' or end of string */
3010             while (*p != '&' && *p != '\0') {
3011                 if ((q - arg) < arg_size - 1) {
     /* exact match against the tag the caller asked for */
3021         if (!strcmp(tag, tag1))
/**
 * Expand a "%0Nd"-style pattern in path, substituting number, and
 * write the result into buf (at most buf_size-1 chars plus NUL).
 * Fails when the pattern contains no %d-like specifier.
 *
 * NOTE(review): excerpt is elided (non-contiguous original lines);
 * the '%'-detection and error-return paths are not fully visible.
 */
3030 int av_get_frame_filename(char *buf, int buf_size,
3031                           const char *path, int number)
3034     char *q, buf1[20], c;
3035     int nd, len, percentd_found;
     /* accumulate the zero-pad width digits after '%' */
3047             while (isdigit(*p)) {
3048                 nd = nd * 10 + *p++ - '0';
3051             } while (isdigit(c));
     /* render the number with the requested zero-padded width */
3060                 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
     /* refuse to overflow the output buffer */
3062                 if ((q - buf + len) > buf_size - 1)
3064                 memcpy(q, buf1, len);
3072             if ((q - buf) < buf_size - 1)
     /* a pattern without any %d specifier is an error */
3076     if (!percentd_found)
/**
 * Dump buf as hex + printable-ASCII lines of 16 bytes, either to a
 * FILE* (f non-NULL) or through av_log() at the given level.
 * Shared backend of av_hex_dump() and av_hex_dump_log().
 *
 * NOTE(review): excerpt is elided (non-contiguous original lines).
 */
3085 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
     /* route output to fprintf or av_log depending on f */
3088 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3090     for(i=0;i<size;i+=16) {
3097             PRINT(" %02x", buf[i+j]);
     /* ASCII column: non-printable bytes are replaced (elided branch) */
3102         for(j=0;j<len;j++) {
3104             if (c < ' ' || c > '~')
/** Hex-dump buf to the given FILE* (thin wrapper over hex_dump_internal). */
3113 void av_hex_dump(FILE *f, uint8_t *buf, int size)
3115     hex_dump_internal(NULL, f, 0, buf, size);
/** Hex-dump buf through av_log() on context avcl at the given level. */
3118 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3120     hex_dump_internal(avcl, NULL, level, buf, size);
3123 //FIXME needs to know the time_base
/**
 * Print an AVPacket's fields (stream index, keyframe flag, duration,
 * dts, pts, size) and optionally hex-dump its payload, either to a
 * FILE* (f non-NULL) or through av_log().  Shared backend of
 * av_pkt_dump() and av_pkt_dump_log().
 *
 * NOTE(review): excerpt is elided; timestamps are printed scaled by
 * AV_TIME_BASE, which per the FIXME above ignores the stream timebase.
 */
3124 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
3126 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3127     PRINT("stream #%d:\n", pkt->stream_index);
3128     PRINT(" keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
3129     PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
3130     /* DTS is _always_ valid after av_read_frame() */
3132     if (pkt->dts == AV_NOPTS_VALUE)
3135         PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
3136     /* PTS may not be known if B-frames are present. */
3138     if (pkt->pts == AV_NOPTS_VALUE)
3141         PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
3143     PRINT(" size=%d\n", pkt->size);
     /* payload hex dump is opt-in via dump_payload (elided condition) */
3146         av_hex_dump(f, pkt->data, pkt->size);
/** Dump packet fields (and optionally payload) to a FILE*. */
3149 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3151     pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
/** Dump packet fields (and optionally payload) through av_log(). */
3154 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3156     pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
/**
 * Split an URL of the form proto://user[:pass]@host[:port]/path into
 * its components, each copied into a caller-supplied, size-bounded
 * buffer.  Missing components are returned as empty strings; a
 * missing port yields *port_ptr == -1.  IPv6 literals in [brackets]
 * are supported.  An URL without "proto:" is treated as a plain path.
 *
 * NOTE(review): excerpt is elided (non-contiguous original lines);
 * the full parameter list (port_ptr, url) is not visible here.
 */
3159 void url_split(char *proto, int proto_size,
3160                char *authorization, int authorization_size,
3161                char *hostname, int hostname_size,
3163                char *path, int path_size,
3166     const char *p, *ls, *at, *col, *brk;
     /* default every output to empty / "no port" */
3168     if (port_ptr) *port_ptr = -1;
3169     if (proto_size > 0) proto[0] = 0;
3170     if (authorization_size > 0) authorization[0] = 0;
3171     if (hostname_size > 0) hostname[0] = 0;
3172     if (path_size > 0) path[0] = 0;
3174     /* parse protocol */
3175     if ((p = strchr(url, ':'))) {
     /* copy up to but not including the ':' */
3176         av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3181         /* no protocol means plain filename */
3182         av_strlcpy(path, url, path_size);
3186     /* separate path from hostname */
3187     ls = strchr(p, '/');
     /* a query string with no path also terminates the host part */
3189         ls = strchr(p, '?');
3191         av_strlcpy(path, ls, path_size);
3193         ls = &p[strlen(p)]; // XXX
3195     /* the rest is hostname, use that to parse auth/port */
3197         /* authorization (user[:pass]@hostname) */
3198         if ((at = strchr(p, '@')) && at < ls) {
3199             av_strlcpy(authorization, p,
3200                        FFMIN(authorization_size, at + 1 - p));
3201             p = at + 1; /* skip '@' */
     /* bracketed IPv6 literal: host is what's inside [ ] */
3204         if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3206             av_strlcpy(hostname, p + 1,
3207                        FFMIN(hostname_size, brk - p));
3208             if (brk[1] == ':' && port_ptr)
3209                 *port_ptr = atoi(brk + 2);
3210         } else if ((col = strchr(p, ':')) && col < ls) {
     /* host:port form */
3211             av_strlcpy(hostname, p,
3212                        FFMIN(col + 1 - p, hostname_size));
3213             if (port_ptr) *port_ptr = atoi(col + 1);
     /* bare hostname, no port */
3215             av_strlcpy(hostname, p,
3216                        FFMIN(ls + 1 - p, hostname_size));
/**
 * Write the s bytes of src into buff as uppercase hex, two output
 * characters per input byte (buff must hold at least 2*s chars).
 * NOTE(review): the return statement and NUL-termination, if any,
 * are elided from this excerpt — confirm upstream.
 */
3220 char *ff_data_to_hex(char *buff, const uint8_t *src, int s)
3223     static const char hex_table[16] = { '0', '1', '2', '3',
3226                                         'C', 'D', 'E', 'F' };
3228     for(i = 0; i < s; i++) {
     /* high nibble then low nibble */
3229         buff[i * 2]     = hex_table[src[i] >> 4];
3230         buff[i * 2 + 1] = hex_table[src[i] & 0xF];
3236 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3237 int pts_num, int pts_den)
3239 unsigned int gcd= av_gcd(pts_num, pts_den);
3240 s->pts_wrap_bits = pts_wrap_bits;
3241 s->time_base.num = pts_num/gcd;
3242 s->time_base.den = pts_den/gcd;
3245 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, gcd);