rtime.felk.cvut.cz Git - frescor/ffmpeg.git/blobdiff - libavformat/utils.c
frsh: Export information about the last RTP contract and VRES
index 900bd206e18df2ab8bee04feef42968579e52c7f..bdf168ae93baa6d974f7e799f8bf0a952f696b79 100644
@@ -258,40 +258,6 @@ AVInputFormat *av_find_input_format(const char *short_name)
 
 /* memory handling */
 
-void av_destruct_packet(AVPacket *pkt)
-{
-    av_free(pkt->data);
-    pkt->data = NULL; pkt->size = 0;
-}
-
-void av_init_packet(AVPacket *pkt)
-{
-    pkt->pts   = AV_NOPTS_VALUE;
-    pkt->dts   = AV_NOPTS_VALUE;
-    pkt->pos   = -1;
-    pkt->duration = 0;
-    pkt->convergence_duration = 0;
-    pkt->flags = 0;
-    pkt->stream_index = 0;
-    pkt->destruct= av_destruct_packet_nofree;
-}
-
-int av_new_packet(AVPacket *pkt, int size)
-{
-    uint8_t *data;
-    if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
-        return AVERROR(ENOMEM);
-    data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
-    if (!data)
-        return AVERROR(ENOMEM);
-    memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
-
-    av_init_packet(pkt);
-    pkt->data = data;
-    pkt->size = size;
-    pkt->destruct = av_destruct_packet;
-    return 0;
-}
 
 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
 {
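A short, hedged illustration of the helpers removed above (av_init_packet(), av_new_packet(), av_destruct_packet()); the sketch assumes they stay reachable through the public headers after leaving utils.c (presumably only relocated, not dropped), and the function name is illustrative:

#include <string.h>
#include "libavformat/avformat.h"

static int packet_lifecycle_demo(void)
{
    AVPacket pkt;
    int ret = av_new_packet(&pkt, 1024);    /* payload + zeroed padding      */
    if (ret < 0)
        return ret;                         /* AVERROR(ENOMEM) on failure    */

    memset(pkt.data, 0, pkt.size);          /* use the payload               */

    av_free_packet(&pkt);                   /* invokes pkt.destruct()        */
    return 0;
}
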
@@ -306,29 +272,11 @@ int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
     if(ret<=0)
         av_free_packet(pkt);
     else
-        pkt->size= ret;
+        av_shrink_packet(pkt, ret);
 
     return ret;
 }
 
-int av_dup_packet(AVPacket *pkt)
-{
-    if (((pkt->destruct == av_destruct_packet_nofree) || (pkt->destruct == NULL)) && pkt->data) {
-        uint8_t *data;
-        /* We duplicate the packet and don't forget to add the padding again. */
-        if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
-            return AVERROR(ENOMEM);
-        data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
-        if (!data) {
-            return AVERROR(ENOMEM);
-        }
-        memcpy(data, pkt->data, pkt->size);
-        memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
-        pkt->data = data;
-        pkt->destruct = av_destruct_packet;
-    }
-    return 0;
-}
 
 int av_filename_number_test(const char *filename)
 {
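In av_get_packet() above, the direct pkt->size assignment after a short read is replaced by av_shrink_packet(). A minimal sketch of the same pattern, assuming av_shrink_packet(pkt, size) trims the packet to the bytes actually read while keeping the trailing padding zeroed; the helper name is illustrative:

#include "libavformat/avformat.h"

static int read_up_to(ByteIOContext *s, AVPacket *pkt, int size)
{
    int ret = av_new_packet(pkt, size);
    if (ret < 0)
        return ret;

    pkt->pos = url_ftell(s);                /* remember where the data came from  */
    ret = get_buffer(s, pkt->data, size);   /* may return fewer bytes than asked  */
    if (ret <= 0)
        av_free_packet(pkt);
    else
        av_shrink_packet(pkt, ret);         /* instead of pkt->size = ret         */

    return ret;
}
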
@@ -932,10 +880,6 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
         pkt->convergence_duration = pc->convergence_duration;
 }
 
-void av_destruct_packet_nofree(AVPacket *pkt)
-{
-    pkt->data = NULL; pkt->size = 0;
-}
 
 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
 {
@@ -954,11 +898,17 @@ static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
                 *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
                 compute_pkt_fields(s, st, NULL, pkt);
                 s->cur_st = NULL;
+                if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
+                    (pkt->flags & PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
+                    ff_reduce_index(s, st->index);
+                    av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
+                }
                 break;
             } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
-                len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
-                                      st->cur_ptr, st->cur_len,
-                                      st->cur_pkt.pts, st->cur_pkt.dts);
+                len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
+                                       st->cur_ptr, st->cur_len,
+                                       st->cur_pkt.pts, st->cur_pkt.dts,
+                                       st->cur_pkt.pos);
                 st->cur_pkt.pts = AV_NOPTS_VALUE;
                 st->cur_pkt.dts = AV_NOPTS_VALUE;
                 /* increment read pointer */
@@ -967,13 +917,13 @@ static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
 
                 /* return packet if any */
                 if (pkt->size) {
-                    pkt->pos = st->cur_pkt.pos;              // Isn't quite accurate but close.
                 got_packet:
                     pkt->duration = 0;
                     pkt->stream_index = st->index;
                     pkt->pts = st->parser->pts;
                     pkt->dts = st->parser->dts;
-                    pkt->destruct = av_destruct_packet_nofree;
+                    pkt->pos = st->parser->pos;
+                    pkt->destruct = NULL;
                     compute_pkt_fields(s, st, st->parser, pkt);
 
                     if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
@@ -1000,10 +950,11 @@ static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
                 for(i = 0; i < s->nb_streams; i++) {
                     st = s->streams[i];
                     if (st->parser && st->need_parsing) {
-                        av_parser_parse(st->parser, st->codec,
+                        av_parser_parse2(st->parser, st->codec,
                                         &pkt->data, &pkt->size,
                                         NULL, 0,
-                                        AV_NOPTS_VALUE, AV_NOPTS_VALUE);
+                                        AV_NOPTS_VALUE, AV_NOPTS_VALUE,
+                                        AV_NOPTS_VALUE);
                         if (pkt->size)
                             goto got_packet;
                     }
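Both call sites above move from av_parser_parse() to av_parser_parse2(), whose extra int64_t argument carries the byte offset of the input so the parser can hand it back through parser->pos (used above to fill pkt->pos). A hedged, caller-side sketch of driving that signature; the helper name and the loop structure are illustrative only:

#include "libavcodec/avcodec.h"

static void feed_parser(AVCodecParserContext *parser, AVCodecContext *avctx,
                        const uint8_t *buf, int buf_size,
                        int64_t pts, int64_t dts, int64_t pos)
{
    while (buf_size > 0) {
        uint8_t *out_data;
        int      out_size;
        int consumed = av_parser_parse2(parser, avctx,
                                        &out_data, &out_size,
                                        buf, buf_size,
                                        pts, dts, pos);   /* pos: byte offset of buf */
        buf      += consumed;
        buf_size -= consumed;
        pts = dts = AV_NOPTS_VALUE;   /* timestamps belong to the first call only */

        if (out_size) {
            /* out_data/out_size hold one assembled frame; its timestamps and
             * byte position are available in parser->pts/dts/pos */
        }
    }
}
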
@@ -1289,7 +1240,7 @@ int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
 
 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
     AVInputFormat *avif= s->iformat;
-    int64_t pos_min, pos_max, pos, pos_limit;
+    int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
     int64_t ts_min, ts_max, ts;
     int index;
     AVStream *st;
@@ -1317,8 +1268,8 @@ int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts
             pos_min= e->pos;
             ts_min= e->timestamp;
 #ifdef DEBUG_SEEK
-        av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
-               pos_min,ts_min);
+            av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
+                   pos_min,ts_min);
 #endif
         }else{
             assert(index==0);
@@ -1333,8 +1284,8 @@ int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts
             ts_max= e->timestamp;
             pos_limit= pos_max - e->min_distance;
 #ifdef DEBUG_SEEK
-        av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
-               pos_max,pos_limit, ts_max);
+            av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
+                   pos_max,pos_limit, ts_max);
 #endif
         }
     }
@@ -1432,7 +1383,9 @@ int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, i
         else
             no_change=0;
 #ifdef DEBUG_SEEK
-av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
+        av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
+               pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit,
+               start_pos, no_change);
 #endif
         if(ts == AV_NOPTS_VALUE){
             av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
@@ -1511,7 +1464,7 @@ static int av_seek_frame_generic(AVFormatContext *s,
                 return ret;
             av_update_cur_dts(s, st, ie->timestamp);
         }else{
-            if ((ret = url_fseek(s->pb, 0, SEEK_SET)) < 0)
+            if ((ret = url_fseek(s->pb, s->data_offset, SEEK_SET)) < 0)
                 return ret;
         }
         for(i=0;; i++) {
@@ -1580,6 +1533,28 @@ int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int f
         return av_seek_frame_generic(s, stream_index, timestamp, flags);
 }
 
+int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
+{
+    if(min_ts > ts || max_ts < ts)
+        return -1;
+
+    av_read_frame_flush(s);
+
+    if (s->iformat->read_seek2)
+        return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
+
+    if(s->iformat->read_timestamp){
+        //try to seek via read_timestamp()
+    }
+
+    //Fallback to old API if new is not implemented but old is
+    //Note the old has somewhat different semantics
+    if(s->iformat->read_seek || 1)
+        return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));
+
+    // try some generic seek like av_seek_frame_generic() but with new ts semantics
+}
+
 /*******************************************************/
 
 /**
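A hedged usage sketch of the new avformat_seek_file() entry point added above: the caller gives an acceptable range [min_ts, max_ts] and a preferred ts. The helper below and the one-sided range are illustrative; with stream_index -1 the timestamps are assumed to be in AV_TIME_BASE units, as with av_seek_frame():

#include <stdint.h>
#include "libavformat/avformat.h"

static int seek_backward_to(AVFormatContext *ic, double seconds)
{
    int64_t ts = (int64_t)(seconds * AV_TIME_BASE);

    /* accept anything at or before ts, as close to ts as possible */
    return avformat_seek_file(ic, -1, INT64_MIN, ts, ts, 0);
}
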
@@ -1866,41 +1841,42 @@ static int has_codec_parameters(AVCodecContext *enc)
     return enc->codec_id != CODEC_ID_NONE && val != 0;
 }
 
-static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
+static int try_decode_frame(AVStream *st, AVPacket *avpkt)
 {
     int16_t *samples;
     AVCodec *codec;
     int got_picture, data_size, ret=0;
     AVFrame picture;
 
-  if(!st->codec->codec){
-    codec = avcodec_find_decoder(st->codec->codec_id);
-    if (!codec)
-        return -1;
-    ret = avcodec_open(st->codec, codec);
-    if (ret < 0)
-        return ret;
-  }
+    if(!st->codec->codec){
+        codec = avcodec_find_decoder(st->codec->codec_id);
+        if (!codec)
+            return -1;
+        ret = avcodec_open(st->codec, codec);
+        if (ret < 0)
+            return ret;
+    }
 
-  if(!has_codec_parameters(st->codec)){
-    switch(st->codec->codec_type) {
-    case CODEC_TYPE_VIDEO:
-        ret = avcodec_decode_video(st->codec, &picture,
-                                   &got_picture, data, size);
-        break;
-    case CODEC_TYPE_AUDIO:
-        data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
-        samples = av_malloc(data_size);
-        if (!samples)
-            goto fail;
-        ret = avcodec_decode_audio2(st->codec, samples,
-                                    &data_size, data, size);
-        av_free(samples);
-        break;
-    default:
-        break;
+    if(!has_codec_parameters(st->codec)){
+        switch(st->codec->codec_type) {
+        case CODEC_TYPE_VIDEO:
+            avcodec_get_frame_defaults(&picture);
+            ret = avcodec_decode_video2(st->codec, &picture,
+                                        &got_picture, avpkt);
+            break;
+        case CODEC_TYPE_AUDIO:
+            data_size = FFMAX(avpkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
+            samples = av_malloc(data_size);
+            if (!samples)
+                goto fail;
+            ret = avcodec_decode_audio3(st->codec, samples,
+                                        &data_size, avpkt);
+            av_free(samples);
+            break;
+        default:
+            break;
+        }
     }
-  }
  fail:
     return ret;
 }
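try_decode_frame() now receives an AVPacket and uses the packet-based decoders. A hedged sketch of the same switch for a caller that still holds a bare (data, size) pair; the wrapper name is illustrative and the decoder signatures are the ones shown in the hunk above:

#include "libavcodec/avcodec.h"

static int decode_probe(AVCodecContext *avctx, uint8_t *data, int size)
{
    AVPacket avpkt;
    AVFrame  picture;
    int got_picture;

    av_init_packet(&avpkt);        /* sets defaults; does not allocate data */
    avpkt.data = data;
    avpkt.size = size;

    avcodec_get_frame_defaults(&picture);
    return avcodec_decode_video2(avctx, &picture, &got_picture, &avpkt);
}
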
@@ -2106,8 +2082,11 @@ int av_find_stream_info(AVFormatContext *ic)
         read_size += pkt->size;
 
         st = ic->streams[pkt->stream_index];
-        if(codec_info_nb_frames[st->index]>1)
+        if(codec_info_nb_frames[st->index]>1) {
+            if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration)
+                break;
             codec_info_duration[st->index] += pkt->duration;
+        }
         if (pkt->duration != 0)
             codec_info_nb_frames[st->index]++;
 
@@ -2166,11 +2145,8 @@ int av_find_stream_info(AVFormatContext *ic)
              st->codec->codec_id == CODEC_ID_PPM ||
              st->codec->codec_id == CODEC_ID_SHORTEN ||
              (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
-            try_decode_frame(st, pkt->data, pkt->size);
+            try_decode_frame(st, pkt);
 
-        if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
-            break;
-        }
         count++;
     }
 
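The max_analyze_duration cut-off is moved above so it only triggers once a stream has accumulated more than one frame of duration; the comparison rescales the per-stream total from the stream time base into AV_TIME_BASE (microsecond) units. A small illustration of that conversion, with hypothetical numbers and an illustrative helper name:

#include "libavcodec/avcodec.h"
#include "libavutil/mathematics.h"

static int64_t analyzed_duration_us(int64_t duration_in_tb, AVRational time_base)
{
    /* e.g. 450000 ticks at a 1/90000 time base -> 5000000 us (5 seconds) */
    return av_rescale_q(duration_in_tb, time_base, AV_TIME_BASE_Q);
}
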
@@ -2638,10 +2614,8 @@ void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
 
     this_pktl = av_mallocz(sizeof(AVPacketList));
     this_pktl->pkt= *pkt;
-    if(pkt->destruct == av_destruct_packet)
-        pkt->destruct= NULL; // not shared -> must keep original from being freed
-    else
-        av_dup_packet(&this_pktl->pkt);  //shared -> must dup
+    pkt->destruct= NULL;             // do not free original but only the copy
+    av_dup_packet(&this_pktl->pkt);  // duplicate the packet if it uses non-alloced memory
 
     next_point = &s->packet_buffer;
     while(*next_point){
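
The interleaving queue above no longer distinguishes shared from owned payloads: the caller's packet always gives up ownership, and av_dup_packet() makes the queued copy self-contained when it does not already own its buffer. A hedged sketch of that ownership rule in isolation; the helper name is illustrative and the list handling is simplified (no dts-ordered insertion):

#include <errno.h>
#include "libavformat/avformat.h"

static int queue_copy(AVPacketList **head, AVPacket *pkt)
{
    AVPacketList *pl = av_mallocz(sizeof(*pl));
    if (!pl)
        return AVERROR(ENOMEM);

    pl->pkt       = *pkt;       /* shallow copy                             */
    pkt->destruct = NULL;       /* caller's packet must not free the data   */
    av_dup_packet(&pl->pkt);    /* duplicate non-owned payloads so the
                                   queued copy owns its memory              */

    pl->next = *head;           /* simplified: prepend instead of
                                   interleaving by dts                      */
    *head    = pl;
    return 0;
}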