/* memory handling */
-void av_destruct_packet(AVPacket *pkt)
-{
- av_free(pkt->data);
- pkt->data = NULL; pkt->size = 0;
-}
-
-void av_init_packet(AVPacket *pkt)
-{
- pkt->pts = AV_NOPTS_VALUE;
- pkt->dts = AV_NOPTS_VALUE;
- pkt->pos = -1;
- pkt->duration = 0;
- pkt->convergence_duration = 0;
- pkt->flags = 0;
- pkt->stream_index = 0;
- pkt->destruct= av_destruct_packet_nofree;
-}
-
-int av_new_packet(AVPacket *pkt, int size)
-{
- uint8_t *data;
- if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
- return AVERROR(ENOMEM);
- data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
- if (!data)
- return AVERROR(ENOMEM);
- memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
-
- av_init_packet(pkt);
- pkt->data = data;
- pkt->size = size;
- pkt->destruct = av_destruct_packet;
- return 0;
-}
int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
{
if(ret<=0)
av_free_packet(pkt);
else
- pkt->size= ret;
+ av_shrink_packet(pkt, ret);
return ret;
}
-int av_dup_packet(AVPacket *pkt)
-{
- if (((pkt->destruct == av_destruct_packet_nofree) || (pkt->destruct == NULL)) && pkt->data) {
- uint8_t *data;
- /* We duplicate the packet and don't forget to add the padding again. */
- if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
- return AVERROR(ENOMEM);
- data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
- if (!data) {
- return AVERROR(ENOMEM);
- }
- memcpy(data, pkt->data, pkt->size);
- memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
- pkt->data = data;
- pkt->destruct = av_destruct_packet;
- }
- return 0;
-}
int av_filename_number_test(const char *filename)
{
// av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
/* interpolate PTS and DTS if they are not present */
- if(delay==0 || (delay==1 && pc)){
+ //We skip H264 currently because delay and has_b_frames are not reliably set
+ if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
if (presentation_delayed) {
/* DTS = decompression timestamp */
/* PTS = presentation timestamp */
FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
if(pkt->dts == AV_NOPTS_VALUE)
pkt->dts= st->pts_buffer[0];
- if(delay>1){
+ if(st->codec->codec_id == CODEC_ID_H264){ //we skipped it above, so we try here
update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
}
if(pkt->dts > st->cur_dts)
pkt->convergence_duration = pc->convergence_duration;
}
-void av_destruct_packet_nofree(AVPacket *pkt)
-{
- pkt->data = NULL; pkt->size = 0;
-}
static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
{
*pkt = st->cur_pkt; st->cur_pkt.data= NULL;
compute_pkt_fields(s, st, NULL, pkt);
s->cur_st = NULL;
+ if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
+ (pkt->flags & PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
+ ff_reduce_index(s, st->index);
+ av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
+ }
break;
} else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
- len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
- st->cur_ptr, st->cur_len,
- st->cur_pkt.pts, st->cur_pkt.dts);
+ len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
+ st->cur_ptr, st->cur_len,
+ st->cur_pkt.pts, st->cur_pkt.dts,
+ st->cur_pkt.pos);
st->cur_pkt.pts = AV_NOPTS_VALUE;
st->cur_pkt.dts = AV_NOPTS_VALUE;
/* increment read pointer */
/* return packet if any */
if (pkt->size) {
- pkt->pos = st->cur_pkt.pos; // Isn't quite accurate but close.
got_packet:
pkt->duration = 0;
pkt->stream_index = st->index;
pkt->pts = st->parser->pts;
pkt->dts = st->parser->dts;
- pkt->destruct = av_destruct_packet_nofree;
+ pkt->pos = st->parser->pos;
+ pkt->destruct = NULL;
compute_pkt_fields(s, st, st->parser, pkt);
if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
for(i = 0; i < s->nb_streams; i++) {
st = s->streams[i];
if (st->parser && st->need_parsing) {
- av_parser_parse(st->parser, st->codec,
+ av_parser_parse2(st->parser, st->codec,
&pkt->data, &pkt->size,
NULL, 0,
- AV_NOPTS_VALUE, AV_NOPTS_VALUE);
+ AV_NOPTS_VALUE, AV_NOPTS_VALUE,
+ AV_NOPTS_VALUE);
if (pkt->size)
goto got_packet;
}
int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
AVInputFormat *avif= s->iformat;
- int64_t pos_min, pos_max, pos, pos_limit;
+ int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
int64_t ts_min, ts_max, ts;
int index;
AVStream *st;
pos_min= e->pos;
ts_min= e->timestamp;
#ifdef DEBUG_SEEK
- av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
- pos_min,ts_min);
+ av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
+ pos_min,ts_min);
#endif
}else{
assert(index==0);
ts_max= e->timestamp;
pos_limit= pos_max - e->min_distance;
#ifdef DEBUG_SEEK
- av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
- pos_max,pos_limit, ts_max);
+ av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
+ pos_max,pos_limit, ts_max);
#endif
}
}
else
no_change=0;
#ifdef DEBUG_SEEK
-av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
+ av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
+ pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit,
+ start_pos, no_change);
#endif
if(ts == AV_NOPTS_VALUE){
av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
return ret;
av_update_cur_dts(s, st, ie->timestamp);
}else{
- if ((ret = url_fseek(s->pb, 0, SEEK_SET)) < 0)
+ if ((ret = url_fseek(s->pb, s->data_offset, SEEK_SET)) < 0)
return ret;
}
for(i=0;; i++) {
return av_seek_frame_generic(s, stream_index, timestamp, flags);
}
+int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
+{
+ // New-style seeking entry point: seek to ts, accepting any landing point
+ // within [min_ts, max_ts]. Reject requests where the target lies outside
+ // its own acceptance window.
+ if(min_ts > ts || max_ts < ts)
+ return -1;
+
+ // Drop all buffered/parsed packets so demuxing restarts cleanly at the new position.
+ av_read_frame_flush(s);
+
+ // Preferred path: the demuxer implements the new seeking API directly.
+ if (s->iformat->read_seek2)
+ return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
+
+ if(s->iformat->read_timestamp){
+ //try to seek via read_timestamp()
+ }
+
+ //Fallback to the old API if the new one is not implemented but the old one is.
+ //Note the old API has somewhat different semantics.
+ // The "|| 1" makes this fallback unconditional for now (av_seek_frame itself
+ // falls back to generic seeking); the flag picks the old API's single
+ // direction by whichever side of ts has the larger tolerance.
+ if(s->iformat->read_seek || 1)
+ return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));
+
+ // try some generic seek like av_seek_frame_generic() but with new ts semantics
+}
+
/*******************************************************/
/**
return enc->codec_id != CODEC_ID_NONE && val != 0;
}
-static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
+static int try_decode_frame(AVStream *st, AVPacket *avpkt)
{
int16_t *samples;
AVCodec *codec;
int got_picture, data_size, ret=0;
AVFrame picture;
- if(!st->codec->codec){
- codec = avcodec_find_decoder(st->codec->codec_id);
- if (!codec)
- return -1;
- ret = avcodec_open(st->codec, codec);
- if (ret < 0)
- return ret;
- }
+ if(!st->codec->codec){
+ codec = avcodec_find_decoder(st->codec->codec_id);
+ if (!codec)
+ return -1;
+ ret = avcodec_open(st->codec, codec);
+ if (ret < 0)
+ return ret;
+ }
- if(!has_codec_parameters(st->codec)){
- switch(st->codec->codec_type) {
- case CODEC_TYPE_VIDEO:
- ret = avcodec_decode_video(st->codec, &picture,
- &got_picture, data, size);
- break;
- case CODEC_TYPE_AUDIO:
- data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
- samples = av_malloc(data_size);
- if (!samples)
- goto fail;
- ret = avcodec_decode_audio2(st->codec, samples,
- &data_size, data, size);
- av_free(samples);
- break;
- default:
- break;
+ if(!has_codec_parameters(st->codec)){
+ switch(st->codec->codec_type) {
+ case CODEC_TYPE_VIDEO:
+ avcodec_get_frame_defaults(&picture);
+ ret = avcodec_decode_video2(st->codec, &picture,
+ &got_picture, avpkt);
+ break;
+ case CODEC_TYPE_AUDIO:
+ data_size = FFMAX(avpkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
+ samples = av_malloc(data_size);
+ if (!samples)
+ goto fail;
+ ret = avcodec_decode_audio3(st->codec, samples,
+ &data_size, avpkt);
+ av_free(samples);
+ break;
+ default:
+ break;
+ }
}
- }
fail:
return ret;
}
read_size += pkt->size;
st = ic->streams[pkt->stream_index];
- if(codec_info_nb_frames[st->index]>1)
+ if(codec_info_nb_frames[st->index]>1) {
+ if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration)
+ break;
codec_info_duration[st->index] += pkt->duration;
+ }
if (pkt->duration != 0)
codec_info_nb_frames[st->index]++;
int64_t last= last_dts[index];
int64_t duration= pkt->dts - last;
- if (st->codec->ticks_per_frame == 2 &&
- st->parser &&
- st->parser->repeat_pict == 0)
- duration *= 2; // two fields are needed per frame
-
if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
double dur= duration * av_q2d(st->time_base);
st->codec->codec_id == CODEC_ID_PPM ||
st->codec->codec_id == CODEC_ID_SHORTEN ||
(st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
- try_decode_frame(st, pkt->data, pkt->size);
+ try_decode_frame(st, pkt);
- if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
- break;
- }
count++;
}
av_free(st->index_entries);
av_free(st->codec->extradata);
av_free(st->codec);
+#if LIBAVFORMAT_VERSION_INT < (53<<16)
av_free(st->filename);
+#endif
av_free(st->priv_data);
av_free(st);
}
for(i=s->nb_programs-1; i>=0; i--) {
+#if LIBAVFORMAT_VERSION_INT < (53<<16)
av_freep(&s->programs[i]->provider_name);
av_freep(&s->programs[i]->name);
+#endif
av_metadata_free(&s->programs[i]->metadata);
av_freep(&s->programs[i]->stream_index);
av_freep(&s->programs[i]);
flush_packet_queue(s);
av_freep(&s->priv_data);
while(s->nb_chapters--) {
+#if LIBAVFORMAT_VERSION_INT < (53<<16)
av_free(s->chapters[s->nb_chapters]->title);
+#endif
av_metadata_free(&s->chapters[s->nb_chapters]->metadata);
av_free(s->chapters[s->nb_chapters]);
}
return NULL;
dynarray_add(&s->chapters, &s->nb_chapters, chapter);
}
+#if LIBAVFORMAT_VERSION_INT < (53<<16)
av_free(chapter->title);
- chapter->title = av_strdup(title);
+#endif
+ av_metadata_set(&chapter->metadata, "title", title);
chapter->id = id;
chapter->time_base= time_base;
chapter->start = start;
this_pktl = av_mallocz(sizeof(AVPacketList));
this_pktl->pkt= *pkt;
- if(pkt->destruct == av_destruct_packet)
- pkt->destruct= NULL; // not shared -> must keep original from being freed
- else
- av_dup_packet(&this_pktl->pkt); //shared -> must dup
+ pkt->destruct= NULL; // do not free original but only the copy
+ av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-allocated memory
next_point = &s->packet_buffer;
while(*next_point){
int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
AVStream *st = ic->streams[i];
int g = av_gcd(st->time_base.num, st->time_base.den);
+ AVMetadataTag *lang = av_metadata_get(st->metadata, "language", NULL, 0);
avcodec_string(buf, sizeof(buf), st->codec, is_output);
av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
/* the pid is an important information, so we display it */
/* XXX: add a generic system */
if (flags & AVFMT_SHOW_IDS)
av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
- if (strlen(st->language) > 0)
- av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
+ if (lang)
+ av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
av_log(NULL, AV_LOG_INFO, ": %s", buf);
if (st->sample_aspect_ratio.num && // default
if(ic->nb_programs) {
int j, k;
for(j=0; j<ic->nb_programs; j++) {
+ AVMetadataTag *name = av_metadata_get(ic->programs[j]->metadata,
+ "name", NULL, 0);
av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
- ic->programs[j]->name ? ic->programs[j]->name : "");
+ name ? name->value : "");
for(k=0; k<ic->programs[j]->nb_stream_indexes; k++)
dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
}