unsigned int sub_mb_type;
int i8, i4;
+ assert(h->ref_list[1][0].reference&3);
+
#define MB_TYPE_16x16_OR_INTRA (MB_TYPE_16x16|MB_TYPE_INTRA4x4|MB_TYPE_INTRA16x16|MB_TYPE_INTRA_PCM)
if(IS_INTERLACED(h->ref_list[1][0].mb_type[mb_xy])){ // AFL/AFR/FR/FL -> AFL/FL
}
bufidx = h->nal_unit_type == NAL_DPC ? 1 : 0; // use second escape buffer for inter data
- h->rbsp_buffer[bufidx]= av_fast_realloc(h->rbsp_buffer[bufidx], &h->rbsp_buffer_size[bufidx], length+FF_INPUT_BUFFER_PADDING_SIZE);
+ av_fast_malloc(&h->rbsp_buffer[bufidx], &h->rbsp_buffer_size[bufidx], length+FF_INPUT_BUFFER_PADDING_SIZE);
dst= h->rbsp_buffer[bufidx];
if (dst == NULL){
av_freep(&h->mb2b_xy);
av_freep(&h->mb2b8_xy);
- for(i = 0; i < h->s.avctx->thread_count; i++) {
+ for(i = 0; i < MAX_THREADS; i++) {
hx = h->thread_context[i];
if(!hx) continue;
av_freep(&hx->top_borders[1]);
else
avctx->pix_fmt= avctx->get_format(avctx, avctx->codec->pix_fmts);
avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);
+ avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
decode_init_vlc();
for(index= 0; index < h->ref_count[list]; index++){
if(!h->ref_list[list][index].data[0]){
av_log(h->s.avctx, AV_LOG_ERROR, "Missing reference picture\n");
- h->ref_list[list][index]= s->current_picture; //FIXME this is not a sensible solution
+ if(h->default_ref_list[list][0].data[0])
+ h->ref_list[list][index]= h->default_ref_list[list][0];
+ else
+ return -1;
}
}
}
*/
static int execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
MpegEncContext * const s = &h->s;
- int i, j;
+ int i, av_uninit(j);
int current_ref_assigned=0;
Picture *av_uninit(pic);
av_log(h->s.avctx, AV_LOG_DEBUG, "no mmco here\n");
for(i=0; i<mmco_count; i++){
- int structure, av_uninit(frame_num);
+ int av_uninit(structure), av_uninit(frame_num);
if(s->avctx->debug&FF_DEBUG_MMCO)
av_log(h->s.avctx, AV_LOG_DEBUG, "mmco:%d %d %d\n", h->mmco[i].opcode, h->mmco[i].short_pic_num, h->mmco[i].long_arg);
}
}
+static void field_end(H264Context *h){
+ MpegEncContext * const s = &h->s;
+ AVCodecContext * const avctx= s->avctx;
+ s->mb_y= 0;
+
+ s->current_picture_ptr->qscale_type= FF_QSCALE_TYPE_H264;
+ s->current_picture_ptr->pict_type= s->pict_type;
+
+ if (CONFIG_H264_VDPAU_DECODER && s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
+ ff_vdpau_h264_set_reference_frames(s);
+
+ if(!s->dropable) {
+ execute_ref_pic_marking(h, h->mmco, h->mmco_index);
+ h->prev_poc_msb= h->poc_msb;
+ h->prev_poc_lsb= h->poc_lsb;
+ }
+ h->prev_frame_num_offset= h->frame_num_offset;
+ h->prev_frame_num= h->frame_num;
+
+ if (avctx->hwaccel) {
+ if (avctx->hwaccel->end_frame(avctx) < 0)
+ av_log(avctx, AV_LOG_ERROR, "hardware accelerator failed to decode picture\n");
+ }
+
+ if (CONFIG_H264_VDPAU_DECODER && s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
+ ff_vdpau_h264_picture_complete(s);
+
+ /*
+ * FIXME: Error handling code does not seem to support interlaced
+ * when slices span multiple rows
+ * The ff_er_add_slice calls don't work right for bottom
+ * fields; they cause massive erroneous error concealing
+ * Error marking covers both fields (top and bottom).
+ * This causes a mismatched s->error_count
+ * and a bad error table. Further, the error count goes to
+ * INT_MAX when called for bottom field, because mb_y is
+ * past end by one (callers fault) and resync_mb_y != 0
+ * causes problems for the first MB line, too.
+ */
+ if (!FIELD_PICTURE)
+ ff_er_frame_end(s);
+
+ MPV_frame_end(s);
+
+ h->current_slice=0;
+}
+
/**
* Replicates H264 "master" context to thread contexts.
*/
first_mb_in_slice= get_ue_golomb(&s->gb);
- if((s->flags2 & CODEC_FLAG2_CHUNKS) && first_mb_in_slice == 0){
+ if(first_mb_in_slice == 0){ //FIXME better field boundary detection
+ if(h0->current_slice && FIELD_PICTURE){
+ field_end(h);
+ }
+
h0->current_slice = 0;
if (!s0->first_field)
s->current_picture_ptr= NULL;
int qp = s->current_picture.qscale_table[mb_xy];
if(qp <= qp_thresh
&& (mb_x == 0 || ((qp + s->current_picture.qscale_table[mb_xy-1] + 1)>>1) <= qp_thresh)
- && (mb_y == 0 || ((qp + s->current_picture.qscale_table[h->top_mb_xy] + 1)>>1) <= qp_thresh)){
+ && (h->top_mb_xy < 0 || ((qp + s->current_picture.qscale_table[h->top_mb_xy] + 1)>>1) <= qp_thresh)){
return;
}
}
if(h->sps.pic_struct_present_flag){
unsigned int i, num_clock_ts;
h->sei_pic_struct = get_bits(&s->gb, 4);
+ h->sei_ct_type = 0;
if (h->sei_pic_struct > SEI_PIC_STRUCT_FRAME_TRIPLING)
return -1;
for (i = 0 ; i < num_clock_ts ; i++){
if(get_bits(&s->gb, 1)){ /* clock_timestamp_flag */
unsigned int full_timestamp_flag;
- skip_bits(&s->gb, 2); /* ct_type */
+ h->sei_ct_type |= 1<<get_bits(&s->gb, 2);
skip_bits(&s->gb, 1); /* nuit_field_based_flag */
skip_bits(&s->gb, 5); /* counting_type */
full_timestamp_flag = get_bits(&s->gb, 1);
}
if(get_bits1(&s->gb)){ /* chroma_location_info_present_flag */
- get_ue_golomb(&s->gb); /* chroma_sample_location_type_top_field */
+ s->avctx->chroma_sample_location = get_ue_golomb(&s->gb)+1; /* chroma_sample_location_type_top_field */
get_ue_golomb(&s->gb); /* chroma_sample_location_type_bottom_field */
}
decode_vui_parameters(h, sps);
if(s->avctx->debug&FF_DEBUG_PICT_INFO){
- av_log(h->s.avctx, AV_LOG_DEBUG, "sps:%u profile:%d/%d poc:%d ref:%d %dx%d %s %s crop:%d/%d/%d/%d %s %s\n",
+ av_log(h->s.avctx, AV_LOG_DEBUG, "sps:%u profile:%d/%d poc:%d ref:%d %dx%d %s %s crop:%d/%d/%d/%d %s %s %d/%d\n",
sps_id, sps->profile_idc, sps->level_idc,
sps->poc_type,
sps->ref_frame_count,
sps->crop_left, sps->crop_right,
sps->crop_top, sps->crop_bottom,
sps->vui_parameters_present_flag ? "VUI" : "",
- ((const char*[]){"Gray","420","422","444"})[sps->chroma_format_idc]
+ ((const char*[]){"Gray","420","422","444"})[sps->chroma_format_idc],
+ sps->timing_info_present_flag ? sps->num_units_in_tick : 0,
+ sps->timing_info_present_flag ? sps->time_scale : 0
);
}
int buf_index=0;
H264Context *hx; ///< thread context
int context_count = 0;
+ int next_avc= h->is_avc ? 0 : buf_size;
h->max_contexts = avctx->thread_count;
#if 0
int i, nalsize = 0;
int err;
- if(h->is_avc) {
+ if(buf_index >= next_avc) {
if(buf_index >= buf_size) break;
nalsize = 0;
for(i = 0; i < h->nal_length_size; i++)
break;
}
}
+ next_avc= buf_index + nalsize;
} else {
// start code prefix search
for(; buf_index + 3 < buf_size; buf_index++){
hx = h->thread_context[context_count];
- ptr= ff_h264_decode_nal(hx, buf + buf_index, &dst_length, &consumed, h->is_avc ? nalsize : buf_size - buf_index);
+ ptr= ff_h264_decode_nal(hx, buf + buf_index, &dst_length, &consumed, next_avc - buf_index);
if (ptr==NULL || dst_length < 0){
return -1;
}
av_log(h->s.avctx, AV_LOG_DEBUG, "NAL %d at %d/%d length %d\n", hx->nal_unit_type, buf_index, buf_size, dst_length);
}
- if (h->is_avc && (nalsize != consumed)){
+ if (h->is_avc && (nalsize != consumed) && nalsize){
int i, debug_level = AV_LOG_DEBUG;
for (i = consumed; i < nalsize; i++)
if (buf[buf_index+i])
debug_level = AV_LOG_ERROR;
av_log(h->s.avctx, debug_level, "AVC: Consumed only %d bytes instead of %d\n", consumed, nalsize);
- consumed= nalsize;
}
buf_index += consumed;
static int decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
- const uint8_t *buf, int buf_size)
+ AVPacket *avpkt)
{
+ const uint8_t *buf = avpkt->data;
+ int buf_size = avpkt->size;
H264Context *h = avctx->priv_data;
MpegEncContext *s = &h->s;
AVFrame *pict = data;
Picture *cur = s->current_picture_ptr;
int i, pics, cross_idr, out_of_order, out_idx;
- s->mb_y= 0;
-
- s->current_picture_ptr->qscale_type= FF_QSCALE_TYPE_H264;
- s->current_picture_ptr->pict_type= s->pict_type;
-
- if (CONFIG_H264_VDPAU_DECODER && s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
- ff_vdpau_h264_set_reference_frames(s);
-
- if(!s->dropable) {
- execute_ref_pic_marking(h, h->mmco, h->mmco_index);
- h->prev_poc_msb= h->poc_msb;
- h->prev_poc_lsb= h->poc_lsb;
- }
- h->prev_frame_num_offset= h->frame_num_offset;
- h->prev_frame_num= h->frame_num;
-
- if (avctx->hwaccel) {
- if (avctx->hwaccel->end_frame(avctx) < 0)
- av_log(avctx, AV_LOG_ERROR, "hardware accelerator failed to decode picture\n");
- }
-
- if (CONFIG_H264_VDPAU_DECODER && s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
- ff_vdpau_h264_picture_complete(s);
-
- /*
- * FIXME: Error handling code does not seem to support interlaced
- * when slices span multiple rows
- * The ff_er_add_slice calls don't work right for bottom
- * fields; they cause massive erroneous error concealing
- * Error marking covers both fields (top and bottom).
- * This causes a mismatched s->error_count
- * and a bad error table. Further, the error count goes to
- * INT_MAX when called for bottom field, because mb_y is
- * past end by one (callers fault) and resync_mb_y != 0
- * causes problems for the first MB line, too.
- */
- if (!FIELD_PICTURE)
- ff_er_frame_end(s);
-
- MPV_frame_end(s);
+ field_end(h);
if (cur->field_poc[0]==INT_MAX || cur->field_poc[1]==INT_MAX) {
/* Wait for second field. */
/* Signal interlacing information externally. */
/* Prioritize picture timing SEI information over used decoding process if it exists. */
+ if (h->sei_ct_type)
+ cur->interlaced_frame = (h->sei_ct_type & (1<<1)) != 0;
+ else
+ cur->interlaced_frame = FIELD_OR_MBAFF_PICTURE;
+
if(h->sps.pic_struct_present_flag){
switch (h->sei_pic_struct)
{
- case SEI_PIC_STRUCT_FRAME:
- cur->interlaced_frame = 0;
- break;
- case SEI_PIC_STRUCT_TOP_FIELD:
- case SEI_PIC_STRUCT_BOTTOM_FIELD:
- case SEI_PIC_STRUCT_TOP_BOTTOM:
- case SEI_PIC_STRUCT_BOTTOM_TOP:
- cur->interlaced_frame = 1;
- break;
case SEI_PIC_STRUCT_TOP_BOTTOM_TOP:
case SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM:
// Signal the possibility of telecined film externally (pic_struct 5,6)
// From these hints, let the applications decide if they apply deinterlacing.
cur->repeat_pict = 1;
- cur->interlaced_frame = FIELD_OR_MBAFF_PICTURE;
break;
case SEI_PIC_STRUCT_FRAME_DOUBLING:
// Force progressive here, as doubling interlaced frame is a bad idea.
#endif /* TEST */
-static av_cold int decode_end(AVCodecContext *avctx)
+av_cold void ff_h264_free_context(H264Context *h)
{
- H264Context *h = avctx->priv_data;
- MpegEncContext *s = &h->s;
int i;
av_freep(&h->rbsp_buffer[0]);
for(i = 0; i < MAX_PPS_COUNT; i++)
av_freep(h->pps_buffers + i);
+}
+
+static av_cold int decode_end(AVCodecContext *avctx)
+{
+ H264Context *h = avctx->priv_data;
+ MpegEncContext *s = &h->s;
+
+ ff_h264_free_context(h);
MPV_common_end(s);