2 * VC-1 and WMV3 decoder
3 * Copyright (c) 2006-2007 Konstantin Shishkov
4 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
6 * This file is part of FFmpeg.
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * VC-1 and WMV3 decoder
32 #include "mpegvideo.h"
34 #include "vc1acdata.h"
/* DC-coefficient and intra-MB VLC tables shared with the MSMPEG-4/WMV2
 * decoder; defined in the msmpeg4 code and only declared extern here. */
39 extern const uint32_t ff_table0_dc_lum[120][2], ff_table1_dc_lum[120][2];
40 extern const uint32_t ff_table0_dc_chroma[120][2], ff_table1_dc_chroma[120][2];
41 extern VLC ff_msmp4_dc_luma_vlc[2], ff_msmp4_dc_chroma_vlc[2];
42 #define MB_INTRA_VLC_BITS 9
43 extern VLC ff_msmp4_mb_i_vlc;
44 extern const uint16_t ff_msmp4_mb_i_table[64][2];
/* NOTE(review): tentative definition; not referenced anywhere in the visible
 * part of this file -- confirm it is actually used before removing. */
47 static const uint16_t table_mb_intra[64][2];
49 /** Markers used in VC-1 AP frame data */
/* NOTE(review): this listing shows only fragments of the following enums;
 * several members (and the enum openers/closers) are not visible here. */
52 VC1_CODE_RES0 = 0x00000100,
53 VC1_CODE_ENDOFSEQ = 0x0000010A,
62 /** Available Profiles */
67 PROFILE_COMPLEX, ///< TODO: WMV9 specific
72 /** Sequence quantizer mode */
75 QUANT_FRAME_IMPLICIT, ///< Implicitly specified at frame level
76 QUANT_FRAME_EXPLICIT, ///< Explicitly specified at frame level
77 QUANT_NON_UNIFORM, ///< Non-uniform quant used for all frames
78 QUANT_UNIFORM ///< Uniform quant used for all frames
82 /** Where quant can be changed */
86 DQPROFILE_DOUBLE_EDGES,
87 DQPROFILE_SINGLE_EDGE,
92 /** @name Where quant can be changed
103 /** Which pair of edges is quantized with ALTPQUANT */
106 DQDOUBLE_BEDGE_TOPLEFT,
107 DQDOUBLE_BEDGE_TOPRIGHT,
108 DQDOUBLE_BEDGE_BOTTOMRIGHT,
109 DQDOUBLE_BEDGE_BOTTOMLEFT
113 /** MV modes for P frames */
116 MV_PMODE_1MV_HPEL_BILIN,
120 MV_PMODE_INTENSITY_COMP
124 /** @name MV types for B frames */
129 BMV_TYPE_INTERPOLATED
133 /** @name Block types for P/B frames */
/* Transform sizes used by the variable-size transform (VSTRANSFORM). */
135 enum TransformTypes {
139 TT_8X4, //Both halves
142 TT_4X8, //Both halves
147 /** Table for conversion between TTBLK and TTMB */
/* Outer index: selected by tt_index; inner index: decoded TTBLK code.
 * NOTE(review): closing brace of this initializer is not visible in this
 * listing. */
148 static const int ttblk_to_tt[3][8] = {
149 { TT_8X4, TT_4X8, TT_8X8, TT_4X4, TT_8X4_TOP, TT_8X4_BOTTOM, TT_4X8_RIGHT, TT_4X8_LEFT },
150 { TT_8X8, TT_4X8_RIGHT, TT_4X8_LEFT, TT_4X4, TT_8X4, TT_4X8, TT_8X4_BOTTOM, TT_8X4_TOP },
151 { TT_8X8, TT_4X8, TT_4X4, TT_8X4_BOTTOM, TT_4X8_RIGHT, TT_4X8_LEFT, TT_8X4, TT_8X4_TOP }
154 static const int ttfrm_to_tt[4] = { TT_8X8, TT_8X4, TT_4X8, TT_4X4 };
156 /** MV P mode - the 5th element is only used for mode 1 */
/* Row selected by PQUANT range; column is the decoded MVMODE code. */
157 static const uint8_t mv_pmode_table[2][5] = {
158 { MV_PMODE_1MV_HPEL_BILIN, MV_PMODE_1MV, MV_PMODE_1MV_HPEL, MV_PMODE_INTENSITY_COMP, MV_PMODE_MIXED_MV },
159 { MV_PMODE_1MV, MV_PMODE_MIXED_MV, MV_PMODE_1MV_HPEL, MV_PMODE_INTENSITY_COMP, MV_PMODE_1MV_HPEL_BILIN }
/* Same ordering as mv_pmode_table but without the intensity-compensation
 * entry -- NOTE(review): presumably used for the secondary MVMODE2 element;
 * confirm against the caller. */
161 static const uint8_t mv_pmode_table2[2][4] = {
162 { MV_PMODE_1MV_HPEL_BILIN, MV_PMODE_1MV, MV_PMODE_1MV_HPEL, MV_PMODE_MIXED_MV },
163 { MV_PMODE_1MV, MV_PMODE_MIXED_MV, MV_PMODE_1MV_HPEL, MV_PMODE_1MV_HPEL_BILIN }
166 /** One more frame type */
/** FRAMERATENR code -> frame-rate numerator value. */
static const int fps_nr[5] = { 24, 25, 30, 50, 60 };
/** FRAMERATEDR code -> frame-rate denominator value. */
static const int fps_dr[2] = { 1000, 1001 };
/** PQINDEX -> PQUANT mapping, one row per quantizer signalling mode.
 * NOTE(review): closing braces of the rows/initializer are not visible in
 * this listing. */
171 static const uint8_t pquant_table[3][32] = {
172 { /* Implicit quantizer */
173 0, 1, 2, 3, 4, 5, 6, 7, 8, 6, 7, 8, 9, 10, 11, 12,
174 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 29, 31
176 { /* Explicit quantizer, pquantizer uniform */
177 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
178 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
180 { /* Explicit quantizer, pquantizer non-uniform */
181 0, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
182 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 29, 31
186 /** @name VC-1 VLC tables and defines
187 * @todo TODO move this into the context
/* Each *_VLC_BITS define is the lookup depth passed to init_vlc() for the
 * matching table below; arrays hold one VLC per coding/table set. */
190 #define VC1_BFRACTION_VLC_BITS 7
191 static VLC vc1_bfraction_vlc;
192 #define VC1_IMODE_VLC_BITS 4
193 static VLC vc1_imode_vlc;
194 #define VC1_NORM2_VLC_BITS 3
195 static VLC vc1_norm2_vlc;
196 #define VC1_NORM6_VLC_BITS 9
197 static VLC vc1_norm6_vlc;
198 /* Could be optimized, one table only needs 8 bits */
199 #define VC1_TTMB_VLC_BITS 9 //12
200 static VLC vc1_ttmb_vlc[3];
201 #define VC1_MV_DIFF_VLC_BITS 9 //15
202 static VLC vc1_mv_diff_vlc[4];
203 #define VC1_CBPCY_P_VLC_BITS 9 //14
204 static VLC vc1_cbpcy_p_vlc[4];
205 #define VC1_4MV_BLOCK_PATTERN_VLC_BITS 6
206 static VLC vc1_4mv_block_pattern_vlc[4];
207 #define VC1_TTBLK_VLC_BITS 5
208 static VLC vc1_ttblk_vlc[3];
209 #define VC1_SUBBLKPAT_VLC_BITS 6
210 static VLC vc1_subblkpat_vlc[3];
/* AC coefficient VLCs, one per coding set (initialized in vc1_init_common). */
212 static VLC vc1_ac_coeff_table[8];
/* NOTE(review): fragment of the AC coding-set enum; the remaining members
 * and the enum braces are not visible in this listing. */
216 CS_HIGH_MOT_INTRA = 0,
226 /** @name Overlap conditions for Advanced Profile */
237 * @fixme Change size wherever another size is more efficient
238 * Many members are only used for Advanced Profile
/* Decoder context: embeds/extends the shared MpegEncContext state (accessed
 * throughout this file as v->s). NOTE(review): the struct's first member(s)
 * and closing "} VC1Context;" are not visible in this listing. */
240 typedef struct VC1Context{
245 /** Simple/Main Profile sequence header */
247 int res_sm; ///< reserved, 2b
248 int res_x8; ///< reserved
249 int multires; ///< frame-level RESPIC syntax element present
250 int res_fasttx; ///< reserved, always 1
251 int res_transtab; ///< reserved, always 0
252 int rangered; ///< RANGEREDFRM (range reduction) syntax element present
254 int res_rtm_flag; ///< reserved, set to 1
255 int reserved; ///< reserved
258 /** Advanced Profile */
260 int level; ///< 3bits, for Advanced/Simple Profile, provided by TS layer
261 int chromaformat; ///< 2bits, 2=4:2:0, only defined
262 int postprocflag; ///< Per-frame processing suggestion flag present
263 int broadcast; ///< TFF/RFF present
264 int interlace; ///< Progressive/interlaced (RPTFTM syntax element)
265 int tfcntrflag; ///< TFCNTR present
266 int panscanflag; ///< NUMPANSCANWIN, TOPLEFT{X,Y}, BOTRIGHT{X,Y} present
267 int extended_dmv; ///< Additional extended dmv range at P/B frame-level
268 int color_prim; ///< 8bits, chroma coordinates of the color primaries
269 int transfer_char; ///< 8bits, Opto-electronic transfer characteristics
270 int matrix_coef; ///< 8bits, Color primaries->YCbCr transform matrix
271 int hrd_param_flag; ///< Presence of Hypothetical Reference
272 ///< Decoder parameters
273 int psf; ///< Progressive Segmented Frame
276 /** Sequence header data for all Profiles
277 * TODO: choose between ints, uint8_ts and monobit flags
280 int profile; ///< 2bits, Profile
281 int frmrtq_postproc; ///< 3bits,
282 int bitrtq_postproc; ///< 5bits, quantized framerate-based postprocessing strength
283 int fastuvmc; ///< Rounding of qpel vector to hpel ? (not in Simple)
284 int extended_mv; ///< Ext MV in P/B (not in Simple)
285 int dquant; ///< How qscale varies with MBs, 2bits (not in Simple)
286 int vstransform; ///< variable-size [48]x[48] transform type + info
287 int overlap; ///< overlapped transforms in use
288 int quantizer_mode; ///< 2bits, quantizer mode used for sequence, see QUANT_*
289 int finterpflag; ///< INTERPFRM present
292 /** Frame decoding info for all profiles */
294 uint8_t mv_mode; ///< MV coding monde
295 uint8_t mv_mode2; ///< Secondary MV coding mode (B frames)
296 int k_x; ///< Number of bits for MVs (depends on MV range)
297 int k_y; ///< Number of bits for MVs (depends on MV range)
298 int range_x, range_y; ///< MV range
299 uint8_t pq, altpq; ///< Current/alternate frame quantizer scale
300 /** pquant parameters */
307 /** AC coding set indexes
308 * @see 8.1.1.10, p(1)10
311 int c_ac_table_index; ///< Chroma index from ACFRM element
312 int y_ac_table_index; ///< Luma index from AC2FRM element
314 int ttfrm; ///< Transform type info present at frame level
315 uint8_t ttmbf; ///< Transform type flag
316 uint8_t ttblk4x4; ///< Value of ttblk which indicates a 4x4 transform
317 int codingset; ///< index of current table set from 11.8 to use for luma block decoding
318 int codingset2; ///< index of current table set from 11.8 to use for chroma block decoding
319 int pqindex; ///< raw pqindex used in coding set selection
320 int a_avail, c_avail; ///< availability of above/left neighbours -- NOTE(review): confirm naming (a=above, c=left) against usage
321 uint8_t *mb_type_base, *mb_type[3]; ///< base allocation and per-plane MB type pointers
324 /** Luma compensation parameters */
329 int16_t bfraction; ///< Relative position % anchors=> how to scale MVs
330 uint8_t halfpq; ///< Uniform quant over image and qp+.5
331 uint8_t respic; ///< Frame-level flag for resized images
332 int buffer_fullness; ///< HRD info
334 * -# 0 -> [-64n 63.f] x [-32, 31.f]
335 * -# 1 -> [-128, 127.f] x [-64, 63.f]
336 * -# 2 -> [-512, 511.f] x [-128, 127.f]
337 * -# 3 -> [-1024, 1023.f] x [-256, 255.f]
340 uint8_t pquantizer; ///< Uniform (over sequence) quantizer in use
341 VLC *cbpcy_vlc; ///< CBPCY VLC table
342 int tt_index; ///< Index for Transform Type tables
343 uint8_t* mv_type_mb_plane; ///< bitplane for mv_type == (4MV)
344 uint8_t* direct_mb_plane; ///< bitplane for "direct" MBs
345 int mv_type_is_raw; ///< mv type mb plane is not coded
346 int dmb_is_raw; ///< direct mb plane is raw
347 int skip_is_raw; ///< skip mb plane is not coded
348 uint8_t luty[256], lutuv[256]; // lookup tables used for intensity compensation
349 int use_ic; ///< use intensity compensation in B-frames
350 int rnd; ///< rounding control
352 /** Frame decoding info for S/M profiles only */
354 uint8_t rangeredfrm; ///< out_sample = CLIP((in_sample-128)*2+128)
358 /** Frame decoding info for Advanced profile */
360 uint8_t fcm; ///< 0->Progressive, 2->Frame-Interlace, 3->Field-Interlace
361 uint8_t numpanscanwin;
363 uint8_t rptfrm, tff, rff;
366 uint16_t bottomrightx;
367 uint16_t bottomrighty;
370 int hrd_num_leaky_buckets;
371 uint8_t bit_rate_exponent;
372 uint8_t buffer_size_exponent;
373 uint8_t* acpred_plane; ///< AC prediction flags bitplane
375 uint8_t* over_flags_plane; ///< Overflags bitplane
378 uint16_t *hrd_rate, *hrd_buffer;
379 uint8_t *hrd_fullness;
380 uint8_t range_mapy_flag;
381 uint8_t range_mapuv_flag;
391 * Get unary code of limited length
392 * @fixme FIXME Slow and ugly
393 * @param gb GetBitContext
394 * @param[in] stop The bitstop value (unary code of 1's or 0's)
395 * @param[in] len Maximum length
396 * @return Unary length/index
398 static int get_prefix(GetBitContext *gb, int stop, int len)
/* Simple implementation: count bits read until a `stop` bit is seen or
 * `len` bits have been consumed. */
403 for(i = 0; i < len && get_bits1(gb) != stop; i++);
/* Commented-out equivalent kept for reference: */
405 /* int i = 0, tmp = !stop;
407 while (i != len && tmp != stop)
409 tmp = get_bits(gb, 1);
412 if (i == len && tmp != stop) return len+1;
/* NOTE(review): the cache-based reader below (UPDATE_CACHE/GET_CACHE/
 * LAST_SKIP_BITS) appears to be an alternative, conditionally compiled
 * implementation -- confirm which path is active before editing. */
419 UPDATE_CACHE(re, gb);
420 buf=GET_CACHE(re, gb); //Still not sure
/* Invert so the terminating bit always appears as a 0 in the cache. */
421 if (stop) buf = ~buf;
423 log= av_log2(-buf); //FIXME: -?
425 LAST_SKIP_BITS(re, gb, log+1);
426 CLOSE_READER(re, gb);
430 LAST_SKIP_BITS(re, gb, limit);
431 CLOSE_READER(re, gb);
/** Decode a 0/1/2 value coded as "1" -> 0, "01" -> 1, "00" -> 2.
 * NOTE(review): the branch handling the first bit (returning 0) is not
 * visible in this listing -- only the 2-minus-second-bit path is shown. */
436 static inline int decode210(GetBitContext *gb){
442 return 2 - get_bits1(gb);
446 * Init VC-1 specific tables and VC1Context members
447 * @param v The VC1Context to initialize
/* NOTE(review): guard logic ensuring the static VLC tables are built only
 * once, and the `for` headers driving the indexed init_vlc calls below, are
 * not visible in this listing. */
450 static int vc1_init_common(VC1Context *v)
455 v->hrd_rate = v->hrd_buffer = NULL;
/* Single-table VLCs. */
461 init_vlc(&vc1_bfraction_vlc, VC1_BFRACTION_VLC_BITS, 23,
462 vc1_bfraction_bits, 1, 1,
463 vc1_bfraction_codes, 1, 1, 1);
464 init_vlc(&vc1_norm2_vlc, VC1_NORM2_VLC_BITS, 4,
465 vc1_norm2_bits, 1, 1,
466 vc1_norm2_codes, 1, 1, 1);
467 init_vlc(&vc1_norm6_vlc, VC1_NORM6_VLC_BITS, 64,
468 vc1_norm6_bits, 1, 1,
469 vc1_norm6_codes, 2, 2, 1);
470 init_vlc(&vc1_imode_vlc, VC1_IMODE_VLC_BITS, 7,
471 vc1_imode_bits, 1, 1,
472 vc1_imode_codes, 1, 1, 1);
/* Per-table-set VLCs, indexed by i (3 sets). */
475 init_vlc(&vc1_ttmb_vlc[i], VC1_TTMB_VLC_BITS, 16,
476 vc1_ttmb_bits[i], 1, 1,
477 vc1_ttmb_codes[i], 2, 2, 1);
478 init_vlc(&vc1_ttblk_vlc[i], VC1_TTBLK_VLC_BITS, 8,
479 vc1_ttblk_bits[i], 1, 1,
480 vc1_ttblk_codes[i], 1, 1, 1);
481 init_vlc(&vc1_subblkpat_vlc[i], VC1_SUBBLKPAT_VLC_BITS, 15,
482 vc1_subblkpat_bits[i], 1, 1,
483 vc1_subblkpat_codes[i], 1, 1, 1);
/* Per-table-set VLCs, indexed by i (4 sets). */
487 init_vlc(&vc1_4mv_block_pattern_vlc[i], VC1_4MV_BLOCK_PATTERN_VLC_BITS, 16,
488 vc1_4mv_block_pattern_bits[i], 1, 1,
489 vc1_4mv_block_pattern_codes[i], 1, 1, 1);
490 init_vlc(&vc1_cbpcy_p_vlc[i], VC1_CBPCY_P_VLC_BITS, 64,
491 vc1_cbpcy_p_bits[i], 1, 1,
492 vc1_cbpcy_p_codes[i], 2, 2, 1);
493 init_vlc(&vc1_mv_diff_vlc[i], VC1_MV_DIFF_VLC_BITS, 73,
494 vc1_mv_diff_bits[i], 1, 1,
495 vc1_mv_diff_codes[i], 2, 2, 1);
/* AC coefficient VLCs, one per coding set (8 sets). */
498 init_vlc(&vc1_ac_coeff_table[i], AC_VLC_BITS, vc1_ac_sizes[i],
499 &vc1_ac_tables[i][0][1], 8, 4,
500 &vc1_ac_tables[i][0][0], 8, 4, 1);
501 init_vlc(&ff_msmp4_mb_i_vlc, MB_INTRA_VLC_BITS, 64,
502 &ff_msmp4_mb_i_table[0][1], 4, 2,
503 &ff_msmp4_mb_i_table[0][0], 4, 2, 1);
508 v->mvrange = 0; /* 7.1.1.18, p80 */
513 /***********************************************************************/
515 * @defgroup bitplane VC-1 Bitplane decoding
520 /** @addtogroup bitplane
533 /** @} */ //imode defines
535 /** Decode rows by checking if they are skipped
536 * @param plane Buffer to store decoded bits
537 * @param[in] width Width of this buffer
538 * @param[in] height Height of this buffer
539 * @param[in] stride Stride of this buffer
/* NOTE(review): the per-row `plane += stride` advance and closing braces are
 * not visible in this listing. */
541 static void decode_rowskip(uint8_t* plane, int width, int height, int stride, GetBitContext *gb){
544 for (y=0; y<height; y++){
/* ROWSKIP bit: 0 means the whole row is zero, 1 means the row is coded. */
545 if (!get_bits(gb, 1)) //rowskip
546 memset(plane, 0, width);
548 for (x=0; x<width; x++)
549 plane[x] = get_bits(gb, 1);
554 /** Decode columns by checking if they are skipped
555 * @param plane Buffer to store decoded bits
556 * @param[in] width Width of this buffer
557 * @param[in] height Height of this buffer
558 * @param[in] stride Stride of this buffer
559 * @fixme FIXME: Optimize
561 static void decode_colskip(uint8_t* plane, int width, int height, int stride, GetBitContext *gb){
564 for (x=0; x<width; x++){
/* COLSKIP bit: 0 means the whole column is zero, 1 means it is coded. */
565 if (!get_bits(gb, 1)) //colskip
566 for (y=0; y<height; y++)
/* NOTE(review): the zeroing statement for the skipped column and the
 * per-column `plane++` advance are not visible in this listing. */
569 for (y=0; y<height; y++)
570 plane[y*stride] = get_bits(gb, 1);
575 /** Decode a bitplane's bits
576 * @param data Bitplane buffer where the decoded bits are stored
577 * @param v VC-1 context for bit reading and logging
579 * @fixme FIXME: Optimize
* @param[out] raw_flag set to 1 when the plane is coded "raw" (read in the MB layer)
* @return (imode << 1) + invert
581 static int bitplane_decoding(uint8_t* data, int *raw_flag, VC1Context *v)
583 GetBitContext *gb = &v->s.gb;
585 int imode, x, y, code, offset;
586 uint8_t invert, *planep = data;
587 int width, height, stride;
589 width = v->s.mb_width;
590 height = v->s.mb_height;
591 stride = v->s.mb_stride;
/* INVERT bit followed by the coding-mode VLC (IMODE). */
592 invert = get_bits(gb, 1);
593 imode = get_vlc2(gb, vc1_imode_vlc.table, VC1_IMODE_VLC_BITS, 1);
/* NOTE(review): the switch(imode) dispatch and several case labels are not
 * visible in this listing; the sections below are its case bodies. */
599 //Data is actually read in the MB layer (same for all tests == "raw")
600 *raw_flag = 1; //invert ignored
/* NORM-2/DIFF-2: pairs of bits; odd-sized planes code one leading bit. */
604 if ((height * width) & 1)
606 *planep++ = get_bits(gb, 1);
610 // decode bitplane as one long line
611 for (y = offset; y < height * width; y += 2) {
612 code = get_vlc2(gb, vc1_norm2_vlc.table, VC1_NORM2_VLC_BITS, 1);
613 *planep++ = code & 1;
615 if(offset == width) {
617 planep += stride - width;
619 *planep++ = code >> 1;
621 if(offset == width) {
623 planep += stride - width;
/* NORM-6/DIFF-6: 6 bits per tile; choose 2x3 or 3x2 tiling by divisibility. */
629 if(!(height % 3) && (width % 3)) { // use 2x3 decoding
630 for(y = 0; y < height; y+= 3) {
631 for(x = width & 1; x < width; x += 2) {
632 code = get_vlc2(gb, vc1_norm6_vlc.table, VC1_NORM6_VLC_BITS, 2);
634 av_log(v->s.avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
637 planep[x + 0] = (code >> 0) & 1;
638 planep[x + 1] = (code >> 1) & 1;
639 planep[x + 0 + stride] = (code >> 2) & 1;
640 planep[x + 1 + stride] = (code >> 3) & 1;
641 planep[x + 0 + stride * 2] = (code >> 4) & 1;
642 planep[x + 1 + stride * 2] = (code >> 5) & 1;
644 planep += stride * 3;
/* Leftover odd column is coded with column-skip. */
646 if(width & 1) decode_colskip(data, 1, height, stride, &v->s.gb);
/* 3x2 tiling path: leftover rows/columns handled below. */
648 planep += (height & 1) * stride;
649 for(y = height & 1; y < height; y += 2) {
650 for(x = width % 3; x < width; x += 3) {
651 code = get_vlc2(gb, vc1_norm6_vlc.table, VC1_NORM6_VLC_BITS, 2);
653 av_log(v->s.avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
656 planep[x + 0] = (code >> 0) & 1;
657 planep[x + 1] = (code >> 1) & 1;
658 planep[x + 2] = (code >> 2) & 1;
659 planep[x + 0 + stride] = (code >> 3) & 1;
660 planep[x + 1 + stride] = (code >> 4) & 1;
661 planep[x + 2 + stride] = (code >> 5) & 1;
663 planep += stride * 2;
666 if(x) decode_colskip(data , x, height , stride, &v->s.gb);
667 if(height & 1) decode_rowskip(data+x, width - x, 1, stride, &v->s.gb);
671 decode_rowskip(data, width, height, stride, &v->s.gb);
674 decode_colskip(data, width, height, stride, &v->s.gb);
679 /* Applying diff operator */
680 if (imode == IMODE_DIFF2 || imode == IMODE_DIFF6)
/* First row/column are XORed against `invert`; the rest predict from the
 * left and top neighbours. */
684 for (x=1; x<width; x++)
685 planep[x] ^= planep[x-1];
686 for (y=1; y<height; y++)
689 planep[0] ^= planep[-stride];
690 for (x=1; x<width; x++)
692 if (planep[x-1] != planep[x-stride]) planep[x] ^= invert;
693 else planep[x] ^= planep[x-1];
/* Non-diff modes: apply INVERT over the whole plane. */
700 for (x=0; x<stride*height; x++) planep[x] = !planep[x]; //FIXME stride
702 return (imode<<1) + invert;
705 /** @} */ //Bitplane group
707 /***********************************************************************/
708 /** VOP Dquant decoding
709 * @param v VC-1 Context
/* Reads the picture-level VOPDQUANT syntax (ALTPQUANT and friends).
 * NOTE(review): the branch structure around dquant==2 vs dquant==1 is not
 * fully visible in this listing. */
711 static int vop_dquant_decoding(VC1Context *v)
713 GetBitContext *gb = &v->s.gb;
/* PQDIFF: 7 means ABSPQ follows as 5 bits, otherwise altpq = pq + diff + 1. */
719 pqdiff = get_bits(gb, 3);
720 if (pqdiff == 7) v->altpq = get_bits(gb, 5);
721 else v->altpq = v->pq + pqdiff + 1;
725 v->dquantfrm = get_bits(gb, 1);
728 v->dqprofile = get_bits(gb, 2);
729 switch (v->dqprofile)
731 case DQPROFILE_SINGLE_EDGE:
732 case DQPROFILE_DOUBLE_EDGES:
/* DQSBEDGE/DQDBEDGE: which edge(s) use ALTPQUANT. */
733 v->dqsbedge = get_bits(gb, 2);
735 case DQPROFILE_ALL_MBS:
736 v->dqbilevel = get_bits(gb, 1);
737 default: break; //Forbidden ?
739 if (v->dqbilevel || v->dqprofile != DQPROFILE_ALL_MBS)
741 pqdiff = get_bits(gb, 3);
742 if (pqdiff == 7) v->altpq = get_bits(gb, 5);
743 else v->altpq = v->pq + pqdiff + 1;
750 /** Put block onto picture
/* Writes the six 8x8 blocks of one macroblock (4 luma + 2 chroma) into the
 * current picture via put_pixels_clamped. */
752 static void vc1_put_block(VC1Context *v, DCTELEM block[6][64])
756 DSPContext *dsp = &v->s.dsp;
/* Sample scaling pass -- presumably the range-reduction expansion
 * out = (in - 128) * 2 + 128; NOTE(review): the guarding condition for this
 * loop is not visible in this listing -- confirm. */
760 for(k = 0; k < 6; k++)
761 for(j = 0; j < 8; j++)
762 for(i = 0; i < 8; i++)
763 block[k][i + j*8] = ((block[k][i + j*8] - 128) << 1) + 128;
766 ys = v->s.current_picture.linesize[0];
767 us = v->s.current_picture.linesize[1];
768 vs = v->s.current_picture.linesize[2];
/* Four luma blocks: top pair, then (after advancing Y) bottom pair. */
771 dsp->put_pixels_clamped(block[0], Y, ys);
772 dsp->put_pixels_clamped(block[1], Y + 8, ys);
774 dsp->put_pixels_clamped(block[2], Y, ys);
775 dsp->put_pixels_clamped(block[3], Y + 8, ys);
/* Chroma is skipped entirely in grayscale mode. */
777 if(!(v->s.flags & CODEC_FLAG_GRAY)) {
778 dsp->put_pixels_clamped(block[4], v->s.dest[1], us);
779 dsp->put_pixels_clamped(block[5], v->s.dest[2], vs);
783 /** Do motion compensation over 1 macroblock
784 * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
* @param dir 0 = forward (last picture), nonzero = backward (next picture)
786 static void vc1_mc_1mv(VC1Context *v, int dir)
788 MpegEncContext *s = &v->s;
789 DSPContext *dsp = &v->s.dsp;
790 uint8_t *srcY, *srcU, *srcV;
791 int dxy, uvdxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
/* No reference picture yet (e.g. first frame) -- nothing to predict from. */
793 if(!v->s.last_picture.data[0])return;
795 mx = s->mv[dir][0][0];
796 my = s->mv[dir][0][1];
798 // store motion vectors for further use in B frames
799 if(s->pict_type == P_TYPE) {
800 s->current_picture.motion_val[1][s->block_index[0]][0] = mx;
801 s->current_picture.motion_val[1][s->block_index[0]][1] = my;
/* Derive chroma MV from luma MV (quarter-pel -> chroma scale), rounding
 * 3/4-pel positions up. */
803 uvmx = (mx + ((mx & 3) == 3)) >> 1;
804 uvmy = (my + ((my & 3) == 3)) >> 1;
/* FASTUVMC: round chroma MV towards zero to half-pel accuracy. */
806 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
807 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
/* Select forward or backward reference planes based on `dir`. */
810 srcY = s->last_picture.data[0];
811 srcU = s->last_picture.data[1];
812 srcV = s->last_picture.data[2];
814 srcY = s->next_picture.data[0];
815 srcU = s->next_picture.data[1];
816 srcV = s->next_picture.data[2];
819 src_x = s->mb_x * 16 + (mx >> 2);
820 src_y = s->mb_y * 16 + (my >> 2);
821 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
822 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
/* Clamp source coordinates; Advanced Profile allows a slightly different
 * out-of-frame range than Simple/Main. */
824 if(v->profile != PROFILE_ADVANCED){
825 src_x = av_clip( src_x, -16, s->mb_width * 16);
826 src_y = av_clip( src_y, -16, s->mb_height * 16);
827 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
828 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
830 src_x = av_clip( src_x, -17, s->avctx->coded_width);
831 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
832 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
833 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
836 srcY += src_y * s->linesize + src_x;
837 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
838 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
840 /* for grayscale we should not try to read from unknown area */
841 if(s->flags & CODEC_FLAG_GRAY) {
842 srcU = s->edge_emu_buffer + 18 * s->linesize;
843 srcV = s->edge_emu_buffer + 18 * s->linesize;
/* Edge emulation: needed when the block reads outside the padded frame, or
 * when samples must be rewritten (range reduction / intensity comp). */
846 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
847 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel*3
848 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 16 - s->mspel*3){
849 uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;
851 srcY -= s->mspel * (1 + s->linesize);
852 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
853 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
854 srcY = s->edge_emu_buffer;
855 ff_emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1,
856 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
857 ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
858 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
861 /* if we deal with range reduction we need to scale source blocks */
867 for(j = 0; j < 17 + s->mspel*2; j++) {
868 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
871 src = srcU; src2 = srcV;
872 for(j = 0; j < 9; j++) {
873 for(i = 0; i < 9; i++) {
874 src[i] = ((src[i] - 128) >> 1) + 128;
875 src2[i] = ((src2[i] - 128) >> 1) + 128;
877 src += s->uvlinesize;
878 src2 += s->uvlinesize;
881 /* if we deal with intensity compensation we need to scale source blocks */
882 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
/* Remap luma/chroma samples through the intensity-compensation LUTs. */
887 for(j = 0; j < 17 + s->mspel*2; j++) {
888 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = v->luty[src[i]];
891 src = srcU; src2 = srcV;
892 for(j = 0; j < 9; j++) {
893 for(i = 0; i < 9; i++) {
894 src[i] = v->lutuv[src[i]];
895 src2[i] = v->lutuv[src2[i]];
897 src += s->uvlinesize;
898 src2 += s->uvlinesize;
901 srcY += s->mspel * (1 + s->linesize);
/* Quarter-pel path: four 8x8 mspel calls cover the 16x16 luma block. */
905 dxy = ((my & 3) << 2) | (mx & 3);
906 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] , srcY , s->linesize, v->rnd);
907 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
908 srcY += s->linesize * 8;
909 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize , srcY , s->linesize, v->rnd);
910 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
911 } else { // hpel mc - always used for luma
912 dxy = (my & 2) | ((mx & 2) >> 1);
915 dsp->put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
917 dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
920 if(s->flags & CODEC_FLAG_GRAY) return;
921 /* Chroma MC always uses qpel bilinear */
922 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
926 dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
927 dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
929 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
930 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
934 /** Do motion compensation for 4-MV macroblock - luminance block
* @param n luma block index (0..3); selects the 8x8 quadrant
936 static void vc1_mc_4mv_luma(VC1Context *v, int n)
938 MpegEncContext *s = &v->s;
939 DSPContext *dsp = &v->s.dsp;
941 int dxy, mx, my, src_x, src_y;
/* No reference picture yet -- nothing to predict from. */
944 if(!v->s.last_picture.data[0])return;
947 srcY = s->last_picture.data[0];
/* Destination offset of the 8x8 quadrant within the 16x16 macroblock. */
949 off = s->linesize * 4 * (n&2) + (n&1) * 8;
951 src_x = s->mb_x * 16 + (n&1) * 8 + (mx >> 2);
952 src_y = s->mb_y * 16 + (n&2) * 4 + (my >> 2);
/* Clamp like vc1_mc_1mv: different out-of-frame range for Advanced. */
954 if(v->profile != PROFILE_ADVANCED){
955 src_x = av_clip( src_x, -16, s->mb_width * 16);
956 src_y = av_clip( src_y, -16, s->mb_height * 16);
958 src_x = av_clip( src_x, -17, s->avctx->coded_width);
959 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
962 srcY += src_y * s->linesize + src_x;
/* Edge emulation for out-of-frame reads or sample rewriting. */
964 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
965 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 8 - s->mspel*2
966 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 8 - s->mspel*2){
967 srcY -= s->mspel * (1 + s->linesize);
968 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 9+s->mspel*2, 9+s->mspel*2,
969 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
970 srcY = s->edge_emu_buffer;
971 /* if we deal with range reduction we need to scale source blocks */
977 for(j = 0; j < 9 + s->mspel*2; j++) {
978 for(i = 0; i < 9 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
982 /* if we deal with intensity compensation we need to scale source blocks */
983 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
987 for(j = 0; j < 9 + s->mspel*2; j++) {
988 for(i = 0; i < 9 + s->mspel*2; i++) src[i] = v->luty[src[i]];
993 srcY += s->mspel * (1 + s->linesize);
/* Quarter-pel (mspel) vs half-pel interpolation, as in vc1_mc_1mv. */
997 dxy = ((my & 3) << 2) | (mx & 3);
998 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, v->rnd);
999 } else { // hpel mc - always used for luma
1000 dxy = (my & 2) | ((mx & 2) >> 1);
1002 dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
1004 dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
/**
 * Return the median of four integers, computed as the average of the two
 * middle values. Used to derive the chroma MV from the four luma MVs.
 *
 * Fix: the two if/else return pairs were not enclosed in the outer a<b
 * branch, leaving the second pair unreachable; the outer conditional is
 * restored (the FFMIN/FFMAX argument symmetry determines it unambiguously).
 */
static inline int median4(int a, int b, int c, int d)
{
    if (a < b) {
        if (c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
        else       return (FFMIN(b, c) + FFMAX(a, d)) / 2;
    } else {
        if (c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
        else       return (FFMIN(a, c) + FFMAX(b, d)) / 2;
    }
}
1020 /** Do motion compensation for 4-MV macroblock - both chroma blocks
1022 static void vc1_mc_4mv_chroma(VC1Context *v)
1024 MpegEncContext *s = &v->s;
1025 DSPContext *dsp = &v->s.dsp;
1026 uint8_t *srcU, *srcV;
1027 int uvdxy, uvmx, uvmy, uvsrc_x, uvsrc_y;
1028 int i, idx, tx = 0, ty = 0;
1029 int mvx[4], mvy[4], intra[4];
/* count[idx] = number of set bits in idx = number of intra luma blocks. */
1030 static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
1032 if(!v->s.last_picture.data[0])return;
1033 if(s->flags & CODEC_FLAG_GRAY) return;
/* Gather the four luma MVs and their intra flags. */
1035 for(i = 0; i < 4; i++) {
1036 mvx[i] = s->mv[0][i][0];
1037 mvy[i] = s->mv[0][i][1];
1038 intra[i] = v->mb_type[0][s->block_index[i]];
1041 /* calculate chroma MV vector from four luma MVs */
1042 idx = (intra[3] << 3) | (intra[2] << 2) | (intra[1] << 1) | intra[0];
1043 if(!idx) { // all blocks are inter
1044 tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
1045 ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
1046 } else if(count[idx] == 1) { // 3 inter blocks
/* NOTE(review): the switch(idx) dispatching to the four mid_pred cases below
 * is not fully visible in this listing. */
1049 tx = mid_pred(mvx[1], mvx[2], mvx[3]);
1050 ty = mid_pred(mvy[1], mvy[2], mvy[3]);
1053 tx = mid_pred(mvx[0], mvx[2], mvx[3]);
1054 ty = mid_pred(mvy[0], mvy[2], mvy[3]);
1057 tx = mid_pred(mvx[0], mvx[1], mvx[3]);
1058 ty = mid_pred(mvy[0], mvy[1], mvy[3]);
1061 tx = mid_pred(mvx[0], mvx[1], mvx[2]);
1062 ty = mid_pred(mvy[0], mvy[1], mvy[2]);
/* Exactly two inter blocks: average their MVs. */
1065 } else if(count[idx] == 2) {
1067 for(i=0; i<3;i++) if(!intra[i]) {t1 = i; break;}
1068 for(i= t1+1; i<4; i++)if(!intra[i]) {t2 = i; break;}
1069 tx = (mvx[t1] + mvx[t2]) / 2;
1070 ty = (mvy[t1] + mvy[t2]) / 2;
1072 return; //no need to do MC for inter blocks
1074 s->current_picture.motion_val[1][s->block_index[0]][0] = tx;
1075 s->current_picture.motion_val[1][s->block_index[0]][1] = ty;
/* Scale derived luma MV to chroma resolution, rounding 3/4-pel up. */
1076 uvmx = (tx + ((tx&3) == 3)) >> 1;
1077 uvmy = (ty + ((ty&3) == 3)) >> 1;
/* FASTUVMC: round chroma MV towards zero to half-pel accuracy. */
1079 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
1080 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
1083 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1084 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1086 if(v->profile != PROFILE_ADVANCED){
1087 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
1088 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
1090 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1091 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1094 srcU = s->last_picture.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
1095 srcV = s->last_picture.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
/* Edge emulation for out-of-frame reads or sample rewriting. */
1096 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1097 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
1098 || (unsigned)uvsrc_y > (s->v_edge_pos >> 1) - 9){
1099 ff_emulated_edge_mc(s->edge_emu_buffer , srcU, s->uvlinesize, 8+1, 8+1,
1100 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
1101 ff_emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize, 8+1, 8+1,
1102 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
1103 srcU = s->edge_emu_buffer;
1104 srcV = s->edge_emu_buffer + 16;
1106 /* if we deal with range reduction we need to scale source blocks */
1107 if(v->rangeredfrm) {
1109 uint8_t *src, *src2;
1111 src = srcU; src2 = srcV;
1112 for(j = 0; j < 9; j++) {
1113 for(i = 0; i < 9; i++) {
1114 src[i] = ((src[i] - 128) >> 1) + 128;
1115 src2[i] = ((src2[i] - 128) >> 1) + 128;
1117 src += s->uvlinesize;
1118 src2 += s->uvlinesize;
1121 /* if we deal with intensity compensation we need to scale source blocks */
1122 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1124 uint8_t *src, *src2;
1126 src = srcU; src2 = srcV;
1127 for(j = 0; j < 9; j++) {
1128 for(i = 0; i < 9; i++) {
1129 src[i] = v->lutuv[src[i]];
1130 src2[i] = v->lutuv[src2[i]];
1132 src += s->uvlinesize;
1133 src2 += s->uvlinesize;
1138 /* Chroma MC always uses qpel bilinear */
1139 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
1143 dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
1144 dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
1146 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
1147 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
1151 static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb);
1154 * Decode Simple/Main Profiles sequence header
1155 * @see Figure 7-8, p16-17
1156 * @param avctx Codec context
1157 * @param gb GetBit context initialized from Codec context extra_data
* @return 0 on success, negative on error (error paths partly elided here)
1160 static int decode_sequence_header(AVCodecContext *avctx, GetBitContext *gb)
1162 VC1Context *v = avctx->priv_data;
1164 av_log(avctx, AV_LOG_DEBUG, "Header: %0X\n", show_bits(gb, 32));
1165 v->profile = get_bits(gb, 2);
1166 if (v->profile == PROFILE_COMPLEX)
1168 av_log(avctx, AV_LOG_ERROR, "WMV3 Complex Profile is not fully supported\n");
/* Advanced Profile has a completely different header layout. */
1171 if (v->profile == PROFILE_ADVANCED)
1173 return decode_sequence_header_adv(v, gb);
1177 v->res_sm = get_bits(gb, 2); //reserved
1180 av_log(avctx, AV_LOG_ERROR,
1181 "Reserved RES_SM=%i is forbidden\n", v->res_sm);
1187 v->frmrtq_postproc = get_bits(gb, 3); //common
1188 // (bitrate-32kbps)/64kbps
1189 v->bitrtq_postproc = get_bits(gb, 5); //common
1190 v->s.loop_filter = get_bits(gb, 1); //common
1191 if(v->s.loop_filter == 1 && v->profile == PROFILE_SIMPLE)
1193 av_log(avctx, AV_LOG_ERROR,
/* NOTE(review): "shell" should read "shall" in this log message -- left
 * unchanged here since it is a runtime string. */
1194 "LOOPFILTER shell not be enabled in simple profile\n");
1197 v->res_x8 = get_bits(gb, 1); //reserved
1200 av_log(avctx, AV_LOG_ERROR,
1201 "1 for reserved RES_X8 is forbidden\n");
1204 v->multires = get_bits(gb, 1);
1205 v->res_fasttx = get_bits(gb, 1);
1208 av_log(avctx, AV_LOG_ERROR,
1209 "0 for reserved RES_FASTTX is forbidden\n");
1213 v->fastuvmc = get_bits(gb, 1); //common
/* !v->profile tests for PROFILE_SIMPLE -- NOTE(review): relies on
 * PROFILE_SIMPLE being 0; confirm against the Profiles enum. */
1214 if (!v->profile && !v->fastuvmc)
1216 av_log(avctx, AV_LOG_ERROR,
1217 "FASTUVMC unavailable in Simple Profile\n");
1220 v->extended_mv = get_bits(gb, 1); //common
1221 if (!v->profile && v->extended_mv)
1223 av_log(avctx, AV_LOG_ERROR,
1224 "Extended MVs unavailable in Simple Profile\n");
1227 v->dquant = get_bits(gb, 2); //common
1228 v->vstransform = get_bits(gb, 1); //common
1230 v->res_transtab = get_bits(gb, 1);
1231 if (v->res_transtab)
1233 av_log(avctx, AV_LOG_ERROR,
1234 "1 for reserved RES_TRANSTAB is forbidden\n");
1238 v->overlap = get_bits(gb, 1); //common
1240 v->s.resync_marker = get_bits(gb, 1);
1241 v->rangered = get_bits(gb, 1);
1242 if (v->rangered && v->profile == PROFILE_SIMPLE)
1244 av_log(avctx, AV_LOG_INFO,
1245 "RANGERED should be set to 0 in simple profile\n");
1248 v->s.max_b_frames = avctx->max_b_frames = get_bits(gb, 3); //common
1249 v->quantizer_mode = get_bits(gb, 2); //common
1251 v->finterpflag = get_bits(gb, 1); //common
1252 v->res_rtm_flag = get_bits(gb, 1); //reserved
/* Old (pre-final) WMV3 streams have this reserved bit cleared; decoding of
 * inter frames for them is not supported. */
1253 if (!v->res_rtm_flag)
1255 // av_log(avctx, AV_LOG_ERROR,
1256 // "0 for reserved RES_RTM_FLAG is forbidden\n");
1257 av_log(avctx, AV_LOG_ERROR,
1258 "Old WMV3 version detected, only I-frames will be decoded\n");
1261 av_log(avctx, AV_LOG_DEBUG,
1262 "Profile %i:\nfrmrtq_postproc=%i, bitrtq_postproc=%i\n"
1263 "LoopFilter=%i, MultiRes=%i, FastUVMC=%i, Extended MV=%i\n"
1264 "Rangered=%i, VSTransform=%i, Overlap=%i, SyncMarker=%i\n"
1265 "DQuant=%i, Quantizer mode=%i, Max B frames=%i\n",
1266 v->profile, v->frmrtq_postproc, v->bitrtq_postproc,
1267 v->s.loop_filter, v->multires, v->fastuvmc, v->extended_mv,
1268 v->rangered, v->vstransform, v->overlap, v->s.resync_marker,
1269 v->dquant, v->quantizer_mode, avctx->max_b_frames
/**
 * Decode the Advanced-profile sequence header: level, chroma format,
 * coded dimensions, interlace-related flags, optional display/aspect/
 * framerate/color metadata and HRD buffering parameters.
 * NOTE(review): the embedded original line numbers are discontinuous, so
 * closing braces, error returns and some declarations (w, h, ar, nr, dr, i)
 * are missing from this copy; comments describe only the visible reads.
 */
1274 static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb)
1276 v->res_rtm_flag = 1;
1277 v->level = get_bits(gb, 3);
1280 av_log(v->s.avctx, AV_LOG_ERROR, "Reserved LEVEL %i\n",v->level);
1282 v->chromaformat = get_bits(gb, 2);
1283 if (v->chromaformat != 1)
1285 av_log(v->s.avctx, AV_LOG_ERROR,
1286 "Only 4:2:0 chroma format supported\n");
1291 v->frmrtq_postproc = get_bits(gb, 3); //common
1292 // (bitrate-32kbps)/64kbps
1293 v->bitrtq_postproc = get_bits(gb, 5); //common
1294 v->postprocflag = get_bits(gb, 1); //common
/* coded dimensions are transmitted as (value + 1) * 2 */
1296 v->s.avctx->coded_width = (get_bits(gb, 12) + 1) << 1;
1297 v->s.avctx->coded_height = (get_bits(gb, 12) + 1) << 1;
1298 v->s.avctx->width = v->s.avctx->coded_width;
1299 v->s.avctx->height = v->s.avctx->coded_height;
1300 v->broadcast = get_bits1(gb);
1301 v->interlace = get_bits1(gb);
1302 v->tfcntrflag = get_bits1(gb);
1303 v->finterpflag = get_bits1(gb);
1304 get_bits1(gb); // reserved
1306 v->s.h_edge_pos = v->s.avctx->coded_width;
1307 v->s.v_edge_pos = v->s.avctx->coded_height;
1309 av_log(v->s.avctx, AV_LOG_DEBUG,
1310 "Advanced Profile level %i:\nfrmrtq_postproc=%i, bitrtq_postproc=%i\n"
1311 "LoopFilter=%i, ChromaFormat=%i, Pulldown=%i, Interlace: %i\n"
1312 "TFCTRflag=%i, FINTERPflag=%i\n",
1313 v->level, v->frmrtq_postproc, v->bitrtq_postproc,
1314 v->s.loop_filter, v->chromaformat, v->broadcast, v->interlace,
1315 v->tfcntrflag, v->finterpflag
1318 v->psf = get_bits1(gb);
1319 if(v->psf) { //PsF, 6.1.13
1320 av_log(v->s.avctx, AV_LOG_ERROR, "Progressive Segmented Frame mode: not supported (yet)\n");
1323 v->s.max_b_frames = v->s.avctx->max_b_frames = 7;
1324 if(get_bits1(gb)) { //Display Info - decoding is not affected by it
1326 av_log(v->s.avctx, AV_LOG_DEBUG, "Display extended info:\n");
1327 v->s.avctx->width = v->s.width = w = get_bits(gb, 14) + 1;
1328 v->s.avctx->height = v->s.height = h = get_bits(gb, 14) + 1;
1329 av_log(v->s.avctx, AV_LOG_DEBUG, "Display dimensions: %ix%i\n", w, h);
/* aspect ratio: 4-bit index into table, or explicit 8-bit num/den pair
   (the branch selecting between them is elided in this copy) */
1331 ar = get_bits(gb, 4);
1333 v->s.avctx->sample_aspect_ratio = vc1_pixel_aspect[ar];
1335 w = get_bits(gb, 8);
1336 h = get_bits(gb, 8);
1337 v->s.avctx->sample_aspect_ratio = (AVRational){w, h};
1340 if(get_bits1(gb)){ //framerate stuff
1342 v->s.avctx->time_base.num = 32;
1343 v->s.avctx->time_base.den = get_bits(gb, 16) + 1;
/* enumerated framerate: numerator/denominator table indices */
1346 nr = get_bits(gb, 8);
1347 dr = get_bits(gb, 4);
1348 if(nr && nr < 8 && dr && dr < 3){
1349 v->s.avctx->time_base.num = fps_dr[dr - 1];
1350 v->s.avctx->time_base.den = fps_nr[nr - 1] * 1000;
/* color description bytes — stored but presumably not used for decoding;
   TODO confirm against the rest of the file */
1356 v->color_prim = get_bits(gb, 8);
1357 v->transfer_char = get_bits(gb, 8);
1358 v->matrix_coef = get_bits(gb, 8);
/* HRD (hypothetical reference decoder) leaky-bucket parameters are
   parsed but the per-bucket values are discarded */
1362 v->hrd_param_flag = get_bits1(gb);
1363 if(v->hrd_param_flag) {
1365 v->hrd_num_leaky_buckets = get_bits(gb, 5);
1366 get_bits(gb, 4); //bitrate exponent
1367 get_bits(gb, 4); //buffer size exponent
1368 for(i = 0; i < v->hrd_num_leaky_buckets; i++) {
1369 get_bits(gb, 16); //hrd_rate[n]
1370 get_bits(gb, 16); //hrd_buffer[n]
/**
 * Decode an Advanced-profile entry-point header: per-entry coding flags
 * (loop filter, fast UV MC, extended MV, dquant, transforms, quantizer
 * mode), optional HRD fullness, optional new coded dimensions and the
 * optional luma/chroma range-mapping fields.
 * NOTE(review): embedded line numbers are discontinuous — the guards
 * around the Y/UV range reads and the coded-size override are elided here.
 */
1376 static int decode_entry_point(AVCodecContext *avctx, GetBitContext *gb)
1378 VC1Context *v = avctx->priv_data;
1379 int i, blink, clentry, refdist;
1381 av_log(avctx, AV_LOG_DEBUG, "Entry point: %08X\n", show_bits_long(gb, 32));
1382 blink = get_bits1(gb); // broken link
1383 clentry = get_bits1(gb); // closed entry
1384 v->panscanflag = get_bits1(gb);
1385 refdist = get_bits1(gb); // refdist flag
1386 v->s.loop_filter = get_bits1(gb);
1387 v->fastuvmc = get_bits1(gb);
1388 v->extended_mv = get_bits1(gb);
1389 v->dquant = get_bits(gb, 2);
1390 v->vstransform = get_bits1(gb);
1391 v->overlap = get_bits1(gb);
1392 v->quantizer_mode = get_bits(gb, 2);
/* HRD buffer fullness, one byte per leaky bucket announced in the
   sequence header; values are discarded */
1394 if(v->hrd_param_flag){
1395 for(i = 0; i < v->hrd_num_leaky_buckets; i++) {
1396 get_bits(gb, 8); //hrd_full[n]
1401 avctx->coded_width = (get_bits(gb, 12)+1)<<1;
1402 avctx->coded_height = (get_bits(gb, 12)+1)<<1;
1405 v->extended_dmv = get_bits1(gb);
/* range mapping is parsed but not applied — decoder warns and skips */
1407 av_log(avctx, AV_LOG_ERROR, "Luma scaling is not supported, expect wrong picture\n");
1408 skip_bits(gb, 3); // Y range, ignored for now
1411 av_log(avctx, AV_LOG_ERROR, "Chroma scaling is not supported, expect wrong picture\n");
1412 skip_bits(gb, 3); // UV range, ignored for now
1415 av_log(avctx, AV_LOG_DEBUG, "Entry point info:\n"
1416 "BrokenLink=%i, ClosedEntry=%i, PanscanFlag=%i\n"
1417 "RefDist=%i, Postproc=%i, FastUVMC=%i, ExtMV=%i\n"
1418 "DQuant=%i, VSTransform=%i, Overlap=%i, Qmode=%i\n",
1419 blink, clentry, v->panscanflag, refdist, v->s.loop_filter,
1420 v->fastuvmc, v->extended_mv, v->dquant, v->vstransform, v->overlap, v->quantizer_mode);
/**
 * Parse a Simple/Main-profile picture (frame) header: picture type,
 * quantizer, MV range, and the per-type (P/B) motion, bitplane and
 * transform fields; finally the AC/DC coding table selectors.
 * Returns -1 on bitplane-decoding failure or forbidden X8 frames.
 * NOTE(review): embedded line numbers are discontinuous — braces, `else`
 * branches and several case labels of the switch are elided from this
 * copy; the comments below annotate only the visible statements.
 */
1425 static int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
1427 int pqindex, lowquant, status;
1429 if(v->finterpflag) v->interpfrm = get_bits(gb, 1);
1430 skip_bits(gb, 2); //framecnt unused
1432 if (v->rangered) v->rangeredfrm = get_bits(gb, 1);
/* picture type: 1 bit, plus an extra bit to separate I from B when
   B-frames are enabled */
1433 v->s.pict_type = get_bits(gb, 1);
1434 if (v->s.avctx->max_b_frames) {
1435 if (!v->s.pict_type) {
1436 if (get_bits(gb, 1)) v->s.pict_type = I_TYPE;
1437 else v->s.pict_type = B_TYPE;
1438 } else v->s.pict_type = P_TYPE;
1439 } else v->s.pict_type = v->s.pict_type ? P_TYPE : I_TYPE;
1442 if(v->s.pict_type == B_TYPE) {
1443 v->bfraction = get_vlc2(gb, vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
1444 v->bfraction = vc1_bfraction_lut[v->bfraction];
/* BFRACTION == 0 signals a BI (intra-coded B) picture */
1445 if(v->bfraction == 0) {
1446 v->s.pict_type = BI_TYPE;
1449 if(v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1450 get_bits(gb, 7); // skip buffer fullness
1453 if(v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1455 if(v->s.pict_type == P_TYPE)
1458 /* Quantizer stuff */
1459 pqindex = get_bits(gb, 5);
1460 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1461 v->pq = pquant_table[0][pqindex];
1463 v->pq = pquant_table[1][pqindex];
/* pquantizer: 1 => uniform quantizer, chosen implicitly or explicitly */
1466 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1467 v->pquantizer = pqindex < 9;
1468 if (v->quantizer_mode == QUANT_NON_UNIFORM)
1470 v->pqindex = pqindex;
1471 if (pqindex < 9) v->halfpq = get_bits(gb, 1);
1473 if (v->quantizer_mode == QUANT_FRAME_EXPLICIT)
1474 v->pquantizer = get_bits(gb, 1);
/* MV range (MVRANGE) widens the MV coding window; see k_x/k_y below */
1476 if (v->extended_mv == 1) v->mvrange = get_prefix(gb, 0, 3);
1477 v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
1478 v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
1479 v->range_x = 1 << (v->k_x - 1);
1480 v->range_y = 1 << (v->k_y - 1);
1481 if (v->profile == PROFILE_ADVANCED)
1483 if (v->postprocflag) v->postproc = get_bits(gb, 1);
1486 if (v->multires && v->s.pict_type != B_TYPE) v->respic = get_bits(gb, 2);
/* X8 intra frames are not supported — refuse them */
1488 if(v->res_x8 && (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)){
1489 if(get_bits1(gb))return -1;
1491 //av_log(v->s.avctx, AV_LOG_INFO, "%c Frame: QP=[%i]%i (+%i/2) %i\n",
1492 // (v->s.pict_type == P_TYPE) ? 'P' : ((v->s.pict_type == I_TYPE) ? 'I' : 'B'), pqindex, v->pq, v->halfpq, v->rangeredfrm);
/* intensity compensation only persists across B frames */
1494 if(v->s.pict_type == I_TYPE || v->s.pict_type == P_TYPE) v->use_ic = 0;
1496 switch(v->s.pict_type) {
/* --- P frame --- */
1498 if (v->pq < 5) v->tt_index = 0;
1499 else if(v->pq < 13) v->tt_index = 1;
1500 else v->tt_index = 2;
1502 lowquant = (v->pq > 12) ? 0 : 1;
1503 v->mv_mode = mv_pmode_table[lowquant][get_prefix(gb, 1, 4)];
1504 if (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1506 int scale, shift, i;
1507 v->mv_mode2 = mv_pmode_table2[lowquant][get_prefix(gb, 1, 3)];
1508 v->lumscale = get_bits(gb, 6);
1509 v->lumshift = get_bits(gb, 6);
1511 /* fill lookup tables for intensity compensation */
1514 shift = (255 - v->lumshift * 2) << 6;
1515 if(v->lumshift > 31)
1518 scale = v->lumscale + 32;
1519 if(v->lumshift > 31)
1520 shift = (v->lumshift - 64) << 6;
1522 shift = v->lumshift << 6;
1524 for(i = 0; i < 256; i++) {
1525 v->luty[i] = av_clip_uint8((scale * i + shift + 32) >> 6);
1526 v->lutuv[i] = av_clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
/* derive quarter-sample / mspel flags from the (possibly two-level)
   MV prediction mode */
1529 if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
1530 v->s.quarter_sample = 0;
1531 else if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1532 if(v->mv_mode2 == MV_PMODE_1MV_HPEL || v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN)
1533 v->s.quarter_sample = 0;
1535 v->s.quarter_sample = 1;
1537 v->s.quarter_sample = 1;
1538 v->s.mspel = !(v->mv_mode == MV_PMODE_1MV_HPEL_BILIN || (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN));
/* mixed-MV frames carry a per-MB MV-type bitplane */
1540 if ((v->mv_mode == MV_PMODE_INTENSITY_COMP &&
1541 v->mv_mode2 == MV_PMODE_MIXED_MV)
1542 || v->mv_mode == MV_PMODE_MIXED_MV)
1544 status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
1545 if (status < 0) return -1;
1546 av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
1547 "Imode: %i, Invert: %i\n", status>>1, status&1);
1549 v->mv_type_is_raw = 0;
1550 memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
1552 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1553 if (status < 0) return -1;
1554 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1555 "Imode: %i, Invert: %i\n", status>>1, status&1);
1557 /* Hopefully this is correct for P frames */
1558 v->s.mv_table_index = get_bits(gb, 2); //but using vc1_ tables
1559 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1563 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1564 vop_dquant_decoding(v);
1567 v->ttfrm = 0; //FIXME Is that so ?
1570 v->ttmbf = get_bits(gb, 1);
1573 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
/* --- B frame --- */
1581 if (v->pq < 5) v->tt_index = 0;
1582 else if(v->pq < 13) v->tt_index = 1;
1583 else v->tt_index = 2;
1585 lowquant = (v->pq > 12) ? 0 : 1;
1586 v->mv_mode = get_bits1(gb) ? MV_PMODE_1MV : MV_PMODE_1MV_HPEL_BILIN;
1587 v->s.quarter_sample = (v->mv_mode == MV_PMODE_1MV);
1588 v->s.mspel = v->s.quarter_sample;
1590 status = bitplane_decoding(v->direct_mb_plane, &v->dmb_is_raw, v);
1591 if (status < 0) return -1;
1592 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Direct Type plane encoding: "
1593 "Imode: %i, Invert: %i\n", status>>1, status&1);
1594 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1595 if (status < 0) return -1;
1596 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1597 "Imode: %i, Invert: %i\n", status>>1, status&1);
1599 v->s.mv_table_index = get_bits(gb, 2);
1600 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1604 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1605 vop_dquant_decoding(v);
1611 v->ttmbf = get_bits(gb, 1);
1614 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
/* AC/DC coding-table selectors, common to all picture types */
1624 v->c_ac_table_index = decode012(gb);
1625 if (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1627 v->y_ac_table_index = decode012(gb);
1630 v->s.dc_table_index = get_bits(gb, 1);
/* BI frames are decoded as B afterwards; a BI flag is presumably set in
   the elided lines — TODO confirm */
1632 if(v->s.pict_type == BI_TYPE) {
1633 v->s.pict_type = B_TYPE;
/**
 * Parse an Advanced-profile picture header: frame coding mode, picture
 * type (prefix-coded), pulldown/interlace fields, quantizer, and the
 * per-type (I/P/B) prediction, bitplane and transform fields.
 * Returns -1 for interlaced content (unsupported) or on bitplane failure.
 * NOTE(review): embedded line numbers are discontinuous — braces, `else`
 * branches, switch case labels and some reads are elided from this copy;
 * comments annotate only what is visible.
 */
1639 static int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
1641 int pqindex, lowquant;
1644 v->p_frame_skipped = 0;
/* FCM: 0 = progressive; anything else (interlaced frame/field) bails out */
1647 v->fcm = decode012(gb);
1648 if(v->fcm) return -1; // interlaced frames/fields are not implemented
/* picture type is a unary prefix code */
1650 switch(get_prefix(gb, 0, 4)) {
1652 v->s.pict_type = P_TYPE;
1655 v->s.pict_type = B_TYPE;
1658 v->s.pict_type = I_TYPE;
1661 v->s.pict_type = BI_TYPE;
1664 v->s.pict_type = P_TYPE; // skipped pic
1665 v->p_frame_skipped = 1;
/* pulldown / repeat-frame signalling */
1671 if(!v->interlace || v->psf) {
1672 v->rptfrm = get_bits(gb, 2);
1674 v->tff = get_bits1(gb);
1675 v->rptfrm = get_bits1(gb);
1678 if(v->panscanflag) {
1681 v->rnd = get_bits1(gb);
1683 v->uvsamp = get_bits1(gb);
1684 if(v->finterpflag) v->interpfrm = get_bits(gb, 1);
1685 if(v->s.pict_type == B_TYPE) {
1686 v->bfraction = get_vlc2(gb, vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
1687 v->bfraction = vc1_bfraction_lut[v->bfraction];
1688 if(v->bfraction == 0) {
1689 v->s.pict_type = BI_TYPE; /* XXX: should not happen here */
/* quantizer, same derivation as the Simple/Main parser */
1692 pqindex = get_bits(gb, 5);
1693 v->pqindex = pqindex;
1694 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1695 v->pq = pquant_table[0][pqindex];
1697 v->pq = pquant_table[1][pqindex];
1700 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1701 v->pquantizer = pqindex < 9;
1702 if (v->quantizer_mode == QUANT_NON_UNIFORM)
1704 v->pqindex = pqindex;
1705 if (pqindex < 9) v->halfpq = get_bits(gb, 1);
1707 if (v->quantizer_mode == QUANT_FRAME_EXPLICIT)
1708 v->pquantizer = get_bits(gb, 1);
/* intensity compensation only persists across B frames */
1710 if(v->s.pict_type == I_TYPE || v->s.pict_type == P_TYPE) v->use_ic = 0;
1712 switch(v->s.pict_type) {
/* --- I/BI frame: AC prediction + conditional overlap bitplanes --- */
1715 status = bitplane_decoding(v->acpred_plane, &v->acpred_is_raw, v);
1716 if (status < 0) return -1;
1717 av_log(v->s.avctx, AV_LOG_DEBUG, "ACPRED plane encoding: "
1718 "Imode: %i, Invert: %i\n", status>>1, status&1);
1719 v->condover = CONDOVER_NONE;
1720 if(v->overlap && v->pq <= 8) {
1721 v->condover = decode012(gb);
1722 if(v->condover == CONDOVER_SELECT) {
1723 status = bitplane_decoding(v->over_flags_plane, &v->overflg_is_raw, v);
1724 if (status < 0) return -1;
1725 av_log(v->s.avctx, AV_LOG_DEBUG, "CONDOVER plane encoding: "
1726 "Imode: %i, Invert: %i\n", status>>1, status&1);
/* --- P frame --- */
1732 v->postproc = get_bits1(gb);
1733 if (v->extended_mv) v->mvrange = get_prefix(gb, 0, 3);
1734 else v->mvrange = 0;
1735 v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
1736 v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
1737 v->range_x = 1 << (v->k_x - 1);
1738 v->range_y = 1 << (v->k_y - 1);
1740 if (v->pq < 5) v->tt_index = 0;
1741 else if(v->pq < 13) v->tt_index = 1;
1742 else v->tt_index = 2;
1744 lowquant = (v->pq > 12) ? 0 : 1;
1745 v->mv_mode = mv_pmode_table[lowquant][get_prefix(gb, 1, 4)];
1746 if (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1748 int scale, shift, i;
1749 v->mv_mode2 = mv_pmode_table2[lowquant][get_prefix(gb, 1, 3)];
1750 v->lumscale = get_bits(gb, 6);
1751 v->lumshift = get_bits(gb, 6);
1752 /* fill lookup tables for intensity compensation */
1755 shift = (255 - v->lumshift * 2) << 6;
1756 if(v->lumshift > 31)
1759 scale = v->lumscale + 32;
1760 if(v->lumshift > 31)
1761 shift = (v->lumshift - 64) << 6;
1763 shift = v->lumshift << 6;
1765 for(i = 0; i < 256; i++) {
1766 v->luty[i] = av_clip_uint8((scale * i + shift + 32) >> 6);
1767 v->lutuv[i] = av_clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
/* derive quarter-sample / mspel flags from the MV prediction mode(s) */
1771 if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
1772 v->s.quarter_sample = 0;
1773 else if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1774 if(v->mv_mode2 == MV_PMODE_1MV_HPEL || v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN)
1775 v->s.quarter_sample = 0;
1777 v->s.quarter_sample = 1;
1779 v->s.quarter_sample = 1;
1780 v->s.mspel = !(v->mv_mode == MV_PMODE_1MV_HPEL_BILIN || (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN));
1782 if ((v->mv_mode == MV_PMODE_INTENSITY_COMP &&
1783 v->mv_mode2 == MV_PMODE_MIXED_MV)
1784 || v->mv_mode == MV_PMODE_MIXED_MV)
1786 status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
1787 if (status < 0) return -1;
1788 av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
1789 "Imode: %i, Invert: %i\n", status>>1, status&1);
1791 v->mv_type_is_raw = 0;
1792 memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
1794 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1795 if (status < 0) return -1;
1796 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1797 "Imode: %i, Invert: %i\n", status>>1, status&1);
1799 /* Hopefully this is correct for P frames */
1800 v->s.mv_table_index = get_bits(gb, 2); //but using vc1_ tables
1801 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1804 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1805 vop_dquant_decoding(v);
1808 v->ttfrm = 0; //FIXME Is that so ?
1811 v->ttmbf = get_bits(gb, 1);
1814 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
/* --- B frame --- */
1823 v->postproc = get_bits1(gb);
1824 if (v->extended_mv) v->mvrange = get_prefix(gb, 0, 3);
1825 else v->mvrange = 0;
1826 v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
1827 v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
1828 v->range_x = 1 << (v->k_x - 1);
1829 v->range_y = 1 << (v->k_y - 1);
1831 if (v->pq < 5) v->tt_index = 0;
1832 else if(v->pq < 13) v->tt_index = 1;
1833 else v->tt_index = 2;
1835 lowquant = (v->pq > 12) ? 0 : 1;
1836 v->mv_mode = get_bits1(gb) ? MV_PMODE_1MV : MV_PMODE_1MV_HPEL_BILIN;
1837 v->s.quarter_sample = (v->mv_mode == MV_PMODE_1MV);
1838 v->s.mspel = v->s.quarter_sample;
1840 status = bitplane_decoding(v->direct_mb_plane, &v->dmb_is_raw, v);
1841 if (status < 0) return -1;
1842 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Direct Type plane encoding: "
1843 "Imode: %i, Invert: %i\n", status>>1, status&1);
1844 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1845 if (status < 0) return -1;
1846 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1847 "Imode: %i, Invert: %i\n", status>>1, status&1);
1849 v->s.mv_table_index = get_bits(gb, 2);
1850 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1854 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1855 vop_dquant_decoding(v);
1861 v->ttmbf = get_bits(gb, 1);
1864 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
/* AC/DC coding-table selectors, common to all picture types */
1874 v->c_ac_table_index = decode012(gb);
1875 if (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1877 v->y_ac_table_index = decode012(gb);
1880 v->s.dc_table_index = get_bits(gb, 1);
/* unlike the Simple/Main parser, I/BI frames may carry VOP DQuant here */
1881 if ((v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE) && v->dquant) {
1882 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1883 vop_dquant_decoding(v);
1887 if(v->s.pict_type == BI_TYPE) {
1888 v->s.pict_type = B_TYPE;
1894 /***********************************************************************/
1896 * @defgroup block VC-1 Block-level functions
1897 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
1903 * @brief Get macroblock-level quantizer scale
/*
 * NOTE(review): GET_MQUANT() expands in macroblock-decoding loops and
 * relies on `mquant`, `edges`, `mqdiff`, `gb`, `s` and `v` being in the
 * enclosing scope.  It selects between the frame quantizer v->pq and the
 * alternate quantizer v->altpq according to v->dqprofile (all MBs,
 * single/double edge, or four edges), reading escape values from the
 * bitstream where the syntax allows.  The embedded line numbers are
 * discontinuous, so the macro's opening lines and some branches are
 * missing from this copy — no lines may be inserted inside the
 * backslash-continued body without breaking it.
 */
1905 #define GET_MQUANT() \
1909 if (v->dqprofile == DQPROFILE_ALL_MBS) \
1913 mquant = (get_bits(gb, 1)) ? v->altpq : v->pq; \
1917 mqdiff = get_bits(gb, 3); \
1918 if (mqdiff != 7) mquant = v->pq + mqdiff; \
1919 else mquant = get_bits(gb, 5); \
1922 if(v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1923 edges = 1 << v->dqsbedge; \
1924 else if(v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1925 edges = (3 << v->dqsbedge) % 15; \
1926 else if(v->dqprofile == DQPROFILE_FOUR_EDGES) \
1928 if((edges&1) && !s->mb_x) \
1929 mquant = v->altpq; \
1930 if((edges&2) && s->first_slice_line) \
1931 mquant = v->altpq; \
1932 if((edges&4) && s->mb_x == (s->mb_width - 1)) \
1933 mquant = v->altpq; \
1934 if((edges&8) && s->mb_y == (s->mb_height - 1)) \
1935 mquant = v->altpq; \
1939 * @def GET_MVDATA(_dmv_x, _dmv_y)
1940 * @brief Get MV differentials
1941 * @see MVDATA decoding from 8.3.5.2, p(1)20
1942 * @param _dmv_x Horizontal differential for decoded MV
1943 * @param _dmv_y Vertical differential for decoded MV
/*
 * NOTE(review): expands with `index`, `index1`, `val`, `sign`,
 * `mb_has_coeffs`, `gb`, `s`, `v`, `size_table` and `offset_table` in the
 * enclosing scope.  index 0 = zero MV, 35 = raw long MV, 36 presumably
 * the intra escape (its branch is elided); otherwise the differential is
 * reconstructed from size/offset tables with a branchless sign trick.
 * Embedded line numbers are discontinuous — several lines of the macro
 * are missing from this copy; no lines may be inserted inside the
 * backslash-continued body.
 */
1945 #define GET_MVDATA(_dmv_x, _dmv_y) \
1946 index = 1 + get_vlc2(gb, vc1_mv_diff_vlc[s->mv_table_index].table,\
1947 VC1_MV_DIFF_VLC_BITS, 2); \
1950 mb_has_coeffs = 1; \
1953 else mb_has_coeffs = 0; \
1955 if (!index) { _dmv_x = _dmv_y = 0; } \
1956 else if (index == 35) \
1958 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1959 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1961 else if (index == 36) \
1970 if (!s->quarter_sample && index1 == 5) val = 1; \
1972 if(size_table[index1] - val > 0) \
1973 val = get_bits(gb, size_table[index1] - val); \
1975 sign = 0 - (val&1); \
1976 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1979 if (!s->quarter_sample && index1 == 5) val = 1; \
1981 if(size_table[index1] - val > 0) \
1982 val = get_bits(gb, size_table[index1] - val); \
1984 sign = 0 - (val&1); \
1985 _dmv_y = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1988 /** Predict and set motion vector
/*
 * NOTE(review): median-predicts the MV for block n from neighbours
 * A (above), B (above-diagonal) and C (left), pulls the result back into
 * the picture per 8.3.5.3.4, optionally applies hybrid prediction
 * (8.3.5.3.5), then stores the range-wrapped MV.  mv1 != 0 means 1-MV
 * macroblock: the result is replicated to all four 8x8 positions.
 * Embedded line numbers are discontinuous — declarations of A/B/C,
 * px/py/qx/qy/X/Y/sum and several branches are elided from this copy.
 */
1990 static inline void vc1_pred_mv(MpegEncContext *s, int n, int dmv_x, int dmv_y, int mv1, int r_x, int r_y, uint8_t* is_intra)
1992 int xy, wrap, off = 0;
1997 /* scale MV difference to be quad-pel */
1998 dmv_x <<= 1 - s->quarter_sample;
1999 dmv_y <<= 1 - s->quarter_sample;
2001 wrap = s->b8_stride;
2002 xy = s->block_index[n];
/* intra block: zero the stored MV (and its 1-MV replicas) */
2005 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
2006 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
2007 if(mv1) { /* duplicate motion data for 1-MV block */
2008 s->current_picture.motion_val[0][xy + 1][0] = 0;
2009 s->current_picture.motion_val[0][xy + 1][1] = 0;
2010 s->current_picture.motion_val[0][xy + wrap][0] = 0;
2011 s->current_picture.motion_val[0][xy + wrap][1] = 0;
2012 s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
2013 s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
/* gather spatial neighbour predictors */
2018 C = s->current_picture.motion_val[0][xy - 1];
2019 A = s->current_picture.motion_val[0][xy - wrap];
2021 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
2023 //in 4-MV mode different blocks have different B predictor position
2026 off = (s->mb_x > 0) ? -1 : 1;
2029 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
2038 B = s->current_picture.motion_val[0][xy - wrap + off];
2040 if(!s->first_slice_line || (n==2 || n==3)) { // predictor A is not out of bounds
2041 if(s->mb_width == 1) {
2045 px = mid_pred(A[0], B[0], C[0]);
2046 py = mid_pred(A[1], B[1], C[1]);
2048 } else if(s->mb_x || (n==1 || n==3)) { // predictor C is not out of bounds
2054 /* Pullback MV as specified in 8.3.5.3.4 */
2057 qx = (s->mb_x << 6) + ((n==1 || n==3) ? 32 : 0);
2058 qy = (s->mb_y << 6) + ((n==2 || n==3) ? 32 : 0);
2059 X = (s->mb_width << 6) - 4;
2060 Y = (s->mb_height << 6) - 4;
2062 if(qx + px < -60) px = -60 - qx;
2063 if(qy + py < -60) py = -60 - qy;
2065 if(qx + px < -28) px = -28 - qx;
2066 if(qy + py < -28) py = -28 - qy;
2068 if(qx + px > X) px = X - qx;
2069 if(qy + py > Y) py = Y - qy;
2071 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2072 if((!s->first_slice_line || (n==2 || n==3)) && (s->mb_x || (n==1 || n==3))) {
2073 if(is_intra[xy - wrap])
2074 sum = FFABS(px) + FFABS(py);
2076 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
/* explicit bit selects between A and C when the predictors diverge */
2078 if(get_bits1(&s->gb)) {
2086 if(is_intra[xy - 1])
2087 sum = FFABS(px) + FFABS(py);
2089 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2091 if(get_bits1(&s->gb)) {
2101 /* store MV using signed modulus of MV range defined in 4.11 */
2102 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
2103 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
2104 if(mv1) { /* duplicate motion data for 1-MV block */
2105 s->current_picture.motion_val[0][xy + 1][0] = s->current_picture.motion_val[0][xy][0];
2106 s->current_picture.motion_val[0][xy + 1][1] = s->current_picture.motion_val[0][xy][1];
2107 s->current_picture.motion_val[0][xy + wrap][0] = s->current_picture.motion_val[0][xy][0];
2108 s->current_picture.motion_val[0][xy + wrap][1] = s->current_picture.motion_val[0][xy][1];
2109 s->current_picture.motion_val[0][xy + wrap + 1][0] = s->current_picture.motion_val[0][xy][0];
2110 s->current_picture.motion_val[0][xy + wrap + 1][1] = s->current_picture.motion_val[0][xy][1];
2114 /** Motion compensation for direct or interpolated blocks in B-frames
/*
 * NOTE(review): averages the backward (next-picture) prediction into the
 * already-written forward prediction in s->dest[] (only avg_* DSP calls
 * are visible).  Handles edge emulation and range-reduction scaling of
 * the source block.  Embedded line numbers are discontinuous — the
 * emulated-edge condition head and several branches are elided.
 */
2116 static void vc1_interp_mc(VC1Context *v)
2118 MpegEncContext *s = &v->s;
2119 DSPContext *dsp = &v->s.dsp;
2120 uint8_t *srcY, *srcU, *srcV;
2121 int dxy, uvdxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
2123 if(!v->s.next_picture.data[0])return;
/* derive chroma MVs from the luma MV (luma quarter-pel -> chroma) */
2125 mx = s->mv[1][0][0];
2126 my = s->mv[1][0][1];
2127 uvmx = (mx + ((mx & 3) == 3)) >> 1;
2128 uvmy = (my + ((my & 3) == 3)) >> 1;
/* fastuvmc rounding: pull odd chroma MVs toward zero — presumably the
   FASTUVMC guard is among the elided lines; TODO confirm */
2130 uvmx = uvmx + ((uvmx<0)?-(uvmx&1):(uvmx&1));
2131 uvmy = uvmy + ((uvmy<0)?-(uvmy&1):(uvmy&1));
2133 srcY = s->next_picture.data[0];
2134 srcU = s->next_picture.data[1];
2135 srcV = s->next_picture.data[2];
2137 src_x = s->mb_x * 16 + (mx >> 2);
2138 src_y = s->mb_y * 16 + (my >> 2);
2139 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
2140 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
2142 if(v->profile != PROFILE_ADVANCED){
2143 src_x = av_clip( src_x, -16, s->mb_width * 16);
2144 src_y = av_clip( src_y, -16, s->mb_height * 16);
2145 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
2146 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
2148 src_x = av_clip( src_x, -17, s->avctx->coded_width);
2149 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
2150 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
2151 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
2154 srcY += src_y * s->linesize + src_x;
2155 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
2156 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
2158 /* for grayscale we should not try to read from unknown area */
2159 if(s->flags & CODEC_FLAG_GRAY) {
2160 srcU = s->edge_emu_buffer + 18 * s->linesize;
2161 srcV = s->edge_emu_buffer + 18 * s->linesize;
/* source block crosses the picture edge: emulate via edge_emu_buffer */
2165 || (unsigned)src_x > s->h_edge_pos - (mx&3) - 16
2166 || (unsigned)src_y > s->v_edge_pos - (my&3) - 16){
2167 uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;
2169 srcY -= s->mspel * (1 + s->linesize);
2170 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
2171 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
2172 srcY = s->edge_emu_buffer;
2173 ff_emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1,
2174 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
2175 ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
2176 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
2179 /* if we deal with range reduction we need to scale source blocks */
2180 if(v->rangeredfrm) {
2182 uint8_t *src, *src2;
2185 for(j = 0; j < 17 + s->mspel*2; j++) {
2186 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
2189 src = srcU; src2 = srcV;
2190 for(j = 0; j < 9; j++) {
2191 for(i = 0; i < 9; i++) {
2192 src[i] = ((src[i] - 128) >> 1) + 128;
2193 src2[i] = ((src2[i] - 128) >> 1) + 128;
2195 src += s->uvlinesize;
2196 src2 += s->uvlinesize;
2199 srcY += s->mspel * (1 + s->linesize);
2204 dxy = ((my & 1) << 1) | (mx & 1);
2206 dsp->avg_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
2208 if(s->flags & CODEC_FLAG_GRAY) return;
2209 /* Chroma MC always uses qpel bilinear */
2210 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
2213 dsp->avg_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
2214 dsp->avg_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
/*
 * NOTE(review): scales a collocated MV component by the B-frame fraction
 * (used for direct-mode prediction — see the scale_mv() calls with
 * v->bfraction and s->quarter_sample further down).  The local `n`
 * (presumably initialized from bfrac, with `inv` subtracting the
 * denominator for the backward direction) and the `if` heads are among
 * the lines elided from this copy — the two return statements per branch
 * appear to select half-sample vs quarter-sample rounding; TODO confirm
 * against the upstream source.
 */
2217 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
2221 #if B_FRACTION_DEN==256
2225 return 2 * ((value * n + 255) >> 9);
2226 return (value * n + 128) >> 8;
2229 n -= B_FRACTION_DEN;
2231 return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2232 return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
2236 /** Reconstruct motion vector for B-frame and do motion compensation
/*
 * NOTE(review): dispatches B-frame MC by mode: interpolated mode runs
 * forward MC then averages in the backward prediction (vc1_interp_mc);
 * otherwise a single vc1_mc_1mv() in the chosen direction.  Intensity
 * compensation is temporarily forced via the mv_mode/mv_mode2 swap and
 * restored afterwards when v->use_ic is set.  Embedded line numbers are
 * discontinuous — the mode conditionals are partially elided.
 */
2238 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mode)
2241 v->mv_mode2 = v->mv_mode;
2242 v->mv_mode = MV_PMODE_INTENSITY_COMP;
2247 if(v->use_ic) v->mv_mode = v->mv_mode2;
2250 if(mode == BMV_TYPE_INTERPOLATED) {
2253 if(v->use_ic) v->mv_mode = v->mv_mode2;
2257 if(v->use_ic && (mode == BMV_TYPE_BACKWARD)) v->mv_mode = v->mv_mode2;
2258 vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
2259 if(v->use_ic) v->mv_mode = v->mv_mode2;
/*
 * NOTE(review): predict and store both forward (s->mv[0]) and backward
 * (s->mv[1]) MVs for a B-frame macroblock.  direct mode scales the
 * collocated next-picture MV by bfraction; otherwise each direction is
 * median-predicted from neighbours A/B/C, pulled back into the picture
 * (8.3.5.3.4) and range-wrapped.  The hybrid-prediction blocks are
 * guarded by `if(0 && ...)` — deliberately disabled dead code, kept
 * as-is.  Embedded line numbers are discontinuous: declarations of
 * A/B/C, px/py/qx/qy/X/Y/sum/r_x/r_y and several branches are elided.
 */
2262 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mvtype)
2264 MpegEncContext *s = &v->s;
2265 int xy, wrap, off = 0;
2270 const uint8_t *is_intra = v->mb_type[0];
2274 /* scale MV difference to be quad-pel */
2275 dmv_x[0] <<= 1 - s->quarter_sample;
2276 dmv_y[0] <<= 1 - s->quarter_sample;
2277 dmv_x[1] <<= 1 - s->quarter_sample;
2278 dmv_y[1] <<= 1 - s->quarter_sample;
2280 wrap = s->b8_stride;
2281 xy = s->block_index[0];
/* intra MB: zero both stored MVs */
2284 s->current_picture.motion_val[0][xy][0] =
2285 s->current_picture.motion_val[0][xy][1] =
2286 s->current_picture.motion_val[1][xy][0] =
2287 s->current_picture.motion_val[1][xy][1] = 0;
/* direct mode: derive both MVs from the collocated next-picture MV */
2290 s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2291 s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2292 s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2293 s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2295 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2296 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2297 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2298 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
/* forward direction: median prediction from A/B/C */
2302 if((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2303 C = s->current_picture.motion_val[0][xy - 2];
2304 A = s->current_picture.motion_val[0][xy - wrap*2];
2305 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2306 B = s->current_picture.motion_val[0][xy - wrap*2 + off];
2308 if(!s->first_slice_line) { // predictor A is not out of bounds
2309 if(s->mb_width == 1) {
2313 px = mid_pred(A[0], B[0], C[0]);
2314 py = mid_pred(A[1], B[1], C[1]);
2316 } else if(s->mb_x) { // predictor C is not out of bounds
2322 /* Pullback MV as specified in 8.3.5.3.4 */
2325 if(v->profile < PROFILE_ADVANCED) {
2326 qx = (s->mb_x << 5);
2327 qy = (s->mb_y << 5);
2328 X = (s->mb_width << 5) - 4;
2329 Y = (s->mb_height << 5) - 4;
2330 if(qx + px < -28) px = -28 - qx;
2331 if(qy + py < -28) py = -28 - qy;
2332 if(qx + px > X) px = X - qx;
2333 if(qy + py > Y) py = Y - qy;
2335 qx = (s->mb_x << 6);
2336 qy = (s->mb_y << 6);
2337 X = (s->mb_width << 6) - 4;
2338 Y = (s->mb_height << 6) - 4;
2339 if(qx + px < -60) px = -60 - qx;
2340 if(qy + py < -60) py = -60 - qy;
2341 if(qx + px > X) px = X - qx;
2342 if(qy + py > Y) py = Y - qy;
2345 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2346 if(0 && !s->first_slice_line && s->mb_x) { /* disabled on purpose */
2347 if(is_intra[xy - wrap])
2348 sum = FFABS(px) + FFABS(py);
2350 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2352 if(get_bits1(&s->gb)) {
2360 if(is_intra[xy - 2])
2361 sum = FFABS(px) + FFABS(py);
2363 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2365 if(get_bits1(&s->gb)) {
2375 /* store MV using signed modulus of MV range defined in 4.11 */
2376 s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2377 s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
/* backward direction: same procedure on the [1] MV plane */
2379 if((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2380 C = s->current_picture.motion_val[1][xy - 2];
2381 A = s->current_picture.motion_val[1][xy - wrap*2];
2382 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2383 B = s->current_picture.motion_val[1][xy - wrap*2 + off];
2385 if(!s->first_slice_line) { // predictor A is not out of bounds
2386 if(s->mb_width == 1) {
2390 px = mid_pred(A[0], B[0], C[0]);
2391 py = mid_pred(A[1], B[1], C[1]);
2393 } else if(s->mb_x) { // predictor C is not out of bounds
2399 /* Pullback MV as specified in 8.3.5.3.4 */
2402 if(v->profile < PROFILE_ADVANCED) {
2403 qx = (s->mb_x << 5);
2404 qy = (s->mb_y << 5);
2405 X = (s->mb_width << 5) - 4;
2406 Y = (s->mb_height << 5) - 4;
2407 if(qx + px < -28) px = -28 - qx;
2408 if(qy + py < -28) py = -28 - qy;
2409 if(qx + px > X) px = X - qx;
2410 if(qy + py > Y) py = Y - qy;
2412 qx = (s->mb_x << 6);
2413 qy = (s->mb_y << 6);
2414 X = (s->mb_width << 6) - 4;
2415 Y = (s->mb_height << 6) - 4;
2416 if(qx + px < -60) px = -60 - qx;
2417 if(qy + py < -60) py = -60 - qy;
2418 if(qx + px > X) px = X - qx;
2419 if(qy + py > Y) py = Y - qy;
2422 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2423 if(0 && !s->first_slice_line && s->mb_x) { /* disabled on purpose */
2424 if(is_intra[xy - wrap])
2425 sum = FFABS(px) + FFABS(py);
2427 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2429 if(get_bits1(&s->gb)) {
2437 if(is_intra[xy - 2])
2438 sum = FFABS(px) + FFABS(py);
2440 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2442 if(get_bits1(&s->gb)) {
2452 /* store MV using signed modulus of MV range defined in 4.11 */
2454 s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2455 s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
2457 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2458 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2459 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2460 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
2463 /** Get predicted DC value for I-frames only
2464 * prediction dir: left=0, top=1
2465 * @param s MpegEncContext
2466 * @param[in] n block index in the current MB
2467 * @param dc_val_ptr Pointer to DC predictor
2468 * @param dir_ptr Prediction direction for use in AC prediction
 *
 * NOTE(review): this numbered listing has gaps (e.g. lines 2472, 2488-2492,
 * 2510-2516 are absent), so the comments below describe only the code that
 * is visible here.
2470 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2471 int16_t **dc_val_ptr, int *dir_ptr)
2473 int a, b, c, wrap, pred, scale;
/* Default DC predictor per scale value: dcpred[scale] ~= 1024/scale
 * (rounded); entry 0 is unused (-1 sentinel). Used for neighbours that
 * fall outside the picture when overlap smoothing is active. */
2475 static const uint16_t dcpred[32] = {
2476 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2477 114, 102, 93, 85, 79, 73, 68, 64,
2478 60, 57, 54, 51, 49, 47, 45, 43,
2479 41, 39, 38, 37, 35, 34, 33
2482 /* find prediction - wmv3_dc_scale always used here in fact */
/* n < 4 are luma blocks, n >= 4 are chroma blocks of the MB */
2483 if (n < 4) scale = s->y_dc_scale;
2484 else scale = s->c_dc_scale;
2486 wrap = s->block_wrap[n];
2487 dc_val= s->dc_val[0] + s->block_index[n];
/* Neighbouring DC values: b = top-left, a = top (c = left is assigned on a
 * line not visible in this listing). */
2493 b = dc_val[ - 1 - wrap];
2494 a = dc_val[ - wrap];
/* Out-of-picture neighbours: with low quant or no overlap smoothing they
 * take the default predictor dcpred[scale]; otherwise (branch below) they
 * are treated as 0. */
2496 if (pq < 9 || !overlap)
2498 /* Set outer values */
/* blocks 2/3 have an in-MB top neighbour; blocks 1/3 have an in-MB left one */
2499 if (s->first_slice_line && (n!=2 && n!=3)) b=a=dcpred[scale];
2500 if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=dcpred[scale];
2504 /* Set outer values */
2505 if (s->first_slice_line && (n!=2 && n!=3)) b=a=0;
2506 if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=0;
/* Direction decision by gradient comparison (the chosen pred / *dir_ptr
 * assignments are on lines missing from this listing). */
2509 if (abs(a - b) <= abs(b - c)) {
2517 /* update predictor */
2518 *dc_val_ptr = &dc_val[0];
2523 /** Get predicted DC value
2524 * prediction dir: left=0, top=1
2525 * @param s MpegEncContext
2526 * @param[in] n block index in the current MB
2527 * @param dc_val_ptr Pointer to DC predictor
2528 * @param dir_ptr Prediction direction for use in AC prediction
 * @param a_avail  nonzero if the top neighbour block is available
 * @param c_avail  nonzero if the left neighbour block is available
 *
 * NOTE(review): this numbered listing has gaps (missing source lines), so
 * the comments below cover only the visible code.
2530 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2531 int a_avail, int c_avail,
2532 int16_t **dc_val_ptr, int *dir_ptr)
2534 int a, b, c, wrap, pred, scale;
2536 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2539 /* find prediction - wmv3_dc_scale always used here in fact */
/* n < 4 are luma blocks, n >= 4 are chroma blocks of the MB */
2540 if (n < 4) scale = s->y_dc_scale;
2541 else scale = s->c_dc_scale;
2543 wrap = s->block_wrap[n];
2544 dc_val= s->dc_val[0] + s->block_index[n];
/* Neighbouring DC values: b = top-left, a = top (c = left is assigned on a
 * line not visible in this listing). */
2550 b = dc_val[ - 1 - wrap];
2551 a = dc_val[ - wrap];
2552 /* scale predictors if needed */
/* q1 = quantizer of the current MB, q2 = quantizer of the neighbour MB.
 * Each neighbour DC is rescaled from q2 to q1 with vc1_dqscale used as a
 * fixed-point reciprocal; +0x20000 then >>18 performs rounding —
 * presumably 18-bit fixed point, TODO confirm against vc1_dqscale's def. */
2553 q1 = s->current_picture.qscale_table[mb_pos];
/* left neighbour (C) lies in the previous MB only for blocks 0/2/4/5 */
2554 if(c_avail && (n!= 1 && n!=3)) {
2555 q2 = s->current_picture.qscale_table[mb_pos - 1];
2557 c = (c * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
/* top neighbour (A) lies in the MB row above only for blocks 0/1/4/5 */
2559 if(a_avail && (n!= 2 && n!=3)) {
2560 q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2562 a = (a * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
/* top-left neighbour (B): its MB offset depends on which block this is */
2564 if(a_avail && c_avail && (n!=3)) {
2567 if(n != 2) off -= s->mb_stride;
2568 q2 = s->current_picture.qscale_table[off];
2570 b = (b * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
/* Direction decision: gradient comparison when both neighbours exist,
 * otherwise fall back to whichever single neighbour is available (the
 * pred / *dir_ptr assignments are on lines missing from this listing). */
2573 if(a_avail && c_avail) {
2574 if(abs(a - b) <= abs(b - c)) {
2581 } else if(a_avail) {
2584 } else if(c_avail) {
2592 /* update predictor */
2593 *dc_val_ptr = &dc_val[0];
2599 * @defgroup std_mb VC1 Macroblock-level functions in Simple/Main Profiles
2600 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
/**
 * Predict the "coded" flag of block n from its left (a), top-left (b) and
 * top (c) neighbours, msmpeg4-style, and return a pointer to this block's
 * coded_block slot so the caller can store the actual flag.
 *
 * NOTE(review): the lines computing and returning the prediction (between
 * 2616 and 2625) are missing from this numbered listing.
2604 static inline int vc1_coded_block_pred(MpegEncContext * s, int n, uint8_t **coded_block_ptr)
2606 int xy, wrap, pred, a, b, c;
2608 xy = s->block_index[n];
2609 wrap = s->b8_stride;
/* neighbour coded flags: a = left, b = top-left, c = top */
2614 a = s->coded_block[xy - 1 ];
2615 b = s->coded_block[xy - 1 - wrap];
2616 c = s->coded_block[xy - wrap];
/* hand back the storage slot for this block's own coded flag */
2625 *coded_block_ptr = &s->coded_block[xy];
2631 * Decode one AC coefficient
2632 * @param v The VC1 context
2633 * @param last Last coefficient
2634 * @param skip How much zero coefficients to skip
2635 * @param value Decoded AC coefficient value
 * @param codingset index selecting which AC VLC/decode tables to use
 *
 * NOTE(review): this numbered listing has gaps; in particular the escape-
 * mode branch structure and the final (*last/*skip/*value) stores are not
 * fully visible, so comments below cover only the visible code.
2638 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip, int *value, int codingset)
2640 GetBitContext *gb = &v->s.gb;
2641 int index, escape, run = 0, level = 0, lst = 0;
2643 index = get_vlc2(gb, vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
/* non-escape symbol: (run, level, last) come straight from the tables */
2644 if (index != vc1_ac_sizes[codingset] - 1) {
2645 run = vc1_index_decode_table[codingset][index][0];
2646 level = vc1_index_decode_table[codingset][index][1];
2647 lst = index >= vc1_last_decode_table[codingset];
/* escape symbol: 0/1/2 selects one of three escape modes */
2651 escape = decode210(gb);
2653 index = get_vlc2(gb, vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2654 run = vc1_index_decode_table[codingset][index][0];
2655 level = vc1_index_decode_table[codingset][index][1];
2656 lst = index >= vc1_last_decode_table[codingset];
/* escape mode correcting level: add the per-run delta */
2659 level += vc1_last_delta_level_table[codingset][run];
2661 level += vc1_delta_level_table[codingset][run];
/* escape mode correcting run: add the per-level delta (+1) */
2664 run += vc1_last_delta_run_table[codingset][level] + 1;
2666 run += vc1_delta_run_table[codingset][level] + 1;
/* fully explicit escape: raw last flag, then run/sign/level with field
 * widths (esc3_*) established once and reused for the rest of the frame */
2672 lst = get_bits(gb, 1);
2673 if(v->s.esc3_level_length == 0) {
2674 if(v->pq < 8 || v->dquantfrm) { // table 59
2675 v->s.esc3_level_length = get_bits(gb, 3);
2676 if(!v->s.esc3_level_length)
2677 v->s.esc3_level_length = get_bits(gb, 2) + 8;
2679 v->s.esc3_level_length = get_prefix(gb, 1, 6) + 2;
2681 v->s.esc3_run_length = 3 + get_bits(gb, 2);
2683 run = get_bits(gb, v->s.esc3_run_length);
2684 sign = get_bits(gb, 1);
2685 level = get_bits(gb, v->s.esc3_level_length);
2696 /** Decode intra block in intra frames - should be faster than decode_intra_block
2697 * @param v VC1Context
2698 * @param block block to decode
2699 * @param coded are AC coeffs present or not
2700 * @param codingset set of VLC to decode data
 * @param n block index within the MB (0-3 luma, 4-5 chroma)
 *
 * NOTE(review): this numbered listing has gaps (missing source lines);
 * comments below describe only the visible code.
2702 static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset)
2704 GetBitContext *gb = &v->s.gb;
2705 MpegEncContext *s = &v->s;
2706 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2709 int16_t *ac_val, *ac_val2;
2712 /* Get DC differential */
/* luma and chroma DC use separate msmpeg4 DC VLC tables */
2714 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2716 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2719 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* escape: DC differential coded with a fixed number of bits that depends
 * on the picture quantizer */
2724 if (dcdiff == 119 /* ESC index value */)
2726 /* TODO: Optimize */
2727 if (v->pq == 1) dcdiff = get_bits(gb, 10);
2728 else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2729 else dcdiff = get_bits(gb, 8);
/* at pq 1/2 extra precision bits refine the table value */
2734 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2735 else if (v->pq == 2)
2736 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
/* sign bit */
2738 if (get_bits(gb, 1))
/* add DC prediction from neighbours; also yields AC prediction direction */
2743 dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2746 /* Store the quantized DC coeff, used for prediction */
2748 block[0] = dcdiff * s->y_dc_scale;
2750 block[0] = dcdiff * s->c_dc_scale;
/* ----- coded path: decode AC coefficients ----- */
2763 int last = 0, skip, value;
2764 const int8_t *zz_table;
2768 scale = v->pq * 2 + v->halfpq;
/* zigzag pattern follows the DC/AC prediction direction */
2772 zz_table = vc1_horizontal_zz;
2774 zz_table = vc1_vertical_zz;
2776 zz_table = vc1_normal_zz;
2778 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
/* point ac_val at the predictor row/column of the chosen neighbour */
2780 if(dc_pred_dir) //left
2783 ac_val -= 16 * s->block_wrap[n];
/* run/level/last loop filling the block in zigzag order */
2786 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2790 block[zz_table[i++]] = value;
2793 /* apply AC prediction if needed */
2795 if(dc_pred_dir) { //left
2796 for(k = 1; k < 8; k++)
2797 block[k << 3] += ac_val[k];
2799 for(k = 1; k < 8; k++)
2800 block[k] += ac_val[k + 8];
2803 /* save AC coeffs for further prediction */
/* ac_val2[0..7] = first column, ac_val2[8..15] = first row */
2804 for(k = 1; k < 8; k++) {
2805 ac_val2[k] = block[k << 3];
2806 ac_val2[k + 8] = block[k];
2809 /* scale AC coeffs */
/* non-uniform quantizer adds pq toward larger magnitude (visible line) */
2810 for(k = 1; k < 64; k++)
2814 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2817 if(s->ac_pred) i = 63;
/* ----- not-coded path: only predicted ACs (if any) fill the block ----- */
2823 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2826 scale = v->pq * 2 + v->halfpq;
2827 memset(ac_val2, 0, 16 * 2);
2828 if(dc_pred_dir) {//left
2831 memcpy(ac_val2, ac_val, 8 * 2);
2833 ac_val -= 16 * s->block_wrap[n];
2835 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2838 /* apply AC prediction if needed */
2840 if(dc_pred_dir) { //left
2841 for(k = 1; k < 8; k++) {
2842 block[k << 3] = ac_val[k] * scale;
2843 if(!v->pquantizer && block[k << 3])
2844 block[k << 3] += (block[k << 3] < 0) ? -v->pq : v->pq;
2847 for(k = 1; k < 8; k++) {
2848 block[k] = ac_val[k + 8] * scale;
2849 if(!v->pquantizer && block[k])
2850 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2856 s->block_last_index[n] = i;
2861 /** Decode intra block in intra frames (Advanced Profile variant of vc1_decode_i_block)
2862 * @param v VC1Context
2863 * @param block block to decode
2864 * @param coded are AC coeffs present or not
2865 * @param codingset set of VLC to decode data
 * @param n block index within the MB (0-3 luma, 4-5 chroma)
 * @param mquant macroblock quantizer (replaces v->pq of the non-adv version)
 *
 * NOTE(review): this numbered listing has gaps (missing source lines);
 * comments below describe only the visible code.
2867 static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset, int mquant)
2869 GetBitContext *gb = &v->s.gb;
2870 MpegEncContext *s = &v->s;
2871 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2874 int16_t *ac_val, *ac_val2;
2876 int a_avail = v->a_avail, c_avail = v->c_avail;
2877 int use_pred = s->ac_pred;
2880 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2882 /* Get DC differential */
/* luma and chroma DC use separate msmpeg4 DC VLC tables */
2884 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2886 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2889 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* escape: fixed-length DC differential, width depends on mquant */
2894 if (dcdiff == 119 /* ESC index value */)
2896 /* TODO: Optimize */
2897 if (mquant == 1) dcdiff = get_bits(gb, 10);
2898 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2899 else dcdiff = get_bits(gb, 8);
/* at mquant 1/2 extra precision bits refine the table value */
2904 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2905 else if (mquant == 2)
2906 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
/* sign bit */
2908 if (get_bits(gb, 1))
/* DC prediction honours per-block neighbour availability here */
2913 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2916 /* Store the quantized DC coeff, used for prediction */
2918 block[0] = dcdiff * s->y_dc_scale;
2920 block[0] = dcdiff * s->c_dc_scale;
2929 /* check if AC is needed at all and adjust direction if needed */
/* force the prediction direction toward an available neighbour */
2930 if(!a_avail) dc_pred_dir = 1;
2931 if(!c_avail) dc_pred_dir = 0;
2932 if(!a_avail && !c_avail) use_pred = 0;
2933 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2936 scale = mquant * 2 + v->halfpq;
2938 if(dc_pred_dir) //left
2941 ac_val -= 16 * s->block_wrap[n];
/* q1 = current MB quantizer, q2 = quantizer of the MB the predictor
 * comes from; equal for blocks 1-3 whose predictor is inside this MB */
2943 q1 = s->current_picture.qscale_table[mb_pos];
2944 if(dc_pred_dir && c_avail && mb_pos) q2 = s->current_picture.qscale_table[mb_pos - 1];
2945 if(!dc_pred_dir && a_avail && mb_pos >= s->mb_stride) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2946 if(n && n<4) q2 = q1;
/* ----- coded path ----- */
2949 int last = 0, skip, value;
2950 const int8_t *zz_table;
/* zigzag pattern follows the prediction direction */
2955 zz_table = vc1_horizontal_zz;
2957 zz_table = vc1_vertical_zz;
2959 zz_table = vc1_normal_zz;
2962 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2966 block[zz_table[i++]] = value;
2969 /* apply AC prediction if needed */
2971 /* scale predictors if needed*/
/* convert q1/q2 to effective (doubled, halfpq-adjusted) scales, then
 * rescale predicted ACs from the neighbour's quantizer to ours with the
 * fixed-point reciprocal table (rounded via +0x20000 >> 18) */
2973 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2974 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2976 if(dc_pred_dir) { //left
2977 for(k = 1; k < 8; k++)
2978 block[k << 3] += (ac_val[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2980 for(k = 1; k < 8; k++)
2981 block[k] += (ac_val[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
/* same-quantizer case: add predictors directly */
2984 if(dc_pred_dir) { //left
2985 for(k = 1; k < 8; k++)
2986 block[k << 3] += ac_val[k];
2988 for(k = 1; k < 8; k++)
2989 block[k] += ac_val[k + 8];
2993 /* save AC coeffs for further prediction */
2994 for(k = 1; k < 8; k++) {
2995 ac_val2[k] = block[k << 3];
2996 ac_val2[k + 8] = block[k];
2999 /* scale AC coeffs */
/* non-uniform quantizer adds mquant toward larger magnitude */
3000 for(k = 1; k < 64; k++)
3004 block[k] += (block[k] < 0) ? -mquant : mquant;
3007 if(use_pred) i = 63;
3008 } else { // no AC coeffs
/* ----- not-coded path: copy + rescale saved predictors ----- */
3011 memset(ac_val2, 0, 16 * 2);
3012 if(dc_pred_dir) {//left
3014 memcpy(ac_val2, ac_val, 8 * 2);
3016 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3017 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3018 for(k = 1; k < 8; k++)
3019 ac_val2[k] = (ac_val2[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3024 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3026 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3027 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3028 for(k = 1; k < 8; k++)
3029 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3034 /* apply AC prediction if needed */
3036 if(dc_pred_dir) { //left
3037 for(k = 1; k < 8; k++) {
3038 block[k << 3] = ac_val2[k] * scale;
3039 if(!v->pquantizer && block[k << 3])
3040 block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
3043 for(k = 1; k < 8; k++) {
3044 block[k] = ac_val2[k + 8] * scale;
3045 if(!v->pquantizer && block[k])
3046 block[k] += (block[k] < 0) ? -mquant : mquant;
3052 s->block_last_index[n] = i;
3057 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
3058 * @param v VC1Context
3059 * @param block block to decode
3060 * @param coded are AC coeffs present or not
3061 * @param mquant block quantizer
3062 * @param codingset set of VLC to decode data
 * @param n block index within the MB (0-3 luma, 4-5 chroma)
 *
 * NOTE(review): this numbered listing has gaps (missing source lines);
 * comments below describe only the visible code.
3064 static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, int coded, int mquant, int codingset)
3066 GetBitContext *gb = &v->s.gb;
3067 MpegEncContext *s = &v->s;
3068 int dc_pred_dir = 0; /* Direction of the DC prediction used */
3071 int16_t *ac_val, *ac_val2;
3073 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3074 int a_avail = v->a_avail, c_avail = v->c_avail;
3075 int use_pred = s->ac_pred;
3079 /* XXX: Guard against dumb values of mquant */
/* clamp to the valid quantizer range */
3080 mquant = (mquant < 1) ? 0 : ( (mquant>31) ? 31 : mquant );
3082 /* Set DC scale - y and c use the same */
3083 s->y_dc_scale = s->y_dc_scale_table[mquant];
3084 s->c_dc_scale = s->c_dc_scale_table[mquant];
3086 /* Get DC differential */
/* luma and chroma DC use separate msmpeg4 DC VLC tables */
3088 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3090 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3093 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* escape: fixed-length DC differential, width depends on mquant */
3098 if (dcdiff == 119 /* ESC index value */)
3100 /* TODO: Optimize */
3101 if (mquant == 1) dcdiff = get_bits(gb, 10);
3102 else if (mquant == 2) dcdiff = get_bits(gb, 9);
3103 else dcdiff = get_bits(gb, 8);
/* at mquant 1/2 extra precision bits refine the table value */
3108 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
3109 else if (mquant == 2)
3110 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
/* sign bit */
3112 if (get_bits(gb, 1))
3117 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
3120 /* Store the quantized DC coeff, used for prediction */
3123 block[0] = dcdiff * s->y_dc_scale;
3125 block[0] = dcdiff * s->c_dc_scale;
3134 /* check if AC is needed at all and adjust direction if needed */
/* force the prediction direction toward an available neighbour */
3135 if(!a_avail) dc_pred_dir = 1;
3136 if(!c_avail) dc_pred_dir = 0;
3137 if(!a_avail && !c_avail) use_pred = 0;
3138 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
3141 scale = mquant * 2 + v->halfpq;
3143 if(dc_pred_dir) //left
3146 ac_val -= 16 * s->block_wrap[n];
/* q1 = current MB quantizer, q2 = quantizer of the MB the predictor
 * comes from; equal for blocks 1-3 whose predictor is inside this MB */
3148 q1 = s->current_picture.qscale_table[mb_pos];
3149 if(dc_pred_dir && c_avail && mb_pos) q2 = s->current_picture.qscale_table[mb_pos - 1];
3150 if(!dc_pred_dir && a_avail && mb_pos >= s->mb_stride) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
3151 if(n && n<4) q2 = q1;
/* ----- coded path ----- */
3154 int last = 0, skip, value;
3155 const int8_t *zz_table;
/* unlike vc1_decode_i_block, a single zigzag table is used here */
3158 zz_table = vc1_simple_progressive_8x8_zz;
3161 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
3165 block[zz_table[i++]] = value;
3168 /* apply AC prediction if needed */
3170 /* scale predictors if needed*/
/* convert q1/q2 to effective scales and rescale predicted ACs from the
 * neighbour's quantizer to ours (fixed-point reciprocal, rounded) */
3172 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3173 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3175 if(dc_pred_dir) { //left
3176 for(k = 1; k < 8; k++)
3177 block[k << 3] += (ac_val[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3179 for(k = 1; k < 8; k++)
3180 block[k] += (ac_val[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
/* same-quantizer case: add predictors directly */
3183 if(dc_pred_dir) { //left
3184 for(k = 1; k < 8; k++)
3185 block[k << 3] += ac_val[k];
3187 for(k = 1; k < 8; k++)
3188 block[k] += ac_val[k + 8];
3192 /* save AC coeffs for further prediction */
3193 for(k = 1; k < 8; k++) {
3194 ac_val2[k] = block[k << 3];
3195 ac_val2[k + 8] = block[k];
3198 /* scale AC coeffs */
/* non-uniform quantizer adds mquant toward larger magnitude */
3199 for(k = 1; k < 64; k++)
3203 block[k] += (block[k] < 0) ? -mquant : mquant;
3206 if(use_pred) i = 63;
3207 } else { // no AC coeffs
/* ----- not-coded path: copy + rescale saved predictors ----- */
3210 memset(ac_val2, 0, 16 * 2);
3211 if(dc_pred_dir) {//left
3213 memcpy(ac_val2, ac_val, 8 * 2);
3215 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3216 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3217 for(k = 1; k < 8; k++)
3218 ac_val2[k] = (ac_val2[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3223 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3225 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3226 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3227 for(k = 1; k < 8; k++)
3228 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3233 /* apply AC prediction if needed */
3235 if(dc_pred_dir) { //left
3236 for(k = 1; k < 8; k++) {
3237 block[k << 3] = ac_val2[k] * scale;
3238 if(!v->pquantizer && block[k << 3])
3239 block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
3242 for(k = 1; k < 8; k++) {
3243 block[k] = ac_val2[k + 8] * scale;
3244 if(!v->pquantizer && block[k])
3245 block[k] += (block[k] < 0) ? -mquant : mquant;
3251 s->block_last_index[n] = i;
/** Decode an inter-coded block of a P frame: read the transform type
 * (8x8 / two 8x4 / two 4x8 / four 4x4), decode coefficients for each
 * coded sub-block and run the matching inverse transform in place.
 * @param ttmb transform type carried over from MB level (-1 if per-block)
 * @param first_block nonzero for the first coded block of the MB
 *
 * NOTE(review): this numbered listing has gaps (missing source lines);
 * comments below describe only the visible code.
3258 static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n, int mquant, int ttmb, int first_block)
3260 MpegEncContext *s = &v->s;
3261 GetBitContext *gb = &s->gb;
3264 int scale, off, idx, last, skip, value;
3265 int ttblk = ttmb & 7;
/* per-block transform type signalled in the bitstream */
3268 ttblk = ttblk_to_tt[v->tt_index][get_vlc2(gb, vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
3270 if(ttblk == TT_4X4) {
3271 subblkpat = ~(get_vlc2(gb, vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3273 if((ttblk != TT_8X8 && ttblk != TT_4X4) && (v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))) {
3274 subblkpat = decode012(gb);
3275 if(subblkpat) subblkpat ^= 3; //swap decoded pattern bits
3276 if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) ttblk = TT_8X4;
3277 if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) ttblk = TT_4X8;
3279 scale = 2 * mquant + v->halfpq;
3281 // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3282 if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3283 subblkpat = 2 - (ttblk == TT_8X4_TOP);
3286 if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3287 subblkpat = 2 - (ttblk == TT_4X8_LEFT);
/* ----- TT_8X8: one run/level pass over the whole block ----- */
3295 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3299 idx = vc1_simple_progressive_8x8_zz[i++];
3300 block[idx] = value * scale;
/* non-uniform quantizer adds mquant toward larger magnitude */
3302 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3304 s->dsp.vc1_inv_trans_8x8(block);
/* ----- TT_4X4: up to four 4x4 sub-blocks, gated by subblkpat ----- */
3307 for(j = 0; j < 4; j++) {
3308 last = subblkpat & (1 << (3 - j));
/* sub-block origin inside the 8x8 block */
3310 off = (j & 1) * 4 + (j & 2) * 16;
3312 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3316 idx = vc1_simple_progressive_4x4_zz[i++];
3317 block[idx + off] = value * scale;
3319 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3321 if(!(subblkpat & (1 << (3 - j))))
3322 s->dsp.vc1_inv_trans_4x4(block, j);
/* ----- TT_8X4: two 8x4 halves, gated by subblkpat ----- */
3326 for(j = 0; j < 2; j++) {
3327 last = subblkpat & (1 << (1 - j));
3331 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
/* Advanced Profile uses a different 8x4 zigzag */
3335 if(v->profile < PROFILE_ADVANCED)
3336 idx = vc1_simple_progressive_8x4_zz[i++];
3338 idx = vc1_adv_progressive_8x4_zz[i++];
3339 block[idx + off] = value * scale;
3341 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3343 if(!(subblkpat & (1 << (1 - j))))
3344 s->dsp.vc1_inv_trans_8x4(block, j);
/* ----- TT_4X8: two 4x8 halves, gated by subblkpat ----- */
3348 for(j = 0; j < 2; j++) {
3349 last = subblkpat & (1 << (1 - j));
3353 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
/* Advanced Profile uses a different 4x8 zigzag */
3357 if(v->profile < PROFILE_ADVANCED)
3358 idx = vc1_simple_progressive_4x8_zz[i++];
3360 idx = vc1_adv_progressive_4x8_zz[i++];
3361 block[idx + off] = value * scale;
3363 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3365 if(!(subblkpat & (1 << (1 - j))))
3366 s->dsp.vc1_inv_trans_4x8(block, j);
3374 /** Decode one P-frame MB (in Simple/Main profile)
 * Handles both 1MV and 4MV macroblocks, skipped and unskipped; for each of
 * the six blocks it either decodes an intra block or an inter (residual)
 * block and composes the result into the current picture.
 *
 * NOTE(review): this numbered listing has gaps (missing source lines);
 * comments below describe only the visible code.
3376 static int vc1_decode_p_mb(VC1Context *v)
3378 MpegEncContext *s = &v->s;
3379 GetBitContext *gb = &s->gb;
3381 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3382 int cbp; /* cbp decoding stuff */
3383 int mqdiff, mquant; /* MB quantization */
3384 int ttmb = v->ttfrm; /* MB Transform type */
/* tables for MV differential decoding (GET_MVDATA) */
3387 static const int size_table[6] = { 0, 2, 3, 4, 5, 8 },
3388 offset_table[6] = { 0, 1, 3, 7, 15, 31 };
3389 int mb_has_coeffs = 1; /* last_flag */
3390 int dmv_x, dmv_y; /* Differential MV components */
3391 int index, index1; /* LUT indices */
3392 int val, sign; /* temp values */
3393 int first_block = 1;
3395 int skipped, fourmv;
3397 mquant = v->pq; /* provisional initialization; may be updated per MB */
/* MV-type (1MV/4MV) and skip flags come either raw from the bitstream or
 * from the decoded bitplanes */
3399 if (v->mv_type_is_raw)
3400 fourmv = get_bits1(gb);
3402 fourmv = v->mv_type_mb_plane[mb_pos];
3404 skipped = get_bits1(gb);
3406 skipped = v->s.mbskip_table[mb_pos];
3408 s->dsp.clear_blocks(s->block[0]);
3410 if (!fourmv) /* 1MV mode */
/* --- unskipped 1MV MB --- */
3414 GET_MVDATA(dmv_x, dmv_y);
/* no backward MVs in P frames */
3417 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3418 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3420 s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
3421 vc1_pred_mv(s, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
3423 /* FIXME Set DC val for inter block ? */
3424 if (s->mb_intra && !mb_has_coeffs)
3427 s->ac_pred = get_bits(gb, 1);
3430 else if (mb_has_coeffs)
3432 if (s->mb_intra) s->ac_pred = get_bits(gb, 1);
3433 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3441 s->current_picture.qscale_table[mb_pos] = mquant;
/* per-MB transform type, unless fixed for the frame (ttmbf) */
3443 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3444 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table,
3445 VC1_TTMB_VLC_BITS, 2);
3446 if(!s->mb_intra) vc1_mc_1mv(v, 0);
/* per-block loop (index i, partially visible) */
3450 s->dc_val[0][s->block_index[i]] = 0;
3452 val = ((cbp >> (5 - i)) & 1);
/* destination offset: chroma blocks (i>=4) start at 0, luma blocks are
 * arranged 2x2 inside the 16x16 MB */
3453 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3454 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3456 /* check if prediction blocks A and C are available */
3457 v->a_avail = v->c_avail = 0;
3458 if(i == 2 || i == 3 || !s->first_slice_line)
3459 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3460 if(i == 1 || i == 3 || s->mb_x)
3461 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
/* intra path: decode, inverse transform, bias to mid-grey and store */
3463 vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
3464 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3465 s->dsp.vc1_inv_trans_8x8(s->block[i]);
3466 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3467 for(j = 0; j < 64; j++) s->block[i][j] += 128;
3468 if(!v->res_fasttx && v->res_x8) for(j = 0; j < 64; j++) s->block[i][j] += 16;
3469 s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
/* overlap smoothing across block edges at high quant */
3470 if(v->pq >= 9 && v->overlap) {
3472 s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3474 s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
/* inter path: decode residual and add to the MC prediction */
3477 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
3478 if(!v->ttmbf && ttmb < 8) ttmb = -1;
3480 if((i<4) || !(s->flags & CODEC_FLAG_GRAY))
3481 s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
/* --- skipped 1MV MB: no coefficients, just MC with predicted MV --- */
3488 for(i = 0; i < 6; i++) {
3489 v->mb_type[0][s->block_index[i]] = 0;
3490 s->dc_val[0][s->block_index[i]] = 0;
3492 s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3493 s->current_picture.qscale_table[mb_pos] = 0;
3494 vc1_pred_mv(s, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
/* --- 4MV mode --- */
3501 if (!skipped /* unskipped MB */)
3503 int intra_count = 0, coded_inter = 0;
3504 int is_intra[6], is_coded[6];
3506 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
/* first pass: per-luma-block MV decoding and intra/coded bookkeeping */
3509 val = ((cbp >> (5 - i)) & 1);
3510 s->dc_val[0][s->block_index[i]] = 0;
3517 GET_MVDATA(dmv_x, dmv_y);
3519 vc1_pred_mv(s, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
3520 if(!s->mb_intra) vc1_mc_4mv_luma(v, i);
3521 intra_count += s->mb_intra;
3522 is_intra[i] = s->mb_intra;
3523 is_coded[i] = mb_has_coeffs;
/* chroma blocks are intra when a majority of luma blocks is intra */
3526 is_intra[i] = (intra_count >= 3);
3529 if(i == 4) vc1_mc_4mv_chroma(v);
3530 v->mb_type[0][s->block_index[i]] = is_intra[i];
3531 if(!coded_inter) coded_inter = !is_intra[i] & is_coded[i];
3533 // if there are no coded blocks then don't do anything more
3534 if(!intra_count && !coded_inter) return 0;
3537 s->current_picture.qscale_table[mb_pos] = mquant;
3538 /* test if block is intra and has pred */
3543 if(((!s->first_slice_line || (i==2 || i==3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3544 || ((s->mb_x || (i==1 || i==3)) && v->mb_type[0][s->block_index[i] - 1])) {
3549 if(intrapred)s->ac_pred = get_bits(gb, 1);
3550 else s->ac_pred = 0;
3552 if (!v->ttmbf && coded_inter)
3553 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* second pass: decode each block (same intra/inter handling as 1MV) */
3557 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3558 s->mb_intra = is_intra[i];
3560 /* check if prediction blocks A and C are available */
3561 v->a_avail = v->c_avail = 0;
3562 if(i == 2 || i == 3 || !s->first_slice_line)
3563 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3564 if(i == 1 || i == 3 || s->mb_x)
3565 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3567 vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant, (i&4)?v->codingset2:v->codingset);
3568 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3569 s->dsp.vc1_inv_trans_8x8(s->block[i]);
3570 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3571 for(j = 0; j < 64; j++) s->block[i][j] += 128;
3572 if(!v->res_fasttx && v->res_x8) for(j = 0; j < 64; j++) s->block[i][j] += 16;
3573 s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
3574 if(v->pq >= 9 && v->overlap) {
3576 s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3578 s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3580 } else if(is_coded[i]) {
3581 status = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
3582 if(!v->ttmbf && ttmb < 8) ttmb = -1;
3584 if((i<4) || !(s->flags & CODEC_FLAG_GRAY))
3585 s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
/* --- skipped 4MV MB: per-block MC with predicted MVs only --- */
3593 s->current_picture.qscale_table[mb_pos] = 0;
3594 for (i=0; i<6; i++) {
3595 v->mb_type[0][s->block_index[i]] = 0;
3596 s->dc_val[0][s->block_index[i]] = 0;
3600 vc1_pred_mv(s, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0]);
3601 vc1_mc_4mv_luma(v, i);
3603 vc1_mc_4mv_chroma(v);
3604 s->current_picture.qscale_table[mb_pos] = 0;
3609 /* Should never happen */
3613 /** Decode one B-frame MB (in Main profile)
 * Decodes direct/non-direct MB type, the B MV type (forward, backward,
 * interpolated), performs bidirectional MC and then decodes the six
 * residual/intra blocks.
 *
 * NOTE(review): this numbered listing has gaps (missing source lines);
 * comments below describe only the visible code.
3615 static void vc1_decode_b_mb(VC1Context *v)
3617 MpegEncContext *s = &v->s;
3618 GetBitContext *gb = &s->gb;
3620 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3621 int cbp = 0; /* cbp decoding stuff */
3622 int mqdiff, mquant; /* MB quantization */
3623 int ttmb = v->ttfrm; /* MB Transform type */
/* tables for MV differential decoding (GET_MVDATA) */
3625 static const int size_table[6] = { 0, 2, 3, 4, 5, 8 },
3626 offset_table[6] = { 0, 1, 3, 7, 15, 31 };
3627 int mb_has_coeffs = 0; /* last_flag */
3628 int index, index1; /* LUT indices */
3629 int val, sign; /* temp values */
3630 int first_block = 1;
3632 int skipped, direct;
3633 int dmv_x[2], dmv_y[2];
3634 int bmvtype = BMV_TYPE_BACKWARD;
3636 mquant = v->pq; /* provisional initialization; may be updated per MB */
/* direct and skip flags come either raw from the bitstream or from the
 * decoded bitplanes */
3640 direct = get_bits1(gb);
3642 direct = v->direct_mb_plane[mb_pos];
3644 skipped = get_bits1(gb);
3646 skipped = v->s.mbskip_table[mb_pos];
3648 s->dsp.clear_blocks(s->block[0]);
3649 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
3650 for(i = 0; i < 6; i++) {
3651 v->mb_type[0][s->block_index[i]] = 0;
3652 s->dc_val[0][s->block_index[i]] = 0;
3654 s->current_picture.qscale_table[mb_pos] = 0;
/* non-direct: read one MV differential, used for both directions */
3658 GET_MVDATA(dmv_x[0], dmv_y[0]);
3659 dmv_x[1] = dmv_x[0];
3660 dmv_y[1] = dmv_y[0];
3662 if(skipped || !s->mb_intra) {
/* 0/10/11 code selects the B MV type; the sense of forward vs backward
 * flips around the temporal midpoint given by bfraction */
3663 bmvtype = decode012(gb);
3666 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
3669 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
3672 bmvtype = BMV_TYPE_INTERPOLATED;
3673 dmv_x[0] = dmv_y[0] = 0;
3677 for(i = 0; i < 6; i++)
3678 v->mb_type[0][s->block_index[i]] = s->mb_intra;
/* skipped MB: direct implies interpolated; predict MVs and do MC only */
3681 if(direct) bmvtype = BMV_TYPE_INTERPOLATED;
3682 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3683 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
/* direct MB: cbp is always present */
3687 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3691 s->current_picture.qscale_table[mb_pos] = mquant;
3693 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3694 dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
3695 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3696 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3698 if(!mb_has_coeffs && !s->mb_intra) {
3699 /* no coded blocks - effectively skipped */
3700 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3701 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3704 if(s->mb_intra && !mb_has_coeffs) {
3706 s->current_picture.qscale_table[mb_pos] = mquant;
3707 s->ac_pred = get_bits1(gb);
3709 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
/* interpolated MB carries a second MV differential */
3711 if(bmvtype == BMV_TYPE_INTERPOLATED) {
3712 GET_MVDATA(dmv_x[0], dmv_y[0]);
3713 if(!mb_has_coeffs) {
3714 /* interpolated skipped block */
3715 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3716 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3720 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3722 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3725 s->ac_pred = get_bits1(gb);
3726 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3728 s->current_picture.qscale_table[mb_pos] = mquant;
3729 if(!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3730 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* per-block loop: intra or inter residual decoding (same layout as the
 * P-frame MB loop) */
3736 s->dc_val[0][s->block_index[i]] = 0;
3738 val = ((cbp >> (5 - i)) & 1);
3739 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3740 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3742 /* check if prediction blocks A and C are available */
3743 v->a_avail = v->c_avail = 0;
3744 if(i == 2 || i == 3 || !s->first_slice_line)
3745 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3746 if(i == 1 || i == 3 || s->mb_x)
3747 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3749 vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
3750 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3751 s->dsp.vc1_inv_trans_8x8(s->block[i]);
3752 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3753 for(j = 0; j < 64; j++) s->block[i][j] += 128;
3754 s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3756 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
3757 if(!v->ttmbf && ttmb < 8) ttmb = -1;
3759 if((i<4) || !(s->flags & CODEC_FLAG_GRAY))
3760 s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
3765 /** Decode blocks of I-frame
/* NOTE(review): this is a numbered listing with elided lines — the case
 * labels, break statements and closing braces between the visible lines
 * are missing from this excerpt; the code is documented as-is. */
3767 static void vc1_decode_i_blocks(VC1Context *v)
3770 MpegEncContext *s = &v->s;
/* Pick the luma AC VLC coding set from the Y AC table index; low pqindex
 * (<= 8) streams use the high-rate intra tables. */
3775 /* select codingmode used for VLC tables selection */
3776 switch(v->y_ac_table_index){
3778 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3781 v->codingset = CS_HIGH_MOT_INTRA;
3784 v->codingset = CS_MID_RATE_INTRA;
/* Chroma AC coding set, selected independently from the C AC table index. */
3788 switch(v->c_ac_table_index){
3790 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3793 v->codingset2 = CS_HIGH_MOT_INTER;
3796 v->codingset2 = CS_MID_RATE_INTER;
3800 /* Set DC scale - y and c use the same */
3801 s->y_dc_scale = s->y_dc_scale_table[v->pq];
3802 s->c_dc_scale = s->c_dc_scale_table[v->pq];
/* Per-MB raster scan over the whole frame; whole frame reported to the
 * error resilience layer up front. */
3805 s->mb_x = s->mb_y = 0;
3807 s->first_slice_line = 1;
3808 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3809 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3810 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3811 ff_init_block_index(s);
3812 ff_update_block_index(s);
3813 s->dsp.clear_blocks(s->block[0]);
/* NOTE(review): mb_pos is computed with mb_width here, while
 * vc1_decode_i_blocks_adv uses mb_stride; qscale_table/mb_type are
 * normally mb_stride-indexed — verify this is intentional. */
3814 mb_pos = s->mb_x + s->mb_y * s->mb_width;
3815 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
3816 s->current_picture.qscale_table[mb_pos] = v->pq;
3817 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3818 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3820 // do actual MB decoding and displaying
3821 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
3822 v->s.ac_pred = get_bits(&v->s.gb, 1);
/* Decode the 6 blocks of the MB (4 luma + 2 chroma); the per-block coded
 * bit is predicted and the CBP updated with the actual value. */
3824 for(k = 0; k < 6; k++) {
3825 val = ((cbp >> (5 - k)) & 1);
3828 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
3832 cbp |= val << (5 - k);
3834 vc1_decode_i_block(v, s->block[k], k, val, (k<4)? v->codingset : v->codingset2);
3836 s->dsp.vc1_inv_trans_8x8(s->block[k]);
/* Bias samples by 128 (intra DC level shift) before clamping/output. */
3837 if(v->pq >= 9 && v->overlap) {
3838 for(j = 0; j < 64; j++) s->block[k][j] += 128;
3842 vc1_put_block(v, s->block);
/* Overlap smoothing (horizontal then vertical) across 8x8 block edges;
 * only applied for pq >= 9 when the sequence enables overlap. */
3843 if(v->pq >= 9 && v->overlap) {
3845 s->dsp.vc1_h_overlap(s->dest[0], s->linesize);
3846 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
3847 if(!(s->flags & CODEC_FLAG_GRAY)) {
3848 s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
3849 s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
3852 s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
3853 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
3854 if(!s->first_slice_line) {
3855 s->dsp.vc1_v_overlap(s->dest[0], s->linesize);
3856 s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
3857 if(!(s->flags & CODEC_FLAG_GRAY)) {
3858 s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
3859 s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
3862 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
3863 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
/* Bail-out diagnostic if the bit reader ran past the frame's bit budget. */
3866 if(get_bits_count(&s->gb) > v->bits) {
3867 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", get_bits_count(&s->gb), v->bits);
3871 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3872 s->first_slice_line = 0;
3876 /** Decode blocks of I-frame for advanced profile
/* NOTE(review): numbered listing with elided lines (case labels, breaks,
 * braces missing between visible lines); documented as-is. */
3878 static void vc1_decode_i_blocks_adv(VC1Context *v)
3881 MpegEncContext *s = &v->s;
3888 GetBitContext *gb = &s->gb;
/* Luma AC coding set from the Y AC table index (as in vc1_decode_i_blocks). */
3890 /* select codingmode used for VLC tables selection */
3891 switch(v->y_ac_table_index){
3893 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3896 v->codingset = CS_HIGH_MOT_INTRA;
3899 v->codingset = CS_MID_RATE_INTRA;
/* Chroma AC coding set from the C AC table index. */
3903 switch(v->c_ac_table_index){
3905 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3908 v->codingset2 = CS_HIGH_MOT_INTER;
3911 v->codingset2 = CS_MID_RATE_INTER;
3916 s->mb_x = s->mb_y = 0;
3918 s->first_slice_line = 1;
3919 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3920 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3921 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3922 ff_init_block_index(s);
3923 ff_update_block_index(s);
3924 s->dsp.clear_blocks(s->block[0]);
/* Advanced profile indexes per-MB tables with mb_stride (cf. mb_width in
 * the simple/main-profile variant above). */
3925 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3926 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
3927 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3928 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3930 // do actual MB decoding and displaying
3931 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
/* AC prediction flag: read raw from the bitstream, or taken from the
 * decoded ACPRED bitplane for this MB. */
3932 if(v->acpred_is_raw)
3933 v->s.ac_pred = get_bits(&v->s.gb, 1);
3935 v->s.ac_pred = v->acpred_plane[mb_pos];
/* Conditional overlap: per-MB flag (raw bit or OVERFLAGS bitplane) when
 * CONDOVER_SELECT, otherwise all-or-nothing via CONDOVER_ALL. */
3937 if(v->condover == CONDOVER_SELECT) {
3938 if(v->overflg_is_raw)
3939 overlap = get_bits(&v->s.gb, 1);
3941 overlap = v->over_flags_plane[mb_pos];
3943 overlap = (v->condover == CONDOVER_ALL);
3947 s->current_picture.qscale_table[mb_pos] = mquant;
3948 /* Set DC scale - y and c use the same */
3949 s->y_dc_scale = s->y_dc_scale_table[mquant];
3950 s->c_dc_scale = s->c_dc_scale_table[mquant];
/* Decode the 6 blocks; A (above) and C (left) predictor availability is
 * derived from block position within the MB and frame edges. */
3952 for(k = 0; k < 6; k++) {
3953 val = ((cbp >> (5 - k)) & 1);
3956 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
3960 cbp |= val << (5 - k);
3962 v->a_avail = !s->first_slice_line || (k==2 || k==3);
3963 v->c_avail = !!s->mb_x || (k==1 || k==3);
3965 vc1_decode_i_block_adv(v, s->block[k], k, val, (k<4)? v->codingset : v->codingset2, mquant);
3967 s->dsp.vc1_inv_trans_8x8(s->block[k]);
3968 for(j = 0; j < 64; j++) s->block[k][j] += 128;
3971 vc1_put_block(v, s->block);
/* Overlap smoothing across block edges (horizontal, then vertical once a
 * row above exists). */
3974 s->dsp.vc1_h_overlap(s->dest[0], s->linesize);
3975 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
3976 if(!(s->flags & CODEC_FLAG_GRAY)) {
3977 s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
3978 s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
3981 s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
3982 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
3983 if(!s->first_slice_line) {
3984 s->dsp.vc1_v_overlap(s->dest[0], s->linesize);
3985 s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
3986 if(!(s->flags & CODEC_FLAG_GRAY)) {
3987 s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
3988 s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
3991 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
3992 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
3995 if(get_bits_count(&s->gb) > v->bits) {
3996 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", get_bits_count(&s->gb), v->bits);
4000 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4001 s->first_slice_line = 0;
/* Decode all macroblocks of a P-frame. The per-MB decode call sits on an
 * elided line inside the double loop of this numbered listing. */
4005 static void vc1_decode_p_blocks(VC1Context *v)
4007 MpegEncContext *s = &v->s;
/* NOTE(review): both switches below key off c_ac_table_index, whereas the
 * I-frame functions use y_ac_table_index for codingset — verify this is
 * intentional rather than a copy-paste slip. */
4009 /* select codingmode used for VLC tables selection */
4010 switch(v->c_ac_table_index){
4012 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4015 v->codingset = CS_HIGH_MOT_INTRA;
4018 v->codingset = CS_MID_RATE_INTRA;
4022 switch(v->c_ac_table_index){
4024 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4027 v->codingset2 = CS_HIGH_MOT_INTER;
4030 v->codingset2 = CS_MID_RATE_INTER;
4034 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
4035 s->first_slice_line = 1;
4036 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
4037 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
4038 ff_init_block_index(s);
4039 ff_update_block_index(s);
4040 s->dsp.clear_blocks(s->block[0]);
/* Abort diagnostic on bit-budget overrun or bit-reader underflow. */
4043 if(get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
4044 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits,s->mb_x,s->mb_y);
4048 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4049 s->first_slice_line = 0;
/* Decode all macroblocks of a B-frame; structure mirrors
 * vc1_decode_p_blocks (per-MB decode call is on an elided line). */
4053 static void vc1_decode_b_blocks(VC1Context *v)
4055 MpegEncContext *s = &v->s;
/* NOTE(review): as in vc1_decode_p_blocks, both switches key off
 * c_ac_table_index — verify against the I-frame variants. */
4057 /* select codingmode used for VLC tables selection */
4058 switch(v->c_ac_table_index){
4060 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4063 v->codingset = CS_HIGH_MOT_INTRA;
4066 v->codingset = CS_MID_RATE_INTRA;
4070 switch(v->c_ac_table_index){
4072 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4075 v->codingset2 = CS_HIGH_MOT_INTER;
4078 v->codingset2 = CS_MID_RATE_INTER;
4082 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
4083 s->first_slice_line = 1;
4084 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
4085 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
4086 ff_init_block_index(s);
4087 ff_update_block_index(s);
4088 s->dsp.clear_blocks(s->block[0]);
4091 if(get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
4092 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits,s->mb_x,s->mb_y);
4096 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4097 s->first_slice_line = 0;
/* Handle a skipped P-frame: copy the previous picture verbatim, one MB row
 * at a time, and present it as a P-type picture. */
4101 static void vc1_decode_skip_blocks(VC1Context *v)
4103 MpegEncContext *s = &v->s;
4105 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
4106 s->first_slice_line = 1;
4107 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
4109 ff_init_block_index(s);
4110 ff_update_block_index(s);
/* Copy one 16-pixel-high luma row and the matching 8-high chroma rows from
 * the reference frame. NOTE(review): copies s->linesize bytes per row,
 * i.e. the full padded stride, not just the visible width — appears to be
 * relied on here; confirm dest buffers share the same stride. */
4111 memcpy(s->dest[0], s->last_picture.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
4112 memcpy(s->dest[1], s->last_picture.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
4113 memcpy(s->dest[2], s->last_picture.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
4114 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4115 s->first_slice_line = 0;
4117 s->pict_type = P_TYPE;
/* Dispatch MB-layer decoding by picture type; advanced profile uses the
 * _adv I-frame path, skipped P-frames take the copy path. */
4120 static void vc1_decode_blocks(VC1Context *v)
4123 v->s.esc3_level_length = 0;
4125 switch(v->s.pict_type) {
4127 if(v->profile == PROFILE_ADVANCED)
4128 vc1_decode_i_blocks_adv(v);
4130 vc1_decode_i_blocks(v);
4133 if(v->p_frame_skipped)
4134 vc1_decode_skip_blocks(v);
4136 vc1_decode_p_blocks(v);
/* BI-frames decode like I-frames (per profile); true B-frames use the
 * B path below. */
4140 if(v->profile == PROFILE_ADVANCED)
4141 vc1_decode_i_blocks_adv(v);
4143 vc1_decode_i_blocks(v);
4145 vc1_decode_b_blocks(v);
/* True when x is a VC-1 start code: any 32-bit value of the form
 * 0x000001xx (VC1_CODE_RES0 is 0x00000100). */
4150 #define IS_MARKER(x) (((x) & ~0xFF) == VC1_CODE_RES0)
4152 /** Find VC-1 marker in buffer
4153 * @return position where next marker starts or end of buffer if no marker found
4155 static av_always_inline uint8_t* find_next_marker(uint8_t *src, uint8_t *end)
/* Rolling 32-bit window over the bytes; seeded with all-ones so the first
 * three shifts cannot spuriously match a marker. */
4157 uint32_t mrk = 0xFFFFFFFF;
4159 if(end-src < 4) return end;
4161 mrk = (mrk << 8) | *src++;
/* Remove VC-1 start-code emulation prevention bytes: a 0x03 following
 * 0x00 0x00, with a next byte < 4, is dropped (its successor is kept).
 * Returns the unescaped size written to dst. The first loop is the
 * fast path that copies the buffer verbatim (guarded by an elided
 * condition in this listing). */
4168 static av_always_inline int vc1_unescape_buffer(uint8_t *src, int size, uint8_t *dst)
4173 for(dsize = 0; dsize < size; dsize++) *dst++ = *src++;
4176 for(i = 0; i < size; i++, src++) {
4177 if(src[0] == 3 && i >= 2 && !src[-1] && !src[-2] && i < size-1 && src[1] < 4) {
4178 dst[dsize++] = src[1];
4182 dst[dsize++] = *src;
4187 /** Initialize a VC1/WMV3 decoder
4188 * @todo TODO: Handle VC-1 IDUs (Transport level?)
4189 * @todo TODO: Decypher remaining bits in extra_data
/* Parses extradata (WMV3: raw sequence header; VC-1/WVC1: marker-delimited
 * sequence header + entry point), then allocates per-MB bitplanes and the
 * block-type arrays. Returns -1 on any setup failure. */
4191 static int vc1_decode_init(AVCodecContext *avctx)
4193 VC1Context *v = avctx->priv_data;
4194 MpegEncContext *s = &v->s;
/* Extradata is mandatory — it carries the sequence header. */
4197 if (!avctx->extradata_size || !avctx->extradata) return -1;
4198 if (!(avctx->flags & CODEC_FLAG_GRAY))
4199 avctx->pix_fmt = PIX_FMT_YUV420P;
4201 avctx->pix_fmt = PIX_FMT_GRAY8;
/* Decoder always runs in edge-emulation mode. */
4203 avctx->flags |= CODEC_FLAG_EMU_EDGE;
4204 v->s.flags |= CODEC_FLAG_EMU_EDGE;
4206 if(ff_h263_decode_init(avctx) < 0)
4208 if (vc1_init_common(v) < 0) return -1;
4210 avctx->coded_width = avctx->width;
4211 avctx->coded_height = avctx->height;
4212 if (avctx->codec_id == CODEC_ID_WMV3)
4216 // looks like WMV3 has a sequence header stored in the extradata
4217 // advanced sequence header may be before the first frame
4218 // the last byte of the extradata is a version number, 1 for the
4219 // samples we can decode
4221 init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
4223 if (decode_sequence_header(avctx, &gb) < 0)
/* Report trailing/overflowed bits for debugging purposes. */
4226 count = avctx->extradata_size*8 - get_bits_count(&gb);
4229 av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
4230 count, get_bits(&gb, count));
4234 av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
4236 } else { // VC1/WVC1
4237 uint8_t *start = avctx->extradata, *end = avctx->extradata + avctx->extradata_size;
4238 uint8_t *next; int size, buf2_size;
4239 uint8_t *buf2 = NULL;
4240 int seq_inited = 0, ep_inited = 0;
4242 if(avctx->extradata_size < 16) {
4243 av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
/* Scratch buffer for unescaped header payloads.
 * NOTE(review): av_mallocz result is not checked before use here. */
4247 buf2 = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
4248 if(start[0]) start++; // in WVC1 extradata first byte is its size
/* Walk marker-delimited units; each unit is unescaped and parsed by its
 * start code. NOTE(review): 'next' appears to be read before assignment
 * on the first loop test — presumably initialized on a line elided from
 * this listing; verify in the full source. */
4250 for(; next < end; start = next){
4251 next = find_next_marker(start + 4, end);
4252 size = next - start - 4;
4253 if(size <= 0) continue;
4254 buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
4255 init_get_bits(&gb, buf2, buf2_size * 8);
4256 switch(AV_RB32(start)){
4257 case VC1_CODE_SEQHDR:
4258 if(decode_sequence_header(avctx, &gb) < 0){
4264 case VC1_CODE_ENTRYPOINT:
4265 if(decode_entry_point(avctx, &gb) < 0){
/* Advanced profile requires both a sequence header and an entry point. */
4274 if(!seq_inited || !ep_inited){
4275 av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
4279 avctx->has_b_frames= !!(avctx->max_b_frames);
4280 s->low_delay = !avctx->has_b_frames;
/* Macroblock grid dimensions (16x16 MBs, rounded up). */
4282 s->mb_width = (avctx->coded_width+15)>>4;
4283 s->mb_height = (avctx->coded_height+15)>>4;
4285 /* Allocate mb bitplanes */
/* NOTE(review): these av_malloc results are not checked for NULL. */
4286 v->mv_type_mb_plane = av_malloc(s->mb_stride * s->mb_height);
4287 v->direct_mb_plane = av_malloc(s->mb_stride * s->mb_height);
4288 v->acpred_plane = av_malloc(s->mb_stride * s->mb_height);
4289 v->over_flags_plane = av_malloc(s->mb_stride * s->mb_height);
4291 /* allocate block type info in that way so it could be used with s->block_index[] */
4292 v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
4293 v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
4294 v->mb_type[1] = v->mb_type_base + s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride + 1;
4295 v->mb_type[2] = v->mb_type[1] + s->mb_stride * (s->mb_height + 1);
4297 /* Init coded blocks info */
4298 if (v->profile == PROFILE_ADVANCED)
4300 // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
4302 // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
4310 /** Decode a VC1/WMV3 frame
4311 * @todo TODO: Handle VC-1 IDUs (Transport level?)
/* Top-level per-frame entry point: unescape/parse advanced-profile
 * containers, parse the frame header, run MB decoding, then pick which
 * picture to return (reordering for B-frames). */
4313 static int vc1_decode_frame(AVCodecContext *avctx,
4314 void *data, int *data_size,
4315 uint8_t *buf, int buf_size)
4317 VC1Context *v = avctx->priv_data;
4318 MpegEncContext *s = &v->s;
4319 AVFrame *pict = data;
4320 uint8_t *buf2 = NULL;
4322 /* no supplementary picture */
4323 if (buf_size == 0) {
4324 /* special case for last picture */
/* Flush: emit the buffered next picture at end of stream. */
4325 if (s->low_delay==0 && s->next_picture_ptr) {
4326 *pict= *(AVFrame*)s->next_picture_ptr;
4327 s->next_picture_ptr= NULL;
4329 *data_size = sizeof(AVFrame);
4335 //we need to set current_picture_ptr before reading the header, otherwise we cannot store anything in there
4336 if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){
4337 int i= ff_find_unused_picture(s, 0);
4338 s->current_picture_ptr= &s->picture[i];
4341 //for advanced profile we may need to parse and unescape data
4342 if (avctx->codec_id == CODEC_ID_VC1) {
/* NOTE(review): av_mallocz result is used without a NULL check. */
4344 buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
4346 if(IS_MARKER(AV_RB32(buf))){ /* frame starts with marker and needs to be parsed */
4347 uint8_t *dst = buf2, *start, *end, *next;
/* Walk start-code-delimited units inside the packet; frame payloads are
 * unescaped into buf2, entry points are parsed immediately, slices are
 * unsupported. NOTE(review): 'next' looks read before first assignment —
 * likely initialized on an elided line; verify in full source. */
4351 for(start = buf, end = buf + buf_size; next < end; start = next){
4352 next = find_next_marker(start + 4, end);
4353 size = next - start - 4;
4354 if(size <= 0) continue;
4355 switch(AV_RB32(start)){
4356 case VC1_CODE_FRAME:
4357 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
4359 case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
4360 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
4361 init_get_bits(&s->gb, buf2, buf_size2*8);
4362 decode_entry_point(avctx, &s->gb);
4364 case VC1_CODE_SLICE:
4365 av_log(avctx, AV_LOG_ERROR, "Sliced decoding is not implemented (yet)\n");
4370 }else if(v->interlace && ((buf[0] & 0xC0) == 0xC0)){ /* WVC1 interlaced stores both fields divided by marker */
4373 divider = find_next_marker(buf, buf + buf_size);
4374 if((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD){
4375 av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
/* Only the first field is unescaped here; second-field handling is
 * rejected above / on elided lines. */
4379 buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
4381 av_free(buf2);return -1;
4383 buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
4385 init_get_bits(&s->gb, buf2, buf_size2*8);
4387 init_get_bits(&s->gb, buf, buf_size*8);
4388 // do parse frame header
4389 if(v->profile < PROFILE_ADVANCED) {
4390 if(vc1_parse_frame_header(v, &s->gb) == -1) {
4395 if(vc1_parse_frame_header_adv(v, &s->gb) == -1) {
/* Old WMV3-beta (res_rtm_flag clear) streams: only I-frames are decodable. */
4401 if(s->pict_type != I_TYPE && !v->res_rtm_flag){
4407 s->current_picture.pict_type= s->pict_type;
4408 s->current_picture.key_frame= s->pict_type == I_TYPE;
4410 /* skip B-frames if we don't have reference frames */
4411 if(s->last_picture_ptr==NULL && (s->pict_type==B_TYPE || s->dropable)){
4413 return -1;//buf_size;
4415 /* skip b frames if we are in a hurry */
4416 if(avctx->hurry_up && s->pict_type==B_TYPE) return -1;//buf_size;
4417 if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==B_TYPE)
4418 || (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=I_TYPE)
4419 || avctx->skip_frame >= AVDISCARD_ALL) {
4423 /* skip everything if we are in a hurry>=5 */
4424 if(avctx->hurry_up>=5) {
4426 return -1;//buf_size;
/* After a damaged P-frame, drop dependent B-frames. */
4429 if(s->next_p_frame_damaged){
4430 if(s->pict_type==B_TYPE)
4433 s->next_p_frame_damaged=0;
4436 if(MPV_frame_start(s, avctx) < 0) {
4441 ff_er_frame_start(s);
/* Bit budget for the MB layer's overconsumption checks. */
4443 v->bits = buf_size * 8;
4444 vc1_decode_blocks(v);
4445 //av_log(s->avctx, AV_LOG_INFO, "Consumed %i/%i bits\n", get_bits_count(&s->gb), buf_size*8);
4446 // if(get_bits_count(&s->gb) > buf_size * 8)
4452 assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type);
4453 assert(s->current_picture.pict_type == s->pict_type);
/* Output selection: B-frames and low-delay streams return the current
 * picture; otherwise the (reordered) previous reference is returned. */
4454 if (s->pict_type == B_TYPE || s->low_delay) {
4455 *pict= *(AVFrame*)s->current_picture_ptr;
4456 } else if (s->last_picture_ptr != NULL) {
4457 *pict= *(AVFrame*)s->last_picture_ptr;
4460 if(s->last_picture_ptr || s->low_delay){
4461 *data_size = sizeof(AVFrame);
4462 ff_print_debug_info(s, pict);
4465 /* Return the Picture timestamp as the frame number */
4466 /* we subtract 1 because it is added in utils.c */
4467 avctx->frame_number = s->picture_number - 1;
4474 /** Close a VC1/WMV3 decoder
4475 * @warning Initial try at using MpegEncContext stuff
/* Frees HRD arrays, tears down the shared MpegEncContext, then releases
 * the per-MB bitplanes and block-type storage allocated in
 * vc1_decode_init. av_freep NULLs each pointer after freeing. */
4477 static int vc1_decode_end(AVCodecContext *avctx)
4479 VC1Context *v = avctx->priv_data;
4481 av_freep(&v->hrd_rate);
4482 av_freep(&v->hrd_buffer);
4483 MPV_common_end(&v->s);
4484 av_freep(&v->mv_type_mb_plane);
4485 av_freep(&v->direct_mb_plane);
4486 av_freep(&v->acpred_plane);
4487 av_freep(&v->over_flags_plane);
4488 av_freep(&v->mb_type_base);
/* Codec registration tables; initializer fields are elided in this
 * listing. vc1_decoder handles advanced-profile VC-1 (WVC1). */
4493 AVCodec vc1_decoder = {
/* wmv3_decoder handles simple/main-profile WMV3 bitstreams. */
4506 AVCodec wmv3_decoder = {
4519 #ifdef CONFIG_VC1_PARSER
4521 * finds the end of the current frame in the bitstream.
4522 * @return the position of the first byte of the next frame, or -1
/* Stateful scanner used by the parser: pc->frame_start_found / pc->state
 * persist across calls so a frame split over packets is handled. */
4524 static int vc1_find_frame_end(ParseContext *pc, const uint8_t *buf,
4529 pic_found= pc->frame_start_found;
/* Phase 1: look for a frame or field start code (rolling 32-bit state). */
4534 for(i=0; i<buf_size; i++){
4535 state= (state<<8) | buf[i];
4536 if(state == VC1_CODE_FRAME || state == VC1_CODE_FIELD){
4545 /* EOF considered as end of frame */
/* Phase 2: after a picture start, any marker other than a field or slice
 * code terminates the current frame. */
4548 for(; i<buf_size; i++){
4549 state= (state<<8) | buf[i];
4550 if(IS_MARKER(state) && state != VC1_CODE_FIELD && state != VC1_CODE_SLICE){
4551 pc->frame_start_found=0;
/* No terminator in this packet: stash progress and ask for more data. */
4557 pc->frame_start_found= pic_found;
4559 return END_NOT_FOUND;
/* AVCodecParser callback: combines packets into whole frames using
 * vc1_find_frame_end, unless the caller already supplies complete frames. */
4562 static int vc1_parse(AVCodecParserContext *s,
4563 AVCodecContext *avctx,
4564 uint8_t **poutbuf, int *poutbuf_size,
4565 const uint8_t *buf, int buf_size)
4567 ParseContext *pc = s->priv_data;
4570 if(s->flags & PARSER_FLAG_COMPLETE_FRAMES){
4573 next= vc1_find_frame_end(pc, buf, buf_size);
/* ff_combine_frame buffers partial input; a negative return means "frame
 * not complete yet" and elided lines emit an empty output in that case. */
4575 if (ff_combine_frame(pc, next, (uint8_t **)&buf, &buf_size) < 0) {
4581 *poutbuf = (uint8_t *)buf;
4582 *poutbuf_size = buf_size;
/* Returns the offset where in-band headers end: scans for the first
 * marker that is neither a sequence header nor an entry point. */
4586 int vc1_split(AVCodecContext *avctx,
4587 const uint8_t *buf, int buf_size)
4592 for(i=0; i<buf_size; i++){
4593 state= (state<<8) | buf[i];
4594 if(IS_MARKER(state) && state != VC1_CODE_SEQHDR && state != VC1_CODE_ENTRYPOINT)
/* Parser registration table; remaining initializer fields (codec ids,
 * callbacks) are elided in this listing. */
4600 AVCodecParser vc1_parser = {
4602 sizeof(ParseContext1),
4608 #endif /* CONFIG_VC1_PARSER */