]> rtime.felk.cvut.cz Git - frescor/ffmpeg.git/blob - libavcodec/huffyuv.c
frsh: Export information about the last RTP contract and VRES
[frescor/ffmpeg.git] / libavcodec / huffyuv.c
1 /*
2  * huffyuv codec for libavcodec
3  *
4  * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
7  * the algorithm used
8  *
9  * This file is part of FFmpeg.
10  *
11  * FFmpeg is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public
13  * License as published by the Free Software Foundation; either
14  * version 2.1 of the License, or (at your option) any later version.
15  *
16  * FFmpeg is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with FFmpeg; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24  */
25
26 /**
27  * @file libavcodec/huffyuv.c
28  * huffyuv codec for libavcodec.
29  */
30
31 #include "avcodec.h"
32 #include "get_bits.h"
33 #include "put_bits.h"
34 #include "dsputil.h"
35
36 #define VLC_BITS 11
37
38 #ifdef WORDS_BIGENDIAN
39 #define B 3
40 #define G 2
41 #define R 1
42 #else
43 #define B 0
44 #define G 1
45 #define R 2
46 #endif
47
48 typedef enum Predictor{
49     LEFT= 0,
50     PLANE,
51     MEDIAN,
52 } Predictor;
53
54 typedef struct HYuvContext{
55     AVCodecContext *avctx;
56     Predictor predictor;
57     GetBitContext gb;
58     PutBitContext pb;
59     int interlaced;
60     int decorrelate;
61     int bitstream_bpp;
62     int version;
63     int yuy2;                               //use yuy2 instead of 422P
64     int bgr32;                              //use bgr32 instead of bgr24
65     int width, height;
66     int flags;
67     int context;
68     int picture_number;
69     int last_slice_end;
70     uint8_t *temp[3];
71     uint64_t stats[3][256];
72     uint8_t len[3][256];
73     uint32_t bits[3][256];
74     uint32_t pix_bgr_map[1<<VLC_BITS];
75     VLC vlc[6];                             //Y,U,V,YY,YU,YV
76     AVFrame picture;
77     uint8_t *bitstream_buffer;
78     unsigned int bitstream_buffer_size;
79     DSPContext dsp;
80 }HYuvContext;
81
82 static const unsigned char classic_shift_luma[] = {
83   34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
84   16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
85   69,68, 0
86 };
87
88 static const unsigned char classic_shift_chroma[] = {
89   66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
90   56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
91   214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
92 };
93
94 static const unsigned char classic_add_luma[256] = {
95     3,  9,  5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
96    73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
97    68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
98    35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
99    37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
100    35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
101    27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
102    15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
103    12, 17, 19, 13,  4,  9,  2, 11,  1,  7,  8,  0, 16,  3, 14,  6,
104    12, 10,  5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
105    18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
106    28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
107    28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
108    62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
109    54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
110    46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13,  7,  8,
111 };
112
113 static const unsigned char classic_add_chroma[256] = {
114     3,  1,  2,  2,  2,  2,  3,  3,  7,  5,  7,  5,  8,  6, 11,  9,
115     7, 13, 11, 10,  9,  8,  7,  5,  9,  7,  6,  4,  7,  5,  8,  7,
116    11,  8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
117    43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
118   143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
119    80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
120    17, 14,  5,  6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
121   112,113,114,115,  4,117,118, 92, 94,121,122,  3,124,103,  2,  1,
122     0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
123   135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
124    52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
125    19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10,  9,  8, 36,
126     7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
127    83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
128    14, 16, 17, 18, 20, 21, 12, 14, 15,  9, 10,  6,  9,  6,  5,  8,
129     6, 12,  8, 10,  7,  9,  6,  4,  6,  2,  2,  3,  3,  3,  3,  2,
130 };
131
132 static inline int add_left_prediction(uint8_t *dst, uint8_t *src, int w, int acc){
133     int i;
134
135     for(i=0; i<w-1; i++){
136         acc+= src[i];
137         dst[i]= acc;
138         i++;
139         acc+= src[i];
140         dst[i]= acc;
141     }
142
143     for(; i<w; i++){
144         acc+= src[i];
145         dst[i]= acc;
146     }
147
148     return acc;
149 }
150
151 static inline void add_left_prediction_bgr32(uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
152     int i;
153     int r,g,b;
154     r= *red;
155     g= *green;
156     b= *blue;
157
158     for(i=0; i<w; i++){
159         b+= src[4*i+B];
160         g+= src[4*i+G];
161         r+= src[4*i+R];
162
163         dst[4*i+B]= b;
164         dst[4*i+G]= g;
165         dst[4*i+R]= r;
166     }
167
168     *red= r;
169     *green= g;
170     *blue= b;
171 }
172
173 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
174     int i;
175     if(w<32){
176         for(i=0; i<w; i++){
177             const int temp= src[i];
178             dst[i]= temp - left;
179             left= temp;
180         }
181         return left;
182     }else{
183         for(i=0; i<16; i++){
184             const int temp= src[i];
185             dst[i]= temp - left;
186             left= temp;
187         }
188         s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
189         return src[w-1];
190     }
191 }
192
193 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
194     int i;
195     int r,g,b;
196     r= *red;
197     g= *green;
198     b= *blue;
199     for(i=0; i<FFMIN(w,4); i++){
200         const int rt= src[i*4+R];
201         const int gt= src[i*4+G];
202         const int bt= src[i*4+B];
203         dst[i*4+R]= rt - r;
204         dst[i*4+G]= gt - g;
205         dst[i*4+B]= bt - b;
206         r = rt;
207         g = gt;
208         b = bt;
209     }
210     s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
211     *red=   src[(w-1)*4+R];
212     *green= src[(w-1)*4+G];
213     *blue=  src[(w-1)*4+B];
214 }
215
216 static void read_len_table(uint8_t *dst, GetBitContext *gb){
217     int i, val, repeat;
218
219     for(i=0; i<256;){
220         repeat= get_bits(gb, 3);
221         val   = get_bits(gb, 5);
222         if(repeat==0)
223             repeat= get_bits(gb, 8);
224 //printf("%d %d\n", val, repeat);
225         while (repeat--)
226             dst[i++] = val;
227     }
228 }
229
230 static int generate_bits_table(uint32_t *dst, uint8_t *len_table){
231     int len, index;
232     uint32_t bits=0;
233
234     for(len=32; len>0; len--){
235         for(index=0; index<256; index++){
236             if(len_table[index]==len)
237                 dst[index]= bits++;
238         }
239         if(bits & 1){
240             av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
241             return -1;
242         }
243         bits >>= 1;
244     }
245     return 0;
246 }
247
248 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
249 typedef struct {
250     uint64_t val;
251     int name;
252 } HeapElem;
253
254 static void heap_sift(HeapElem *h, int root, int size)
255 {
256     while(root*2+1 < size) {
257         int child = root*2+1;
258         if(child < size-1 && h[child].val > h[child+1].val)
259             child++;
260         if(h[root].val > h[child].val) {
261             FFSWAP(HeapElem, h[root], h[child]);
262             root = child;
263         } else
264             break;
265     }
266 }
267
/**
 * Compute Huffman code lengths for 'size' symbols from their weights.
 * A binary min-heap repeatedly merges the two lightest subtrees; the
 * parent links collected in up[] then give each leaf's depth.  If any
 * resulting code length reaches 32 bits, the whole build is retried
 * with a doubling bias ('offset') added to every weight, which flattens
 * the tree until all lengths fit in 31 bits.
 */
static void generate_len_table(uint8_t *dst, uint64_t *stats, int size){
    HeapElem h[size];      /* min-heap of pending subtree weights (VLA) */
    int up[2*size];        /* up[i] = parent node of leaf/internal node i */
    int len[2*size];       /* depth of each internal node */
    int offset, i, next;

    for(offset=1; ; offset<<=1){
        /* (weight << 8) keeps ordering while the low bits absorb the bias */
        for(i=0; i<size; i++){
            h[i].name = i;
            h[i].val = (stats[i] << 8) + offset;
        }
        /* heapify */
        for(i=size/2-1; i>=0; i--)
            heap_sift(h, i, size);

        for(next=size; next<size*2-1; next++){
            // merge the two smallest entries, and put it back in the heap
            uint64_t min1v = h[0].val;
            up[h[0].name] = next;
            h[0].val = INT64_MAX;          /* remove first minimum */
            heap_sift(h, 0, size);
            up[h[0].name] = next;          /* second minimum joins the merge */
            h[0].name = next;
            h[0].val += min1v;             /* merged node replaces it in the heap */
            heap_sift(h, 0, size);
        }

        /* walk internal nodes top-down to assign depths, then leaves */
        len[2*size-2] = 0;
        for(i=2*size-3; i>=size; i--)
            len[i] = len[up[i]] + 1;
        for(i=0; i<size; i++) {
            dst[i] = len[up[i]] + 1;
            if(dst[i] >= 32) break;        /* too deep: retry with larger bias */
        }
        if(i==size) break;                 /* every code fits -> done */
    }
}
304 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
305
/**
 * Build the "joint" VLC tables that decode two (YUV) or three (RGB)
 * symbols in a single table lookup, as an accelerator in front of the
 * per-plane tables.  Only symbol combinations whose total code length
 * fits in VLC_BITS are entered; the decoder falls back to individual
 * reads for anything else.
 */
static void generate_joint_tables(HYuvContext *s){
    uint16_t symbols[1<<VLC_BITS];
    uint16_t bits[1<<VLC_BITS];
    uint8_t len[1<<VLC_BITS];
    if(s->bitstream_bpp < 24){
        /* YUV path: build one table per chroma-ish plane pairing (Y+Y,
           Y+U, Y+V), each entry decoding a luma symbol followed by a
           plane-p symbol */
        int p, i, y, u;
        for(p=0; p<3; p++){
            for(i=y=0; y<256; y++){
                int len0 = s->len[0][y];
                int limit = VLC_BITS - len0;
                if(limit <= 0)
                    continue;
                for(u=0; u<256; u++){
                    int len1 = s->len[p][u];
                    if(len1 > limit)
                        continue;
                    len[i] = len0 + len1;
                    bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
                    symbols[i] = (y<<8) + u;
                    if(symbols[i] != 0xffff) // reserved to mean "invalid"
                        i++;
                }
            }
            free_vlc(&s->vlc[3+p]);
            init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
        }
    }else{
        /* RGB path: entries map directly to a packed pixel via
           pix_bgr_map, with optional green-decorrelation applied */
        uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
        int i, b, g, r, code;
        int p0 = s->decorrelate;
        int p1 = !s->decorrelate;
        // restrict the range to +/-16 because that's pretty much guaranteed to
        // cover all the combinations that fit in 11 bits total, and it doesn't
        // matter if we miss a few rare codes.
        for(i=0, g=-16; g<16; g++){
            int len0 = s->len[p0][g&255];
            int limit0 = VLC_BITS - len0;
            if(limit0 < 2)
                continue;
            for(b=-16; b<16; b++){
                int len1 = s->len[p1][b&255];
                int limit1 = limit0 - len1;
                if(limit1 < 1)
                    continue;
                code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
                for(r=-16; r<16; r++){
                    int len2 = s->len[2][r&255];
                    if(len2 > limit1)
                        continue;
                    len[i] = len0 + len1 + len2;
                    bits[i] = (code << len2) + s->bits[2][r&255];
                    if(s->decorrelate){
                        /* stream stores G, B-G, R-G; undo here */
                        map[i][G] = g;
                        map[i][B] = g+b;
                        map[i][R] = g+r;
                    }else{
                        map[i][B] = g;
                        map[i][G] = b;
                        map[i][R] = r;
                    }
                    i++;
                }
            }
        }
        free_vlc(&s->vlc[3]);
        init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
    }
}
374
375 static int read_huffman_tables(HYuvContext *s, uint8_t *src, int length){
376     GetBitContext gb;
377     int i;
378
379     init_get_bits(&gb, src, length*8);
380
381     for(i=0; i<3; i++){
382         read_len_table(s->len[i], &gb);
383
384         if(generate_bits_table(s->bits[i], s->len[i])<0){
385             return -1;
386         }
387 #if 0
388 for(j=0; j<256; j++){
389 printf("%6X, %2d,  %3d\n", s->bits[i][j], s->len[i][j], j);
390 }
391 #endif
392         free_vlc(&s->vlc[i]);
393         init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
394     }
395
396     generate_joint_tables(s);
397
398     return (get_bits_count(&gb)+7)/8;
399 }
400
401 static int read_old_huffman_tables(HYuvContext *s){
402 #if 1
403     GetBitContext gb;
404     int i;
405
406     init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
407     read_len_table(s->len[0], &gb);
408     init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
409     read_len_table(s->len[1], &gb);
410
411     for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma  [i];
412     for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
413
414     if(s->bitstream_bpp >= 24){
415         memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
416         memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
417     }
418     memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
419     memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
420
421     for(i=0; i<3; i++){
422         free_vlc(&s->vlc[i]);
423         init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
424     }
425
426     generate_joint_tables(s);
427
428     return 0;
429 #else
430     av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
431     return -1;
432 #endif
433 }
434
435 static av_cold void alloc_temp(HYuvContext *s){
436     int i;
437
438     if(s->bitstream_bpp<24){
439         for(i=0; i<3; i++){
440             s->temp[i]= av_malloc(s->width + 16);
441         }
442     }else{
443         for(i=0; i<2; i++){
444             s->temp[i]= av_malloc(4*s->width + 16);
445         }
446     }
447 }
448
449 static av_cold int common_init(AVCodecContext *avctx){
450     HYuvContext *s = avctx->priv_data;
451
452     s->avctx= avctx;
453     s->flags= avctx->flags;
454
455     dsputil_init(&s->dsp, avctx);
456
457     s->width= avctx->width;
458     s->height= avctx->height;
459     assert(s->width>0 && s->height>0);
460
461     return 0;
462 }
463
464 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
465 static av_cold int decode_init(AVCodecContext *avctx)
466 {
467     HYuvContext *s = avctx->priv_data;
468
469     common_init(avctx);
470     memset(s->vlc, 0, 3*sizeof(VLC));
471
472     avctx->coded_frame= &s->picture;
473     s->interlaced= s->height > 288;
474
475 s->bgr32=1;
476 //if(avctx->extradata)
477 //  printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
478     if(avctx->extradata_size){
479         if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
480             s->version=1; // do such files exist at all?
481         else
482             s->version=2;
483     }else
484         s->version=0;
485
486     if(s->version==2){
487         int method, interlace;
488
489         method= ((uint8_t*)avctx->extradata)[0];
490         s->decorrelate= method&64 ? 1 : 0;
491         s->predictor= method&63;
492         s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
493         if(s->bitstream_bpp==0)
494             s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
495         interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
496         s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
497         s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
498
499         if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
500             return -1;
501     }else{
502         switch(avctx->bits_per_coded_sample&7){
503         case 1:
504             s->predictor= LEFT;
505             s->decorrelate= 0;
506             break;
507         case 2:
508             s->predictor= LEFT;
509             s->decorrelate= 1;
510             break;
511         case 3:
512             s->predictor= PLANE;
513             s->decorrelate= avctx->bits_per_coded_sample >= 24;
514             break;
515         case 4:
516             s->predictor= MEDIAN;
517             s->decorrelate= 0;
518             break;
519         default:
520             s->predictor= LEFT; //OLD
521             s->decorrelate= 0;
522             break;
523         }
524         s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
525         s->context= 0;
526
527         if(read_old_huffman_tables(s) < 0)
528             return -1;
529     }
530
531     switch(s->bitstream_bpp){
532     case 12:
533         avctx->pix_fmt = PIX_FMT_YUV420P;
534         break;
535     case 16:
536         if(s->yuy2){
537             avctx->pix_fmt = PIX_FMT_YUYV422;
538         }else{
539             avctx->pix_fmt = PIX_FMT_YUV422P;
540         }
541         break;
542     case 24:
543     case 32:
544         if(s->bgr32){
545             avctx->pix_fmt = PIX_FMT_RGB32;
546         }else{
547             avctx->pix_fmt = PIX_FMT_BGR24;
548         }
549         break;
550     default:
551         assert(0);
552     }
553
554     alloc_temp(s);
555
556 //    av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
557
558     return 0;
559 }
560 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
561
562 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
563 static int store_table(HYuvContext *s, uint8_t *len, uint8_t *buf){
564     int i;
565     int index= 0;
566
567     for(i=0; i<256;){
568         int val= len[i];
569         int repeat=0;
570
571         for(; i<256 && len[i]==val && repeat<255; i++)
572             repeat++;
573
574         assert(val < 32 && val >0 && repeat<256 && repeat>0);
575         if(repeat>7){
576             buf[index++]= val;
577             buf[index++]= repeat;
578         }else{
579             buf[index++]= val | (repeat<<5);
580         }
581     }
582
583     return index;
584 }
585
586 static av_cold int encode_init(AVCodecContext *avctx)
587 {
588     HYuvContext *s = avctx->priv_data;
589     int i, j;
590
591     common_init(avctx);
592
593     avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
594     avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
595     s->version=2;
596
597     avctx->coded_frame= &s->picture;
598
599     switch(avctx->pix_fmt){
600     case PIX_FMT_YUV420P:
601         s->bitstream_bpp= 12;
602         break;
603     case PIX_FMT_YUV422P:
604         s->bitstream_bpp= 16;
605         break;
606     case PIX_FMT_RGB32:
607         s->bitstream_bpp= 24;
608         break;
609     default:
610         av_log(avctx, AV_LOG_ERROR, "format not supported\n");
611         return -1;
612     }
613     avctx->bits_per_coded_sample= s->bitstream_bpp;
614     s->decorrelate= s->bitstream_bpp >= 24;
615     s->predictor= avctx->prediction_method;
616     s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
617     if(avctx->context_model==1){
618         s->context= avctx->context_model;
619         if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
620             av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
621             return -1;
622         }
623     }else s->context= 0;
624
625     if(avctx->codec->id==CODEC_ID_HUFFYUV){
626         if(avctx->pix_fmt==PIX_FMT_YUV420P){
627             av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
628             return -1;
629         }
630         if(avctx->context_model){
631             av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
632             return -1;
633         }
634         if(s->interlaced != ( s->height > 288 ))
635             av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
636     }
637
638     if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
639         av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
640         return -1;
641     }
642
643     ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
644     ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
645     ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
646     if(s->context)
647         ((uint8_t*)avctx->extradata)[2]|= 0x40;
648     ((uint8_t*)avctx->extradata)[3]= 0;
649     s->avctx->extradata_size= 4;
650
651     if(avctx->stats_in){
652         char *p= avctx->stats_in;
653
654         for(i=0; i<3; i++)
655             for(j=0; j<256; j++)
656                 s->stats[i][j]= 1;
657
658         for(;;){
659             for(i=0; i<3; i++){
660                 char *next;
661
662                 for(j=0; j<256; j++){
663                     s->stats[i][j]+= strtol(p, &next, 0);
664                     if(next==p) return -1;
665                     p=next;
666                 }
667             }
668             if(p[0]==0 || p[1]==0 || p[2]==0) break;
669         }
670     }else{
671         for(i=0; i<3; i++)
672             for(j=0; j<256; j++){
673                 int d= FFMIN(j, 256-j);
674
675                 s->stats[i][j]= 100000000/(d+1);
676             }
677     }
678
679     for(i=0; i<3; i++){
680         generate_len_table(s->len[i], s->stats[i], 256);
681
682         if(generate_bits_table(s->bits[i], s->len[i])<0){
683             return -1;
684         }
685
686         s->avctx->extradata_size+=
687         store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
688     }
689
690     if(s->context){
691         for(i=0; i<3; i++){
692             int pels = s->width*s->height / (i?40:10);
693             for(j=0; j<256; j++){
694                 int d= FFMIN(j, 256-j);
695                 s->stats[i][j]= pels/(d+1);
696             }
697         }
698     }else{
699         for(i=0; i<3; i++)
700             for(j=0; j<256; j++)
701                 s->stats[i][j]= 0;
702     }
703
704 //    printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
705
706     alloc_temp(s);
707
708     s->picture_number=0;
709
710     return 0;
711 }
712 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
713
714 /* TODO instead of restarting the read when the code isn't in the first level
715  * of the joint table, jump into the 2nd level of the individual table. */
/* Decode two symbols with one lookup in the joint table for plane pair
 * (luma, plane1).  0xffff is the reserved "not in joint table" marker
 * (see generate_joint_tables); fall back to two single-symbol reads. */
#define READ_2PIX(dst0, dst1, plane1){\
    uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
    if(code != 0xffff){\
        dst0 = code>>8;\
        dst1 = code;\
    }else{\
        dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
        dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
    }\
}

/**
 * Decode one 4:2:2 row: 'count' luma samples into s->temp[0] and
 * count/2 chroma samples each into s->temp[1] (U) and s->temp[2] (V).
 * NOTE(review): count is halved by integer division, so an odd count
 * would drop a trailing sample — callers appear to pass even widths;
 * confirm against the call sites.
 */
static void decode_422_bitstream(HYuvContext *s, int count){
    int i;

    count/=2;

    for(i=0; i<count; i++){
        READ_2PIX(s->temp[0][2*i  ], s->temp[1][i], 1);  /* Y0 + U */
        READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);  /* Y1 + V */
    }
}
737
/**
 * Decode 'count' luma-only samples into s->temp[0], two per joint-table
 * read (plane pair Y+Y).
 * NOTE(review): like decode_422_bitstream, integer halving drops a
 * trailing odd sample.
 */
static void decode_gray_bitstream(HYuvContext *s, int count){
    int i;

    count/=2;

    for(i=0; i<count; i++){
        READ_2PIX(s->temp[0][2*i  ], s->temp[0][2*i+1], 0);
    }
}
747
748 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/**
 * Entropy-code one 4:2:2 row held in s->temp[0..2] (Y, U, V); count is
 * the number of luma samples, the chroma planes hold count/2 each.
 * PASS1 gathers statistics; NO_OUTPUT skips the actual bit writing;
 * context mode updates statistics while writing.
 * Returns 0 on success, -1 if the output buffer might be overrun.
 */
static int encode_422_bitstream(HYuvContext *s, int count){
    int i;

    /* conservative bound: 4 codes of up to 32 bits per pixel pair */
    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

/* fetch one Y0/Y1/U/V quad for pixel pair i */
#define LOAD4\
            int y0 = s->temp[0][2*i];\
            int y1 = s->temp[0][2*i+1];\
            int u0 = s->temp[1][i];\
            int v0 = s->temp[2][i];

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        for(i=0; i<count; i++){
            LOAD4;
            s->stats[0][y0]++;
            s->stats[1][u0]++;
            s->stats[0][y1]++;
            s->stats[2][v0]++;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;
    if(s->context){
        /* context mode: keep the adaptive statistics in sync with output */
        for(i=0; i<count; i++){
            LOAD4;
            s->stats[0][y0]++;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            s->stats[1][u0]++;
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            s->stats[0][y1]++;
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            s->stats[2][v0]++;
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }else{
        for(i=0; i<count; i++){
            LOAD4;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }
    return 0;
}
798
/**
 * Entropy-code one gray (luma-only) row of 'count' samples from
 * s->temp[0].  Same PASS1 / NO_OUTPUT / context behavior as
 * encode_422_bitstream.
 * Returns 0 on success, -1 if the output buffer might be overrun.
 */
static int encode_gray_bitstream(HYuvContext *s, int count){
    int i;

    /* conservative bound: 2 codes of up to 32 bits per pixel pair */
    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

/* fetch / count / write one pair of luma samples */
#define LOAD2\
            int y0 = s->temp[0][2*i];\
            int y1 = s->temp[0][2*i+1];
#define STAT2\
            s->stats[0][y0]++;\
            s->stats[0][y1]++;
#define WRITE2\
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        for(i=0; i<count; i++){
            LOAD2;
            STAT2;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;

    if(s->context){
        for(i=0; i<count; i++){
            LOAD2;
            STAT2;
            WRITE2;
        }
    }else{
        for(i=0; i<count; i++){
            LOAD2;
            WRITE2;
        }
    }
    return 0;
}
841 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
842
/**
 * Decode 'count' packed 32-bit pixels into s->temp[0].
 * The joint B/G/R table is tried first (get_vlc2 returns -1 when the
 * triple is not in it, and pix_bgr_map then yields the whole pixel);
 * otherwise the three channels are read individually.  With decorrelate,
 * B and R are stored in the stream as differences against G.
 * decorrelate and alpha are compile-time constants at each call site so
 * the always-inline body is specialized per combination.
 */
static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
    int i;
    for(i=0; i<count; i++){
        int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
        if(code != -1){
            *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
        }else if(decorrelate){
            s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
            s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
        }else{
            s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
            s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
        }
        if(alpha)
            /* alpha symbol is read with the R table and discarded */
            get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?!
    }
}
862
/**
 * Decode one row of packed RGB pixels.  Dispatches to decode_bgr_1 with
 * compile-time-constant decorrelate/alpha flags so the always-inline
 * body is specialized for each of the four combinations (24 bpp streams
 * carry no alpha symbol; 32 bpp streams do).
 */
static void decode_bgr_bitstream(HYuvContext *s, int count){
    if(s->decorrelate){
        if(s->bitstream_bpp==24)
            decode_bgr_1(s, count, 1, 0);
        else
            decode_bgr_1(s, count, 1, 1);
    }else{
        if(s->bitstream_bpp==24)
            decode_bgr_1(s, count, 0, 0);
        else
            decode_bgr_1(s, count, 0, 1);
    }
}
876
/**
 * Entropy-code one row of 'count' packed 32-bit pixels from s->temp[0].
 * B and R are always written decorrelated against G (mod 256); codes go
 * out in G, B, R order.  Stats tables are indexed [0]=B, [1]=G, [2]=R.
 * Returns 0 on success, -1 if the output buffer might be overrun.
 */
static int encode_bgr_bitstream(HYuvContext *s, int count){
    int i;

    /* conservative bound: 3 codes of up to 32 bits per pixel */
    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

/* fetch pixel i, decorrelating B and R against G */
#define LOAD3\
            int g= s->temp[0][4*i+G];\
            int b= (s->temp[0][4*i+B] - g) & 0xff;\
            int r= (s->temp[0][4*i+R] - g) & 0xff;
#define STAT3\
            s->stats[0][b]++;\
            s->stats[1][g]++;\
            s->stats[2][r]++;
#define WRITE3\
            put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
            put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
            put_bits(&s->pb, s->len[2][r], s->bits[2][r]);

    if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
        /* pass 1 with no output: statistics only */
        for(i=0; i<count; i++){
            LOAD3;
            STAT3;
        }
    }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
        for(i=0; i<count; i++){
            LOAD3;
            STAT3;
            WRITE3;
        }
    }else{
        for(i=0; i<count; i++){
            LOAD3;
            WRITE3;
        }
    }
    return 0;
}
917
918 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
919 static void draw_slice(HYuvContext *s, int y){
920     int h, cy;
921     int offset[4];
922
923     if(s->avctx->draw_horiz_band==NULL)
924         return;
925
926     h= y - s->last_slice_end;
927     y -= h;
928
929     if(s->bitstream_bpp==12){
930         cy= y>>1;
931     }else{
932         cy= y;
933     }
934
935     offset[0] = s->picture.linesize[0]*y;
936     offset[1] = s->picture.linesize[1]*cy;
937     offset[2] = s->picture.linesize[2]*cy;
938     offset[3] = 0;
939     emms_c();
940
941     s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
942
943     s->last_slice_end= y + h;
944 }
945
/*
 * Decode one huffyuv/ffvhuff frame from avpkt into s->picture.
 * Returns the number of consumed input bytes (rounded up to a 32-bit
 * word boundary, plus any in-band table size) or a negative error code.
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    HYuvContext *s = avctx->priv_data;
    const int width= s->width;
    const int width2= s->width>>1;          /* chroma width (4:2:2 / 4:2:0) */
    const int height= s->height;
    int fake_ystride, fake_ustride, fake_vstride;
    AVFrame * const p= &s->picture;
    int table_size= 0;

    AVFrame *picture = data;

    av_fast_malloc(&s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!s->bitstream_buffer)
        return AVERROR(ENOMEM);

    /* the bitstream is stored byte-swapped in 32-bit words; un-swap it
       into the scratch buffer before reading */
    s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);

    if(p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference= 0;
    if(avctx->get_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    if(s->context){
        /* "context" streams carry their Huffman tables in-band, ahead of
           the picture data */
        table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
        if(table_size < 0)
            return -1;
    }

    /* guard the *8 below against signed overflow */
    if((unsigned)(buf_size-table_size) >= INT_MAX/8)
        return -1;

    init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);

    /* interlaced prediction references the previous line of the same
       field, i.e. two picture lines up */
    fake_ystride= s->interlaced ? p->linesize[0]*2  : p->linesize[0];
    fake_ustride= s->interlaced ? p->linesize[1]*2  : p->linesize[1];
    fake_vstride= s->interlaced ? p->linesize[2]*2  : p->linesize[2];

    s->last_slice_end= 0;

    if(s->bitstream_bpp<24){
        int y, cy;                          /* luma / chroma line counters */
        int lefty, leftu, leftv;            /* running left predictors */
        int lefttopy, lefttopu, lefttopv;   /* top-left predictors (MEDIAN) */

        if(s->yuy2){
            p->data[0][3]= get_bits(&s->gb, 8);
            p->data[0][2]= get_bits(&s->gb, 8);
            p->data[0][1]= get_bits(&s->gb, 8);
            p->data[0][0]= get_bits(&s->gb, 8);

            av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
            return -1;
        }else{

            /* the first 4 bytes are the raw V, Y1, U, Y0 samples that seed
               the left predictors */
            leftv= p->data[2][0]= get_bits(&s->gb, 8);
            lefty= p->data[0][1]= get_bits(&s->gb, 8);
            leftu= p->data[1][0]= get_bits(&s->gb, 8);
                   p->data[0][0]= get_bits(&s->gb, 8);

            switch(s->predictor){
            case LEFT:
            case PLANE:
                /* first line: everything after the seeded pixels is left
                   predicted */
                decode_422_bitstream(s, width-2);
                lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
                    leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
                }

                for(cy=y=1; y<s->height; y++,cy++){
                    uint8_t *ydst, *udst, *vdst;

                    if(s->bitstream_bpp==12){
                        /* 4:2:0: every other line is luma-only */
                        decode_gray_bitstream(s, width);

                        ydst= p->data[0] + p->linesize[0]*y;

                        lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
                        if(s->predictor == PLANE){
                            if(y>s->interlaced)
                                s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                        }
                        y++;
                        if(y>=s->height) break;
                    }

                    draw_slice(s, y);

                    ydst= p->data[0] + p->linesize[0]*y;
                    udst= p->data[1] + p->linesize[1]*cy;
                    vdst= p->data[2] + p->linesize[2]*cy;

                    decode_422_bitstream(s, width);
                    lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        leftu= add_left_prediction(udst, s->temp[1], width2, leftu);
                        leftv= add_left_prediction(vdst, s->temp[2], width2, leftv);
                    }
                    if(s->predictor == PLANE){
                        /* PLANE = left prediction plus the line above (same
                           field), skipped for the first line(s) which have
                           no reference */
                        if(cy>s->interlaced){
                            s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                            if(!(s->flags&CODEC_FLAG_GRAY)){
                                s->dsp.add_bytes(udst, udst - fake_ustride, width2);
                                s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
                            }
                        }
                    }
                }
                draw_slice(s, height);

                break;
            case MEDIAN:
                /* first line except first 2 pixels is left predicted */
                decode_422_bitstream(s, width-2);
                lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
                    leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
                }

                cy=y=1;

                /* second line is left predicted for interlaced case */
                if(s->interlaced){
                    decode_422_bitstream(s, width);
                    lefty= add_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        /* NOTE(review): linesize[2] is applied to plane 1 and
                           linesize[1] to plane 2 here -- harmless when both
                           chroma strides are equal, but looks swapped;
                           confirm against the reference decoder */
                        leftu= add_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
                        leftv= add_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
                    }
                    y++; cy++;
                }

                /* next 4 pixels are left predicted too */
                decode_422_bitstream(s, 4);
                lefty= add_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
                    leftv= add_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
                }

                /* next line except the first 4 pixels is median predicted */
                lefttopy= p->data[0][3];
                decode_422_bitstream(s, width-4);
                s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    lefttopu= p->data[1][1];
                    lefttopv= p->data[2][1];
                    s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
                    s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
                }
                y++; cy++;

                /* remaining lines are fully median predicted */
                for(; y<height; y++,cy++){
                    uint8_t *ydst, *udst, *vdst;

                    if(s->bitstream_bpp==12){
                        /* catch luma up to twice the chroma line count */
                        while(2*cy > y){
                            decode_gray_bitstream(s, width);
                            ydst= p->data[0] + p->linesize[0]*y;
                            s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                            y++;
                        }
                        if(y>=height) break;
                    }
                    draw_slice(s, y);

                    decode_422_bitstream(s, width);

                    ydst= p->data[0] + p->linesize[0]*y;
                    udst= p->data[1] + p->linesize[1]*cy;
                    vdst= p->data[2] + p->linesize[2]*cy;

                    s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
                        s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
                    }
                }

                draw_slice(s, height);
                break;
            }
        }
    }else{
        /* RGB path; the picture is stored bottom-up, start at the last line */
        int y;
        int leftr, leftg, leftb;
        const int last_line= (height-1)*p->linesize[0];

        /* seed the left predictors; the unused byte position depends on
           whether the stream is 32 or 24 bpp */
        if(s->bitstream_bpp==32){
            skip_bits(&s->gb, 8);
            leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
            leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
            leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
        }else{
            leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
            leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
            leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
            skip_bits(&s->gb, 8);
        }

        if(s->bgr32){
            switch(s->predictor){
            case LEFT:
            case PLANE:
                decode_bgr_bitstream(s, width-1);
                add_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb);

                for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
                    decode_bgr_bitstream(s, width);

                    add_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb);
                    if(s->predictor == PLANE){
                        if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
                            /* add the (already decoded) line below, i.e. the
                               prediction reference for bottom-up storage */
                            s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
                                             p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
                        }
                    }
                }
                draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
            }
        }else{

            av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
            return -1;
        }
    }
    emms_c();

    *picture= *p;
    *data_size = sizeof(AVFrame);

    /* consumed bytes, rounded up to a whole 32-bit word */
    return (get_bits_count(&s->gb)+31)/32*4 + table_size;
}
1189 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1190
1191 static int common_end(HYuvContext *s){
1192     int i;
1193
1194     for(i=0; i<3; i++){
1195         av_freep(&s->temp[i]);
1196     }
1197     return 0;
1198 }
1199
1200 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
1201 static av_cold int decode_end(AVCodecContext *avctx)
1202 {
1203     HYuvContext *s = avctx->priv_data;
1204     int i;
1205
1206     common_end(s);
1207     av_freep(&s->bitstream_buffer);
1208
1209     for(i=0; i<6; i++){
1210         free_vlc(&s->vlc[i]);
1211     }
1212
1213     return 0;
1214 }
1215 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1216
1217 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/*
 * Encode one frame from *data (an AVFrame) into buf.
 * Returns the number of output bytes written (a multiple of 4; the
 * bitstream is stored byte-swapped in 32-bit words).
 */
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
    HYuvContext *s = avctx->priv_data;
    AVFrame *pict = data;
    const int width= s->width;
    const int width2= s->width>>1;          /* chroma width */
    const int height= s->height;
    /* interlaced prediction references the line two rows up (same field) */
    const int fake_ystride= s->interlaced ? pict->linesize[0]*2  : pict->linesize[0];
    const int fake_ustride= s->interlaced ? pict->linesize[1]*2  : pict->linesize[1];
    const int fake_vstride= s->interlaced ? pict->linesize[2]*2  : pict->linesize[2];
    AVFrame * const p= &s->picture;
    int i, j, size=0;

    *p = *pict;
    p->pict_type= FF_I_TYPE;                /* every huffyuv frame is intra */
    p->key_frame= 1;

    if(s->context){
        /* adaptive-context mode: rebuild the Huffman tables from the
           accumulated symbol statistics and store them in-band */
        for(i=0; i<3; i++){
            generate_len_table(s->len[i], s->stats[i], 256);
            if(generate_bits_table(s->bits[i], s->len[i])<0)
                return -1;
            size+= store_table(s, s->len[i], &buf[size]);
        }

        /* decay the statistics so recent frames dominate */
        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j] >>= 1;
    }

    init_put_bits(&s->pb, buf+size, buf_size-size);

    if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
        int lefty, leftu, leftv, y, cy;

        /* store the raw V, Y1, U, Y0 seed samples that the decoder uses to
           initialize its left predictors */
        put_bits(&s->pb, 8, leftv= p->data[2][0]);
        put_bits(&s->pb, 8, lefty= p->data[0][1]);
        put_bits(&s->pb, 8, leftu= p->data[1][0]);
        put_bits(&s->pb, 8,        p->data[0][0]);

        /* first line after the seeds is left predicted */
        lefty= sub_left_prediction(s, s->temp[0], p->data[0]+2, width-2 , lefty);
        leftu= sub_left_prediction(s, s->temp[1], p->data[1]+1, width2-1, leftu);
        leftv= sub_left_prediction(s, s->temp[2], p->data[2]+1, width2-1, leftv);

        encode_422_bitstream(s, width-2);

        if(s->predictor==MEDIAN){
            int lefttopy, lefttopu, lefttopv;
            cy=y=1;
            /* second line is left predicted in the interlaced case (it has
               no same-field line above it) */
            if(s->interlaced){
                lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
                leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
                leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);

                encode_422_bitstream(s, width);
                y++; cy++;
            }

            /* next 4 luma / 2 chroma pixels are still left predicted */
            lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
            leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
            leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);

            encode_422_bitstream(s, 4);

            /* rest of this line switches to median prediction */
            lefttopy= p->data[0][3];
            lefttopu= p->data[1][1];
            lefttopv= p->data[2][1];
            s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
            s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
            s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
            encode_422_bitstream(s, width-4);
            y++; cy++;

            /* remaining lines are fully median predicted */
            for(; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;

                if(s->bitstream_bpp==12){
                    /* 4:2:0: emit luma-only lines until luma catches up to
                       twice the chroma line count */
                    while(2*cy > y){
                        ydst= p->data[0] + p->linesize[0]*y;
                        s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                        encode_gray_bitstream(s, width);
                        y++;
                    }
                    if(y>=height) break;
                }
                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;

                s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
                s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);

                encode_422_bitstream(s, width);
            }
        }else{
            /* LEFT / PLANE predictors */
            for(cy=y=1; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;

                /* encode a luma only line & y++ */
                if(s->bitstream_bpp==12){
                    ydst= p->data[0] + p->linesize[0]*y;

                    if(s->predictor == PLANE && s->interlaced < y){
                        /* PLANE = vertical diff first, then left prediction */
                        s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);

                        lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    }else{
                        lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    }
                    encode_gray_bitstream(s, width);
                    y++;
                    if(y>=height) break;
                }

                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;

                if(s->predictor == PLANE && s->interlaced < cy){
                    /* U and V diffs share temp[2] (V in its upper half) */
                    s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                    s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
                    s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);

                    lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
                }else{
                    lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
                }

                encode_422_bitstream(s, width);
            }
        }
    }else if(avctx->pix_fmt == PIX_FMT_RGB32){
        /* RGB is stored bottom-up: start at the last line and use a
           negative stride */
        uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb;

        /* seed samples for the decoder's left predictors */
        put_bits(&s->pb, 8, leftr= data[R]);
        put_bits(&s->pb, 8, leftg= data[G]);
        put_bits(&s->pb, 8, leftb= data[B]);
        put_bits(&s->pb, 8, 0);

        sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
        encode_bgr_bitstream(s, width-1);

        for(y=1; y<s->height; y++){
            uint8_t *dst = data + y*stride;
            if(s->predictor == PLANE && s->interlaced < y){
                s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
                sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
            }else{
                sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
            }
            encode_bgr_bitstream(s, width);
        }
    }else{
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
    }
    emms_c();

    /* round the written bit count up to a whole 32-bit word; the extra
       31 zero bits guarantee the last word is flushed */
    size+= (put_bits_count(&s->pb)+31)/8;
    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 15, 0);
    size/= 4;

    if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
        /* dump accumulated symbol statistics every 32 frames for 2-pass */
        int j;
        char *p= avctx->stats_out;
        char *end= p + 1024*30;
        for(i=0; i<3; i++){
            for(j=0; j<256; j++){
                snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
                p+= strlen(p);
                s->stats[i][j]= 0;
            }
            snprintf(p, end-p, "\n");
            p++;
        }
    } else
        /* NOTE(review): dereferences avctx->stats_out unconditionally --
           presumably it is always allocated by encode_init; confirm */
        avctx->stats_out[0] = '\0';
    if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
        flush_put_bits(&s->pb);
        /* output words are stored byte-swapped, in place */
        s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
    }

    s->picture_number++;

    return size*4;
}
1412
1413 static av_cold int encode_end(AVCodecContext *avctx)
1414 {
1415     HYuvContext *s = avctx->priv_data;
1416
1417     common_end(s);
1418
1419     av_freep(&avctx->extradata);
1420     av_freep(&avctx->stats_out);
1421
1422     return 0;
1423 }
1424 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
1425
1426 #if CONFIG_HUFFYUV_DECODER
/* Registration entry for the native Huffyuv decoder (positional AVCodec
 * initializer: name, type, id, priv size, init, encode, close, decode,
 * capabilities, next). */
AVCodec huffyuv_decoder = {
    "huffyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    decode_init,                            /* init */
    NULL,                                   /* encode */
    decode_end,                             /* close */
    decode_frame,                           /* decode */
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
    NULL,                                   /* next */
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
1440 #endif
1441
1442 #if CONFIG_FFVHUFF_DECODER
/* Registration entry for the FFmpeg-variant (ffvhuff) decoder; shares all
 * callbacks with the huffyuv decoder, only the codec id differs. */
AVCodec ffvhuff_decoder = {
    "ffvhuff",
    CODEC_TYPE_VIDEO,
    CODEC_ID_FFVHUFF,
    sizeof(HYuvContext),
    decode_init,                            /* init */
    NULL,                                   /* encode */
    decode_end,                             /* close */
    decode_frame,                           /* decode */
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
    NULL,                                   /* next */
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
1457
1458 #if CONFIG_HUFFYUV_ENCODER
/* Registration entry for the native Huffyuv encoder (positional AVCodec
 * initializer: name, type, id, priv size, init, encode, close). */
AVCodec huffyuv_encoder = {
    "huffyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    encode_init,                            /* init */
    encode_frame,                           /* encode */
    encode_end,                             /* close */
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
1471
1472 #if CONFIG_FFVHUFF_ENCODER
/* Registration entry for the FFmpeg-variant (ffvhuff) encoder; unlike the
 * stock huffyuv encoder it additionally accepts 4:2:0 input. */
AVCodec ffvhuff_encoder = {
    "ffvhuff",
    CODEC_TYPE_VIDEO,
    CODEC_ID_FFVHUFF,
    sizeof(HYuvContext),
    encode_init,                            /* init */
    encode_frame,                           /* encode */
    encode_end,                             /* close */
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};