/*
 * huffyuv codec for libavcodec
 *
 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
 * the algorithm used
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file libavcodec/huffyuv.c
 * huffyuv codec for libavcodec.
 */

#include "avcodec.h"
#include "bitstream.h"
#include "put_bits.h"
#include "dsputil.h"

#define VLC_BITS 11

#ifdef WORDS_BIGENDIAN
#define B 3
#define G 2
#define R 1
#else
#define B 0
#define G 1
#define R 2
#endif

typedef enum Predictor{
    LEFT= 0,
    PLANE,
    MEDIAN,
} Predictor;

typedef struct HYuvContext{
    AVCodecContext *avctx;
    Predictor predictor;
    GetBitContext gb;
    PutBitContext pb;
    int interlaced;
    int decorrelate;
    int bitstream_bpp;
    int version;
    int yuy2;                               //use yuy2 instead of 422P
    int bgr32;                              //use bgr32 instead of bgr24
    int width, height;
    int flags;
    int context;
    int picture_number;
    int last_slice_end;
    uint8_t *temp[3];
    uint64_t stats[3][256];
    uint8_t len[3][256];
    uint32_t bits[3][256];
    uint32_t pix_bgr_map[1<<VLC_BITS];
    VLC vlc[6];                             //Y,U,V,YY,YU,YV
    AVFrame picture;
    uint8_t *bitstream_buffer;
    unsigned int bitstream_buffer_size;
    DSPContext dsp;
}HYuvContext;

static const unsigned char classic_shift_luma[] = {
  34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
  16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
  69,68, 0
};

static const unsigned char classic_shift_chroma[] = {
  66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
  56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
  214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
};

static const unsigned char classic_add_luma[256] = {
    3,  9,  5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
   73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
   68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
   35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
   37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
   35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
   27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
   15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
   12, 17, 19, 13,  4,  9,  2, 11,  1,  7,  8,  0, 16,  3, 14,  6,
   12, 10,  5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
   18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
   28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
   28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
   62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
   54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
   46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13,  7,  8,
};

static const unsigned char classic_add_chroma[256] = {
    3,  1,  2,  2,  2,  2,  3,  3,  7,  5,  7,  5,  8,  6, 11,  9,
    7, 13, 11, 10,  9,  8,  7,  5,  9,  7,  6,  4,  7,  5,  8,  7,
   11,  8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
   43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
  143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
   80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
   17, 14,  5,  6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
  112,113,114,115,  4,117,118, 92, 94,121,122,  3,124,103,  2,  1,
    0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
  135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
   52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
   19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10,  9,  8, 36,
    7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
   83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
   14, 16, 17, 18, 20, 21, 12, 14, 15,  9, 10,  6,  9,  6,  5,  8,
    6, 12,  8, 10,  7,  9,  6,  4,  6,  2,  2,  3,  3,  3,  3,  2,
};

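/* Reverse left prediction for one row: each output byte is the running sum
 * of the residuals in src, starting from acc (the value carried in from the
 * previous call). The final accumulator is returned so decoding can continue
 * across calls; the first loop is unrolled by two for speed. */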
static inline int add_left_prediction(uint8_t *dst, uint8_t *src, int w, int acc){
    int i;

    for(i=0; i<w-1; i++){
        acc+= src[i];
        dst[i]= acc;
        i++;
        acc+= src[i];
        dst[i]= acc;
    }

    for(; i<w; i++){
        acc+= src[i];
        dst[i]= acc;
    }

    return acc;
}

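/* Same as add_left_prediction(), but for packed 32-bit BGR(A) pixels: each of
 * the three color channels keeps its own running sum, returned through
 * *red, *green and *blue. */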
static inline void add_left_prediction_bgr32(uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
    int i;
    int r,g,b;
    r= *red;
    g= *green;
    b= *blue;

    for(i=0; i<w; i++){
        b+= src[4*i+B];
        g+= src[4*i+G];
        r+= src[4*i+R];

        dst[4*i+B]= b;
        dst[4*i+G]= g;
        dst[4*i+R]= r;
    }

    *red= r;
    *green= g;
    *blue= b;
}

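/* Encoder-side counterpart of add_left_prediction(): store in dst the
 * difference of each pixel to its left neighbour. For rows of 32 pixels or
 * more, the bulk of the work is handed to dsp.diff_bytes(); the returned
 * value is the last source pixel, used as the left neighbour of the next
 * call. The bgr32 variant below does the same per color channel. */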
static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
    int i;
    if(w<32){
        for(i=0; i<w; i++){
            const int temp= src[i];
            dst[i]= temp - left;
            left= temp;
        }
        return left;
    }else{
        for(i=0; i<16; i++){
            const int temp= src[i];
            dst[i]= temp - left;
            left= temp;
        }
        s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
        return src[w-1];
    }
}

static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
    int i;
    int r,g,b;
    r= *red;
    g= *green;
    b= *blue;
    for(i=0; i<FFMIN(w,4); i++){
        const int rt= src[i*4+R];
        const int gt= src[i*4+G];
        const int bt= src[i*4+B];
        dst[i*4+R]= rt - r;
        dst[i*4+G]= gt - g;
        dst[i*4+B]= bt - b;
        r = rt;
        g = gt;
        b = bt;
    }
    s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
    *red=   src[(w-1)*4+R];
    *green= src[(w-1)*4+G];
    *blue=  src[(w-1)*4+B];
}

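/* Read the run-length coded table of code lengths: each run is stored as a
 * 3-bit repeat count followed by a 5-bit length value; a repeat count of 0
 * means an explicit 8-bit repeat count follows. 256 lengths are filled in. */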
static void read_len_table(uint8_t *dst, GetBitContext *gb){
    int i, val, repeat;

    for(i=0; i<256;){
        repeat= get_bits(gb, 3);
        val   = get_bits(gb, 5);
        if(repeat==0)
            repeat= get_bits(gb, 8);
//printf("%d %d\n", val, repeat);
        while (repeat--)
            dst[i++] = val;
    }
}

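/* Assign canonical Huffman codes from the code lengths: starting at the
 * longest length, every symbol of that length gets the next consecutive
 * code, and the running code value is halved when moving to the next
 * shorter length. If the running value is odd at that point, the lengths
 * cannot form a valid prefix code and -1 is returned. */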
static int generate_bits_table(uint32_t *dst, uint8_t *len_table){
    int len, index;
    uint32_t bits=0;

    for(len=32; len>0; len--){
        for(index=0; index<256; index++){
            if(len_table[index]==len)
                dst[index]= bits++;
        }
        if(bits & 1){
            av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
            return -1;
        }
        bits >>= 1;
    }
    return 0;
}

#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
typedef struct {
    uint64_t val;
    int name;
} HeapElem;

static void heap_sift(HeapElem *h, int root, int size)
{
    while(root*2+1 < size) {
        int child = root*2+1;
        if(child < size-1 && h[child].val > h[child+1].val)
            child++;
        if(h[root].val > h[child].val) {
            FFSWAP(HeapElem, h[root], h[child]);
            root = child;
        } else
            break;
    }
}

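/* Build Huffman code lengths from the symbol statistics using a min-heap:
 * the two smallest entries are merged repeatedly and the depth of each leaf
 * in the resulting tree becomes its code length. If any length reaches 32,
 * 'offset' is doubled and the whole process is retried with the flattened
 * statistics, which caps the maximum code length. */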
static void generate_len_table(uint8_t *dst, uint64_t *stats, int size){
    HeapElem h[size];
    int up[2*size];
    int len[2*size];
    int offset, i, next;

    for(offset=1; ; offset<<=1){
        for(i=0; i<size; i++){
            h[i].name = i;
            h[i].val = (stats[i] << 8) + offset;
        }
        for(i=size/2-1; i>=0; i--)
            heap_sift(h, i, size);

        for(next=size; next<size*2-1; next++){
            // merge the two smallest entries, and put it back in the heap
            uint64_t min1v = h[0].val;
            up[h[0].name] = next;
            h[0].val = INT64_MAX;
            heap_sift(h, 0, size);
            up[h[0].name] = next;
            h[0].name = next;
            h[0].val += min1v;
            heap_sift(h, 0, size);
        }

        len[2*size-2] = 0;
        for(i=2*size-3; i>=size; i--)
            len[i] = len[up[i]] + 1;
        for(i=0; i<size; i++) {
            dst[i] = len[up[i]] + 1;
            if(dst[i] >= 32) break;
        }
        if(i==size) break;
    }
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */

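/* Build the joint VLC tables used for fast decoding: for YUV streams,
 * vlc[3..5] decode a (luma, luma/chroma) symbol pair in a single lookup
 * whenever the combined code fits into VLC_BITS (the pair value 0xffff is
 * reserved to mean "not in the table"). For RGB streams, vlc[3] decodes a
 * whole (G,B,R) triplet of small residuals, and pix_bgr_map holds the
 * corresponding pixel bytes with decorrelation already applied if used. */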
static void generate_joint_tables(HYuvContext *s){
    uint16_t symbols[1<<VLC_BITS];
    uint16_t bits[1<<VLC_BITS];
    uint8_t len[1<<VLC_BITS];
    if(s->bitstream_bpp < 24){
        int p, i, y, u;
        for(p=0; p<3; p++){
            for(i=y=0; y<256; y++){
                int len0 = s->len[0][y];
                int limit = VLC_BITS - len0;
                if(limit <= 0)
                    continue;
                for(u=0; u<256; u++){
                    int len1 = s->len[p][u];
                    if(len1 > limit)
                        continue;
                    len[i] = len0 + len1;
                    bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
                    symbols[i] = (y<<8) + u;
                    if(symbols[i] != 0xffff) // reserved to mean "invalid"
                        i++;
                }
            }
            free_vlc(&s->vlc[3+p]);
            init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
        }
    }else{
        uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
        int i, b, g, r, code;
        int p0 = s->decorrelate;
        int p1 = !s->decorrelate;
        // restrict the range to +/-16 because that's pretty much guaranteed to
        // cover all the combinations that fit in 11 bits total, and it doesn't
        // matter if we miss a few rare codes.
        for(i=0, g=-16; g<16; g++){
            int len0 = s->len[p0][g&255];
            int limit0 = VLC_BITS - len0;
            if(limit0 < 2)
                continue;
            for(b=-16; b<16; b++){
                int len1 = s->len[p1][b&255];
                int limit1 = limit0 - len1;
                if(limit1 < 1)
                    continue;
                code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
                for(r=-16; r<16; r++){
                    int len2 = s->len[2][r&255];
                    if(len2 > limit1)
                        continue;
                    len[i] = len0 + len1 + len2;
                    bits[i] = (code << len2) + s->bits[2][r&255];
                    if(s->decorrelate){
                        map[i][G] = g;
                        map[i][B] = g+b;
                        map[i][R] = g+r;
                    }else{
                        map[i][B] = g;
                        map[i][G] = b;
                        map[i][R] = r;
                    }
                    i++;
                }
            }
        }
        free_vlc(&s->vlc[3]);
        init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
    }
}

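/* Parse the three code-length tables from src (the extradata or, with
 * adaptive context, the frame header), rebuild the bit tables and per-plane
 * VLCs plus the joint tables, and return the number of bytes consumed. */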
static int read_huffman_tables(HYuvContext *s, uint8_t *src, int length){
    GetBitContext gb;
    int i;

    init_get_bits(&gb, src, length*8);

    for(i=0; i<3; i++){
        read_len_table(s->len[i], &gb);

        if(generate_bits_table(s->bits[i], s->len[i])<0){
            return -1;
        }
#if 0
for(j=0; j<256; j++){
printf("%6X, %2d,  %3d\n", s->bits[i][j], s->len[i][j], j);
}
#endif
        free_vlc(&s->vlc[i]);
        init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
    }

    generate_joint_tables(s);

    return (get_bits_count(&gb)+7)/8;
}

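/* Set up the tables for streams that carry no per-file Huffman tables
 * (version 0/1): the code lengths and bits come from the classic_* tables
 * hard-coded above, with the luma tables reused for all planes in the RGB
 * case and the chroma tables shared between U and V. */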
static int read_old_huffman_tables(HYuvContext *s){
#if 1
    GetBitContext gb;
    int i;

    init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
    read_len_table(s->len[0], &gb);
    init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
    read_len_table(s->len[1], &gb);

    for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma  [i];
    for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];

    if(s->bitstream_bpp >= 24){
        memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
        memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
    }
    memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
    memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));

    for(i=0; i<3; i++){
        free_vlc(&s->vlc[i]);
        init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
    }

    generate_joint_tables(s);

    return 0;
#else
    av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
    return -1;
#endif
}

static av_cold void alloc_temp(HYuvContext *s){
    int i;

    if(s->bitstream_bpp<24){
        for(i=0; i<3; i++){
            s->temp[i]= av_malloc(s->width + 16);
        }
    }else{
        for(i=0; i<2; i++){
            s->temp[i]= av_malloc(4*s->width + 16);
        }
    }
}

static av_cold int common_init(AVCodecContext *avctx){
    HYuvContext *s = avctx->priv_data;

    s->avctx= avctx;
    s->flags= avctx->flags;

    dsputil_init(&s->dsp, avctx);

    s->width= avctx->width;
    s->height= avctx->height;
    assert(s->width>0 && s->height>0);

    return 0;
}

#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
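/* Decoder setup. For version-2 files the extradata carries the coding
 * parameters: byte 0 holds the predictor in its low 6 bits and the
 * decorrelate flag in bit 6, byte 1 the bitstream bpp, byte 2 an interlace
 * override in bits 4-5 and the adaptive-context flag in bit 6, and the
 * Huffman tables follow from byte 4. Older files fall back on
 * bits_per_coded_sample and the classic tables. */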
static av_cold int decode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    common_init(avctx);
    memset(s->vlc, 0, 3*sizeof(VLC));

    avctx->coded_frame= &s->picture;
    s->interlaced= s->height > 288;

s->bgr32=1;
//if(avctx->extradata)
//  printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
    if(avctx->extradata_size){
        if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
            s->version=1; // do such files exist at all?
        else
            s->version=2;
    }else
        s->version=0;

    if(s->version==2){
        int method, interlace;

        method= ((uint8_t*)avctx->extradata)[0];
        s->decorrelate= method&64 ? 1 : 0;
        s->predictor= method&63;
        s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
        if(s->bitstream_bpp==0)
            s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
        interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
        s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
        s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;

        if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
            return -1;
    }else{
        switch(avctx->bits_per_coded_sample&7){
        case 1:
            s->predictor= LEFT;
            s->decorrelate= 0;
            break;
        case 2:
            s->predictor= LEFT;
            s->decorrelate= 1;
            break;
        case 3:
            s->predictor= PLANE;
            s->decorrelate= avctx->bits_per_coded_sample >= 24;
            break;
        case 4:
            s->predictor= MEDIAN;
            s->decorrelate= 0;
            break;
        default:
            s->predictor= LEFT; //OLD
            s->decorrelate= 0;
            break;
        }
        s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
        s->context= 0;

        if(read_old_huffman_tables(s) < 0)
            return -1;
    }

    switch(s->bitstream_bpp){
    case 12:
        avctx->pix_fmt = PIX_FMT_YUV420P;
        break;
    case 16:
        if(s->yuy2){
            avctx->pix_fmt = PIX_FMT_YUYV422;
        }else{
            avctx->pix_fmt = PIX_FMT_YUV422P;
        }
        break;
    case 24:
    case 32:
        if(s->bgr32){
            avctx->pix_fmt = PIX_FMT_RGB32;
        }else{
            avctx->pix_fmt = PIX_FMT_BGR24;
        }
        break;
    default:
        assert(0);
    }

    alloc_temp(s);

//    av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);

    return 0;
}
#endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */

#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
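/* Run-length encode a 256-entry length table into buf, the inverse of
 * read_len_table(): runs of up to 7 are packed as (repeat<<5)|value, longer
 * runs as the value byte (upper three bits zero) followed by an 8-bit repeat
 * count. Returns the number of bytes written. */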
static int store_table(HYuvContext *s, uint8_t *len, uint8_t *buf){
    int i;
    int index= 0;

    for(i=0; i<256;){
        int val= len[i];
        int repeat=0;

        for(; i<256 && len[i]==val && repeat<255; i++)
            repeat++;

        assert(val < 32 && val >0 && repeat<256 && repeat>0);
        if(repeat>7){
            buf[index++]= val;
            buf[index++]= repeat;
        }else{
            buf[index++]= val | (repeat<<5);
        }
    }

    return index;
}

static av_cold int encode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i, j;

    common_init(avctx);

    avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
    avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
    s->version=2;

    avctx->coded_frame= &s->picture;

    switch(avctx->pix_fmt){
    case PIX_FMT_YUV420P:
        s->bitstream_bpp= 12;
        break;
    case PIX_FMT_YUV422P:
        s->bitstream_bpp= 16;
        break;
    case PIX_FMT_RGB32:
        s->bitstream_bpp= 24;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
        return -1;
    }
    avctx->bits_per_coded_sample= s->bitstream_bpp;
    s->decorrelate= s->bitstream_bpp >= 24;
    s->predictor= avctx->prediction_method;
    s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
    if(avctx->context_model==1){
        s->context= avctx->context_model;
        if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
            av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
            return -1;
        }
    }else s->context= 0;

    if(avctx->codec->id==CODEC_ID_HUFFYUV){
        if(avctx->pix_fmt==PIX_FMT_YUV420P){
            av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
            return -1;
        }
        if(avctx->context_model){
            av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
            return -1;
        }
        if(s->interlaced != ( s->height > 288 ))
            av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
    }

    if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
        av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
        return -1;
    }

    ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
    ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
    ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
    if(s->context)
        ((uint8_t*)avctx->extradata)[2]|= 0x40;
    ((uint8_t*)avctx->extradata)[3]= 0;
    s->avctx->extradata_size= 4;

    if(avctx->stats_in){
        char *p= avctx->stats_in;

        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 1;

        for(;;){
            for(i=0; i<3; i++){
                char *next;

                for(j=0; j<256; j++){
                    s->stats[i][j]+= strtol(p, &next, 0);
                    if(next==p) return -1;
                    p=next;
                }
            }
            if(p[0]==0 || p[1]==0 || p[2]==0) break;
        }
    }else{
        for(i=0; i<3; i++)
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);

                s->stats[i][j]= 100000000/(d+1);
            }
    }

    for(i=0; i<3; i++){
        generate_len_table(s->len[i], s->stats[i], 256);

        if(generate_bits_table(s->bits[i], s->len[i])<0){
            return -1;
        }

        s->avctx->extradata_size+=
        store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
    }

    if(s->context){
        for(i=0; i<3; i++){
            int pels = s->width*s->height / (i?40:10);
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);
                s->stats[i][j]= pels/(d+1);
            }
        }
    }else{
        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 0;
    }

//    printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);

    alloc_temp(s);

    s->picture_number=0;

    return 0;
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */

/* TODO instead of restarting the read when the code isn't in the first level
 * of the joint table, jump into the 2nd level of the individual table. */
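/* READ_2PIX decodes two symbols at once through the joint table; a decoded
 * value of 0xffff signals that the pair was not present there, in which case
 * the two symbols are read separately from their per-plane tables. */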
#define READ_2PIX(dst0, dst1, plane1){\
    uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
    if(code != 0xffff){\
        dst0 = code>>8;\
        dst1 = code;\
    }else{\
        dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
        dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
    }\
}

static void decode_422_bitstream(HYuvContext *s, int count){
    int i;

    count/=2;

    for(i=0; i<count; i++){
        READ_2PIX(s->temp[0][2*i  ], s->temp[1][i], 1);
        READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
    }
}

static void decode_gray_bitstream(HYuvContext *s, int count){
    int i;

    count/=2;

    for(i=0; i<count; i++){
        READ_2PIX(s->temp[0][2*i  ], s->temp[0][2*i+1], 0);
    }
}

#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
static int encode_422_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD4\
            int y0 = s->temp[0][2*i];\
            int y1 = s->temp[0][2*i+1];\
            int u0 = s->temp[1][i];\
            int v0 = s->temp[2][i];

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        for(i=0; i<count; i++){
            LOAD4;
            s->stats[0][y0]++;
            s->stats[1][u0]++;
            s->stats[0][y1]++;
            s->stats[2][v0]++;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;
    if(s->context){
        for(i=0; i<count; i++){
            LOAD4;
            s->stats[0][y0]++;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            s->stats[1][u0]++;
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            s->stats[0][y1]++;
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            s->stats[2][v0]++;
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }else{
        for(i=0; i<count; i++){
            LOAD4;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }
    return 0;
}

static int encode_gray_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD2\
            int y0 = s->temp[0][2*i];\
            int y1 = s->temp[0][2*i+1];
#define STAT2\
            s->stats[0][y0]++;\
            s->stats[0][y1]++;
#define WRITE2\
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        for(i=0; i<count; i++){
            LOAD2;
            STAT2;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;

    if(s->context){
        for(i=0; i<count; i++){
            LOAD2;
            STAT2;
            WRITE2;
        }
    }else{
        for(i=0; i<count; i++){
            LOAD2;
            WRITE2;
        }
    }
    return 0;
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */

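/* Decode 'count' packed BGR(A) pixels: try the joint (G,B,R) table first and
 * fall back to three separate lookups, undoing the green decorrelation if it
 * was used. For 32 bpp streams one more symbol is read and discarded in
 * place of the alpha channel. */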
static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
    int i;
    for(i=0; i<count; i++){
        int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
        if(code != -1){
            *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
        }else if(decorrelate){
            s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
            s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
        }else{
            s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
            s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
        }
        if(alpha)
            get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?!
    }
}

static void decode_bgr_bitstream(HYuvContext *s, int count){
    if(s->decorrelate){
        if(s->bitstream_bpp==24)
            decode_bgr_1(s, count, 1, 0);
        else
            decode_bgr_1(s, count, 1, 1);
    }else{
        if(s->bitstream_bpp==24)
            decode_bgr_1(s, count, 0, 0);
        else
            decode_bgr_1(s, count, 0, 1);
    }
}

static int encode_bgr_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD3\
            int g= s->temp[0][4*i+G];\
            int b= (s->temp[0][4*i+B] - g) & 0xff;\
            int r= (s->temp[0][4*i+R] - g) & 0xff;
#define STAT3\
            s->stats[0][b]++;\
            s->stats[1][g]++;\
            s->stats[2][r]++;
#define WRITE3\
            put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
            put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
            put_bits(&s->pb, s->len[2][r], s->bits[2][r]);

    if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
        for(i=0; i<count; i++){
            LOAD3;
            STAT3;
        }
    }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
        for(i=0; i<count; i++){
            LOAD3;
            STAT3;
            WRITE3;
        }
    }else{
        for(i=0; i<count; i++){
            LOAD3;
            WRITE3;
        }
    }
    return 0;
}

#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
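/* Report the rows finished since the last call through draw_horiz_band(),
 * if the application registered such a callback; for 4:2:0 streams the
 * chroma offsets advance at half the luma rate. */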
static void draw_slice(HYuvContext *s, int y){
    int h, cy;
    int offset[4];

    if(s->avctx->draw_horiz_band==NULL)
        return;

    h= y - s->last_slice_end;
    y -= h;

    if(s->bitstream_bpp==12){
        cy= y>>1;
    }else{
        cy= y;
    }

    offset[0] = s->picture.linesize[0]*y;
    offset[1] = s->picture.linesize[1]*cy;
    offset[2] = s->picture.linesize[2]*cy;
    offset[3] = 0;
    emms_c();

    s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);

    s->last_slice_end= y + h;
}

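/* Decode one frame: the input is byte-swapped 32 bits at a time into
 * bitstream_buffer before bit reading, per-frame Huffman tables are parsed
 * first when adaptive context is enabled, and the planes are then
 * reconstructed row by row with the selected predictor (RGB frames are
 * stored bottom-up). */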
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    HYuvContext *s = avctx->priv_data;
    const int width= s->width;
    const int width2= s->width>>1;
    const int height= s->height;
    int fake_ystride, fake_ustride, fake_vstride;
    AVFrame * const p= &s->picture;
    int table_size= 0;

    AVFrame *picture = data;

    s->bitstream_buffer= av_fast_realloc(s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);

    s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);

    if(p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference= 0;
    if(avctx->get_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    if(s->context){
        table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
        if(table_size < 0)
            return -1;
    }

    if((unsigned)(buf_size-table_size) >= INT_MAX/8)
        return -1;

    init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);

    fake_ystride= s->interlaced ? p->linesize[0]*2  : p->linesize[0];
    fake_ustride= s->interlaced ? p->linesize[1]*2  : p->linesize[1];
    fake_vstride= s->interlaced ? p->linesize[2]*2  : p->linesize[2];

    s->last_slice_end= 0;

    if(s->bitstream_bpp<24){
        int y, cy;
        int lefty, leftu, leftv;
        int lefttopy, lefttopu, lefttopv;

        if(s->yuy2){
            p->data[0][3]= get_bits(&s->gb, 8);
            p->data[0][2]= get_bits(&s->gb, 8);
            p->data[0][1]= get_bits(&s->gb, 8);
            p->data[0][0]= get_bits(&s->gb, 8);

            av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
            return -1;
        }else{

            leftv= p->data[2][0]= get_bits(&s->gb, 8);
            lefty= p->data[0][1]= get_bits(&s->gb, 8);
            leftu= p->data[1][0]= get_bits(&s->gb, 8);
                   p->data[0][0]= get_bits(&s->gb, 8);

            switch(s->predictor){
            case LEFT:
            case PLANE:
                decode_422_bitstream(s, width-2);
                lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
                    leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
                }

                for(cy=y=1; y<s->height; y++,cy++){
                    uint8_t *ydst, *udst, *vdst;

                    if(s->bitstream_bpp==12){
                        decode_gray_bitstream(s, width);

                        ydst= p->data[0] + p->linesize[0]*y;

                        lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
                        if(s->predictor == PLANE){
                            if(y>s->interlaced)
                                s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                        }
                        y++;
                        if(y>=s->height) break;
                    }

                    draw_slice(s, y);

                    ydst= p->data[0] + p->linesize[0]*y;
                    udst= p->data[1] + p->linesize[1]*cy;
                    vdst= p->data[2] + p->linesize[2]*cy;

                    decode_422_bitstream(s, width);
                    lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        leftu= add_left_prediction(udst, s->temp[1], width2, leftu);
                        leftv= add_left_prediction(vdst, s->temp[2], width2, leftv);
                    }
                    if(s->predictor == PLANE){
                        if(cy>s->interlaced){
                            s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                            if(!(s->flags&CODEC_FLAG_GRAY)){
                                s->dsp.add_bytes(udst, udst - fake_ustride, width2);
                                s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
                            }
                        }
                    }
                }
                draw_slice(s, height);

                break;
            case MEDIAN:
                /* first line except first 2 pixels is left predicted */
                decode_422_bitstream(s, width-2);
                lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
                    leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
                }

                cy=y=1;

                /* second line is left predicted for interlaced case */
                if(s->interlaced){
                    decode_422_bitstream(s, width);
                    lefty= add_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        leftu= add_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
                        leftv= add_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
                    }
                    y++; cy++;
                }

                /* next 4 pixels are left predicted too */
                decode_422_bitstream(s, 4);
                lefty= add_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
                    leftv= add_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
                }

                /* next line except the first 4 pixels is median predicted */
                lefttopy= p->data[0][3];
                decode_422_bitstream(s, width-4);
                s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    lefttopu= p->data[1][1];
                    lefttopv= p->data[2][1];
                    s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
                    s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
                }
                y++; cy++;

                for(; y<height; y++,cy++){
                    uint8_t *ydst, *udst, *vdst;

                    if(s->bitstream_bpp==12){
                        while(2*cy > y){
                            decode_gray_bitstream(s, width);
                            ydst= p->data[0] + p->linesize[0]*y;
                            s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                            y++;
                        }
                        if(y>=height) break;
                    }
                    draw_slice(s, y);

                    decode_422_bitstream(s, width);

                    ydst= p->data[0] + p->linesize[0]*y;
                    udst= p->data[1] + p->linesize[1]*cy;
                    vdst= p->data[2] + p->linesize[2]*cy;

                    s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
                        s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
                    }
                }

                draw_slice(s, height);
                break;
            }
        }
    }else{
        int y;
        int leftr, leftg, leftb;
        const int last_line= (height-1)*p->linesize[0];

        if(s->bitstream_bpp==32){
            skip_bits(&s->gb, 8);
            leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
            leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
            leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
        }else{
            leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
            leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
            leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
            skip_bits(&s->gb, 8);
        }

        if(s->bgr32){
            switch(s->predictor){
            case LEFT:
            case PLANE:
                decode_bgr_bitstream(s, width-1);
                add_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb);

                for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
                    decode_bgr_bitstream(s, width);

                    add_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb);
                    if(s->predictor == PLANE){
                        if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
                            s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
                                             p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
                        }
                    }
                }
                draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
            }
        }else{

            av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
            return -1;
        }
    }
    emms_c();

    *picture= *p;
    *data_size = sizeof(AVFrame);

    return (get_bits_count(&s->gb)+31)/32*4 + table_size;
}
#endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */

static int common_end(HYuvContext *s){
    int i;

    for(i=0; i<3; i++){
        av_freep(&s->temp[i]);
    }
    return 0;
}

#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
static av_cold int decode_end(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i;

    common_end(s);
    av_freep(&s->bitstream_buffer);

    for(i=0; i<6; i++){
        free_vlc(&s->vlc[i]);
    }

    return 0;
}
#endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */

#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
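/* Encode one frame: with adaptive context the per-frame tables are stored
 * first and the statistics are halved, then each row is predicted (left,
 * plane or median), the residuals are entropy-coded, and the output is
 * byte-swapped back into 32-bit words. In pass-1 mode the gathered
 * statistics are appended to stats_out every 32 frames. */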
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
    HYuvContext *s = avctx->priv_data;
    AVFrame *pict = data;
    const int width= s->width;
    const int width2= s->width>>1;
    const int height= s->height;
    const int fake_ystride= s->interlaced ? pict->linesize[0]*2  : pict->linesize[0];
    const int fake_ustride= s->interlaced ? pict->linesize[1]*2  : pict->linesize[1];
    const int fake_vstride= s->interlaced ? pict->linesize[2]*2  : pict->linesize[2];
    AVFrame * const p= &s->picture;
    int i, j, size=0;

    *p = *pict;
    p->pict_type= FF_I_TYPE;
    p->key_frame= 1;

    if(s->context){
        for(i=0; i<3; i++){
            generate_len_table(s->len[i], s->stats[i], 256);
            if(generate_bits_table(s->bits[i], s->len[i])<0)
                return -1;
            size+= store_table(s, s->len[i], &buf[size]);
        }

        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j] >>= 1;
    }

    init_put_bits(&s->pb, buf+size, buf_size-size);

    if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
        int lefty, leftu, leftv, y, cy;

        put_bits(&s->pb, 8, leftv= p->data[2][0]);
        put_bits(&s->pb, 8, lefty= p->data[0][1]);
        put_bits(&s->pb, 8, leftu= p->data[1][0]);
        put_bits(&s->pb, 8,        p->data[0][0]);

        lefty= sub_left_prediction(s, s->temp[0], p->data[0]+2, width-2 , lefty);
        leftu= sub_left_prediction(s, s->temp[1], p->data[1]+1, width2-1, leftu);
        leftv= sub_left_prediction(s, s->temp[2], p->data[2]+1, width2-1, leftv);

        encode_422_bitstream(s, width-2);

        if(s->predictor==MEDIAN){
            int lefttopy, lefttopu, lefttopv;
            cy=y=1;
            if(s->interlaced){
                lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
                leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
                leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);

                encode_422_bitstream(s, width);
                y++; cy++;
            }

            lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
            leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
            leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);

            encode_422_bitstream(s, 4);

            lefttopy= p->data[0][3];
            lefttopu= p->data[1][1];
            lefttopv= p->data[2][1];
            s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
            s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
            s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
            encode_422_bitstream(s, width-4);
            y++; cy++;

            for(; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;

                if(s->bitstream_bpp==12){
                    while(2*cy > y){
                        ydst= p->data[0] + p->linesize[0]*y;
                        s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                        encode_gray_bitstream(s, width);
                        y++;
                    }
                    if(y>=height) break;
                }
                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;

                s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
                s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);

                encode_422_bitstream(s, width);
            }
        }else{
            for(cy=y=1; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;

                /* encode a luma only line & y++ */
                if(s->bitstream_bpp==12){
                    ydst= p->data[0] + p->linesize[0]*y;

                    if(s->predictor == PLANE && s->interlaced < y){
                        s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);

                        lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    }else{
                        lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    }
                    encode_gray_bitstream(s, width);
                    y++;
                    if(y>=height) break;
                }

                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;

                if(s->predictor == PLANE && s->interlaced < cy){
                    s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                    s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
                    s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);

                    lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
                }else{
                    lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
                }

                encode_422_bitstream(s, width);
            }
        }
    }else if(avctx->pix_fmt == PIX_FMT_RGB32){
        uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb;

        put_bits(&s->pb, 8, leftr= data[R]);
        put_bits(&s->pb, 8, leftg= data[G]);
        put_bits(&s->pb, 8, leftb= data[B]);
        put_bits(&s->pb, 8, 0);

        sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
        encode_bgr_bitstream(s, width-1);

        for(y=1; y<s->height; y++){
            uint8_t *dst = data + y*stride;
            if(s->predictor == PLANE && s->interlaced < y){
                s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
                sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
            }else{
                sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
            }
            encode_bgr_bitstream(s, width);
        }
    }else{
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
    }
    emms_c();

    size+= (put_bits_count(&s->pb)+31)/8;
    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 15, 0);
    size/= 4;

    if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
        int j;
        char *p= avctx->stats_out;
        char *end= p + 1024*30;
        for(i=0; i<3; i++){
            for(j=0; j<256; j++){
                snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
                p+= strlen(p);
                s->stats[i][j]= 0;
            }
            snprintf(p, end-p, "\n");
            p++;
        }
    } else
        avctx->stats_out[0] = '\0';
    if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
        flush_put_bits(&s->pb);
        s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
    }

    s->picture_number++;

    return size*4;
}

static av_cold int encode_end(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    common_end(s);

    av_freep(&avctx->extradata);
    av_freep(&avctx->stats_out);

    return 0;
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */

#if CONFIG_HUFFYUV_DECODER
AVCodec huffyuv_decoder = {
    "huffyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    decode_init,
    NULL,
    decode_end,
    decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
#endif

#if CONFIG_FFVHUFF_DECODER
AVCodec ffvhuff_decoder = {
    "ffvhuff",
    CODEC_TYPE_VIDEO,
    CODEC_ID_FFVHUFF,
    sizeof(HYuvContext),
    decode_init,
    NULL,
    decode_end,
    decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
#endif

#if CONFIG_HUFFYUV_ENCODER
AVCodec huffyuv_encoder = {
    "huffyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    encode_init,
    encode_frame,
    encode_end,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
#endif

#if CONFIG_FFVHUFF_ENCODER
AVCodec ffvhuff_encoder = {
    "ffvhuff",
    CODEC_TYPE_VIDEO,
    CODEC_ID_FFVHUFF,
    sizeof(HYuvContext),
    encode_init,
    encode_frame,
    encode_end,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
#endif