2 * huffyuv codec for libavcodec
4 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
6 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
9 * This file is part of FFmpeg.
11 * FFmpeg is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; either
14 * version 2.1 of the License, or (at your option) any later version.
16 * FFmpeg is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with FFmpeg; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * @file libavcodec/huffyuv.c
28 * huffyuv codec for libavcodec.
// NOTE(review): the embedded original line numbers (48..78) jump, so most
// members of these declarations are missing from this view; the comments
// below describe only the fields that are visible here.
48 typedef enum Predictor{
54 typedef struct HYuvContext{
55 AVCodecContext *avctx;
// Output-format toggles for the packed/planar variants (see decode_init).
63 int yuy2; //use yuy2 instead of 422P
64 int bgr32; //use bgr32 instead of bgr24
// Per-plane symbol frequency counters; updated by the encoder and dumped
// to stats_out for 2-pass operation (see encode_frame).
71 uint64_t stats[3][256];
// Per-plane Huffman code words, paired with len[] (not visible here).
73 uint32_t bits[3][256];
// Precomputed packed-BGR pixel values for joint-table decoding (vlc[3]).
74 uint32_t pix_bgr_map[1<<VLC_BITS];
75 VLC vlc[6]; //Y,U,V,YY,YU,YV
// Byte-swapped copy of the input packet (input is stored as 32-bit LE words).
77 uint8_t *bitstream_buffer;
78 unsigned int bitstream_buffer_size;
// Run-length coded code-length tables of the original ("classic") huffyuv
// codec; parsed by read_len_table() in read_old_huffman_tables().
// NOTE(review): the luma table is truncated in this view (original lines
// 85-87 are missing, including its terminator).
82 static const unsigned char classic_shift_luma[] = {
83 34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
84 16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
88 static const unsigned char classic_shift_chroma[] = {
89 66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
90 56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
91 214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
// Luma code words of the classic huffyuv tables; copied into s->bits[0]
// by read_old_huffman_tables(). NOTE(review): closing brace (original
// line 111) is missing from this view.
94 static const unsigned char classic_add_luma[256] = {
95 3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
96 73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
97 68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
98 35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
99 37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
100 35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
101 27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
102 15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
103 12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
104 12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
105 18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
106 28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
107 28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
108 62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
109 54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
110 46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
// Chroma code words of the classic huffyuv tables; copied into s->bits[1]
// by read_old_huffman_tables(). NOTE(review): closing brace (original
// line 130) is missing from this view.
113 static const unsigned char classic_add_chroma[256] = {
114 3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
115 7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
116 11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
117 43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
118 143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
119 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
120 17, 14, 5, 6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
121 112,113,114,115, 4,117,118, 92, 94,121,122, 3,124,103, 2, 1,
122 0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
123 135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
124 52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
125 19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
126 7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
127 83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
128 14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
129 6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
// Left-predict one plane line: dst[i] = src[i] - src[i-1] (first pixel uses
// 'left' as the predecessor) and return the new left value for the next
// line. NOTE(review): most of the body (original lines 133-151) is missing
// here; diff_bytes handles the bulk after a short scalar prologue.
132 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
136 const int temp= src[i];
143 const int temp= src[i];
// Remaining w-16 bytes are differenced with a DSP routine (SIMD-capable).
147 s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
// Left-predict a packed 32-bit BGR line per channel; updates *red/*green/
// *blue with the last pixel's components so the caller can carry them to
// the next line. NOTE(review): body is partially missing (original lines
// 153-168 gaps); the first few pixels are handled scalar, the rest by
// diff_bytes with a 4-byte (one-pixel) offset.
152 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
158 for(i=0; i<FFMIN(w,4); i++){
159 const int rt= src[i*4+R];
160 const int gt= src[i*4+G];
161 const int bt= src[i*4+B];
169 s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
// Remember the last pixel's channels for the next line's left predictor.
170 *red= src[(w-1)*4+R];
171 *green= src[(w-1)*4+G];
172 *blue= src[(w-1)*4+B];
// Parse a run-length coded table of Huffman code lengths from the
// bitstream: 3-bit repeat count (0 => explicit 8-bit repeat) followed by a
// 5-bit length value. Returns <0 on malformed input.
// NOTE(review): loop structure and dst writes are not visible (original
// lines 176-192 missing).
175 static int read_len_table(uint8_t *dst, GetBitContext *gb){
179 repeat= get_bits(gb, 3);
180 val = get_bits(gb, 5);
182 repeat= get_bits(gb, 8);
183 //printf("%d %d\n", val, repeat);
185 av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
// Derive canonical Huffman code words in dst[] from the per-symbol code
// lengths in len_table[], assigning codes from the longest length (32)
// down. Returns <0 if the lengths do not form a valid code.
194 static int generate_bits_table(uint32_t *dst, uint8_t *len_table){
198 for(len=32; len>0; len--){
199 for(index=0; index<256; index++){
200 if(len_table[index]==len)
204 av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
212 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
// Restore the min-heap property for a binary heap of HeapElem (keyed on
// .val), sifting the element at 'root' down; used by generate_len_table.
218 static void heap_sift(HeapElem *h, int root, int size)
220 while(root*2+1 < size) {
221 int child = root*2+1;
// Pick the smaller of the two children.
222 if(child < size-1 && h[child].val > h[child+1].val)
224 if(h[root].val > h[child].val) {
225 FFSWAP(HeapElem, h[root], h[child]);
// Build Huffman code lengths in dst[] from symbol frequencies in stats[]
// using a heap-based Huffman construction. If any resulting length reaches
// 32, the outer loop retries with a larger 'offset' bias added to every
// frequency, flattening the distribution until all lengths fit.
// NOTE(review): several body lines are missing from this view.
232 static void generate_len_table(uint8_t *dst, uint64_t *stats, int size){
238 for(offset=1; ; offset<<=1){
239 for(i=0; i<size; i++){
// Bias each count so zero-frequency symbols still get a code.
241 h[i].val = (stats[i] << 8) + offset;
// Heapify (min-heap on .val).
243 for(i=size/2-1; i>=0; i--)
244 heap_sift(h, i, size);
246 for(next=size; next<size*2-1; next++){
247 // merge the two smallest entries, and put it back in the heap
248 uint64_t min1v = h[0].val;
249 up[h[0].name] = next;
250 h[0].val = INT64_MAX;
251 heap_sift(h, 0, size);
252 up[h[0].name] = next;
255 heap_sift(h, 0, size);
// Walk the merge tree top-down: depth of each node = parent depth + 1.
259 for(i=2*size-3; i>=size; i--)
260 len[i] = len[up[i]] + 1;
261 for(i=0; i<size; i++) {
262 dst[i] = len[up[i]] + 1;
// A length >= 32 cannot be represented; retry with a larger offset.
263 if(dst[i] >= 32) break;
268 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
// Build the joint VLC tables used by the fast decode paths: for YUV
// (bpp < 24) a pair-of-symbols table per chroma plane (vlc[4], vlc[5] via
// 3+p), for RGB a triple-symbol table (vlc[3]) over a restricted +/-16
// range of G/B/R deltas. Entries are included only when the combined code
// length fits within VLC_BITS. NOTE(review): parts of both loops are
// missing from this view (e.g. the i++ bookkeeping and RGB table stores).
270 static void generate_joint_tables(HYuvContext *s){
271 uint16_t symbols[1<<VLC_BITS];
272 uint16_t bits[1<<VLC_BITS];
273 uint8_t len[1<<VLC_BITS];
274 if(s->bitstream_bpp < 24){
277 for(i=y=0; y<256; y++){
278 int len0 = s->len[0][y];
279 int limit = VLC_BITS - len0;
282 for(u=0; u<256; u++){
283 int len1 = s->len[p][u];
// Joint code: luma code followed by the chroma code.
286 len[i] = len0 + len1;
287 bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
288 symbols[i] = (y<<8) + u;
289 if(symbols[i] != 0xffff) // reserved to mean "invalid"
293 free_vlc(&s->vlc[3+p]);
294 init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
297 uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
298 int i, b, g, r, code;
// With decorrelation the G code comes first; otherwise B does.
299 int p0 = s->decorrelate;
300 int p1 = !s->decorrelate;
301 // restrict the range to +/-16 becaues that's pretty much guaranteed to
302 // cover all the combinations that fit in 11 bits total, and it doesn't
303 // matter if we miss a few rare codes.
304 for(i=0, g=-16; g<16; g++){
305 int len0 = s->len[p0][g&255];
306 int limit0 = VLC_BITS - len0;
309 for(b=-16; b<16; b++){
310 int len1 = s->len[p1][b&255];
311 int limit1 = limit0 - len1;
314 code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
315 for(r=-16; r<16; r++){
316 int len2 = s->len[2][r&255];
319 len[i] = len0 + len1 + len2;
320 bits[i] = (code << len2) + s->bits[2][r&255];
334 free_vlc(&s->vlc[3]);
335 init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
// Read the three per-plane code-length tables from 'src', regenerate the
// code words and the per-plane VLCs, then rebuild the joint tables.
// Returns the number of bytes consumed, or <0 on error.
339 static int read_huffman_tables(HYuvContext *s, uint8_t *src, int length){
343 init_get_bits(&gb, src, length*8);
346 if(read_len_table(s->len[i], &gb)<0)
348 if(generate_bits_table(s->bits[i], s->len[i])<0){
// Debug dump of the generated table (normally compiled out / unreachable
// in release builds -- original context not visible here).
352 for(j=0; j<256; j++){
353 printf("%6X, %2d, %3d\n", s->bits[i][j], s->len[i][j], j);
// Rebuild each single-plane VLC from the fresh len/bits tables.
356 free_vlc(&s->vlc[i]);
357 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
360 generate_joint_tables(s);
// Round the consumed bit count up to whole bytes.
362 return (get_bits_count(&gb)+7)/8;
// Load the hardcoded "classic" (v1/v2 huffyuv) tables: lengths from the
// run-length coded classic_shift_* arrays, code words straight from
// classic_add_*. For >=24 bpp the luma table is reused for plane 1; plane 2
// always copies plane 1. NOTE(review): the surrounding #if/#else and loop
// headers are missing from this view; the trailing av_log suggests a v1
// fallback path that reports "not supported".
365 static int read_old_huffman_tables(HYuvContext *s){
370 init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
371 if(read_len_table(s->len[0], &gb)<0)
373 init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
374 if(read_len_table(s->len[1], &gb)<0)
377 for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
378 for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
380 if(s->bitstream_bpp >= 24){
381 memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
382 memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
384 memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
385 memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
388 free_vlc(&s->vlc[i]);
389 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
392 generate_joint_tables(s);
396 av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
// Allocate the per-plane scratch line buffers: one width-sized buffer per
// plane for planar YUV (<24 bpp), a single 4*width buffer for packed RGB.
// The +16 padding leaves headroom for the DSP routines' block access.
401 static av_cold void alloc_temp(HYuvContext *s){
404 if(s->bitstream_bpp<24){
406 s->temp[i]= av_malloc(s->width + 16);
409 s->temp[0]= av_malloc(4*s->width + 16);
// Initialization shared by encoder and decoder: cache flags/dimensions
// from the AVCodecContext and set up the DSP function table.
413 static av_cold int common_init(AVCodecContext *avctx){
414 HYuvContext *s = avctx->priv_data;
417 s->flags= avctx->flags;
419 dsputil_init(&s->dsp, avctx);
421 s->width= avctx->width;
422 s->height= avctx->height;
423 assert(s->width>0 && s->height>0);
428 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
// Decoder init: parse the codec parameters either from extradata (v2+
// headers: method byte, bpp, interlace/context bits, then the Huffman
// tables) or from bits_per_coded_sample heuristics for old files, then
// select the output pixel format. NOTE(review): many branches (version
// handling, error returns, remaining switch cases) are missing from this
// decimated view.
429 static av_cold int decode_init(AVCodecContext *avctx)
431 HYuvContext *s = avctx->priv_data;
434 memset(s->vlc, 0, 3*sizeof(VLC));
436 avctx->coded_frame= &s->picture;
// Default interlacing heuristic: PAL-or-larger heights are interlaced.
437 s->interlaced= s->height > 288;
440 //if(avctx->extradata)
441 // printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
442 if(avctx->extradata_size){
443 if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
444 s->version=1; // do such files exist at all?
451 int method, interlace;
// Extradata byte 0: predictor in low 6 bits, decorrelate flag in bit 6.
453 method= ((uint8_t*)avctx->extradata)[0];
454 s->decorrelate= method&64 ? 1 : 0;
455 s->predictor= method&63;
456 s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
457 if(s->bitstream_bpp==0)
458 s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
// Byte 2: interlace override (1=interlaced, 2=progressive) and adaptive
// context flag (bit 6).
459 interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
460 s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
461 s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
463 if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
// No usable extradata header: infer parameters from bpp.
466 switch(avctx->bits_per_coded_sample&7){
477 s->decorrelate= avctx->bits_per_coded_sample >= 24;
480 s->predictor= MEDIAN;
484 s->predictor= LEFT; //OLD
488 s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
491 if(read_old_huffman_tables(s) < 0)
495 switch(s->bitstream_bpp){
497 avctx->pix_fmt = PIX_FMT_YUV420P;
501 avctx->pix_fmt = PIX_FMT_YUYV422;
503 avctx->pix_fmt = PIX_FMT_YUV422P;
509 avctx->pix_fmt = PIX_FMT_RGB32;
511 avctx->pix_fmt = PIX_FMT_BGR24;
520 // av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
524 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
526 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
// Serialize one 256-entry code-length table into buf using the run-length
// format read_len_table() understands; returns the number of bytes written
// (return statement not visible in this decimated view).
527 static int store_table(HYuvContext *s, uint8_t *len, uint8_t *buf){
// Count the run of identical lengths (capped at 255).
535 for(; i<256 && len[i]==val && repeat<255; i++)
538 assert(val < 32 && val >0 && repeat<256 && repeat>0);
// Long runs: explicit repeat byte; short runs: packed into the value byte.
541 buf[index++]= repeat;
543 buf[index++]= val | (repeat<<5);
// Encoder init: allocate extradata/stats buffers, map the input pixel
// format to a bitstream bpp, validate option combinations (context model
// vs 2-pass, huffyuv vs ffvhuff capabilities), build the Huffman tables
// (from stats_in if 2-pass, else from a built-in Laplacian-ish model) and
// write the 4-byte extradata header plus the serialized tables.
// NOTE(review): heavily decimated -- error returns, loop headers and the
// stats-parsing scaffolding are missing from this view.
550 static av_cold int encode_init(AVCodecContext *avctx)
552 HYuvContext *s = avctx->priv_data;
557 avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
558 avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
561 avctx->coded_frame= &s->picture;
563 switch(avctx->pix_fmt){
564 case PIX_FMT_YUV420P:
565 s->bitstream_bpp= 12;
567 case PIX_FMT_YUV422P:
568 s->bitstream_bpp= 16;
571 s->bitstream_bpp= 24;
574 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
577 avctx->bits_per_coded_sample= s->bitstream_bpp;
578 s->decorrelate= s->bitstream_bpp >= 24;
579 s->predictor= avctx->prediction_method;
580 s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
581 if(avctx->context_model==1){
582 s->context= avctx->context_model;
// Adaptive context rewrites tables per frame, which conflicts with the
// fixed global stats of 2-pass encoding.
583 if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
584 av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
// The original huffyuv decoder cannot handle 420P or per-frame tables.
589 if(avctx->codec->id==CODEC_ID_HUFFYUV){
590 if(avctx->pix_fmt==PIX_FMT_YUV420P){
591 av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
594 if(avctx->context_model){
595 av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
598 if(s->interlaced != ( s->height > 288 ))
599 av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
602 if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
603 av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
// Extradata header: [0]=predictor|decorrelate, [1]=bpp, [2]=flags, [3]=0.
607 ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
608 ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
609 ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
611 ((uint8_t*)avctx->extradata)[2]|= 0x40;
612 ((uint8_t*)avctx->extradata)[3]= 0;
613 s->avctx->extradata_size= 4;
// 2-pass: accumulate the per-symbol counts parsed from stats_in.
616 char *p= avctx->stats_in;
626 for(j=0; j<256; j++){
627 s->stats[i][j]+= strtol(p, &next, 0);
628 if(next==p) return -1;
632 if(p[0]==0 || p[1]==0 || p[2]==0) break;
// No stats: seed a synthetic distribution favoring small residuals.
636 for(j=0; j<256; j++){
637 int d= FFMIN(j, 256-j);
639 s->stats[i][j]= 100000000/(d+1);
644 generate_len_table(s->len[i], s->stats[i], 256);
646 if(generate_bits_table(s->bits[i], s->len[i])<0){
650 s->avctx->extradata_size+=
651 store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
// Re-seed the running stats used by the adaptive-context path, scaled to
// the picture size (chroma planes get a smaller share).
656 int pels = s->width*s->height / (i?40:10);
657 for(j=0; j<256; j++){
658 int d= FFMIN(j, 256-j);
659 s->stats[i][j]= pels/(d+1);
668 // printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
676 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
678 /* TODO instead of restarting the read when the code isn't in the first level
679 * of the joint table, jump into the 2nd level of the individual table. */
// Decode two symbols at once via the joint table (vlc[3+plane1]); when the
// joint lookup misses, fall back to two single-symbol reads.
680 #define READ_2PIX(dst0, dst1, plane1){\
681 uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
686 dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
687 dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
// Decode 'count' 4:2:2 pixel pairs into s->temp[0..2] (Y interleaved with
// alternating U and V). Near the end of the buffer (count could exceed the
// worst-case 31 bits * 4 symbols per iteration) the bounds-checked loop is
// used; otherwise the unchecked fast loop.
691 static void decode_422_bitstream(HYuvContext *s, int count){
696 if(count >= (get_bits_left(&s->gb))/(31*4)){
697 for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
698 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
699 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
702 for(i=0; i<count; i++){
703 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
704 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
// Decode 'count' luma samples (two per READ_2PIX) into s->temp[0], with
// the same checked/unchecked split as decode_422_bitstream.
709 static void decode_gray_bitstream(HYuvContext *s, int count){
714 if(count >= (get_bits_left(&s->gb))/(31*2)){
715 for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
716 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
719 for(i=0; i<count; i++){
720 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
725 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
// Entropy-code 'count' 4:2:2 pixel pairs from s->temp into the PutBit
// buffer, after a worst-case space check. Separate loops handle pass-1
// stats gathering, the stats+output combination, and plain output.
// NOTE(review): the stats-only loop bodies and the macro scaffolding
// (original lines 734-755) are largely missing from this view.
726 static int encode_422_bitstream(HYuvContext *s, int offset, int count){
728 const uint8_t *y = s->temp[0] + offset;
729 const uint8_t *u = s->temp[1] + offset/2;
730 const uint8_t *v = s->temp[2] + offset/2;
// Worst case: 4 symbols * up to 2 bytes of code each per pixel pair.
732 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
733 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
744 if(s->flags&CODEC_FLAG_PASS1){
745 for(i=0; i<count; i++){
753 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
756 for(i=0; i<count; i++){
759 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
761 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
763 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
765 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
768 for(i=0; i<count; i++){
770 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
771 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
772 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
773 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
// Entropy-code 'count' luma-only samples from s->temp[0] (two per
// iteration via the LOAD2/WRITE2-style helper macros defined inline),
// with the same pass-1 / no-output / plain-output split as
// encode_422_bitstream. NOTE(review): the macro names and loop bodies are
// partially missing in this decimated view.
779 static int encode_gray_bitstream(HYuvContext *s, int count){
782 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
783 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
788 int y0 = s->temp[0][2*i];\
789 int y1 = s->temp[0][2*i+1];
794 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
795 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
798 if(s->flags&CODEC_FLAG_PASS1){
799 for(i=0; i<count; i++){
804 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
808 for(i=0; i<count; i++){
814 for(i=0; i<count; i++){
821 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
// Decode 'count' packed BGR(A) pixels into s->temp[0]. Tries the joint
// triple-symbol table (vlc[3]) first; on a miss decodes B, G, R separately,
// re-adding G when the stream is decorrelated. 'decorrelate' and 'alpha'
// are compile-time constants (av_always_inline) so each variant is
// specialized. NOTE(review): the alpha-handling branch and the joint-hit
// condition line are partially missing from this view.
823 static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
825 for(i=0; i<count; i++){
826 int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
828 *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
829 }else if(decorrelate){
830 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
831 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
832 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
834 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
835 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
836 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
// Alpha symbol is read but discarded (see the "?!" in the original).
839 get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?!
// Dispatch to the right decode_bgr_1 specialization based on the stream's
// decorrelate flag and whether an alpha byte is present (32 vs 24 bpp).
843 static void decode_bgr_bitstream(HYuvContext *s, int count){
845 if(s->bitstream_bpp==24)
846 decode_bgr_1(s, count, 1, 0);
848 decode_bgr_1(s, count, 1, 1);
850 if(s->bitstream_bpp==24)
851 decode_bgr_1(s, count, 0, 0);
853 decode_bgr_1(s, count, 0, 1);
// Entropy-code 'count' packed BGR pixels: G is coded directly, B and R as
// decorrelated (minus G, mod 256) residuals, G first on the wire. Loops
// split the same three ways as the YUV encoders (stats-only, stats+output,
// output). NOTE(review): the LOAD/STAT macro scaffolding and loop bodies
// are partially missing from this decimated view.
857 static int encode_bgr_bitstream(HYuvContext *s, int count){
860 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
861 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
866 int g= s->temp[0][4*i+G];\
867 int b= (s->temp[0][4*i+B] - g) & 0xff;\
868 int r= (s->temp[0][4*i+R] - g) & 0xff;
874 put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
875 put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
876 put_bits(&s->pb, s->len[2][r], s->bits[2][r]);
878 if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
879 for(i=0; i<count; i++){
883 }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
884 for(i=0; i<count; i++){
890 for(i=0; i<count; i++){
898 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
// Notify the application of newly-decoded rows [last_slice_end, y) via
// draw_horiz_band, computing per-plane byte offsets (chroma rows are
// halved for 420, per the bitstream_bpp==12 branch -- cy computation not
// visible here). No-op when the callback is unset.
899 static void draw_slice(HYuvContext *s, int y){
903 if(s->avctx->draw_horiz_band==NULL)
906 h= y - s->last_slice_end;
909 if(s->bitstream_bpp==12){
915 offset[0] = s->picture.linesize[0]*y;
916 offset[1] = s->picture.linesize[1]*cy;
917 offset[2] = s->picture.linesize[2]*cy;
921 s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
923 s->last_slice_end= y + h;
// Decode one huffyuv frame: byte-swap the packet into bitstream_buffer,
// (for adaptive context) re-read the Huffman tables from the frame, then
// run the predictor-specific reconstruction loops -- LEFT / PLANE / MEDIAN
// for planar YUV, LEFT / PLANE for packed RGB (stored bottom-up). Returns
// the number of input bytes consumed.
// NOTE(review): this view is decimated -- several case labels, loop
// headers and error paths are missing between the visible lines; the
// comments below describe only the visible statements.
926 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
927 const uint8_t *buf = avpkt->data;
928 int buf_size = avpkt->size;
929 HYuvContext *s = avctx->priv_data;
930 const int width= s->width;
931 const int width2= s->width>>1;
932 const int height= s->height;
933 int fake_ystride, fake_ustride, fake_vstride;
934 AVFrame * const p= &s->picture;
937 AVFrame *picture = data;
// The stream stores data as byte-swapped 32-bit words; swap into a
// padded scratch buffer before bit-reading.
939 av_fast_malloc(&s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
940 if (!s->bitstream_buffer)
941 return AVERROR(ENOMEM);
943 memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
944 s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
947 avctx->release_buffer(avctx, p);
950 if(avctx->get_buffer(avctx, p) < 0){
951 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
// Adaptive context: per-frame tables precede the pixel data.
956 table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
961 if((unsigned)(buf_size-table_size) >= INT_MAX/8)
964 init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);
// When interlaced, prediction references the line two rows up.
966 fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
967 fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
968 fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];
970 s->last_slice_end= 0;
972 if(s->bitstream_bpp<24){
974 int lefty, leftu, leftv;
975 int lefttopy, lefttopu, lefttopv;
// First four raw bytes seed the predictors (order differs for yuy2
// vs planar output -- the selecting condition is not visible here).
978 p->data[0][3]= get_bits(&s->gb, 8);
979 p->data[0][2]= get_bits(&s->gb, 8);
980 p->data[0][1]= get_bits(&s->gb, 8);
981 p->data[0][0]= get_bits(&s->gb, 8);
983 av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
987 leftv= p->data[2][0]= get_bits(&s->gb, 8);
988 lefty= p->data[0][1]= get_bits(&s->gb, 8);
989 leftu= p->data[1][0]= get_bits(&s->gb, 8);
990 p->data[0][0]= get_bits(&s->gb, 8);
992 switch(s->predictor){
// LEFT / PLANE: first line (minus the 2 seed pixels) is left-predicted.
995 decode_422_bitstream(s, width-2);
996 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
997 if(!(s->flags&CODEC_FLAG_GRAY)){
998 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
999 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1002 for(cy=y=1; y<s->height; y++,cy++){
1003 uint8_t *ydst, *udst, *vdst;
// 420: every other line is luma-only (y advances, cy does not).
1005 if(s->bitstream_bpp==12){
1006 decode_gray_bitstream(s, width);
1008 ydst= p->data[0] + p->linesize[0]*y;
1010 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1011 if(s->predictor == PLANE){
1013 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1016 if(y>=s->height) break;
1021 ydst= p->data[0] + p->linesize[0]*y;
1022 udst= p->data[1] + p->linesize[1]*cy;
1023 vdst= p->data[2] + p->linesize[2]*cy;
1025 decode_422_bitstream(s, width);
1026 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1027 if(!(s->flags&CODEC_FLAG_GRAY)){
1028 leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
1029 leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
// PLANE adds the previous (field) line on top of the left prediction.
1031 if(s->predictor == PLANE){
1032 if(cy>s->interlaced){
1033 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1034 if(!(s->flags&CODEC_FLAG_GRAY)){
1035 s->dsp.add_bytes(udst, udst - fake_ustride, width2);
1036 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
1041 draw_slice(s, height);
// MEDIAN predictor path:
1045 /* first line except first 2 pixels is left predicted */
1046 decode_422_bitstream(s, width-2);
1047 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1048 if(!(s->flags&CODEC_FLAG_GRAY)){
1049 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1050 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1055 /* second line is left predicted for interlaced case */
1057 decode_422_bitstream(s, width);
1058 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
1059 if(!(s->flags&CODEC_FLAG_GRAY)){
// NOTE(review): linesize indices [2]/[1] look swapped relative to the
// planes being written (data[1] with linesize[2] and vice versa) --
// cannot confirm intent from this decimated view; verify upstream.
1060 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
1061 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
1066 /* next 4 pixels are left predicted too */
1067 decode_422_bitstream(s, 4);
1068 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
1069 if(!(s->flags&CODEC_FLAG_GRAY)){
1070 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
1071 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
1074 /* next line except the first 4 pixels is median predicted */
1075 lefttopy= p->data[0][3];
1076 decode_422_bitstream(s, width-4);
1077 s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
1078 if(!(s->flags&CODEC_FLAG_GRAY)){
1079 lefttopu= p->data[1][1];
1080 lefttopv= p->data[2][1];
1081 s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
1082 s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
// Remaining lines: full median prediction against the row above.
1086 for(; y<height; y++,cy++){
1087 uint8_t *ydst, *udst, *vdst;
1089 if(s->bitstream_bpp==12){
1091 decode_gray_bitstream(s, width);
1092 ydst= p->data[0] + p->linesize[0]*y;
1093 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1096 if(y>=height) break;
1100 decode_422_bitstream(s, width);
1102 ydst= p->data[0] + p->linesize[0]*y;
1103 udst= p->data[1] + p->linesize[1]*cy;
1104 vdst= p->data[2] + p->linesize[2]*cy;
1106 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1107 if(!(s->flags&CODEC_FLAG_GRAY)){
1108 s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
1109 s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
1113 draw_slice(s, height);
// Packed RGB path (frame stored bottom-up, so decode starts at the
// last line and walks y downward).
1119 int leftr, leftg, leftb;
1120 const int last_line= (height-1)*p->linesize[0];
1122 if(s->bitstream_bpp==32){
1123 skip_bits(&s->gb, 8);
1124 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1125 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1126 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1128 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1129 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1130 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1131 skip_bits(&s->gb, 8);
1135 switch(s->predictor){
1138 decode_bgr_bitstream(s, width-1);
1139 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb);
1141 for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
1142 decode_bgr_bitstream(s, width);
1144 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb);
1145 if(s->predictor == PLANE){
1146 if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
1147 s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
1148 p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
1152 draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
1155 av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
1159 av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
1166 *data_size = sizeof(AVFrame);
// Consumed bytes, rounded up to whole 32-bit words, plus the table bytes.
1168 return (get_bits_count(&s->gb)+31)/32*4 + table_size;
1170 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
// Teardown shared by encoder and decoder: free the per-plane scratch
// buffers allocated by alloc_temp().
1172 static int common_end(HYuvContext *s){
1176 av_freep(&s->temp[i]);
1181 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
// Decoder teardown: release the reference frame, the byte-swap scratch
// buffer, and all VLC tables (loop header not visible here).
1182 static av_cold int decode_end(AVCodecContext *avctx)
1184 HYuvContext *s = avctx->priv_data;
1187 if (s->picture.data[0])
1188 avctx->release_buffer(avctx, &s->picture);
1191 av_freep(&s->bitstream_buffer);
1194 free_vlc(&s->vlc[i]);
1199 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1201 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
// Encode one frame into buf: optionally regenerate and store per-frame
// tables (adaptive context), seed the predictors with raw bytes, run the
// predictor-specific residual + entropy-coding loops (LEFT / PLANE /
// MEDIAN for planar YUV, LEFT / PLANE for bottom-up RGB32), then pad,
// byte-swap the output to the stream's 32-bit word order, and emit pass-1
// stats every 32nd frame. NOTE(review): this view is decimated -- loop
// headers, case labels and several bookkeeping lines are missing; comments
// below describe only the visible statements.
1202 static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
1203 HYuvContext *s = avctx->priv_data;
1204 AVFrame *pict = data;
1205 const int width= s->width;
1206 const int width2= s->width>>1;
1207 const int height= s->height;
1208 const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
1209 const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
1210 const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
1211 AVFrame * const p= &s->picture;
1215 p->pict_type= FF_I_TYPE;
// Adaptive context: rebuild tables from running stats and store them at
// the start of the frame, then decay the stats.
1220 generate_len_table(s->len[i], s->stats[i], 256);
1221 if(generate_bits_table(s->bits[i], s->len[i])<0)
1223 size+= store_table(s, s->len[i], &buf[size]);
1227 for(j=0; j<256; j++)
1228 s->stats[i][j] >>= 1;
1231 init_put_bits(&s->pb, buf+size, buf_size-size);
1233 if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
1234 int lefty, leftu, leftv, y, cy;
// Raw seed bytes, mirroring the decoder's predictor initialization.
1236 put_bits(&s->pb, 8, leftv= p->data[2][0]);
1237 put_bits(&s->pb, 8, lefty= p->data[0][1]);
1238 put_bits(&s->pb, 8, leftu= p->data[1][0]);
1239 put_bits(&s->pb, 8, p->data[0][0]);
1241 lefty= sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
1242 leftu= sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
1243 leftv= sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);
// First line minus the 2 seed pixels (offset 2 into the temp buffer).
1245 encode_422_bitstream(s, 2, width-2);
1247 if(s->predictor==MEDIAN){
1248 int lefttopy, lefttopu, lefttopv;
// Second line (interlaced case) is still left-predicted.
1251 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
1252 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
1253 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);
1255 encode_422_bitstream(s, 0, width);
// First 4 pixels of the next row are left-predicted too.
1259 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
1260 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
1261 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);
1263 encode_422_bitstream(s, 0, 4);
1265 lefttopy= p->data[0][3];
1266 lefttopu= p->data[1][1];
1267 lefttopv= p->data[2][1];
1268 s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
1269 s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
1270 s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
1271 encode_422_bitstream(s, 0, width-4);
// Remaining lines: full median prediction against the row above.
1274 for(; y<height; y++,cy++){
1275 uint8_t *ydst, *udst, *vdst;
// 420: luma-only lines interleave (y advances without cy).
1277 if(s->bitstream_bpp==12){
1279 ydst= p->data[0] + p->linesize[0]*y;
1280 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1281 encode_gray_bitstream(s, width);
1284 if(y>=height) break;
1286 ydst= p->data[0] + p->linesize[0]*y;
1287 udst= p->data[1] + p->linesize[1]*cy;
1288 vdst= p->data[2] + p->linesize[2]*cy;
1290 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1291 s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
1292 s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
1294 encode_422_bitstream(s, 0, width);
// LEFT / PLANE predictor path.
1297 for(cy=y=1; y<height; y++,cy++){
1298 uint8_t *ydst, *udst, *vdst;
1300 /* encode a luma only line & y++ */
1301 if(s->bitstream_bpp==12){
1302 ydst= p->data[0] + p->linesize[0]*y;
// PLANE: difference against the row above first, then left-predict
// the result; s->temp[1] is reused as a scratch line.
1304 if(s->predictor == PLANE && s->interlaced < y){
1305 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1307 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1309 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1311 encode_gray_bitstream(s, width);
1313 if(y>=height) break;
1316 ydst= p->data[0] + p->linesize[0]*y;
1317 udst= p->data[1] + p->linesize[1]*cy;
1318 vdst= p->data[2] + p->linesize[2]*cy;
1320 if(s->predictor == PLANE && s->interlaced < cy){
1321 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1322 s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
// V's scratch shares temp[2], offset by width2.
1323 s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
1325 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1326 leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
1327 leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
1329 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1330 leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
1331 leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
1334 encode_422_bitstream(s, 0, width);
// Packed RGB32 path: frame is written bottom-up (negative stride).
1337 }else if(avctx->pix_fmt == PIX_FMT_RGB32){
1338 uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
1339 const int stride = -p->linesize[0];
1340 const int fake_stride = -fake_ystride;
1342 int leftr, leftg, leftb;
1344 put_bits(&s->pb, 8, leftr= data[R]);
1345 put_bits(&s->pb, 8, leftg= data[G]);
1346 put_bits(&s->pb, 8, leftb= data[B]);
1347 put_bits(&s->pb, 8, 0);
1349 sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
1350 encode_bgr_bitstream(s, width-1);
1352 for(y=1; y<s->height; y++){
1353 uint8_t *dst = data + y*stride;
1354 if(s->predictor == PLANE && s->interlaced < y){
1355 s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
1356 sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
1358 sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
1360 encode_bgr_bitstream(s, width);
1363 av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
// NOTE(review): "(x+31)/8" mixes a 32-bit round-up constant with a
// byte divisor -- cannot confirm intent from this view; verify upstream.
1367 size+= (put_bits_count(&s->pb)+31)/8;
// Trailing zero padding so the bswapped tail is well-defined.
1368 put_bits(&s->pb, 16, 0);
1369 put_bits(&s->pb, 15, 0);
// Dump accumulated symbol stats for 2-pass rate control (every 32 frames).
1372 if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
1374 char *p= avctx->stats_out;
1375 char *end= p + 1024*30;
1377 for(j=0; j<256; j++){
1378 snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
1382 snprintf(p, end-p, "\n");
1386 avctx->stats_out[0] = '\0';
// Output words are stored byte-swapped in the huffyuv stream.
1387 if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
1388 flush_put_bits(&s->pb);
1389 s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
1392 s->picture_number++;
// Encoder teardown: free extradata and the 2-pass stats buffer (shared
// scratch is released via common_end, per its av_freep of s->temp).
1397 static av_cold int encode_end(AVCodecContext *avctx)
1399 HYuvContext *s = avctx->priv_data;
1403 av_freep(&avctx->extradata);
1404 av_freep(&avctx->stats_out);
1408 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
// Codec registration tables. NOTE(review): the positional initializer
// fields (codec id, type, init/close/decode callbacks) are missing from
// this decimated view; only size, capabilities, pix_fmts and long_name
// entries are visible.
1410 #if CONFIG_HUFFYUV_DECODER
1411 AVCodec huffyuv_decoder = {
1415 sizeof(HYuvContext),
1420 CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
1422 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1426 #if CONFIG_FFVHUFF_DECODER
1427 AVCodec ffvhuff_decoder = {
1431 sizeof(HYuvContext),
1436 CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
1438 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1442 #if CONFIG_HUFFYUV_ENCODER
1443 AVCodec huffyuv_encoder = {
1447 sizeof(HYuvContext),
1451 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1452 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1456 #if CONFIG_FFVHUFF_ENCODER
1457 AVCodec ffvhuff_encoder = {
1461 sizeof(HYuvContext),
1465 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1466 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),