2 * huffyuv codec for libavcodec
4 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
6 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
9 * This file is part of FFmpeg.
11 * FFmpeg is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; either
14 * version 2.1 of the License, or (at your option) any later version.
16 * FFmpeg is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with FFmpeg; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * @file libavcodec/huffyuv.c
28 * huffyuv codec for libavcodec.
32 #include "bitstream.h"
38 #ifdef WORDS_BIGENDIAN
/* Prediction modes (members elided in this numbered dump; presumably
 * LEFT, PLANE, MEDIAN as referenced below -- TODO confirm). */
48 typedef enum Predictor{
/* Per-stream state shared by the huffyuv encoder and decoder. */
54 typedef struct HYuvContext{
55 AVCodecContext *avctx;
63 int yuy2; //use yuy2 instead of 422P
64 int bgr32; //use bgr32 instead of bgr24
/* symbol occurrence counts per plane; feed generate_len_table() */
71 uint64_t stats[3][256];
/* Huffman codes per plane, indexed by symbol */
73 uint32_t bits[3][256];
/* precomputed BGR pixel values for the joint RGB VLC (generate_joint_tables) */
74 uint32_t pix_bgr_map[1<<VLC_BITS];
75 VLC vlc[6]; //Y,U,V,YY,YU,YV
/* byteswapped copy of the input packet (filled in decode_frame) */
77 uint8_t *bitstream_buffer;
78 unsigned int bitstream_buffer_size;
/* Run-length-coded code-length table for the classic (pre-extradata)
 * huffyuv luma plane; decoded via read_len_table() in
 * read_old_huffman_tables(). NOTE(review): truncated in this dump. */
82 static const unsigned char classic_shift_luma[] = {
83 34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
84 16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
/* Run-length-coded code-length table for the classic huffyuv chroma
 * planes; consumed by read_len_table(). NOTE(review): the closing
 * brace/terminator lines are elided in this dump. */
88 static const unsigned char classic_shift_chroma[] = {
89 66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
90 56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
91 214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
/* Fixed Huffman code values for the classic huffyuv luma plane; copied
 * into s->bits[0] by read_old_huffman_tables(). One entry per symbol. */
94 static const unsigned char classic_add_luma[256] = {
95 3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
96 73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
97 68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
98 35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
99 37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
100 35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
101 27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
102 15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
103 12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
104 12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
105 18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
106 28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
107 28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
108 62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
109 54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
110 46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
/* Fixed Huffman code values for the classic huffyuv chroma planes;
 * copied into s->bits[1] by read_old_huffman_tables(). */
113 static const unsigned char classic_add_chroma[256] = {
114 3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
115 7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
116 11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
117 43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
118 143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
119 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
120 17, 14, 5, 6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
121 112,113,114,115, 4,117,118, 92, 94,121,122, 3,124,103, 2, 1,
122 0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
123 135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
124 52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
125 19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
126 7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
127 83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
128 14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
129 6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
/* Undo left prediction: reconstruct w samples by accumulating the
 * decoded differences in src into dst, starting from acc; returns the
 * final accumulator (new left value for the next call).
 * NOTE(review): loop body and return lines are elided in this dump. */
132 static inline int add_left_prediction(uint8_t *dst, uint8_t *src, int w, int acc){
135 for(i=0; i<w-1; i++){
/* Same as add_left_prediction but for 4-byte BGR32 pixels, carrying one
 * running sum per channel in *red/*green/*blue (body elided in dump). */
151 static inline void add_left_prediction_bgr32(uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
/* Apply left prediction: write sample differences of src into dst.
 * The first 16 samples are handled in C (two loops, presumably split at
 * FFMIN(w,16) -- elided here); dsputil's diff_bytes does the rest.
 * Returns the last source sample (left value for the next row). */
173 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
177 const int temp= src[i];
184 const int temp= src[i];
188 s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
/* Left-predict w BGR32 pixels: the first FFMIN(w,4) pixels in C, the
 * remainder via diff_bytes with a 4-byte (one pixel) offset. The last
 * pixel's channels are returned through *red/*green/*blue. */
193 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
199 for(i=0; i<FFMIN(w,4); i++){
200 const int rt= src[i*4+R];
201 const int gt= src[i*4+G];
202 const int bt= src[i*4+B];
/* bulk of the row: per-byte difference against the previous pixel */
210 s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
211 *red= src[(w-1)*4+R];
212 *green= src[(w-1)*4+G];
213 *blue= src[(w-1)*4+B];
/* Decode a run-length coded table of 256 Huffman code lengths from gb:
 * each record is a 3-bit repeat count (0 escapes to an 8-bit count)
 * followed by a 5-bit length value. Inverse of store_table(). */
216 static void read_len_table(uint8_t *dst, GetBitContext *gb){
220 repeat= get_bits(gb, 3);
221 val = get_bits(gb, 5);
/* repeat==0 escape: real repeat count follows in 8 bits */
223 repeat= get_bits(gb, 8);
224 //printf("%d %d\n", val, repeat);
/* Assign canonical Huffman code values in dst from the code lengths in
 * len_table, iterating from the longest length down. Returns 0 on
 * success, negative if the lengths do not form a valid prefix code. */
230 static int generate_bits_table(uint32_t *dst, uint8_t *len_table){
234 for(len=32; len>0; len--){
235 for(index=0; index<256; index++){
236 if(len_table[index]==len)
/* reaching here means the code space was over/under-subscribed */
240 av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
248 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
254 static void heap_sift(HeapElem *h, int root, int size)
256 while(root*2+1 < size) {
257 int child = root*2+1;
258 if(child < size-1 && h[child].val > h[child+1].val)
260 if(h[root].val > h[child].val) {
261 FFSWAP(HeapElem, h[root], h[child]);
/* Build Huffman code lengths in dst from symbol frequencies in stats
 * using the heap-based tree construction. The outer offset loop retries
 * with a growing bias until all lengths fit below 32 bits.
 * NOTE(review): heavily elided in this dump; comments describe only the
 * visible structure. */
268 static void generate_len_table(uint8_t *dst, uint64_t *stats, int size){
274 for(offset=1; ; offset<<=1){
/* seed the heap: frequency scaled up with a small bias so zero-count
 * symbols still get a code */
275 for(i=0; i<size; i++){
277 h[i].val = (stats[i] << 8) + offset;
/* heapify bottom-up */
279 for(i=size/2-1; i>=0; i--)
280 heap_sift(h, i, size);
282 for(next=size; next<size*2-1; next++){
283 // merge the two smallest entries, and put it back in the heap
284 uint64_t min1v = h[0].val;
285 up[h[0].name] = next;
286 h[0].val = INT64_MAX;
287 heap_sift(h, 0, size);
288 up[h[0].name] = next;
291 heap_sift(h, 0, size);
/* walk the parent links to turn tree depth into code length */
295 for(i=2*size-3; i>=size; i--)
296 len[i] = len[up[i]] + 1;
297 for(i=0; i<size; i++) {
298 dst[i] = len[up[i]] + 1;
/* a length >= 32 forces a retry with a larger offset */
299 if(dst[i] >= 32) break;
304 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
/* Build joint VLC tables decoding two symbols per lookup: Y+U / Y+V
 * pairs for YUV (vlc[4], vlc[5]... via vlc[3+p]) or G+B+R triples for
 * RGB (vlc[3]). Only pairs whose combined length fits in VLC_BITS are
 * entered; longer codes fall back to the per-plane tables (READ_2PIX). */
306 static void generate_joint_tables(HYuvContext *s){
307 uint16_t symbols[1<<VLC_BITS];
308 uint16_t bits[1<<VLC_BITS];
309 uint8_t len[1<<VLC_BITS];
310 if(s->bitstream_bpp < 24){
313 for(i=y=0; y<256; y++){
314 int len0 = s->len[0][y];
315 int limit = VLC_BITS - len0;
318 for(u=0; u<256; u++){
319 int len1 = s->len[p][u];
322 len[i] = len0 + len1;
323 bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
324 symbols[i] = (y<<8) + u;
325 if(symbols[i] != 0xffff) // reserved to mean "invalid"
329 free_vlc(&s->vlc[3+p]);
330 init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
/* RGB path: symbols map through pix_bgr_map instead of a symbol table */
333 uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
334 int i, b, g, r, code;
335 int p0 = s->decorrelate;
336 int p1 = !s->decorrelate;
337 // restrict the range to +/-16 becaues that's pretty much guaranteed to
338 // cover all the combinations that fit in 11 bits total, and it doesn't
339 // matter if we miss a few rare codes.
340 for(i=0, g=-16; g<16; g++){
341 int len0 = s->len[p0][g&255];
342 int limit0 = VLC_BITS - len0;
345 for(b=-16; b<16; b++){
346 int len1 = s->len[p1][b&255];
347 int limit1 = limit0 - len1;
350 code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
351 for(r=-16; r<16; r++){
352 int len2 = s->len[2][r&255];
355 len[i] = len0 + len1 + len2;
356 bits[i] = (code << len2) + s->bits[2][r&255];
370 free_vlc(&s->vlc[3]);
371 init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
/* Parse the three per-plane Huffman length tables from src (extradata
 * or in-band header), derive code values and VLCs, then build the joint
 * tables. Returns the number of bytes consumed, or negative on error. */
375 static int read_huffman_tables(HYuvContext *s, uint8_t *src, int length){
379 init_get_bits(&gb, src, length*8);
382 read_len_table(s->len[i], &gb);
384 if(generate_bits_table(s->bits[i], s->len[i])<0){
/* debug dump of the generated codes (normally compiled out) */
388 for(j=0; j<256; j++){
389 printf("%6X, %2d, %3d\n", s->bits[i][j], s->len[i][j], j);
392 free_vlc(&s->vlc[i]);
393 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
396 generate_joint_tables(s);
/* round up to whole bytes consumed */
398 return (get_bits_count(&gb)+7)/8;
/* Load the fixed "classic" (pre-extradata) huffyuv tables: code lengths
 * from the run-length coded classic_shift_* arrays, code values from
 * classic_add_*, then build the VLCs and joint tables. */
401 static int read_old_huffman_tables(HYuvContext *s){
406 init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
407 read_len_table(s->len[0], &gb);
408 init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
409 read_len_table(s->len[1], &gb);
411 for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
412 for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
/* RGB uses the luma table for plane 1; plane 2 always mirrors plane 1 */
414 if(s->bitstream_bpp >= 24){
415 memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
416 memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
418 memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
419 memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
422 free_vlc(&s->vlc[i]);
423 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
426 generate_joint_tables(s);
/* disabled (#else) branch: v1 files rejected */
430 av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
/* Allocate the per-plane scratch rows: one raw row (+padding) for YUV,
 * a 4 bytes/pixel row for RGB modes. Freed in common_end(). */
435 static av_cold void alloc_temp(HYuvContext *s){
438 if(s->bitstream_bpp<24){
440 s->temp[i]= av_malloc(s->width + 16);
444 s->temp[i]= av_malloc(4*s->width + 16);
/* Initialisation shared by encoder and decoder: cache flags/dimensions
 * and set up dsputil. Dimensions must already be validated (>0). */
449 static av_cold int common_init(AVCodecContext *avctx){
450 HYuvContext *s = avctx->priv_data;
453 s->flags= avctx->flags;
455 dsputil_init(&s->dsp, avctx);
457 s->width= avctx->width;
458 s->height= avctx->height;
459 assert(s->width>0 && s->height>0);
464 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Decoder init: parse extradata (new-style header: method byte, bpp,
 * interlace/context flags, Huffman tables) or fall back to classic
 * tables, then select the output pixel format from bitstream_bpp. */
465 static av_cold int decode_init(AVCodecContext *avctx)
467 HYuvContext *s = avctx->priv_data;
470 memset(s->vlc, 0, 3*sizeof(VLC));
472 avctx->coded_frame= &s->picture;
/* default interlacing heuristic, may be overridden by extradata below */
473 s->interlaced= s->height > 288;
476 //if(avctx->extradata)
477 // printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
478 if(avctx->extradata_size){
479 if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
480 s->version=1; // do such files exist at all?
/* new-style (v2+) header layout: [0]=method, [1]=bpp, [2]=flags */
487 int method, interlace;
489 method= ((uint8_t*)avctx->extradata)[0];
490 s->decorrelate= method&64 ? 1 : 0;
491 s->predictor= method&63;
492 s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
493 if(s->bitstream_bpp==0)
494 s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
495 interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
496 s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
497 s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
499 if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
/* v1 files: infer parameters from bits_per_coded_sample instead */
502 switch(avctx->bits_per_coded_sample&7){
513 s->decorrelate= avctx->bits_per_coded_sample >= 24;
516 s->predictor= MEDIAN;
520 s->predictor= LEFT; //OLD
524 s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
527 if(read_old_huffman_tables(s) < 0)
/* map bitstream bpp to the decoded pixel format */
531 switch(s->bitstream_bpp){
533 avctx->pix_fmt = PIX_FMT_YUV420P;
537 avctx->pix_fmt = PIX_FMT_YUYV422;
539 avctx->pix_fmt = PIX_FMT_YUV422P;
545 avctx->pix_fmt = PIX_FMT_RGB32;
547 avctx->pix_fmt = PIX_FMT_BGR24;
556 // av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
560 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
562 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Run-length encode 256 Huffman code lengths into buf (inverse of
 * read_len_table): runs >= 8 use an escape byte, short runs pack the
 * count into the top 3 bits. Returns the number of bytes written. */
563 static int store_table(HYuvContext *s, uint8_t *len, uint8_t *buf){
571 for(; i<256 && len[i]==val && repeat<255; i++)
574 assert(val < 32 && val >0 && repeat<256 && repeat>0);
/* long run: escape (repeat field 0) then explicit 8-bit count */
577 buf[index++]= repeat;
579 buf[index++]= val | (repeat<<5);
/* Encoder init: validate pixel format / predictor / context-model
 * combinations, write the extradata header and global Huffman tables,
 * and seed the symbol statistics (from stats_in in 2-pass mode, else
 * from a generic Laplacian-shaped prior). */
586 static av_cold int encode_init(AVCodecContext *avctx)
588 HYuvContext *s = avctx->priv_data;
593 avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
594 avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
597 avctx->coded_frame= &s->picture;
599 switch(avctx->pix_fmt){
600 case PIX_FMT_YUV420P:
601 s->bitstream_bpp= 12;
603 case PIX_FMT_YUV422P:
604 s->bitstream_bpp= 16;
607 s->bitstream_bpp= 24;
610 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
613 avctx->bits_per_coded_sample= s->bitstream_bpp;
614 s->decorrelate= s->bitstream_bpp >= 24;
615 s->predictor= avctx->prediction_method;
616 s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
617 if(avctx->context_model==1){
618 s->context= avctx->context_model;
/* adaptive context rescales stats per frame; incompatible with 2-pass */
619 if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
620 av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
/* strict huffyuv (vs ffvhuff) restrictions for compatibility */
625 if(avctx->codec->id==CODEC_ID_HUFFYUV){
626 if(avctx->pix_fmt==PIX_FMT_YUV420P){
627 av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
630 if(avctx->context_model){
631 av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
634 if(s->interlaced != ( s->height > 288 ))
635 av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
638 if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
639 av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
/* extradata header: [0]=predictor|decorrelate, [1]=bpp, [2]=flags */
643 ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
644 ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
645 ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
647 ((uint8_t*)avctx->extradata)[2]|= 0x40;
648 ((uint8_t*)avctx->extradata)[3]= 0;
649 s->avctx->extradata_size= 4;
/* 2-pass: accumulate statistics from the first pass's stats_in */
652 char *p= avctx->stats_in;
662 for(j=0; j<256; j++){
663 s->stats[i][j]+= strtol(p, &next, 0);
664 if(next==p) return -1;
668 if(p[0]==0 || p[1]==0 || p[2]==0) break;
/* no stats: seed with a symmetric 1/(d+1) prior around 0 */
672 for(j=0; j<256; j++){
673 int d= FFMIN(j, 256-j);
675 s->stats[i][j]= 100000000/(d+1);
680 generate_len_table(s->len[i], s->stats[i], 256);
682 if(generate_bits_table(s->bits[i], s->len[i])<0){
686 s->avctx->extradata_size+=
687 store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
/* context mode: re-seed stats scaled to the image size */
692 int pels = s->width*s->height / (i?40:10);
693 for(j=0; j<256; j++){
694 int d= FFMIN(j, 256-j);
695 s->stats[i][j]= pels/(d+1);
704 // printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
712 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
714 /* TODO instead of restarting the read when the code isn't in the first level
715 * of the joint table, jump into the 2nd level of the individual table. */
/* Decode two samples at once: try the joint table vlc[3+plane1] first
 * (one lookup yields both symbols packed in a uint16); on a miss, fall
 * back to two single-symbol lookups in the per-plane tables. */
716 #define READ_2PIX(dst0, dst1, plane1){\
717 uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
722 dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
723 dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
/* Decode count YUYV quadruples into temp[0] (luma) / temp[1] (U) /
 * temp[2] (V) using the joint Y+U and Y+V tables. */
727 static void decode_422_bitstream(HYuvContext *s, int count){
732 for(i=0; i<count; i++){
733 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
734 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
/* Decode count luma pairs into temp[0] via the joint Y+Y table. */
738 static void decode_gray_bitstream(HYuvContext *s, int count){
743 for(i=0; i<count; i++){
744 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
748 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Entropy-code count 4:2:2 sample groups from s->temp into s->pb.
 * Three variants: stats-only (PASS1/context, optionally no output),
 * and the plain fast path. Fails if the output buffer could overflow. */
749 static int encode_422_bitstream(HYuvContext *s, int count){
752 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
753 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
/* LOAD4 helper: fetch y0,y1,u0,v0 for group i */
758 int y0 = s->temp[0][2*i];\
759 int y1 = s->temp[0][2*i+1];\
760 int u0 = s->temp[1][i];\
761 int v0 = s->temp[2][i];
/* pass-1: update stats; also emit bits unless output is suppressed */
764 if(s->flags&CODEC_FLAG_PASS1){
765 for(i=0; i<count; i++){
773 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
776 for(i=0; i<count; i++){
779 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
781 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
783 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
785 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
/* fast path: no statistics gathering */
788 for(i=0; i<count; i++){
790 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
791 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
792 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
793 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
/* Entropy-code count luma-only pairs; same three variants as
 * encode_422_bitstream (stats / stats+output / plain). */
799 static int encode_gray_bitstream(HYuvContext *s, int count){
802 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
803 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
/* LOAD2 helper: fetch the luma pair for group i */
808 int y0 = s->temp[0][2*i];\
809 int y1 = s->temp[0][2*i+1];
/* WRITE2 helper: emit both codes */
814 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
815 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
818 if(s->flags&CODEC_FLAG_PASS1){
819 for(i=0; i<count; i++){
824 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
828 for(i=0; i<count; i++){
834 for(i=0; i<count; i++){
841 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
/* Decode count BGR32 pixels into temp[0]. Tries the joint 3-channel
 * table first (value from pix_bgr_map); on miss decodes B,G,R
 * separately, adding G back when decorrelate is set. alpha selects
 * whether a 4th (ignored) code is consumed. decorrelate/alpha are
 * compile-time constants via av_always_inline specialisation. */
843 static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
845 for(i=0; i<count; i++){
846 int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
848 *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
849 }else if(decorrelate){
850 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
851 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
852 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
854 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
855 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
856 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
/* alpha present in the stream but discarded */
859 get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?!
/* Dispatch to the right decode_bgr_1 specialisation for the current
 * decorrelate setting and bpp (24 = no alpha, 32 = alpha present). */
863 static void decode_bgr_bitstream(HYuvContext *s, int count){
865 if(s->bitstream_bpp==24)
866 decode_bgr_1(s, count, 1, 0);
868 decode_bgr_1(s, count, 1, 1);
870 if(s->bitstream_bpp==24)
871 decode_bgr_1(s, count, 0, 0);
873 decode_bgr_1(s, count, 0, 1);
/* Entropy-code count decorrelated BGR pixels (G, then B-G, R-G) with
 * the usual stats-only / stats+output / plain variants. */
877 static int encode_bgr_bitstream(HYuvContext *s, int count){
880 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
881 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
/* LOAD3 helper: fetch G and green-decorrelated B/R for pixel i */
886 int g= s->temp[0][4*i+G];\
887 int b= (s->temp[0][4*i+B] - g) & 0xff;\
888 int r= (s->temp[0][4*i+R] - g) & 0xff;
/* WRITE3 helper: emit the three channel codes */
894 put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
895 put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
896 put_bits(&s->pb, s->len[2][r], s->bits[2][r]);
898 if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
899 for(i=0; i<count; i++){
903 }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
904 for(i=0; i<count; i++){
910 for(i=0; i<count; i++){
918 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Notify the caller of newly-decoded rows [last_slice_end, y) via
 * draw_horiz_band, aligning to even rows for 420 chroma; no-op when no
 * callback is installed. */
919 static void draw_slice(HYuvContext *s, int y){
923 if(s->avctx->draw_horiz_band==NULL)
926 h= y - s->last_slice_end;
/* 12bpp (=420) has half-height chroma: cy presumably y/2 here */
929 if(s->bitstream_bpp==12){
935 offset[0] = s->picture.linesize[0]*y;
936 offset[1] = s->picture.linesize[1]*cy;
937 offset[2] = s->picture.linesize[2]*cy;
941 s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
943 s->last_slice_end= y + h;
/* Decode one packet into s->picture: byteswap the input, (in context
 * mode) re-read in-band Huffman tables, then reconstruct the frame
 * per predictor (LEFT / PLANE / MEDIAN) for YUV, or bottom-up for RGB.
 * Returns bytes consumed, rounded to 32-bit words plus table bytes.
 * NOTE(review): this listing is heavily elided; comments cover the
 * visible control flow only. */
946 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
947 const uint8_t *buf = avpkt->data;
948 int buf_size = avpkt->size;
949 HYuvContext *s = avctx->priv_data;
950 const int width= s->width;
951 const int width2= s->width>>1;
952 const int height= s->height;
953 int fake_ystride, fake_ustride, fake_vstride;
954 AVFrame * const p= &s->picture;
957 AVFrame *picture = data;
/* huffyuv streams are stored byteswapped in 32-bit words */
959 s->bitstream_buffer= av_fast_realloc(s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
961 s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
964 avctx->release_buffer(avctx, p);
967 if(avctx->get_buffer(avctx, p) < 0){
968 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
/* context mode: per-frame tables precede the coded samples */
973 table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
978 if((unsigned)(buf_size-table_size) >= INT_MAX/8)
981 init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);
/* interlaced content predicts from two rows up */
983 fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
984 fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
985 fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];
987 s->last_slice_end= 0;
989 if(s->bitstream_bpp<24){
991 int lefty, leftu, leftv;
992 int lefttopy, lefttopu, lefttopv;
/* first 4 bytes are raw seed samples (stored swapped) */
995 p->data[0][3]= get_bits(&s->gb, 8);
996 p->data[0][2]= get_bits(&s->gb, 8);
997 p->data[0][1]= get_bits(&s->gb, 8);
998 p->data[0][0]= get_bits(&s->gb, 8);
1000 av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
1004 leftv= p->data[2][0]= get_bits(&s->gb, 8);
1005 lefty= p->data[0][1]= get_bits(&s->gb, 8);
1006 leftu= p->data[1][0]= get_bits(&s->gb, 8);
1007 p->data[0][0]= get_bits(&s->gb, 8);
1009 switch(s->predictor){
/* LEFT / PLANE: first line (minus seed pixels) is left-predicted */
1012 decode_422_bitstream(s, width-2);
1013 lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1014 if(!(s->flags&CODEC_FLAG_GRAY)){
1015 leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1016 leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1019 for(cy=y=1; y<s->height; y++,cy++){
1020 uint8_t *ydst, *udst, *vdst;
/* 420: odd luma-only rows between chroma rows */
1022 if(s->bitstream_bpp==12){
1023 decode_gray_bitstream(s, width);
1025 ydst= p->data[0] + p->linesize[0]*y;
1027 lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
1028 if(s->predictor == PLANE){
1030 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1033 if(y>=s->height) break;
1038 ydst= p->data[0] + p->linesize[0]*y;
1039 udst= p->data[1] + p->linesize[1]*cy;
1040 vdst= p->data[2] + p->linesize[2]*cy;
1042 decode_422_bitstream(s, width);
1043 lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
1044 if(!(s->flags&CODEC_FLAG_GRAY)){
1045 leftu= add_left_prediction(udst, s->temp[1], width2, leftu);
1046 leftv= add_left_prediction(vdst, s->temp[2], width2, leftv);
1048 if(s->predictor == PLANE){
1049 if(cy>s->interlaced){
1050 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1051 if(!(s->flags&CODEC_FLAG_GRAY)){
1052 s->dsp.add_bytes(udst, udst - fake_ustride, width2);
1053 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
1058 draw_slice(s, height);
/* MEDIAN: special-case the first row(s), then median predict */
1062 /* first line except first 2 pixels is left predicted */
1063 decode_422_bitstream(s, width-2);
1064 lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1065 if(!(s->flags&CODEC_FLAG_GRAY)){
1066 leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1067 leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1072 /* second line is left predicted for interlaced case */
1074 decode_422_bitstream(s, width);
1075 lefty= add_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
1076 if(!(s->flags&CODEC_FLAG_GRAY)){
/* NOTE(review): linesize[2] for U and [1] for V looks swapped but
 * both read chroma line 1 of equal-stride planes -- confirm upstream */
1077 leftu= add_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
1078 leftv= add_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
1083 /* next 4 pixels are left predicted too */
1084 decode_422_bitstream(s, 4);
1085 lefty= add_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
1086 if(!(s->flags&CODEC_FLAG_GRAY)){
1087 leftu= add_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
1088 leftv= add_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
1091 /* next line except the first 4 pixels is median predicted */
1092 lefttopy= p->data[0][3];
1093 decode_422_bitstream(s, width-4);
1094 s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
1095 if(!(s->flags&CODEC_FLAG_GRAY)){
1096 lefttopu= p->data[1][1];
1097 lefttopv= p->data[2][1];
1098 s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
1099 s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
/* steady-state median-predicted rows */
1103 for(; y<height; y++,cy++){
1104 uint8_t *ydst, *udst, *vdst;
1106 if(s->bitstream_bpp==12){
1108 decode_gray_bitstream(s, width);
1109 ydst= p->data[0] + p->linesize[0]*y;
1110 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1113 if(y>=height) break;
1117 decode_422_bitstream(s, width);
1119 ydst= p->data[0] + p->linesize[0]*y;
1120 udst= p->data[1] + p->linesize[1]*cy;
1121 vdst= p->data[2] + p->linesize[2]*cy;
1123 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1124 if(!(s->flags&CODEC_FLAG_GRAY)){
1125 s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
1126 s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
1130 draw_slice(s, height);
/* RGB path: frame is stored bottom-up */
1136 int leftr, leftg, leftb;
1137 const int last_line= (height-1)*p->linesize[0];
1139 if(s->bitstream_bpp==32){
1140 skip_bits(&s->gb, 8);
1141 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1142 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1143 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1145 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1146 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1147 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1148 skip_bits(&s->gb, 8);
1152 switch(s->predictor){
1155 decode_bgr_bitstream(s, width-1);
1156 add_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb);
1158 for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
1159 decode_bgr_bitstream(s, width);
1161 add_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb);
1162 if(s->predictor == PLANE){
1163 if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
1164 s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
1165 p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
1169 draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
1172 av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
1176 av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
1183 *data_size = sizeof(AVFrame);
1185 return (get_bits_count(&s->gb)+31)/32*4 + table_size;
1187 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
/* Release the per-plane scratch rows allocated by alloc_temp(). */
1189 static int common_end(HYuvContext *s){
1193 av_freep(&s->temp[i]);
1198 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Decoder teardown: free the byteswap buffer and all VLC tables
 * (scratch rows presumably freed via common_end in elided lines). */
1199 static av_cold int decode_end(AVCodecContext *avctx)
1201 HYuvContext *s = avctx->priv_data;
1205 av_freep(&s->bitstream_buffer);
1208 free_vlc(&s->vlc[i]);
1213 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1215 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Encode one frame into buf: optionally emit per-frame tables (context
 * mode), apply the chosen predictor per plane, entropy-code rows, then
 * byteswap the output and (in pass 1) dump statistics to stats_out.
 * Returns the byte size of the encoded frame.
 * NOTE(review): this listing is heavily elided; comments cover the
 * visible control flow only. */
1216 static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
1217 HYuvContext *s = avctx->priv_data;
1218 AVFrame *pict = data;
1219 const int width= s->width;
1220 const int width2= s->width>>1;
1221 const int height= s->height;
1222 const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
1223 const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
1224 const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
1225 AVFrame * const p= &s->picture;
1229 p->pict_type= FF_I_TYPE;
/* context mode: rebuild tables from current stats, store them in-band,
 * then decay the stats for the next frame */
1234 generate_len_table(s->len[i], s->stats[i], 256);
1235 if(generate_bits_table(s->bits[i], s->len[i])<0)
1237 size+= store_table(s, s->len[i], &buf[size]);
1241 for(j=0; j<256; j++)
1242 s->stats[i][j] >>= 1;
1245 init_put_bits(&s->pb, buf+size, buf_size-size);
1247 if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
1248 int lefty, leftu, leftv, y, cy;
/* raw seed samples, written in the order the decoder reads them back */
1250 put_bits(&s->pb, 8, leftv= p->data[2][0]);
1251 put_bits(&s->pb, 8, lefty= p->data[0][1]);
1252 put_bits(&s->pb, 8, leftu= p->data[1][0]);
1253 put_bits(&s->pb, 8, p->data[0][0]);
1255 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+2, width-2 , lefty);
1256 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+1, width2-1, leftu);
1257 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+1, width2-1, leftv);
1259 encode_422_bitstream(s, width-2);
1261 if(s->predictor==MEDIAN){
1262 int lefttopy, lefttopu, lefttopv;
/* mirror of the decoder's MEDIAN special-cased leading rows */
1265 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
1266 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
1267 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);
1269 encode_422_bitstream(s, width);
1273 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
1274 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
1275 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);
1277 encode_422_bitstream(s, 4);
1279 lefttopy= p->data[0][3];
1280 lefttopu= p->data[1][1];
1281 lefttopv= p->data[2][1];
1282 s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
1283 s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
1284 s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
1285 encode_422_bitstream(s, width-4);
/* steady-state median-predicted rows */
1288 for(; y<height; y++,cy++){
1289 uint8_t *ydst, *udst, *vdst;
1291 if(s->bitstream_bpp==12){
1293 ydst= p->data[0] + p->linesize[0]*y;
1294 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1295 encode_gray_bitstream(s, width);
1298 if(y>=height) break;
1300 ydst= p->data[0] + p->linesize[0]*y;
1301 udst= p->data[1] + p->linesize[1]*cy;
1302 vdst= p->data[2] + p->linesize[2]*cy;
1304 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1305 s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
1306 s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
1308 encode_422_bitstream(s, width);
/* LEFT / PLANE predictors */
1311 for(cy=y=1; y<height; y++,cy++){
1312 uint8_t *ydst, *udst, *vdst;
1314 /* encode a luma only line & y++ */
1315 if(s->bitstream_bpp==12){
1316 ydst= p->data[0] + p->linesize[0]*y;
1318 if(s->predictor == PLANE && s->interlaced < y){
1319 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1321 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1323 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1325 encode_gray_bitstream(s, width);
1327 if(y>=height) break;
1330 ydst= p->data[0] + p->linesize[0]*y;
1331 udst= p->data[1] + p->linesize[1]*cy;
1332 vdst= p->data[2] + p->linesize[2]*cy;
/* PLANE: vertical diff first, then left-predict the residual;
 * temp[2] holds U then V halves back to back */
1334 if(s->predictor == PLANE && s->interlaced < cy){
1335 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1336 s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
1337 s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
1339 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1340 leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
1341 leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
1343 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1344 leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
1345 leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
1348 encode_422_bitstream(s, width);
/* RGB path: encode bottom-up to match the decoder */
1351 }else if(avctx->pix_fmt == PIX_FMT_RGB32){
1352 uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
1353 const int stride = -p->linesize[0];
1354 const int fake_stride = -fake_ystride;
1356 int leftr, leftg, leftb;
1358 put_bits(&s->pb, 8, leftr= data[R]);
1359 put_bits(&s->pb, 8, leftg= data[G]);
1360 put_bits(&s->pb, 8, leftb= data[B]);
1361 put_bits(&s->pb, 8, 0);
1363 sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
1364 encode_bgr_bitstream(s, width-1);
1366 for(y=1; y<s->height; y++){
1367 uint8_t *dst = data + y*stride;
1368 if(s->predictor == PLANE && s->interlaced < y){
1369 s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
1370 sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
1372 sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
1374 encode_bgr_bitstream(s, width);
1377 av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
/* pad so the final bswap of whole 32-bit words cannot read past size */
1381 size+= (put_bits_count(&s->pb)+31)/8;
1382 put_bits(&s->pb, 16, 0);
1383 put_bits(&s->pb, 15, 0);
/* pass-1: periodically serialise the accumulated statistics */
1386 if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
1388 char *p= avctx->stats_out;
1389 char *end= p + 1024*30;
1391 for(j=0; j<256; j++){
1392 snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
1396 snprintf(p, end-p, "\n");
1400 avctx->stats_out[0] = '\0';
1401 if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
1402 flush_put_bits(&s->pb);
1403 s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
1406 s->picture_number++;
/* Encoder teardown: free extradata and the stats buffer (scratch rows
 * presumably freed via common_end in elided lines). */
1411 static av_cold int encode_end(AVCodecContext *avctx)
1413 HYuvContext *s = avctx->priv_data;
1417 av_freep(&avctx->extradata);
1418 av_freep(&avctx->stats_out);
1422 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
1424 #if CONFIG_HUFFYUV_DECODER
/* Decoder registration (name/type/id/callback fields elided in dump). */
1425 AVCodec huffyuv_decoder = {
1429 sizeof(HYuvContext),
1434 CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
1436 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1440 #if CONFIG_FFVHUFF_DECODER
/* FFmpeg-variant decoder registration (fields elided in dump). */
1441 AVCodec ffvhuff_decoder = {
1445 sizeof(HYuvContext),
1450 CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
1452 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1456 #if CONFIG_HUFFYUV_ENCODER
/* Encoder registration; note: no YUV420P, matching encode_init check. */
1457 AVCodec huffyuv_encoder = {
1461 sizeof(HYuvContext),
1465 .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1466 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1470 #if CONFIG_FFVHUFF_ENCODER
/* FFmpeg-variant encoder registration; additionally allows YUV420P. */
1471 AVCodec ffvhuff_encoder = {
1475 sizeof(HYuvContext),
1479 .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1480 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),