2 * Intel Indeo 3 (IV31, IV32, etc.) video decoder for ffmpeg
3 * written, produced, and directed by Alan Smithee
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
29 #include "bytestream.h"
31 #include "indeo3data.h"
/* Fields of the per-frame plane-buffer struct (the struct header is not
 * visible in this extract).  Sizes are set in iv_alloc_frames(). */
38     unsigned int    the_buf_size;   /* total byte size of the shared allocation (stored on frame 0 only — see iv_alloc_frames) */
39     unsigned short  y_w, y_h;       /* padded luma plane width/height */
40     unsigned short  uv_w, uv_h;     /* padded chroma plane width/height (luma/4, rounded up — see iv_alloc_frames) */
/* Decoder private context (several fields are elided in this extract,
 * e.g. width/height, frame, buf, iv_frame[], cur_frame/ref_frame, ModPred —
 * all referenced by the functions below). */
43 typedef struct Indeo3DecodeContext {
44     AVCodecContext *avctx;
54     uint8_t *corrector_type;    /* 24*256 lookup table built in build_modpred() */
55 } Indeo3DecodeContext;
/* Per-row thresholds used when filling s->corrector_type: for table row i,
 * entries j < corrector_type_0[i] get corrector type 1 (see build_modpred). */
57 static const uint8_t corrector_type_0[24] = {
58     195, 159, 133, 115, 101,  93,  87,  77,
59     195, 159, 133, 115, 101,  93,  87,  77,
60     128,  79,  79,  79,  79,  79,  79,  79
/* Corrector types for table indices j in 248..255 (see build_modpred). */
63 static const uint8_t corrector_type_2[8] = { 9, 7, 6, 8, 5, 4, 3, 2 };
/**
 * Build the two lookup tables used during chunk decoding:
 *  - s->ModPred:        8 prediction tables of 128 bytes each, one formula per table;
 *  - s->corrector_type: 24 tables of 256 bytes mapping a delta byte to a
 *    corrector "type" (1 below the per-row threshold, 0 in the middle band,
 *    corrector_type_2[] values for j >= 248, with a special case at i==16/j==248).
 * Returns 0 on success or AVERROR(ENOMEM) if either allocation fails.
 * NOTE(review): on the second-allocation failure path, ModPred is not freed
 * here — presumably iv_free_func() handles it; confirm against the elided code.
 */
65 static av_cold int build_modpred(Indeo3DecodeContext *s)
69     if (!(s->ModPred = av_malloc(8 * 128)))
70         return AVERROR(ENOMEM);
/* Each of the 8 sub-tables applies a different rounding/modulus formula,
 * with saturation at the high end of the range for some tables. */
72     for (i=0; i < 128; ++i) {
73         s->ModPred[i+0*128] = i >  126    ? 254 : 2*(i + 1 - ((i + 1) % 2));
74         s->ModPred[i+1*128] = i ==   7    ?  20 :
76                               i == 120    ? 236 : 2*(i + 2 - ((i + 1) % 3));
77         s->ModPred[i+2*128] = i >  125    ? 248 : 2*(i + 2 - ((i + 2) % 4));
78         s->ModPred[i+3*128] =               2*(i + 1 - ((i - 3) % 5));
79         s->ModPred[i+4*128] = i ==   8    ?  20 : 2*(i + 1 - ((i - 3) % 6));
80         s->ModPred[i+5*128] =               2*(i + 4 - ((i + 3) % 7));
81         s->ModPred[i+6*128] = i >  123    ? 240 : 2*(i + 4 - ((i + 4) % 8));
82         s->ModPred[i+7*128] =               2*(i + 5 - ((i + 4) % 9));
85     if (!(s->corrector_type = av_malloc(24 * 256)))
86         return AVERROR(ENOMEM);
88     for (i=0; i < 24; ++i) {
89         for (j=0; j < 256; ++j) {
90             s->corrector_type[i*256+j] = j < corrector_type_0[i]          ? 1 :
91                                          j < 248 || (i == 16 && j == 248) ? 0 :
92                                          corrector_type_2[j - 248];
/* Forward declaration: decodes one plane chunk into 'cur' using 'ref' as the
 * reference plane; defined below, used first by iv_decode_frame(). */
99 static void iv_Decode_Chunk(Indeo3DecodeContext *s, uint8_t *cur,
100   uint8_t *ref, int width, int height, const uint8_t *buf1,
101   long fflags2, const uint8_t *hdr,
102   const uint8_t *buf2, int min_width_160);
/**
 * Allocate one shared buffer holding the Y/U/V planes of both internal frames
 * (iv_frame[0] and iv_frame[1]), with plane dimensions padded up to a multiple
 * of 4 and chroma at 1/4 the luma dimensions (YUV410-style subsampling).
 * Extra guard rows between/around the planes are initialized to 0x80 (mid-gray)
 * so out-of-plane reads by the predictor see neutral values.
 * Returns 0 on success or AVERROR(ENOMEM).
 */
104 static av_cold int iv_alloc_frames(Indeo3DecodeContext *s)
106     int luma_width, luma_height, luma_pixels, chroma_width, chroma_height,
108     unsigned int bufsize;
/* Round plane dimensions up to a multiple of 4. */
110     luma_width   = (s->width  + 3) & (~3);
111     luma_height  = (s->height + 3) & (~3);
112     chroma_width  = ((luma_width  >> 2) + 3) & (~3);
113     chroma_height = ((luma_height >> 2) + 3) & (~3);
114     luma_pixels   = luma_width   * luma_height;
115     chroma_pixels = chroma_width * chroma_height;
/* Two luma planes + guard rows, plus two U and two V planes each with one
 * guard row (hence the *4 on the chroma term). */
117     bufsize = luma_pixels * 2 + luma_width * 3 +
118               (chroma_pixels + chroma_width) * 4;
120     if(!(s->buf = av_malloc(bufsize)))
121         return AVERROR(ENOMEM);
122     s->iv_frame[0].y_w  = s->iv_frame[1].y_w  = luma_width;
123     s->iv_frame[0].y_h  = s->iv_frame[1].y_h  = luma_height;
124     s->iv_frame[0].uv_w = s->iv_frame[1].uv_w = chroma_width;
125     s->iv_frame[0].uv_h = s->iv_frame[1].uv_h = chroma_height;
126     s->iv_frame[0].the_buf_size = bufsize;
/* Carve the single allocation into the six plane pointers; each plane is
 * preceded by one guard row (a full plane-width of bytes). */
128     s->iv_frame[0].Ybuf = s->buf + luma_width;
129     i = luma_pixels + luma_width * 2;
130     s->iv_frame[1].Ybuf = s->buf + i;
131     i += (luma_pixels + luma_width);
132     s->iv_frame[0].Ubuf = s->buf + i;
133     i += (chroma_pixels + chroma_width);
134     s->iv_frame[1].Ubuf = s->buf + i;
135     i += (chroma_pixels + chroma_width);
136     s->iv_frame[0].Vbuf = s->buf + i;
137     i += (chroma_pixels + chroma_width);
138     s->iv_frame[1].Vbuf = s->buf + i;
/* Fill the guard rows immediately before each plane with 0x80. */
140     for(i = 1; i <= luma_width; i++)
141         s->iv_frame[0].Ybuf[-i] = s->iv_frame[1].Ybuf[-i] =
142             s->iv_frame[0].Ubuf[-i] = 0x80;
144     for(i = 1; i <= chroma_width; i++) {
145         s->iv_frame[1].Ubuf[-i] = 0x80;
146         s->iv_frame[0].Vbuf[-i] = 0x80;
147         s->iv_frame[1].Vbuf[-i] = 0x80;
148         s->iv_frame[1].Vbuf[chroma_pixels+i-1] = 0x80;
/* Release decoder-owned tables/buffers (additional frees are elided in this
 * extract; at least the corrector_type table is freed here). */
154 static av_cold void iv_free_func(Indeo3DecodeContext *s)
158     av_free(s->corrector_type);
/**
 * Parse the bitstream frame header and decode the three planes of one frame.
 * Header layout (little-endian): flags1 (16 bit), flags3 (32 bit), flags2
 * (8 bit), height (16 bit), width (16 bit), then the three plane data offsets.
 * flags1 bit 9 selects which of the two internal frames is current vs.
 * reference (double buffering).  A flags3 value of 0x80 returns early with 4
 * — presumably a "null/skip frame" marker; confirm against the spec.
 * NOTE(review): plane offsets offs1..offs3 are taken from the bitstream and
 * used as 'buf + 16 + offs' without visible bounds checks against buf_size —
 * a potential overread on crafted input; verify in the elided code.
 */
161 static unsigned long iv_decode_frame(Indeo3DecodeContext *s,
162                                      const uint8_t *buf, int buf_size)
164     unsigned int hdr_width, hdr_height,
165                  chroma_width, chroma_height;
166     unsigned long fflags1, fflags2, fflags3, offs1, offs2, offs3, offs;
167     const uint8_t *hdr_pos, *buf_pos;
172     fflags1 = bytestream_get_le16(&buf_pos);
173     fflags3 = bytestream_get_le32(&buf_pos);
174     fflags2 = *buf_pos++;
176     hdr_height = bytestream_get_le16(&buf_pos);
177     hdr_width  = bytestream_get_le16(&buf_pos);
179     if(avcodec_check_dimensions(NULL, hdr_width, hdr_height))
/* Chroma planes are 1/4 size, rounded up to a multiple of 4. */
182     chroma_height = ((hdr_height >> 2) + 3) & 0x7ffc;
183     chroma_width  = ((hdr_width  >> 2) + 3) & 0x7ffc;
184     offs1 = bytestream_get_le32(&buf_pos);
185     offs2 = bytestream_get_le32(&buf_pos);
186     offs3 = bytestream_get_le32(&buf_pos);
189     if(fflags3 == 0x80) return 4;
/* Swap current/reference frame roles based on flags1 bit 9. */
191     if(fflags1 & 0x200) {
192         s->cur_frame = s->iv_frame + 1;
193         s->ref_frame = s->iv_frame;
195         s->cur_frame = s->iv_frame;
196         s->ref_frame = s->iv_frame + 1;
/* Luma plane: data starts 'offs' 16-bit words past buf_pos. */
199     buf_pos = buf + 16 + offs1;
200     offs = bytestream_get_le32(&buf_pos);
202     iv_Decode_Chunk(s, s->cur_frame->Ybuf, s->ref_frame->Ybuf, hdr_width,
203                     hdr_height, buf_pos + offs * 2, fflags2, hdr_pos, buf_pos,
204                     FFMIN(hdr_width, 160));
/* Chroma planes are skipped entirely in grayscale mode. */
206     if (!(s->avctx->flags & CODEC_FLAG_GRAY))
209     buf_pos = buf + 16 + offs2;
210     offs = bytestream_get_le32(&buf_pos);
212     iv_Decode_Chunk(s, s->cur_frame->Vbuf, s->ref_frame->Vbuf, chroma_width,
213                     chroma_height, buf_pos + offs * 2, fflags2, hdr_pos, buf_pos,
214                     FFMIN(chroma_width, 40));
216     buf_pos = buf + 16 + offs3;
217     offs = bytestream_get_le32(&buf_pos);
219     iv_Decode_Chunk(s, s->cur_frame->Ubuf, s->ref_frame->Ubuf, chroma_width,
220                     chroma_height, buf_pos + offs * 2, fflags2, hdr_pos, buf_pos,
221                     FFMIN(chroma_width, 40));
/* Fragment of the strip-descriptor struct (ustr_t) used by iv_Decode_Chunk:
 * 0 = horizontal split, nonzero = vertical split (see the unwinding loop at
 * the end of iv_Decode_Chunk). */
234     long split_direction;
/* Bitstream RLE helper macros (bodies partially elided in this extract).
 * They inspect/consume bytes from buf1 and update the rle_v* run-length
 * state shared across the block-decoding switch cases below. */
239 #define LV1_CHECK(buf1,rle_v3,lv1,lp2) \
240     if((lv1 & 0x80) != 0) { \
251 #define RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3) \
264 #define LP2_CHECK(buf1,rle_v3,lp2) \
265     if(lp2 == 0 && rle_v3 != 0) \
273 #define RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2) \
/**
 * Decode one plane (chunk) of an Indeo 3 frame.
 *
 * The plane is processed as a stack of "strips" (strip_tbl[]): bitstream
 * commands recursively split the current strip horizontally or vertically
 * until leaf cells are reached, which are then decoded by one of several
 * block modes (the visible case labels 0, 3, 10, 11 of an elided switch).
 * Cells are corrected against the reference plane ('ref', or the row above
 * via width_tbl[4] for intra prediction) using the correction/ModPred/
 * corrector_type lookup tables, 4 bytes (one uint32_t) at a time.
 *
 * @param s             decoder context (tables, avctx for logging)
 * @param cur           destination plane
 * @param ref           reference plane (previous frame's same plane)
 * @param width,height  plane dimensions in pixels
 * @param buf1          cell/corrector data stream
 * @param fflags2       per-frame delta-table bias from the frame header
 * @param hdr           header stream with per-cell table selectors
 * @param buf2          motion-vector table (pairs of signed bytes)
 * @param min_width_160 strip width quantum (160 for luma, 40 for chroma)
 *
 * NOTE(review): this function is heavily elided here; the comments below
 * only annotate what the visible lines establish.
 */
281 static void iv_Decode_Chunk(Indeo3DecodeContext *s,
282   uint8_t *cur, uint8_t *ref, int width, int height,
283   const uint8_t *buf1, long fflags2, const uint8_t *hdr,
284   const uint8_t *buf2, int min_width_160)
287   unsigned long bit_pos, lv, lv1, lv2;
288   long *width_tbl, width_tbl_arr[10];
289   const signed char *ref_vectors;
290   uint8_t *cur_frm_pos, *ref_frm_pos, *cp, *cp2;
291   uint32_t *cur_lp, *ref_lp;
292   const uint32_t *correction_lp[2], *correctionloworder_lp[2], *correctionhighorder_lp[2];
293   uint8_t *correction_type_sp[2];
294   ustr_t strip_tbl[20], *strip;
295   int i, j, k, lp1, lp2, flag1, cmd, blks_width, blks_height, region_160_width,
296       rle_v1, rle_v2, rle_v3;
/* width_tbl[j] = j * (width in uint32 words); index -1 is valid by design
 * (width_tbl points one past the start of width_tbl_arr). */
302   width_tbl = width_tbl_arr + 1;
303   i = (width < 0 ? width + 3 : width)/4;
304   for(j = -1; j < 8; j++)
305     width_tbl[j] = i * j;
/* Largest multiple of min_width_160 strictly below width. */
309   for(region_160_width = 0; region_160_width < (width - min_width_160); region_160_width += min_width_160);
/* Root strip covers the whole plane; its width is min_width_160 scaled up
 * by powers of two until it covers 'width'. */
311   strip->ypos = strip->xpos = 0;
312   for(strip->width = min_width_160; width > strip->width; strip->width *= 2);
313   strip->height = height;
314   strip->split_direction = 0;
315   strip->split_flag = 0;
320   rle_v1 = rle_v2 = rle_v3 = 0;
/* Main loop: process strips until the stack is exhausted. */
322   while(strip >= strip_tbl) {
329     cmd = (bit_buf >> bit_pos) & 0x03;
/* cmd 0: horizontal split — push a copy with halved (8-aligned) height. */
333       memcpy(strip, strip-1, sizeof(ustr_t));
334       strip->split_flag = 1;
335       strip->split_direction = 0;
336       strip->height = (strip->height > 8 ? ((strip->height+8)>>4)<<3 : 4);
338     } else if(cmd == 1) {
/* cmd 1: vertical split — push a copy with halved (8-aligned) width. */
340       memcpy(strip, strip-1, sizeof(ustr_t));
341       strip->split_flag = 1;
342       strip->split_direction = 1;
343       strip->width = (strip->width > 8 ? ((strip->width+8)>>4)<<3 : 4);
345     } else if(cmd == 2) {
346       if(strip->usl7 == 0) {
351     } else if(cmd == 3) {
352       if(strip->usl7 == 0) {
/* Leaf cell with motion compensation: *buf1 indexes the vector table. */
354           ref_vectors = (const signed char*)buf2 + (*buf1 * 2);
360     cur_frm_pos = cur + width * strip->ypos + strip->xpos;
362     if((blks_width = strip->width) < 0)
365     blks_height = strip->height;
/* Reference position: motion-compensated (dy=ref_vectors[0], dx=[1]) or,
 * for intra, the row 4 lines above the current position. */
367     if(ref_vectors != NULL) {
368       ref_frm_pos = ref + (ref_vectors[0] + strip->ypos) * width +
369         ref_vectors[1] + strip->xpos;
371       ref_frm_pos = cur_frm_pos - width_tbl[4];
380     cmd = (bit_buf >> bit_pos) & 0x03;
/* cmd 0 (or any inter cell): straight copy from the reference. */
382     if(cmd == 0 || ref_vectors != NULL) {
383       for(lp1 = 0; lp1 < blks_width; lp1++) {
384         for(i = 0, j = 0; i < blks_height; i++, j += width_tbl[1])
385           ((uint32_t *)cur_frm_pos)[j] = ((uint32_t *)ref_frm_pos)[j];
/* Modes 8..15 with k in {0,3,10}: pre-transform the reference row through
 * the matching 128-entry ModPred sub-table. */
397       if((lv - 8) <= 7 && (k == 0 || k == 3 || k == 10)) {
398         cp2 = s->ModPred + ((lv - 8) << 7);
400         for(i = 0; i < blks_width << 2; i++) {
/* Select the correction tables: k 1/4 use two distinct delta tables taken
 * from the header nibbles; otherwise a single table serves both slots. */
406       if(k == 1 || k == 4) {
407         lv = (hdr[j] & 0xf) + fflags2;
408         correction_type_sp[0] = s->corrector_type + (lv << 8);
409         correction_lp[0] = correction + (lv << 8);
410         lv = (hdr[j] >> 4) + fflags2;
411         correction_lp[1] = correction + (lv << 8);
412         correction_type_sp[1] = s->corrector_type + (lv << 8);
414         correctionloworder_lp[0] = correctionloworder_lp[1] = correctionloworder + (lv << 8);
415         correctionhighorder_lp[0] = correctionhighorder_lp[1] = correctionhighorder + (lv << 8);
416         correction_type_sp[0] = correction_type_sp[1] = s->corrector_type + (lv << 8);
417         correction_lp[0] = correction_lp[1] = correction + (lv << 8);
/* Block mode 0: 4x4 cells, full-resolution correction. */
422     case 0: /********** CASE 0 **********/
423       for( ; blks_height > 0; blks_height -= 4) {
424         for(lp1 = 0; lp1 < blks_width; lp1++) {
425           for(lp2 = 0; lp2 < 4; ) {
427             cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2];
428             ref_lp = ((uint32_t *)ref_frm_pos) + width_tbl[lp2];
430             switch(correction_type_sp[0][k]) {
/* Apply a 4-byte SIMD-in-a-register correction: halve, add delta, double. */
432               *cur_lp = le2me_32(((le2me_32(*ref_lp) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
436               res = ((le2me_16(((unsigned short *)(ref_lp))[0]) >> 1) + correction_lp[lp2 & 0x01][*buf1]) << 1;
437               ((unsigned short *)cur_lp)[0] = le2me_16(res);
438               res = ((le2me_16(((unsigned short *)(ref_lp))[1]) >> 1) + correction_lp[lp2 & 0x01][k]) << 1;
439               ((unsigned short *)cur_lp)[1] = le2me_16(res);
445               for(i = 0, j = 0; i < 2; i++, j += width_tbl[1])
446                 cur_lp[j] = ref_lp[j];
452               for(i = 0, j = 0; i < (3 - lp2); i++, j += width_tbl[1])
453                 cur_lp[j] = ref_lp[j];
459               RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
461               if(rle_v1 == 1 || ref_vectors != NULL) {
462                 for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
463                   cur_lp[j] = ref_lp[j];
466               RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
473               LP2_CHECK(buf1,rle_v3,lp2)
475               for(i = 0, j = 0; i < (4 - lp2); i++, j += width_tbl[1])
476                 cur_lp[j] = ref_lp[j];
488               if(ref_vectors != NULL) {
489                 for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
490                   cur_lp[j] = ref_lp[j];
/* Fill value: low 7 bits of lv1 replicated (doubled). */
497                 lv = (lv1 & 0x7F) << 1;
500                 for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
503               LV1_CHECK(buf1,rle_v3,lv1,lp2)
/* Advance both positions to the next 4-row band of the cell. */
514         cur_frm_pos += ((width - blks_width) * 4);
515         ref_frm_pos += ((width - blks_width) * 4);
/* Block mode 3: 4x8 cells, vertically interpolated (every other line is the
 * average of its neighbours); intra-only per the check below. */
520     case 3: /********** CASE 3 **********/
521       if(ref_vectors != NULL)
525       for( ; blks_height > 0; blks_height -= 8) {
526         for(lp1 = 0; lp1 < blks_width; lp1++) {
527           for(lp2 = 0; lp2 < 4; ) {
/* Note: the "reference" here is the previous line pair of cur itself. */
530             cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2];
531             ref_lp = ((uint32_t *)cur_frm_pos) + width_tbl[(lp2 * 2) - 1];
533             switch(correction_type_sp[lp2 & 0x01][k]) {
535               cur_lp[width_tbl[1]] = le2me_32(((le2me_32(*ref_lp) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
/* Interpolate the even line as the byte-wise average of its neighbours
 * (the 0xFEFEFEFE mask drops carry bleed between bytes). */
536               if(lp2 > 0 || flag1 == 0 || strip->ypos != 0)
537                 cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
539                 cur_lp[0] = le2me_32(((le2me_32(*ref_lp) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
544               res = ((le2me_16(((unsigned short *)ref_lp)[0]) >> 1) + correction_lp[lp2 & 0x01][*buf1]) << 1;
545               ((unsigned short *)cur_lp)[width_tbl[2]] = le2me_16(res);
546               res = ((le2me_16(((unsigned short *)ref_lp)[1]) >> 1) + correction_lp[lp2 & 0x01][k]) << 1;
547               ((unsigned short *)cur_lp)[width_tbl[2]+1] = le2me_16(res);
549               if(lp2 > 0 || flag1 == 0 || strip->ypos != 0)
550                 cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
552                 cur_lp[0] = cur_lp[width_tbl[1]];
559               for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
567               for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1])
589               RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
592               for(i = 0, j = 0; i < 8; i++, j += width_tbl[1])
593                 cur_lp[j] = ref_lp[j];
596               RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
599               rle_v2 = (*buf1) - 1;
603               LP2_CHECK(buf1,rle_v3,lp2)
605               for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1])
611               av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n");
613               lv = (lv1 & 0x7F) << 1;
617               for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
620               LV1_CHECK(buf1,rle_v3,lv1,lp2)
631         cur_frm_pos += (((width * 2) - blks_width) * 4);
/* Block mode 10: 8x8 cells decoded at half horizontal resolution.
 * Two sub-paths: intra (pixels doubled from lv1/lv2, lines interpolated)
 * and inter (corrected against ref_frm_pos, lines duplicated). */
636     case 10: /********** CASE 10 **********/
637       if(ref_vectors == NULL) {
640         for( ; blks_height > 0; blks_height -= 8) {
641           for(lp1 = 0; lp1 < blks_width; lp1 += 2) {
642             for(lp2 = 0; lp2 < 4; ) {
644               cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2];
645               ref_lp = ((uint32_t *)cur_frm_pos) + width_tbl[(lp2 * 2) - 1];
/* Duplicate each reference byte horizontally (byte order depends on
 * endianness, hence the two variants). */
648               if(lp2 == 0 && flag1 != 0) {
649 #ifdef WORDS_BIGENDIAN
650                 lv1 = lv1 & 0xFF00FF00;
651                 lv1 = (lv1 >> 8) | lv1;
652                 lv2 = lv2 & 0xFF00FF00;
653                 lv2 = (lv2 >> 8) | lv2;
655                 lv1 = lv1 & 0x00FF00FF;
656                 lv1 = (lv1 << 8) | lv1;
657                 lv2 = lv2 & 0x00FF00FF;
658                 lv2 = (lv2 << 8) | lv2;
662               switch(correction_type_sp[lp2 & 0x01][k]) {
664                 cur_lp[width_tbl[1]] = le2me_32(((le2me_32(lv1) >> 1) + correctionloworder_lp[lp2 & 0x01][k]) << 1);
665                 cur_lp[width_tbl[1]+1] = le2me_32(((le2me_32(lv2) >> 1) + correctionhighorder_lp[lp2 & 0x01][k]) << 1);
666                 if(lp2 > 0 || strip->ypos != 0 || flag1 == 0) {
667                   cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
668                   cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
670                   cur_lp[0] = cur_lp[width_tbl[1]];
671                   cur_lp[1] = cur_lp[width_tbl[1]+1];
677                 cur_lp[width_tbl[1]] = le2me_32(((le2me_32(lv1) >> 1) + correctionloworder_lp[lp2 & 0x01][*buf1]) << 1);
678                 cur_lp[width_tbl[1]+1] = le2me_32(((le2me_32(lv2) >> 1) + correctionloworder_lp[lp2 & 0x01][k]) << 1);
679                 if(lp2 > 0 || strip->ypos != 0 || flag1 == 0) {
680                   cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
681                   cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
683                   cur_lp[0] = cur_lp[width_tbl[1]];
684                   cur_lp[1] = cur_lp[width_tbl[1]+1];
693                 for(i = 0, j = width_tbl[1]; i < 3; i++, j += width_tbl[1]) {
697                   cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
698                   cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
700                   for(i = 0, j = 0; i < 4; i++, j += width_tbl[1]) {
711                 if(lp2 == 0 && flag1 != 0) {
712                   for(i = 0, j = width_tbl[1]; i < 5; i++, j += width_tbl[1]) {
716                   cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
717                   cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
719                   for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1]) {
730                 RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
733                   for(i = 0, j = width_tbl[1]; i < 7; i++, j += width_tbl[1]) {
737                   cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
738                   cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
740                   for(i = 0, j = 0; i < 8; i++, j += width_tbl[1]) {
746                 RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
750                 rle_v2 = (*buf1) - 1;
753                 LP2_CHECK(buf1,rle_v3,lp2)
755                 if(lp2 == 0 && flag1 != 0) {
756                   for(i = 0, j = width_tbl[1]; i < 7; i++, j += width_tbl[1]) {
760                   cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
761                   cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
763                   for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1]) {
788                 av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n");
790                 lv = (lv1 & 0x7F) << 1;
793                 for(i = 0, j = 0; i < 8; i++, j += width_tbl[1])
795                 LV1_CHECK(buf1,rle_v3,lv1,lp2)
806           cur_frm_pos += (((width * 2) - blks_width) * 4);
/* Inter sub-path of mode 10: two uint32 columns per step, lines duplicated
 * vertically rather than interpolated. */
810         for( ; blks_height > 0; blks_height -= 8) {
811           for(lp1 = 0; lp1 < blks_width; lp1 += 2) {
812             for(lp2 = 0; lp2 < 4; ) {
814               cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2];
815               ref_lp = ((uint32_t *)ref_frm_pos) + width_tbl[lp2 * 2];
817               switch(correction_type_sp[lp2 & 0x01][k]) {
819                 lv1 = correctionloworder_lp[lp2 & 0x01][k];
820                 lv2 = correctionhighorder_lp[lp2 & 0x01][k];
821                 cur_lp[0] = le2me_32(((le2me_32(ref_lp[0]) >> 1) + lv1) << 1);
822                 cur_lp[1] = le2me_32(((le2me_32(ref_lp[1]) >> 1) + lv2) << 1);
823                 cur_lp[width_tbl[1]] = le2me_32(((le2me_32(ref_lp[width_tbl[1]]) >> 1) + lv1) << 1);
824                 cur_lp[width_tbl[1]+1] = le2me_32(((le2me_32(ref_lp[width_tbl[1]+1]) >> 1) + lv2) << 1);
829                 lv1 = correctionloworder_lp[lp2 & 0x01][*buf1++];
830                 lv2 = correctionloworder_lp[lp2 & 0x01][k];
831                 cur_lp[0] = le2me_32(((le2me_32(ref_lp[0]) >> 1) + lv1) << 1);
832                 cur_lp[1] = le2me_32(((le2me_32(ref_lp[1]) >> 1) + lv2) << 1);
833                 cur_lp[width_tbl[1]] = le2me_32(((le2me_32(ref_lp[width_tbl[1]]) >> 1) + lv1) << 1);
834                 cur_lp[width_tbl[1]+1] = le2me_32(((le2me_32(ref_lp[width_tbl[1]+1]) >> 1) + lv2) << 1);
840                 for(i = 0, j = 0; i < 4; i++, j += width_tbl[1]) {
841                   cur_lp[j] = ref_lp[j];
842                   cur_lp[j+1] = ref_lp[j+1];
850                 for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1]) {
851                   cur_lp[j] = ref_lp[j];
852                   cur_lp[j+1] = ref_lp[j+1];
860                 RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
861                 for(i = 0, j = 0; i < 8; i++, j += width_tbl[1]) {
862                   ((uint32_t *)cur_frm_pos)[j] = ((uint32_t *)ref_frm_pos)[j];
863                   ((uint32_t *)cur_frm_pos)[j+1] = ((uint32_t *)ref_frm_pos)[j+1];
865                 RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
869                 rle_v2 = (*buf1) - 1;
873                 LP2_CHECK(buf1,rle_v3,lp2)
876                 for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1]) {
877                   cur_lp[j] = ref_lp[j];
878                   cur_lp[j+1] = ref_lp[j+1];
884                 av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n");
886                 lv = (lv1 & 0x7F) << 1;
889                 for(i = 0, j = 0; i < 8; i++, j += width_tbl[1])
890                   ((uint32_t *)cur_frm_pos)[j] = ((uint32_t *)cur_frm_pos)[j+1] = lv;
891                 LV1_CHECK(buf1,rle_v3,lv1,lp2)
903           cur_frm_pos += (((width * 2) - blks_width) * 4);
904           ref_frm_pos += (((width * 2) - blks_width) * 4);
/* Block mode 11: 4x8 cells decoded at half vertical resolution (each
 * correction applied to a line pair); inter-only per the check below. */
909     case 11: /********** CASE 11 **********/
910       if(ref_vectors == NULL)
913       for( ; blks_height > 0; blks_height -= 8) {
914         for(lp1 = 0; lp1 < blks_width; lp1++) {
915           for(lp2 = 0; lp2 < 4; ) {
917             cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2];
918             ref_lp = ((uint32_t *)ref_frm_pos) + width_tbl[lp2 * 2];
920             switch(correction_type_sp[lp2 & 0x01][k]) {
922               cur_lp[0] = le2me_32(((le2me_32(*ref_lp) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
923               cur_lp[width_tbl[1]] = le2me_32(((le2me_32(ref_lp[width_tbl[1]]) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
928               lv1 = (unsigned short)(correction_lp[lp2 & 0x01][*buf1++]);
929               lv2 = (unsigned short)(correction_lp[lp2 & 0x01][k]);
930               res = (unsigned short)(((le2me_16(((unsigned short *)ref_lp)[0]) >> 1) + lv1) << 1);
931               ((unsigned short *)cur_lp)[0] = le2me_16(res);
932               res = (unsigned short)(((le2me_16(((unsigned short *)ref_lp)[1]) >> 1) + lv2) << 1);
933               ((unsigned short *)cur_lp)[1] = le2me_16(res);
934               res = (unsigned short)(((le2me_16(((unsigned short *)ref_lp)[width_tbl[2]]) >> 1) + lv1) << 1);
935               ((unsigned short *)cur_lp)[width_tbl[2]] = le2me_16(res);
936               res = (unsigned short)(((le2me_16(((unsigned short *)ref_lp)[width_tbl[2]+1]) >> 1) + lv2) << 1);
937               ((unsigned short *)cur_lp)[width_tbl[2]+1] = le2me_16(res);
943               for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
944                 cur_lp[j] = ref_lp[j];
951               for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1])
952                 cur_lp[j] = ref_lp[j];
959               RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
961               for(i = 0, j = 0; i < 8; i++, j += width_tbl[1])
962                 cur_lp[j] = ref_lp[j];
964               RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
968               rle_v2 = (*buf1) - 1;
972               LP2_CHECK(buf1,rle_v3,lp2)
975               for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1])
976                 cur_lp[j] = ref_lp[j];
981               av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n");
983               lv = (lv1 & 0x7F) << 1;
986               for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
988               LV1_CHECK(buf1,rle_v3,lv1,lp2)
1000         cur_frm_pos += (((width * 2) - blks_width) * 4);
1001         ref_frm_pos += (((width * 2) - blks_width) * 4);
/* After a leaf cell is decoded, pop split strips off the stack and convert
 * each into its sibling (the remaining half of the parent strip). */
1010     if(strip < strip_tbl)
1013     for( ; strip >= strip_tbl; strip--) {
1014       if(strip->split_flag != 0) {
1015         strip->split_flag = 0;
1016         strip->usl7 = (strip-1)->usl7;
1018         if(strip->split_direction) {
/* Vertical split: sibling occupies the right remainder, clipped to the
 * plane width in the last 160-pixel region. */
1019           strip->xpos += strip->width;
1020           strip->width = (strip-1)->width - strip->width;
1021           if(region_160_width <= strip->xpos && width < strip->width + strip->xpos)
1022             strip->width = width - strip->xpos;
/* Horizontal split: sibling occupies the lower remainder. */
1024           strip->ypos += strip->height;
1025           strip->height = (strip-1)->height - strip->height;
/**
 * Codec init: record dimensions, set the YUV410P output format, then build
 * the lookup tables and allocate the internal frame buffers.
 * Returns 0 on success or a negative AVERROR from either helper.
 */
1033 static av_cold int indeo3_decode_init(AVCodecContext *avctx)
1035     Indeo3DecodeContext *s = avctx->priv_data;
1039     s->width  = avctx->width;
1040     s->height = avctx->height;
1041     avctx->pix_fmt = PIX_FMT_YUV410P;
/* iv_alloc_frames() only runs if build_modpred() returned 0. */
1043     if (!(ret = build_modpred(s)))
1044         ret = iv_alloc_frames(s);
/**
 * Decode one packet: run the bitstream decoder into the internal plane
 * buffers, then copy the current frame's Y/U/V planes line-by-line into a
 * freshly acquired AVFrame (respecting its linesize padding) and return it
 * via *data.  In CODEC_FLAG_GRAY mode only the luma plane is copied.
 * NOTE(review): iv_decode_frame()'s return value is ignored here, so a
 * malformed packet is not reported to the caller — verify against the
 * elided code whether that is intentional.
 */
1051 static int indeo3_decode_frame(AVCodecContext *avctx,
1052                                void *data, int *data_size,
1053                                const uint8_t *buf, int buf_size)
1055     Indeo3DecodeContext *s=avctx->priv_data;
1056     uint8_t *src, *dest;
1059     iv_decode_frame(s, buf, buf_size);
/* Recycle the previous output frame before requesting a new one. */
1061     if(s->frame.data[0])
1062         avctx->release_buffer(avctx, &s->frame);
1064     s->frame.reference = 0;
1065     if(avctx->get_buffer(avctx, &s->frame) < 0) {
1066         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
/* Copy luma: source rows are y_w bytes apart, destination uses linesize. */
1070     src = s->cur_frame->Ybuf;
1071     dest = s->frame.data[0];
1072     for (y = 0; y < s->height; y++) {
1073         memcpy(dest, src, s->cur_frame->y_w);
1074         src += s->cur_frame->y_w;
1075         dest += s->frame.linesize[0];
1078     if (!(s->avctx->flags & CODEC_FLAG_GRAY))
/* Chroma planes are height/4 rows of uv_w bytes (YUV410 subsampling). */
1080     src = s->cur_frame->Ubuf;
1081     dest = s->frame.data[1];
1082     for (y = 0; y < s->height / 4; y++) {
1083         memcpy(dest, src, s->cur_frame->uv_w);
1084         src += s->cur_frame->uv_w;
1085         dest += s->frame.linesize[1];
1088     src = s->cur_frame->Vbuf;
1089     dest = s->frame.data[2];
1090     for (y = 0; y < s->height / 4; y++) {
1091         memcpy(dest, src, s->cur_frame->uv_w);
1092         src += s->cur_frame->uv_w;
1093         dest += s->frame.linesize[2];
1097     *data_size=sizeof(AVFrame);
1098     *(AVFrame*)data= s->frame;
/* Codec close: tear down decoder-owned allocations (the cleanup calls are
 * elided in this extract — presumably iv_free_func(); confirm). */
1103 static av_cold int indeo3_decode_end(AVCodecContext *avctx)
1105     Indeo3DecodeContext *s = avctx->priv_data;
/* Codec registration entry (several positional fields — name, type, codec id,
 * init/close callbacks — are elided in this extract). */
1112 AVCodec indeo3_decoder = {
1116     sizeof(Indeo3DecodeContext),
1120     indeo3_decode_frame,
1123     .long_name = NULL_IF_CONFIG_SMALL("Intel Indeo 3"),