 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard.
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
#include "../dsputil.h"
#include "../simple_idct.h"
#include "../mpegvideo.h"

extern void ff_idct_xvid_mmx(short *block);
extern void ff_idct_xvid_mmx2(short *block);

int mm_flags; /* multimedia extension flags */
/* pixel operations */
static const uint64_t mm_bone attribute_used __attribute__ ((aligned(8))) = 0x0101010101010101ULL;
static const uint64_t mm_wone attribute_used __attribute__ ((aligned(8))) = 0x0001000100010001ULL;
static const uint64_t mm_wtwo attribute_used __attribute__ ((aligned(8))) = 0x0002000200020002ULL;

static const uint64_t ff_pdw_80000000[2] attribute_used __attribute__ ((aligned(16))) =
{0x8000000080000000ULL, 0x8000000080000000ULL};

static const uint64_t ff_pw_20 attribute_used __attribute__ ((aligned(8))) = 0x0014001400140014ULL;
static const uint64_t ff_pw_3  attribute_used __attribute__ ((aligned(8))) = 0x0003000300030003ULL;
static const uint64_t ff_pw_4  attribute_used __attribute__ ((aligned(8))) = 0x0004000400040004ULL;
static const uint64_t ff_pw_5  attribute_used __attribute__ ((aligned(8))) = 0x0005000500050005ULL;
static const uint64_t ff_pw_8  attribute_used __attribute__ ((aligned(8))) = 0x0008000800080008ULL;
static const uint64_t ff_pw_16 attribute_used __attribute__ ((aligned(8))) = 0x0010001000100010ULL;
static const uint64_t ff_pw_32 attribute_used __attribute__ ((aligned(8))) = 0x0020002000200020ULL;
static const uint64_t ff_pw_64 attribute_used __attribute__ ((aligned(8))) = 0x0040004000400040ULL;
static const uint64_t ff_pw_15 attribute_used __attribute__ ((aligned(8))) = 0x000F000F000F000FULL;

static const uint64_t ff_pb_1  attribute_used __attribute__ ((aligned(8))) = 0x0101010101010101ULL;
static const uint64_t ff_pb_3  attribute_used __attribute__ ((aligned(8))) = 0x0303030303030303ULL;
static const uint64_t ff_pb_7  attribute_used __attribute__ ((aligned(8))) = 0x0707070707070707ULL;
static const uint64_t ff_pb_3F attribute_used __attribute__ ((aligned(8))) = 0x3F3F3F3F3F3F3F3FULL;
static const uint64_t ff_pb_A1 attribute_used __attribute__ ((aligned(8))) = 0xA1A1A1A1A1A1A1A1ULL;
static const uint64_t ff_pb_5F attribute_used __attribute__ ((aligned(8))) = 0x5F5F5F5F5F5F5F5FULL;
static const uint64_t ff_pb_FC attribute_used __attribute__ ((aligned(8))) = 0xFCFCFCFCFCFCFCFCULL;
#define JUMPALIGN() __asm __volatile (ASMALIGN(3)::)
#define MOVQ_ZERO(regd) __asm __volatile ("pxor %%" #regd ", %%" #regd ::)

#define MOVQ_WONE(regd) \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd ::)

#define MOVQ_BFE(regd) \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
    "paddb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_BONE(regd) __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_bone))
#define MOVQ_WTWO(regd) __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_wtwo))
// for a shared library it is better to access the constants this way
#define MOVQ_BONE(regd) \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "psllw $1, %%" #regd " \n\t"::)
// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe "," #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "paddb " #regb ", " #regr " \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "por " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe "," #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"
// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pand " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "paddb " #regb ", " #regr " \n\t"\
    "paddb " #regd ", " #regp " \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "por " #regb ", " #regr " \n\t"\
    "por " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"\
    "psubb " #regd ", " #regp " \n\t"
/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)

#include "dsputil_mmx_rnd.h"
/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd.h"
/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
/* for Athlons, PAVGUSB is preferred */
#define PAVGB "pavgusb"

#include "dsputil_mmx_avg.h"
/***********************************/
/* MMX2 specific */

#define DEF(x) x ## _mmx2

/* introduced only in the MMX2 instruction set */
#define PAVGB "pavgb"

#include "dsputil_mmx_avg.h"
#define SBUTTERFLY(a,b,t,n)\
    "movq " #a ", " #t " \n\t" /* abcd */\
    "punpckl" #n " " #b ", " #a " \n\t" /* aebf */\
    "punpckh" #n " " #b ", " #t " \n\t" /* cgdh */\

/***********************************/
#ifdef CONFIG_ENCODERS
static void get_pixels_mmx(DCTELEM *block, const uint8_t *pixels, int line_size)
        "mov $-128, %%"REG_a" \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        "movq (%0), %%mm0 \n\t"
        "movq (%0, %2), %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm2, %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "movq %%mm0, (%1, %%"REG_a") \n\t"
        "movq %%mm1, 8(%1, %%"REG_a") \n\t"
        "movq %%mm2, 16(%1, %%"REG_a") \n\t"
        "movq %%mm3, 24(%1, %%"REG_a") \n\t"
        "add $32, %%"REG_a" \n\t"
        : "r" (block+64), "r" ((long)line_size), "r" ((long)line_size*2)
static inline void diff_pixels_mmx(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
        "pxor %%mm7, %%mm7 \n\t"
        "mov $-128, %%"REG_a" \n\t"
        "movq (%0), %%mm0 \n\t"
        "movq (%1), %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm2, %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "psubw %%mm2, %%mm0 \n\t"
        "psubw %%mm3, %%mm1 \n\t"
        "movq %%mm0, (%2, %%"REG_a") \n\t"
        "movq %%mm1, 8(%2, %%"REG_a") \n\t"
        "add $16, %%"REG_a" \n\t"
        : "+r" (s1), "+r" (s2)
        : "r" (block+64), "r" ((long)stride)
#endif //CONFIG_ENCODERS
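/* Scalar sketch (an assumption, not the code used here) of what the two
 * encoder helpers above compute: get_pixels widens an 8x8 block of bytes
 * to 16-bit DCT coefficients, diff_pixels stores the difference of two
 * such blocks; the _ref names are hypothetical. */
#if 0
static void get_pixels_ref(DCTELEM *block, const uint8_t *pixels, int line_size)
{
    int x, y;
    for(y=0; y<8; y++){
        for(x=0; x<8; x++)
            block[8*y + x] = pixels[x];
        pixels += line_size;
    }
}
static void diff_pixels_ref(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
{
    int x, y;
    for(y=0; y<8; y++){
        for(x=0; x<8; x++)
            block[8*y + x] = s1[x] - s2[x];
        s1 += stride;
        s2 += stride;
    }
}
#endif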
void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
    /* read the pixels */
        "movq %3, %%mm0 \n\t"
        "movq 8%3, %%mm1 \n\t"
        "movq 16%3, %%mm2 \n\t"
        "movq 24%3, %%mm3 \n\t"
        "movq 32%3, %%mm4 \n\t"
        "movq 40%3, %%mm5 \n\t"
        "movq 48%3, %%mm6 \n\t"
        "movq 56%3, %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq %%mm0, (%0) \n\t"
        "movq %%mm2, (%0, %1) \n\t"
        "movq %%mm4, (%0, %1, 2) \n\t"
        "movq %%mm6, (%0, %2) \n\t"
        ::"r" (pix), "r" ((long)line_size), "r" ((long)line_size*3), "m"(*p)
    // if this were an exact copy of the code above, the compiler would
    // generate some very strange code
301 "movq (%3), %%mm0 \n\t"
302 "movq 8(%3), %%mm1 \n\t"
303 "movq 16(%3), %%mm2 \n\t"
304 "movq 24(%3), %%mm3 \n\t"
305 "movq 32(%3), %%mm4 \n\t"
306 "movq 40(%3), %%mm5 \n\t"
307 "movq 48(%3), %%mm6 \n\t"
308 "movq 56(%3), %%mm7 \n\t"
309 "packuswb %%mm1, %%mm0 \n\t"
310 "packuswb %%mm3, %%mm2 \n\t"
311 "packuswb %%mm5, %%mm4 \n\t"
312 "packuswb %%mm7, %%mm6 \n\t"
313 "movq %%mm0, (%0) \n\t"
314 "movq %%mm2, (%0, %1) \n\t"
315 "movq %%mm4, (%0, %1, 2) \n\t"
316 "movq %%mm6, (%0, %2) \n\t"
317 ::"r" (pix), "r" ((long)line_size), "r" ((long)line_size*3), "r"(p)
321 static DECLARE_ALIGNED_8(const unsigned char, vector128[8]) =
322 { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };
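/* Scalar sketch (an assumption) of the function below: packsswb clamps each
 * coefficient to the signed byte range and the vector128 constant shifts it
 * into 0..255; the _ref name is hypothetical. */
#if 0
static void put_signed_pixels_clamped_ref(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    int x, y;
    for(y=0; y<8; y++){
        for(x=0; x<8; x++){
            int v = block[8*y + x];
            if(v < -128) v = -128;
            if(v >  127) v =  127;
            pixels[x] = v + 128; /* the paddb with vector128 */
        }
        pixels += line_size;
    }
}
#endif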
void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
    movq_m2r(*vector128, mm1);
    for (i = 0; i < 8; i++) {
        movq_m2r(*(block), mm0);
        packsswb_m2r(*(block + 4), mm0);
        movq_r2m(mm0, *pixels);
void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
    /* read the pixels */
        "movq (%2), %%mm0 \n\t"
        "movq 8(%2), %%mm1 \n\t"
        "movq 16(%2), %%mm2 \n\t"
        "movq 24(%2), %%mm3 \n\t"
        "movq %0, %%mm4 \n\t"
        "movq %1, %%mm6 \n\t"
        "movq %%mm4, %%mm5 \n\t"
        "punpcklbw %%mm7, %%mm4 \n\t"
        "punpckhbw %%mm7, %%mm5 \n\t"
        "paddsw %%mm4, %%mm0 \n\t"
        "paddsw %%mm5, %%mm1 \n\t"
        "movq %%mm6, %%mm5 \n\t"
        "punpcklbw %%mm7, %%mm6 \n\t"
        "punpckhbw %%mm7, %%mm5 \n\t"
        "paddsw %%mm6, %%mm2 \n\t"
        "paddsw %%mm5, %%mm3 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "movq %%mm0, %0 \n\t"
        "movq %%mm2, %1 \n\t"
        :"+m"(*pix), "+m"(*(pix+line_size))
static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
        "lea (%3, %3), %%"REG_a" \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((long)line_size)

static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
        "lea (%3, %3), %%"REG_a" \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((long)line_size)

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
        "lea (%3, %3), %%"REG_a" \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((long)line_size)
static void clear_blocks_mmx(DCTELEM *blocks)
        "pxor %%mm7, %%mm7 \n\t"
        "mov $-128*6, %%"REG_a" \n\t"
        "movq %%mm7, (%0, %%"REG_a") \n\t"
        "movq %%mm7, 8(%0, %%"REG_a") \n\t"
        "movq %%mm7, 16(%0, %%"REG_a") \n\t"
        "movq %%mm7, 24(%0, %%"REG_a") \n\t"
        "add $32, %%"REG_a" \n\t"
        : : "r" (((uint8_t *)blocks)+128*6)
#ifdef CONFIG_ENCODERS
static int pix_sum16_mmx(uint8_t * pix, int line_size){
    long index= -line_size*h;

        "pxor %%mm7, %%mm7 \n\t"
        "pxor %%mm6, %%mm6 \n\t"
        "movq (%2, %1), %%mm0 \n\t"
        "movq (%2, %1), %%mm1 \n\t"
        "movq 8(%2, %1), %%mm2 \n\t"
        "movq 8(%2, %1), %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "paddw %%mm0, %%mm1 \n\t"
        "paddw %%mm2, %%mm3 \n\t"
        "paddw %%mm1, %%mm3 \n\t"
        "paddw %%mm3, %%mm6 \n\t"
        "movq %%mm6, %%mm5 \n\t"
        "psrlq $32, %%mm6 \n\t"
        "paddw %%mm5, %%mm6 \n\t"
        "movq %%mm6, %%mm5 \n\t"
        "psrlq $16, %%mm6 \n\t"
        "paddw %%mm5, %%mm6 \n\t"
        "movd %%mm6, %0 \n\t"
        "andl $0xFFFF, %0 \n\t"
        : "=&r" (sum), "+r" (index)
        : "r" (pix - index), "r" ((long)line_size)
#endif //CONFIG_ENCODERS
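/* Scalar sketch (an assumption) of pix_sum16_mmx above: sum all bytes of a
 * 16-wide block; the MMX version accumulates 16-bit words and masks the
 * result at the end. The block height is assumed to be 16. */
#if 0
static int pix_sum16_ref(uint8_t *pix, int line_size)
{
    int x, y, sum = 0;
    for(y=0; y<16; y++){
        for(x=0; x<16; x++)
            sum += pix[x];
        pix += line_size;
    }
    return sum & 0xFFFF;
}
#endif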
static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
        "movq (%1, %0), %%mm0 \n\t"
        "movq (%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, (%2, %0) \n\t"
        "movq 8(%1, %0), %%mm0 \n\t"
        "movq 8(%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, 8(%2, %0) \n\t"
        : "r"(src), "r"(dst), "r"((long)w-15)
        dst[i+0] += src[i+0];
#define H263_LOOP_FILTER \
    "pxor %%mm7, %%mm7 \n\t"\
    "movq %0, %%mm0 \n\t"\
    "movq %0, %%mm1 \n\t"\
    "movq %3, %%mm2 \n\t"\
    "movq %3, %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm0 \n\t"\
    "punpckhbw %%mm7, %%mm1 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "psubw %%mm2, %%mm0 \n\t"\
    "psubw %%mm3, %%mm1 \n\t"\
    "movq %1, %%mm2 \n\t"\
    "movq %1, %%mm3 \n\t"\
    "movq %2, %%mm4 \n\t"\
    "movq %2, %%mm5 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm4 \n\t"\
    "punpckhbw %%mm7, %%mm5 \n\t"\
    "psubw %%mm2, %%mm4 \n\t"\
    "psubw %%mm3, %%mm5 \n\t"\
    "psllw $2, %%mm4 \n\t"\
    "psllw $2, %%mm5 \n\t"\
    "paddw %%mm0, %%mm4 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pcmpgtw %%mm4, %%mm6 \n\t"\
    "pcmpgtw %%mm5, %%mm7 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "pxor %%mm7, %%mm5 \n\t"\
    "psubw %%mm6, %%mm4 \n\t"\
    "psubw %%mm7, %%mm5 \n\t"\
    "psrlw $3, %%mm4 \n\t"\
    "psrlw $3, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm4 \n\t"\
    "packsswb %%mm7, %%mm6 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "movd %4, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "psubusb %%mm4, %%mm2 \n\t"\
    "movq %%mm2, %%mm3 \n\t"\
    "psubusb %%mm4, %%mm3 \n\t"\
    "psubb %%mm3, %%mm2 \n\t"\
    "movq %1, %%mm3 \n\t"\
    "movq %2, %%mm4 \n\t"\
    "pxor %%mm6, %%mm3 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "paddusb %%mm2, %%mm3 \n\t"\
    "psubusb %%mm2, %%mm4 \n\t"\
    "pxor %%mm6, %%mm3 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "paddusb %%mm2, %%mm2 \n\t"\
    "packsswb %%mm1, %%mm0 \n\t"\
    "pcmpgtb %%mm0, %%mm7 \n\t"\
    "pxor %%mm7, %%mm0 \n\t"\
    "psubb %%mm7, %%mm0 \n\t"\
    "movq %%mm0, %%mm1 \n\t"\
    "psubusb %%mm2, %%mm0 \n\t"\
    "psubb %%mm0, %%mm1 \n\t"\
    "pand %5, %%mm1 \n\t"\
    "psrlw $2, %%mm1 \n\t"\
    "pxor %%mm7, %%mm1 \n\t"\
    "psubb %%mm7, %%mm1 \n\t"\
    "movq %0, %%mm5 \n\t"\
    "movq %3, %%mm6 \n\t"\
    "psubb %%mm1, %%mm5 \n\t"\
    "paddb %%mm1, %%mm6 \n\t"
static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    const int strength= ff_h263_loop_filter_strength[qscale];

        "movq %%mm3, %1 \n\t"
        "movq %%mm4, %2 \n\t"
        "movq %%mm5, %0 \n\t"
        "movq %%mm6, %3 \n\t"
        : "+m" (*(uint64_t*)(src - 2*stride)),
          "+m" (*(uint64_t*)(src - 1*stride)),
          "+m" (*(uint64_t*)(src + 0*stride)),
          "+m" (*(uint64_t*)(src + 1*stride))
        : "g" (2*strength), "m"(ff_pb_FC)
static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
    asm volatile( //FIXME could save 1 instruction if done as 8x4 ...
        "movd %4, %%mm0 \n\t"
        "movd %5, %%mm1 \n\t"
        "movd %6, %%mm2 \n\t"
        "movd %7, %%mm3 \n\t"
        "punpcklbw %%mm1, %%mm0 \n\t"
        "punpcklbw %%mm3, %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "punpcklwd %%mm2, %%mm0 \n\t"
        "punpckhwd %%mm2, %%mm1 \n\t"
        "movd %%mm0, %0 \n\t"
        "punpckhdq %%mm0, %%mm0 \n\t"
        "movd %%mm0, %1 \n\t"
        "movd %%mm1, %2 \n\t"
        "punpckhdq %%mm1, %%mm1 \n\t"
        "movd %%mm1, %3 \n\t"
        : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
          "=m" (*(uint32_t*)(dst + 1*dst_stride)),
          "=m" (*(uint32_t*)(dst + 2*dst_stride)),
          "=m" (*(uint32_t*)(dst + 3*dst_stride))
        : "m" (*(uint32_t*)(src + 0*src_stride)),
          "m" (*(uint32_t*)(src + 1*src_stride)),
          "m" (*(uint32_t*)(src + 2*src_stride)),
          "m" (*(uint32_t*)(src + 3*src_stride))
static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    const int strength= ff_h263_loop_filter_strength[qscale];
    uint64_t temp[4] __attribute__ ((aligned(8)));
    uint8_t *btemp= (uint8_t*)temp;

    transpose4x4(btemp  , src           , 8, stride);
    transpose4x4(btemp+4, src + 4*stride, 8, stride);

        H263_LOOP_FILTER // 5 3 4 6
        : "g" (2*strength), "m"(ff_pb_FC)
684 "movq %%mm5, %%mm1 \n\t"
685 "movq %%mm4, %%mm0 \n\t"
686 "punpcklbw %%mm3, %%mm5 \n\t"
687 "punpcklbw %%mm6, %%mm4 \n\t"
688 "punpckhbw %%mm3, %%mm1 \n\t"
689 "punpckhbw %%mm6, %%mm0 \n\t"
690 "movq %%mm5, %%mm3 \n\t"
691 "movq %%mm1, %%mm6 \n\t"
692 "punpcklwd %%mm4, %%mm5 \n\t"
693 "punpcklwd %%mm0, %%mm1 \n\t"
694 "punpckhwd %%mm4, %%mm3 \n\t"
695 "punpckhwd %%mm0, %%mm6 \n\t"
696 "movd %%mm5, (%0) \n\t"
697 "punpckhdq %%mm5, %%mm5 \n\t"
698 "movd %%mm5, (%0,%2) \n\t"
699 "movd %%mm3, (%0,%2,2) \n\t"
700 "punpckhdq %%mm3, %%mm3 \n\t"
701 "movd %%mm3, (%0,%3) \n\t"
702 "movd %%mm1, (%1) \n\t"
703 "punpckhdq %%mm1, %%mm1 \n\t"
704 "movd %%mm1, (%1,%2) \n\t"
705 "movd %%mm6, (%1,%2,2) \n\t"
706 "punpckhdq %%mm6, %%mm6 \n\t"
707 "movd %%mm6, (%1,%3) \n\t"
709 "r" (src + 4*stride),
710 "r" ((long) stride ),
711 "r" ((long)(3*stride))
#ifdef CONFIG_ENCODERS
static int pix_norm1_mmx(uint8_t *pix, int line_size) {
        "movq (%0),%%mm2\n"       /* mm2 = pix[0-7] */
        "movq 8(%0),%%mm3\n"      /* mm3 = pix[8-15] */
        "movq %%mm2,%%mm1\n"      /* mm1 = mm2 = pix[0-7] */
        "punpckhbw %%mm0,%%mm1\n" /* mm1 = [pix4-7] */
        "punpcklbw %%mm0,%%mm2\n" /* mm2 = [pix0-3] */
        "movq %%mm3,%%mm4\n"      /* mm4 = mm3 = pix[8-15] */
        "punpckhbw %%mm0,%%mm3\n" /* mm3 = [pix12-15] */
        "punpcklbw %%mm0,%%mm4\n" /* mm4 = [pix8-11] */
        "pmaddwd %%mm1,%%mm1\n"   /* mm1 = (pix4^2+pix5^2,pix6^2+pix7^2) */
        "pmaddwd %%mm2,%%mm2\n"   /* mm2 = (pix0^2+pix1^2,pix2^2+pix3^2) */
        "pmaddwd %%mm3,%%mm3\n"
        "pmaddwd %%mm4,%%mm4\n"
        "paddd %%mm1,%%mm2\n"     /* mm2 = (pix0^2+pix1^2+pix4^2+pix5^2,
                                            pix2^2+pix3^2+pix6^2+pix7^2) */
        "paddd %%mm3,%%mm4\n"
        "paddd %%mm2,%%mm7\n"
        "paddd %%mm4,%%mm7\n"
        "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        : "+r" (pix), "=r"(tmp) : "r" ((long)line_size) : "%ecx" );
static int sse8_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
        "pxor %%mm0,%%mm0\n"    /* mm0 = 0 */
        "pxor %%mm7,%%mm7\n"    /* mm7 holds the sum */
        "movq (%0),%%mm1\n"     /* mm1 = pix1[0][0-7] */
        "movq (%1),%%mm2\n"     /* mm2 = pix2[0][0-7] */
        "movq (%0,%3),%%mm3\n"  /* mm3 = pix1[1][0-7] */
        "movq (%1,%3),%%mm4\n"  /* mm4 = pix2[1][0-7] */

        /* todo: mm1-mm2, mm3-mm4 */
        /* algo: subtract mm1 from mm2 with saturation and vice versa */
        /*       OR the results to get absolute difference */
        "psubusb %%mm2,%%mm1\n"
        "psubusb %%mm4,%%mm3\n"
        "psubusb %%mm5,%%mm2\n"
        "psubusb %%mm6,%%mm4\n"

        /* now convert to 16-bit vectors so we can square them */
        "punpckhbw %%mm0,%%mm2\n"
        "punpckhbw %%mm0,%%mm4\n"
        "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
        "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */

        "pmaddwd %%mm2,%%mm2\n"
        "pmaddwd %%mm4,%%mm4\n"
        "pmaddwd %%mm1,%%mm1\n"
        "pmaddwd %%mm3,%%mm3\n"

        "lea (%0,%3,2), %0\n"   /* pix1 += 2*line_size */
        "lea (%1,%3,2), %1\n"   /* pix2 += 2*line_size */

        "paddd %%mm2,%%mm1\n"
        "paddd %%mm4,%%mm3\n"
        "paddd %%mm1,%%mm7\n"
        "paddd %%mm3,%%mm7\n"

        "psrlq $32, %%mm7\n"    /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((long)line_size) , "m" (h)
static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
        "pxor %%mm0,%%mm0\n"    /* mm0 = 0 */
        "pxor %%mm7,%%mm7\n"    /* mm7 holds the sum */
        "movq (%0),%%mm1\n"     /* mm1 = pix1[0-7] */
        "movq (%1),%%mm2\n"     /* mm2 = pix2[0-7] */
        "movq 8(%0),%%mm3\n"    /* mm3 = pix1[8-15] */
        "movq 8(%1),%%mm4\n"    /* mm4 = pix2[8-15] */

        /* todo: mm1-mm2, mm3-mm4 */
        /* algo: subtract mm1 from mm2 with saturation and vice versa */
        /*       OR the results to get absolute difference */
        "psubusb %%mm2,%%mm1\n"
        "psubusb %%mm4,%%mm3\n"
        "psubusb %%mm5,%%mm2\n"
        "psubusb %%mm6,%%mm4\n"

        /* now convert to 16-bit vectors so we can square them */
        "punpckhbw %%mm0,%%mm2\n"
        "punpckhbw %%mm0,%%mm4\n"
        "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
        "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */

        "pmaddwd %%mm2,%%mm2\n"
        "pmaddwd %%mm4,%%mm4\n"
        "pmaddwd %%mm1,%%mm1\n"
        "pmaddwd %%mm3,%%mm3\n"

        "paddd %%mm2,%%mm1\n"
        "paddd %%mm4,%%mm3\n"
        "paddd %%mm1,%%mm7\n"
        "paddd %%mm3,%%mm7\n"

        "psrlq $32, %%mm7\n"    /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((long)line_size) , "m" (h)
static int sse16_sse2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
        "pxor %%xmm0,%%xmm0\n"    /* mm0 = 0 */
        "pxor %%xmm7,%%xmm7\n"    /* mm7 holds the sum */
        "movdqu (%0),%%xmm1\n"    /* mm1 = pix1[0][0-15] */
        "movdqu (%1),%%xmm2\n"    /* mm2 = pix2[0][0-15] */
        "movdqu (%0,%4),%%xmm3\n" /* mm3 = pix1[1][0-15] */
        "movdqu (%1,%4),%%xmm4\n" /* mm4 = pix2[1][0-15] */

        /* todo: mm1-mm2, mm3-mm4 */
        /* algo: subtract mm1 from mm2 with saturation and vice versa */
        /*       OR the results to get absolute difference */
        "movdqa %%xmm1,%%xmm5\n"
        "movdqa %%xmm3,%%xmm6\n"
        "psubusb %%xmm2,%%xmm1\n"
        "psubusb %%xmm4,%%xmm3\n"
        "psubusb %%xmm5,%%xmm2\n"
        "psubusb %%xmm6,%%xmm4\n"

        "por %%xmm1,%%xmm2\n"
        "por %%xmm3,%%xmm4\n"

        /* now convert to 16-bit vectors so we can square them */
        "movdqa %%xmm2,%%xmm1\n"
        "movdqa %%xmm4,%%xmm3\n"
        "punpckhbw %%xmm0,%%xmm2\n"
        "punpckhbw %%xmm0,%%xmm4\n"
        "punpcklbw %%xmm0,%%xmm1\n" /* mm1 now spread over (mm1,mm2) */
        "punpcklbw %%xmm0,%%xmm3\n" /* mm4 now spread over (mm3,mm4) */

        "pmaddwd %%xmm2,%%xmm2\n"
        "pmaddwd %%xmm4,%%xmm4\n"
        "pmaddwd %%xmm1,%%xmm1\n"
        "pmaddwd %%xmm3,%%xmm3\n"

        "lea (%0,%4,2), %0\n"     /* pix1 += 2*line_size */
        "lea (%1,%4,2), %1\n"     /* pix2 += 2*line_size */

        "paddd %%xmm2,%%xmm1\n"
        "paddd %%xmm4,%%xmm3\n"
        "paddd %%xmm1,%%xmm7\n"
        "paddd %%xmm3,%%xmm7\n"

        "movdqa %%xmm7,%%xmm1\n"
        "psrldq $8, %%xmm7\n"     /* shift hi qword to lo */
        "paddd %%xmm1,%%xmm7\n"
        "movdqa %%xmm7,%%xmm1\n"
        "psrldq $4, %%xmm7\n"     /* shift hi dword to lo */
        "paddd %%xmm1,%%xmm7\n"
        : "+r" (pix1), "+r" (pix2), "+r"(h), "=r"(tmp)
        : "r" ((long)line_size));
static int hf_noise8_mmx(uint8_t * pix1, int line_size, int h) {
        "movq %%mm0, %%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"

        "movq %%mm4, %%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "movq %%mm0, %%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "psubw %%mm0, %%mm4\n"
        "psubw %%mm2, %%mm5\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm4, %%mm3\n\t"
        "pcmpgtw %%mm5, %%mm1\n\t"
        "pxor %%mm3, %%mm4\n"
        "pxor %%mm1, %%mm5\n"
        "psubw %%mm3, %%mm4\n"
        "psubw %%mm1, %%mm5\n"
        "paddw %%mm4, %%mm5\n"
        "paddw %%mm5, %%mm6\n"

        "movq %%mm4, %%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "movq %%mm6, %%mm0\n"
        "punpcklwd %%mm7,%%mm0\n"
        "punpckhwd %%mm7,%%mm6\n"
        "paddd %%mm0, %%mm6\n"

        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddd %%mm6,%%mm0\n"
        : "+r" (pix1), "=r"(tmp)
        : "r" ((long)line_size) , "g" (h-2)
static int hf_noise16_mmx(uint8_t * pix1, int line_size, int h) {
    uint8_t * pix= pix1;

        "pxor %%mm7,%%mm7\n"
        "pxor %%mm6,%%mm6\n"

        "movq 1(%0),%%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"

        "movq 1(%0),%%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "movq 1(%0),%%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "psubw %%mm0, %%mm4\n"
        "psubw %%mm2, %%mm5\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm4, %%mm3\n\t"
        "pcmpgtw %%mm5, %%mm1\n\t"
        "pxor %%mm3, %%mm4\n"
        "pxor %%mm1, %%mm5\n"
        "psubw %%mm3, %%mm4\n"
        "psubw %%mm1, %%mm5\n"
        "paddw %%mm4, %%mm5\n"
        "paddw %%mm5, %%mm6\n"

        "movq 1(%0),%%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "movq %%mm6, %%mm0\n"
        "punpcklwd %%mm7,%%mm0\n"
        "punpckhwd %%mm7,%%mm6\n"
        "paddd %%mm0, %%mm6\n"

        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddd %%mm6,%%mm0\n"
        : "+r" (pix1), "=r"(tmp)
        : "r" ((long)line_size) , "g" (h-2)
    return tmp + hf_noise8_mmx(pix+8, line_size, h);
static int nsse16_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    MpegEncContext *c = p;

    if(c) score1 = c->dsp.sse[0](c, pix1, pix2, line_size, h);
    else  score1 = sse16_mmx(c, pix1, pix2, line_size, h);
    score2= hf_noise16_mmx(pix1, line_size, h) - hf_noise16_mmx(pix2, line_size, h);

    if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
    else  return score1 + FFABS(score2)*8;

static int nsse8_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    MpegEncContext *c = p;
    int score1= sse8_mmx(c, pix1, pix2, line_size, h);
    int score2= hf_noise8_mmx(pix1, line_size, h) - hf_noise8_mmx(pix2, line_size, h);

    if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
    else  return score1 + FFABS(score2)*8;
static int vsad_intra16_mmx(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {

    assert( (((int)pix) & 7) == 0);
    assert((line_size & 7) == 0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0), %%mm2\n"\
    "movq 8(%0), %%mm3\n"\
    "movq %%mm2, " #out0 "\n"\
    "movq %%mm3, " #out1 "\n"\
    "psubusb " #in0 ", %%mm2\n"\
    "psubusb " #in1 ", %%mm3\n"\
    "psubusb " #out0 ", " #in0 "\n"\
    "psubusb " #out1 ", " #in1 "\n"\
    "por %%mm2, " #in0 "\n"\
    "por %%mm3, " #in1 "\n"\
    "movq " #in0 ", %%mm2\n"\
    "movq " #in1 ", %%mm3\n"\
    "punpcklbw %%mm7, " #in0 "\n"\
    "punpcklbw %%mm7, " #in1 "\n"\
    "punpckhbw %%mm7, %%mm2\n"\
    "punpckhbw %%mm7, %%mm3\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw %%mm3, %%mm2\n"\
    "paddw %%mm2, " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

        "pxor %%mm6,%%mm6\n"
        "pxor %%mm7,%%mm7\n"
        "movq 8(%0),%%mm1\n"

        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)

        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddw %%mm6,%%mm0\n"
        "movq %%mm0,%%mm6\n"
        "psrlq $16, %%mm0\n"
        "paddw %%mm6,%%mm0\n"
        : "+r" (pix), "=r"(tmp)
        : "r" ((long)line_size) , "m" (h)
    return tmp & 0xFFFF;
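/* Scalar sketch (an assumption) of the vertical intra SAD computed above:
 * for each row after the first, sum the absolute differences against the
 * previous row of the same 16-wide block; the _ref name is hypothetical. */
#if 0
static int vsad_intra16_ref(uint8_t *pix, int line_size, int h)
{
    int x, y, score = 0;
    for(y=1; y<h; y++){
        pix += line_size;
        for(x=0; x<16; x++)
            score += FFABS(pix[x] - pix[x - line_size]);
    }
    return score & 0xFFFF;
}
#endif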
static int vsad_intra16_mmx2(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {

    assert( (((int)pix) & 7) == 0);
    assert((line_size & 7) == 0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0), " #out0 "\n"\
    "movq 8(%0), " #out1 "\n"\
    "psadbw " #out0 ", " #in0 "\n"\
    "psadbw " #out1 ", " #in1 "\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

        "pxor %%mm6,%%mm6\n"
        "pxor %%mm7,%%mm7\n"
        "movq 8(%0),%%mm1\n"

        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)

        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        : "+r" (pix), "=r"(tmp)
        : "r" ((long)line_size) , "m" (h)
static int vsad16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {

    assert( (((int)pix1) & 7) == 0);
    assert( (((int)pix2) & 7) == 0);
    assert((line_size & 7) == 0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0),%%mm2\n"\
    "movq (%1)," #out0 "\n"\
    "movq 8(%0),%%mm3\n"\
    "movq 8(%1)," #out1 "\n"\
    "psubb " #out0 ", %%mm2\n"\
    "psubb " #out1 ", %%mm3\n"\
    "pxor %%mm7, %%mm2\n"\
    "pxor %%mm7, %%mm3\n"\
    "movq %%mm2, " #out0 "\n"\
    "movq %%mm3, " #out1 "\n"\
    "psubusb " #in0 ", %%mm2\n"\
    "psubusb " #in1 ", %%mm3\n"\
    "psubusb " #out0 ", " #in0 "\n"\
    "psubusb " #out1 ", " #in1 "\n"\
    "por %%mm2, " #in0 "\n"\
    "por %%mm3, " #in1 "\n"\
    "movq " #in0 ", %%mm2\n"\
    "movq " #in1 ", %%mm3\n"\
    "punpcklbw %%mm7, " #in0 "\n"\
    "punpcklbw %%mm7, " #in1 "\n"\
    "punpckhbw %%mm7, %%mm2\n"\
    "punpckhbw %%mm7, %%mm3\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw %%mm3, %%mm2\n"\
    "paddw %%mm2, " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

        "pxor %%mm6,%%mm6\n"
        "pcmpeqw %%mm7,%%mm7\n"
        "psllw $15, %%mm7\n"
        "packsswb %%mm7, %%mm7\n"
        "movq 8(%0),%%mm1\n"
        "movq 8(%1),%%mm3\n"
        "psubb %%mm2, %%mm0\n"
        "psubb %%mm3, %%mm1\n"
        "pxor %%mm7, %%mm0\n"
        "pxor %%mm7, %%mm1\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)

        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddw %%mm6,%%mm0\n"
        "movq %%mm0,%%mm6\n"
        "psrlq $16, %%mm0\n"
        "paddw %%mm6,%%mm0\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((long)line_size) , "m" (h)
    return tmp & 0x7FFF;
static int vsad16_mmx2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {

    assert( (((int)pix1) & 7) == 0);
    assert( (((int)pix2) & 7) == 0);
    assert((line_size & 7) == 0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0)," #out0 "\n"\
    "movq (%1),%%mm2\n"\
    "movq 8(%0)," #out1 "\n"\
    "movq 8(%1),%%mm3\n"\
    "psubb %%mm2, " #out0 "\n"\
    "psubb %%mm3, " #out1 "\n"\
    "pxor %%mm7, " #out0 "\n"\
    "pxor %%mm7, " #out1 "\n"\
    "psadbw " #out0 ", " #in0 "\n"\
    "psadbw " #out1 ", " #in1 "\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

        "pxor %%mm6,%%mm6\n"
        "pcmpeqw %%mm7,%%mm7\n"
        "psllw $15, %%mm7\n"
        "packsswb %%mm7, %%mm7\n"
        "movq 8(%0),%%mm1\n"
        "movq 8(%1),%%mm3\n"
        "psubb %%mm2, %%mm0\n"
        "psubb %%mm3, %%mm1\n"
        "pxor %%mm7, %%mm0\n"
        "pxor %%mm7, %%mm1\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)

        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((long)line_size) , "m" (h)
static void diff_bytes_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
        "movq (%2, %0), %%mm0 \n\t"
        "movq (%1, %0), %%mm1 \n\t"
        "psubb %%mm0, %%mm1 \n\t"
        "movq %%mm1, (%3, %0) \n\t"
        "movq 8(%2, %0), %%mm0 \n\t"
        "movq 8(%1, %0), %%mm1 \n\t"
        "psubb %%mm0, %%mm1 \n\t"
        "movq %%mm1, 8(%3, %0) \n\t"
        : "r"(src1), "r"(src2), "r"(dst), "r"((long)w-15)
        dst[i+0] = src1[i+0]-src2[i+0];
static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w, int *left, int *left_top){
        "movq -1(%1, %0), %%mm0 \n\t" // LT
        "movq (%1, %0), %%mm1 \n\t" // T
        "movq -1(%2, %0), %%mm2 \n\t" // L
        "movq (%2, %0), %%mm3 \n\t" // X
        "movq %%mm2, %%mm4 \n\t" // L
        "psubb %%mm0, %%mm2 \n\t"
        "paddb %%mm1, %%mm2 \n\t" // L + T - LT
        "movq %%mm4, %%mm5 \n\t" // L
        "pmaxub %%mm1, %%mm4 \n\t" // max(T, L)
        "pminub %%mm5, %%mm1 \n\t" // min(T, L)
        "pminub %%mm2, %%mm4 \n\t"
        "pmaxub %%mm1, %%mm4 \n\t"
        "psubb %%mm4, %%mm3 \n\t" // dst - pred
        "movq %%mm3, (%3, %0) \n\t"
        : "r"(src1), "r"(src2), "r"(dst), "r"((long)w)

    dst[0]= src2[0] - mid_pred(l, src1[0], (l + src1[0] - lt)&0xFF);

    *left_top= src1[w-1];
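/* The predictor used above, written out as a scalar sketch: the median of
 * L, T and L+T-LT, which the MMX2 code builds from pmaxub/pminub; the
 * _ref name is hypothetical, mid_pred() is the median-of-3 helper already
 * used in the scalar tail. */
#if 0
static inline int median_pred_ref(int l, int t, int lt)
{
    return mid_pred(l, t, l + t - lt);
}
#endif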
#define LBUTTERFLY2(a1,b1,a2,b2)\
    "paddw " #b1 ", " #a1 " \n\t"\
    "paddw " #b2 ", " #a2 " \n\t"\
    "paddw " #b1 ", " #b1 " \n\t"\
    "paddw " #b2 ", " #b2 " \n\t"\
    "psubw " #a1 ", " #b1 " \n\t"\
    "psubw " #a2 ", " #b2 " \n\t"
#define HADAMARD48\
    LBUTTERFLY2(%%mm0, %%mm1, %%mm2, %%mm3)\
    LBUTTERFLY2(%%mm4, %%mm5, %%mm6, %%mm7)\
    LBUTTERFLY2(%%mm0, %%mm2, %%mm1, %%mm3)\
    LBUTTERFLY2(%%mm4, %%mm6, %%mm5, %%mm7)\
    LBUTTERFLY2(%%mm0, %%mm4, %%mm1, %%mm5)\
    LBUTTERFLY2(%%mm2, %%mm6, %%mm3, %%mm7)\
1517 "pxor " #z ", " #z " \n\t"\
1518 "pcmpgtw " #a ", " #z " \n\t"\
1519 "pxor " #z ", " #a " \n\t"\
1520 "psubw " #z ", " #a " \n\t"
#define MMABS_SUM(a,z, sum)\
    "pxor " #z ", " #z " \n\t"\
    "pcmpgtw " #a ", " #z " \n\t"\
    "pxor " #z ", " #a " \n\t"\
    "psubw " #z ", " #a " \n\t"\
    "paddusw " #a ", " #sum " \n\t"

#define MMABS_MMX2(a,z)\
    "pxor " #z ", " #z " \n\t"\
    "psubw " #a ", " #z " \n\t"\
    "pmaxsw " #z ", " #a " \n\t"

#define MMABS_SUM_MMX2(a,z, sum)\
    "pxor " #z ", " #z " \n\t"\
    "psubw " #a ", " #z " \n\t"\
    "pmaxsw " #z ", " #a " \n\t"\
    "paddusw " #a ", " #sum " \n\t"
#define TRANSPOSE4(a,b,c,d,t)\
    SBUTTERFLY(a,b,t,wd) /* a=aebf t=cgdh */\
    SBUTTERFLY(c,d,b,wd) /* c=imjn b=kolp */\
    SBUTTERFLY(a,c,d,dq) /* a=aeim d=bfjn */\
    SBUTTERFLY(t,b,c,dq) /* t=cgko c=dhlp */

#define LOAD4(o, a, b, c, d)\
    "movq "#o"(%1), " #a " \n\t"\
    "movq "#o"+16(%1), " #b " \n\t"\
    "movq "#o"+32(%1), " #c " \n\t"\
    "movq "#o"+48(%1), " #d " \n\t"

#define STORE4(o, a, b, c, d)\
    "movq "#a", "#o"(%1) \n\t"\
    "movq "#b", "#o"+16(%1) \n\t"\
    "movq "#c", "#o"+32(%1) \n\t"\
    "movq "#d", "#o"+48(%1) \n\t"\
static int hadamard8_diff_mmx(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
    DECLARE_ALIGNED_8(uint64_t, temp[16]);

    diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);

        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7)

        "movq %%mm7, 112(%1) \n\t"

        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)

        "movq 112(%1), %%mm7 \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)

        LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)

        "movq %%mm7, 120(%1) \n\t"

        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2)

        "movq 120(%1), %%mm7 \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        "movq %%mm7, %%mm5 \n\t"//FIXME remove
        "movq %%mm6, %%mm7 \n\t"
        "movq %%mm0, %%mm6 \n\t"
//        STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove

        LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)
//        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)

        "movq %%mm7, 64(%1) \n\t"
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        MMABS_SUM(%%mm2, %%mm7, %%mm0)
        MMABS_SUM(%%mm3, %%mm7, %%mm0)
        MMABS_SUM(%%mm4, %%mm7, %%mm0)
        MMABS_SUM(%%mm5, %%mm7, %%mm0)
        MMABS_SUM(%%mm6, %%mm7, %%mm0)
        "movq 64(%1), %%mm1 \n\t"
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        "movq %%mm0, 64(%1) \n\t"

        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7)

        "movq %%mm7, (%1) \n\t"
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        MMABS_SUM(%%mm2, %%mm7, %%mm0)
        MMABS_SUM(%%mm3, %%mm7, %%mm0)
        MMABS_SUM(%%mm4, %%mm7, %%mm0)
        MMABS_SUM(%%mm5, %%mm7, %%mm0)
        MMABS_SUM(%%mm6, %%mm7, %%mm0)
        "movq (%1), %%mm1 \n\t"
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        "movq 64(%1), %%mm1 \n\t"
        MMABS_SUM(%%mm1, %%mm7, %%mm0)

        "movq %%mm0, %%mm1 \n\t"
        "psrlq $32, %%mm0 \n\t"
        "paddusw %%mm1, %%mm0 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "psrlq $16, %%mm0 \n\t"
        "paddusw %%mm1, %%mm0 \n\t"
        "movd %%mm0, %0 \n\t"
static int hadamard8_diff_mmx2(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
    DECLARE_ALIGNED_8(uint64_t, temp[16]);

    diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);

        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7)

        "movq %%mm7, 112(%1) \n\t"

        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)

        "movq 112(%1), %%mm7 \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)

        LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)

        "movq %%mm7, 120(%1) \n\t"

        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2)

        "movq 120(%1), %%mm7 \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        "movq %%mm7, %%mm5 \n\t"//FIXME remove
        "movq %%mm6, %%mm7 \n\t"
        "movq %%mm0, %%mm6 \n\t"
//        STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove

        LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)
//        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)

        "movq %%mm7, 64(%1) \n\t"
        MMABS_MMX2(%%mm0, %%mm7)
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0)
        "movq 64(%1), %%mm1 \n\t"
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        "movq %%mm0, 64(%1) \n\t"

        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7)

        "movq %%mm7, (%1) \n\t"
        MMABS_MMX2(%%mm0, %%mm7)
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0)
        "movq (%1), %%mm1 \n\t"
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        "movq 64(%1), %%mm1 \n\t"
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)

        "pshufw $0x0E, %%mm0, %%mm1 \n\t"
        "paddusw %%mm1, %%mm0 \n\t"
        "pshufw $0x01, %%mm0, %%mm1 \n\t"
        "paddusw %%mm1, %%mm0 \n\t"
        "movd %%mm0, %0 \n\t"

WARPER8_16_SQ(hadamard8_diff_mmx, hadamard8_diff16_mmx)
WARPER8_16_SQ(hadamard8_diff_mmx2, hadamard8_diff16_mmx2)
static int ssd_int8_vs_int16_mmx(int8_t *pix1, int16_t *pix2, int size){
        "pxor %%mm4, %%mm4 \n"
        "movq (%2,%0), %%mm2 \n"
        "movq (%3,%0,2), %%mm0 \n"
        "movq 8(%3,%0,2), %%mm1 \n"
        "punpckhbw %%mm2, %%mm3 \n"
        "punpcklbw %%mm2, %%mm2 \n"
        "psraw $8, %%mm3 \n"
        "psraw $8, %%mm2 \n"
        "psubw %%mm3, %%mm1 \n"
        "psubw %%mm2, %%mm0 \n"
        "pmaddwd %%mm1, %%mm1 \n"
        "pmaddwd %%mm0, %%mm0 \n"
        "paddd %%mm1, %%mm4 \n"
        "paddd %%mm0, %%mm4 \n"
        "movq %%mm4, %%mm3 \n"
        "psrlq $32, %%mm3 \n"
        "paddd %%mm3, %%mm4 \n"
        :"r"(pix1), "r"(pix2)
#endif //CONFIG_ENCODERS

#define put_no_rnd_pixels8_mmx(a,b,c,d) put_pixels8_mmx(a,b,c,d)
#define put_no_rnd_pixels16_mmx(a,b,c,d) put_pixels16_mmx(a,b,c,d)
#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
    "paddw " #m4 ", " #m3 " \n\t" /* x1 */\
    "movq "MANGLE(ff_pw_20)", %%mm4 \n\t" /* 20 */\
    "pmullw " #m3 ", %%mm4 \n\t" /* 20x1 */\
    "movq "#in7", " #m3 " \n\t" /* d */\
    "movq "#in0", %%mm5 \n\t" /* D */\
    "paddw " #m3 ", %%mm5 \n\t" /* x4 */\
    "psubw %%mm5, %%mm4 \n\t" /* 20x1 - x4 */\
    "movq "#in1", %%mm5 \n\t" /* C */\
    "movq "#in2", %%mm6 \n\t" /* B */\
    "paddw " #m6 ", %%mm5 \n\t" /* x3 */\
    "paddw " #m5 ", %%mm6 \n\t" /* x2 */\
    "paddw %%mm6, %%mm6 \n\t" /* 2x2 */\
    "psubw %%mm6, %%mm5 \n\t" /* -2x2 + x3 */\
    "pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" /* -6x2 + 3x3 */\
    "paddw " #rnd ", %%mm4 \n\t" /* 20x1 - x4 + rounder */\
    "paddw %%mm4, %%mm5 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
    "psraw $5, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm5 \n\t"\
    OP(%%mm5, out, %%mm7, d)
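/* QPEL_V_LOW evaluates one output of the symmetric MPEG-4 quarter-pel
 * 8-tap filter (-1, 3, -6, 20, 20, -6, 3, -1): with the pair sums x1..x4
 * it computes (20*x1 - 6*x2 + 3*x3 - x4 + rounder) >> 5, the same
 * expression spelled out in the scalar 3DNow! fallback further below. */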
#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    "pxor %%mm7, %%mm7 \n\t"\
    "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
    "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
    "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
    "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
    "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
    "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
    "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
    "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
    "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
    "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
    "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
    "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
    "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
    "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
    "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
    "paddw %%mm3, %%mm5 \n\t" /* b */\
    "paddw %%mm2, %%mm6 \n\t" /* c */\
    "paddw %%mm5, %%mm5 \n\t" /* 2b */\
    "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
    "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
    "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
    "paddw %%mm4, %%mm0 \n\t" /* a */\
    "paddw %%mm1, %%mm5 \n\t" /* d */\
    "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
    "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
    "paddw %6, %%mm6 \n\t"\
    "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
    "psraw $5, %%mm0 \n\t"\
    "movq %%mm0, %5 \n\t"\
    /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
    "movq 5(%0), %%mm0 \n\t" /* FGHIJKLM */\
    "movq %%mm0, %%mm5 \n\t" /* FGHIJKLM */\
    "movq %%mm0, %%mm6 \n\t" /* FGHIJKLM */\
    "psrlq $8, %%mm0 \n\t" /* GHIJKLM0 */\
    "psrlq $16, %%mm5 \n\t" /* HIJKLM00 */\
    "punpcklbw %%mm7, %%mm0 \n\t" /* 0G0H0I0J */\
    "punpcklbw %%mm7, %%mm5 \n\t" /* 0H0I0J0K */\
    "paddw %%mm0, %%mm2 \n\t" /* b */\
    "paddw %%mm5, %%mm3 \n\t" /* c */\
    "paddw %%mm2, %%mm2 \n\t" /* 2b */\
    "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
    "movq %%mm6, %%mm2 \n\t" /* FGHIJKLM */\
    "psrlq $24, %%mm6 \n\t" /* IJKLM000 */\
    "punpcklbw %%mm7, %%mm2 \n\t" /* 0F0G0H0I */\
    "punpcklbw %%mm7, %%mm6 \n\t" /* 0I0J0K0L */\
    "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
    "paddw %%mm2, %%mm1 \n\t" /* a */\
    "paddw %%mm6, %%mm4 \n\t" /* d */\
    "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
    "psubw %%mm4, %%mm3 \n\t" /* - 6b +3c - d */\
    "paddw %6, %%mm1 \n\t"\
    "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b +3c - d */\
    "psraw $5, %%mm3 \n\t"\
    "movq %5, %%mm1 \n\t"\
    "packuswb %%mm3, %%mm1 \n\t"\
    OP_MMX2(%%mm1, (%1),%%mm4, q)\
    /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
\
    "movq 9(%0), %%mm1 \n\t" /* JKLMNOPQ */\
    "movq %%mm1, %%mm4 \n\t" /* JKLMNOPQ */\
    "movq %%mm1, %%mm3 \n\t" /* JKLMNOPQ */\
    "psrlq $8, %%mm1 \n\t" /* KLMNOPQ0 */\
    "psrlq $16, %%mm4 \n\t" /* LMNOPQ00 */\
    "punpcklbw %%mm7, %%mm1 \n\t" /* 0K0L0M0N */\
    "punpcklbw %%mm7, %%mm4 \n\t" /* 0L0M0N0O */\
    "paddw %%mm1, %%mm5 \n\t" /* b */\
    "paddw %%mm4, %%mm0 \n\t" /* c */\
    "paddw %%mm5, %%mm5 \n\t" /* 2b */\
    "psubw %%mm5, %%mm0 \n\t" /* c - 2b */\
    "movq %%mm3, %%mm5 \n\t" /* JKLMNOPQ */\
    "psrlq $24, %%mm3 \n\t" /* MNOPQ000 */\
    "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" /* 3c - 6b */\
    "punpcklbw %%mm7, %%mm3 \n\t" /* 0M0N0O0P */\
    "paddw %%mm3, %%mm2 \n\t" /* d */\
    "psubw %%mm2, %%mm0 \n\t" /* -6b + 3c - d */\
    "movq %%mm5, %%mm2 \n\t" /* JKLMNOPQ */\
    "punpcklbw %%mm7, %%mm2 \n\t" /* 0J0K0L0M */\
    "punpckhbw %%mm7, %%mm5 \n\t" /* 0N0O0P0Q */\
    "paddw %%mm2, %%mm6 \n\t" /* a */\
    "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
    "paddw %6, %%mm0 \n\t"\
    "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
    "psraw $5, %%mm0 \n\t"\
    /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
\
    "paddw %%mm5, %%mm3 \n\t" /* a */\
    "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0O0P0Q0Q */\
    "paddw %%mm4, %%mm6 \n\t" /* b */\
    "pshufw $0xBE, %%mm5, %%mm4 \n\t" /* 0P0Q0Q0P */\
    "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0Q0Q0P0O */\
    "paddw %%mm1, %%mm4 \n\t" /* c */\
    "paddw %%mm2, %%mm5 \n\t" /* d */\
    "paddw %%mm6, %%mm6 \n\t" /* 2b */\
    "psubw %%mm6, %%mm4 \n\t" /* c - 2b */\
    "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
    "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" /* 3c - 6b */\
    "psubw %%mm5, %%mm3 \n\t" /* -6b + 3c - d */\
    "paddw %6, %%mm4 \n\t"\
    "paddw %%mm3, %%mm4 \n\t" /* 20a - 6b + 3c - d */\
    "psraw $5, %%mm4 \n\t"\
    "packuswb %%mm4, %%mm0 \n\t"\
    OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
\
    : "+a"(src), "+c"(dst), "+m"(h)\
    : "d"((long)srcStride), "S"((long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    /* quick HACK, XXX FIXME MUST be optimized */\
    temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
    temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
    temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
    temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
    temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
    temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
    temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
    temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
    temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
    temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
    temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
    temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
    temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
    temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
    temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
    temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
\
    "movq (%0), %%mm0 \n\t"\
    "movq 8(%0), %%mm1 \n\t"\
    "paddw %2, %%mm0 \n\t"\
    "paddw %2, %%mm1 \n\t"\
    "psraw $5, %%mm0 \n\t"\
    "psraw $5, %%mm1 \n\t"\
    "packuswb %%mm1, %%mm0 \n\t"\
    OP_3DNOW(%%mm0, (%1), %%mm1, q)\
    "movq 16(%0), %%mm0 \n\t"\
    "movq 24(%0), %%mm1 \n\t"\
    "paddw %2, %%mm0 \n\t"\
    "paddw %2, %%mm1 \n\t"\
    "psraw $5, %%mm0 \n\t"\
    "psraw $5, %%mm1 \n\t"\
    "packuswb %%mm1, %%mm0 \n\t"\
    OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
    :: "r"(temp), "r"(dst), "m"(ROUNDER)\
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    "pxor %%mm7, %%mm7 \n\t"\
    "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
    "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
    "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
    "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
    "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
    "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
    "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
    "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
    "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
    "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
    "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
    "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
    "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
    "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
    "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
    "paddw %%mm3, %%mm5 \n\t" /* b */\
    "paddw %%mm2, %%mm6 \n\t" /* c */\
    "paddw %%mm5, %%mm5 \n\t" /* 2b */\
    "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
    "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
    "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
    "paddw %%mm4, %%mm0 \n\t" /* a */\
    "paddw %%mm1, %%mm5 \n\t" /* d */\
    "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
    "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
    "paddw %6, %%mm6 \n\t"\
    "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
    "psraw $5, %%mm0 \n\t"\
    /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
    "movd 5(%0), %%mm5 \n\t" /* FGHI */\
    "punpcklbw %%mm7, %%mm5 \n\t" /* 0F0G0H0I */\
    "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0G0H0I0I */\
    "paddw %%mm5, %%mm1 \n\t" /* a */\
    "paddw %%mm6, %%mm2 \n\t" /* b */\
    "pshufw $0xBE, %%mm5, %%mm6 \n\t" /* 0H0I0I0H */\
    "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0I0I0H0G */\
    "paddw %%mm6, %%mm3 \n\t" /* c */\
    "paddw %%mm5, %%mm4 \n\t" /* d */\
    "paddw %%mm2, %%mm2 \n\t" /* 2b */\
    "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
    "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
    "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
    "psubw %%mm4, %%mm3 \n\t" /* -6b + 3c - d */\
    "paddw %6, %%mm1 \n\t"\
    "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b + 3c - d */\
    "psraw $5, %%mm3 \n\t"\
    "packuswb %%mm3, %%mm0 \n\t"\
    OP_MMX2(%%mm0, (%1), %%mm4, q)\
\
    : "+a"(src), "+c"(dst), "+m"(h)\
    : "S"((long)srcStride), "D"((long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    /* quick HACK, XXX FIXME MUST be optimized */\
    temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
    temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
    temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
    temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
    temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
    temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
    temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
    temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
\
    "movq (%0), %%mm0 \n\t"\
    "movq 8(%0), %%mm1 \n\t"\
    "paddw %2, %%mm0 \n\t"\
    "paddw %2, %%mm1 \n\t"\
    "psraw $5, %%mm0 \n\t"\
    "psraw $5, %%mm1 \n\t"\
    "packuswb %%mm1, %%mm0 \n\t"\
    OP_3DNOW(%%mm0, (%1), %%mm1, q)\
    :: "r"(temp), "r"(dst), "m"(ROUNDER)\
#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[17*4];\
    uint64_t *temp_ptr= temp;\
\
    "pxor %%mm7, %%mm7 \n\t"\
    "movq (%0), %%mm0 \n\t"\
    "movq (%0), %%mm1 \n\t"\
    "movq 8(%0), %%mm2 \n\t"\
    "movq 8(%0), %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm0 \n\t"\
    "punpckhbw %%mm7, %%mm1 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "movq %%mm0, (%1) \n\t"\
    "movq %%mm1, 17*8(%1) \n\t"\
    "movq %%mm2, 2*17*8(%1) \n\t"\
    "movq %%mm3, 3*17*8(%1) \n\t"\
    : "+r" (src), "+r" (temp_ptr), "+r"(count)\
    : "r" ((long)srcStride)\
\
    /*FIXME reorder for speed */\
    /*"pxor %%mm7, %%mm7 \n\t"*/\
    "movq (%0), %%mm0 \n\t"\
    "movq 8(%0), %%mm1 \n\t"\
    "movq 16(%0), %%mm2 \n\t"\
    "movq 24(%0), %%mm3 \n\t"\
    QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
    QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
    QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
    QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
    QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
    QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
    QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
    QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
    QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
    QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
    QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
    QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
    QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
    QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
    QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
    QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
\
    "add $136, %0 \n\t"\
    : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
    : "r"((long)dstStride), "r"(2*(long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(long)dstStride)\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[9*2];\
    uint64_t *temp_ptr= temp;\
\
    "pxor %%mm7, %%mm7 \n\t"\
    "movq (%0), %%mm0 \n\t"\
    "movq (%0), %%mm1 \n\t"\
    "punpcklbw %%mm7, %%mm0 \n\t"\
    "punpckhbw %%mm7, %%mm1 \n\t"\
    "movq %%mm0, (%1) \n\t"\
    "movq %%mm1, 9*8(%1) \n\t"\
    : "+r" (src), "+r" (temp_ptr), "+r"(count)\
    : "r" ((long)srcStride)\
\
    /*FIXME reorder for speed */\
    /*"pxor %%mm7, %%mm7 \n\t"*/\
    "movq (%0), %%mm0 \n\t"\
    "movq 8(%0), %%mm1 \n\t"\
    "movq 16(%0), %%mm2 \n\t"\
    "movq 24(%0), %%mm3 \n\t"\
    QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
    QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
    QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
    QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
    QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
    QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
    QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
    QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
\
    : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
    : "r"((long)dstStride), "r"(2*(long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(long)dstStride)\
\
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels8_mmx(dst, src, stride, 8);\
\
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
\
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
\
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t * const half= (uint8_t*)temp;\
2214 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
2215 OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
2218 static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2220 uint8_t * const half= (uint8_t*)temp;\
2221 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
2222 OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
2225 static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2226 OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
2229 static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2231 uint8_t * const half= (uint8_t*)temp;\
2232 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
2233 OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
2235 static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2236 uint64_t half[8 + 9];\
2237 uint8_t * const halfH= ((uint8_t*)half) + 64;\
2238 uint8_t * const halfHV= ((uint8_t*)half);\
2239 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2240 put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
2241 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
2242 OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
2244 static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2245 uint64_t half[8 + 9];\
2246 uint8_t * const halfH= ((uint8_t*)half) + 64;\
2247 uint8_t * const halfHV= ((uint8_t*)half);\
2248 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2249 put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
2250 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
2251 OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
2253 static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2254 uint64_t half[8 + 9];\
2255 uint8_t * const halfH= ((uint8_t*)half) + 64;\
2256 uint8_t * const halfHV= ((uint8_t*)half);\
2257 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2258 put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
2259 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
2260 OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
2262 static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2263 uint64_t half[8 + 9];\
2264 uint8_t * const halfH= ((uint8_t*)half) + 64;\
2265 uint8_t * const halfHV= ((uint8_t*)half);\
2266 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2267 put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
2268 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
2269 OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
2271 static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2272 uint64_t half[8 + 9];\
2273 uint8_t * const halfH= ((uint8_t*)half) + 64;\
2274 uint8_t * const halfHV= ((uint8_t*)half);\
2275 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2276 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
2277 OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
2279 static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2280 uint64_t half[8 + 9];\
2281 uint8_t * const halfH= ((uint8_t*)half) + 64;\
2282 uint8_t * const halfHV= ((uint8_t*)half);\
2283 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2284 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
2285 OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
2287 static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2288 uint64_t half[8 + 9];\
2289 uint8_t * const halfH= ((uint8_t*)half);\
2290 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2291 put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
2292 OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
2294 static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2295 uint64_t half[8 + 9];\
2296 uint8_t * const halfH= ((uint8_t*)half);\
2297 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2298 put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
2299 OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
2301 static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2303 uint8_t * const halfH= ((uint8_t*)half);\
2304 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2305 OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
2307 static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
2308 OPNAME ## pixels16_mmx(dst, src, stride, 16);\
2311 static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2313 uint8_t * const half= (uint8_t*)temp;\
2314 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
2315 OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
2318 static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2319 OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
2322 static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2324 uint8_t * const half= (uint8_t*)temp;\
2325 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
2326 OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
2329 static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2331 uint8_t * const half= (uint8_t*)temp;\
2332 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
2333 OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
2336 static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2337 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
2340 static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2342 uint8_t * const half= (uint8_t*)temp;\
2343 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
2344 OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
2346 static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2347 uint64_t half[16*2 + 17*2];\
2348 uint8_t * const halfH= ((uint8_t*)half) + 256;\
2349 uint8_t * const halfHV= ((uint8_t*)half);\
2350 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2351 put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
2352 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
2353 OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
2355 static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2356 uint64_t half[16*2 + 17*2];\
2357 uint8_t * const halfH= ((uint8_t*)half) + 256;\
2358 uint8_t * const halfHV= ((uint8_t*)half);\
2359 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2360 put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
2361 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
2362 OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
2364 static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2365 uint64_t half[16*2 + 17*2];\
2366 uint8_t * const halfH= ((uint8_t*)half) + 256;\
2367 uint8_t * const halfHV= ((uint8_t*)half);\
2368 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2369 put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
2370 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
2371 OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
2373 static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2374 uint64_t half[16*2 + 17*2];\
2375 uint8_t * const halfH= ((uint8_t*)half) + 256;\
2376 uint8_t * const halfHV= ((uint8_t*)half);\
2377 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2378 put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
2379 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
2380 OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
2382 static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2383 uint64_t half[16*2 + 17*2];\
2384 uint8_t * const halfH= ((uint8_t*)half) + 256;\
2385 uint8_t * const halfHV= ((uint8_t*)half);\
2386 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2387 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
2388 OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
2390 static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2391 uint64_t half[16*2 + 17*2];\
2392 uint8_t * const halfH= ((uint8_t*)half) + 256;\
2393 uint8_t * const halfHV= ((uint8_t*)half);\
2394 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2395 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
2396 OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
2398 static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2399 uint64_t half[17*2];\
2400 uint8_t * const halfH= ((uint8_t*)half);\
2401 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2402 put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
2403 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
2405 static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2406 uint64_t half[17*2];\
2407 uint8_t * const halfH= ((uint8_t*)half);\
2408 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2409 put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
2410 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
2412 static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2413 uint64_t half[17*2];\
2414 uint8_t * const halfH= ((uint8_t*)half);\
2415 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2416 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
2419 #define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
2420 #define AVG_3DNOW_OP(a,b,temp, size) \
2421 "mov" #size " " #b ", " #temp " \n\t"\
2422 "pavgusb " #temp ", " #a " \n\t"\
2423 "mov" #size " " #a ", " #b " \n\t"
2424 #define AVG_MMX2_OP(a,b,temp, size) \
2425 "mov" #size " " #b ", " #temp " \n\t"\
2426 "pavgb " #temp ", " #a " \n\t"\
2427 "mov" #size " " #a ", " #b " \n\t"
2429 QPEL_BASE(put_ , ff_pw_16, _ , PUT_OP, PUT_OP)
2430 QPEL_BASE(avg_ , ff_pw_16, _ , AVG_MMX2_OP, AVG_3DNOW_OP)
2431 QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
2432 QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, 3dnow)
2433 QPEL_OP(avg_ , ff_pw_16, _ , AVG_3DNOW_OP, 3dnow)
2434 QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
2435 QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, mmx2)
2436 QPEL_OP(avg_ , ff_pw_16, _ , AVG_MMX2_OP, mmx2)
2437 QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
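/* A scalar sketch of what the QPEL macros above implement: MPEG-4
   quarter-pel interpolation uses the symmetric (20,-6,3,-1)/32 lowpass
   filter, with ff_pw_16 as the rounding bias for the put_/avg_ variants
   and ff_pw_15 for put_no_rnd_.  The v_lowpass versions run in two
   passes: pass 1 unpacks the source bytes to words into a temp buffer,
   pass 2 filters vertically with the edge taps mirrored (hence the
   repeated/clamped offsets near the ends of the QPEL_V_LOW sequences).
   The helper below is illustrative only; its name is not part of this
   file. */
#if 0
static inline uint8_t mpeg4_qpel_lowpass_sample(const uint8_t *src, int rnd)
{
    /* src points at the left of the two center samples; taps mirror
       around the center, cf. the temp[] fallback above */
    int v = (src[0] + src[1])*20 - (src[-1] + src[2])*6
          + (src[-2] + src[3])* 3 - (src[-3] + src[4]);
    v = (v + (rnd ? 16 : 15)) >> 5;
    return v < 0 ? 0 : v > 255 ? 255 : v;   /* packuswb saturation */
}
#endif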
2439 /***********************************/
2440 /* bilinear qpel: not compliant with any spec, only for -lavdopts fast */
2442 #define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\
2443 static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2444 OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\
2446 #define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\
2447 static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2448 OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\
2451 #define QPEL_2TAP(OPNAME, SIZE, MMX)\
2452 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\
2453 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\
2454 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\
2455 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\
2456 OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\
2457 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\
2458 OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\
2459 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\
2460 OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\
2461 static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2462 OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\
2464 static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2465 OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\
2467 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0, 1, 0)\
2468 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1, -1, 0)\
2469 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0, stride, 0)\
2470 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride, -stride, 0)\
2471 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0, stride, 1)\
2472 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1, stride, -1)\
2473 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride, -stride, 1)\
2474 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)\
2476 QPEL_2TAP(put_, 16, mmx2)
2477 QPEL_2TAP(avg_, 16, mmx2)
2478 QPEL_2TAP(put_, 8, mmx2)
2479 QPEL_2TAP(avg_, 8, mmx2)
2480 QPEL_2TAP(put_, 16, 3dnow)
2481 QPEL_2TAP(avg_, 16, 3dnow)
2482 QPEL_2TAP(put_, 8, 3dnow)
2483 QPEL_2TAP(avg_, 8, 3dnow)
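/* How the 2-tap table above maps qpel positions: the pure halfpel cases
   (mc20/mc02/mc22) reuse the pixelsN _x2/_y2/_xy2 averaging kernels,
   mc21/mc12 simply alias mc20/mc02, mc32/mc23 reuse the halfpel kernels
   on shifted source, and the remaining quarter positions go through the
   _l3 helper (defined elsewhere in this file), which blends three
   samples with roughly 2/1/1 weights.  The sketch below is an assumption
   about _l3's arithmetic; the exact rounding may differ: */
#if 0
static inline uint8_t qpel_2tap_l3_sample(const uint8_t *src, int o1, int o2)
{
    /* e.g. mc10 uses (S0,S1,S2) = (0,1,0): (3*src[0] + src[1] + 2) >> 2,
       a bilinear quarter-pel between src[0] and src[1] */
    return (2*src[0] + src[o1] + src[o2] + 2) >> 2;
}
#endif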
2487 static void just_return(void) { return; }
2490 #define SET_QPEL_FUNC(postfix1, postfix2) \
2491 c->put_ ## postfix1 = put_ ## postfix2;\
2492 c->put_no_rnd_ ## postfix1 = put_no_rnd_ ## postfix2;\
2493 c->avg_ ## postfix1 = avg_ ## postfix2;
2495 static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
2496 int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height){
2498 const int ix = ox>>(16+shift);
2499 const int iy = oy>>(16+shift);
2500 const int oxs = ox>>4;
2501 const int oys = oy>>4;
2502 const int dxxs = dxx>>4;
2503 const int dxys = dxy>>4;
2504 const int dyxs = dyx>>4;
2505 const int dyys = dyy>>4;
2506 const uint16_t r4[4] = {r,r,r,r};
2507 const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys};
2508 const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys};
2509 const uint64_t shift2 = 2*shift;
2510 uint8_t edge_buf[(h+1)*stride];
2513 const int dxw = (dxx-(1<<(16+shift)))*(w-1);
2514 const int dyh = (dyy-(1<<(16+shift)))*(h-1);
2515 const int dxh = dxy*(h-1);
2516 const int dyw = dyx*(w-1);
2517 if( // non-constant fullpel offset (3% of blocks)
2518 (ox^(ox+dxw) | ox^(ox+dxh) | ox^(ox+dxw+dxh) |
2519 oy^(oy+dyw) | oy^(oy+dyh) | oy^(oy+dyw+dyh)) >> (16+shift)
2520 // uses more than 16 bits of subpel mv (only at huge resolution)
2521 || (dxx|dxy|dyx|dyy)&15 )
2523 //FIXME could still use mmx for some of the rows
2524 ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height);
2528 src += ix + iy*stride;
2529 if( (unsigned)ix >= width-w ||
2530 (unsigned)iy >= height-h )
2532 ff_emulated_edge_mc(edge_buf, src, stride, w+1, h+1, ix, iy, width, height);
2537 "movd %0, %%mm6 \n\t"
2538 "pxor %%mm7, %%mm7 \n\t"
2539 "punpcklwd %%mm6, %%mm6 \n\t"
2540 "punpcklwd %%mm6, %%mm6 \n\t"
2544 for(x=0; x<w; x+=4){
2545 uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0),
2546 oxs - dxys + dxxs*(x+1),
2547 oxs - dxys + dxxs*(x+2),
2548 oxs - dxys + dxxs*(x+3) };
2549 uint16_t dy4[4] = { oys - dyys + dyxs*(x+0),
2550 oys - dyys + dyxs*(x+1),
2551 oys - dyys + dyxs*(x+2),
2552 oys - dyys + dyxs*(x+3) };
2556 "movq %0, %%mm4 \n\t"
2557 "movq %1, %%mm5 \n\t"
2558 "paddw %2, %%mm4 \n\t"
2559 "paddw %3, %%mm5 \n\t"
2560 "movq %%mm4, %0 \n\t"
2561 "movq %%mm5, %1 \n\t"
2562 "psrlw $12, %%mm4 \n\t"
2563 "psrlw $12, %%mm5 \n\t"
2564 : "+m"(*dx4), "+m"(*dy4)
2565 : "m"(*dxy4), "m"(*dyy4)
2569 "movq %%mm6, %%mm2 \n\t"
2570 "movq %%mm6, %%mm1 \n\t"
2571 "psubw %%mm4, %%mm2 \n\t"
2572 "psubw %%mm5, %%mm1 \n\t"
2573 "movq %%mm2, %%mm0 \n\t"
2574 "movq %%mm4, %%mm3 \n\t"
2575 "pmullw %%mm1, %%mm0 \n\t" // (s-dx)*(s-dy)
2576 "pmullw %%mm5, %%mm3 \n\t" // dx*dy
2577 "pmullw %%mm5, %%mm2 \n\t" // (s-dx)*dy
2578 "pmullw %%mm4, %%mm1 \n\t" // dx*(s-dy)
2580 "movd %4, %%mm5 \n\t"
2581 "movd %3, %%mm4 \n\t"
2582 "punpcklbw %%mm7, %%mm5 \n\t"
2583 "punpcklbw %%mm7, %%mm4 \n\t"
2584 "pmullw %%mm5, %%mm3 \n\t" // src[1,1] * dx*dy
2585 "pmullw %%mm4, %%mm2 \n\t" // src[0,1] * (s-dx)*dy
2587 "movd %2, %%mm5 \n\t"
2588 "movd %1, %%mm4 \n\t"
2589 "punpcklbw %%mm7, %%mm5 \n\t"
2590 "punpcklbw %%mm7, %%mm4 \n\t"
2591 "pmullw %%mm5, %%mm1 \n\t" // src[1,0] * dx*(s-dy)
2592 "pmullw %%mm4, %%mm0 \n\t" // src[0,0] * (s-dx)*(s-dy)
2593 "paddw %5, %%mm1 \n\t"
2594 "paddw %%mm3, %%mm2 \n\t"
2595 "paddw %%mm1, %%mm0 \n\t"
2596 "paddw %%mm2, %%mm0 \n\t"
2598 "psrlw %6, %%mm0 \n\t"
2599 "packuswb %%mm0, %%mm0 \n\t"
2600 "movd %%mm0, %0 \n\t"
2602 : "=m"(dst[x+y*stride])
2603 : "m"(src[0]), "m"(src[1]),
2604 "m"(src[stride]), "m"(src[stride+1]),
2605 "m"(*r4), "m"(shift2)
2613 #ifdef CONFIG_ENCODERS
2614 static int try_8x8basis_mmx(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale){
2617 assert(FFABS(scale) < 256);
2618 scale<<= 16 + 1 - BASIS_SHIFT + RECON_SHIFT;
2621 "pcmpeqw %%mm6, %%mm6 \n\t" // -1w
2622 "psrlw $15, %%mm6 \n\t" // 1w
2623 "pxor %%mm7, %%mm7 \n\t"
2624 "movd %4, %%mm5 \n\t"
2625 "punpcklwd %%mm5, %%mm5 \n\t"
2626 "punpcklwd %%mm5, %%mm5 \n\t"
2628 "movq (%1, %0), %%mm0 \n\t"
2629 "movq 8(%1, %0), %%mm1 \n\t"
2630 "pmulhw %%mm5, %%mm0 \n\t"
2631 "pmulhw %%mm5, %%mm1 \n\t"
2632 "paddw %%mm6, %%mm0 \n\t"
2633 "paddw %%mm6, %%mm1 \n\t"
2634 "psraw $1, %%mm0 \n\t"
2635 "psraw $1, %%mm1 \n\t"
2636 "paddw (%2, %0), %%mm0 \n\t"
2637 "paddw 8(%2, %0), %%mm1 \n\t"
2638 "psraw $6, %%mm0 \n\t"
2639 "psraw $6, %%mm1 \n\t"
2640 "pmullw (%3, %0), %%mm0 \n\t"
2641 "pmullw 8(%3, %0), %%mm1 \n\t"
2642 "pmaddwd %%mm0, %%mm0 \n\t"
2643 "pmaddwd %%mm1, %%mm1 \n\t"
2644 "paddd %%mm1, %%mm0 \n\t"
2645 "psrld $4, %%mm0 \n\t"
2646 "paddd %%mm0, %%mm7 \n\t"
2648 "cmp $128, %0 \n\t" //FIXME optimize & bench
2650 "movq %%mm7, %%mm6 \n\t"
2651 "psrlq $32, %%mm7 \n\t"
2652 "paddd %%mm6, %%mm7 \n\t"
2653 "psrld $2, %%mm7 \n\t"
2654 "movd %%mm7, %0 \n\t"
2657 : "r"(basis), "r"(rem), "r"(weight), "g"(scale)
2662 static void add_8x8basis_mmx(int16_t rem[64], int16_t basis[64], int scale){
2665 if(FFABS(scale) < 256){
2666 scale<<= 16 + 1 - BASIS_SHIFT + RECON_SHIFT;
2668 "pcmpeqw %%mm6, %%mm6 \n\t" // -1w
2669 "psrlw $15, %%mm6 \n\t" // 1w
2670 "movd %3, %%mm5 \n\t"
2671 "punpcklwd %%mm5, %%mm5 \n\t"
2672 "punpcklwd %%mm5, %%mm5 \n\t"
2674 "movq (%1, %0), %%mm0 \n\t"
2675 "movq 8(%1, %0), %%mm1 \n\t"
2676 "pmulhw %%mm5, %%mm0 \n\t"
2677 "pmulhw %%mm5, %%mm1 \n\t"
2678 "paddw %%mm6, %%mm0 \n\t"
2679 "paddw %%mm6, %%mm1 \n\t"
2680 "psraw $1, %%mm0 \n\t"
2681 "psraw $1, %%mm1 \n\t"
2682 "paddw (%2, %0), %%mm0 \n\t"
2683 "paddw 8(%2, %0), %%mm1 \n\t"
2684 "movq %%mm0, (%2, %0) \n\t"
2685 "movq %%mm1, 8(%2, %0) \n\t"
2687 "cmp $128, %0 \n\t" //FIXME optimize & bench
2691 : "r"(basis), "r"(rem), "g"(scale)
2694 for(i=0; i<8*8; i++){
2695 rem[i] += (basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT);
2699 #endif /* CONFIG_ENCODERS */
2701 #define PREFETCH(name, op) \
2702 static void name(void *mem, int stride, int h){\
2703 const uint8_t *p= mem;\
2705 asm volatile(#op" %0" :: "m"(*p));\
2709 PREFETCH(prefetch_mmx2, prefetcht0)
2710 PREFETCH(prefetch_3dnow, prefetch)
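/* Roughly what PREFETCH(prefetch_mmx2, prefetcht0) expands to (the loop
   lines are elided above) -- one prefetch per row; the function name
   suffix here is ad hoc: */
#if 0
static void prefetch_mmx2_sketch(void *mem, int stride, int h)
{
    const uint8_t *p = mem;
    do {
        asm volatile("prefetcht0 %0" :: "m"(*p));
        p += stride;
    } while(--h);
}
#endif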
2713 #include "h264dsp_mmx.c"
2716 void ff_cavsdsp_init_mmx2(DSPContext* c, AVCodecContext *avctx);
2718 void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
2719 put_pixels8_mmx(dst, src, stride, 8);
2721 void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
2722 avg_pixels8_mmx(dst, src, stride, 8);
2724 void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
2725 put_pixels16_mmx(dst, src, stride, 16);
2727 void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
2728 avg_pixels16_mmx(dst, src, stride, 16);
2731 /* external functions, from idct_mmx.c */
2732 void ff_mmx_idct(DCTELEM *block);
2733 void ff_mmxext_idct(DCTELEM *block);
2735 void ff_vp3_idct_sse2(int16_t *input_data);
2736 void ff_vp3_idct_mmx(int16_t *data);
2737 void ff_vp3_dsp_init_mmx(void);
2739 /* XXX: those functions should be suppressed ASAP when all IDCTs are converted */
2742 static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
2744 ff_mmx_idct (block);
2745 put_pixels_clamped_mmx(block, dest, line_size);
2747 static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
2749 ff_mmx_idct (block);
2750 add_pixels_clamped_mmx(block, dest, line_size);
2752 static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
2754 ff_mmxext_idct (block);
2755 put_pixels_clamped_mmx(block, dest, line_size);
2757 static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
2759 ff_mmxext_idct (block);
2760 add_pixels_clamped_mmx(block, dest, line_size);
2763 static void ff_vp3_idct_put_sse2(uint8_t *dest, int line_size, DCTELEM *block)
2765 ff_vp3_idct_sse2(block);
2766 put_signed_pixels_clamped_mmx(block, dest, line_size);
2768 static void ff_vp3_idct_add_sse2(uint8_t *dest, int line_size, DCTELEM *block)
2770 ff_vp3_idct_sse2(block);
2771 add_pixels_clamped_mmx(block, dest, line_size);
2773 static void ff_vp3_idct_put_mmx(uint8_t *dest, int line_size, DCTELEM *block)
2775 ff_vp3_idct_mmx(block);
2776 put_signed_pixels_clamped_mmx(block, dest, line_size);
2778 static void ff_vp3_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block)
2780 ff_vp3_idct_mmx(block);
2781 add_pixels_clamped_mmx(block, dest, line_size);
2783 static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
2785 ff_idct_xvid_mmx (block);
2786 put_pixels_clamped_mmx(block, dest, line_size);
2788 static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
2790 ff_idct_xvid_mmx (block);
2791 add_pixels_clamped_mmx(block, dest, line_size);
2793 static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
2795 ff_idct_xvid_mmx2 (block);
2796 put_pixels_clamped_mmx(block, dest, line_size);
2798 static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
2800 ff_idct_xvid_mmx2 (block);
2801 add_pixels_clamped_mmx(block, dest, line_size);
2804 static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
2807 asm volatile("pxor %%mm7, %%mm7":);
2808 for(i=0; i<blocksize; i+=2) {
2810 "movq %0, %%mm0 \n\t"
2811 "movq %1, %%mm1 \n\t"
2812 "movq %%mm0, %%mm2 \n\t"
2813 "movq %%mm1, %%mm3 \n\t"
2814 "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0
2815 "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0
2816 "pslld $31, %%mm2 \n\t" // keep only the sign bit
2817 "pxor %%mm2, %%mm1 \n\t"
2818 "movq %%mm3, %%mm4 \n\t"
2819 "pand %%mm1, %%mm3 \n\t"
2820 "pandn %%mm1, %%mm4 \n\t"
2821 "pfadd %%mm0, %%mm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
2822 "pfsub %%mm4, %%mm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
2823 "movq %%mm3, %1 \n\t"
2824 "movq %%mm0, %0 \n\t"
2825 :"+m"(mag[i]), "+m"(ang[i])
2829 asm volatile("femms");
2831 static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
2836 "movaps %0, %%xmm5 \n\t"
2837 ::"m"(ff_pdw_80000000[0])
2839 for(i=0; i<blocksize; i+=4) {
2841 "movaps %0, %%xmm0 \n\t"
2842 "movaps %1, %%xmm1 \n\t"
2843 "xorps %%xmm2, %%xmm2 \n\t"
2844 "xorps %%xmm3, %%xmm3 \n\t"
2845 "cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0
2846 "cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0
2847 "andps %%xmm5, %%xmm2 \n\t" // keep only the sign bit
2848 "xorps %%xmm2, %%xmm1 \n\t"
2849 "movaps %%xmm3, %%xmm4 \n\t"
2850 "andps %%xmm1, %%xmm3 \n\t"
2851 "andnps %%xmm1, %%xmm4 \n\t"
2852 "addps %%xmm0, %%xmm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
2853 "subps %%xmm4, %%xmm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
2854 "movaps %%xmm3, %1 \n\t"
2855 "movaps %%xmm0, %0 \n\t"
2856 :"+m"(mag[i]), "+m"(ang[i])
2862 static void vector_fmul_3dnow(float *dst, const float *src, int len){
2866 "movq (%1,%0), %%mm0 \n\t"
2867 "movq 8(%1,%0), %%mm1 \n\t"
2868 "pfmul (%2,%0), %%mm0 \n\t"
2869 "pfmul 8(%2,%0), %%mm1 \n\t"
2870 "movq %%mm0, (%1,%0) \n\t"
2871 "movq %%mm1, 8(%1,%0) \n\t"
2880 static void vector_fmul_sse(float *dst, const float *src, int len){
2884 "movaps (%1,%0), %%xmm0 \n\t"
2885 "movaps 16(%1,%0), %%xmm1 \n\t"
2886 "mulps (%2,%0), %%xmm0 \n\t"
2887 "mulps 16(%2,%0), %%xmm1 \n\t"
2888 "movaps %%xmm0, (%1,%0) \n\t"
2889 "movaps %%xmm1, 16(%1,%0) \n\t"
2898 static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){
2902 "pswapd 8(%1), %%mm0 \n\t"
2903 "pswapd (%1), %%mm1 \n\t"
2904 "pfmul (%3,%0), %%mm0 \n\t"
2905 "pfmul 8(%3,%0), %%mm1 \n\t"
2906 "movq %%mm0, (%2,%0) \n\t"
2907 "movq %%mm1, 8(%2,%0) \n\t"
2911 :"+r"(i), "+r"(src1)
2912 :"r"(dst), "r"(src0)
2914 asm volatile("femms");
2916 static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){
2920 "movaps 16(%1), %%xmm0 \n\t"
2921 "movaps (%1), %%xmm1 \n\t"
2922 "shufps $0x1b, %%xmm0, %%xmm0 \n\t"
2923 "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
2924 "mulps (%3,%0), %%xmm0 \n\t"
2925 "mulps 16(%3,%0), %%xmm1 \n\t"
2926 "movaps %%xmm0, (%2,%0) \n\t"
2927 "movaps %%xmm1, 16(%2,%0) \n\t"
2931 :"+r"(i), "+r"(src1)
2932 :"r"(dst), "r"(src0)
2936 static void vector_fmul_add_add_3dnow(float *dst, const float *src0, const float *src1,
2937 const float *src2, int src3, int len, int step){
2939 if(step == 2 && src3 == 0){
2943 "movq (%2,%0), %%mm0 \n\t"
2944 "movq 8(%2,%0), %%mm1 \n\t"
2945 "pfmul (%3,%0), %%mm0 \n\t"
2946 "pfmul 8(%3,%0), %%mm1 \n\t"
2947 "pfadd (%4,%0), %%mm0 \n\t"
2948 "pfadd 8(%4,%0), %%mm1 \n\t"
2949 "movd %%mm0, (%1) \n\t"
2950 "movd %%mm1, 16(%1) \n\t"
2951 "psrlq $32, %%mm0 \n\t"
2952 "psrlq $32, %%mm1 \n\t"
2953 "movd %%mm0, 8(%1) \n\t"
2954 "movd %%mm1, 24(%1) \n\t"
2959 :"r"(src0), "r"(src1), "r"(src2)
2963 else if(step == 1 && src3 == 0){
2966 "movq (%2,%0), %%mm0 \n\t"
2967 "movq 8(%2,%0), %%mm1 \n\t"
2968 "pfmul (%3,%0), %%mm0 \n\t"
2969 "pfmul 8(%3,%0), %%mm1 \n\t"
2970 "pfadd (%4,%0), %%mm0 \n\t"
2971 "pfadd 8(%4,%0), %%mm1 \n\t"
2972 "movq %%mm0, (%1,%0) \n\t"
2973 "movq %%mm1, 8(%1,%0) \n\t"
2977 :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
2982 ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
2983 asm volatile("femms");
2985 static void vector_fmul_add_add_sse(float *dst, const float *src0, const float *src1,
2986 const float *src2, int src3, int len, int step){
2988 if(step == 2 && src3 == 0){
2992 "movaps (%2,%0), %%xmm0 \n\t"
2993 "movaps 16(%2,%0), %%xmm1 \n\t"
2994 "mulps (%3,%0), %%xmm0 \n\t"
2995 "mulps 16(%3,%0), %%xmm1 \n\t"
2996 "addps (%4,%0), %%xmm0 \n\t"
2997 "addps 16(%4,%0), %%xmm1 \n\t"
2998 "movss %%xmm0, (%1) \n\t"
2999 "movss %%xmm1, 32(%1) \n\t"
3000 "movhlps %%xmm0, %%xmm2 \n\t"
3001 "movhlps %%xmm1, %%xmm3 \n\t"
3002 "movss %%xmm2, 16(%1) \n\t"
3003 "movss %%xmm3, 48(%1) \n\t"
3004 "shufps $0xb1, %%xmm0, %%xmm0 \n\t"
3005 "shufps $0xb1, %%xmm1, %%xmm1 \n\t"
3006 "movss %%xmm0, 8(%1) \n\t"
3007 "movss %%xmm1, 40(%1) \n\t"
3008 "movhlps %%xmm0, %%xmm2 \n\t"
3009 "movhlps %%xmm1, %%xmm3 \n\t"
3010 "movss %%xmm2, 24(%1) \n\t"
3011 "movss %%xmm3, 56(%1) \n\t"
3016 :"r"(src0), "r"(src1), "r"(src2)
3020 else if(step == 1 && src3 == 0){
3023 "movaps (%2,%0), %%xmm0 \n\t"
3024 "movaps 16(%2,%0), %%xmm1 \n\t"
3025 "mulps (%3,%0), %%xmm0 \n\t"
3026 "mulps 16(%3,%0), %%xmm1 \n\t"
3027 "addps (%4,%0), %%xmm0 \n\t"
3028 "addps 16(%4,%0), %%xmm1 \n\t"
3029 "movaps %%xmm0, (%1,%0) \n\t"
3030 "movaps %%xmm1, 16(%1,%0) \n\t"
3034 :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
3039 ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
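/* Scalar equivalent (cf. ff_vector_fmul_add_add_c), which is also the
   fallback taken above for the step/src3 combinations the SIMD paths do
   not handle: */
#if 0
for(i=0; i<len; i++)
    dst[i*step] = src0[i] * src1[i] + src2[i] + src3;
#endif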
3042 static void float_to_int16_3dnow(int16_t *dst, const float *src, int len){
3043 // not bit-exact: pf2id uses different rounding than C and SSE
3045 for(i=0; i<len; i+=4) {
3047 "pf2id %1, %%mm0 \n\t"
3048 "pf2id %2, %%mm1 \n\t"
3049 "packssdw %%mm1, %%mm0 \n\t"
3050 "movq %%mm0, %0 \n\t"
3052 :"m"(src[i]), "m"(src[i+2])
3055 asm volatile("femms");
3057 static void float_to_int16_sse(int16_t *dst, const float *src, int len){
3059 for(i=0; i<len; i+=4) {
3061 "cvtps2pi %1, %%mm0 \n\t"
3062 "cvtps2pi %2, %%mm1 \n\t"
3063 "packssdw %%mm1, %%mm0 \n\t"
3064 "movq %%mm0, %0 \n\t"
3066 :"m"(src[i]), "m"(src[i+2])
3069 asm volatile("emms");
3072 #ifdef CONFIG_SNOW_DECODER
3073 extern void ff_snow_horizontal_compose97i_sse2(DWTELEM *b, int width);
3074 extern void ff_snow_horizontal_compose97i_mmx(DWTELEM *b, int width);
3075 extern void ff_snow_vertical_compose97i_sse2(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width);
3076 extern void ff_snow_vertical_compose97i_mmx(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width);
3077 extern void ff_snow_inner_add_yblock_sse2(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
3078 int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);
3079 extern void ff_snow_inner_add_yblock_mmx(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
3080 int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);
3083 void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
3085 mm_flags = mm_support();
3087 if (avctx->dsp_mask) {
3088 if (avctx->dsp_mask & FF_MM_FORCE)
3089 mm_flags |= (avctx->dsp_mask & 0xffff);
3091 mm_flags &= ~(avctx->dsp_mask & 0xffff);
3095 av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
3096 if (mm_flags & MM_MMX)
3097 av_log(avctx, AV_LOG_INFO, " mmx");
3098 if (mm_flags & MM_MMXEXT)
3099 av_log(avctx, AV_LOG_INFO, " mmxext");
3100 if (mm_flags & MM_3DNOW)
3101 av_log(avctx, AV_LOG_INFO, " 3dnow");
3102 if (mm_flags & MM_SSE)
3103 av_log(avctx, AV_LOG_INFO, " sse");
3104 if (mm_flags & MM_SSE2)
3105 av_log(avctx, AV_LOG_INFO, " sse2");
3106 av_log(avctx, AV_LOG_INFO, "\n");
3109 if (mm_flags & MM_MMX) {
3110 const int idct_algo= avctx->idct_algo;
3112 #ifdef CONFIG_ENCODERS
3113 const int dct_algo = avctx->dct_algo;
3114 if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){
3115 if(mm_flags & MM_SSE2){
3116 c->fdct = ff_fdct_sse2;
3117 }else if(mm_flags & MM_MMXEXT){
3118 c->fdct = ff_fdct_mmx2;
3120 c->fdct = ff_fdct_mmx;
3123 #endif //CONFIG_ENCODERS
3124 if(avctx->lowres==0){
3125 if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
3126 c->idct_put= ff_simple_idct_put_mmx;
3127 c->idct_add= ff_simple_idct_add_mmx;
3128 c->idct = ff_simple_idct_mmx;
3129 c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
3131 }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
3132 if(mm_flags & MM_MMXEXT){
3133 c->idct_put= ff_libmpeg2mmx2_idct_put;
3134 c->idct_add= ff_libmpeg2mmx2_idct_add;
3135 c->idct = ff_mmxext_idct;
3137 c->idct_put= ff_libmpeg2mmx_idct_put;
3138 c->idct_add= ff_libmpeg2mmx_idct_add;
3139 c->idct = ff_mmx_idct;
3141 c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
3143 }else if(idct_algo==FF_IDCT_VP3 &&
3144 avctx->codec->id!=CODEC_ID_THEORA &&
3145 !(avctx->flags & CODEC_FLAG_BITEXACT)){
3146 if(mm_flags & MM_SSE2){
3147 c->idct_put= ff_vp3_idct_put_sse2;
3148 c->idct_add= ff_vp3_idct_add_sse2;
3149 c->idct = ff_vp3_idct_sse2;
3150 c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
3152 ff_vp3_dsp_init_mmx();
3153 c->idct_put= ff_vp3_idct_put_mmx;
3154 c->idct_add= ff_vp3_idct_add_mmx;
3155 c->idct = ff_vp3_idct_mmx;
3156 c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
3158 }else if(idct_algo==FF_IDCT_CAVS){
3159 c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
3160 }else if(idct_algo==FF_IDCT_XVIDMMX){
3161 if(mm_flags & MM_MMXEXT){
3162 c->idct_put= ff_idct_xvid_mmx2_put;
3163 c->idct_add= ff_idct_xvid_mmx2_add;
3164 c->idct = ff_idct_xvid_mmx2;
3166 c->idct_put= ff_idct_xvid_mmx_put;
3167 c->idct_add= ff_idct_xvid_mmx_add;
3168 c->idct = ff_idct_xvid_mmx;
3173 #ifdef CONFIG_ENCODERS
3174 c->get_pixels = get_pixels_mmx;
3175 c->diff_pixels = diff_pixels_mmx;
3176 #endif //CONFIG_ENCODERS
3177 c->put_pixels_clamped = put_pixels_clamped_mmx;
3178 c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx;
3179 c->add_pixels_clamped = add_pixels_clamped_mmx;
3180 c->clear_blocks = clear_blocks_mmx;
3181 #ifdef CONFIG_ENCODERS
3182 c->pix_sum = pix_sum16_mmx;
3183 #endif //CONFIG_ENCODERS
3185 c->put_pixels_tab[0][0] = put_pixels16_mmx;
3186 c->put_pixels_tab[0][1] = put_pixels16_x2_mmx;
3187 c->put_pixels_tab[0][2] = put_pixels16_y2_mmx;
3188 c->put_pixels_tab[0][3] = put_pixels16_xy2_mmx;
3190 c->put_no_rnd_pixels_tab[0][0] = put_pixels16_mmx;
3191 c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx;
3192 c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx;
3193 c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_mmx;
3195 c->avg_pixels_tab[0][0] = avg_pixels16_mmx;
3196 c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx;
3197 c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx;
3198 c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx;
3200 c->avg_no_rnd_pixels_tab[0][0] = avg_no_rnd_pixels16_mmx;
3201 c->avg_no_rnd_pixels_tab[0][1] = avg_no_rnd_pixels16_x2_mmx;
3202 c->avg_no_rnd_pixels_tab[0][2] = avg_no_rnd_pixels16_y2_mmx;
3203 c->avg_no_rnd_pixels_tab[0][3] = avg_no_rnd_pixels16_xy2_mmx;
3205 c->put_pixels_tab[1][0] = put_pixels8_mmx;
3206 c->put_pixels_tab[1][1] = put_pixels8_x2_mmx;
3207 c->put_pixels_tab[1][2] = put_pixels8_y2_mmx;
3208 c->put_pixels_tab[1][3] = put_pixels8_xy2_mmx;
3210 c->put_no_rnd_pixels_tab[1][0] = put_pixels8_mmx;
3211 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx;
3212 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx;
3213 c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_mmx;
3215 c->avg_pixels_tab[1][0] = avg_pixels8_mmx;
3216 c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx;
3217 c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx;
3218 c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx;
3220 c->avg_no_rnd_pixels_tab[1][0] = avg_no_rnd_pixels8_mmx;
3221 c->avg_no_rnd_pixels_tab[1][1] = avg_no_rnd_pixels8_x2_mmx;
3222 c->avg_no_rnd_pixels_tab[1][2] = avg_no_rnd_pixels8_y2_mmx;
3223 c->avg_no_rnd_pixels_tab[1][3] = avg_no_rnd_pixels8_xy2_mmx;
3227 c->add_bytes= add_bytes_mmx;
3228 #ifdef CONFIG_ENCODERS
3229 c->diff_bytes= diff_bytes_mmx;
3231 c->hadamard8_diff[0]= hadamard8_diff16_mmx;
3232 c->hadamard8_diff[1]= hadamard8_diff_mmx;
3234 c->pix_norm1 = pix_norm1_mmx;
3235 c->sse[0] = (mm_flags & MM_SSE2) ? sse16_sse2 : sse16_mmx;
3236 c->sse[1] = sse8_mmx;
3237 c->vsad[4]= vsad_intra16_mmx;
3239 c->nsse[0] = nsse16_mmx;
3240 c->nsse[1] = nsse8_mmx;
3241 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
3242 c->vsad[0] = vsad16_mmx;
3245 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
3246 c->try_8x8basis= try_8x8basis_mmx;
3248 c->add_8x8basis= add_8x8basis_mmx;
3250 c->ssd_int8_vs_int16 = ssd_int8_vs_int16_mmx;
3252 #endif //CONFIG_ENCODERS
3254 c->h263_v_loop_filter= h263_v_loop_filter_mmx;
3255 c->h263_h_loop_filter= h263_h_loop_filter_mmx;
3256 c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx;
3257 c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_mmx;
3259 c->h264_idct_dc_add=
3260 c->h264_idct_add= ff_h264_idct_add_mmx;
3261 c->h264_idct8_dc_add=
3262 c->h264_idct8_add= ff_h264_idct8_add_mmx;
3264 if (mm_flags & MM_MMXEXT) {
3265 c->prefetch = prefetch_mmx2;
3267 c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
3268 c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;
3270 c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
3271 c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
3272 c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;
3274 c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
3275 c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;
3277 c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
3278 c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
3279 c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;
3281 #ifdef CONFIG_ENCODERS
3282 c->hadamard8_diff[0]= hadamard8_diff16_mmx2;
3283 c->hadamard8_diff[1]= hadamard8_diff_mmx2;
3284 c->vsad[4]= vsad_intra16_mmx2;
3285 #endif //CONFIG_ENCODERS
3287 c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2;
3288 c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2;
3290 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
3291 c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
3292 c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
3293 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
3294 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
3295 c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
3296 c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
3297 #ifdef CONFIG_ENCODERS
3298 c->vsad[0] = vsad16_mmx2;
3299 #endif //CONFIG_ENCODERS
3303 SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_mmx2)
3304 SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_mmx2)
3305 SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_mmx2)
3306 SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_mmx2)
3307 SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_mmx2)
3308 SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_mmx2)
3309 SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_mmx2)
3310 SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_mmx2)
3311 SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_mmx2)
3312 SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_mmx2)
3313 SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_mmx2)
3314 SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_mmx2)
3315 SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_mmx2)
3316 SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_mmx2)
3317 SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_mmx2)
3318 SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_mmx2)
3319 SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_mmx2)
3320 SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_mmx2)
3321 SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_mmx2)
3322 SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_mmx2)
3323 SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_mmx2)
3324 SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_mmx2)
3325 SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_mmx2)
3326 SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_mmx2)
3327 SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_mmx2)
3328 SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_mmx2)
3329 SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_mmx2)
3330 SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_mmx2)
3331 SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_mmx2)
3332 SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_mmx2)
3333 SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_mmx2)
3334 SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_mmx2)
3338 #define dspfunc(PFX, IDX, NUM) \
3339 c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_mmx2; \
3340 c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_mmx2; \
3341 c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_mmx2; \
3342 c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_mmx2; \
3343 c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_mmx2; \
3344 c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_mmx2; \
3345 c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_mmx2; \
3346 c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_mmx2; \
3347 c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_mmx2; \
3348 c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_mmx2; \
3349 c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_mmx2; \
3350 c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_mmx2; \
3351 c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_mmx2; \
3352 c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_mmx2; \
3353 c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_mmx2; \
3354 c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_mmx2
3356 dspfunc(put_h264_qpel, 0, 16);
3357 dspfunc(put_h264_qpel, 1, 8);
3358 dspfunc(put_h264_qpel, 2, 4);
3359 dspfunc(avg_h264_qpel, 0, 16);
3360 dspfunc(avg_h264_qpel, 1, 8);
3361 dspfunc(avg_h264_qpel, 2, 4);
3363 dspfunc(put_2tap_qpel, 0, 16);
3364 dspfunc(put_2tap_qpel, 1, 8);
3365 dspfunc(avg_2tap_qpel, 0, 16);
3366 dspfunc(avg_2tap_qpel, 1, 8);
3369 c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2;
3370 c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_mmx2;
3371 c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_mmx2;
3372 c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_mmx2;
3373 c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_mmx2;
3374 c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_mmx2;
3375 c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_mmx2;
3376 c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_mmx2;
3377 c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_mmx2;
3378 c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_mmx2;
3379 c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2;
3381 c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2;
3382 c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2;
3383 c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2;
3384 c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2;
3385 c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2;
3386 c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2;
3387 c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2;
3388 c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2;
3390 c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2;
3391 c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2;
3392 c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2;
3393 c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2;
3394 c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2;
3395 c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2;
3396 c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
3397 c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;
3399 #ifdef CONFIG_CAVS_DECODER
3400 ff_cavsdsp_init_mmx2(c, avctx);
3403 #ifdef CONFIG_ENCODERS
3404 c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_mmx2;
3405 #endif //CONFIG_ENCODERS
3406 } else if (mm_flags & MM_3DNOW) {
3407 c->prefetch = prefetch_3dnow;
3409 c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
3410 c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;
3412 c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
3413 c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
3414 c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;
3416 c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
3417 c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;
3419 c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
3420 c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
3421 c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;
3423 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
3424 c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
3425 c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
3426 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
3427 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
3428 c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
3429 c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
3432 SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_3dnow)
3433 SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_3dnow)
3434 SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_3dnow)
3435 SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_3dnow)
3436 SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_3dnow)
3437 SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_3dnow)
3438 SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_3dnow)
3439 SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_3dnow)
3440 SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_3dnow)
3441 SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_3dnow)
3442 SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_3dnow)
3443 SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_3dnow)
3444 SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_3dnow)
3445 SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_3dnow)
3446 SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_3dnow)
3447 SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_3dnow)
3448 SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_3dnow)
3449 SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_3dnow)
3450 SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_3dnow)
3451 SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_3dnow)
3452 SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_3dnow)
3453 SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_3dnow)
3454 SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_3dnow)
3455 SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_3dnow)
3456 SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_3dnow)
3457 SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_3dnow)
3458 SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_3dnow)
3459 SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_3dnow)
3460 SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_3dnow)
3461 SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_3dnow)
3462 SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_3dnow)
3463 SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_3dnow)
3465 #define dspfunc(PFX, IDX, NUM) \
3466 c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_3dnow; \
3467 c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_3dnow; \
3468 c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_3dnow; \
3469 c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_3dnow; \
3470 c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_3dnow; \
3471 c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_3dnow; \
3472 c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_3dnow; \
3473 c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_3dnow; \
3474 c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_3dnow; \
3475 c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_3dnow; \
3476 c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_3dnow; \
3477 c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_3dnow; \
3478 c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_3dnow; \
3479 c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_3dnow; \
3480 c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_3dnow; \
3481 c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_3dnow
3483 dspfunc(put_h264_qpel, 0, 16);
3484 dspfunc(put_h264_qpel, 1, 8);
3485 dspfunc(put_h264_qpel, 2, 4);
3486 dspfunc(avg_h264_qpel, 0, 16);
3487 dspfunc(avg_h264_qpel, 1, 8);
3488 dspfunc(avg_h264_qpel, 2, 4);
3490 dspfunc(put_2tap_qpel, 0, 16);
3491 dspfunc(put_2tap_qpel, 1, 8);
3492 dspfunc(avg_2tap_qpel, 0, 16);
3493 dspfunc(avg_2tap_qpel, 1, 8);
3495 c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow;
3496 c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_3dnow;
3499 #ifdef CONFIG_SNOW_DECODER
3500 if(mm_flags & MM_SSE2){
3501 c->horizontal_compose97i = ff_snow_horizontal_compose97i_sse2;
3502 c->vertical_compose97i = ff_snow_vertical_compose97i_sse2;
3503 c->inner_add_yblock = ff_snow_inner_add_yblock_sse2;
3506 c->horizontal_compose97i = ff_snow_horizontal_compose97i_mmx;
3507 c->vertical_compose97i = ff_snow_vertical_compose97i_mmx;
3508 c->inner_add_yblock = ff_snow_inner_add_yblock_mmx;
3512 if(mm_flags & MM_3DNOW){
3513 c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
3514 c->vector_fmul = vector_fmul_3dnow;
3515 if(!(avctx->flags & CODEC_FLAG_BITEXACT))
3516 c->float_to_int16 = float_to_int16_3dnow;
3518 if(mm_flags & MM_3DNOWEXT)
3519 c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
3520 if(mm_flags & MM_SSE){
3521 c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
3522 c->vector_fmul = vector_fmul_sse;
3523 c->float_to_int16 = float_to_int16_sse;
3524 c->vector_fmul_reverse = vector_fmul_reverse_sse;
3525 c->vector_fmul_add_add = vector_fmul_add_add_sse;
3527 if(mm_flags & MM_3DNOW)
3528 c->vector_fmul_add_add = vector_fmul_add_add_3dnow; // faster than sse
3531 #ifdef CONFIG_ENCODERS
3532 dsputil_init_pix_mmx(c, avctx);
3533 #endif //CONFIG_ENCODERS
3535 // for speed testing
3536 get_pixels = just_return;
3537 put_pixels_clamped = just_return;
3538 add_pixels_clamped = just_return;
3540 pix_abs16x16 = just_return;
3541 pix_abs16x16_x2 = just_return;
3542 pix_abs16x16_y2 = just_return;
3543 pix_abs16x16_xy2 = just_return;
3545 put_pixels_tab[0] = just_return;
3546 put_pixels_tab[1] = just_return;
3547 put_pixels_tab[2] = just_return;
3548 put_pixels_tab[3] = just_return;
3550 put_no_rnd_pixels_tab[0] = just_return;
3551 put_no_rnd_pixels_tab[1] = just_return;
3552 put_no_rnd_pixels_tab[2] = just_return;
3553 put_no_rnd_pixels_tab[3] = just_return;
3555 avg_pixels_tab[0] = just_return;
3556 avg_pixels_tab[1] = just_return;
3557 avg_pixels_tab[2] = just_return;
3558 avg_pixels_tab[3] = just_return;
3560 avg_no_rnd_pixels_tab[0] = just_return;
3561 avg_no_rnd_pixels_tab[1] = just_return;
3562 avg_no_rnd_pixels_tab[2] = just_return;
3563 avg_no_rnd_pixels_tab[3] = just_return;
3565 //av_fdct = just_return;
3566 //ff_idct = just_return;