2 * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
/*
 * Instruction-selection helpers for this template: an MMX2 build uses a
 * real prefetchnta and the non-temporal movntq store, while the plain-MMX
 * build substitutes an asm comment (effectively a no-op) and an ordinary
 * movq.  The two-level REAL_MOVNTQ/MOVNTQ indirection lets macro arguments
 * expand before stringification.
 * NOTE(review): the matching #else/#endif lines are not visible in this
 * chunk of the file.
 */
25 #if COMPILE_TEMPLATE_MMX2
26 #define PREFETCH "prefetchnta"
28 #define PREFETCH " # nop"
31 #if COMPILE_TEMPLATE_MMX2
32 #define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
34 #define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
36 #define MOVNTQ(a,b) REAL_MOVNTQ(a,b)
38 #define YSCALEYUV2YV12X(offset, dest, end, pos) \
40 "movq "DITHER16"+0(%0), %%mm3 \n\t"\
41 "movq "DITHER16"+8(%0), %%mm4 \n\t"\
42 "lea " offset "(%0), %%"REG_d" \n\t"\
43 "mov (%%"REG_d"), %%"REG_S" \n\t"\
44 ".p2align 4 \n\t" /* FIXME Unroll? */\
46 "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
47 "movq (%%"REG_S", %3, 2), %%mm2 \n\t" /* srcData */\
48 "movq 8(%%"REG_S", %3, 2), %%mm5 \n\t" /* srcData */\
49 "add $16, %%"REG_d" \n\t"\
50 "mov (%%"REG_d"), %%"REG_S" \n\t"\
51 "test %%"REG_S", %%"REG_S" \n\t"\
52 "pmulhw %%mm0, %%mm2 \n\t"\
53 "pmulhw %%mm0, %%mm5 \n\t"\
54 "paddw %%mm2, %%mm3 \n\t"\
55 "paddw %%mm5, %%mm4 \n\t"\
57 "psraw $3, %%mm3 \n\t"\
58 "psraw $3, %%mm4 \n\t"\
59 "packuswb %%mm4, %%mm3 \n\t"\
60 MOVNTQ(%%mm3, (%1, %3))\
63 "movq "DITHER16"+0(%0), %%mm3 \n\t"\
64 "movq "DITHER16"+8(%0), %%mm4 \n\t"\
65 "lea " offset "(%0), %%"REG_d" \n\t"\
66 "mov (%%"REG_d"), %%"REG_S" \n\t"\
68 :: "r" (&c->redDither),\
69 "r" (dest), "g" ((x86_reg)(end)), "r"((x86_reg)(pos))\
73 #if !COMPILE_TEMPLATE_MMX2
/*
 * Widen the 8 dither bytes at srcDither into eight 16-bit words stored in
 * the c->dither16 scratch area (addressed as DITHER16 relative to
 * &c->redDither), ready to seed the YSCALEYUV2YV12X accumulators.  With
 * rot != 0 the byte vector is first rotated by 3 positions
 * (psrlq $24 / psllq $40 / por) so the dither pattern shifts between
 * successive output lines.
 * NOTE(review): the function braces and the if(rot)/else framing between
 * the two asm statements are on lines missing from this excerpt.
 */
74 static av_always_inline void
75 dither_8to16(SwsContext *c, const uint8_t *srcDither, int rot)
        /* rot != 0: rotate the dither bytes by 3, then zero-extend to words */
78         __asm__ volatile("pxor %%mm0, %%mm0\n\t"
79                          "movq (%0), %%mm3\n\t"
80                          "movq %%mm3, %%mm4\n\t"
81                          "psrlq $24, %%mm3\n\t"
82                          "psllq $40, %%mm4\n\t"
83                          "por %%mm4, %%mm3\n\t"
84                          "movq %%mm3, %%mm4\n\t"
85                          "punpcklbw %%mm0, %%mm3\n\t"
86                          "punpckhbw %%mm0, %%mm4\n\t"
89                          "movq %%mm3, "DITHER16"+0(%1)\n\t"
90                          "movq %%mm4, "DITHER16"+8(%1)\n\t"
91                          :: "r"(srcDither), "r"(&c->redDither)
        /* rot == 0: straight byte -> word expansion of the dither row */
94         __asm__ volatile("pxor %%mm0, %%mm0\n\t"
95                          "movq (%0), %%mm3\n\t"
96                          "movq %%mm3, %%mm4\n\t"
97                          "punpcklbw %%mm0, %%mm3\n\t"
98                          "punpckhbw %%mm0, %%mm4\n\t"
100                         "psraw $4, %%mm4\n\t"
101                         "movq %%mm3, "DITHER16"+0(%1)\n\t"
102                         "movq %%mm4, "DITHER16"+8(%1)\n\t"
103                         :: "r"(srcDither), "r"(&c->redDither)
/*
 * Vertically scale 16-bit intermediate luma/chroma (and, when compiled in
 * and present, alpha) lines down to 8-bit planar output using the filter
 * lists prepared in the SwsContext (fast, non-accurate-rounding variant).
 * Each plane first loads its dither row via dither_8to16 and then runs the
 * YSCALEYUV2YV12X asm loop; the V plane reuses the U loop by biasing the
 * destination pointer and position with uv_off (half of c->uv_offx2, i.e.
 * a sample offset rather than a byte offset).
 * NOTE(review): function braces and some surrounding control-flow lines
 * (e.g. the else framing before the luma call) are not visible in this
 * chunk.
 */
109 static void RENAME(yuv2yuvX)(SwsContext *c, const int16_t *lumFilter,
110                              const int16_t **lumSrc, int lumFilterSize,
111                              const int16_t *chrFilter, const int16_t **chrUSrc,
112                              const int16_t **chrVSrc,
113                              int chrFilterSize, const int16_t **alpSrc,
114                              uint8_t *dest[4], int dstW, int chrDstW)
117     uint8_t *yDest = dest[0], *uDest = dest[1], *vDest = dest[2],
118             *aDest = CONFIG_SWSCALE_ALPHA ? dest[3] : NULL;
119     const uint8_t *lumDither = c->lumDither8, *chrDither = c->chrDither8;
122         x86_reg uv_off = c->uv_offx2 >> 1;
123         dither_8to16(c, chrDither, 0);
124         YSCALEYUV2YV12X(CHR_MMX_FILTER_OFFSET, uDest, chrDstW, 0)
125         dither_8to16(c, chrDither, 1);
126         YSCALEYUV2YV12X(CHR_MMX_FILTER_OFFSET, vDest - uv_off, chrDstW + uv_off, uv_off)
128     dither_8to16(c, lumDither, 0);
129     if (CONFIG_SWSCALE_ALPHA && aDest) {
130         YSCALEYUV2YV12X(ALP_MMX_FILTER_OFFSET, aDest, dstW, 0)
133     YSCALEYUV2YV12X(LUM_MMX_FILTER_OFFSET, yDest, dstW, 0)
/*
 * Accurate-rounding counterpart of YSCALEYUV2YV12X: accumulates in 32 bits.
 * Source samples from two adjacent filter taps are interleaved with
 * punpcklwd/punpckhwd and multiplied against paired coefficients with
 * pmaddwd (the APCK_PTR2 / APCK_COEF / APCK_SIZE packed-filter layout),
 * so each iteration consumes two taps.  Accumulators %%mm4..%%mm7 are
 * seeded from DITHER32; the final sums are >>19, packed signed->unsigned
 * and 8 bytes stored via MOVNTQ.  Operand layout matches YSCALEYUV2YV12X.
 * NOTE(review): the loop labels and conditional jumps of this asm block
 * are on lines not visible in this excerpt.
 */
136 #define YSCALEYUV2YV12X_ACCURATE(offset, dest, end, pos) \
138     "lea                     " offset "(%0), %%"REG_d"  \n\t"\
139     "movq                  "DITHER32"+0(%0), %%mm4      \n\t"\
140     "movq                  "DITHER32"+8(%0), %%mm5      \n\t"\
141     "movq                 "DITHER32"+16(%0), %%mm6      \n\t"\
142     "movq                 "DITHER32"+24(%0), %%mm7      \n\t"\
143     "mov                        (%%"REG_d"), %%"REG_S"  \n\t"\
146     "movq                (%%"REG_S", %3, 2), %%mm0      \n\t" /* srcData */\
147     "movq               8(%%"REG_S", %3, 2), %%mm2      \n\t" /* srcData */\
148     "mov        "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S"  \n\t"\
149     "movq                (%%"REG_S", %3, 2), %%mm1      \n\t" /* srcData */\
150     "movq                             %%mm0, %%mm3      \n\t"\
151     "punpcklwd                        %%mm1, %%mm0      \n\t"\
152     "punpckhwd                        %%mm1, %%mm3      \n\t"\
153     "movq       "STR(APCK_COEF)"(%%"REG_d"), %%mm1      \n\t" /* filterCoeff */\
154     "pmaddwd                          %%mm1, %%mm0      \n\t"\
155     "pmaddwd                          %%mm1, %%mm3      \n\t"\
156     "paddd                            %%mm0, %%mm4      \n\t"\
157     "paddd                            %%mm3, %%mm5      \n\t"\
158     "movq               8(%%"REG_S", %3, 2), %%mm3      \n\t" /* srcData */\
159     "mov        "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S"  \n\t"\
160     "add                   $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
161     "test                         %%"REG_S", %%"REG_S"  \n\t"\
162     "movq                             %%mm2, %%mm0      \n\t"\
163     "punpcklwd                        %%mm3, %%mm2      \n\t"\
164     "punpckhwd                        %%mm3, %%mm0      \n\t"\
165     "pmaddwd                          %%mm1, %%mm2      \n\t"\
166     "pmaddwd                          %%mm1, %%mm0      \n\t"\
167     "paddd                            %%mm2, %%mm6      \n\t"\
168     "paddd                            %%mm0, %%mm7      \n\t"\
170     "psrad                              $19, %%mm4      \n\t"\
171     "psrad                              $19, %%mm5      \n\t"\
172     "psrad                              $19, %%mm6      \n\t"\
173     "psrad                              $19, %%mm7      \n\t"\
174     "packssdw                         %%mm5, %%mm4      \n\t"\
175     "packssdw                         %%mm7, %%mm6      \n\t"\
176     "packuswb                         %%mm6, %%mm4      \n\t"\
177     MOVNTQ(%%mm4, (%1, %3))\
180     "lea                     " offset "(%0), %%"REG_d"  \n\t"\
181     "movq                  "DITHER32"+0(%0), %%mm4      \n\t"\
182     "movq                  "DITHER32"+8(%0), %%mm5      \n\t"\
183     "movq                 "DITHER32"+16(%0), %%mm6      \n\t"\
184     "movq                 "DITHER32"+24(%0), %%mm7      \n\t"\
185     "mov                        (%%"REG_d"), %%"REG_S"  \n\t"\
187     :: "r" (&c->redDither),\
188     "r" (dest), "g" ((x86_reg)(end)), "r"((x86_reg)(pos))\
189     : "%"REG_a, "%"REG_d, "%"REG_S\
192 #if !COMPILE_TEMPLATE_MMX2
/*
 * Widen the 8 dither bytes at srcDither into eight 32-bit values shifted
 * left by 12, stored in c->dither32 / the DITHER32 scratch area, seeding
 * the accurate-rounding accumulators.  rot rotates the byte pattern by 3
 * positions, as in dither_8to16.
 * The first two C statements are a plain-C implementation writing
 * c->dither32 directly; the asm statements do the equivalent with MMX
 * (note they shift the word-widened halves with psllw before the final
 * stores).
 * NOTE(review): the braces and the preprocessor/if framing that selects
 * between the C lines, the rot!=0 asm and the rot==0 asm are on lines
 * missing from this excerpt — verify against the full file.
 */
193 static av_always_inline void
194 dither_8to32(SwsContext *c, const uint8_t *srcDither, int rot)
197     if(rot) for(i=0; i<8; i++) c->dither32[i] = srcDither[(i+3)&7]<<12;
198     else    for(i=0; i<8; i++) c->dither32[i] = srcDither[i&7]<<12;
        /* rot != 0: rotate by 3 bytes, widen bytes -> words -> dwords, <<12 */
202         __asm__ volatile("pxor %%mm0, %%mm0\n\t"
203                          "movq (%0), %%mm4\n\t"
204                          "movq %%mm4, %%mm5\n\t"
205                          "psrlq $24, %%mm4\n\t"
206                          "psllq $40, %%mm5\n\t"
207                          "por %%mm5, %%mm4\n\t"
208                          "movq %%mm4, %%mm6\n\t"
209                          "punpcklbw %%mm0, %%mm4\n\t"
210                          "punpckhbw %%mm0, %%mm6\n\t"
211                          "movq %%mm4, %%mm5\n\t"
212                          "movq %%mm6, %%mm7\n\t"
213                          "punpcklwd %%mm0, %%mm4\n\t"
214                          "punpckhwd %%mm0, %%mm5\n\t"
215                          "punpcklwd %%mm0, %%mm6\n\t"
216                          "punpckhwd %%mm0, %%mm7\n\t"
217                          "psllw $12, %%mm4\n\t"
218                          "psllw $12, %%mm5\n\t"
219                          "psllw $12, %%mm6\n\t"
220                          "psllw $12, %%mm7\n\t"
221                          "movq %%mm4, "DITHER32"+0(%1)\n\t"
222                          "movq %%mm5, "DITHER32"+8(%1)\n\t"
223                          "movq %%mm6, "DITHER32"+16(%1)\n\t"
224                          "movq %%mm7, "DITHER32"+24(%1)\n\t"
225                          :: "r"(srcDither), "r"(&c->redDither)
        /* rot == 0: same expansion without the initial rotation */
228         __asm__ volatile("pxor %%mm0, %%mm0\n\t"
229                          "movq (%0), %%mm4\n\t"
230                          "movq %%mm4, %%mm6\n\t"
231                          "punpcklbw %%mm0, %%mm4\n\t"
232                          "punpckhbw %%mm0, %%mm6\n\t"
233                          "movq %%mm4, %%mm5\n\t"
234                          "movq %%mm6, %%mm7\n\t"
235                          "punpcklwd %%mm0, %%mm4\n\t"
236                          "punpckhwd %%mm0, %%mm5\n\t"
237                          "punpcklwd %%mm0, %%mm6\n\t"
238                          "punpckhwd %%mm0, %%mm7\n\t"
239                          "psllw $12, %%mm4\n\t"
240                          "psllw $12, %%mm5\n\t"
241                          "psllw $12, %%mm6\n\t"
242                          "psllw $12, %%mm7\n\t"
243                          "movq %%mm4, "DITHER32"+0(%1)\n\t"
244                          "movq %%mm5, "DITHER32"+8(%1)\n\t"
245                          "movq %%mm6, "DITHER32"+16(%1)\n\t"
246                          "movq %%mm7, "DITHER32"+24(%1)\n\t"
247                          :: "r"(srcDither), "r"(&c->redDither)
/*
 * Accurate-rounding version of yuv2yuvX: identical plane sequencing
 * (U, V via uv_off bias, optional alpha, then luma) but uses dither_8to32
 * to prepare 32-bit dither seeds and the YSCALEYUV2YV12X_ACCURATE loop,
 * which accumulates in 32 bits via pmaddwd instead of 16-bit pmulhw.
 * NOTE(review): function braces and some surrounding control-flow lines
 * are not visible in this chunk.
 */
253 static void RENAME(yuv2yuvX_ar)(SwsContext *c, const int16_t *lumFilter,
254                                 const int16_t **lumSrc, int lumFilterSize,
255                                 const int16_t *chrFilter, const int16_t **chrUSrc,
256                                 const int16_t **chrVSrc,
257                                 int chrFilterSize, const int16_t **alpSrc,
258                                 uint8_t *dest[4], int dstW, int chrDstW)
261     uint8_t *yDest = dest[0], *uDest = dest[1], *vDest = dest[2],
262             *aDest = CONFIG_SWSCALE_ALPHA ? dest[3] : NULL;
263     const uint8_t *lumDither = c->lumDither8, *chrDither = c->chrDither8;
266         x86_reg uv_off = c->uv_offx2 >> 1;
267         dither_8to32(c, chrDither, 0);
268         YSCALEYUV2YV12X_ACCURATE(CHR_MMX_FILTER_OFFSET, uDest, chrDstW, 0)
269         dither_8to32(c, chrDither, 1);
270         YSCALEYUV2YV12X_ACCURATE(CHR_MMX_FILTER_OFFSET, vDest - uv_off, chrDstW + uv_off, uv_off)
272     dither_8to32(c, lumDither, 0);
273     if (CONFIG_SWSCALE_ALPHA && aDest) {
274         YSCALEYUV2YV12X_ACCURATE(ALP_MMX_FILTER_OFFSET, aDest, dstW, 0)
277     YSCALEYUV2YV12X_ACCURATE(LUM_MMX_FILTER_OFFSET, yDest, dstW, 0)
/*
 * Unscaled ("1-tap") fast path: no vertical filtering, each plane is just
 * the 16-bit intermediate shifted down by 7 with unsigned saturation.
 * src[]/dst[] are biased by the plane width and the asm runs a negative
 * index register up toward zero, converting 8 pixels per iteration and
 * storing them with MOVNTQ.
 * NOTE(review): the per-plane for loop, the NULL-plane guard, and the
 * loop-branch lines of the asm are not visible in this chunk — the asm
 * shown is presumably the body executed once per plane p; verify against
 * the full file.
 */
280 static void RENAME(yuv2yuv1)(SwsContext *c, const int16_t *lumSrc,
281                              const int16_t *chrUSrc, const int16_t *chrVSrc,
282                              const int16_t *alpSrc,
283                              uint8_t *dst[4], int dstW, int chrDstW)
286     const int16_t *src[4]= {
287         lumSrc + dstW,     chrUSrc + chrDstW,
288         chrVSrc + chrDstW, alpSrc + dstW
290     x86_reg counter[4]= { dstW, chrDstW, chrDstW, dstW };
295             "mov %2, %%"REG_a"                    \n\t"
296             ".p2align               4             \n\t" /* FIXME Unroll? */
298             "movq  (%0, %%"REG_a", 2), %%mm0      \n\t"
299             "movq 8(%0, %%"REG_a", 2), %%mm1      \n\t"
300             "psraw                 $7, %%mm0      \n\t"
301             "psraw                 $7, %%mm1      \n\t"
302             "packuswb           %%mm1, %%mm0      \n\t"
303             MOVNTQ(%%mm0, (%1, %%REGa))
304             "add                   $8, %%"REG_a"  \n\t"
306             :: "r" (src[p]), "r" (dst[p] + counter[p]),
/*
 * Accurate-rounding version of yuv2yuv1: same negative-index 8-pixels-per
 * iteration loop, but before the >>7 each quad gets the plane's dither row
 * added with signed saturation (paddsw of the c->dither16 words loaded into
 * %%mm6/%%mm7).  The dither16 table is refilled per plane: luma dither for
 * planes 2 and 3 (V and alpha slots), chroma dither otherwise.
 * NOTE(review): the per-plane for loop, the NULL-plane guard, and the
 * loop-branch lines of the asm are not visible in this chunk.
 */
314 static void RENAME(yuv2yuv1_ar)(SwsContext *c, const int16_t *lumSrc,
315                                 const int16_t *chrUSrc, const int16_t *chrVSrc,
316                                 const int16_t *alpSrc,
317                                 uint8_t *dst[4], int dstW, int chrDstW)
320     const int16_t *src[4]= {
321         lumSrc + dstW,     chrUSrc + chrDstW,
322         chrVSrc + chrDstW, alpSrc + dstW
324     x86_reg counter[4]= { dstW, chrDstW, chrDstW, dstW };
325     const uint8_t *lumDither = c->lumDither8, *chrDither = c->chrDither8;
330             for(i=0; i<8; i++) c->dither16[i] = (p == 2 || p == 3) ? lumDither[i] : chrDither[i];
332             "mov %2, %%"REG_a"                    \n\t"
333             "movq    "DITHER16"+0(%3), %%mm6      \n\t"
334             "movq    "DITHER16"+8(%3), %%mm7      \n\t"
335             ".p2align               4             \n\t" /* FIXME Unroll? */
337             "movq  (%0, %%"REG_a", 2), %%mm0      \n\t"
338             "movq 8(%0, %%"REG_a", 2), %%mm1      \n\t"
339             "paddsw             %%mm6, %%mm0      \n\t"
340             "paddsw             %%mm7, %%mm1      \n\t"
341             "psraw                 $7, %%mm0      \n\t"
342             "psraw                 $7, %%mm1      \n\t"
343             "packuswb           %%mm1, %%mm0      \n\t"
344             MOVNTQ(%%mm0, (%1, %%REGa))
345             "add                   $8, %%"REG_a"  \n\t"
347             :: "r" (src[p]), "r" (dst[p] + counter[p]),
348                "g" (-counter[p]), "r"(&c->redDither)
/*
 * Common chroma head for the packed-pixel (RGB/YUY2) X scalers: zeroes the
 * pixel index in REG_a, then walks the chroma filter list at
 * CHR_MMX_FILTER_OFFSET, accumulating U into %%mm3 and V into %%mm4
 * (both seeded from the VROUNDER_OFFSET rounding constant).  The V sample
 * for the same tap is fetched by temporarily adding %6 (uv_off, the
 * U->V plane byte offset) to the source pointer.  Filter-list end is
 * detected by the NULL pointer test after each advance.
 * NOTE(review): the loop labels and conditional jumps are on lines not
 * visible in this excerpt.
 */
355 #define YSCALEYUV2PACKEDX_UV \
357     "xor                   %%"REG_a", %%"REG_a"     \n\t"\
361     "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"     \n\t"\
362     "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
363     "movq      "VROUNDER_OFFSET"(%0), %%mm3         \n\t"\
364     "movq                      %%mm3, %%mm4         \n\t"\
367     "movq               8(%%"REG_d"), %%mm0         \n\t" /* filterCoeff */\
368     "movq     (%%"REG_S", %%"REG_a"), %%mm2         \n\t" /* UsrcData */\
369     "add                          %6, %%"REG_S"     \n\t" \
370     "movq     (%%"REG_S", %%"REG_a"), %%mm5         \n\t" /* VsrcData */\
371     "add                         $16, %%"REG_d"     \n\t"\
372     "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
373     "pmulhw                    %%mm0, %%mm2         \n\t"\
374     "pmulhw                    %%mm0, %%mm5         \n\t"\
375     "paddw                     %%mm2, %%mm3         \n\t"\
376     "paddw                     %%mm5, %%mm4         \n\t"\
377     "test              %%"REG_S", %%"REG_S"         \n\t"\
/*
 * Luma (or alpha) accumulation for the packed X scalers: walks the filter
 * list at "offset", multiply-accumulating two quads of 16-bit samples
 * (8 pixels: src1/src2) into dst1/dst2, which are seeded from
 * VROUNDER_OFFSET.  Register names are parameters so the same body serves
 * both the luma pass and the alpha pass with disjoint register sets.
 * NOTE(review): the loop labels and jumps are on lines not visible here.
 */
380 #define YSCALEYUV2PACKEDX_YA(offset,coeff,src1,src2,dst1,dst2) \
381     "lea                "offset"(%0), %%"REG_d"     \n\t"\
382     "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
383     "movq      "VROUNDER_OFFSET"(%0), "#dst1"       \n\t"\
384     "movq                    "#dst1", "#dst2"       \n\t"\
387     "movq               8(%%"REG_d"), "#coeff"      \n\t" /* filterCoeff */\
388     "movq  (%%"REG_S", %%"REG_a", 2), "#src1"       \n\t" /* Y1srcData */\
389     "movq 8(%%"REG_S", %%"REG_a", 2), "#src2"       \n\t" /* Y2srcData */\
390     "add                         $16, %%"REG_d"            \n\t"\
391     "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
392     "pmulhw                 "#coeff", "#src1"       \n\t"\
393     "pmulhw                 "#coeff", "#src2"       \n\t"\
394     "paddw                  "#src1", "#dst1"        \n\t"\
395     "paddw                  "#src2", "#dst2"        \n\t"\
396     "test              %%"REG_S", %%"REG_S"         \n\t"\
/*
 * Default packed-scaler head: chroma accumulation followed by luma
 * accumulation with the standard register assignment (Y sums end up in
 * %%mm1/%%mm7, U/V sums in %%mm3/%%mm4 from the UV part).
 */
399 #define YSCALEYUV2PACKEDX \
400     YSCALEYUV2PACKEDX_UV \
401     YSCALEYUV2PACKEDX_YA(LUM_MMX_FILTER_OFFSET,%%mm0,%%mm2,%%mm5,%%mm1,%%mm7) \
/*
 * Common asm tail for the packed X scalers: operand and clobber lists.
 * The three "m"(dummy) inputs only pad the operand numbering so that
 * %4 = dest, %5 = dstW_reg and %6 = uv_off line up with the references
 * inside the YSCALEYUV2PACKEDX* and WRITE* macros.
 */
403 #define YSCALEYUV2PACKEDX_END                     \
404         :: "r" (&c->redDither),                   \
405             "m" (dummy), "m" (dummy), "m" (dummy),\
406             "r" (dest), "m" (dstW_reg), "m"(uv_off) \
407         : "%"REG_a, "%"REG_d, "%"REG_S            \
/*
 * Accurate-rounding chroma head for the packed scalers: 32-bit
 * accumulation via pmaddwd over interleaved two-tap word pairs (APCK_*
 * packed-filter layout), with %%mm4..%%mm7 zeroed as accumulators.
 * After the filter loop the sums are >>16, packed back to words, biased
 * with VROUNDER_OFFSET, and parked in the U_TEMP/V_TEMP context scratch
 * slots so the register file is free for the luma pass.  %6 = uv_off as
 * in YSCALEYUV2PACKEDX_UV.
 * NOTE(review): loop labels and jumps are on lines not visible here.
 */
410 #define YSCALEYUV2PACKEDX_ACCURATE_UV \
412     "xor %%"REG_a", %%"REG_a"                       \n\t"\
416     "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"     \n\t"\
417     "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
418     "pxor                      %%mm4, %%mm4         \n\t"\
419     "pxor                      %%mm5, %%mm5         \n\t"\
420     "pxor                      %%mm6, %%mm6         \n\t"\
421     "pxor                      %%mm7, %%mm7         \n\t"\
424     "movq     (%%"REG_S", %%"REG_a"), %%mm0         \n\t" /* UsrcData */\
425     "add                          %6, %%"REG_S"      \n\t" \
426     "movq     (%%"REG_S", %%"REG_a"), %%mm2         \n\t" /* VsrcData */\
427     "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S"     \n\t"\
428     "movq     (%%"REG_S", %%"REG_a"), %%mm1         \n\t" /* UsrcData */\
429     "movq                      %%mm0, %%mm3         \n\t"\
430     "punpcklwd                 %%mm1, %%mm0         \n\t"\
431     "punpckhwd                 %%mm1, %%mm3         \n\t"\
432     "movq "STR(APCK_COEF)"(%%"REG_d"),%%mm1         \n\t" /* filterCoeff */\
433     "pmaddwd                   %%mm1, %%mm0         \n\t"\
434     "pmaddwd                   %%mm1, %%mm3         \n\t"\
435     "paddd                     %%mm0, %%mm4         \n\t"\
436     "paddd                     %%mm3, %%mm5         \n\t"\
437     "add                          %6, %%"REG_S"      \n\t" \
438     "movq     (%%"REG_S", %%"REG_a"), %%mm3         \n\t" /* VsrcData */\
439     "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S"     \n\t"\
440     "add           $"STR(APCK_SIZE)", %%"REG_d"     \n\t"\
441     "test                  %%"REG_S", %%"REG_S"     \n\t"\
442     "movq                      %%mm2, %%mm0         \n\t"\
443     "punpcklwd                 %%mm3, %%mm2         \n\t"\
444     "punpckhwd                 %%mm3, %%mm0         \n\t"\
445     "pmaddwd                   %%mm1, %%mm2         \n\t"\
446     "pmaddwd                   %%mm1, %%mm0         \n\t"\
447     "paddd                     %%mm2, %%mm6         \n\t"\
448     "paddd                     %%mm0, %%mm7         \n\t"\
450     "psrad                       $16, %%mm4         \n\t"\
451     "psrad                       $16, %%mm5         \n\t"\
452     "psrad                       $16, %%mm6         \n\t"\
453     "psrad                       $16, %%mm7         \n\t"\
454     "movq      "VROUNDER_OFFSET"(%0), %%mm0         \n\t"\
455     "packssdw                  %%mm5, %%mm4         \n\t"\
456     "packssdw                  %%mm7, %%mm6         \n\t"\
457     "paddw                     %%mm0, %%mm4         \n\t"\
458     "paddw                     %%mm0, %%mm6         \n\t"\
459     "movq                      %%mm4, "U_TEMP"(%0)  \n\t"\
460     "movq                      %%mm6, "V_TEMP"(%0)  \n\t"\
/*
 * Accurate-rounding luma/alpha accumulation for the packed scalers:
 * same pmaddwd two-taps-at-a-time scheme as the UV part, accumulating
 * 8 pixels of Y into %%mm1/%%mm5/%%mm7/%%mm6, then >>16, repacking,
 * adding the VROUNDER_OFFSET bias (Y1 in %%mm1, Y2 in %%mm7) and
 * reloading the parked chroma sums from U_TEMP/V_TEMP into %%mm3/%%mm4
 * — i.e. on exit the register layout matches the non-accurate
 * YSCALEYUV2PACKEDX so the same YSCALEYUV2RGBX/WRITE* tails apply.
 * NOTE(review): loop labels and jumps are on lines not visible here.
 */
462 #define YSCALEYUV2PACKEDX_ACCURATE_YA(offset) \
463     "lea                "offset"(%0), %%"REG_d"     \n\t"\
464     "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
465     "pxor                      %%mm1, %%mm1         \n\t"\
466     "pxor                      %%mm5, %%mm5         \n\t"\
467     "pxor                      %%mm7, %%mm7         \n\t"\
468     "pxor                      %%mm6, %%mm6         \n\t"\
471     "movq  (%%"REG_S", %%"REG_a", 2), %%mm0         \n\t" /* Y1srcData */\
472     "movq 8(%%"REG_S", %%"REG_a", 2), %%mm2         \n\t" /* Y2srcData */\
473     "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S"     \n\t"\
474     "movq  (%%"REG_S", %%"REG_a", 2), %%mm4         \n\t" /* Y1srcData */\
475     "movq                      %%mm0, %%mm3         \n\t"\
476     "punpcklwd                 %%mm4, %%mm0         \n\t"\
477     "punpckhwd                 %%mm4, %%mm3         \n\t"\
478     "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm4        \n\t" /* filterCoeff */\
479     "pmaddwd                   %%mm4, %%mm0         \n\t"\
480     "pmaddwd                   %%mm4, %%mm3         \n\t"\
481     "paddd                     %%mm0, %%mm1         \n\t"\
482     "paddd                     %%mm3, %%mm5         \n\t"\
483     "movq 8(%%"REG_S", %%"REG_a", 2), %%mm3         \n\t" /* Y2srcData */\
484     "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S"     \n\t"\
485     "add           $"STR(APCK_SIZE)", %%"REG_d"     \n\t"\
486     "test                  %%"REG_S", %%"REG_S"     \n\t"\
487     "movq                      %%mm2, %%mm0         \n\t"\
488     "punpcklwd                 %%mm3, %%mm2         \n\t"\
489     "punpckhwd                 %%mm3, %%mm0         \n\t"\
490     "pmaddwd                   %%mm4, %%mm2         \n\t"\
491     "pmaddwd                   %%mm4, %%mm0         \n\t"\
492     "paddd                     %%mm2, %%mm7         \n\t"\
493     "paddd                     %%mm0, %%mm6         \n\t"\
495     "psrad                       $16, %%mm1         \n\t"\
496     "psrad                       $16, %%mm5         \n\t"\
497     "psrad                       $16, %%mm7         \n\t"\
498     "psrad                       $16, %%mm6         \n\t"\
499     "movq      "VROUNDER_OFFSET"(%0), %%mm0         \n\t"\
500     "packssdw                  %%mm5, %%mm1         \n\t"\
501     "packssdw                  %%mm6, %%mm7         \n\t"\
502     "paddw                     %%mm0, %%mm1         \n\t"\
503     "paddw                     %%mm0, %%mm7         \n\t"\
504     "movq               "U_TEMP"(%0), %%mm3         \n\t"\
505     "movq               "V_TEMP"(%0), %%mm4         \n\t"\
/*
 * Accurate-rounding packed-scaler head: chroma then luma accumulation,
 * leaving the same register layout (Y in mm1/mm7, U/V in mm3/mm4) as the
 * plain YSCALEYUV2PACKEDX.
 */
507 #define YSCALEYUV2PACKEDX_ACCURATE \
508     YSCALEYUV2PACKEDX_ACCURATE_UV \
509     YSCALEYUV2PACKEDX_ACCURATE_YA(LUM_MMX_FILTER_OFFSET)
/*
 * YUV -> RGB color-space conversion tail for the packed X scalers.
 * Input: Y1/Y2 in %%mm1/%%mm7, U in %%mm3, V in %%mm4 (filtered sums).
 * Subtracts the U/V and Y offsets, applies the per-context fixed-point
 * coefficients with pmulhw, duplicates the 4 chroma contributions across
 * the 8 luma samples with punpcklwd/punpckhwd, and packs with unsigned
 * saturation.  Output: B in %%mm2, G in %%mm4, R in %%mm5 (8 pixels each),
 * as consumed by the WRITE* macros below.
 */
511 #define YSCALEYUV2RGBX \
512     "psubw  "U_OFFSET"(%0), %%mm3       \n\t" /* (U-128)8*/\
513     "psubw  "V_OFFSET"(%0), %%mm4       \n\t" /* (V-128)8*/\
514     "movq            %%mm3, %%mm2       \n\t" /* (U-128)8*/\
515     "movq            %%mm4, %%mm5       \n\t" /* (V-128)8*/\
516     "pmulhw "UG_COEFF"(%0), %%mm3       \n\t"\
517     "pmulhw "VG_COEFF"(%0), %%mm4       \n\t"\
518     /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
519     "pmulhw "UB_COEFF"(%0), %%mm2       \n\t"\
520     "pmulhw "VR_COEFF"(%0), %%mm5       \n\t"\
521     "psubw  "Y_OFFSET"(%0), %%mm1       \n\t" /* 8(Y-16)*/\
522     "psubw  "Y_OFFSET"(%0), %%mm7       \n\t" /* 8(Y-16)*/\
523     "pmulhw  "Y_COEFF"(%0), %%mm1       \n\t"\
524     "pmulhw  "Y_COEFF"(%0), %%mm7       \n\t"\
525     /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
526     "paddw           %%mm3, %%mm4       \n\t"\
527     "movq            %%mm2, %%mm0       \n\t"\
528     "movq            %%mm5, %%mm6       \n\t"\
529     "movq            %%mm4, %%mm3       \n\t"\
530     "punpcklwd       %%mm2, %%mm2       \n\t"\
531     "punpcklwd       %%mm5, %%mm5       \n\t"\
532     "punpcklwd       %%mm4, %%mm4       \n\t"\
533     "paddw           %%mm1, %%mm2       \n\t"\
534     "paddw           %%mm1, %%mm5       \n\t"\
535     "paddw           %%mm1, %%mm4       \n\t"\
536     "punpckhwd       %%mm0, %%mm0       \n\t"\
537     "punpckhwd       %%mm6, %%mm6       \n\t"\
538     "punpckhwd       %%mm3, %%mm3       \n\t"\
539     "paddw           %%mm7, %%mm0       \n\t"\
540     "paddw           %%mm7, %%mm6       \n\t"\
541     "paddw           %%mm7, %%mm3       \n\t"\
542     /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
543     "packuswb        %%mm0, %%mm2       \n\t"\
544     "packuswb        %%mm6, %%mm5       \n\t"\
545     "packuswb        %%mm3, %%mm4       \n\t"\
/*
 * Interleave packed b, g, r, a byte vectors (8 pixels each) into four
 * 32-bit-per-pixel quadwords and store 32 bytes at dst + index*4, then
 * advance index by 8 pixels and compare against dstw (the conditional
 * branch using the comparison is on a line not visible here).  q0/q2/q3/t
 * name the scratch registers.  The REAL_/plain indirection expands macro
 * arguments before token pasting.
 */
547 #define REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) \
548     "movq       "#b", "#q2"     \n\t" /* B */\
549     "movq       "#r", "#t"      \n\t" /* R */\
550     "punpcklbw  "#g", "#b"      \n\t" /* GBGBGBGB 0 */\
551     "punpcklbw  "#a", "#r"      \n\t" /* ARARARAR 0 */\
552     "punpckhbw  "#g", "#q2"     \n\t" /* GBGBGBGB 2 */\
553     "punpckhbw  "#a", "#t"      \n\t" /* ARARARAR 2 */\
554     "movq       "#b", "#q0"     \n\t" /* GBGBGBGB 0 */\
555     "movq      "#q2", "#q3"     \n\t" /* GBGBGBGB 2 */\
556     "punpcklwd  "#r", "#q0"     \n\t" /* ARGBARGB 0 */\
557     "punpckhwd  "#r", "#b"      \n\t" /* ARGBARGB 1 */\
558     "punpcklwd  "#t", "#q2"     \n\t" /* ARGBARGB 2 */\
559     "punpckhwd  "#t", "#q3"     \n\t" /* ARGBARGB 3 */\
561     MOVNTQ(   q0,   (dst, index, 4))\
562     MOVNTQ(    b,  8(dst, index, 4))\
563     MOVNTQ(   q2, 16(dst, index, 4))\
564     MOVNTQ(   q3, 24(dst, index, 4))\
566     "add      $8, "#index"      \n\t"\
567     "cmp "#dstw", "#index"      \n\t"\
569 #define WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t)  REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t)
/*
 * Accurate-rounding multi-tap scaler producing 32-bit RGB output.
 * When an alpha plane is present, the chroma/luma results are parked in
 * U_TEMP/V_TEMP/Y_TEMP so a second YSCALEYUV2PACKEDX_ACCURATE_YA pass can
 * filter alpha (>>3 into %%mm1), and WRITEBGR32 stores real per-pixel
 * alpha; otherwise %%mm7 is set to all-ones (pcmpeqd) for opaque alpha.
 * NOTE(review): function braces, the YSCALEYUV2RGBX invocation between the
 * head and the stores, and the else framing are on lines not visible in
 * this chunk — the visible register choices assume that missing context.
 */
571 static void RENAME(yuv2rgb32_X_ar)(SwsContext *c, const int16_t *lumFilter,
572                                    const int16_t **lumSrc, int lumFilterSize,
573                                    const int16_t *chrFilter, const int16_t **chrUSrc,
574                                    const int16_t **chrVSrc,
575                                    int chrFilterSize, const int16_t **alpSrc,
576                                    uint8_t *dest, int dstW, int dstY)
579     x86_reg dstW_reg = dstW;
580     x86_reg uv_off = c->uv_offx2;
582     if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
583         YSCALEYUV2PACKEDX_ACCURATE
585         "movq %%mm2, "U_TEMP"(%0)  \n\t"
586         "movq %%mm4, "V_TEMP"(%0)  \n\t"
587         "movq %%mm5, "Y_TEMP"(%0)  \n\t"
588         YSCALEYUV2PACKEDX_ACCURATE_YA(ALP_MMX_FILTER_OFFSET)
589         "movq "Y_TEMP"(%0), %%mm5  \n\t"
590         "psraw $3, %%mm1           \n\t"
591         "psraw $3, %%mm7           \n\t"
592         "packuswb %%mm7, %%mm1     \n\t"
593         WRITEBGR32(%4, %5, %%REGa, %%mm3, %%mm4, %%mm5,%%mm1, %%mm0, %%mm7, %%mm2, %%mm6)
594         YSCALEYUV2PACKEDX_END
596         YSCALEYUV2PACKEDX_ACCURATE
598         "pcmpeqd %%mm7, %%mm7 \n\t"
599         WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5,%%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
600         YSCALEYUV2PACKEDX_END
/*
 * Fast (non-accurate-rounding) multi-tap scaler producing 32-bit RGB.
 * Same structure as yuv2rgb32_X_ar: with an alpha plane a second
 * YSCALEYUV2PACKEDX_YA pass filters alpha into %%mm1; without one,
 * pcmpeqd fills %%mm7 with ones for opaque alpha.
 * NOTE(review): function braces, the leading YSCALEYUV2PACKEDX /
 * YSCALEYUV2RGBX invocations and the else framing are on lines not
 * visible in this chunk.
 */
604 static void RENAME(yuv2rgb32_X)(SwsContext *c, const int16_t *lumFilter,
605                                 const int16_t **lumSrc, int lumFilterSize,
606                                 const int16_t *chrFilter, const int16_t **chrUSrc,
607                                 const int16_t **chrVSrc,
608                                 int chrFilterSize, const int16_t **alpSrc,
609                                 uint8_t *dest, int dstW, int dstY)
612     x86_reg dstW_reg = dstW;
613     x86_reg uv_off = c->uv_offx2;
615     if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
618         YSCALEYUV2PACKEDX_YA(ALP_MMX_FILTER_OFFSET, %%mm0, %%mm3, %%mm6, %%mm1, %%mm7)
619         "psraw                        $3, %%mm1         \n\t"
620         "psraw                        $3, %%mm7         \n\t"
621         "packuswb                  %%mm7, %%mm1         \n\t"
622         WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
623         YSCALEYUV2PACKEDX_END
627         "pcmpeqd %%mm7, %%mm7 \n\t"
628         WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
629         YSCALEYUV2PACKEDX_END
/*
 * Pack B (%%mm2), G (%%mm4), R (%%mm5) byte vectors into RGB565: mask to
 * 5/6/5 significant bits, shift blue down by 3, interleave bytes and merge
 * the shifted green field with por, then store 16 bytes (8 pixels) at
 * dst + index*2.  Requires %%mm7 == 0 (set by the caller).  Advances index
 * by 8 and compares against dstw; the branch is on a line not visible
 * here.  bF8/bFC are external byte-mask constants referenced via MANGLE.
 */
633 #define REAL_WRITERGB16(dst, dstw, index) \
634     "pand "MANGLE(bF8)", %%mm2  \n\t" /* B */\
635     "pand "MANGLE(bFC)", %%mm4  \n\t" /* G */\
636     "pand "MANGLE(bF8)", %%mm5  \n\t" /* R */\
637     "psrlq           $3, %%mm2  \n\t"\
639     "movq         %%mm2, %%mm1  \n\t"\
640     "movq         %%mm4, %%mm3  \n\t"\
642     "punpcklbw    %%mm7, %%mm3  \n\t"\
643     "punpcklbw    %%mm5, %%mm2  \n\t"\
644     "punpckhbw    %%mm7, %%mm4  \n\t"\
645     "punpckhbw    %%mm5, %%mm1  \n\t"\
647     "psllq           $3, %%mm3  \n\t"\
648     "psllq           $3, %%mm4  \n\t"\
650     "por          %%mm3, %%mm2  \n\t"\
651     "por          %%mm4, %%mm1  \n\t"\
653     MOVNTQ(%%mm2,  (dst, index, 2))\
654     MOVNTQ(%%mm1, 8(dst, index, 2))\
656     "add             $8, "#index"   \n\t"\
657     "cmp        "#dstw", "#index"   \n\t"\
659 #define WRITERGB16(dst, dstw, index)  REAL_WRITERGB16(dst, dstw, index)
/*
 * Accurate-rounding multi-tap scaler producing RGB565: packed head +
 * color-space conversion, zero %%mm7 for the pack step, add the per-channel
 * ordered-dither rows from the context (BLUE/GREEN/RED_DITHER) with
 * unsigned saturation, then WRITERGB16 stores the pixels.
 * NOTE(review): function braces, the YSCALEYUV2RGBX invocation after the
 * head, and the #ifdef DITHER1XBPP framing around the paddusb lines are on
 * lines not visible in this chunk.
 */
661 static void RENAME(yuv2rgb565_X_ar)(SwsContext *c, const int16_t *lumFilter,
662                                     const int16_t **lumSrc, int lumFilterSize,
663                                     const int16_t *chrFilter, const int16_t **chrUSrc,
664                                     const int16_t **chrVSrc,
665                                     int chrFilterSize, const int16_t **alpSrc,
666                                     uint8_t *dest, int dstW, int dstY)
669     x86_reg dstW_reg = dstW;
670     x86_reg uv_off = c->uv_offx2;
672     YSCALEYUV2PACKEDX_ACCURATE
674     "pxor %%mm7, %%mm7 \n\t"
675     /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
677     "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
678     "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
679     "paddusb "RED_DITHER"(%0), %%mm5\n\t"
681     WRITERGB16(%4, %5, %%REGa)
682     YSCALEYUV2PACKEDX_END
/*
 * Fast (non-accurate-rounding) RGB565 variant of the above: same dither
 * and WRITERGB16 tail, driven by the plain YSCALEYUV2PACKEDX head.
 * NOTE(review): function braces and the YSCALEYUV2PACKEDX/YSCALEYUV2RGBX
 * invocations preceding the pxor are on lines not visible in this chunk.
 */
685 static void RENAME(yuv2rgb565_X)(SwsContext *c, const int16_t *lumFilter,
686                                  const int16_t **lumSrc, int lumFilterSize,
687                                  const int16_t *chrFilter, const int16_t **chrUSrc,
688                                  const int16_t **chrVSrc,
689                                  int chrFilterSize, const int16_t **alpSrc,
690                                  uint8_t *dest, int dstW, int dstY)
693     x86_reg dstW_reg = dstW;
694     x86_reg uv_off = c->uv_offx2;
698     "pxor %%mm7, %%mm7 \n\t"
699     /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
701     "paddusb "BLUE_DITHER"(%0), %%mm2  \n\t"
702     "paddusb "GREEN_DITHER"(%0), %%mm4  \n\t"
703     "paddusb "RED_DITHER"(%0), %%mm5  \n\t"
705     WRITERGB16(%4, %5, %%REGa)
706     YSCALEYUV2PACKEDX_END
/*
 * RGB555 sibling of WRITERGB16: all three channels masked to 5 bits
 * (green uses bF8 too), blue >>3, red >>1, green fields shifted by 2
 * after interleave, then 16 bytes stored at dst + index*2.  Requires
 * %%mm7 == 0.  Index advance and dstw compare as in WRITERGB16; the
 * branch is on a line not visible here.
 */
709 #define REAL_WRITERGB15(dst, dstw, index) \
710     "pand "MANGLE(bF8)", %%mm2  \n\t" /* B */\
711     "pand "MANGLE(bF8)", %%mm4  \n\t" /* G */\
712     "pand "MANGLE(bF8)", %%mm5  \n\t" /* R */\
713     "psrlq           $3, %%mm2  \n\t"\
714     "psrlq           $1, %%mm5  \n\t"\
716     "movq         %%mm2, %%mm1  \n\t"\
717     "movq         %%mm4, %%mm3  \n\t"\
719     "punpcklbw    %%mm7, %%mm3  \n\t"\
720     "punpcklbw    %%mm5, %%mm2  \n\t"\
721     "punpckhbw    %%mm7, %%mm4  \n\t"\
722     "punpckhbw    %%mm5, %%mm1  \n\t"\
724     "psllq           $2, %%mm3  \n\t"\
725     "psllq           $2, %%mm4  \n\t"\
727     "por          %%mm3, %%mm2  \n\t"\
728     "por          %%mm4, %%mm1  \n\t"\
730     MOVNTQ(%%mm2,  (dst, index, 2))\
731     MOVNTQ(%%mm1, 8(dst, index, 2))\
733     "add             $8, "#index"   \n\t"\
734     "cmp        "#dstw", "#index"   \n\t"\
736 #define WRITERGB15(dst, dstw, index)  REAL_WRITERGB15(dst, dstw, index)
/*
 * Accurate-rounding multi-tap scaler producing RGB555: identical shape to
 * yuv2rgb565_X_ar but ends in WRITERGB15.
 * NOTE(review): function braces, the YSCALEYUV2RGBX invocation after the
 * head, and the dither #ifdef framing are on lines not visible here.
 */
738 static void RENAME(yuv2rgb555_X_ar)(SwsContext *c, const int16_t *lumFilter,
739                                     const int16_t **lumSrc, int lumFilterSize,
740                                     const int16_t *chrFilter, const int16_t **chrUSrc,
741                                     const int16_t **chrVSrc,
742                                     int chrFilterSize, const int16_t **alpSrc,
743                                     uint8_t *dest, int dstW, int dstY)
746     x86_reg dstW_reg = dstW;
747     x86_reg uv_off = c->uv_offx2;
749     YSCALEYUV2PACKEDX_ACCURATE
751     "pxor %%mm7, %%mm7 \n\t"
752     /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
754     "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
755     "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
756     "paddusb "RED_DITHER"(%0), %%mm5\n\t"
758     WRITERGB15(%4, %5, %%REGa)
759     YSCALEYUV2PACKEDX_END
/*
 * Fast (non-accurate-rounding) RGB555 variant: dither + WRITERGB15 tail.
 * NOTE(review): function braces and the YSCALEYUV2PACKEDX/YSCALEYUV2RGBX
 * invocations preceding the pxor are on lines not visible in this chunk.
 */
762 static void RENAME(yuv2rgb555_X)(SwsContext *c, const int16_t *lumFilter,
763                                  const int16_t **lumSrc, int lumFilterSize,
764                                  const int16_t *chrFilter, const int16_t **chrUSrc,
765                                  const int16_t **chrVSrc,
766                                  int chrFilterSize, const int16_t **alpSrc,
767                                  uint8_t *dest, int dstW, int dstY)
770     x86_reg dstW_reg = dstW;
771     x86_reg uv_off = c->uv_offx2;
775     "pxor %%mm7, %%mm7 \n\t"
776     /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
778     "paddusb "BLUE_DITHER"(%0), %%mm2  \n\t"
779     "paddusb "GREEN_DITHER"(%0), %%mm4  \n\t"
780     "paddusb "RED_DITHER"(%0), %%mm5  \n\t"
782     WRITERGB15(%4, %5, %%REGa)
783     YSCALEYUV2PACKEDX_END
/*
 * Plain-MMX 24bpp store: expands B/G/R byte vectors to four 0RGB
 * quadwords, then shifts/ORs them so the three-byte pixels pack
 * contiguously into 24 output bytes (three MOVNTQ stores) per 8 pixels.
 * Requires %%mm7 == 0 on entry.  dst is advanced by 24 bytes, index by 8
 * pixels and compared against dstw; the branch is on a line not visible
 * here.
 */
786 #define WRITEBGR24MMX(dst, dstw, index) \
787     /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
788     "movq      %%mm2, %%mm1     \n\t" /* B */\
789     "movq      %%mm5, %%mm6     \n\t" /* R */\
790     "punpcklbw %%mm4, %%mm2     \n\t" /* GBGBGBGB 0 */\
791     "punpcklbw %%mm7, %%mm5     \n\t" /* 0R0R0R0R 0 */\
792     "punpckhbw %%mm4, %%mm1     \n\t" /* GBGBGBGB 2 */\
793     "punpckhbw %%mm7, %%mm6     \n\t" /* 0R0R0R0R 2 */\
794     "movq      %%mm2, %%mm0     \n\t" /* GBGBGBGB 0 */\
795     "movq      %%mm1, %%mm3     \n\t" /* GBGBGBGB 2 */\
796     "punpcklwd %%mm5, %%mm0     \n\t" /* 0RGB0RGB 0 */\
797     "punpckhwd %%mm5, %%mm2     \n\t" /* 0RGB0RGB 1 */\
798     "punpcklwd %%mm6, %%mm1     \n\t" /* 0RGB0RGB 2 */\
799     "punpckhwd %%mm6, %%mm3     \n\t" /* 0RGB0RGB 3 */\
801     "movq      %%mm0, %%mm4     \n\t" /* 0RGB0RGB 0 */\
802     "movq      %%mm2, %%mm6     \n\t" /* 0RGB0RGB 1 */\
803     "movq      %%mm1, %%mm5     \n\t" /* 0RGB0RGB 2 */\
804     "movq      %%mm3, %%mm7     \n\t" /* 0RGB0RGB 3 */\
806     "psllq       $40, %%mm0     \n\t" /* RGB00000 0 */\
807     "psllq       $40, %%mm2     \n\t" /* RGB00000 1 */\
808     "psllq       $40, %%mm1     \n\t" /* RGB00000 2 */\
809     "psllq       $40, %%mm3     \n\t" /* RGB00000 3 */\
811     "punpckhdq %%mm4, %%mm0     \n\t" /* 0RGBRGB0 0 */\
812     "punpckhdq %%mm6, %%mm2     \n\t" /* 0RGBRGB0 1 */\
813     "punpckhdq %%mm5, %%mm1     \n\t" /* 0RGBRGB0 2 */\
814     "punpckhdq %%mm7, %%mm3     \n\t" /* 0RGBRGB0 3 */\
816     "psrlq        $8, %%mm0     \n\t" /* 00RGBRGB 0 */\
817     "movq      %%mm2, %%mm6     \n\t" /* 0RGBRGB0 1 */\
818     "psllq       $40, %%mm2     \n\t" /* GB000000 1 */\
819     "por       %%mm2, %%mm0     \n\t" /* GBRGBRGB 0 */\
820     MOVNTQ(%%mm0, (dst))\
822     "psrlq       $24, %%mm6     \n\t" /* 0000RGBR 1 */\
823     "movq      %%mm1, %%mm5     \n\t" /* 0RGBRGB0 2 */\
824     "psllq       $24, %%mm1     \n\t" /* BRGB0000 2 */\
825     "por       %%mm1, %%mm6     \n\t" /* BRGBRGBR 1 */\
826     MOVNTQ(%%mm6, 8(dst))\
828     "psrlq       $40, %%mm5     \n\t" /* 000000RG 2 */\
829     "psllq        $8, %%mm3     \n\t" /* RGBRGB00 3 */\
830     "por       %%mm3, %%mm5     \n\t" /* RGBRGBRG 2 */\
831     MOVNTQ(%%mm5, 16(dst))\
833     "add         $24, "#dst"    \n\t"\
835     "add          $8, "#index"  \n\t"\
836     "cmp     "#dstw", "#index"  \n\t"\
/*
 * MMX2 24bpp store: builds each third of the 24 output bytes directly with
 * pshufw channel broadcasts masked against the ff_M24A/B/C byte-selection
 * constants and merged with por, avoiding the long shift/unpack chain of
 * the plain-MMX version.  Clobbers %%mm0/%%mm7 with the mask constants.
 * Same dst/index/dstw bookkeeping as WRITEBGR24MMX; the branch after the
 * cmp is on a line not visible here.  WRITEBGR24 below selects between the
 * two implementations per compile target (its #else/#endif framing is also
 * not visible in this excerpt).
 */
839 #define WRITEBGR24MMX2(dst, dstw, index) \
840     /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
841     "movq "MANGLE(ff_M24A)", %%mm0 \n\t"\
842     "movq "MANGLE(ff_M24C)", %%mm7 \n\t"\
843     "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2  B1 B0 B1 B0 */\
844     "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2  G1 G0 G1 G0 */\
845     "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0  R1 R0 R1 R0 */\
847     "pand %%mm0, %%mm1          \n\t" /*    B2        B1       B0 */\
848     "pand %%mm0, %%mm3          \n\t" /*    G2        G1       G0 */\
849     "pand %%mm7, %%mm6          \n\t" /*       R1        R0       */\
851     "psllq $8, %%mm3            \n\t" /* G2        G1       G0    */\
852     "por %%mm1, %%mm6           \n\t"\
853     "por %%mm3, %%mm6           \n\t"\
854     MOVNTQ(%%mm6, (dst))\
856     "psrlq $8, %%mm4            \n\t" /* 00 G7 G6 G5  G4 G3 G2 G1 */\
857     "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4  B3 B2 B3 B2 */\
858     "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3  G4 G3 G4 G3 */\
859     "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4  R3 R2 R3 R2 */\
861     "pand "MANGLE(ff_M24B)", %%mm1 \n\t" /* B5       B4        B3    */\
862     "pand %%mm7, %%mm3          \n\t" /*       G4        G3       */\
863     "pand %%mm0, %%mm6          \n\t" /*    R4        R3       R2 */\
865     "por %%mm1, %%mm3           \n\t" /* B5 G4 B4     G3 B3    G2 */\
866     "por %%mm3, %%mm6           \n\t"\
867     MOVNTQ(%%mm6, 8(dst))\
869     "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6  B7 B6 B6 B7 */\
870     "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7  G6 G5 G6 G5 */\
871     "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6  R5 R4 R5 R4 */\
873     "pand %%mm7, %%mm1          \n\t" /*       B7        B6       */\
874     "pand %%mm0, %%mm3          \n\t" /*    G7        G6       G5 */\
875     "pand "MANGLE(ff_M24B)", %%mm6 \n\t" /* R7       R6        R5    */\
877     "por %%mm1, %%mm3           \n\t"\
878     "por %%mm3, %%mm6           \n\t"\
879     MOVNTQ(%%mm6, 16(dst))\
881     "add $24, "#dst"            \n\t"\
883     "add $8, "#index"           \n\t"\
884     "cmp "#dstw", "#index"      \n\t"\
887 #if COMPILE_TEMPLATE_MMX2
889 #define WRITEBGR24(dst, dstw, index)  WRITEBGR24MMX2(dst, dstw, index)
892 #define WRITEBGR24(dst, dstw, index)  WRITEBGR24MMX(dst, dstw, index)
/*
 * Accurate-rounding multi-tap scaler producing 24-bit BGR.  Because the
 * 24bpp writer advances dst by bytes rather than indexing, the byte
 * address dest + 3*index is computed into REG_c (lea index*3 + dest) and
 * WRITEBGR24 advances that register directly; hence the explicit operand/
 * clobber lists here instead of YSCALEYUV2PACKEDX_END (REG_c added to the
 * clobbers).
 * NOTE(review): function braces and the YSCALEYUV2RGBX invocation between
 * the head and the pxor are on lines not visible in this chunk.
 */
895 static void RENAME(yuv2bgr24_X_ar)(SwsContext *c, const int16_t *lumFilter,
896                                    const int16_t **lumSrc, int lumFilterSize,
897                                    const int16_t *chrFilter, const int16_t **chrUSrc,
898                                    const int16_t **chrVSrc,
899                                    int chrFilterSize, const int16_t **alpSrc,
900                                    uint8_t *dest, int dstW, int dstY)
903     x86_reg dstW_reg = dstW;
904     x86_reg uv_off = c->uv_offx2;
906     YSCALEYUV2PACKEDX_ACCURATE
908     "pxor %%mm7, %%mm7 \n\t"
909     "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"\n\t" //FIXME optimize
910     "add %4, %%"REG_c"                        \n\t"
911     WRITEBGR24(%%REGc, %5, %%REGa)
912     :: "r" (&c->redDither),
913        "m" (dummy), "m" (dummy), "m" (dummy),
914        "r" (dest), "m" (dstW_reg), "m"(uv_off)
915     : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
/*
 * Fast (non-accurate-rounding) 24-bit BGR variant: same REG_c byte-address
 * computation and explicit operand/clobber lists as yuv2bgr24_X_ar.
 * NOTE(review): function braces and the YSCALEYUV2PACKEDX/YSCALEYUV2RGBX
 * invocations preceding the pxor are on lines not visible in this chunk.
 */
919 static void RENAME(yuv2bgr24_X)(SwsContext *c, const int16_t *lumFilter,
920                                 const int16_t **lumSrc, int lumFilterSize,
921                                 const int16_t *chrFilter, const int16_t **chrUSrc,
922                                 const int16_t **chrVSrc,
923                                 int chrFilterSize, const int16_t **alpSrc,
924                                 uint8_t *dest, int dstW, int dstY)
927     x86_reg dstW_reg = dstW;
928     x86_reg uv_off = c->uv_offx2;
932     "pxor                    %%mm7, %%mm7       \n\t"
933     "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"   \n\t" //FIXME optimize
934     "add                        %4, %%"REG_c"   \n\t"
935     WRITEBGR24(%%REGc, %5, %%REGa)
936     :: "r" (&c->redDither),
937        "m" (dummy), "m" (dummy), "m" (dummy),
938        "r" (dest),  "m" (dstW_reg), "m"(uv_off)
939     : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
/*
 * Pack filtered Y (%%mm1/%%mm7), U (%%mm3) and V (%%mm4) words into
 * interleaved YUYV: saturate each to bytes, interleave U with V, then Y
 * with the UV pairs, and store 16 bytes (8 pixels) at dst + index*2.
 * Advances index by 8 and compares against dstw; the branch is on a line
 * not visible here.
 */
943 #define REAL_WRITEYUY2(dst, dstw, index) \
944     "packuswb  %%mm3, %%mm3     \n\t"\
945     "packuswb  %%mm4, %%mm4     \n\t"\
946     "packuswb  %%mm7, %%mm1     \n\t"\
947     "punpcklbw %%mm4, %%mm3     \n\t"\
948     "movq      %%mm1, %%mm7     \n\t"\
949     "punpcklbw %%mm3, %%mm1     \n\t"\
950     "punpckhbw %%mm3, %%mm7     \n\t"\
952     MOVNTQ(%%mm1, (dst, index, 2))\
953     MOVNTQ(%%mm7, 8(dst, index, 2))\
955     "add          $8, "#index"  \n\t"\
956     "cmp     "#dstw", "#index"  \n\t"\
958 #define WRITEYUY2(dst, dstw, index)  REAL_WRITEYUY2(dst, dstw, index)
/*
 * Accurate-rounding multi-tap scaler producing packed YUYV422: no
 * color-space conversion, just the packed head, a >>3 to drop the filter
 * headroom from Y (mm1/mm7) and U/V (mm3/mm4), then WRITEYUY2.
 * NOTE(review): function braces are on lines not visible in this chunk.
 */
960 static void RENAME(yuv2yuyv422_X_ar)(SwsContext *c, const int16_t *lumFilter,
961                                      const int16_t **lumSrc, int lumFilterSize,
962                                      const int16_t *chrFilter, const int16_t **chrUSrc,
963                                      const int16_t **chrVSrc,
964                                      int chrFilterSize, const int16_t **alpSrc,
965                                      uint8_t *dest, int dstW, int dstY)
968     x86_reg dstW_reg = dstW;
969     x86_reg uv_off = c->uv_offx2;
971     YSCALEYUV2PACKEDX_ACCURATE
972     /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
973     "psraw $3, %%mm3    \n\t"
974     "psraw $3, %%mm4    \n\t"
975     "psraw $3, %%mm1    \n\t"
976     "psraw $3, %%mm7    \n\t"
977     WRITEYUY2(%4, %5, %%REGa)
978     YSCALEYUV2PACKEDX_END
/*
 * Fast (non-accurate-rounding) packed YUYV422 variant: >>3 and WRITEYUY2.
 * NOTE(review): function braces and the YSCALEYUV2PACKEDX invocation
 * preceding the shifts are on lines not visible in this chunk.
 */
981 static void RENAME(yuv2yuyv422_X)(SwsContext *c, const int16_t *lumFilter,
982                                   const int16_t **lumSrc, int lumFilterSize,
983                                   const int16_t *chrFilter, const int16_t **chrUSrc,
984                                   const int16_t **chrVSrc,
985                                   int chrFilterSize, const int16_t **alpSrc,
986                                   uint8_t *dest, int dstW, int dstY)
989     x86_reg dstW_reg = dstW;
990     x86_reg uv_off = c->uv_offx2;
993     /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
994     "psraw $3, %%mm3    \n\t"
995     "psraw $3, %%mm4    \n\t"
996     "psraw $3, %%mm1    \n\t"
997     "psraw $3, %%mm7    \n\t"
998     WRITEYUY2(%4, %5, %%REGa)
999     YSCALEYUV2PACKEDX_END
/*
 * Chroma part of the 2-tap (vertical bilinear) RGB path: blends the U/V
 * samples of two adjacent source lines (uvbuf0 = %2, uvbuf1 = %3) using
 * the alpha stored at CHR_MMX_FILTER_OFFSET+8 in the context "c", the V
 * samples being reached by temporarily adding UV_OFFx2 to the index.
 * Finishes by subtracting the U/V offsets and applying the green
 * coefficients — same register contract as the tail of the X-path UV
 * code (mm2=(U-128)8, mm3=ug, mm4=vg, mm5=(V-128)8).
 * NOTE(review): the loop label after the xor is on a line not visible
 * here.
 */
1002 #define REAL_YSCALEYUV2RGB_UV(index, c) \
1003     "xor            "#index", "#index"  \n\t"\
1006     "movq     (%2, "#index"), %%mm2     \n\t" /* uvbuf0[eax]*/\
1007     "movq     (%3, "#index"), %%mm3     \n\t" /* uvbuf1[eax]*/\
1008     "add "UV_OFFx2"("#c"), "#index"     \n\t" \
1009     "movq     (%2, "#index"), %%mm5     \n\t" /* uvbuf0[eax+2048]*/\
1010     "movq     (%3, "#index"), %%mm4     \n\t" /* uvbuf1[eax+2048]*/\
1011     "sub "UV_OFFx2"("#c"), "#index"     \n\t" \
1012     "psubw             %%mm3, %%mm2     \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
1013     "psubw             %%mm4, %%mm5     \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
1014     "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0    \n\t"\
1015     "pmulhw            %%mm0, %%mm2     \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
1016     "pmulhw            %%mm0, %%mm5     \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
1017     "psraw                $4, %%mm3     \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
1018     "psraw                $4, %%mm4     \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
1019     "paddw             %%mm2, %%mm3     \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
1020     "paddw             %%mm5, %%mm4     \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
1021     "psubw  "U_OFFSET"("#c"), %%mm3     \n\t" /* (U-128)8*/\
1022     "psubw  "V_OFFSET"("#c"), %%mm4     \n\t" /* (V-128)8*/\
1023     "movq              %%mm3, %%mm2     \n\t" /* (U-128)8*/\
1024     "movq              %%mm4, %%mm5     \n\t" /* (V-128)8*/\
1025     "pmulhw "UG_COEFF"("#c"), %%mm3     \n\t"\
1026     "pmulhw "VG_COEFF"("#c"), %%mm4     \n\t"\
1027     /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
/*
 * Luma part of the 2-tap bilinear RGB path: blends two quads of Y samples
 * from line buffers b1/b2 with the alpha at LUM_MMX_FILTER_OFFSET+8,
 * producing Y1 in %%mm1 and Y2 in %%mm7 for the COEFF stage below.
 */
1029 #define REAL_YSCALEYUV2RGB_YA(index, c, b1, b2) \
1030     "movq  ("#b1", "#index", 2), %%mm0     \n\t" /*buf0[eax]*/\
1031     "movq  ("#b2", "#index", 2), %%mm1     \n\t" /*buf1[eax]*/\
1032     "movq 8("#b1", "#index", 2), %%mm6     \n\t" /*buf0[eax]*/\
1033     "movq 8("#b2", "#index", 2), %%mm7     \n\t" /*buf1[eax]*/\
1034     "psubw                %%mm1, %%mm0     \n\t" /* buf0[eax] - buf1[eax]*/\
1035     "psubw                %%mm7, %%mm6     \n\t" /* buf0[eax] - buf1[eax]*/\
1036     "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0  \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
1037     "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6  \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
1038     "psraw                   $4, %%mm1     \n\t" /* buf0[eax] - buf1[eax] >>4*/\
1039     "psraw                   $4, %%mm7     \n\t" /* buf0[eax] - buf1[eax] >>4*/\
1040     "paddw                %%mm0, %%mm1     \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
1041     "paddw                %%mm6, %%mm7     \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
/* Final colorspace matrix step: takes mm1/mm7 (8 Y values) and the chroma
 * products from REAL_YSCALEYUV2RGB_UV, applies the Y/UB/VR coefficients,
 * duplicates the 4 chroma samples to per-pixel positions (punpcklwd /
 * punpckhwd against themselves) and packs to unsigned bytes.
 * Output: mm2=B(8px), mm4=G(8px), mm5=R(8px). */
1043 #define REAL_YSCALEYUV2RGB_COEFF(c) \
1044 "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
1045 "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
1046 "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
1047 "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
1048 "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
1049 "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
1050 /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
1051 "paddw %%mm3, %%mm4 \n\t"\
1052 "movq %%mm2, %%mm0 \n\t"\
1053 "movq %%mm5, %%mm6 \n\t"\
1054 "movq %%mm4, %%mm3 \n\t"\
1055 "punpcklwd %%mm2, %%mm2 \n\t"\
1056 "punpcklwd %%mm5, %%mm5 \n\t"\
1057 "punpcklwd %%mm4, %%mm4 \n\t"\
1058 "paddw %%mm1, %%mm2 \n\t"\
1059 "paddw %%mm1, %%mm5 \n\t"\
1060 "paddw %%mm1, %%mm4 \n\t"\
1061 "punpckhwd %%mm0, %%mm0 \n\t"\
1062 "punpckhwd %%mm6, %%mm6 \n\t"\
1063 "punpckhwd %%mm3, %%mm3 \n\t"\
1064 "paddw %%mm7, %%mm0 \n\t"\
1065 "paddw %%mm7, %%mm6 \n\t"\
1066 "paddw %%mm7, %%mm3 \n\t"\
1067 /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
1068 "packuswb %%mm0, %%mm2 \n\t"\
1069 "packuswb %%mm6, %%mm5 \n\t"\
1070 "packuswb %%mm3, %%mm4 \n\t"\
/* Indirection wrappers so macro arguments get expanded before stringizing. */
1072 #define YSCALEYUV2RGB_YA(index, c, b1, b2) REAL_YSCALEYUV2RGB_YA(index, c, b1, b2)
/* Full 2-tap vertical YUV->RGB pipeline: chroma blend, luma blend (lines in
 * asm operands %0/%1), then the coefficient/pack stage. */
1074 #define YSCALEYUV2RGB(index, c) \
1075 REAL_YSCALEYUV2RGB_UV(index, c) \
1076 REAL_YSCALEYUV2RGB_YA(index, c, %0, %1) \
1077 REAL_YSCALEYUV2RGB_COEFF(c)
1080 * vertical bilinear scale YV12 to RGB
/* Vertical 2-tap (bilinear) YV12 -> RGB32 scaler, MMX inline asm.
 * buf[0]/buf[1] are the two luma lines, ubuf/vbuf the chroma lines; the
 * blend weights live inside the SwsContext (asm operand %5).
 * With an alpha plane present (c->alpPixBuf) the alpha lines are blended
 * with the same luma weights; the first variant uses %%r8 as the x counter
 * and passes abuf0/abuf1 as extra "r" operands (only viable on x86-64,
 * where enough registers exist — note the 9 operands), the fallback stashes
 * abuf0/abuf1 in c->u_temp/c->v_temp and reloads them inside the asm because
 * ia32 runs out of registers.  REG_b and REG_BP are saved/restored manually
 * since they may be reserved (PIC register / frame pointer). */
1082 static void RENAME(yuv2rgb32_2)(SwsContext *c, const int16_t *buf[2],
1083 const int16_t *ubuf[2], const int16_t *vbuf[2],
1084 const int16_t *abuf[2], uint8_t *dest,
1085 int dstW, int yalpha, int uvalpha, int y)
1087 const int16_t *buf0 = buf[0], *buf1 = buf[1],
1088 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
1090 if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
1091 const int16_t *abuf0 = abuf[0], *abuf1 = abuf[1];
1094 YSCALEYUV2RGB(%%r8, %5)
1095 YSCALEYUV2RGB_YA(%%r8, %5, %6, %7)
1096 "psraw $3, %%mm1 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
1097 "psraw $3, %%mm7 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
1098 "packuswb %%mm7, %%mm1 \n\t"
1099 WRITEBGR32(%4, 8280(%5), %%r8, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
1100 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "r" (dest),
1101 "a" (&c->redDither),
1102 "r" (abuf0), "r" (abuf1)
1106 c->u_temp=(intptr_t)abuf0;
1107 c->v_temp=(intptr_t)abuf1;
1109 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1110 "mov %4, %%"REG_b" \n\t"
1111 "push %%"REG_BP" \n\t"
1112 YSCALEYUV2RGB(%%REGBP, %5)
/* reload the alpha line pointers that were parked in the context */
1115 "mov "U_TEMP"(%5), %0 \n\t"
1116 "mov "V_TEMP"(%5), %1 \n\t"
1117 YSCALEYUV2RGB_YA(%%REGBP, %5, %0, %1)
1118 "psraw $3, %%mm1 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
1119 "psraw $3, %%mm7 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
1120 "packuswb %%mm7, %%mm1 \n\t"
1123 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
1124 "pop %%"REG_BP" \n\t"
1125 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1126 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
/* no alpha plane: write a constant 0xFF alpha via pcmpeqd */
1132 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1133 "mov %4, %%"REG_b" \n\t"
1134 "push %%"REG_BP" \n\t"
1135 YSCALEYUV2RGB(%%REGBP, %5)
1136 "pcmpeqd %%mm7, %%mm7 \n\t"
1137 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1138 "pop %%"REG_BP" \n\t"
1139 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1140 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
/* Vertical 2-tap (bilinear) YV12 -> BGR24 scaler, MMX inline asm.
 * Same register-saving dance as yuv2rgb32_2: REG_b holds dest, REG_BP the
 * x counter; both are saved manually because they can be reserved by the
 * compiler (PIC / frame pointer). */
1146 static void RENAME(yuv2bgr24_2)(SwsContext *c, const int16_t *buf[2],
1147 const int16_t *ubuf[2], const int16_t *vbuf[2],
1148 const int16_t *abuf[2], uint8_t *dest,
1149 int dstW, int yalpha, int uvalpha, int y)
1151 const int16_t *buf0 = buf[0], *buf1 = buf[1],
1152 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
1154 //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
1156 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1157 "mov %4, %%"REG_b" \n\t"
1158 "push %%"REG_BP" \n\t"
1159 YSCALEYUV2RGB(%%REGBP, %5)
1160 "pxor %%mm7, %%mm7 \n\t"
1161 WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
1162 "pop %%"REG_BP" \n\t"
1163 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1164 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
/* Vertical 2-tap (bilinear) YV12 -> RGB555 scaler, MMX inline asm.
 * Optionally adds per-channel ordered dither (BLUE/GREEN/RED_DITHER tables
 * in the context) before packing to 15 bpp. */
1169 static void RENAME(yuv2rgb555_2)(SwsContext *c, const int16_t *buf[2],
1170 const int16_t *ubuf[2], const int16_t *vbuf[2],
1171 const int16_t *abuf[2], uint8_t *dest,
1172 int dstW, int yalpha, int uvalpha, int y)
1174 const int16_t *buf0 = buf[0], *buf1 = buf[1],
1175 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
1177 //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
1179 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1180 "mov %4, %%"REG_b" \n\t"
1181 "push %%"REG_BP" \n\t"
1182 YSCALEYUV2RGB(%%REGBP, %5)
1183 "pxor %%mm7, %%mm7 \n\t"
1184 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1186 "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
1187 "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
1188 "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
1190 WRITERGB15(%%REGb, 8280(%5), %%REGBP)
1191 "pop %%"REG_BP" \n\t"
1192 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1193 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
/* Vertical 2-tap (bilinear) YV12 -> RGB565 scaler, MMX inline asm.
 * Identical to yuv2rgb555_2 except for the final WRITERGB16 pack. */
1198 static void RENAME(yuv2rgb565_2)(SwsContext *c, const int16_t *buf[2],
1199 const int16_t *ubuf[2], const int16_t *vbuf[2],
1200 const int16_t *abuf[2], uint8_t *dest,
1201 int dstW, int yalpha, int uvalpha, int y)
1203 const int16_t *buf0 = buf[0], *buf1 = buf[1],
1204 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
1206 //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
1208 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1209 "mov %4, %%"REG_b" \n\t"
1210 "push %%"REG_BP" \n\t"
1211 YSCALEYUV2RGB(%%REGBP, %5)
1212 "pxor %%mm7, %%mm7 \n\t"
1213 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1215 "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
1216 "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
1217 "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
1219 WRITERGB16(%%REGb, 8280(%5), %%REGBP)
1220 "pop %%"REG_BP" \n\t"
1221 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1222 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
/* 2-tap vertical blend producing raw Y/U/V words (no RGB matrix) for packed
 * YUV output (YUY2).  First rescales the stored filter coefficients by >>3
 * in place (the packed path works at a different precision than the RGB
 * path), then blends chroma into mm3/mm4 and luma into mm1/mm7. */
1227 #define REAL_YSCALEYUV2PACKED(index, c) \
1228 "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
1229 "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1 \n\t"\
1230 "psraw $3, %%mm0 \n\t"\
1231 "psraw $3, %%mm1 \n\t"\
1232 "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c") \n\t"\
1233 "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c") \n\t"\
1234 "xor "#index", "#index" \n\t"\
1237 "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
1238 "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
1239 "add "UV_OFFx2"("#c"), "#index" \n\t" \
1240 "movq (%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
1241 "movq (%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
1242 "sub "UV_OFFx2"("#c"), "#index" \n\t" \
1243 "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
1244 "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
1245 "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
1246 "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
1247 "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
1248 "psraw $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>7*/\
1249 "psraw $7, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>7*/\
1250 "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
1251 "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
1252 "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
1253 "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
1254 "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
1255 "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
1256 "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
1257 "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
1258 "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
1259 "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
1260 "psraw $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>7*/\
1261 "psraw $7, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>7*/\
1262 "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
1263 "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
/* expansion wrapper so arguments are macro-expanded first */
1265 #define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c)
/* Vertical 2-tap (bilinear) YV12 -> YUYV422 scaler, MMX inline asm.
 * No colorspace conversion: blended Y/U/V words go straight to WRITEYUY2. */
1267 static void RENAME(yuv2yuyv422_2)(SwsContext *c, const int16_t *buf[2],
1268 const int16_t *ubuf[2], const int16_t *vbuf[2],
1269 const int16_t *abuf[2], uint8_t *dest,
1270 int dstW, int yalpha, int uvalpha, int y)
1272 const int16_t *buf0 = buf[0], *buf1 = buf[1],
1273 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
1275 //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
1277 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1278 "mov %4, %%"REG_b" \n\t"
1279 "push %%"REG_BP" \n\t"
1280 YSCALEYUV2PACKED(%%REGBP, %5)
1281 WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
1282 "pop %%"REG_BP" \n\t"
1283 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1284 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
/* Single-source (no vertical interpolation) YUV->RGB pipeline: reads one
 * luma line (%0) and one chroma line (%2, V at +UV_OFFx2), applies the
 * colorspace matrix and packs to mm2=B, mm4=G, mm5=R — same register
 * contract as YSCALEYUV2RGB.  Used when uvalpha < 2048 (nearest chroma). */
1289 #define REAL_YSCALEYUV2RGB1(index, c) \
1290 "xor "#index", "#index" \n\t"\
1293 "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
1294 "add "UV_OFFx2"("#c"), "#index" \n\t" \
1295 "movq (%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
1296 "sub "UV_OFFx2"("#c"), "#index" \n\t" \
1297 "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
1298 "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
1299 "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
1300 "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
1301 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
1302 "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
1303 "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
1304 "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
1305 /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
1306 "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
1307 "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
1308 "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
1309 "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
1310 "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
1311 "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
1312 "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
1313 "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
1314 "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
1315 "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
1316 /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
1317 "paddw %%mm3, %%mm4 \n\t"\
1318 "movq %%mm2, %%mm0 \n\t"\
1319 "movq %%mm5, %%mm6 \n\t"\
1320 "movq %%mm4, %%mm3 \n\t"\
1321 "punpcklwd %%mm2, %%mm2 \n\t"\
1322 "punpcklwd %%mm5, %%mm5 \n\t"\
1323 "punpcklwd %%mm4, %%mm4 \n\t"\
1324 "paddw %%mm1, %%mm2 \n\t"\
1325 "paddw %%mm1, %%mm5 \n\t"\
1326 "paddw %%mm1, %%mm4 \n\t"\
1327 "punpckhwd %%mm0, %%mm0 \n\t"\
1328 "punpckhwd %%mm6, %%mm6 \n\t"\
1329 "punpckhwd %%mm3, %%mm3 \n\t"\
1330 "paddw %%mm7, %%mm0 \n\t"\
1331 "paddw %%mm7, %%mm6 \n\t"\
1332 "paddw %%mm7, %%mm3 \n\t"\
1333 /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
1334 "packuswb %%mm0, %%mm2 \n\t"\
1335 "packuswb %%mm6, %%mm5 \n\t"\
1336 "packuswb %%mm3, %%mm4 \n\t"\
/* expansion wrapper so arguments are macro-expanded first */
1338 #define YSCALEYUV2RGB1(index, c) REAL_YSCALEYUV2RGB1(index, c)
1340 // do vertical chrominance interpolation
/* Variant of YSCALEYUV2RGB1 for uvalpha >= 2048: averages the two chroma
 * lines (%2 and %3) with equal weight ((a+b)>>1 folded into the >>5) instead
 * of taking the nearest one; luma still comes from a single line (%0).
 * Same output contract: mm2=B, mm4=G, mm5=R. */
1341 #define REAL_YSCALEYUV2RGB1b(index, c) \
1342 "xor "#index", "#index" \n\t"\
1345 "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
1346 "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
1347 "add "UV_OFFx2"("#c"), "#index" \n\t" \
1348 "movq (%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
1349 "movq (%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
1350 "sub "UV_OFFx2"("#c"), "#index" \n\t" \
1351 "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
1352 "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
1353 "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
1354 "psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
1355 "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
1356 "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
1357 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
1358 "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
1359 "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
1360 "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
1361 /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
1362 "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
1363 "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
1364 "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
1365 "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
1366 "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
1367 "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
1368 "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
1369 "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
1370 "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
1371 "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
1372 /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
1373 "paddw %%mm3, %%mm4 \n\t"\
1374 "movq %%mm2, %%mm0 \n\t"\
1375 "movq %%mm5, %%mm6 \n\t"\
1376 "movq %%mm4, %%mm3 \n\t"\
1377 "punpcklwd %%mm2, %%mm2 \n\t"\
1378 "punpcklwd %%mm5, %%mm5 \n\t"\
1379 "punpcklwd %%mm4, %%mm4 \n\t"\
1380 "paddw %%mm1, %%mm2 \n\t"\
1381 "paddw %%mm1, %%mm5 \n\t"\
1382 "paddw %%mm1, %%mm4 \n\t"\
1383 "punpckhwd %%mm0, %%mm0 \n\t"\
1384 "punpckhwd %%mm6, %%mm6 \n\t"\
1385 "punpckhwd %%mm3, %%mm3 \n\t"\
1386 "paddw %%mm7, %%mm0 \n\t"\
1387 "paddw %%mm7, %%mm6 \n\t"\
1388 "paddw %%mm7, %%mm3 \n\t"\
1389 /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
1390 "packuswb %%mm0, %%mm2 \n\t"\
1391 "packuswb %%mm6, %%mm5 \n\t"\
1392 "packuswb %%mm3, %%mm4 \n\t"\
/* expansion wrapper so arguments are macro-expanded first */
1394 #define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c)
/* Loads 8 alpha samples from one alpha line (asm operand %1), scales them
 * from the 15-bit intermediate format to 8 bits (>>7) and packs them into
 * mm7 for the WRITEBGR32 alpha slot. */
1396 #define REAL_YSCALEYUV2RGB1_ALPHA(index) \
1397 "movq (%1, "#index", 2), %%mm7 \n\t" /* abuf0[index ] */\
1398 "movq 8(%1, "#index", 2), %%mm1 \n\t" /* abuf0[index+4] */\
1399 "psraw $7, %%mm7 \n\t" /* abuf0[index ] >>7 */\
1400 "psraw $7, %%mm1 \n\t" /* abuf0[index+4] >>7 */\
1401 "packuswb %%mm1, %%mm7 \n\t"
/* expansion wrapper so arguments are macro-expanded first */
1402 #define YSCALEYUV2RGB1_ALPHA(index) REAL_YSCALEYUV2RGB1_ALPHA(index)
1405 * YV12 to RGB without scaling or interpolating
/* Unscaled (single-line) YV12 -> RGB32 converter, MMX inline asm.
 * uvalpha < 2048 takes the nearest chroma line (YSCALEYUV2RGB1 — see the
 * in-code note: this shifts chroma by half a pixel but is faster); otherwise
 * the two chroma lines are averaged (YSCALEYUV2RGB1b).  With an alpha plane,
 * abuf0 replaces buf1 in the "d" operand and YSCALEYUV2RGB1_ALPHA fills
 * mm7; without one, pcmpeqd writes constant 0xFF alpha. */
1407 static void RENAME(yuv2rgb32_1)(SwsContext *c, const int16_t *buf0,
1408 const int16_t *ubuf[2], const int16_t *bguf[2],
1409 const int16_t *abuf0, uint8_t *dest,
1410 int dstW, int uvalpha, int y)
1412 const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
1413 const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
1415 if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
1416 if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
1418 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1419 "mov %4, %%"REG_b" \n\t"
1420 "push %%"REG_BP" \n\t"
1421 YSCALEYUV2RGB1(%%REGBP, %5)
1422 YSCALEYUV2RGB1_ALPHA(%%REGBP)
1423 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1424 "pop %%"REG_BP" \n\t"
1425 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1426 :: "c" (buf0), "d" (abuf0), "S" (ubuf0), "D" (ubuf1), "m" (dest),
/* no alpha plane: constant 0xFF alpha */
1431 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1432 "mov %4, %%"REG_b" \n\t"
1433 "push %%"REG_BP" \n\t"
1434 YSCALEYUV2RGB1(%%REGBP, %5)
1435 "pcmpeqd %%mm7, %%mm7 \n\t"
1436 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1437 "pop %%"REG_BP" \n\t"
1438 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1439 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
/* uvalpha >= 2048: average the two chroma lines */
1444 if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
1446 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1447 "mov %4, %%"REG_b" \n\t"
1448 "push %%"REG_BP" \n\t"
1449 YSCALEYUV2RGB1b(%%REGBP, %5)
1450 YSCALEYUV2RGB1_ALPHA(%%REGBP)
1451 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1452 "pop %%"REG_BP" \n\t"
1453 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1454 :: "c" (buf0), "d" (abuf0), "S" (ubuf0), "D" (ubuf1), "m" (dest),
/* averaged chroma, no alpha plane */
1459 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1460 "mov %4, %%"REG_b" \n\t"
1461 "push %%"REG_BP" \n\t"
1462 YSCALEYUV2RGB1b(%%REGBP, %5)
1463 "pcmpeqd %%mm7, %%mm7 \n\t"
1464 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1465 "pop %%"REG_BP" \n\t"
1466 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1467 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
/* Unscaled (single-line) YV12 -> BGR24 converter, MMX inline asm.
 * Chroma handling split on uvalpha exactly as in yuv2rgb32_1. */
1474 static void RENAME(yuv2bgr24_1)(SwsContext *c, const int16_t *buf0,
1475 const int16_t *ubuf[2], const int16_t *bguf[2],
1476 const int16_t *abuf0, uint8_t *dest,
1477 int dstW, int uvalpha, int y)
1479 const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
1480 const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
1482 if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
1484 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1485 "mov %4, %%"REG_b" \n\t"
1486 "push %%"REG_BP" \n\t"
1487 YSCALEYUV2RGB1(%%REGBP, %5)
1488 "pxor %%mm7, %%mm7 \n\t"
1489 WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
1490 "pop %%"REG_BP" \n\t"
1491 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1492 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
/* uvalpha >= 2048: average the two chroma lines */
1497 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1498 "mov %4, %%"REG_b" \n\t"
1499 "push %%"REG_BP" \n\t"
1500 YSCALEYUV2RGB1b(%%REGBP, %5)
1501 "pxor %%mm7, %%mm7 \n\t"
1502 WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
1503 "pop %%"REG_BP" \n\t"
1504 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1505 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
/* Unscaled (single-line) YV12 -> RGB555 converter with optional dither,
 * MMX inline asm.  Chroma handling split on uvalpha as in yuv2rgb32_1. */
1511 static void RENAME(yuv2rgb555_1)(SwsContext *c, const int16_t *buf0,
1512 const int16_t *ubuf[2], const int16_t *bguf[2],
1513 const int16_t *abuf0, uint8_t *dest,
1514 int dstW, int uvalpha, int y)
1516 const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
1517 const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
1519 if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
1521 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1522 "mov %4, %%"REG_b" \n\t"
1523 "push %%"REG_BP" \n\t"
1524 YSCALEYUV2RGB1(%%REGBP, %5)
1525 "pxor %%mm7, %%mm7 \n\t"
1526 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1528 "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
1529 "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
1530 "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
1532 WRITERGB15(%%REGb, 8280(%5), %%REGBP)
1533 "pop %%"REG_BP" \n\t"
1534 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1535 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
/* uvalpha >= 2048: average the two chroma lines */
1540 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1541 "mov %4, %%"REG_b" \n\t"
1542 "push %%"REG_BP" \n\t"
1543 YSCALEYUV2RGB1b(%%REGBP, %5)
1544 "pxor %%mm7, %%mm7 \n\t"
1545 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1547 "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
1548 "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
1549 "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
1551 WRITERGB15(%%REGb, 8280(%5), %%REGBP)
1552 "pop %%"REG_BP" \n\t"
1553 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1554 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
/* Unscaled (single-line) YV12 -> RGB565 converter with optional dither,
 * MMX inline asm.  Identical to yuv2rgb555_1 except for WRITERGB16. */
1560 static void RENAME(yuv2rgb565_1)(SwsContext *c, const int16_t *buf0,
1561 const int16_t *ubuf[2], const int16_t *bguf[2],
1562 const int16_t *abuf0, uint8_t *dest,
1563 int dstW, int uvalpha, int y)
1565 const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
1566 const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
1568 if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
1570 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1571 "mov %4, %%"REG_b" \n\t"
1572 "push %%"REG_BP" \n\t"
1573 YSCALEYUV2RGB1(%%REGBP, %5)
1574 "pxor %%mm7, %%mm7 \n\t"
1575 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1577 "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
1578 "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
1579 "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
1581 WRITERGB16(%%REGb, 8280(%5), %%REGBP)
1582 "pop %%"REG_BP" \n\t"
1583 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1584 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
/* uvalpha >= 2048: average the two chroma lines */
1589 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1590 "mov %4, %%"REG_b" \n\t"
1591 "push %%"REG_BP" \n\t"
1592 YSCALEYUV2RGB1b(%%REGBP, %5)
1593 "pxor %%mm7, %%mm7 \n\t"
1594 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1596 "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
1597 "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
1598 "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
1600 WRITERGB16(%%REGb, 8280(%5), %%REGBP)
1601 "pop %%"REG_BP" \n\t"
1602 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1603 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
/* Single-source packed-YUV load: one luma line (%0) and the nearest chroma
 * line (%2, V at +UV_OFFx2), all scaled to 8 bits via >>7.
 * Output: mm3=U, mm4=V, mm1/mm7=Y, ready for WRITEYUY2. */
1609 #define REAL_YSCALEYUV2PACKED1(index, c) \
1610 "xor "#index", "#index" \n\t"\
1613 "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
1614 "add "UV_OFFx2"("#c"), "#index" \n\t" \
1615 "movq (%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
1616 "sub "UV_OFFx2"("#c"), "#index" \n\t" \
1617 "psraw $7, %%mm3 \n\t" \
1618 "psraw $7, %%mm4 \n\t" \
1619 "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
1620 "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
1621 "psraw $7, %%mm1 \n\t" \
1622 "psraw $7, %%mm7 \n\t" \
/* expansion wrapper so arguments are macro-expanded first */
1624 #define YSCALEYUV2PACKED1(index, c) REAL_YSCALEYUV2PACKED1(index, c)
/* Like YSCALEYUV2PACKED1 but averages the two chroma lines (%2 and %3):
 * (a+b) then >>8 combines the /2 average with the >>7 rescale.
 * Output: mm3=U, mm4=V, mm1/mm7=Y. */
1626 #define REAL_YSCALEYUV2PACKED1b(index, c) \
1627 "xor "#index", "#index" \n\t"\
1630 "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
1631 "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
1632 "add "UV_OFFx2"("#c"), "#index" \n\t" \
1633 "movq (%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
1634 "movq (%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
1635 "sub "UV_OFFx2"("#c"), "#index" \n\t" \
1636 "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
1637 "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
1638 "psrlw $8, %%mm3 \n\t" \
1639 "psrlw $8, %%mm4 \n\t" \
1640 "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
1641 "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
1642 "psraw $7, %%mm1 \n\t" \
1643 "psraw $7, %%mm7 \n\t"
/* expansion wrapper so arguments are macro-expanded first */
1644 #define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c)
/* Unscaled (single-line) YV12 -> YUYV422 converter, MMX inline asm.
 * Chroma handling split on uvalpha exactly as in the RGB *_1 variants. */
1646 static void RENAME(yuv2yuyv422_1)(SwsContext *c, const int16_t *buf0,
1647 const int16_t *ubuf[2], const int16_t *bguf[2],
1648 const int16_t *abuf0, uint8_t *dest,
1649 int dstW, int uvalpha, int y)
1651 const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
1652 const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
1654 if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
1656 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1657 "mov %4, %%"REG_b" \n\t"
1658 "push %%"REG_BP" \n\t"
1659 YSCALEYUV2PACKED1(%%REGBP, %5)
1660 WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
1661 "pop %%"REG_BP" \n\t"
1662 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1663 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
/* uvalpha >= 2048: average the two chroma lines */
1668 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1669 "mov %4, %%"REG_b" \n\t"
1670 "push %%"REG_BP" \n\t"
1671 YSCALEYUV2PACKED1b(%%REGBP, %5)
1672 WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
1673 "pop %%"REG_BP" \n\t"
1674 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1675 :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
1681 #if !COMPILE_TEMPLATE_MMX2
1682 //FIXME yuy2* can read up to 7 samples too much
/* Extract the luma bytes (even positions) from packed YUY2 into dst.
 * Masks with bm01010101 and packs 16 source bytes -> 8 luma bytes per
 * iteration; loops REG_a from -width to 0 against end-biased pointers. */
1684 static void RENAME(yuy2ToY)(uint8_t *dst, const uint8_t *src,
1685 int width, uint32_t *unused)
1688 "movq "MANGLE(bm01010101)", %%mm2 \n\t"
1689 "mov %0, %%"REG_a" \n\t"
1691 "movq (%1, %%"REG_a",2), %%mm0 \n\t"
1692 "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
1693 "pand %%mm2, %%mm0 \n\t"
1694 "pand %%mm2, %%mm1 \n\t"
1695 "packuswb %%mm1, %%mm0 \n\t"
1696 "movq %%mm0, (%2, %%"REG_a") \n\t"
1697 "add $8, %%"REG_a" \n\t"
1699 : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst+width)
1704 static void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV,
1705 const uint8_t *src1, const uint8_t *src2,
1706 int width, uint32_t *unused)
1709 "movq "MANGLE(bm01010101)", %%mm4 \n\t"
1710 "mov %0, %%"REG_a" \n\t"
1712 "movq (%1, %%"REG_a",4), %%mm0 \n\t"
1713 "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
1714 "psrlw $8, %%mm0 \n\t"
1715 "psrlw $8, %%mm1 \n\t"
1716 "packuswb %%mm1, %%mm0 \n\t"
1717 "movq %%mm0, %%mm1 \n\t"
1718 "psrlw $8, %%mm0 \n\t"
1719 "pand %%mm4, %%mm1 \n\t"
1720 "packuswb %%mm0, %%mm0 \n\t"
1721 "packuswb %%mm1, %%mm1 \n\t"
1722 "movd %%mm0, (%3, %%"REG_a") \n\t"
1723 "movd %%mm1, (%2, %%"REG_a") \n\t"
1724 "add $4, %%"REG_a" \n\t"
1726 : : "g" ((x86_reg)-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
1729 assert(src1 == src2);
1732 /* This is almost identical to the previous, end exists only because
1733 * yuy2ToY/UV)(dst, src+1, ...) would have 100% unaligned accesses. */
/* Extract luma (odd positions) from packed UYVY via psrlw $8 + pack.
 * Kept separate from yuy2ToY so the loads stay aligned (see the comment
 * above in the file). */
1734 static void RENAME(uyvyToY)(uint8_t *dst, const uint8_t *src,
1735 int width, uint32_t *unused)
1738 "mov %0, %%"REG_a" \n\t"
1740 "movq (%1, %%"REG_a",2), %%mm0 \n\t"
1741 "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
1742 "psrlw $8, %%mm0 \n\t"
1743 "psrlw $8, %%mm1 \n\t"
1744 "packuswb %%mm1, %%mm0 \n\t"
1745 "movq %%mm0, (%2, %%"REG_a") \n\t"
1746 "add $8, %%"REG_a" \n\t"
1748 : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst+width)
/* Deinterleave chroma from packed UYVY (U Y0 V Y1 ...) into dstU/dstV.
 * Same structure as yuy2ToUV but selects the even bytes first (pand)
 * instead of shifting.  src2 is unused: asserted src1 == src2. */
1753 static void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV,
1754 const uint8_t *src1, const uint8_t *src2,
1755 int width, uint32_t *unused)
1758 "movq "MANGLE(bm01010101)", %%mm4 \n\t"
1759 "mov %0, %%"REG_a" \n\t"
1761 "movq (%1, %%"REG_a",4), %%mm0 \n\t"
1762 "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
1763 "pand %%mm4, %%mm0 \n\t"
1764 "pand %%mm4, %%mm1 \n\t"
1765 "packuswb %%mm1, %%mm0 \n\t"
1766 "movq %%mm0, %%mm1 \n\t"
1767 "psrlw $8, %%mm0 \n\t" /* V samples */
1768 "pand %%mm4, %%mm1 \n\t" /* U samples */
1769 "packuswb %%mm0, %%mm0 \n\t"
1770 "packuswb %%mm1, %%mm1 \n\t"
1771 "movd %%mm0, (%3, %%"REG_a") \n\t"
1772 "movd %%mm1, (%2, %%"REG_a") \n\t"
1773 "add $4, %%"REG_a" \n\t"
1775 : : "g" ((x86_reg)-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
1778 assert(src1 == src2);
/* Split an interleaved 2-byte-per-sample chroma plane (NV12/NV21 style)
 * into two planes: even bytes -> dst1, odd bytes -> dst2, 8 samples of
 * each per iteration.  Callers pick the dst order for NV12 vs NV21. */
1781 static av_always_inline void RENAME(nvXXtoUV)(uint8_t *dst1, uint8_t *dst2,
1782 const uint8_t *src, int width)
1785 "movq "MANGLE(bm01010101)", %%mm4 \n\t"
1786 "mov %0, %%"REG_a" \n\t"
1788 "movq (%1, %%"REG_a",2), %%mm0 \n\t"
1789 "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
1790 "movq %%mm0, %%mm2 \n\t"
1791 "movq %%mm1, %%mm3 \n\t"
1792 "pand %%mm4, %%mm0 \n\t" /* even bytes */
1793 "pand %%mm4, %%mm1 \n\t"
1794 "psrlw $8, %%mm2 \n\t" /* odd bytes */
1795 "psrlw $8, %%mm3 \n\t"
1796 "packuswb %%mm1, %%mm0 \n\t"
1797 "packuswb %%mm3, %%mm2 \n\t"
1798 "movq %%mm0, (%2, %%"REG_a") \n\t"
1799 "movq %%mm2, (%3, %%"REG_a") \n\t"
1800 "add $8, %%"REG_a" \n\t"
1802 : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst1+width), "r" (dst2+width)
/* NV12: interleaved chroma is U,V,U,V... -> even bytes to dstU. */
1807 static void RENAME(nv12ToUV)(uint8_t *dstU, uint8_t *dstV,
1808 const uint8_t *src1, const uint8_t *src2,
1809 int width, uint32_t *unused)
1811 RENAME(nvXXtoUV)(dstU, dstV, src1, width);
/* NV21: interleaved chroma is V,U,V,U... -> swap the destination planes. */
1814 static void RENAME(nv21ToUV)(uint8_t *dstU, uint8_t *dstV,
1815 const uint8_t *src1, const uint8_t *src2,
1816 int width, uint32_t *unused)
1818 RENAME(nvXXtoUV)(dstV, dstU, src1, width);
1820 #endif /* !COMPILE_TEMPLATE_MMX2 */
/* Convert packed 24-bit BGR or RGB to 16-bit luma using pmaddwd with the
 * ff_{bgr,rgb}24toY coefficient tables (selected by srcFormat), adding
 * ff_bgr24toYOffset and scaling by >>9.  Processes 4 pixels (12 source
 * bytes, via the overlapping movd loads at 0/2/6/8) per iteration. */
1823 static av_always_inline void RENAME(bgr24ToY_mmx)(int16_t *dst, const uint8_t *src,
1824 int width, enum PixelFormat srcFormat)
1826 if(srcFormat == PIX_FMT_BGR24) {
1828 "movq "MANGLE(ff_bgr24toY1Coeff)", %%mm5 \n\t"
1829 "movq "MANGLE(ff_bgr24toY2Coeff)", %%mm6 \n\t"
/* RGB24 input: same kernel, swapped coefficient tables */
1834 "movq "MANGLE(ff_rgb24toY1Coeff)", %%mm5 \n\t"
1835 "movq "MANGLE(ff_rgb24toY2Coeff)", %%mm6 \n\t"
1841 "movq "MANGLE(ff_bgr24toYOffset)", %%mm4 \n\t"
1842 "mov %2, %%"REG_a" \n\t"
1843 "pxor %%mm7, %%mm7 \n\t"
1845 PREFETCH" 64(%0) \n\t"
1846 "movd (%0), %%mm0 \n\t"
1847 "movd 2(%0), %%mm1 \n\t"
1848 "movd 6(%0), %%mm2 \n\t"
1849 "movd 8(%0), %%mm3 \n\t"
1851 "punpcklbw %%mm7, %%mm0 \n\t"
1852 "punpcklbw %%mm7, %%mm1 \n\t"
1853 "punpcklbw %%mm7, %%mm2 \n\t"
1854 "punpcklbw %%mm7, %%mm3 \n\t"
1855 "pmaddwd %%mm5, %%mm0 \n\t"
1856 "pmaddwd %%mm6, %%mm1 \n\t"
1857 "pmaddwd %%mm5, %%mm2 \n\t"
1858 "pmaddwd %%mm6, %%mm3 \n\t"
1859 "paddd %%mm1, %%mm0 \n\t"
1860 "paddd %%mm3, %%mm2 \n\t"
1861 "paddd %%mm4, %%mm0 \n\t"
1862 "paddd %%mm4, %%mm2 \n\t"
1863 "psrad $9, %%mm0 \n\t"
1864 "psrad $9, %%mm2 \n\t"
1865 "packssdw %%mm2, %%mm0 \n\t"
1866 "movq %%mm0, (%1, %%"REG_a") \n\t"
1867 "add $8, %%"REG_a" \n\t"
1870 : "r" (dst+width), "g" ((x86_reg)-2*width)
/* Public BGR24 -> Y entry point: delegates to the shared MMX kernel. */
1875 static void RENAME(bgr24ToY)(int16_t *dst, const uint8_t *src,
1876 int width, uint32_t *unused)
1878 RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_BGR24);
/* Public RGB24 -> Y entry point: same kernel, RGB coefficient tables. */
1881 static void RENAME(rgb24ToY)(int16_t *dst, const uint8_t *src,
1882 int width, uint32_t *unused)
1884 RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_RGB24);
/* Convert packed 24-bit BGR/RGB to 16-bit U and V planes.  The coefficient
 * block ff_bgr24toUV[srcFormat==PIX_FMT_RGB24] is passed as operand %4:
 * offsets 0/8 feed U, 16/24 feed V (24 is preloaded into mm6); results are
 * biased by ff_bgr24toUVOffset and scaled by >>9.  4 pixels per iteration,
 * same overlapping 0/2/6/8 load trick as bgr24ToY_mmx. */
1887 static av_always_inline void RENAME(bgr24ToUV_mmx)(int16_t *dstU, int16_t *dstV,
1888 const uint8_t *src, int width,
1889 enum PixelFormat srcFormat)
1892 "movq 24(%4), %%mm6 \n\t"
1893 "mov %3, %%"REG_a" \n\t"
1894 "pxor %%mm7, %%mm7 \n\t"
1896 PREFETCH" 64(%0) \n\t"
1897 "movd (%0), %%mm0 \n\t"
1898 "movd 2(%0), %%mm1 \n\t"
1899 "punpcklbw %%mm7, %%mm0 \n\t"
1900 "punpcklbw %%mm7, %%mm1 \n\t"
1901 "movq %%mm0, %%mm2 \n\t"
1902 "movq %%mm1, %%mm3 \n\t"
1903 "pmaddwd (%4), %%mm0 \n\t" /* U, first 2 px */
1904 "pmaddwd 8(%4), %%mm1 \n\t"
1905 "pmaddwd 16(%4), %%mm2 \n\t" /* V, first 2 px */
1906 "pmaddwd %%mm6, %%mm3 \n\t"
1907 "paddd %%mm1, %%mm0 \n\t"
1908 "paddd %%mm3, %%mm2 \n\t"
/* second pair of pixels */
1910 "movd 6(%0), %%mm1 \n\t"
1911 "movd 8(%0), %%mm3 \n\t"
1913 "punpcklbw %%mm7, %%mm1 \n\t"
1914 "punpcklbw %%mm7, %%mm3 \n\t"
1915 "movq %%mm1, %%mm4 \n\t"
1916 "movq %%mm3, %%mm5 \n\t"
1917 "pmaddwd (%4), %%mm1 \n\t" /* U, last 2 px */
1918 "pmaddwd 8(%4), %%mm3 \n\t"
1919 "pmaddwd 16(%4), %%mm4 \n\t" /* V, last 2 px */
1920 "pmaddwd %%mm6, %%mm5 \n\t"
1921 "paddd %%mm3, %%mm1 \n\t"
1922 "paddd %%mm5, %%mm4 \n\t"
1924 "movq "MANGLE(ff_bgr24toUVOffset)", %%mm3 \n\t"
1925 "paddd %%mm3, %%mm0 \n\t"
1926 "paddd %%mm3, %%mm2 \n\t"
1927 "paddd %%mm3, %%mm1 \n\t"
1928 "paddd %%mm3, %%mm4 \n\t"
1929 "psrad $9, %%mm0 \n\t"
1930 "psrad $9, %%mm2 \n\t"
1931 "psrad $9, %%mm1 \n\t"
1932 "psrad $9, %%mm4 \n\t"
1933 "packssdw %%mm1, %%mm0 \n\t"
1934 "packssdw %%mm4, %%mm2 \n\t"
1935 "movq %%mm0, (%1, %%"REG_a") \n\t"
1936 "movq %%mm2, (%2, %%"REG_a") \n\t"
1937 "add $8, %%"REG_a" \n\t"
1940 : "r" (dstU+width), "r" (dstV+width), "g" ((x86_reg)-2*width), "r"(ff_bgr24toUV[srcFormat == PIX_FMT_RGB24])
/* Public BGR24 -> U/V entry point; src2 unused (asserted src1 == src2). */
1945 static void RENAME(bgr24ToUV)(int16_t *dstU, int16_t *dstV,
1946 const uint8_t *src1, const uint8_t *src2,
1947 int width, uint32_t *unused)
1949 RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_BGR24);
1950 assert(src1 == src2);
/* Public RGB24 -> U/V entry point: same kernel, RGB coefficient block. */
1953 static void RENAME(rgb24ToUV)(int16_t *dstU, int16_t *dstV,
1954 const uint8_t *src1, const uint8_t *src2,
1955 int width, uint32_t *unused)
1958 RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_RGB24);
1961 #if !COMPILE_TEMPLATE_MMX2
1962 // bilinear / bicubic scaling
/**
 * Horizontal scaler (MMX): for each output sample i,
 *   dst[i] = (sum_j src[filterPos[i] + j] * filter[filterSize*i + j]) >> 7
 * with the 32-bit sums saturated to int16 via packssdw.
 * filterSize must be a positive multiple of 4 (asserted); unrolled asm
 * loops handle filterSize 4 and 8, a generic asm loop handles the rest.
 * Two output pixels are produced per asm iteration.
 */
1963 static void RENAME(hScale)(SwsContext *c, int16_t *dst, int dstW,
1964 const uint8_t *src, const int16_t *filter,
1965 const int16_t *filterPos, int filterSize)
1967 assert(filterSize % 4 == 0 && filterSize>0);
1968 if (filterSize==4) { // Always true for upscaling, sometimes for down, too.
/* counter runs from -2*dstW up towards 0; filterPos is biased so the
   (negative) counter indexes entry 0 at the loop start. */
1969 x86_reg counter= -2*dstW;
1971 filterPos-= counter/2;
1975 "push %%"REG_b" \n\t"
1977 "pxor %%mm7, %%mm7 \n\t" /* mm7 = 0, used to zero-extend bytes */
1978 "push %%"REG_BP" \n\t" // we use 7 regs here ...
1979 "mov %%"REG_a", %%"REG_BP" \n\t"
/* fetch two source positions and the two 4-tap coefficient groups
   for a pair of output pixels */
1982 "movzwl (%2, %%"REG_BP"), %%eax \n\t"
1983 "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t"
1984 "movq (%1, %%"REG_BP", 4), %%mm1 \n\t"
1985 "movq 8(%1, %%"REG_BP", 4), %%mm3 \n\t"
1986 "movd (%3, %%"REG_a"), %%mm0 \n\t"
1987 "movd (%3, %%"REG_b"), %%mm2 \n\t"
1988 "punpcklbw %%mm7, %%mm0 \n\t" /* bytes -> 16-bit words */
1989 "punpcklbw %%mm7, %%mm2 \n\t"
1990 "pmaddwd %%mm1, %%mm0 \n\t" /* src * coeff, pairwise-summed */
1991 "pmaddwd %%mm2, %%mm3 \n\t"
/* horizontal add of the two partial sums per pixel */
1992 "movq %%mm0, %%mm4 \n\t"
1993 "punpckldq %%mm3, %%mm0 \n\t"
1994 "punpckhdq %%mm3, %%mm4 \n\t"
1995 "paddd %%mm4, %%mm0 \n\t"
1996 "psrad $7, %%mm0 \n\t" /* scale back from fixed point */
1997 "packssdw %%mm0, %%mm0 \n\t" /* saturate to int16 */
1998 "movd %%mm0, (%4, %%"REG_BP") \n\t" /* store two output samples */
1999 "add $4, %%"REG_BP" \n\t"
2002 "pop %%"REG_BP" \n\t"
2004 "pop %%"REG_b" \n\t"
2007 : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
/* filterSize==8: same scheme, but two 4-tap passes accumulated per
   output pixel before the horizontal add */
2012 } else if (filterSize==8) {
2013 x86_reg counter= -2*dstW;
2015 filterPos-= counter/2;
2019 "push %%"REG_b" \n\t"
2021 "pxor %%mm7, %%mm7 \n\t"
2022 "push %%"REG_BP" \n\t" // we use 7 regs here ...
2023 "mov %%"REG_a", %%"REG_BP" \n\t"
2026 "movzwl (%2, %%"REG_BP"), %%eax \n\t"
2027 "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t"
2028 "movq (%1, %%"REG_BP", 8), %%mm1 \n\t"
2029 "movq 16(%1, %%"REG_BP", 8), %%mm3 \n\t"
2030 "movd (%3, %%"REG_a"), %%mm0 \n\t"
2031 "movd (%3, %%"REG_b"), %%mm2 \n\t"
2032 "punpcklbw %%mm7, %%mm0 \n\t"
2033 "punpcklbw %%mm7, %%mm2 \n\t"
2034 "pmaddwd %%mm1, %%mm0 \n\t"
2035 "pmaddwd %%mm2, %%mm3 \n\t"
/* second group of 4 taps */
2037 "movq 8(%1, %%"REG_BP", 8), %%mm1 \n\t"
2038 "movq 24(%1, %%"REG_BP", 8), %%mm5 \n\t"
2039 "movd 4(%3, %%"REG_a"), %%mm4 \n\t"
2040 "movd 4(%3, %%"REG_b"), %%mm2 \n\t"
2041 "punpcklbw %%mm7, %%mm4 \n\t"
2042 "punpcklbw %%mm7, %%mm2 \n\t"
2043 "pmaddwd %%mm1, %%mm4 \n\t"
2044 "pmaddwd %%mm2, %%mm5 \n\t"
2045 "paddd %%mm4, %%mm0 \n\t"
2046 "paddd %%mm5, %%mm3 \n\t"
2047 "movq %%mm0, %%mm4 \n\t"
2048 "punpckldq %%mm3, %%mm0 \n\t"
2049 "punpckhdq %%mm3, %%mm4 \n\t"
2050 "paddd %%mm4, %%mm0 \n\t"
2051 "psrad $7, %%mm0 \n\t"
2052 "packssdw %%mm0, %%mm0 \n\t"
2053 "movd %%mm0, (%4, %%"REG_BP") \n\t"
2054 "add $4, %%"REG_BP" \n\t"
2057 "pop %%"REG_BP" \n\t"
2059 "pop %%"REG_b" \n\t"
2062 : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
/* generic filterSize (any multiple of 4): inner loop over the taps,
   accumulating one pixel pair in mm4/mm5; 'offset' marks the end of the
   tap walk (src + filterSize) for the cmp below */
2068 const uint8_t *offset = src+filterSize;
2069 x86_reg counter= -2*dstW;
2070 //filter-= counter*filterSize/2;
2071 filterPos-= counter/2;
2074 "pxor %%mm7, %%mm7 \n\t"
2077 "mov %2, %%"REG_c" \n\t"
2078 "movzwl (%%"REG_c", %0), %%eax \n\t"
2079 "movzwl 2(%%"REG_c", %0), %%edx \n\t"
2080 "mov %5, %%"REG_c" \n\t"
2081 "pxor %%mm4, %%mm4 \n\t" /* accumulator, pixel 0 */
2082 "pxor %%mm5, %%mm5 \n\t" /* accumulator, pixel 1 */
2084 "movq (%1), %%mm1 \n\t"
2085 "movq (%1, %6), %%mm3 \n\t"
2086 "movd (%%"REG_c", %%"REG_a"), %%mm0 \n\t"
2087 "movd (%%"REG_c", %%"REG_d"), %%mm2 \n\t"
2088 "punpcklbw %%mm7, %%mm0 \n\t"
2089 "punpcklbw %%mm7, %%mm2 \n\t"
2090 "pmaddwd %%mm1, %%mm0 \n\t"
2091 "pmaddwd %%mm2, %%mm3 \n\t"
2092 "paddd %%mm3, %%mm5 \n\t"
2093 "paddd %%mm0, %%mm4 \n\t"
2095 "add $4, %%"REG_c" \n\t"
2096 "cmp %4, %%"REG_c" \n\t" /* loop until src pointer reaches 'offset' */
2099 "movq %%mm4, %%mm0 \n\t"
2100 "punpckldq %%mm5, %%mm4 \n\t"
2101 "punpckhdq %%mm5, %%mm0 \n\t"
2102 "paddd %%mm0, %%mm4 \n\t"
2103 "psrad $7, %%mm4 \n\t"
2104 "packssdw %%mm4, %%mm4 \n\t"
2105 "mov %3, %%"REG_a" \n\t"
2106 "movd %%mm4, (%%"REG_a", %0) \n\t"
2110 : "+r" (counter), "+r" (filter)
2111 : "m" (filterPos), "m" (dst), "m"(offset),
2112 "m" (src), "r" ((x86_reg)filterSize*2)
2113 : "%"REG_a, "%"REG_c, "%"REG_d
2117 #endif /* !COMPILE_TEMPLATE_MMX2 */
/**
 * 16-bit-input horizontal scaler (MMX): like hScale() but reads uint16_t
 * source samples (movq loads 4 words directly, no byte unpack needed) and
 * shifts the accumulated sum right by the runtime 'shift' amount held in
 * mm7, instead of a fixed 7. When shift >= 15 a scalar C loop is used,
 * clipping each result to (1<<15)-1.
 * NOTE(review): srcW and xInc are not referenced in the visible body.
 */
2119 static inline void RENAME(hScale16)(int16_t *dst, int dstW, const uint16_t *src, int srcW, int xInc,
2120 const int16_t *filter, const int16_t *filterPos, long filterSize, int shift)
2124 assert(filterSize % 4 == 0 && filterSize>0);
2125 if (filterSize==4 && shift<15) { // Always true for upscaling, sometimes for down, too.
2126 x86_reg counter= -2*dstW;
2128 filterPos-= counter/2;
2131 "movd %5, %%mm7 \n\t" /* mm7 = shift count for psrad */
2133 "push %%"REG_b" \n\t"
2135 "push %%"REG_BP" \n\t" // we use 7 regs here ...
2136 "mov %%"REG_a", %%"REG_BP" \n\t"
2139 "movzwl (%2, %%"REG_BP"), %%eax \n\t"
2140 "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t"
2141 "movq (%1, %%"REG_BP", 4), %%mm1 \n\t"
2142 "movq 8(%1, %%"REG_BP", 4), %%mm3 \n\t"
/* scale-by-2 addressing: source samples are 16-bit wide */
2143 "movq (%3, %%"REG_a", 2), %%mm0 \n\t"
2144 "movq (%3, %%"REG_b", 2), %%mm2 \n\t"
2145 "pmaddwd %%mm1, %%mm0 \n\t"
2146 "pmaddwd %%mm2, %%mm3 \n\t"
2147 "movq %%mm0, %%mm4 \n\t"
2148 "punpckldq %%mm3, %%mm0 \n\t"
2149 "punpckhdq %%mm3, %%mm4 \n\t"
2150 "paddd %%mm4, %%mm0 \n\t"
2151 "psrad %%mm7, %%mm0 \n\t" /* variable shift, from 'shift' arg */
2152 "packssdw %%mm0, %%mm0 \n\t"
2153 "movd %%mm0, (%4, %%"REG_BP") \n\t"
2154 "add $4, %%"REG_BP" \n\t"
2157 "pop %%"REG_BP" \n\t"
2159 "pop %%"REG_b" \n\t"
2162 : "c" (filter), "d" (filterPos), "S" (src), "D" (dst), "m"(shift)
/* filterSize==8: two 4-tap passes per output pixel, as in hScale() */
2167 } else if (filterSize==8 && shift<15) {
2168 x86_reg counter= -2*dstW;
2170 filterPos-= counter/2;
2173 "movd %5, %%mm7 \n\t"
2175 "push %%"REG_b" \n\t"
2177 "push %%"REG_BP" \n\t" // we use 7 regs here ...
2178 "mov %%"REG_a", %%"REG_BP" \n\t"
2181 "movzwl (%2, %%"REG_BP"), %%eax \n\t"
2182 "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t"
2183 "movq (%1, %%"REG_BP", 8), %%mm1 \n\t"
2184 "movq 16(%1, %%"REG_BP", 8), %%mm3 \n\t"
2185 "movq (%3, %%"REG_a", 2), %%mm0 \n\t"
2186 "movq (%3, %%"REG_b", 2), %%mm2 \n\t"
2187 "pmaddwd %%mm1, %%mm0 \n\t"
2188 "pmaddwd %%mm2, %%mm3 \n\t"
/* second group of 4 taps */
2190 "movq 8(%1, %%"REG_BP", 8), %%mm1 \n\t"
2191 "movq 24(%1, %%"REG_BP", 8), %%mm5 \n\t"
2192 "movq 8(%3, %%"REG_a", 2), %%mm4 \n\t"
2193 "movq 8(%3, %%"REG_b", 2), %%mm2 \n\t"
2194 "pmaddwd %%mm1, %%mm4 \n\t"
2195 "pmaddwd %%mm2, %%mm5 \n\t"
2196 "paddd %%mm4, %%mm0 \n\t"
2197 "paddd %%mm5, %%mm3 \n\t"
2198 "movq %%mm0, %%mm4 \n\t"
2199 "punpckldq %%mm3, %%mm0 \n\t"
2200 "punpckhdq %%mm3, %%mm4 \n\t"
2201 "paddd %%mm4, %%mm0 \n\t"
2202 "psrad %%mm7, %%mm0 \n\t"
2203 "packssdw %%mm0, %%mm0 \n\t"
2204 "movd %%mm0, (%4, %%"REG_BP") \n\t"
2205 "add $4, %%"REG_BP" \n\t"
2208 "pop %%"REG_BP" \n\t"
2210 "pop %%"REG_b" \n\t"
2213 : "c" (filter), "d" (filterPos), "S" (src), "D" (dst), "m"(shift)
/* generic filterSize with shift<15: tap loop, accumulators in mm4/mm5 */
2218 } else if (shift<15){
2219 const uint16_t *offset = src+filterSize;
2220 x86_reg counter= -2*dstW;
2221 //filter-= counter*filterSize/2;
2222 filterPos-= counter/2;
2225 "movd %7, %%mm7 \n\t"
2228 "mov %2, %%"REG_c" \n\t"
2229 "movzwl (%%"REG_c", %0), %%eax \n\t"
2230 "movzwl 2(%%"REG_c", %0), %%edx \n\t"
2231 "mov %5, %%"REG_c" \n\t"
2232 "pxor %%mm4, %%mm4 \n\t"
2233 "pxor %%mm5, %%mm5 \n\t"
2235 "movq (%1), %%mm1 \n\t"
2236 "movq (%1, %6), %%mm3 \n\t"
2237 "movq (%%"REG_c", %%"REG_a", 2), %%mm0 \n\t"
2238 "movq (%%"REG_c", %%"REG_d", 2), %%mm2 \n\t"
2239 "pmaddwd %%mm1, %%mm0 \n\t"
2240 "pmaddwd %%mm2, %%mm3 \n\t"
2241 "paddd %%mm3, %%mm5 \n\t"
2242 "paddd %%mm0, %%mm4 \n\t"
2244 "add $8, %%"REG_c" \n\t" /* advance 4 x uint16_t per pass */
2245 "cmp %4, %%"REG_c" \n\t"
2248 "movq %%mm4, %%mm0 \n\t"
2249 "punpckldq %%mm5, %%mm4 \n\t"
2250 "punpckhdq %%mm5, %%mm0 \n\t"
2251 "paddd %%mm0, %%mm4 \n\t"
2252 "psrad %%mm7, %%mm4 \n\t"
2253 "packssdw %%mm4, %%mm4 \n\t"
2254 "mov %3, %%"REG_a" \n\t"
2255 "movd %%mm4, (%%"REG_a", %0) \n\t"
2259 : "+r" (counter), "+r" (filter)
2260 : "m" (filterPos), "m" (dst), "m"(offset),
2261 "m" (src), "r" ((x86_reg)filterSize*2), "m"(shift)
2262 : "%"REG_a, "%"REG_c, "%"REG_d
/* scalar fallback (shift >= 15): plain multiply-accumulate in C, clipped
   to the int16 maximum */
2265 for (i=0; i<dstW; i++) {
2266 int srcPos= filterPos[i];
2268 for (j=0; j<filterSize; j++) {
2269 val += ((int)src[srcPos + j])*filter[filterSize*i + j];
2271 dst[i] = FFMIN(val>>shift, (1<<15)-1); // the cubic equation does overflow ...
2276 #if COMPILE_TEMPLATE_MMX2
/**
 * Fast bilinear horizontal luma scaling using runtime-generated MMX2
 * filter code (c->lumMmx2FilterCode), chained in 8 chunks via
 * CALL_MMX2_FILTER_CODE. rbx/ebx is saved to 'ebxsave' around the asm —
 * presumably because ebx can be reserved (e.g. PIC) on some targets;
 * confirm against the surrounding #if guards. The tail loop replicates
 * the last source pixel (scaled by 128) for output positions whose
 * source coordinate maps at or past srcW-1.
 */
2277 static void RENAME(hyscale_fast)(SwsContext *c, int16_t *dst,
2278 int dstWidth, const uint8_t *src,
2281 int16_t *filterPos = c->hLumFilterPos;
2282 int16_t *filter = c->hLumFilter;
2283 void *mmx2FilterCode= c->lumMmx2FilterCode;
2286 DECLARE_ALIGNED(8, uint64_t, ebxsave);
2291 "mov %%"REG_b", %5 \n\t" /* save rbx/ebx */
2293 "pxor %%mm7, %%mm7 \n\t"
2294 "mov %0, %%"REG_c" \n\t"
2295 "mov %1, %%"REG_D" \n\t"
2296 "mov %2, %%"REG_d" \n\t"
2297 "mov %3, %%"REG_b" \n\t"
2298 "xor %%"REG_a", %%"REG_a" \n\t" // i
2299 PREFETCH" (%%"REG_c") \n\t"
2300 PREFETCH" 32(%%"REG_c") \n\t"
2301 PREFETCH" 64(%%"REG_c") \n\t"
/* chain into the generated filter code: the 64-bit variant keeps the
   chunk offset in esi and adds it to the src pointer, the 32-bit
   variant adds it to the pointer register directly */
2304 #define CALL_MMX2_FILTER_CODE \
2305 "movl (%%"REG_b"), %%esi \n\t"\
2307 "movl (%%"REG_b", %%"REG_a"), %%esi \n\t"\
2308 "add %%"REG_S", %%"REG_c" \n\t"\
2309 "add %%"REG_a", %%"REG_D" \n\t"\
2310 "xor %%"REG_a", %%"REG_a" \n\t"\
2313 #define CALL_MMX2_FILTER_CODE \
2314 "movl (%%"REG_b"), %%esi \n\t"\
2316 "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\
2317 "add %%"REG_a", %%"REG_D" \n\t"\
2318 "xor %%"REG_a", %%"REG_a" \n\t"\
2320 #endif /* ARCH_X86_64 */
2322 CALL_MMX2_FILTER_CODE
2323 CALL_MMX2_FILTER_CODE
2324 CALL_MMX2_FILTER_CODE
2325 CALL_MMX2_FILTER_CODE
2326 CALL_MMX2_FILTER_CODE
2327 CALL_MMX2_FILTER_CODE
2328 CALL_MMX2_FILTER_CODE
2329 CALL_MMX2_FILTER_CODE
2332 "mov %5, %%"REG_b" \n\t" /* restore rbx/ebx */
2334 :: "m" (src), "m" (dst), "m" (filter), "m" (filterPos),
2335 "m" (mmx2FilterCode)
2339 : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
/* edge handling: pad outputs that would sample beyond the last input */
2345 for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
2346 dst[i] = src[srcW-1]*128;
/**
 * Fast bilinear horizontal chroma scaling via runtime-generated MMX2
 * filter code (c->chrMmx2FilterCode). Runs the generated filter over
 * src1 into dst1, then resets the index register and repeats over src2
 * into dst2 with the same filter/positions. rbx/ebx is saved to
 * 'ebxsave' around the asm, as in hyscale_fast(). The tail loop pads
 * both planes for output positions mapping at or past srcW-1.
 */
2349 static void RENAME(hcscale_fast)(SwsContext *c, int16_t *dst1, int16_t *dst2,
2350 int dstWidth, const uint8_t *src1,
2351 const uint8_t *src2, int srcW, int xInc)
2353 int16_t *filterPos = c->hChrFilterPos;
2354 int16_t *filter = c->hChrFilter;
2355 void *mmx2FilterCode= c->chrMmx2FilterCode;
2358 DECLARE_ALIGNED(8, uint64_t, ebxsave);
2363 "mov %%"REG_b", %7 \n\t" /* save rbx/ebx */
2365 "pxor %%mm7, %%mm7 \n\t"
2366 "mov %0, %%"REG_c" \n\t"
2367 "mov %1, %%"REG_D" \n\t"
2368 "mov %2, %%"REG_d" \n\t"
2369 "mov %3, %%"REG_b" \n\t"
2370 "xor %%"REG_a", %%"REG_a" \n\t" // i
2371 PREFETCH" (%%"REG_c") \n\t"
2372 PREFETCH" 32(%%"REG_c") \n\t"
2373 PREFETCH" 64(%%"REG_c") \n\t"
2375 CALL_MMX2_FILTER_CODE
2376 CALL_MMX2_FILTER_CODE
2377 CALL_MMX2_FILTER_CODE
2378 CALL_MMX2_FILTER_CODE
/* second chroma plane: same filter, new source/destination */
2379 "xor %%"REG_a", %%"REG_a" \n\t" // i
2380 "mov %5, %%"REG_c" \n\t" // src
2381 "mov %6, %%"REG_D" \n\t" // buf2
2382 PREFETCH" (%%"REG_c") \n\t"
2383 PREFETCH" 32(%%"REG_c") \n\t"
2384 PREFETCH" 64(%%"REG_c") \n\t"
2386 CALL_MMX2_FILTER_CODE
2387 CALL_MMX2_FILTER_CODE
2388 CALL_MMX2_FILTER_CODE
2389 CALL_MMX2_FILTER_CODE
2392 "mov %7, %%"REG_b" \n\t" /* restore rbx/ebx */
2394 :: "m" (src1), "m" (dst1), "m" (filter), "m" (filterPos),
2395 "m" (mmx2FilterCode), "m" (src2), "m"(dst2)
2399 : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
/* edge handling for both chroma planes */
2405 for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) {
2406 dst1[i] = src1[srcW-1]*128;
2407 dst2[i] = src2[srcW-1]*128;
2410 #endif /* COMPILE_TEMPLATE_MMX2 */
2412 static av_cold void RENAME(sws_init_swScale)(SwsContext *c)
2414 enum PixelFormat srcFormat = c->srcFormat,
2415 dstFormat = c->dstFormat;
2417 if (!is16BPS(dstFormat) && !is9_OR_10BPS(dstFormat) && dstFormat != PIX_FMT_NV12
2418 && dstFormat != PIX_FMT_NV21 && !(c->flags & SWS_BITEXACT)) {
2419 if (c->flags & SWS_ACCURATE_RND) {
2420 c->yuv2yuv1 = RENAME(yuv2yuv1_ar );
2421 c->yuv2yuvX = RENAME(yuv2yuvX_ar );
2422 if (!(c->flags & SWS_FULL_CHR_H_INT)) {
2423 switch (c->dstFormat) {
2424 case PIX_FMT_RGB32: c->yuv2packedX = RENAME(yuv2rgb32_X_ar); break;
2425 case PIX_FMT_BGR24: c->yuv2packedX = RENAME(yuv2bgr24_X_ar); break;
2426 case PIX_FMT_RGB555: c->yuv2packedX = RENAME(yuv2rgb555_X_ar); break;
2427 case PIX_FMT_RGB565: c->yuv2packedX = RENAME(yuv2rgb565_X_ar); break;
2428 case PIX_FMT_YUYV422: c->yuv2packedX = RENAME(yuv2yuyv422_X_ar); break;
2433 int should_dither= isNBPS(c->srcFormat) || is16BPS(c->srcFormat);
2434 c->yuv2yuv1 = should_dither ? RENAME(yuv2yuv1_ar ) : RENAME(yuv2yuv1 );
2435 c->yuv2yuvX = RENAME(yuv2yuvX );
2436 if (!(c->flags & SWS_FULL_CHR_H_INT)) {
2437 switch (c->dstFormat) {
2438 case PIX_FMT_RGB32: c->yuv2packedX = RENAME(yuv2rgb32_X); break;
2439 case PIX_FMT_BGR24: c->yuv2packedX = RENAME(yuv2bgr24_X); break;
2440 case PIX_FMT_RGB555: c->yuv2packedX = RENAME(yuv2rgb555_X); break;
2441 case PIX_FMT_RGB565: c->yuv2packedX = RENAME(yuv2rgb565_X); break;
2442 case PIX_FMT_YUYV422: c->yuv2packedX = RENAME(yuv2yuyv422_X); break;
2447 if (!(c->flags & SWS_FULL_CHR_H_INT)) {
2448 switch (c->dstFormat) {
2450 c->yuv2packed1 = RENAME(yuv2rgb32_1);
2451 c->yuv2packed2 = RENAME(yuv2rgb32_2);
2454 c->yuv2packed1 = RENAME(yuv2bgr24_1);
2455 c->yuv2packed2 = RENAME(yuv2bgr24_2);
2457 case PIX_FMT_RGB555:
2458 c->yuv2packed1 = RENAME(yuv2rgb555_1);
2459 c->yuv2packed2 = RENAME(yuv2rgb555_2);
2461 case PIX_FMT_RGB565:
2462 c->yuv2packed1 = RENAME(yuv2rgb565_1);
2463 c->yuv2packed2 = RENAME(yuv2rgb565_2);
2465 case PIX_FMT_YUYV422:
2466 c->yuv2packed1 = RENAME(yuv2yuyv422_1);
2467 c->yuv2packed2 = RENAME(yuv2yuyv422_2);
2475 if (c->srcBpc == 8 && c->dstBpc <= 10) {
2476 #if !COMPILE_TEMPLATE_MMX2
2477 c->hyScale = c->hcScale = RENAME(hScale );
2478 #endif /* !COMPILE_TEMPLATE_MMX2 */
2480 // Use the new MMX scaler if the MMX2 one can't be used (it is faster than the x86 ASM one).
2481 #if COMPILE_TEMPLATE_MMX2
2482 if (c->flags & SWS_FAST_BILINEAR && c->canMMX2BeUsed)
2484 c->hyscale_fast = RENAME(hyscale_fast);
2485 c->hcscale_fast = RENAME(hcscale_fast);
2487 #endif /* COMPILE_TEMPLATE_MMX2 */
2488 c->hyscale_fast = NULL;
2489 c->hcscale_fast = NULL;
2490 #if COMPILE_TEMPLATE_MMX2
2492 #endif /* COMPILE_TEMPLATE_MMX2 */
2495 #if !COMPILE_TEMPLATE_MMX2
2497 case PIX_FMT_YUYV422 : c->chrToYV12 = RENAME(yuy2ToUV); break;
2498 case PIX_FMT_UYVY422 : c->chrToYV12 = RENAME(uyvyToUV); break;
2499 case PIX_FMT_NV12 : c->chrToYV12 = RENAME(nv12ToUV); break;
2500 case PIX_FMT_NV21 : c->chrToYV12 = RENAME(nv21ToUV); break;
2501 case PIX_FMT_YUV420P9LE:
2502 case PIX_FMT_YUV422P10LE:
2503 case PIX_FMT_YUV420P10LE: c->hScale16= RENAME(hScale16); break;
2506 #endif /* !COMPILE_TEMPLATE_MMX2 */
2507 if (!c->chrSrcHSubSample) {
2509 case PIX_FMT_BGR24 : c->chrToYV12 = RENAME(bgr24ToUV); break;
2510 case PIX_FMT_RGB24 : c->chrToYV12 = RENAME(rgb24ToUV); break;
2515 switch (srcFormat) {
2516 #if !COMPILE_TEMPLATE_MMX2
2517 case PIX_FMT_YUYV422 :
2518 case PIX_FMT_Y400A : c->lumToYV12 = RENAME(yuy2ToY); break;
2519 case PIX_FMT_UYVY422 : c->lumToYV12 = RENAME(uyvyToY); break;
2520 #endif /* !COMPILE_TEMPLATE_MMX2 */
2521 case PIX_FMT_BGR24 : c->lumToYV12 = RENAME(bgr24ToY); break;
2522 case PIX_FMT_RGB24 : c->lumToYV12 = RENAME(rgb24ToY); break;
2525 #if !COMPILE_TEMPLATE_MMX2
2527 switch (srcFormat) {
2528 case PIX_FMT_Y400A : c->alpToYV12 = RENAME(yuy2ToY); break;
2532 #endif /* !COMPILE_TEMPLATE_MMX2 */
2533 if(isAnyRGB(c->srcFormat) && av_pix_fmt_descriptors[c->srcFormat].comp[0].depth_minus1<15)
2534 c->hScale16= RENAME(hScale16);