2 * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
4 * This file is part of Libav.
6 * Libav is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * Libav is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with Libav; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 #include "swscale_template.h"
/* NOTE(review): this extract appears to have lines elided (the #else/#endif
 * lines of the two COMPILE_TEMPLATE_MMX2 conditionals below are not visible,
 * and each line carries a stray original-line-number prefix) -- confirm
 * against the upstream file before building. */
/* PREFETCH: software prefetch hint on MMX2-capable targets, a no-op comment
 * in the asm text otherwise. */
27 #if COMPILE_TEMPLATE_MMX2
28 #define PREFETCH "prefetchnta"
30 #define PREFETCH " # nop"
/* MOVNTQ: non-temporal (cache-bypassing) 8-byte store on MMX2, plain movq
 * otherwise. The REAL_/wrapper indirection lets macro arguments expand
 * before '#' stringification. */
33 #if COMPILE_TEMPLATE_MMX2
34 #define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
36 #define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
38 #define MOVNTQ(a,b) REAL_MOVNTQ(a,b)
/* YSCALEYUV2YV12X(x, offset, dest, width): multi-tap vertical scale of one
 * plane. REG_d walks a {srcPtr, coeff} list at 'offset' from the context
 * (%0 = &c->redDither); each tap pmulhw's two quads of 16-bit samples by the
 * coefficient into mm3/mm4 (pre-seeded with the VROUNDER dither), a NULL
 * src pointer (the 'test') ends the tap loop; result is >>3, packed unsigned
 * and streamed with MOVNTQ. NOTE(review): the __asm__ volatile( opener and
 * the loop label/branch lines appear elided in this dump -- confirm. */
40 #define YSCALEYUV2YV12X(x, offset, dest, width) \
42 "xor %%"REG_a", %%"REG_a" \n\t"\
43 "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
44 "movq %%mm3, %%mm4 \n\t"\
45 "lea " offset "(%0), %%"REG_d" \n\t"\
46 "mov (%%"REG_d"), %%"REG_S" \n\t"\
47 ".p2align 4 \n\t" /* FIXME Unroll? */\
49 "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
50 "movq " x "(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* srcData */\
51 "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* srcData */\
52 "add $16, %%"REG_d" \n\t"\
53 "mov (%%"REG_d"), %%"REG_S" \n\t"\
54 "test %%"REG_S", %%"REG_S" \n\t" /* NULL src => last tap */\
55 "pmulhw %%mm0, %%mm2 \n\t"\
56 "pmulhw %%mm0, %%mm5 \n\t"\
57 "paddw %%mm2, %%mm3 \n\t"\
58 "paddw %%mm5, %%mm4 \n\t"\
60 "psraw $3, %%mm3 \n\t"\
61 "psraw $3, %%mm4 \n\t"\
62 "packuswb %%mm4, %%mm3 \n\t"\
63 MOVNTQ(%%mm3, (%1, %%REGa))\
64 "add $8, %%"REG_a" \n\t"\
65 "cmp %2, %%"REG_a" \n\t" /* reached 'width'? */\
66 "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
67 "movq %%mm3, %%mm4 \n\t"\
68 "lea " offset "(%0), %%"REG_d" \n\t"\
69 "mov (%%"REG_d"), %%"REG_S" \n\t"\
71 :: "r" (&c->redDither),\
72 "r" (dest), "g" ((x86_reg)width)\
73 : "%"REG_a, "%"REG_d, "%"REG_S\
/* YSCALEYUV2YV12X_ACCURATE: higher-precision variant -- taps are processed in pairs (APCK_* packed-filter layout): words from two source rows are interleaved (punpcklwd/punpckhwd) and pmaddwd'd against paired coefficients, accumulating 32-bit sums in mm4..mm7; after the tap loop the sums are >>16, re-packed to words, dither-rounded, >>3 and stored. */\
76 #define YSCALEYUV2YV12X_ACCURATE(x, offset, dest, width) \
78 "lea " offset "(%0), %%"REG_d" \n\t"\
79 "xor %%"REG_a", %%"REG_a" \n\t"\
80 "pxor %%mm4, %%mm4 \n\t"\
81 "pxor %%mm5, %%mm5 \n\t"\
82 "pxor %%mm6, %%mm6 \n\t"\
83 "pxor %%mm7, %%mm7 \n\t"\
84 "mov (%%"REG_d"), %%"REG_S" \n\t"\
87 "movq " x "(%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* srcData */\
88 "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* srcData */\
89 "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t" /* second row of the tap pair */\
90 "movq " x "(%%"REG_S", %%"REG_a", 2), %%mm1 \n\t" /* srcData */\
91 "movq %%mm0, %%mm3 \n\t"\
92 "punpcklwd %%mm1, %%mm0 \n\t"\
93 "punpckhwd %%mm1, %%mm3 \n\t"\
94 "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm1 \n\t" /* filterCoeff */\
95 "pmaddwd %%mm1, %%mm0 \n\t"\
96 "pmaddwd %%mm1, %%mm3 \n\t"\
97 "paddd %%mm0, %%mm4 \n\t"\
98 "paddd %%mm3, %%mm5 \n\t"\
99 "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* srcData */\
100 "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
101 "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
102 "test %%"REG_S", %%"REG_S" \n\t" /* NULL => last tap pair */\
103 "movq %%mm2, %%mm0 \n\t"\
104 "punpcklwd %%mm3, %%mm2 \n\t"\
105 "punpckhwd %%mm3, %%mm0 \n\t"\
106 "pmaddwd %%mm1, %%mm2 \n\t"\
107 "pmaddwd %%mm1, %%mm0 \n\t"\
108 "paddd %%mm2, %%mm6 \n\t"\
109 "paddd %%mm0, %%mm7 \n\t"\
111 "psrad $16, %%mm4 \n\t"\
112 "psrad $16, %%mm5 \n\t"\
113 "psrad $16, %%mm6 \n\t"\
114 "psrad $16, %%mm7 \n\t"\
115 "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
116 "packssdw %%mm5, %%mm4 \n\t"\
117 "packssdw %%mm7, %%mm6 \n\t"\
118 "paddw %%mm0, %%mm4 \n\t"\
119 "paddw %%mm0, %%mm6 \n\t"\
120 "psraw $3, %%mm4 \n\t"\
121 "psraw $3, %%mm6 \n\t"\
122 "packuswb %%mm6, %%mm4 \n\t"\
123 MOVNTQ(%%mm4, (%1, %%REGa))\
124 "add $8, %%"REG_a" \n\t"\
125 "cmp %2, %%"REG_a" \n\t"\
126 "lea " offset "(%0), %%"REG_d" \n\t"\
127 "pxor %%mm4, %%mm4 \n\t"\
128 "pxor %%mm5, %%mm5 \n\t"\
129 "pxor %%mm6, %%mm6 \n\t"\
130 "pxor %%mm7, %%mm7 \n\t"\
131 "mov (%%"REG_d"), %%"REG_S" \n\t"\
133 :: "r" (&c->redDither),\
134 "r" (dest), "g" ((x86_reg)width)\
135 : "%"REG_a, "%"REG_d, "%"REG_S\
/* YSCALEYUV2YV121: 1-tap (copy) vertical scale -- 16-bit samples are simply >>7 to 8 bits, packed and streamed. %2 holds the negated count so the index register counts up toward zero. */\
138 #define YSCALEYUV2YV121 \
139 "mov %2, %%"REG_a" \n\t"\
140 ".p2align 4 \n\t" /* FIXME Unroll? */\
142 "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
143 "movq 8(%0, %%"REG_a", 2), %%mm1 \n\t"\
144 "psraw $7, %%mm0 \n\t"\
145 "psraw $7, %%mm1 \n\t"\
146 "packuswb %%mm1, %%mm0 \n\t"\
147 MOVNTQ(%%mm0, (%1, %%REGa))\
148 "add $8, %%"REG_a" \n\t"\
/* YSCALEYUV2YV121_ACCURATE: like YSCALEYUV2YV121 but rounds before the >>7; mm7 is built as 0x0040 in every word lane (all-ones, >>15, <<6) and added with saturation first. */\
151 #define YSCALEYUV2YV121_ACCURATE \
152 "mov %2, %%"REG_a" \n\t"\
153 "pcmpeqw %%mm7, %%mm7 \n\t"\
154 "psrlw $15, %%mm7 \n\t"\
155 "psllw $6, %%mm7 \n\t" /* mm7 = 0x0040 per word: rounding bias for >>7 */\
156 ".p2align 4 \n\t" /* FIXME Unroll? */\
158 "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
159 "movq 8(%0, %%"REG_a", 2), %%mm1 \n\t"\
160 "paddsw %%mm7, %%mm0 \n\t"\
161 "paddsw %%mm7, %%mm1 \n\t"\
162 "psraw $7, %%mm0 \n\t"\
163 "psraw $7, %%mm1 \n\t"\
164 "packuswb %%mm1, %%mm0 \n\t"\
165 MOVNTQ(%%mm0, (%1, %%REGa))\
166 "add $8, %%"REG_a" \n\t"\
/* NOTE(review): the constraint list below looks like a remnant of a commented-out asm statement whose comment delimiters were elided in this dump -- confirm against upstream. */\
170 :: "m" (-lumFilterSize), "m" (-chrFilterSize),
171 "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
172 "r" (dest), "m" (dstW_reg),
173 "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
174 : "%eax", "%ebx", "%ecx", "%edx", "%esi"
/* YSCALEYUV2PACKEDX_UV: chroma half of the multi-tap packed-output scaler.
 * Accumulates U into mm3 and V into mm4 across the chroma filter taps; the
 * V samples sit VOF bytes past the U samples in each source row. */
176 #define YSCALEYUV2PACKEDX_UV \
178 "xor %%"REG_a", %%"REG_a" \n\t"\
182 "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
183 "mov (%%"REG_d"), %%"REG_S" \n\t"\
184 "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
185 "movq %%mm3, %%mm4 \n\t"\
188 "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
189 "movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* UsrcData */\
190 "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm5 \n\t" /* VsrcData */\
191 "add $16, %%"REG_d" \n\t"\
192 "mov (%%"REG_d"), %%"REG_S" \n\t"\
193 "pmulhw %%mm0, %%mm2 \n\t"\
194 "pmulhw %%mm0, %%mm5 \n\t"\
195 "paddw %%mm2, %%mm3 \n\t"\
196 "paddw %%mm5, %%mm4 \n\t"\
197 "test %%"REG_S", %%"REG_S" \n\t" /* NULL src => last tap */\
/* YSCALEYUV2PACKEDX_YA: luma (or alpha, via ALP_MMX_FILTER_OFFSET) half -- accumulates two quads of samples into dst1/dst2 across the taps of the filter list found at 'offset'. */\
200 #define YSCALEYUV2PACKEDX_YA(offset,coeff,src1,src2,dst1,dst2) \
201 "lea "offset"(%0), %%"REG_d" \n\t"\
202 "mov (%%"REG_d"), %%"REG_S" \n\t"\
203 "movq "VROUNDER_OFFSET"(%0), "#dst1" \n\t"\
204 "movq "#dst1", "#dst2" \n\t"\
207 "movq 8(%%"REG_d"), "#coeff" \n\t" /* filterCoeff */\
208 "movq (%%"REG_S", %%"REG_a", 2), "#src1" \n\t" /* Y1srcData */\
209 "movq 8(%%"REG_S", %%"REG_a", 2), "#src2" \n\t" /* Y2srcData */\
210 "add $16, %%"REG_d" \n\t"\
211 "mov (%%"REG_d"), %%"REG_S" \n\t"\
212 "pmulhw "#coeff", "#src1" \n\t"\
213 "pmulhw "#coeff", "#src2" \n\t"\
214 "paddw "#src1", "#dst1" \n\t"\
215 "paddw "#src2", "#dst2" \n\t"\
216 "test %%"REG_S", %%"REG_S" \n\t" /* NULL src => last tap */\
/* YSCALEYUV2PACKEDX: chroma pass followed by the luma pass (U/V in mm3/mm4, Y1/Y2 in mm1/mm7). */\
219 #define YSCALEYUV2PACKEDX \
220 YSCALEYUV2PACKEDX_UV \
221 YSCALEYUV2PACKEDX_YA(LUM_MMX_FILTER_OFFSET,%%mm0,%%mm2,%%mm5,%%mm1,%%mm7) \
/* YSCALEYUV2PACKEDX_END: shared asm tail -- %0 = context base (&c->redDither), %4 = dest, %5 = width; the dummy operands keep operand numbering stable. */\
223 #define YSCALEYUV2PACKEDX_END \
224 :: "r" (&c->redDither), \
225 "m" (dummy), "m" (dummy), "m" (dummy),\
226 "r" (dest), "m" (dstW_reg) \
227 : "%"REG_a, "%"REG_d, "%"REG_S \
/* YSCALEYUV2PACKEDX_ACCURATE_UV: high-precision chroma pass -- pmaddwd over paired taps (APCK layout) into 32-bit accumulators mm4..mm7, then >>16, repack, dither-round, and park the U/V words in the context's U_TEMP/V_TEMP slots for the luma pass. */\
230 #define YSCALEYUV2PACKEDX_ACCURATE_UV \
232 "xor %%"REG_a", %%"REG_a" \n\t"\
236 "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
237 "mov (%%"REG_d"), %%"REG_S" \n\t"\
238 "pxor %%mm4, %%mm4 \n\t"\
239 "pxor %%mm5, %%mm5 \n\t"\
240 "pxor %%mm6, %%mm6 \n\t"\
241 "pxor %%mm7, %%mm7 \n\t"\
244 "movq (%%"REG_S", %%"REG_a"), %%mm0 \n\t" /* UsrcData */\
245 "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* VsrcData */\
246 "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
247 "movq (%%"REG_S", %%"REG_a"), %%mm1 \n\t" /* UsrcData */\
248 "movq %%mm0, %%mm3 \n\t"\
249 "punpcklwd %%mm1, %%mm0 \n\t"\
250 "punpckhwd %%mm1, %%mm3 \n\t"\
251 "movq "STR(APCK_COEF)"(%%"REG_d"),%%mm1 \n\t" /* filterCoeff */\
252 "pmaddwd %%mm1, %%mm0 \n\t"\
253 "pmaddwd %%mm1, %%mm3 \n\t"\
254 "paddd %%mm0, %%mm4 \n\t"\
255 "paddd %%mm3, %%mm5 \n\t"\
256 "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm3 \n\t" /* VsrcData */\
257 "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
258 "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
259 "test %%"REG_S", %%"REG_S" \n\t" /* NULL => last tap pair */\
260 "movq %%mm2, %%mm0 \n\t"\
261 "punpcklwd %%mm3, %%mm2 \n\t"\
262 "punpckhwd %%mm3, %%mm0 \n\t"\
263 "pmaddwd %%mm1, %%mm2 \n\t"\
264 "pmaddwd %%mm1, %%mm0 \n\t"\
265 "paddd %%mm2, %%mm6 \n\t"\
266 "paddd %%mm0, %%mm7 \n\t"\
268 "psrad $16, %%mm4 \n\t"\
269 "psrad $16, %%mm5 \n\t"\
270 "psrad $16, %%mm6 \n\t"\
271 "psrad $16, %%mm7 \n\t"\
272 "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
273 "packssdw %%mm5, %%mm4 \n\t"\
274 "packssdw %%mm7, %%mm6 \n\t"\
275 "paddw %%mm0, %%mm4 \n\t"\
276 "paddw %%mm0, %%mm6 \n\t"\
277 "movq %%mm4, "U_TEMP"(%0) \n\t" /* spill U: the YA pass needs all mm regs */\
278 "movq %%mm6, "V_TEMP"(%0) \n\t"\
/* YSCALEYUV2PACKEDX_ACCURATE_YA: high-precision luma/alpha pass (same pmaddwd tap-pair scheme); leaves Y1/Y2 in mm1/mm7 and reloads the spilled U/V into mm3/mm4 for the colorspace stage. */\
280 #define YSCALEYUV2PACKEDX_ACCURATE_YA(offset) \
281 "lea "offset"(%0), %%"REG_d" \n\t"\
282 "mov (%%"REG_d"), %%"REG_S" \n\t"\
283 "pxor %%mm1, %%mm1 \n\t"\
284 "pxor %%mm5, %%mm5 \n\t"\
285 "pxor %%mm7, %%mm7 \n\t"\
286 "pxor %%mm6, %%mm6 \n\t"\
289 "movq (%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* Y1srcData */\
290 "movq 8(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y2srcData */\
291 "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
292 "movq (%%"REG_S", %%"REG_a", 2), %%mm4 \n\t" /* Y1srcData */\
293 "movq %%mm0, %%mm3 \n\t"\
294 "punpcklwd %%mm4, %%mm0 \n\t"\
295 "punpckhwd %%mm4, %%mm3 \n\t"\
296 "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm4 \n\t" /* filterCoeff */\
297 "pmaddwd %%mm4, %%mm0 \n\t"\
298 "pmaddwd %%mm4, %%mm3 \n\t"\
299 "paddd %%mm0, %%mm1 \n\t"\
300 "paddd %%mm3, %%mm5 \n\t"\
301 "movq 8(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* Y2srcData */\
302 "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
303 "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
304 "test %%"REG_S", %%"REG_S" \n\t" /* NULL => last tap pair */\
305 "movq %%mm2, %%mm0 \n\t"\
306 "punpcklwd %%mm3, %%mm2 \n\t"\
307 "punpckhwd %%mm3, %%mm0 \n\t"\
308 "pmaddwd %%mm4, %%mm2 \n\t"\
309 "pmaddwd %%mm4, %%mm0 \n\t"\
310 "paddd %%mm2, %%mm7 \n\t"\
311 "paddd %%mm0, %%mm6 \n\t"\
313 "psrad $16, %%mm1 \n\t"\
314 "psrad $16, %%mm5 \n\t"\
315 "psrad $16, %%mm7 \n\t"\
316 "psrad $16, %%mm6 \n\t"\
317 "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
318 "packssdw %%mm5, %%mm1 \n\t"\
319 "packssdw %%mm6, %%mm7 \n\t"\
320 "paddw %%mm0, %%mm1 \n\t"\
321 "paddw %%mm0, %%mm7 \n\t"\
322 "movq "U_TEMP"(%0), %%mm3 \n\t" /* restore spilled chroma */\
323 "movq "V_TEMP"(%0), %%mm4 \n\t"\
/* YSCALEYUV2PACKEDX_ACCURATE: accurate chroma pass then accurate luma pass. */\
325 #define YSCALEYUV2PACKEDX_ACCURATE \
326 YSCALEYUV2PACKEDX_ACCURATE_UV \
327 YSCALEYUV2PACKEDX_ACCURATE_YA(LUM_MMX_FILTER_OFFSET)
/* YSCALEYUV2RGBX: colorspace conversion of the accumulated values.
 * In:  mm1/mm7 = Y1/Y2 (8 pixels), mm3/mm4 = U/V.
 * Subtracts the U/V/Y offsets, multiplies by the per-context coefficients
 * (pmulhw), duplicates chroma per pixel pair (punpck*wd reg,reg) and adds it
 * to luma, then packs with unsigned saturation.
 * Out: mm2/mm0 = B1/B2, mm4/mm3 = G1/G2, mm5/mm6 = R1/R2 as bytes. */
329 #define YSCALEYUV2RGBX \
330 "psubw "U_OFFSET"(%0), %%mm3 \n\t" /* (U-128)8*/\
331 "psubw "V_OFFSET"(%0), %%mm4 \n\t" /* (V-128)8*/\
332 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
333 "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
334 "pmulhw "UG_COEFF"(%0), %%mm3 \n\t"\
335 "pmulhw "VG_COEFF"(%0), %%mm4 \n\t"\
336 /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
337 "pmulhw "UB_COEFF"(%0), %%mm2 \n\t"\
338 "pmulhw "VR_COEFF"(%0), %%mm5 \n\t"\
339 "psubw "Y_OFFSET"(%0), %%mm1 \n\t" /* 8(Y-16)*/\
340 "psubw "Y_OFFSET"(%0), %%mm7 \n\t" /* 8(Y-16)*/\
341 "pmulhw "Y_COEFF"(%0), %%mm1 \n\t"\
342 "pmulhw "Y_COEFF"(%0), %%mm7 \n\t"\
343 /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
344 "paddw %%mm3, %%mm4 \n\t" /* g = ug + vg */\
345 "movq %%mm2, %%mm0 \n\t"\
346 "movq %%mm5, %%mm6 \n\t"\
347 "movq %%mm4, %%mm3 \n\t"\
348 "punpcklwd %%mm2, %%mm2 \n\t"\
349 "punpcklwd %%mm5, %%mm5 \n\t"\
350 "punpcklwd %%mm4, %%mm4 \n\t"\
351 "paddw %%mm1, %%mm2 \n\t"\
352 "paddw %%mm1, %%mm5 \n\t"\
353 "paddw %%mm1, %%mm4 \n\t"\
354 "punpckhwd %%mm0, %%mm0 \n\t"\
355 "punpckhwd %%mm6, %%mm6 \n\t"\
356 "punpckhwd %%mm3, %%mm3 \n\t"\
357 "paddw %%mm7, %%mm0 \n\t"\
358 "paddw %%mm7, %%mm6 \n\t"\
359 "paddw %%mm7, %%mm3 \n\t"\
360 /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
361 "packuswb %%mm0, %%mm2 \n\t"\
362 "packuswb %%mm6, %%mm5 \n\t"\
363 "packuswb %%mm3, %%mm4 \n\t"\
/* REAL_YSCALEYUV2PACKED(index, c): two-row (bilinear) vertical blend producing packed YUV. First rescales the stored luma/chroma blend coefficients by >>3 in place, then per iteration blends uvbuf0/uvbuf1 (mm3=U, mm4=V) and buf0/buf1 (mm1/mm7 = Y) as row1 + (row0-row1)*alpha. */\
365 #define REAL_YSCALEYUV2PACKED(index, c) \
366 "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
367 "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1 \n\t"\
368 "psraw $3, %%mm0 \n\t"\
369 "psraw $3, %%mm1 \n\t"\
370 "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c") \n\t"\
371 "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c") \n\t"\
372 "xor "#index", "#index" \n\t"\
375 "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
376 "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
377 "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
378 "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
379 "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
380 "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
381 "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
382 "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
383 "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
384 "psraw $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
385 "psraw $7, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
386 "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
387 "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
388 "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
389 "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
390 "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
391 "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
392 "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
393 "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
394 "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
395 "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
396 "psraw $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
397 "psraw $7, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
398 "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
399 "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
401 #define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c)
/* REAL_YSCALEYUV2RGB_UV(index, c): two-row chroma blend for the RGB path (>>4 precision), then subtracts the U/V offsets and applies the green coefficients; leaves mm2/mm5 = scaled (U-128)/(V-128), mm3/mm4 = ug/vg. */\
403 #define REAL_YSCALEYUV2RGB_UV(index, c) \
404 "xor "#index", "#index" \n\t"\
407 "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
408 "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
409 "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
410 "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
411 "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
412 "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
413 "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
414 "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
415 "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
416 "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
417 "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
418 "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
419 "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
420 "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
421 "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
422 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
423 "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
424 "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
425 "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
426 /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
/* REAL_YSCALEYUV2RGB_YA(index, c, b1, b2): two-row luma blend (>>4 precision); leaves Y1 in mm1 and Y2 in mm7. */\
428 #define REAL_YSCALEYUV2RGB_YA(index, c, b1, b2) \
429 "movq ("#b1", "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
430 "movq ("#b2", "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
431 "movq 8("#b1", "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
432 "movq 8("#b2", "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
433 "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
434 "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
435 "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
436 "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
437 "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
438 "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
439 "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
440 "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
/* REAL_YSCALEYUV2RGB_COEFF(c): finish the conversion started by _UV/_YA -- apply blue/red and luma coefficients, replicate chroma per pixel pair, add to luma, pack. Out: mm2/mm0 = B, mm4/mm3 = G, mm5/mm6 = R bytes (same layout as YSCALEYUV2RGBX). */\
442 #define REAL_YSCALEYUV2RGB_COEFF(c) \
443 "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
444 "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
445 "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
446 "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
447 "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
448 "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
449 /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
450 "paddw %%mm3, %%mm4 \n\t" /* g = ug + vg */\
451 "movq %%mm2, %%mm0 \n\t"\
452 "movq %%mm5, %%mm6 \n\t"\
453 "movq %%mm4, %%mm3 \n\t"\
454 "punpcklwd %%mm2, %%mm2 \n\t"\
455 "punpcklwd %%mm5, %%mm5 \n\t"\
456 "punpcklwd %%mm4, %%mm4 \n\t"\
457 "paddw %%mm1, %%mm2 \n\t"\
458 "paddw %%mm1, %%mm5 \n\t"\
459 "paddw %%mm1, %%mm4 \n\t"\
460 "punpckhwd %%mm0, %%mm0 \n\t"\
461 "punpckhwd %%mm6, %%mm6 \n\t"\
462 "punpckhwd %%mm3, %%mm3 \n\t"\
463 "paddw %%mm7, %%mm0 \n\t"\
464 "paddw %%mm7, %%mm6 \n\t"\
465 "paddw %%mm7, %%mm3 \n\t"\
466 /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
467 "packuswb %%mm0, %%mm2 \n\t"\
468 "packuswb %%mm6, %%mm5 \n\t"\
469 "packuswb %%mm3, %%mm4 \n\t"\
/* Expansion wrappers: YSCALEYUV2RGB = chroma blend + luma blend (rows %0/%1) + coefficient stage. */\
471 #define YSCALEYUV2RGB_YA(index, c, b1, b2) REAL_YSCALEYUV2RGB_YA(index, c, b1, b2)
473 #define YSCALEYUV2RGB(index, c) \
474 REAL_YSCALEYUV2RGB_UV(index, c) \
475 REAL_YSCALEYUV2RGB_YA(index, c, %0, %1) \
476 REAL_YSCALEYUV2RGB_COEFF(c)
/* REAL_YSCALEYUV2PACKED1(index, c): single-source-row fast path for packed
 * YUV output -- no vertical blend, just >>7 of one chroma row (mm3=U, mm4=V)
 * and one luma row (mm1/mm7). */
478 #define REAL_YSCALEYUV2PACKED1(index, c) \
479 "xor "#index", "#index" \n\t"\
482 "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
483 "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
484 "psraw $7, %%mm3 \n\t" \
485 "psraw $7, %%mm4 \n\t" \
486 "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
487 "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
488 "psraw $7, %%mm1 \n\t" \
489 "psraw $7, %%mm7 \n\t" \
491 #define YSCALEYUV2PACKED1(index, c) REAL_YSCALEYUV2PACKED1(index, c)
/* REAL_YSCALEYUV2RGB1(index, c): single-source-row RGB path -- >>4 the one
 * chroma/luma row, then the same offset/coefficient/interleave pipeline as
 * REAL_YSCALEYUV2RGB_COEFF. Out: mm2/mm0=B, mm4/mm3=G, mm5/mm6=R bytes. */
493 #define REAL_YSCALEYUV2RGB1(index, c) \
494 "xor "#index", "#index" \n\t"\
497 "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
498 "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
499 "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
500 "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
501 "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
502 "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
503 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
504 "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
505 "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
506 "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
507 /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
508 "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
509 "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
510 "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
511 "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
512 "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
513 "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
514 "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
515 "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
516 "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
517 "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
518 /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
519 "paddw %%mm3, %%mm4 \n\t"\
520 "movq %%mm2, %%mm0 \n\t"\
521 "movq %%mm5, %%mm6 \n\t"\
522 "movq %%mm4, %%mm3 \n\t"\
523 "punpcklwd %%mm2, %%mm2 \n\t"\
524 "punpcklwd %%mm5, %%mm5 \n\t"\
525 "punpcklwd %%mm4, %%mm4 \n\t"\
526 "paddw %%mm1, %%mm2 \n\t"\
527 "paddw %%mm1, %%mm5 \n\t"\
528 "paddw %%mm1, %%mm4 \n\t"\
529 "punpckhwd %%mm0, %%mm0 \n\t"\
530 "punpckhwd %%mm6, %%mm6 \n\t"\
531 "punpckhwd %%mm3, %%mm3 \n\t"\
532 "paddw %%mm7, %%mm0 \n\t"\
533 "paddw %%mm7, %%mm6 \n\t"\
534 "paddw %%mm7, %%mm3 \n\t"\
535 /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
536 "packuswb %%mm0, %%mm2 \n\t"\
537 "packuswb %%mm6, %%mm5 \n\t"\
538 "packuswb %%mm3, %%mm4 \n\t"\
540 #define YSCALEYUV2RGB1(index, c) REAL_YSCALEYUV2RGB1(index, c)
/* REAL_YSCALEYUV2PACKED1b(index, c): packed-YUV path averaging the two
 * chroma rows: (uvbuf0 + uvbuf1) then >>8 (== average >>7); luma comes from
 * one row, >>7. */
542 #define REAL_YSCALEYUV2PACKED1b(index, c) \
543 "xor "#index", "#index" \n\t"\
546 "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
547 "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
548 "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
549 "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
550 "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
551 "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
552 "psrlw $8, %%mm3 \n\t" \
553 "psrlw $8, %%mm4 \n\t" \
554 "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
555 "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
556 "psraw $7, %%mm1 \n\t" \
557 "psraw $7, %%mm7 \n\t"
558 #define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c)
560 // do vertical chrominance interpolation
/* REAL_YSCALEYUV2RGB1b(index, c): RGB path with chroma taken as the average
 * of two rows ((a+b) >> 5, i.e. avg >> 4) and luma from one row (>>4),
 * followed by the usual coefficient/interleave pipeline.
 * Out: mm2/mm0=B, mm4/mm3=G, mm5/mm6=R bytes. */
561 #define REAL_YSCALEYUV2RGB1b(index, c) \
562 "xor "#index", "#index" \n\t"\
565 "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
566 "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
567 "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
568 "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
569 "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
570 "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
571 "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
572 "psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
573 "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
574 "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
575 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
576 "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
577 "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
578 "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
579 /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
580 "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
581 "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
582 "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
583 "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
584 "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
585 "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
586 "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
587 "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
588 "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
589 "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
590 /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
591 "paddw %%mm3, %%mm4 \n\t"\
592 "movq %%mm2, %%mm0 \n\t"\
593 "movq %%mm5, %%mm6 \n\t"\
594 "movq %%mm4, %%mm3 \n\t"\
595 "punpcklwd %%mm2, %%mm2 \n\t"\
596 "punpcklwd %%mm5, %%mm5 \n\t"\
597 "punpcklwd %%mm4, %%mm4 \n\t"\
598 "paddw %%mm1, %%mm2 \n\t"\
599 "paddw %%mm1, %%mm5 \n\t"\
600 "paddw %%mm1, %%mm4 \n\t"\
601 "punpckhwd %%mm0, %%mm0 \n\t"\
602 "punpckhwd %%mm6, %%mm6 \n\t"\
603 "punpckhwd %%mm3, %%mm3 \n\t"\
604 "paddw %%mm7, %%mm0 \n\t"\
605 "paddw %%mm7, %%mm6 \n\t"\
606 "paddw %%mm7, %%mm3 \n\t"\
607 /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
608 "packuswb %%mm0, %%mm2 \n\t"\
609 "packuswb %%mm6, %%mm5 \n\t"\
610 "packuswb %%mm3, %%mm4 \n\t"\
612 #define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c)
/* REAL_YSCALEYUV2RGB1_ALPHA(index): load 8 alpha samples from one row (%1),
 * >>7 to 8 bits and pack them into mm7 for the BGR32 writer. */
614 #define REAL_YSCALEYUV2RGB1_ALPHA(index) \
615 "movq (%1, "#index", 2), %%mm7 \n\t" /* abuf0[index ] */\
616 "movq 8(%1, "#index", 2), %%mm1 \n\t" /* abuf0[index+4] */\
617 "psraw $7, %%mm7 \n\t" /* abuf0[index ] >>7 */\
618 "psraw $7, %%mm1 \n\t" /* abuf0[index+4] >>7 */\
619 "packuswb %%mm1, %%mm7 \n\t"
620 #define YSCALEYUV2RGB1_ALPHA(index) REAL_YSCALEYUV2RGB1_ALPHA(index)
/* REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t): interleave
 * the packed B/G/R/A byte registers into four ARGB quadwords and stream
 * 32 bytes (8 pixels); q0/q2/q3/t are scratch. Advances 'index' and compares
 * against 'dstw' (branch line appears elided in this dump). */
622 #define REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) \
623 "movq "#b", "#q2" \n\t" /* B */\
624 "movq "#r", "#t" \n\t" /* R */\
625 "punpcklbw "#g", "#b" \n\t" /* GBGBGBGB 0 */\
626 "punpcklbw "#a", "#r" \n\t" /* ARARARAR 0 */\
627 "punpckhbw "#g", "#q2" \n\t" /* GBGBGBGB 2 */\
628 "punpckhbw "#a", "#t" \n\t" /* ARARARAR 2 */\
629 "movq "#b", "#q0" \n\t" /* GBGBGBGB 0 */\
630 "movq "#q2", "#q3" \n\t" /* GBGBGBGB 2 */\
631 "punpcklwd "#r", "#q0" \n\t" /* ARGBARGB 0 */\
632 "punpckhwd "#r", "#b" \n\t" /* ARGBARGB 1 */\
633 "punpcklwd "#t", "#q2" \n\t" /* ARGBARGB 2 */\
634 "punpckhwd "#t", "#q3" \n\t" /* ARGBARGB 3 */\
636 MOVNTQ( q0, (dst, index, 4))\
637 MOVNTQ( b, 8(dst, index, 4))\
638 MOVNTQ( q2, 16(dst, index, 4))\
639 MOVNTQ( q3, 24(dst, index, 4))\
641 "add $8, "#index" \n\t"\
642 "cmp "#dstw", "#index" \n\t"\
644 #define WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t)
/* REAL_WRITERGB16(dst, dstw, index): pack mm2=B, mm4=G, mm5=R (mm7 assumed
 * zero for the unpacks) into RGB565 -- mask to 5/6/5 significant bits,
 * widen bytes to words, shift green into place and OR together, then store
 * 16 bytes (8 pixels). */
646 #define REAL_WRITERGB16(dst, dstw, index) \
647 "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
648 "pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
649 "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
650 "psrlq $3, %%mm2 \n\t"\
652 "movq %%mm2, %%mm1 \n\t"\
653 "movq %%mm4, %%mm3 \n\t"\
655 "punpcklbw %%mm7, %%mm3 \n\t"\
656 "punpcklbw %%mm5, %%mm2 \n\t"\
657 "punpckhbw %%mm7, %%mm4 \n\t"\
658 "punpckhbw %%mm5, %%mm1 \n\t"\
660 "psllq $3, %%mm3 \n\t"\
661 "psllq $3, %%mm4 \n\t"\
663 "por %%mm3, %%mm2 \n\t"\
664 "por %%mm4, %%mm1 \n\t"\
666 MOVNTQ(%%mm2, (dst, index, 2))\
667 MOVNTQ(%%mm1, 8(dst, index, 2))\
669 "add $8, "#index" \n\t"\
670 "cmp "#dstw", "#index" \n\t"\
672 #define WRITERGB16(dst, dstw, index) REAL_WRITERGB16(dst, dstw, index)
/* REAL_WRITERGB15(dst, dstw, index): same as WRITERGB16 but RGB555 --
 * all channels masked to 5 bits, red pre-shifted right one extra bit,
 * green shifted by 2 instead of 3. */
674 #define REAL_WRITERGB15(dst, dstw, index) \
675 "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
676 "pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
677 "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
678 "psrlq $3, %%mm2 \n\t"\
679 "psrlq $1, %%mm5 \n\t"\
681 "movq %%mm2, %%mm1 \n\t"\
682 "movq %%mm4, %%mm3 \n\t"\
684 "punpcklbw %%mm7, %%mm3 \n\t"\
685 "punpcklbw %%mm5, %%mm2 \n\t"\
686 "punpckhbw %%mm7, %%mm4 \n\t"\
687 "punpckhbw %%mm5, %%mm1 \n\t"\
689 "psllq $2, %%mm3 \n\t"\
690 "psllq $2, %%mm4 \n\t"\
692 "por %%mm3, %%mm2 \n\t"\
693 "por %%mm4, %%mm1 \n\t"\
695 MOVNTQ(%%mm2, (dst, index, 2))\
696 MOVNTQ(%%mm1, 8(dst, index, 2))\
698 "add $8, "#index" \n\t"\
699 "cmp "#dstw", "#index" \n\t"\
701 #define WRITERGB15(dst, dstw, index) REAL_WRITERGB15(dst, dstw, index)
/* WRITEBGR24MMX(dst, dstw, index): plain-MMX 24bpp writer -- expand 8 pixels
 * to four 0RGB0RGB quadwords, then shift/OR them into three contiguous
 * 8-byte stores (24 bytes total). 'dst' is advanced by 24 each iteration. */
703 #define WRITEBGR24MMX(dst, dstw, index) \
704 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
705 "movq %%mm2, %%mm1 \n\t" /* B */\
706 "movq %%mm5, %%mm6 \n\t" /* R */\
707 "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
708 "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
709 "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
710 "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
711 "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
712 "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
713 "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
714 "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
715 "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
716 "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
718 "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
719 "movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\
720 "movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\
721 "movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\
723 "psllq $40, %%mm0 \n\t" /* RGB00000 0 */\
724 "psllq $40, %%mm2 \n\t" /* RGB00000 1 */\
725 "psllq $40, %%mm1 \n\t" /* RGB00000 2 */\
726 "psllq $40, %%mm3 \n\t" /* RGB00000 3 */\
728 "punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\
729 "punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\
730 "punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\
731 "punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\
733 "psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\
734 "movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\
735 "psllq $40, %%mm2 \n\t" /* GB000000 1 */\
736 "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
737 MOVNTQ(%%mm0, (dst))\
739 "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\
740 "movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\
741 "psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\
742 "por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\
743 MOVNTQ(%%mm6, 8(dst))\
745 "psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\
746 "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\
747 "por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
748 MOVNTQ(%%mm5, 16(dst))\
750 "add $24, "#dst" \n\t"\
752 "add $8, "#index" \n\t"\
753 "cmp "#dstw", "#index" \n\t"\
/* WRITEBGR24MMX2(dst, dstw, index): MMX2 24bpp writer -- uses pshufw with the ff_M24A/B/C byte-select masks to shuffle B/G/R directly into the three output quadwords, avoiding the long shift/merge chain of the MMX version. */\
756 #define WRITEBGR24MMX2(dst, dstw, index) \
757 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
758 "movq "MANGLE(ff_M24A)", %%mm0 \n\t"\
759 "movq "MANGLE(ff_M24C)", %%mm7 \n\t"\
760 "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\
761 "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\
762 "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\
764 "pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\
765 "pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\
766 "pand %%mm7, %%mm6 \n\t" /* R1 R0 */\
768 "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\
769 "por %%mm1, %%mm6 \n\t"\
770 "por %%mm3, %%mm6 \n\t"\
771 MOVNTQ(%%mm6, (dst))\
773 "psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\
774 "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\
775 "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\
776 "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\
778 "pand "MANGLE(ff_M24B)", %%mm1 \n\t" /* B5 B4 B3 */\
779 "pand %%mm7, %%mm3 \n\t" /* G4 G3 */\
780 "pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\
782 "por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\
783 "por %%mm3, %%mm6 \n\t"\
784 MOVNTQ(%%mm6, 8(dst))\
786 "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */\
787 "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\
788 "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\
790 "pand %%mm7, %%mm1 \n\t" /* B7 B6 */\
791 "pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\
792 "pand "MANGLE(ff_M24B)", %%mm6 \n\t" /* R7 R6 R5 */\
794 "por %%mm1, %%mm3 \n\t"\
795 "por %%mm3, %%mm6 \n\t"\
796 MOVNTQ(%%mm6, 16(dst))\
798 "add $24, "#dst" \n\t"\
800 "add $8, "#index" \n\t"\
801 "cmp "#dstw", "#index" \n\t"\
/* WRITEBGR24: pick the pshufw-based writer on MMX2, the shift/merge one on plain MMX. NOTE(review): the #else/#endif lines of this conditional appear elided in this dump. */\
804 #if COMPILE_TEMPLATE_MMX2
806 #define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX2(dst, dstw, index)
809 #define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX(dst, dstw, index)
/* REAL_WRITEYUY2(dst, dstw, index): interleave luma (mm1/mm7) with packed
 * U (mm3) and V (mm4) into YUYV order and stream 16 bytes (8 pixels). */
812 #define REAL_WRITEYUY2(dst, dstw, index) \
813 "packuswb %%mm3, %%mm3 \n\t"\
814 "packuswb %%mm4, %%mm4 \n\t"\
815 "packuswb %%mm7, %%mm1 \n\t"\
816 "punpcklbw %%mm4, %%mm3 \n\t" /* UVUV... */\
817 "movq %%mm1, %%mm7 \n\t"\
818 "punpcklbw %%mm3, %%mm1 \n\t" /* YUYV... low */\
819 "punpckhbw %%mm3, %%mm7 \n\t" /* YUYV... high */\
821 MOVNTQ(%%mm1, (dst, index, 2))\
822 MOVNTQ(%%mm7, 8(dst, index, 2))\
824 "add $8, "#index" \n\t"\
825 "cmp "#dstw", "#index" \n\t"\
827 #define WRITEYUY2(dst, dstw, index) REAL_WRITEYUY2(dst, dstw, index)
/* yuv2yuvX_ar: multi-tap vertical scale to planar YUV(A) using the accurate
 * (pmaddwd) macros -- chroma planes first, optional alpha, then luma.
 * NOTE(review): the function braces and asm statement wrappers around the
 * macro invocations appear elided in this dump -- confirm against upstream. */
830 static inline void RENAME(yuv2yuvX_ar)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
831 const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize, const int16_t **alpSrc,
832 uint8_t *dest, uint8_t *uDest, uint8_t *vDest, uint8_t *aDest, long dstW, long chrDstW)
835 YSCALEYUV2YV12X_ACCURATE( "0", CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
836 YSCALEYUV2YV12X_ACCURATE(AV_STRINGIFY(VOF), CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
838 if (CONFIG_SWSCALE_ALPHA && aDest) {
839 YSCALEYUV2YV12X_ACCURATE( "0", ALP_MMX_FILTER_OFFSET, aDest, dstW)
842 YSCALEYUV2YV12X_ACCURATE("0", LUM_MMX_FILTER_OFFSET, dest, dstW)
/* yuv2yuvX: same as yuv2yuvX_ar but with the faster, lower-precision pmulhw
 * macros. NOTE(review): braces/asm wrappers appear elided in this dump. */
845 static inline void RENAME(yuv2yuvX)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
846 const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize, const int16_t **alpSrc,
847 uint8_t *dest, uint8_t *uDest, uint8_t *vDest, uint8_t *aDest, long dstW, long chrDstW)
850 YSCALEYUV2YV12X( "0", CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
851 YSCALEYUV2YV12X(AV_STRINGIFY(VOF), CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
853 if (CONFIG_SWSCALE_ALPHA && aDest) {
854 YSCALEYUV2YV12X( "0", ALP_MMX_FILTER_OFFSET, aDest, dstW)
857 YSCALEYUV2YV12X("0", LUM_MMX_FILTER_OFFSET, dest, dstW)
/* yuv2yuv1_ar: 1-tap (copy) vertical scale with rounding; iterates the four
 * planes (A, Y, U, V) through YSCALEYUV2YV121_ACCURATE. Sources/dests are
 * offset by the counts so the asm index can count up from a negative value.
 * NOTE(review): the per-plane loop and asm wrapper appear elided here. */
860 static inline void RENAME(yuv2yuv1_ar)(SwsContext *c, const int16_t *lumSrc, const int16_t *chrSrc, const int16_t *alpSrc,
861 uint8_t *dest, uint8_t *uDest, uint8_t *vDest, uint8_t *aDest, long dstW, long chrDstW)
864 const uint8_t *src[4]= {alpSrc + dstW, lumSrc + dstW, chrSrc + chrDstW, chrSrc + VOFW + chrDstW};
865 uint8_t *dst[4]= {aDest, dest, uDest, vDest};
866 x86_reg counter[4]= {dstW, dstW, chrDstW, chrDstW};
871 YSCALEYUV2YV121_ACCURATE
872 :: "r" (src[p]), "r" (dst[p] + counter[p]),
/* Unscaled YV12 plane writeout, fast variant of yuv2yuv1_ar: identical
 * plane layout and negative-counter loop, cheaper rounding (the asm macro
 * invocation is on an elided line of this extract). */
880 static inline void RENAME(yuv2yuv1)(SwsContext *c, const int16_t *lumSrc, const int16_t *chrSrc, const int16_t *alpSrc,
881 uint8_t *dest, uint8_t *uDest, uint8_t *vDest, uint8_t *aDest, long dstW, long chrDstW)
884 const uint8_t *src[4]= {alpSrc + dstW, lumSrc + dstW, chrSrc + chrDstW, chrSrc + VOFW + chrDstW};
885 uint8_t *dst[4]= {aDest, dest, uDest, vDest};
886 x86_reg counter[4]= {dstW, dstW, chrDstW, chrDstW};
892 :: "r" (src[p]), "r" (dst[p] + counter[p]),
902 * vertical scale YV12 to RGB
/* Vertically filtered YV12 -> 32-bit RGB (BGR32), accurate-rounding MMX
 * path.  Two code paths: with a real alpha plane (filter it too) or with
 * constant opaque alpha. */
904 static inline void RENAME(yuv2rgb32_X_ar)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
905 const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
906 const int16_t **alpSrc, uint8_t *dest, long dstW, long dstY)
909 x86_reg dstW_reg = dstW;
911 if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
912 YSCALEYUV2PACKEDX_ACCURATE
/* Spill the just-computed color registers to per-context scratch slots
 * so the alpha filter pass below can reuse mm registers. */
914 "movq %%mm2, "U_TEMP"(%0) \n\t"
915 "movq %%mm4, "V_TEMP"(%0) \n\t"
916 "movq %%mm5, "Y_TEMP"(%0) \n\t"
917 YSCALEYUV2PACKEDX_ACCURATE_YA(ALP_MMX_FILTER_OFFSET)
918 "movq "Y_TEMP"(%0), %%mm5 \n\t"
919 "psraw $3, %%mm1 \n\t"
920 "psraw $3, %%mm7 \n\t"
921 "packuswb %%mm7, %%mm1 \n\t"
922 WRITEBGR32(%4, %5, %%REGa, %%mm3, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm2, %%mm6)
924 YSCALEYUV2PACKEDX_END
926 YSCALEYUV2PACKEDX_ACCURATE
/* No alpha plane: pcmpeqd of a register with itself yields all-ones,
 * i.e. a constant 0xFF (opaque) alpha channel. */
928 "pcmpeqd %%mm7, %%mm7 \n\t"
929 WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
931 YSCALEYUV2PACKEDX_END
/* Vertically filtered YV12 -> packed 24-bit BGR, accurate-rounding path.
 * BGR24 needs 3 bytes/pixel, hence the dest pointer is scaled by 3
 * (lea reg+reg*2) before the store macro. */
935 static inline void RENAME(yuv2bgr24_X_ar)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
936 const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
937 const int16_t **alpSrc, uint8_t *dest, long dstW, long dstY)
940 x86_reg dstW_reg = dstW;
942 YSCALEYUV2PACKEDX_ACCURATE
944 "pxor %%mm7, %%mm7 \n\t"
/* REG_c = dest + 3*i : 3 bytes per BGR24 pixel. */
945 "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"\n\t" //FIXME optimize
946 "add %4, %%"REG_c" \n\t"
947 WRITEBGR24(%%REGc, %5, %%REGa)
950 :: "r" (&c->redDither),
951 "m" (dummy), "m" (dummy), "m" (dummy),
952 "r" (dest), "m" (dstW_reg)
953 : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
/* Vertically filtered YV12 -> RGB555 (15 bpp), accurate-rounding path,
 * with optional ordered dither added per channel before the pack. */
958 static inline void RENAME(yuv2rgb555_X_ar)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
959 const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
960 const int16_t **alpSrc, uint8_t *dest, long dstW, long dstY)
963 x86_reg dstW_reg = dstW;
965 YSCALEYUV2PACKEDX_ACCURATE
967 "pxor %%mm7, %%mm7 \n\t"
968 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
/* Saturating add of the per-channel dither tables from the context. */
970 "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
971 "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
972 "paddusb "RED_DITHER"(%0), %%mm5\n\t"
975 WRITERGB15(%4, %5, %%REGa)
976 YSCALEYUV2PACKEDX_END
/* Vertically filtered YV12 -> RGB565 (16 bpp), accurate-rounding path;
 * same structure as the RGB555 variant but packs with WRITERGB16. */
979 static inline void RENAME(yuv2rgb565_X_ar)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
980 const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
981 const int16_t **alpSrc, uint8_t *dest, long dstW, long dstY)
984 x86_reg dstW_reg = dstW;
986 YSCALEYUV2PACKEDX_ACCURATE
988 "pxor %%mm7, %%mm7 \n\t"
989 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
991 "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
992 "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
993 "paddusb "RED_DITHER"(%0), %%mm5\n\t"
996 WRITERGB16(%4, %5, %%REGa)
997 YSCALEYUV2PACKEDX_END
/* Vertically filtered YV12 -> packed YUYV 4:2:2, accurate-rounding path.
 * Only needs the >>3 renormalization of the filtered samples before the
 * interleaving store. */
1000 static inline void RENAME(yuv2yuyv422_X_ar)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
1001 const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
1002 const int16_t **alpSrc, uint8_t *dest, long dstW, long dstY)
1005 x86_reg dstW_reg = dstW;
1007 YSCALEYUV2PACKEDX_ACCURATE
1008 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1010 "psraw $3, %%mm3 \n\t"
1011 "psraw $3, %%mm4 \n\t"
1012 "psraw $3, %%mm1 \n\t"
1013 "psraw $3, %%mm7 \n\t"
1014 WRITEYUY2(%4, %5, %%REGa)
1015 YSCALEYUV2PACKEDX_END
/* Vertically filtered YV12 -> 32-bit RGB, fast (non-bit-exact) path.
 * With an alpha plane the YA macro filters alpha into mm1/mm7; otherwise
 * alpha is forced to all-ones (opaque). */
1018 static inline void RENAME(yuv2rgb32_X)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
1019 const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
1020 const int16_t **alpSrc, uint8_t *dest, long dstW, long dstY)
1023 x86_reg dstW_reg = dstW;
1025 if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
1028 YSCALEYUV2PACKEDX_YA(ALP_MMX_FILTER_OFFSET, %%mm0, %%mm3, %%mm6, %%mm1, %%mm7)
1029 "psraw $3, %%mm1 \n\t"
1030 "psraw $3, %%mm7 \n\t"
1031 "packuswb %%mm7, %%mm1 \n\t"
1032 WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
1033 YSCALEYUV2PACKEDX_END
/* pcmpeqd reg,reg -> all bits set: constant 0xFF alpha. */
1037 "pcmpeqd %%mm7, %%mm7 \n\t"
1038 WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1039 YSCALEYUV2PACKEDX_END
/* Vertically filtered YV12 -> 24-bit BGR, fast path.  Mirrors
 * yuv2bgr24_X_ar: dest index scaled by 3 bytes/pixel via lea. */
1043 static inline void RENAME(yuv2bgr24_X)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
1044 const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
1045 const int16_t **alpSrc, uint8_t *dest, long dstW, long dstY)
1048 x86_reg dstW_reg = dstW;
1052 "pxor %%mm7, %%mm7 \n\t"
1053 "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c" \n\t" //FIXME optimize
1054 "add %4, %%"REG_c" \n\t"
1055 WRITEBGR24(%%REGc, %5, %%REGa)
1057 :: "r" (&c->redDither),
1058 "m" (dummy), "m" (dummy), "m" (dummy),
1059 "r" (dest), "m" (dstW_reg)
1060 : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
/* Vertically filtered YV12 -> RGB555, fast path with per-channel
 * saturating dither add before packing. */
1064 static inline void RENAME(yuv2rgb555_X)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
1065 const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
1066 const int16_t **alpSrc, uint8_t *dest, long dstW, long dstY)
1069 x86_reg dstW_reg = dstW;
1073 "pxor %%mm7, %%mm7 \n\t"
1074 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1076 "paddusb "BLUE_DITHER"(%0), %%mm2 \n\t"
1077 "paddusb "GREEN_DITHER"(%0), %%mm4 \n\t"
1078 "paddusb "RED_DITHER"(%0), %%mm5 \n\t"
1081 WRITERGB15(%4, %5, %%REGa)
1082 YSCALEYUV2PACKEDX_END
/* Vertically filtered YV12 -> RGB565, fast path; identical to the 555
 * variant apart from the WRITERGB16 pack. */
1085 static inline void RENAME(yuv2rgb565_X)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
1086 const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
1087 const int16_t **alpSrc, uint8_t *dest, long dstW, long dstY)
1090 x86_reg dstW_reg = dstW;
1094 "pxor %%mm7, %%mm7 \n\t"
1095 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1097 "paddusb "BLUE_DITHER"(%0), %%mm2 \n\t"
1098 "paddusb "GREEN_DITHER"(%0), %%mm4 \n\t"
1099 "paddusb "RED_DITHER"(%0), %%mm5 \n\t"
1102 WRITERGB16(%4, %5, %%REGa)
1103 YSCALEYUV2PACKEDX_END
/* Vertically filtered YV12 -> packed YUYV 4:2:2, fast path. */
1106 static inline void RENAME(yuv2yuyv422_X)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
1107 const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
1108 const int16_t **alpSrc, uint8_t *dest, long dstW, long dstY)
1111 x86_reg dstW_reg = dstW;
1114 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
/* Renormalize filtered Y/U/V samples before the interleaving store. */
1116 "psraw $3, %%mm3 \n\t"
1117 "psraw $3, %%mm4 \n\t"
1118 "psraw $3, %%mm1 \n\t"
1119 "psraw $3, %%mm7 \n\t"
1120 WRITEYUY2(%4, %5, %%REGa)
1121 YSCALEYUV2PACKEDX_END
1125 * vertical bilinear scale YV12 to RGB
/* Vertical bilinear blend of two source rows (buf0/buf1, weight yalpha)
 * -> 32-bit RGB.  Three paths: x86-64 with alpha (r8 as row index,
 * alpha rows passed directly), x86-32 with alpha (alpha pointers go
 * through c->u_temp/v_temp because registers are scarce), and no-alpha
 * (constant opaque alpha).  On x86-32, REG_b and REG_BP are commandeered
 * as pointers inside the asm, so REG_b is spilled to ESP_OFFSET in the
 * context and REG_BP is pushed/popped around the block. */
1127 static inline void RENAME(yuv2rgb32_2)(SwsContext *c, const uint16_t *buf0, const uint16_t *buf1, const uint16_t *uvbuf0, const uint16_t *uvbuf1,
1128 const uint16_t *abuf0, const uint16_t *abuf1, uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
1130 if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
1133 YSCALEYUV2RGB(%%r8, %5)
1134 YSCALEYUV2RGB_YA(%%r8, %5, %6, %7)
1135 "psraw $3, %%mm1 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
1136 "psraw $3, %%mm7 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
1137 "packuswb %%mm7, %%mm1 \n\t"
1138 WRITEBGR32(%4, 8280(%5), %%r8, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
1140 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "r" (dest),
1142 ,"r" (abuf0), "r" (abuf1)
/* x86-32 + alpha: not enough registers, so stash the alpha-row pointers
 * in the context and reload them inside the asm. */
1146 *(const uint16_t **)(&c->u_temp)=abuf0;
1147 *(const uint16_t **)(&c->v_temp)=abuf1;
1149 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1150 "mov %4, %%"REG_b" \n\t"
1151 "push %%"REG_BP" \n\t"
1152 YSCALEYUV2RGB(%%REGBP, %5)
1155 "mov "U_TEMP"(%5), %0 \n\t"
1156 "mov "V_TEMP"(%5), %1 \n\t"
1157 YSCALEYUV2RGB_YA(%%REGBP, %5, %0, %1)
1158 "psraw $3, %%mm1 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
1159 "psraw $3, %%mm7 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
1160 "packuswb %%mm7, %%mm1 \n\t"
1163 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
1164 "pop %%"REG_BP" \n\t"
1165 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1167 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
/* No alpha plane: constant 0xFF alpha via pcmpeqd. */
1173 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1174 "mov %4, %%"REG_b" \n\t"
1175 "push %%"REG_BP" \n\t"
1176 YSCALEYUV2RGB(%%REGBP, %5)
1177 "pcmpeqd %%mm7, %%mm7 \n\t"
1178 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1179 "pop %%"REG_BP" \n\t"
1180 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1182 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
/* Vertical bilinear blend of two rows -> 24-bit BGR.  Uses the literal
 * 8280 in place of DSTW_OFFSET (see note below) to read dstW from the
 * context inside the asm. */
1188 static inline void RENAME(yuv2bgr24_2)(SwsContext *c, const uint16_t *buf0, const uint16_t *buf1, const uint16_t *uvbuf0, const uint16_t *uvbuf1,
1189 const uint16_t *abuf0, const uint16_t *abuf1, uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
1191 //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
1193 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1194 "mov %4, %%"REG_b" \n\t"
1195 "push %%"REG_BP" \n\t"
1196 YSCALEYUV2RGB(%%REGBP, %5)
1197 "pxor %%mm7, %%mm7 \n\t"
1198 WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
1199 "pop %%"REG_BP" \n\t"
1200 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1201 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
/* Vertical bilinear blend of two rows -> RGB555, with optional dither. */
1206 static inline void RENAME(yuv2rgb555_2)(SwsContext *c, const uint16_t *buf0, const uint16_t *buf1, const uint16_t *uvbuf0, const uint16_t *uvbuf1,
1207 const uint16_t *abuf0, const uint16_t *abuf1, uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
1209 //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
1211 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1212 "mov %4, %%"REG_b" \n\t"
1213 "push %%"REG_BP" \n\t"
1214 YSCALEYUV2RGB(%%REGBP, %5)
1215 "pxor %%mm7, %%mm7 \n\t"
1216 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1218 "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
1219 "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
1220 "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
1223 WRITERGB15(%%REGb, 8280(%5), %%REGBP)
1224 "pop %%"REG_BP" \n\t"
1225 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1227 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
/* Vertical bilinear blend of two rows -> RGB565, with optional dither. */
1232 static inline void RENAME(yuv2rgb565_2)(SwsContext *c, const uint16_t *buf0, const uint16_t *buf1, const uint16_t *uvbuf0, const uint16_t *uvbuf1,
1233 const uint16_t *abuf0, const uint16_t *abuf1, uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
1235 //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
1237 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1238 "mov %4, %%"REG_b" \n\t"
1239 "push %%"REG_BP" \n\t"
1240 YSCALEYUV2RGB(%%REGBP, %5)
1241 "pxor %%mm7, %%mm7 \n\t"
1242 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1244 "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
1245 "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
1246 "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
1249 WRITERGB16(%%REGb, 8280(%5), %%REGBP)
1250 "pop %%"REG_BP" \n\t"
1251 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1252 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
/* Vertical bilinear blend of two rows -> packed YUYV 4:2:2. */
1257 static inline void RENAME(yuv2yuyv422_2)(SwsContext *c, const uint16_t *buf0, const uint16_t *buf1, const uint16_t *uvbuf0, const uint16_t *uvbuf1,
1258 const uint16_t *abuf0, const uint16_t *abuf1, uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
1260 //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
1262 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1263 "mov %4, %%"REG_b" \n\t"
1264 "push %%"REG_BP" \n\t"
1265 YSCALEYUV2PACKED(%%REGBP, %5)
1266 WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
1267 "pop %%"REG_BP" \n\t"
1268 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1269 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1275 * YV12 to RGB without scaling or interpolating
/* Single-row (no vertical interpolation) YV12 -> 32-bit RGB.
 * uvalpha < 2048 selects the fast path that takes chroma from uvbuf0
 * only (shifts chroma by half a pixel, per the source comment);
 * otherwise the "1b" variant averages uvbuf0/uvbuf1.  Each path has an
 * alpha-plane and an opaque-alpha (pcmpeqd) version. */
1277 static inline void RENAME(yuv2rgb32_1)(SwsContext *c, const uint16_t *buf0, const uint16_t *uvbuf0, const uint16_t *uvbuf1,
1278 const uint16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, enum PixelFormat dstFormat, int flags, int y)
1280 const uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
1282 if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
1283 if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
1285 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1286 "mov %4, %%"REG_b" \n\t"
1287 "push %%"REG_BP" \n\t"
1288 YSCALEYUV2RGB1(%%REGBP, %5)
1289 YSCALEYUV2RGB1_ALPHA(%%REGBP)
1290 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1291 "pop %%"REG_BP" \n\t"
1292 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
/* Note: the alpha row goes in via the "d" operand here. */
1294 :: "c" (buf0), "d" (abuf0), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1299 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1300 "mov %4, %%"REG_b" \n\t"
1301 "push %%"REG_BP" \n\t"
1302 YSCALEYUV2RGB1(%%REGBP, %5)
1303 "pcmpeqd %%mm7, %%mm7 \n\t"
1304 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1305 "pop %%"REG_BP" \n\t"
1306 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1308 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
/* uvalpha >= 2048: average the two chroma rows (YSCALEYUV2RGB1b). */
1313 if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
1315 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1316 "mov %4, %%"REG_b" \n\t"
1317 "push %%"REG_BP" \n\t"
1318 YSCALEYUV2RGB1b(%%REGBP, %5)
1319 YSCALEYUV2RGB1_ALPHA(%%REGBP)
1320 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1321 "pop %%"REG_BP" \n\t"
1322 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1324 :: "c" (buf0), "d" (abuf0), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1329 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1330 "mov %4, %%"REG_b" \n\t"
1331 "push %%"REG_BP" \n\t"
1332 YSCALEYUV2RGB1b(%%REGBP, %5)
1333 "pcmpeqd %%mm7, %%mm7 \n\t"
1334 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1335 "pop %%"REG_BP" \n\t"
1336 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1338 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
/* Single-row YV12 -> 24-bit BGR; two chroma paths as in yuv2rgb32_1
 * (nearest chroma row vs. averaged rows). */
1345 static inline void RENAME(yuv2bgr24_1)(SwsContext *c, const uint16_t *buf0, const uint16_t *uvbuf0, const uint16_t *uvbuf1,
1346 const uint16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, enum PixelFormat dstFormat, int flags, int y)
1348 const uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
1350 if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
1352 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1353 "mov %4, %%"REG_b" \n\t"
1354 "push %%"REG_BP" \n\t"
1355 YSCALEYUV2RGB1(%%REGBP, %5)
1356 "pxor %%mm7, %%mm7 \n\t"
1357 WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
1358 "pop %%"REG_BP" \n\t"
1359 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1361 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1366 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1367 "mov %4, %%"REG_b" \n\t"
1368 "push %%"REG_BP" \n\t"
1369 YSCALEYUV2RGB1b(%%REGBP, %5)
1370 "pxor %%mm7, %%mm7 \n\t"
1371 WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
1372 "pop %%"REG_BP" \n\t"
1373 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1375 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
/* Single-row YV12 -> RGB555 with optional dither; nearest-chroma and
 * averaged-chroma paths. */
1381 static inline void RENAME(yuv2rgb555_1)(SwsContext *c, const uint16_t *buf0, const uint16_t *uvbuf0, const uint16_t *uvbuf1,
1382 const uint16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, enum PixelFormat dstFormat, int flags, int y)
1384 const uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
1386 if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
1388 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1389 "mov %4, %%"REG_b" \n\t"
1390 "push %%"REG_BP" \n\t"
1391 YSCALEYUV2RGB1(%%REGBP, %5)
1392 "pxor %%mm7, %%mm7 \n\t"
1393 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1395 "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
1396 "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
1397 "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
1399 WRITERGB15(%%REGb, 8280(%5), %%REGBP)
1400 "pop %%"REG_BP" \n\t"
1401 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1403 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1408 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1409 "mov %4, %%"REG_b" \n\t"
1410 "push %%"REG_BP" \n\t"
1411 YSCALEYUV2RGB1b(%%REGBP, %5)
1412 "pxor %%mm7, %%mm7 \n\t"
1413 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1415 "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
1416 "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
1417 "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
1419 WRITERGB15(%%REGb, 8280(%5), %%REGBP)
1420 "pop %%"REG_BP" \n\t"
1421 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1423 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
/* Single-row YV12 -> RGB565 with optional dither; nearest-chroma and
 * averaged-chroma paths, packing via WRITERGB16. */
1429 static inline void RENAME(yuv2rgb565_1)(SwsContext *c, const uint16_t *buf0, const uint16_t *uvbuf0, const uint16_t *uvbuf1,
1430 const uint16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, enum PixelFormat dstFormat, int flags, int y)
1432 const uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
1434 if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
1436 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1437 "mov %4, %%"REG_b" \n\t"
1438 "push %%"REG_BP" \n\t"
1439 YSCALEYUV2RGB1(%%REGBP, %5)
1440 "pxor %%mm7, %%mm7 \n\t"
1441 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1443 "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
1444 "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
1445 "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
1448 WRITERGB16(%%REGb, 8280(%5), %%REGBP)
1449 "pop %%"REG_BP" \n\t"
1450 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1452 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1457 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1458 "mov %4, %%"REG_b" \n\t"
1459 "push %%"REG_BP" \n\t"
1460 YSCALEYUV2RGB1b(%%REGBP, %5)
1461 "pxor %%mm7, %%mm7 \n\t"
1462 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
1464 "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
1465 "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
1466 "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
1469 WRITERGB16(%%REGb, 8280(%5), %%REGBP)
1470 "pop %%"REG_BP" \n\t"
1471 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1473 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
/* Single-row YV12 -> packed YUYV 4:2:2; nearest-chroma (YSCALEYUV2PACKED1)
 * and averaged-chroma (YSCALEYUV2PACKED1b) paths. */
1479 static inline void RENAME(yuv2yuyv422_1)(SwsContext *c, const uint16_t *buf0, const uint16_t *uvbuf0, const uint16_t *uvbuf1,
1480 const uint16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, enum PixelFormat dstFormat, int flags, int y)
1482 const uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
1484 if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
1486 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1487 "mov %4, %%"REG_b" \n\t"
1488 "push %%"REG_BP" \n\t"
1489 YSCALEYUV2PACKED1(%%REGBP, %5)
1490 WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
1491 "pop %%"REG_BP" \n\t"
1492 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1494 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1499 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
1500 "mov %4, %%"REG_b" \n\t"
1501 "push %%"REG_BP" \n\t"
1502 YSCALEYUV2PACKED1b(%%REGBP, %5)
1503 WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
1504 "pop %%"REG_BP" \n\t"
1505 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
1507 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
1513 //FIXME yuy2* can read up to 7 samples too much
/* Extract the luma bytes (even bytes, masked via bm01010101) from packed
 * YUYV input.  Pointers are passed pre-advanced to the buffer ends and
 * REG_a counts up from -width to 0 — the standard negative-index loop. */
1515 static inline void RENAME(yuy2ToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
1518 "movq "MANGLE(bm01010101)", %%mm2 \n\t"
1519 "mov %0, %%"REG_a" \n\t"
1521 "movq (%1, %%"REG_a",2), %%mm0 \n\t"
1522 "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
1523 "pand %%mm2, %%mm0 \n\t"
1524 "pand %%mm2, %%mm1 \n\t"
1525 "packuswb %%mm1, %%mm0 \n\t"
1526 "movq %%mm0, (%2, %%"REG_a") \n\t"
1527 "add $8, %%"REG_a" \n\t"
1529 : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst+width)
/* De-interleave the chroma bytes (odd bytes) of packed YUYV into separate
 * U and V planes; 4 output pixels per iteration.  Only src1 is read —
 * the trailing assert documents that callers must pass src1 == src2. */
1534 static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
1537 "movq "MANGLE(bm01010101)", %%mm4 \n\t"
1538 "mov %0, %%"REG_a" \n\t"
/* psrlw $8 drops the luma byte of each 16-bit Y/C pair, keeping chroma. */
1540 "movq (%1, %%"REG_a",4), %%mm0 \n\t"
1541 "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
1542 "psrlw $8, %%mm0 \n\t"
1543 "psrlw $8, %%mm1 \n\t"
1544 "packuswb %%mm1, %%mm0 \n\t"
1545 "movq %%mm0, %%mm1 \n\t"
1546 "psrlw $8, %%mm0 \n\t"
1547 "pand %%mm4, %%mm1 \n\t"
1548 "packuswb %%mm0, %%mm0 \n\t"
1549 "packuswb %%mm1, %%mm1 \n\t"
1550 "movd %%mm0, (%3, %%"REG_a") \n\t"
1551 "movd %%mm1, (%2, %%"REG_a") \n\t"
1552 "add $4, %%"REG_a" \n\t"
1554 : : "g" ((x86_reg)-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
1557 assert(src1 == src2);
/* Take the high byte of each little-endian 16-bit sample from two
 * separate chroma planes (src1 -> dstU, src2 -> dstV). */
1560 static inline void RENAME(LEToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
1563 "mov %0, %%"REG_a" \n\t"
1565 "movq (%1, %%"REG_a",2), %%mm0 \n\t"
1566 "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
1567 "movq (%2, %%"REG_a",2), %%mm2 \n\t"
1568 "movq 8(%2, %%"REG_a",2), %%mm3 \n\t"
1569 "psrlw $8, %%mm0 \n\t"
1570 "psrlw $8, %%mm1 \n\t"
1571 "psrlw $8, %%mm2 \n\t"
1572 "psrlw $8, %%mm3 \n\t"
1573 "packuswb %%mm1, %%mm0 \n\t"
1574 "packuswb %%mm3, %%mm2 \n\t"
1575 "movq %%mm0, (%3, %%"REG_a") \n\t"
1576 "movq %%mm2, (%4, %%"REG_a") \n\t"
1577 "add $8, %%"REG_a" \n\t"
1579 : : "g" ((x86_reg)-width), "r" (src1+width*2), "r" (src2+width*2), "r" (dstU+width), "r" (dstV+width)
1584 /* This is almost identical to the previous, and exists only because
1585 * yuy2ToY/UV)(dst, src+1, ...) would have 100% unaligned accesses. */
/* Extract luma from packed UYVY: luma is in the odd bytes here, so a
 * right shift by 8 replaces the mask used in yuy2ToY. */
1586 static inline void RENAME(uyvyToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
1589 "mov %0, %%"REG_a" \n\t"
1591 "movq (%1, %%"REG_a",2), %%mm0 \n\t"
1592 "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
1593 "psrlw $8, %%mm0 \n\t"
1594 "psrlw $8, %%mm1 \n\t"
1595 "packuswb %%mm1, %%mm0 \n\t"
1596 "movq %%mm0, (%2, %%"REG_a") \n\t"
1597 "add $8, %%"REG_a" \n\t"
1599 : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst+width)
/* De-interleave chroma from packed UYVY: chroma sits in the even bytes,
 * selected with the bm01010101 mask instead of a shift.  Only src1 is
 * read; the assert documents the src1 == src2 contract. */
1604 static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
1607 "movq "MANGLE(bm01010101)", %%mm4 \n\t"
1608 "mov %0, %%"REG_a" \n\t"
1610 "movq (%1, %%"REG_a",4), %%mm0 \n\t"
1611 "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
1612 "pand %%mm4, %%mm0 \n\t"
1613 "pand %%mm4, %%mm1 \n\t"
1614 "packuswb %%mm1, %%mm0 \n\t"
1615 "movq %%mm0, %%mm1 \n\t"
1616 "psrlw $8, %%mm0 \n\t"
1617 "pand %%mm4, %%mm1 \n\t"
1618 "packuswb %%mm0, %%mm0 \n\t"
1619 "packuswb %%mm1, %%mm1 \n\t"
1620 "movd %%mm0, (%3, %%"REG_a") \n\t"
1621 "movd %%mm1, (%2, %%"REG_a") \n\t"
1622 "add $4, %%"REG_a" \n\t"
1624 : : "g" ((x86_reg)-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
1627 assert(src1 == src2);
/* Take the high byte of each big-endian 16-bit sample (= low byte in
 * memory, selected by the bm01010101 mask) from two chroma planes. */
1630 static inline void RENAME(BEToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
1633 "movq "MANGLE(bm01010101)", %%mm4 \n\t"
1634 "mov %0, %%"REG_a" \n\t"
1636 "movq (%1, %%"REG_a",2), %%mm0 \n\t"
1637 "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
1638 "movq (%2, %%"REG_a",2), %%mm2 \n\t"
1639 "movq 8(%2, %%"REG_a",2), %%mm3 \n\t"
1640 "pand %%mm4, %%mm0 \n\t"
1641 "pand %%mm4, %%mm1 \n\t"
1642 "pand %%mm4, %%mm2 \n\t"
1643 "pand %%mm4, %%mm3 \n\t"
1644 "packuswb %%mm1, %%mm0 \n\t"
1645 "packuswb %%mm3, %%mm2 \n\t"
1646 "movq %%mm0, (%3, %%"REG_a") \n\t"
1647 "movq %%mm2, (%4, %%"REG_a") \n\t"
1648 "add $8, %%"REG_a" \n\t"
1650 : : "g" ((x86_reg)-width), "r" (src1+width*2), "r" (src2+width*2), "r" (dstU+width), "r" (dstV+width)
/* Split an interleaved two-channel chroma plane (NV12/NV21 style) into
 * two planar outputs: even bytes -> dst1, odd bytes -> dst2. */
1655 static inline void RENAME(nvXXtoUV)(uint8_t *dst1, uint8_t *dst2,
1656 const uint8_t *src, long width)
1659 "movq "MANGLE(bm01010101)", %%mm4 \n\t"
1660 "mov %0, %%"REG_a" \n\t"
1662 "movq (%1, %%"REG_a",2), %%mm0 \n\t"
1663 "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
1664 "movq %%mm0, %%mm2 \n\t"
1665 "movq %%mm1, %%mm3 \n\t"
1666 "pand %%mm4, %%mm0 \n\t"
1667 "pand %%mm4, %%mm1 \n\t"
1668 "psrlw $8, %%mm2 \n\t"
1669 "psrlw $8, %%mm3 \n\t"
1670 "packuswb %%mm1, %%mm0 \n\t"
1671 "packuswb %%mm3, %%mm2 \n\t"
1672 "movq %%mm0, (%2, %%"REG_a") \n\t"
1673 "movq %%mm2, (%3, %%"REG_a") \n\t"
1674 "add $8, %%"REG_a" \n\t"
1676 : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst1+width), "r" (dst2+width)
/* NV12: interleaved chroma is U,V,U,V... -> even bytes to dstU. */
1681 static inline void RENAME(nv12ToUV)(uint8_t *dstU, uint8_t *dstV,
1682 const uint8_t *src1, const uint8_t *src2,
1683 long width, uint32_t *unused)
1685 RENAME(nvXXtoUV)(dstU, dstV, src1, width);
/* NV21: interleaved chroma is V,U,V,U... -> swap destinations. */
1688 static inline void RENAME(nv21ToUV)(uint8_t *dstU, uint8_t *dstV,
1689 const uint8_t *src1, const uint8_t *src2,
1690 long width, uint32_t *unused)
1692 RENAME(nvXXtoUV)(dstV, dstU, src1, width);
/* Convert packed 24-bit BGR or RGB to 8-bit luma, 4 pixels/iteration.
 * The only difference between the two formats is the coefficient pair
 * loaded into mm5/mm6; the pmaddwd/shift pipeline is shared.  Each
 * pixel's 3 bytes are loaded via two overlapping movd's (offsets 0/2
 * and 6/8) so pmaddwd can see word-expanded channels. */
1695 static inline void RENAME(bgr24ToY_mmx)(uint8_t *dst, const uint8_t *src, long width, enum PixelFormat srcFormat)
1698 if(srcFormat == PIX_FMT_BGR24) {
1700 "movq "MANGLE(ff_bgr24toY1Coeff)", %%mm5 \n\t"
1701 "movq "MANGLE(ff_bgr24toY2Coeff)", %%mm6 \n\t"
1706 "movq "MANGLE(ff_rgb24toY1Coeff)", %%mm5 \n\t"
1707 "movq "MANGLE(ff_rgb24toY2Coeff)", %%mm6 \n\t"
1713 "movq "MANGLE(ff_bgr24toYOffset)", %%mm4 \n\t"
1714 "mov %2, %%"REG_a" \n\t"
1715 "pxor %%mm7, %%mm7 \n\t"
1717 PREFETCH" 64(%0) \n\t"
1718 "movd (%0), %%mm0 \n\t"
1719 "movd 2(%0), %%mm1 \n\t"
1720 "movd 6(%0), %%mm2 \n\t"
1721 "movd 8(%0), %%mm3 \n\t"
1723 "punpcklbw %%mm7, %%mm0 \n\t"
1724 "punpcklbw %%mm7, %%mm1 \n\t"
1725 "punpcklbw %%mm7, %%mm2 \n\t"
1726 "punpcklbw %%mm7, %%mm3 \n\t"
1727 "pmaddwd %%mm5, %%mm0 \n\t"
1728 "pmaddwd %%mm6, %%mm1 \n\t"
1729 "pmaddwd %%mm5, %%mm2 \n\t"
1730 "pmaddwd %%mm6, %%mm3 \n\t"
1731 "paddd %%mm1, %%mm0 \n\t"
1732 "paddd %%mm3, %%mm2 \n\t"
/* Add rounding offset, then scale back from the 15-bit fixed point. */
1733 "paddd %%mm4, %%mm0 \n\t"
1734 "paddd %%mm4, %%mm2 \n\t"
1735 "psrad $15, %%mm0 \n\t"
1736 "psrad $15, %%mm2 \n\t"
1737 "packssdw %%mm2, %%mm0 \n\t"
1738 "packuswb %%mm0, %%mm0 \n\t"
1739 "movd %%mm0, (%1, %%"REG_a") \n\t"
1740 "add $4, %%"REG_a" \n\t"
1743 : "r" (dst+width), "g" ((x86_reg)-width)
/* Convert packed 24-bit BGR/RGB to U and V chroma, 4 pixels/iteration.
 * The coefficient table (operand %4) is selected by srcFormat from
 * ff_bgr24toUV; mm6 caches the last coefficient quad.  Structure mirrors
 * bgr24ToY_mmx: overlapping movd loads, pmaddwd dot products, 15-bit
 * fixed-point rounding via ff_bgr24toUVOffset. */
1748 static inline void RENAME(bgr24ToUV_mmx)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src, long width, enum PixelFormat srcFormat)
1751 "movq 24(%4), %%mm6 \n\t"
1752 "mov %3, %%"REG_a" \n\t"
1753 "pxor %%mm7, %%mm7 \n\t"
1755 PREFETCH" 64(%0) \n\t"
1756 "movd (%0), %%mm0 \n\t"
1757 "movd 2(%0), %%mm1 \n\t"
1758 "punpcklbw %%mm7, %%mm0 \n\t"
1759 "punpcklbw %%mm7, %%mm1 \n\t"
1760 "movq %%mm0, %%mm2 \n\t"
1761 "movq %%mm1, %%mm3 \n\t"
1762 "pmaddwd (%4), %%mm0 \n\t"
1763 "pmaddwd 8(%4), %%mm1 \n\t"
1764 "pmaddwd 16(%4), %%mm2 \n\t"
1765 "pmaddwd %%mm6, %%mm3 \n\t"
1766 "paddd %%mm1, %%mm0 \n\t"
1767 "paddd %%mm3, %%mm2 \n\t"
/* Second pixel pair (bytes 6..11). */
1769 "movd 6(%0), %%mm1 \n\t"
1770 "movd 8(%0), %%mm3 \n\t"
1772 "punpcklbw %%mm7, %%mm1 \n\t"
1773 "punpcklbw %%mm7, %%mm3 \n\t"
1774 "movq %%mm1, %%mm4 \n\t"
1775 "movq %%mm3, %%mm5 \n\t"
1776 "pmaddwd (%4), %%mm1 \n\t"
1777 "pmaddwd 8(%4), %%mm3 \n\t"
1778 "pmaddwd 16(%4), %%mm4 \n\t"
1779 "pmaddwd %%mm6, %%mm5 \n\t"
1780 "paddd %%mm3, %%mm1 \n\t"
1781 "paddd %%mm5, %%mm4 \n\t"
1783 "movq "MANGLE(ff_bgr24toUVOffset)", %%mm3 \n\t"
1784 "paddd %%mm3, %%mm0 \n\t"
1785 "paddd %%mm3, %%mm2 \n\t"
1786 "paddd %%mm3, %%mm1 \n\t"
1787 "paddd %%mm3, %%mm4 \n\t"
1788 "psrad $15, %%mm0 \n\t"
1789 "psrad $15, %%mm2 \n\t"
1790 "psrad $15, %%mm1 \n\t"
1791 "psrad $15, %%mm4 \n\t"
1792 "packssdw %%mm1, %%mm0 \n\t"
1793 "packssdw %%mm4, %%mm2 \n\t"
1794 "packuswb %%mm0, %%mm0 \n\t"
1795 "packuswb %%mm2, %%mm2 \n\t"
1796 "movd %%mm0, (%1, %%"REG_a") \n\t"
1797 "movd %%mm2, (%2, %%"REG_a") \n\t"
1798 "add $4, %%"REG_a" \n\t"
1801 : "r" (dstU+width), "r" (dstV+width), "g" ((x86_reg)-width), "r"(ff_bgr24toUV[srcFormat == PIX_FMT_RGB24])
/* BGR24 luma wrapper around the shared MMX worker. */
1806 static inline void RENAME(bgr24ToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
1808 RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_BGR24);
/* BGR24 chroma wrapper; callers must pass src1 == src2 (asserted). */
1811 static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
1813 RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_BGR24);
1814 assert(src1 == src2);
/* RGB24 luma wrapper around the shared MMX worker. */
1817 static inline void RENAME(rgb24ToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
1819 RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_RGB24);
/* RGB24 chroma wrapper around the shared MMX worker. */
1822 static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
1825 RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_RGB24);
1829 // bilinear / bicubic scaling
/* Horizontal scaling: dst[i] = sum over filterSize of
 * src[filterPos[i] + j] * filter[i*filterSize + j], result >>7 and
 * saturated.  Three specializations: unrolled filterSize==4 and ==8
 * loops, plus a generic inner-loop fallback.  filterSize must be a
 * positive multiple of 4 (asserted).  The counter runs from -2*dstW up
 * to 0 and filterPos is pre-biased by counter/2 so the asm can index
 * both the 16-bit dst and the filterPos table off one register.
 * REG_b (ebx) is explicitly pushed/popped because it may be the PIC
 * register and cannot appear in the clobber list. */
1830 static inline void RENAME(hScale)(int16_t *dst, int dstW, const uint8_t *src, int srcW, int xInc,
1831 const int16_t *filter, const int16_t *filterPos, long filterSize)
1833 assert(filterSize % 4 == 0 && filterSize>0);
1834 if (filterSize==4) { // Always true for upscaling, sometimes for down, too.
1835 x86_reg counter= -2*dstW;
1837 filterPos-= counter/2;
1841 "push %%"REG_b" \n\t"
1843 "pxor %%mm7, %%mm7 \n\t"
1844 "push %%"REG_BP" \n\t" // we use 7 regs here ...
1845 "mov %%"REG_a", %%"REG_BP" \n\t"
/* Two output samples per iteration: positions come from filterPos,
 * coefficients from filter (4 words each), sources gathered via movd. */
1848 "movzwl (%2, %%"REG_BP"), %%eax \n\t"
1849 "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t"
1850 "movq (%1, %%"REG_BP", 4), %%mm1 \n\t"
1851 "movq 8(%1, %%"REG_BP", 4), %%mm3 \n\t"
1852 "movd (%3, %%"REG_a"), %%mm0 \n\t"
1853 "movd (%3, %%"REG_b"), %%mm2 \n\t"
1854 "punpcklbw %%mm7, %%mm0 \n\t"
1855 "punpcklbw %%mm7, %%mm2 \n\t"
1856 "pmaddwd %%mm1, %%mm0 \n\t"
1857 "pmaddwd %%mm2, %%mm3 \n\t"
1858 "movq %%mm0, %%mm4 \n\t"
1859 "punpckldq %%mm3, %%mm0 \n\t"
1860 "punpckhdq %%mm3, %%mm4 \n\t"
1861 "paddd %%mm4, %%mm0 \n\t"
1862 "psrad $7, %%mm0 \n\t"
1863 "packssdw %%mm0, %%mm0 \n\t"
1864 "movd %%mm0, (%4, %%"REG_BP") \n\t"
1865 "add $4, %%"REG_BP" \n\t"
1868 "pop %%"REG_BP" \n\t"
1870 "pop %%"REG_b" \n\t"
1873 : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
1878 } else if (filterSize==8) {
1879 x86_reg counter= -2*dstW;
1881 filterPos-= counter/2;
1885 "push %%"REG_b" \n\t"
1887 "pxor %%mm7, %%mm7 \n\t"
1888 "push %%"REG_BP" \n\t" // we use 7 regs here ...
1889 "mov %%"REG_a", %%"REG_BP" \n\t"
/* 8-tap version: same as above but with a second coefficient/source
 * quad accumulated in before the horizontal add. */
1892 "movzwl (%2, %%"REG_BP"), %%eax \n\t"
1893 "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t"
1894 "movq (%1, %%"REG_BP", 8), %%mm1 \n\t"
1895 "movq 16(%1, %%"REG_BP", 8), %%mm3 \n\t"
1896 "movd (%3, %%"REG_a"), %%mm0 \n\t"
1897 "movd (%3, %%"REG_b"), %%mm2 \n\t"
1898 "punpcklbw %%mm7, %%mm0 \n\t"
1899 "punpcklbw %%mm7, %%mm2 \n\t"
1900 "pmaddwd %%mm1, %%mm0 \n\t"
1901 "pmaddwd %%mm2, %%mm3 \n\t"
1903 "movq 8(%1, %%"REG_BP", 8), %%mm1 \n\t"
1904 "movq 24(%1, %%"REG_BP", 8), %%mm5 \n\t"
1905 "movd 4(%3, %%"REG_a"), %%mm4 \n\t"
1906 "movd 4(%3, %%"REG_b"), %%mm2 \n\t"
1907 "punpcklbw %%mm7, %%mm4 \n\t"
1908 "punpcklbw %%mm7, %%mm2 \n\t"
1909 "pmaddwd %%mm1, %%mm4 \n\t"
1910 "pmaddwd %%mm2, %%mm5 \n\t"
1911 "paddd %%mm4, %%mm0 \n\t"
1912 "paddd %%mm5, %%mm3 \n\t"
1913 "movq %%mm0, %%mm4 \n\t"
1914 "punpckldq %%mm3, %%mm0 \n\t"
1915 "punpckhdq %%mm3, %%mm4 \n\t"
1916 "paddd %%mm4, %%mm0 \n\t"
1917 "psrad $7, %%mm0 \n\t"
1918 "packssdw %%mm0, %%mm0 \n\t"
1919 "movd %%mm0, (%4, %%"REG_BP") \n\t"
1920 "add $4, %%"REG_BP" \n\t"
1923 "pop %%"REG_BP" \n\t"
1925 "pop %%"REG_b" \n\t"
1928 : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
/* Generic fallback: inner loop over the filter taps, 4 at a time,
 * accumulating two output samples in mm4/mm5. */
1934 const uint8_t *offset = src+filterSize;
1935 x86_reg counter= -2*dstW;
1936 //filter-= counter*filterSize/2;
1937 filterPos-= counter/2;
1940 "pxor %%mm7, %%mm7 \n\t"
1943 "mov %2, %%"REG_c" \n\t"
1944 "movzwl (%%"REG_c", %0), %%eax \n\t"
1945 "movzwl 2(%%"REG_c", %0), %%edx \n\t"
1946 "mov %5, %%"REG_c" \n\t"
1947 "pxor %%mm4, %%mm4 \n\t"
1948 "pxor %%mm5, %%mm5 \n\t"
1950 "movq (%1), %%mm1 \n\t"
1951 "movq (%1, %6), %%mm3 \n\t"
1952 "movd (%%"REG_c", %%"REG_a"), %%mm0 \n\t"
1953 "movd (%%"REG_c", %%"REG_d"), %%mm2 \n\t"
1954 "punpcklbw %%mm7, %%mm0 \n\t"
1955 "punpcklbw %%mm7, %%mm2 \n\t"
1956 "pmaddwd %%mm1, %%mm0 \n\t"
1957 "pmaddwd %%mm2, %%mm3 \n\t"
1958 "paddd %%mm3, %%mm5 \n\t"
1959 "paddd %%mm0, %%mm4 \n\t"
1961 "add $4, %%"REG_c" \n\t"
1962 "cmp %4, %%"REG_c" \n\t"
1965 "movq %%mm4, %%mm0 \n\t"
1966 "punpckldq %%mm5, %%mm4 \n\t"
1967 "punpckhdq %%mm5, %%mm0 \n\t"
1968 "paddd %%mm0, %%mm4 \n\t"
1969 "psrad $7, %%mm4 \n\t"
1970 "packssdw %%mm4, %%mm4 \n\t"
1971 "mov %3, %%"REG_a" \n\t"
1972 "movd %%mm4, (%%"REG_a", %0) \n\t"
1976 : "+r" (counter), "+r" (filter)
1977 : "m" (filterPos), "m" (dst), "m"(offset),
1978 "m" (src), "r" ((x86_reg)filterSize*2)
1979 : "%"REG_a, "%"REG_c, "%"REG_d
1984 #if COMPILE_TEMPLATE_MMX2
/*
 * RENAME(hyscale_fast): fast-path horizontal luma scaler (MMX2 builds only).
 * Executes the runtime-generated MMX2 filter code (c->lumMmx2FilterCode)
 * over the source line, then pads the destination tail by replicating the
 * last source pixel.  Destination samples are stored as pixel*128 (15-bit
 * range), matching the *128 in the tail loop below.
 * NOTE(review): the asm statement's open/close lines and parts of its
 * operand lists fall outside this excerpt; operand-number comments below
 * are best-effort and should be confirmed against the full file.
 */
1985 static inline void RENAME(hyscale_fast)(SwsContext *c, int16_t *dst,
1986 long dstWidth, const uint8_t *src, int srcW,
1989 int32_t *filterPos = c->hLumFilterPos;
1990 int16_t *filter = c->hLumFilter;
1991 int canMMX2BeUsed = c->canMMX2BeUsed;
1992 void *mmx2FilterCode= c->lumMmx2FilterCode; /* runtime-generated scaler code chunk */
1995 DECLARE_ALIGNED(8, uint64_t, ebxsave); /* 8-byte slot to preserve %ebx/%rbx across the asm (ebx is PIC-reserved) */
2000 "mov %%"REG_b", %5 \n\t" /* save ebx/rbx -- %5 presumably ebxsave; confirm in full operand list */
2002 "pxor %%mm7, %%mm7 \n\t" /* mm7 = 0 (zero register for the generated code) */
2003 "mov %0, %%"REG_c" \n\t" /* REG_c = src */
2004 "mov %1, %%"REG_D" \n\t" /* REG_D = dst */
2005 "mov %2, %%"REG_d" \n\t" /* REG_d = filter */
2006 "mov %3, %%"REG_b" \n\t" /* REG_b = filterPos */
2007 "xor %%"REG_a", %%"REG_a" \n\t" // i
2008 PREFETCH" (%%"REG_c") \n\t" /* warm the cache with the start of the source line */
2009 PREFETCH" 32(%%"REG_c") \n\t"
2010 PREFETCH" 64(%%"REG_c") \n\t"
/*
 * CALL_MMX2_FILTER_CODE runs one chunk of the generated scaler code and
 * advances the source/destination pointers for the next chunk.  Two
 * variants: the x86_64 version fetches the per-chunk source advance into
 * %esi and adds it to REG_c separately; the x86_32 version adds it to
 * REG_c directly with addl.  NOTE(review): the #if/#else lines and the
 * actual call into the generated code are not visible in this excerpt.
 */
2014 #define CALL_MMX2_FILTER_CODE \
2015 "movl (%%"REG_b"), %%esi \n\t"\
2017 "movl (%%"REG_b", %%"REG_a"), %%esi \n\t"\
2018 "add %%"REG_S", %%"REG_c" \n\t"\
2019 "add %%"REG_a", %%"REG_D" \n\t"\
2020 "xor %%"REG_a", %%"REG_a" \n\t"\
2024 #define CALL_MMX2_FILTER_CODE \
2025 "movl (%%"REG_b"), %%esi \n\t"\
2027 "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\
2028 "add %%"REG_a", %%"REG_D" \n\t"\
2029 "xor %%"REG_a", %%"REG_a" \n\t"\
2031 #endif /* ARCH_X86_64 */
2033 CALL_MMX2_FILTER_CODE /* run the generated filter code, one chunk per invocation */
2034 CALL_MMX2_FILTER_CODE
2035 CALL_MMX2_FILTER_CODE
2036 CALL_MMX2_FILTER_CODE
2037 CALL_MMX2_FILTER_CODE
2038 CALL_MMX2_FILTER_CODE
2039 CALL_MMX2_FILTER_CODE
2040 CALL_MMX2_FILTER_CODE
2043 "mov %5, %%"REG_b" \n\t" /* restore ebx/rbx */
2045 :: "m" (src), "m" (dst), "m" (filter), "m" (filterPos),
2046 "m" (mmx2FilterCode)
2050 : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
2055 for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128; /* pad outputs past the source edge with the last pixel, scaled to 15-bit */
/*
 * RENAME(hcscale_fast): fast-path horizontal chroma scaler (MMX2 builds
 * only), twin of hyscale_fast but operating on two input planes: src1 is
 * scaled into dst[0..], then src2 into dst[VOF..] (REG_D is advanced by
 * VOF before the second pass).  The tail loop pads both planes with the
 * last source pixel scaled by 128 (15-bit range).
 * NOTE(review): the asm statement's open/close lines and parts of its
 * operand lists fall outside this excerpt; operand-number comments below
 * are best-effort and should be confirmed against the full file.
 */
2058 static inline void RENAME(hcscale_fast)(SwsContext *c, int16_t *dst,
2059 long dstWidth, const uint8_t *src1,
2060 const uint8_t *src2, int srcW, int xInc)
2062 int32_t *filterPos = c->hChrFilterPos;
2063 int16_t *filter = c->hChrFilter;
2064 int canMMX2BeUsed = c->canMMX2BeUsed;
2065 void *mmx2FilterCode= c->chrMmx2FilterCode; /* runtime-generated scaler code chunk */
2068 DECLARE_ALIGNED(8, uint64_t, ebxsave); /* 8-byte slot to preserve %ebx/%rbx across the asm (ebx is PIC-reserved) */
2073 "mov %%"REG_b", %6 \n\t" /* save ebx/rbx -- %6 presumably ebxsave; confirm in full operand list */
2075 "pxor %%mm7, %%mm7 \n\t" /* mm7 = 0 (zero register for the generated code) */
2076 "mov %0, %%"REG_c" \n\t" /* REG_c = src1 */
2077 "mov %1, %%"REG_D" \n\t" /* REG_D = dst */
2078 "mov %2, %%"REG_d" \n\t" /* REG_d = filter */
2079 "mov %3, %%"REG_b" \n\t" /* REG_b = filterPos */
2080 "xor %%"REG_a", %%"REG_a" \n\t" // i
2081 PREFETCH" (%%"REG_c") \n\t" /* warm the cache with the first chroma plane */
2082 PREFETCH" 32(%%"REG_c") \n\t"
2083 PREFETCH" 64(%%"REG_c") \n\t"
2085 CALL_MMX2_FILTER_CODE /* first pass: scale src1 into dst */
2086 CALL_MMX2_FILTER_CODE
2087 CALL_MMX2_FILTER_CODE
2088 CALL_MMX2_FILTER_CODE
2089 "xor %%"REG_a", %%"REG_a" \n\t" // i
2090 "mov %5, %%"REG_c" \n\t" // src
2091 "mov %1, %%"REG_D" \n\t" // buf1
2092 "add $"AV_STRINGIFY(VOF)", %%"REG_D" \n\t" /* second plane output goes at dst+VOF */
2093 PREFETCH" (%%"REG_c") \n\t" /* warm the cache with the second chroma plane */
2094 PREFETCH" 32(%%"REG_c") \n\t"
2095 PREFETCH" 64(%%"REG_c") \n\t"
2097 CALL_MMX2_FILTER_CODE /* second pass: scale src2 into dst+VOF */
2098 CALL_MMX2_FILTER_CODE
2099 CALL_MMX2_FILTER_CODE
2100 CALL_MMX2_FILTER_CODE
2103 "mov %6, %%"REG_b" \n\t" /* restore ebx/rbx */
2105 :: "m" (src1), "m" (dst), "m" (filter), "m" (filterPos),
2106 "m" (mmx2FilterCode), "m" (src2)
2110 : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
2115 for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) { /* pad outputs past the source edge */
2116 dst[i] = src1[srcW-1]*128;
2117 dst[i+VOFW] = src2[srcW-1]*128;
2120 #endif /* COMPILE_TEMPLATE_MMX2 */
2122 #if !COMPILE_TEMPLATE_MMX2
/*
 * updateMMXDitherTables: per-output-line setup for the MMX vertical
 * scaler.  Selects the dither rows for this output line (c->blueDither/
 * greenDither/redDither alternate by dstY parity) and packs the vertical
 * filter coefficients together with the source-row pointers into the
 * c->lumMmxFilter / c->chrMmxFilter / c->alpMmxFilter tables that the
 * MMX asm reads.  Two layouts are produced: an APCK_* layout when
 * SWS_ACCURATE_RND is set (two taps packed per entry), and a 4-int32
 * per-tap layout otherwise.
 * NOTE(review): the function's closing braces and any code after the
 * last chroma loop fall outside this excerpt.
 */
2123 static void updateMMXDitherTables(SwsContext *c, int dstY, int lumBufIndex, int chrBufIndex,
2124 int lastInLumBuf, int lastInChrBuf)
2126 const int dstH= c->dstH;
2127 const int flags= c->flags;
2128 int16_t **lumPixBuf= c->lumPixBuf;
2129 int16_t **chrPixBuf= c->chrPixBuf;
2130 int16_t **alpPixBuf= c->alpPixBuf;
2131 const int vLumBufSize= c->vLumBufSize;
2132 const int vChrBufSize= c->vChrBufSize;
2133 int16_t *vLumFilterPos= c->vLumFilterPos;
2134 int16_t *vChrFilterPos= c->vChrFilterPos;
2135 int16_t *vLumFilter= c->vLumFilter;
2136 int16_t *vChrFilter= c->vChrFilter;
2137 int32_t *lumMmxFilter= c->lumMmxFilter;
2138 int32_t *chrMmxFilter= c->chrMmxFilter;
2139 int32_t av_unused *alpMmxFilter= c->alpMmxFilter;
2140 const int vLumFilterSize= c->vLumFilterSize;
2141 const int vChrFilterSize= c->vChrFilterSize;
2142 const int chrDstY= dstY>>c->chrDstVSubSample; /* chroma output line for this luma line */
2143 const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
2144 const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
2146 c->blueDither= ff_dither8[dstY&1]; /* dither rows alternate with output-line parity */
2147 if (c->dstFormat == PIX_FMT_RGB555 || c->dstFormat == PIX_FMT_BGR555)
2148 c->greenDither= ff_dither8[dstY&1]; /* 555: green gets the stronger dither -- presumably for its 5-bit depth; confirm */
2150 c->greenDither= ff_dither4[dstY&1];
2151 c->redDither= ff_dither8[(dstY+1)&1]; /* opposite parity from blue */
2152 if (dstY < dstH - 2) {
2153 const int16_t **lumSrcPtr= (const int16_t **) lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize; /* index into the circular line buffer */
2154 const int16_t **chrSrcPtr= (const int16_t **) chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
2155 const int16_t **alpSrcPtr= (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? (const int16_t **) alpPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize : NULL;
2157 if (flags & SWS_ACCURATE_RND) {
2158 int s= APCK_SIZE / 8; /* int32 slots per packed two-tap entry */
2159 for (i=0; i<vLumFilterSize; i+=2) { /* two taps per entry */
2160 *(const void**)&lumMmxFilter[s*i ]= lumSrcPtr[i ]; /* store row pointer via type-punned slot */
2161 *(const void**)&lumMmxFilter[s*i+APCK_PTR2/4 ]= lumSrcPtr[i+(vLumFilterSize>1)];
2162 lumMmxFilter[s*i+APCK_COEF/4 ]=
2163 lumMmxFilter[s*i+APCK_COEF/4+1]= vLumFilter[dstY*vLumFilterSize + i ]
2164 + (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0); /* coeff i in the low 16 bits, coeff i+1 in the high 16 */
2165 if (CONFIG_SWSCALE_ALPHA && alpPixBuf) {
2166 *(const void**)&alpMmxFilter[s*i ]= alpSrcPtr[i ];
2167 *(const void**)&alpMmxFilter[s*i+APCK_PTR2/4 ]= alpSrcPtr[i+(vLumFilterSize>1)];
2168 alpMmxFilter[s*i+APCK_COEF/4 ]=
2169 alpMmxFilter[s*i+APCK_COEF/4+1]= lumMmxFilter[s*i+APCK_COEF/4 ]; /* alpha reuses the luma coefficients */
2172 for (i=0; i<vChrFilterSize; i+=2) { /* same packing for chroma */
2173 *(const void**)&chrMmxFilter[s*i ]= chrSrcPtr[i ];
2174 *(const void**)&chrMmxFilter[s*i+APCK_PTR2/4 ]= chrSrcPtr[i+(vChrFilterSize>1)];
2175 chrMmxFilter[s*i+APCK_COEF/4 ]=
2176 chrMmxFilter[s*i+APCK_COEF/4+1]= vChrFilter[chrDstY*vChrFilterSize + i ]
2177 + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0);
2180 for (i=0; i<vLumFilterSize; i++) { /* non-accurate layout: 4 int32 per tap */
2181 lumMmxFilter[4*i+0]= (int32_t)lumSrcPtr[i]; /* row pointer, low 32 bits */
2182 lumMmxFilter[4*i+1]= (uint64_t)lumSrcPtr[i] >> 32; /* row pointer, high 32 bits (zero on 32-bit hosts) */
2183 lumMmxFilter[4*i+2]=
2184 lumMmxFilter[4*i+3]=
2185 ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001; /* replicate the 16-bit coeff into both halves */
2186 if (CONFIG_SWSCALE_ALPHA && alpPixBuf) {
2187 alpMmxFilter[4*i+0]= (int32_t)alpSrcPtr[i];
2188 alpMmxFilter[4*i+1]= (uint64_t)alpSrcPtr[i] >> 32;
2189 alpMmxFilter[4*i+2]=
2190 alpMmxFilter[4*i+3]= lumMmxFilter[4*i+2]; /* alpha reuses the luma coefficients */
2193 for (i=0; i<vChrFilterSize; i++) { /* same layout for chroma */
2194 chrMmxFilter[4*i+0]= (int32_t)chrSrcPtr[i];
2195 chrMmxFilter[4*i+1]= (uint64_t)chrSrcPtr[i] >> 32;
2196 chrMmxFilter[4*i+2]=
2197 chrMmxFilter[4*i+3]=
2198 ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001;
2203 #endif /* !COMPILE_TEMPLATE_MMX2 */
2205 static void RENAME(sws_init_swScale)(SwsContext *c)
2207 enum PixelFormat srcFormat = c->srcFormat;
2209 if (!(c->flags & SWS_BITEXACT)) {
2210 if (c->flags & SWS_ACCURATE_RND) {
2211 c->yuv2yuv1 = RENAME(yuv2yuv1_ar );
2212 c->yuv2yuvX = RENAME(yuv2yuvX_ar );
2213 switch (c->dstFormat) {
2214 case PIX_FMT_RGB32: c->yuv2packedX = RENAME(yuv2rgb32_X_ar); break;
2215 case PIX_FMT_BGR24: c->yuv2packedX = RENAME(yuv2bgr24_X_ar); break;
2216 case PIX_FMT_RGB555: c->yuv2packedX = RENAME(yuv2rgb555_X_ar); break;
2217 case PIX_FMT_RGB565: c->yuv2packedX = RENAME(yuv2rgb565_X_ar); break;
2218 case PIX_FMT_YUYV422: c->yuv2packedX = RENAME(yuv2yuyv422_X_ar); break;
2222 c->yuv2yuv1 = RENAME(yuv2yuv1 );
2223 c->yuv2yuvX = RENAME(yuv2yuvX );
2224 switch (c->dstFormat) {
2225 case PIX_FMT_RGB32: c->yuv2packedX = RENAME(yuv2rgb32_X); break;
2226 case PIX_FMT_BGR24: c->yuv2packedX = RENAME(yuv2bgr24_X); break;
2227 case PIX_FMT_RGB555: c->yuv2packedX = RENAME(yuv2rgb555_X); break;
2228 case PIX_FMT_RGB565: c->yuv2packedX = RENAME(yuv2rgb565_X); break;
2229 case PIX_FMT_YUYV422: c->yuv2packedX = RENAME(yuv2yuyv422_X); break;
2233 switch (c->dstFormat) {
2235 c->yuv2packed1 = RENAME(yuv2rgb32_1);
2236 c->yuv2packed2 = RENAME(yuv2rgb32_2);
2239 c->yuv2packed1 = RENAME(yuv2bgr24_1);
2240 c->yuv2packed2 = RENAME(yuv2bgr24_2);
2242 case PIX_FMT_RGB555:
2243 c->yuv2packed1 = RENAME(yuv2rgb555_1);
2244 c->yuv2packed2 = RENAME(yuv2rgb555_2);
2246 case PIX_FMT_RGB565:
2247 c->yuv2packed1 = RENAME(yuv2rgb565_1);
2248 c->yuv2packed2 = RENAME(yuv2rgb565_2);
2250 case PIX_FMT_YUYV422:
2251 c->yuv2packed1 = RENAME(yuv2yuyv422_1);
2252 c->yuv2packed2 = RENAME(yuv2yuyv422_2);
2259 c->hScale = RENAME(hScale );
2261 // Use the new MMX scaler if the MMX2 one can't be used (it is faster than the x86 ASM one).
2262 #if COMPILE_TEMPLATE_MMX2
2263 if (c->flags & SWS_FAST_BILINEAR && c->canMMX2BeUsed)
2265 c->hyscale_fast = RENAME(hyscale_fast);
2266 c->hcscale_fast = RENAME(hcscale_fast);
2268 #endif /* COMPILE_TEMPLATE_MMX2 */
2269 c->hyscale_fast = NULL;
2270 c->hcscale_fast = NULL;
2271 #if COMPILE_TEMPLATE_MMX2
2273 #endif /* COMPILE_TEMPLATE_MMX2 */
2276 case PIX_FMT_YUYV422 : c->chrToYV12 = RENAME(yuy2ToUV); break;
2277 case PIX_FMT_UYVY422 : c->chrToYV12 = RENAME(uyvyToUV); break;
2278 case PIX_FMT_NV12 : c->chrToYV12 = RENAME(nv12ToUV); break;
2279 case PIX_FMT_NV21 : c->chrToYV12 = RENAME(nv21ToUV); break;
2280 case PIX_FMT_YUV420P16BE:
2281 case PIX_FMT_YUV422P16BE:
2282 case PIX_FMT_YUV444P16BE: c->chrToYV12 = RENAME(BEToUV); break;
2283 case PIX_FMT_YUV420P16LE:
2284 case PIX_FMT_YUV422P16LE:
2285 case PIX_FMT_YUV444P16LE: c->chrToYV12 = RENAME(LEToUV); break;
2288 if (!c->chrSrcHSubSample) {
2290 case PIX_FMT_BGR24 : c->chrToYV12 = RENAME(bgr24ToUV); break;
2291 case PIX_FMT_RGB24 : c->chrToYV12 = RENAME(rgb24ToUV); break;
2296 switch (srcFormat) {
2297 case PIX_FMT_YUYV422 :
2298 case PIX_FMT_YUV420P16BE:
2299 case PIX_FMT_YUV422P16BE:
2300 case PIX_FMT_YUV444P16BE:
2301 case PIX_FMT_Y400A :
2302 case PIX_FMT_GRAY16BE : c->lumToYV12 = RENAME(yuy2ToY); break;
2303 case PIX_FMT_UYVY422 :
2304 case PIX_FMT_YUV420P16LE:
2305 case PIX_FMT_YUV422P16LE:
2306 case PIX_FMT_YUV444P16LE:
2307 case PIX_FMT_GRAY16LE : c->lumToYV12 = RENAME(uyvyToY); break;
2308 case PIX_FMT_BGR24 : c->lumToYV12 = RENAME(bgr24ToY); break;
2309 case PIX_FMT_RGB24 : c->lumToYV12 = RENAME(rgb24ToY); break;
2313 switch (srcFormat) {
2314 case PIX_FMT_Y400A : c->alpToYV12 = RENAME(yuy2ToY); break;