/* Prefetch / streaming-store primitives, selected per CPU template.
 * MMXEXT builds get real prefetchnta + non-temporal movntq; plain MMX
 * builds fall back to a nop comment and an ordinary movq.
 * NOTE(review): the leading numeric prefixes on each line and the missing
 * #else/#endif lines appear to be extraction artifacts of this copy —
 * compare against the pristine file before building. */
26 #if COMPILE_TEMPLATE_MMXEXT
27 #define PREFETCH "prefetchnta"
29 #define PREFETCH " # nop"
32 #if COMPILE_TEMPLATE_MMXEXT
33 #define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
34 #define MOVNTQ2 "movntq "
36 #define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
37 #define MOVNTQ2 "movq "
/* Indirection so macro arguments are expanded before stringification. */
39 #define MOVNTQ(a,b) REAL_MOVNTQ(a,b)
41 #if !COMPILE_TEMPLATE_MMXEXT
46 __asm__
volatile(
"pxor %%mm0, %%mm0\n\t"
47 "movq (%0), %%mm3\n\t"
48 "movq %%mm3, %%mm4\n\t"
49 "psrlq $24, %%mm3\n\t"
50 "psllq $40, %%mm4\n\t"
51 "por %%mm4, %%mm3\n\t"
52 "movq %%mm3, %%mm4\n\t"
53 "punpcklbw %%mm0, %%mm3\n\t"
54 "punpckhbw %%mm0, %%mm4\n\t"
58 __asm__
volatile(
"pxor %%mm0, %%mm0\n\t"
59 "movq (%0), %%mm3\n\t"
60 "movq %%mm3, %%mm4\n\t"
61 "punpcklbw %%mm0, %%mm3\n\t"
62 "punpckhbw %%mm0, %%mm4\n\t"
69 static void RENAME(yuv2yuvX)(
const int16_t *
filter,
int filterSize,
77 "movq %%mm3, %%mm6\n\t"
78 "movq %%mm4, %%mm7\n\t"
80 "mov %0, %%"REG_d
" \n\t"\
81 "mov (%%"REG_d
"), %%"REG_S
" \n\t"\
84 "movq 8(%%"REG_d
"), %%mm0 \n\t" \
85 "movq (%%"REG_S
", %%"REG_c
", 2), %%mm2 \n\t" \
86 "movq 8(%%"REG_S
", %%"REG_c
", 2), %%mm5 \n\t" \
87 "add $16, %%"REG_d
" \n\t"\
88 "mov (%%"REG_d
"), %%"REG_S
" \n\t"\
89 "test %%"REG_S
", %%"REG_S
" \n\t"\
90 "pmulhw %%mm0, %%mm2 \n\t"\
91 "pmulhw %%mm0, %%mm5 \n\t"\
92 "paddw %%mm2, %%mm3 \n\t"\
93 "paddw %%mm5, %%mm4 \n\t"\
95 "psraw $3, %%mm3 \n\t"\
96 "psraw $3, %%mm4 \n\t"\
97 "packuswb %%mm4, %%mm3 \n\t"
98 MOVNTQ2 " %%mm3, (%1, %%"REG_c
")\n\t"
99 "add $8, %%"REG_c
" \n\t"\
100 "cmp %2, %%"REG_c
" \n\t"\
101 "movq %%mm6, %%mm3\n\t"
102 "movq %%mm7, %%mm4\n\t"
103 "mov %0, %%"REG_d
" \n\t"\
104 "mov (%%"REG_d
"), %%"REG_S
" \n\t"\
108 :
"%"REG_d,
"%"REG_S,
"%"REG_c
/* Multi-tap vertical chroma scaling: walks the chroma filter list at
 * CHR_MMX_FILTER_OFFSET(%0), multiply-accumulating U (mm3) and V (mm4)
 * rows (V fetched at %6 bytes past U via the "add %6" step) until the
 * next source pointer tested by the trailing "test" is NULL.
 * REG_a is the pixel index; REG_d/REG_S are clobbered as filter cursor
 * and source pointer. NOTE(review): the loop label lines between the
 * init and the accumulate body are missing from this extraction. */
112 #define YSCALEYUV2PACKEDX_UV \
114 "xor %%"REG_a", %%"REG_a" \n\t"\
118 "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
119 "mov (%%"REG_d"), %%"REG_S" \n\t"\
/* seed both accumulators with the rounding constant */ \
120 "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
121 "movq %%mm3, %%mm4 \n\t"\
124 "movq 8(%%"REG_d"), %%mm0 \n\t" \
125 "movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" \
126 "add %6, %%"REG_S" \n\t" \
127 "movq (%%"REG_S", %%"REG_a"), %%mm5 \n\t" \
128 "add $16, %%"REG_d" \n\t"\
129 "mov (%%"REG_d"), %%"REG_S" \n\t"\
130 "pmulhw %%mm0, %%mm2 \n\t"\
131 "pmulhw %%mm0, %%mm5 \n\t"\
132 "paddw %%mm2, %%mm3 \n\t"\
133 "paddw %%mm5, %%mm4 \n\t"\
134 "test %%"REG_S", %%"REG_S" \n\t"\
/* Multi-tap vertical luma scaling, parameterized on the filter-list
 * offset and on which MMX registers act as coefficient, the two 8-byte
 * source halves, and the two accumulators — so the same body serves
 * luma and alpha planes. Accumulators start at VROUNDER_OFFSET and the
 * filter list is again NULL-terminated (trailing "test"). */
137 #define YSCALEYUV2PACKEDX_YA(offset,coeff,src1,src2,dst1,dst2) \
138 "lea "offset"(%0), %%"REG_d" \n\t"\
139 "mov (%%"REG_d"), %%"REG_S" \n\t"\
140 "movq "VROUNDER_OFFSET"(%0), "#dst1" \n\t"\
141 "movq "#dst1", "#dst2" \n\t"\
142 "movq 8(%%"REG_d"), "#coeff" \n\t" \
145 "movq (%%"REG_S", %%"REG_a", 2), "#src1" \n\t" \
146 "movq 8(%%"REG_S", %%"REG_a", 2), "#src2" \n\t" \
147 "add $16, %%"REG_d" \n\t"\
148 "mov (%%"REG_d"), %%"REG_S" \n\t"\
149 "pmulhw "#coeff", "#src1" \n\t"\
150 "pmulhw "#coeff", "#src2" \n\t"\
151 "paddw "#src1", "#dst1" \n\t"\
152 "paddw "#src2", "#dst2" \n\t"\
153 "test %%"REG_S", "#index_unused_note_removed""\n\t"\
/* Full vertical-scale prologue: chroma pass, then luma pass with
 * mm1/mm7 as luma accumulators. */
156 #define YSCALEYUV2PACKEDX \
157 YSCALEYUV2PACKEDX_UV \
158 YSCALEYUV2PACKEDX_YA(LUM_MMX_FILTER_OFFSET,%%mm0,%%mm2,%%mm5,%%mm1,%%mm7) \
/* Shared asm epilogue: operand list (%0 = &c->redDither anchors the
 * context, %4 = dest, %5 = dstW_reg, %6 = uv_off) and clobbers for every
 * YSCALEYUV2PACKEDX user. */
160 #define YSCALEYUV2PACKEDX_END \
161 :: "r" (&c->redDither), \
162 "m" (dummy), "m" (dummy), "m" (dummy),\
163 "r" (dest), "m" (dstW_reg), "m"(uv_off) \
164 : "%"REG_a, "%"REG_d, "%"REG_S \
167 #define YSCALEYUV2PACKEDX_ACCURATE_UV \
169 "xor %%"REG_a", %%"REG_a" \n\t"\
173 "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
174 "mov (%%"REG_d"), %%"REG_S" \n\t"\
175 "pxor %%mm4, %%mm4 \n\t"\
176 "pxor %%mm5, %%mm5 \n\t"\
177 "pxor %%mm6, %%mm6 \n\t"\
178 "pxor %%mm7, %%mm7 \n\t"\
181 "movq (%%"REG_S", %%"REG_a"), %%mm0 \n\t" \
182 "add %6, %%"REG_S" \n\t" \
183 "movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" \
184 "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
185 "movq (%%"REG_S", %%"REG_a"), %%mm1 \n\t" \
186 "movq %%mm0, %%mm3 \n\t"\
187 "punpcklwd %%mm1, %%mm0 \n\t"\
188 "punpckhwd %%mm1, %%mm3 \n\t"\
189 "movq "STR(APCK_COEF)"(%%"REG_d"),%%mm1 \n\t" \
190 "pmaddwd %%mm1, %%mm0 \n\t"\
191 "pmaddwd %%mm1, %%mm3 \n\t"\
192 "paddd %%mm0, %%mm4 \n\t"\
193 "paddd %%mm3, %%mm5 \n\t"\
194 "add %6, %%"REG_S" \n\t" \
195 "movq (%%"REG_S", %%"REG_a"), %%mm3 \n\t" \
196 "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
197 "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
198 "test %%"REG_S", %%"REG_S" \n\t"\
199 "movq %%mm2, %%mm0 \n\t"\
200 "punpcklwd %%mm3, %%mm2 \n\t"\
201 "punpckhwd %%mm3, %%mm0 \n\t"\
202 "pmaddwd %%mm1, %%mm2 \n\t"\
203 "pmaddwd %%mm1, %%mm0 \n\t"\
204 "paddd %%mm2, %%mm6 \n\t"\
205 "paddd %%mm0, %%mm7 \n\t"\
207 "psrad $16, %%mm4 \n\t"\
208 "psrad $16, %%mm5 \n\t"\
209 "psrad $16, %%mm6 \n\t"\
210 "psrad $16, %%mm7 \n\t"\
211 "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
212 "packssdw %%mm5, %%mm4 \n\t"\
213 "packssdw %%mm7, %%mm6 \n\t"\
214 "paddw %%mm0, %%mm4 \n\t"\
215 "paddw %%mm0, %%mm6 \n\t"\
216 "movq %%mm4, "U_TEMP"(%0) \n\t"\
217 "movq %%mm6, "V_TEMP"(%0) \n\t"\
219 #define YSCALEYUV2PACKEDX_ACCURATE_YA(offset) \
220 "lea "offset"(%0), %%"REG_d" \n\t"\
221 "mov (%%"REG_d"), %%"REG_S" \n\t"\
222 "pxor %%mm1, %%mm1 \n\t"\
223 "pxor %%mm5, %%mm5 \n\t"\
224 "pxor %%mm7, %%mm7 \n\t"\
225 "pxor %%mm6, %%mm6 \n\t"\
228 "movq (%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" \
229 "movq 8(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" \
230 "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
231 "movq (%%"REG_S", %%"REG_a", 2), %%mm4 \n\t" \
232 "movq %%mm0, %%mm3 \n\t"\
233 "punpcklwd %%mm4, %%mm0 \n\t"\
234 "punpckhwd %%mm4, %%mm3 \n\t"\
235 "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm4 \n\t" \
236 "pmaddwd %%mm4, %%mm0 \n\t"\
237 "pmaddwd %%mm4, %%mm3 \n\t"\
238 "paddd %%mm0, %%mm1 \n\t"\
239 "paddd %%mm3, %%mm5 \n\t"\
240 "movq 8(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" \
241 "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
242 "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
243 "test %%"REG_S", %%"REG_S" \n\t"\
244 "movq %%mm2, %%mm0 \n\t"\
245 "punpcklwd %%mm3, %%mm2 \n\t"\
246 "punpckhwd %%mm3, %%mm0 \n\t"\
247 "pmaddwd %%mm4, %%mm2 \n\t"\
248 "pmaddwd %%mm4, %%mm0 \n\t"\
249 "paddd %%mm2, %%mm7 \n\t"\
250 "paddd %%mm0, %%mm6 \n\t"\
252 "psrad $16, %%mm1 \n\t"\
253 "psrad $16, %%mm5 \n\t"\
254 "psrad $16, %%mm7 \n\t"\
255 "psrad $16, %%mm6 \n\t"\
256 "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
257 "packssdw %%mm5, %%mm1 \n\t"\
258 "packssdw %%mm6, %%mm7 \n\t"\
259 "paddw %%mm0, %%mm1 \n\t"\
260 "paddw %%mm0, %%mm7 \n\t"\
261 "movq "U_TEMP"(%0), %%mm3 \n\t"\
262 "movq "V_TEMP"(%0), %%mm4 \n\t"\
/* Higher-precision variant: chroma then luma pass using the pmaddwd
 * 32-bit accumulation bodies defined above. */
264 #define YSCALEYUV2PACKEDX_ACCURATE \
265 YSCALEYUV2PACKEDX_ACCURATE_UV \
266 YSCALEYUV2PACKEDX_ACCURATE_YA(LUM_MMX_FILTER_OFFSET)
268 #define YSCALEYUV2RGBX \
269 "psubw "U_OFFSET"(%0), %%mm3 \n\t" \
270 "psubw "V_OFFSET"(%0), %%mm4 \n\t" \
271 "movq %%mm3, %%mm2 \n\t" \
272 "movq %%mm4, %%mm5 \n\t" \
273 "pmulhw "UG_COEFF"(%0), %%mm3 \n\t"\
274 "pmulhw "VG_COEFF"(%0), %%mm4 \n\t"\
276 "pmulhw "UB_COEFF"(%0), %%mm2 \n\t"\
277 "pmulhw "VR_COEFF"(%0), %%mm5 \n\t"\
278 "psubw "Y_OFFSET"(%0), %%mm1 \n\t" \
279 "psubw "Y_OFFSET"(%0), %%mm7 \n\t" \
280 "pmulhw "Y_COEFF"(%0), %%mm1 \n\t"\
281 "pmulhw "Y_COEFF"(%0), %%mm7 \n\t"\
283 "paddw %%mm3, %%mm4 \n\t"\
284 "movq %%mm2, %%mm0 \n\t"\
285 "movq %%mm5, %%mm6 \n\t"\
286 "movq %%mm4, %%mm3 \n\t"\
287 "punpcklwd %%mm2, %%mm2 \n\t"\
288 "punpcklwd %%mm5, %%mm5 \n\t"\
289 "punpcklwd %%mm4, %%mm4 \n\t"\
290 "paddw %%mm1, %%mm2 \n\t"\
291 "paddw %%mm1, %%mm5 \n\t"\
292 "paddw %%mm1, %%mm4 \n\t"\
293 "punpckhwd %%mm0, %%mm0 \n\t"\
294 "punpckhwd %%mm6, %%mm6 \n\t"\
295 "punpckhwd %%mm3, %%mm3 \n\t"\
296 "paddw %%mm7, %%mm0 \n\t"\
297 "paddw %%mm7, %%mm6 \n\t"\
298 "paddw %%mm7, %%mm3 \n\t"\
300 "packuswb %%mm0, %%mm2 \n\t"\
301 "packuswb %%mm6, %%mm5 \n\t"\
302 "packuswb %%mm3, %%mm4 \n\t"\
/* Interleave packed b/g/r/a byte registers into four quadwords of
 * 32-bit BGRA pixels (punpcklbw/punpckhbw then punpcklwd/punpckhwd),
 * store 32 bytes via MOVNTQ, then advance index by 8 pixels and compare
 * against dstw (the conditional jump line is outside this macro in the
 * original). q0/q2/q3/t are caller-chosen scratch registers. */
304 #define REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) \
305 "movq "#b", "#q2" \n\t" \
306 "movq "#r", "#t" \n\t" \
307 "punpcklbw "#g", "#b" \n\t" \
308 "punpcklbw "#a", "#r" \n\t" \
309 "punpckhbw "#g", "#q2" \n\t" \
310 "punpckhbw "#a", "#t" \n\t" \
311 "movq "#b", "#q0" \n\t" \
312 "movq "#q2", "#q3" \n\t" \
313 "punpcklwd "#r", "#q0" \n\t" \
314 "punpckhwd "#r", "#b" \n\t" \
315 "punpcklwd "#t", "#q2" \n\t" \
316 "punpckhwd "#t", "#q3" \n\t" \
318 MOVNTQ( q0, (dst, index, 4))\
319 MOVNTQ( b, 8(dst, index, 4))\
320 MOVNTQ( q2, 16(dst, index, 4))\
321 MOVNTQ( q3, 24(dst, index, 4))\
323 "add $8, "#index" \n\t"\
324 "cmp "#dstw", "#index" \n\t"\
/* Indirection so arguments expand before stringification. */
326 #define WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t)
329 const int16_t **lumSrc,
int lumFilterSize,
330 const int16_t *chrFilter,
const int16_t **chrUSrc,
331 const int16_t **chrVSrc,
332 int chrFilterSize,
const int16_t **alpSrc,
339 if (CONFIG_SWSCALE_ALPHA &&
c->alpPixBuf) {
342 "movq %%mm2, "U_TEMP"(%0) \n\t"
343 "movq %%mm4, "V_TEMP"(%0) \n\t"
344 "movq %%mm5, "Y_TEMP"(%0) \n\t"
346 "movq "Y_TEMP"(%0), %%mm5 \n\t"
347 "psraw $3, %%mm1 \n\t"
348 "psraw $3, %%mm7 \n\t"
349 "packuswb %%mm7, %%mm1 \n\t"
350 WRITEBGR32(%4, %5, %%REGa, %%mm3, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm2, %%mm6)
355 "pcmpeqd %%mm7, %%mm7 \n\t"
356 WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
362 const int16_t **lumSrc,
int lumFilterSize,
363 const int16_t *chrFilter,
const int16_t **chrUSrc,
364 const int16_t **chrVSrc,
365 int chrFilterSize,
const int16_t **alpSrc,
372 if (CONFIG_SWSCALE_ALPHA &&
c->alpPixBuf) {
376 "psraw $3, %%mm1 \n\t"
377 "psraw $3, %%mm7 \n\t"
378 "packuswb %%mm7, %%mm1 \n\t"
379 WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
384 "pcmpeqd %%mm7, %%mm7 \n\t"
385 WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
/* Pack b (mm2), g (mm4), r (mm5) byte registers into RGB565:
 * mask to 5/6/5 significant bits (bF8/bFC tables), shift into place,
 * OR together, and stream out 16 bytes (8 pixels of 2 bytes).
 * Assumes mm7 is zero on entry (used by the punpck steps). */
390 #define REAL_WRITERGB16(dst, dstw, index) \
391 "pand "MANGLE(bF8)", %%mm2 \n\t" \
392 "pand "MANGLE(bFC)", %%mm4 \n\t" \
393 "pand "MANGLE(bF8)", %%mm5 \n\t" \
394 "psrlq $3, %%mm2 \n\t"\
396 "movq %%mm2, %%mm1 \n\t"\
397 "movq %%mm4, %%mm3 \n\t"\
399 "punpcklbw %%mm7, %%mm3 \n\t"\
400 "punpcklbw %%mm5, %%mm2 \n\t"\
401 "punpckhbw %%mm7, %%mm4 \n\t"\
402 "punpckhbw %%mm5, %%mm1 \n\t"\
404 "psllq $3, %%mm3 \n\t"\
405 "psllq $3, %%mm4 \n\t"\
407 "por %%mm3, %%mm2 \n\t"\
408 "por %%mm4, %%mm1 \n\t"\
410 MOVNTQ(%%mm2, (dst, index, 2))\
411 MOVNTQ(%%mm1, 8(dst, index, 2))\
413 "add $8, "#index" \n\t"\
414 "cmp "#dstw", "#index" \n\t"\
416 #define WRITERGB16(dst, dstw, index) REAL_WRITERGB16(dst, dstw, index)
419 const int16_t **lumSrc,
int lumFilterSize,
420 const int16_t *chrFilter,
const int16_t **chrUSrc,
421 const int16_t **chrVSrc,
422 int chrFilterSize,
const int16_t **alpSrc,
431 "pxor %%mm7, %%mm7 \n\t"
443 const int16_t **lumSrc,
int lumFilterSize,
444 const int16_t *chrFilter,
const int16_t **chrUSrc,
445 const int16_t **chrVSrc,
446 int chrFilterSize,
const int16_t **alpSrc,
455 "pxor %%mm7, %%mm7 \n\t"
/* RGB555 sibling of WRITERGB16: all three channels masked to 5 bits
 * (bF8), green shifted by 2 instead of 3, extra "psrlq $1" dropping the
 * red channel into the 15-bit layout. Assumes mm7 == 0 on entry. */
466 #define REAL_WRITERGB15(dst, dstw, index) \
467 "pand "MANGLE(bF8)", %%mm2 \n\t" \
468 "pand "MANGLE(bF8)", %%mm4 \n\t" \
469 "pand "MANGLE(bF8)", %%mm5 \n\t" \
470 "psrlq $3, %%mm2 \n\t"\
471 "psrlq $1, %%mm5 \n\t"\
473 "movq %%mm2, %%mm1 \n\t"\
474 "movq %%mm4, %%mm3 \n\t"\
476 "punpcklbw %%mm7, %%mm3 \n\t"\
477 "punpcklbw %%mm5, %%mm2 \n\t"\
478 "punpckhbw %%mm7, %%mm4 \n\t"\
479 "punpckhbw %%mm5, %%mm1 \n\t"\
481 "psllq $2, %%mm3 \n\t"\
482 "psllq $2, %%mm4 \n\t"\
484 "por %%mm3, %%mm2 \n\t"\
485 "por %%mm4, %%mm1 \n\t"\
487 MOVNTQ(%%mm2, (dst, index, 2))\
488 MOVNTQ(%%mm1, 8(dst, index, 2))\
490 "add $8, "#index" \n\t"\
491 "cmp "#dstw", "#index" \n\t"\
493 #define WRITERGB15(dst, dstw, index) REAL_WRITERGB15(dst, dstw, index)
496 const int16_t **lumSrc,
int lumFilterSize,
497 const int16_t *chrFilter,
const int16_t **chrUSrc,
498 const int16_t **chrVSrc,
499 int chrFilterSize,
const int16_t **alpSrc,
508 "pxor %%mm7, %%mm7 \n\t"
520 const int16_t **lumSrc,
int lumFilterSize,
521 const int16_t *chrFilter,
const int16_t **chrUSrc,
522 const int16_t **chrVSrc,
523 int chrFilterSize,
const int16_t **alpSrc,
532 "pxor %%mm7, %%mm7 \n\t"
543 #define WRITEBGR24MMX(dst, dstw, index) \
545 "movq %%mm2, %%mm1 \n\t" \
546 "movq %%mm5, %%mm6 \n\t" \
547 "punpcklbw %%mm4, %%mm2 \n\t" \
548 "punpcklbw %%mm7, %%mm5 \n\t" \
549 "punpckhbw %%mm4, %%mm1 \n\t" \
550 "punpckhbw %%mm7, %%mm6 \n\t" \
551 "movq %%mm2, %%mm0 \n\t" \
552 "movq %%mm1, %%mm3 \n\t" \
553 "punpcklwd %%mm5, %%mm0 \n\t" \
554 "punpckhwd %%mm5, %%mm2 \n\t" \
555 "punpcklwd %%mm6, %%mm1 \n\t" \
556 "punpckhwd %%mm6, %%mm3 \n\t" \
558 "movq %%mm0, %%mm4 \n\t" \
559 "movq %%mm2, %%mm6 \n\t" \
560 "movq %%mm1, %%mm5 \n\t" \
561 "movq %%mm3, %%mm7 \n\t" \
563 "psllq $40, %%mm0 \n\t" \
564 "psllq $40, %%mm2 \n\t" \
565 "psllq $40, %%mm1 \n\t" \
566 "psllq $40, %%mm3 \n\t" \
568 "punpckhdq %%mm4, %%mm0 \n\t" \
569 "punpckhdq %%mm6, %%mm2 \n\t" \
570 "punpckhdq %%mm5, %%mm1 \n\t" \
571 "punpckhdq %%mm7, %%mm3 \n\t" \
573 "psrlq $8, %%mm0 \n\t" \
574 "movq %%mm2, %%mm6 \n\t" \
575 "psllq $40, %%mm2 \n\t" \
576 "por %%mm2, %%mm0 \n\t" \
577 MOVNTQ(%%mm0, (dst))\
579 "psrlq $24, %%mm6 \n\t" \
580 "movq %%mm1, %%mm5 \n\t" \
581 "psllq $24, %%mm1 \n\t" \
582 "por %%mm1, %%mm6 \n\t" \
583 MOVNTQ(%%mm6, 8(dst))\
585 "psrlq $40, %%mm5 \n\t" \
586 "psllq $8, %%mm3 \n\t" \
587 "por %%mm3, %%mm5 \n\t" \
588 MOVNTQ(%%mm5, 16(dst))\
590 "add $24, "#dst" \n\t"\
592 "add $8, "#index" \n\t"\
593 "cmp "#dstw", "#index" \n\t"\
596 #define WRITEBGR24MMXEXT(dst, dstw, index) \
598 "movq "MANGLE(ff_M24A)", %%mm0 \n\t"\
599 "movq "MANGLE(ff_M24C)", %%mm7 \n\t"\
600 "pshufw $0x50, %%mm2, %%mm1 \n\t" \
601 "pshufw $0x50, %%mm4, %%mm3 \n\t" \
602 "pshufw $0x00, %%mm5, %%mm6 \n\t" \
604 "pand %%mm0, %%mm1 \n\t" \
605 "pand %%mm0, %%mm3 \n\t" \
606 "pand %%mm7, %%mm6 \n\t" \
608 "psllq $8, %%mm3 \n\t" \
609 "por %%mm1, %%mm6 \n\t"\
610 "por %%mm3, %%mm6 \n\t"\
611 MOVNTQ(%%mm6, (dst))\
613 "psrlq $8, %%mm4 \n\t" \
614 "pshufw $0xA5, %%mm2, %%mm1 \n\t" \
615 "pshufw $0x55, %%mm4, %%mm3 \n\t" \
616 "pshufw $0xA5, %%mm5, %%mm6 \n\t" \
618 "pand "MANGLE(ff_M24B)", %%mm1 \n\t" \
619 "pand %%mm7, %%mm3 \n\t" \
620 "pand %%mm0, %%mm6 \n\t" \
622 "por %%mm1, %%mm3 \n\t" \
623 "por %%mm3, %%mm6 \n\t"\
624 MOVNTQ(%%mm6, 8(dst))\
626 "pshufw $0xFF, %%mm2, %%mm1 \n\t" \
627 "pshufw $0xFA, %%mm4, %%mm3 \n\t" \
628 "pshufw $0xFA, %%mm5, %%mm6 \n\t" \
630 "pand %%mm7, %%mm1 \n\t" \
631 "pand %%mm0, %%mm3 \n\t" \
632 "pand "MANGLE(ff_M24B)", %%mm6 \n\t" \
634 "por %%mm1, %%mm3 \n\t"\
635 "por %%mm3, %%mm6 \n\t"\
636 MOVNTQ(%%mm6, 16(dst))\
638 "add $24, "#dst" \n\t"\
640 "add $8, "#index" \n\t"\
641 "cmp "#dstw", "#index" \n\t"\
/* Pick the 24-bit writer: pshufw-based MMXEXT path vs. plain-MMX
 * shift/or path. NOTE(review): the #else/#endif lines are missing from
 * this extraction — restore them from the pristine file. */
644 #if COMPILE_TEMPLATE_MMXEXT
646 #define WRITEBGR24(dst, dstw, index) WRITEBGR24MMXEXT(dst, dstw, index)
649 #define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX(dst, dstw, index)
653 const int16_t **lumSrc,
int lumFilterSize,
654 const int16_t *chrFilter,
const int16_t **chrUSrc,
655 const int16_t **chrVSrc,
656 int chrFilterSize,
const int16_t **alpSrc,
665 "pxor %%mm7, %%mm7 \n\t"
666 "lea (%%"REG_a
", %%"REG_a
", 2), %%"REG_c
"\n\t"
667 "add %4, %%"REG_c
" \n\t"
669 ::
"r" (&
c->redDither),
670 "m" (dummy),
"m" (
dummy),
"m" (dummy),
671 "r" (dest),
"m" (dstW_reg),
"m"(
uv_off)
672 :
"%"REG_a,
"%"REG_c,
"%"REG_d,
"%"REG_S
677 const int16_t **lumSrc,
int lumFilterSize,
678 const int16_t *chrFilter,
const int16_t **chrUSrc,
679 const int16_t **chrVSrc,
680 int chrFilterSize,
const int16_t **alpSrc,
689 "pxor %%mm7, %%mm7 \n\t"
690 "lea (%%"REG_a
", %%"REG_a
", 2), %%"REG_c
" \n\t"
691 "add %4, %%"REG_c
" \n\t"
693 ::
"r" (&
c->redDither),
694 "m" (dummy),
"m" (
dummy),
"m" (dummy),
695 "r" (dest),
"m" (dstW_reg),
"m"(
uv_off)
696 :
"%"REG_a,
"%"REG_c,
"%"REG_d,
"%"REG_S
/* Pack Y (mm1/mm7), U (mm3), V (mm4) words down to bytes and interleave
 * into YUYV order (punpcklbw U/V, then low/high byte interleave with
 * luma), streaming out 16 bytes = 8 pixels per iteration. */
700 #define REAL_WRITEYUY2(dst, dstw, index) \
701 "packuswb %%mm3, %%mm3 \n\t"\
702 "packuswb %%mm4, %%mm4 \n\t"\
703 "packuswb %%mm7, %%mm1 \n\t"\
704 "punpcklbw %%mm4, %%mm3 \n\t"\
705 "movq %%mm1, %%mm7 \n\t"\
706 "punpcklbw %%mm3, %%mm1 \n\t"\
707 "punpckhbw %%mm3, %%mm7 \n\t"\
709 MOVNTQ(%%mm1, (dst, index, 2))\
710 MOVNTQ(%%mm7, 8(dst, index, 2))\
712 "add $8, "#index" \n\t"\
713 "cmp "#dstw", "#index" \n\t"\
715 #define WRITEYUY2(dst, dstw, index) REAL_WRITEYUY2(dst, dstw, index)
718 const int16_t **lumSrc,
int lumFilterSize,
719 const int16_t *chrFilter,
const int16_t **chrUSrc,
720 const int16_t **chrVSrc,
721 int chrFilterSize,
const int16_t **alpSrc,
730 "psraw $3, %%mm3 \n\t"
731 "psraw $3, %%mm4 \n\t"
732 "psraw $3, %%mm1 \n\t"
733 "psraw $3, %%mm7 \n\t"
739 const int16_t **lumSrc,
int lumFilterSize,
740 const int16_t *chrFilter,
const int16_t **chrUSrc,
741 const int16_t **chrVSrc,
742 int chrFilterSize,
const int16_t **alpSrc,
751 "psraw $3, %%mm3 \n\t"
752 "psraw $3, %%mm4 \n\t"
753 "psraw $3, %%mm1 \n\t"
754 "psraw $3, %%mm7 \n\t"
759 #define REAL_YSCALEYUV2RGB_UV(index, c) \
760 "xor "#index", "#index" \n\t"\
763 "movq (%2, "#index"), %%mm2 \n\t" \
764 "movq (%3, "#index"), %%mm3 \n\t" \
765 "add "UV_OFF_BYTE"("#c"), "#index" \n\t" \
766 "movq (%2, "#index"), %%mm5 \n\t" \
767 "movq (%3, "#index"), %%mm4 \n\t" \
768 "sub "UV_OFF_BYTE"("#c"), "#index" \n\t" \
769 "psubw %%mm3, %%mm2 \n\t" \
770 "psubw %%mm4, %%mm5 \n\t" \
771 "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
772 "pmulhw %%mm0, %%mm2 \n\t" \
773 "pmulhw %%mm0, %%mm5 \n\t" \
774 "psraw $4, %%mm3 \n\t" \
775 "psraw $4, %%mm4 \n\t" \
776 "paddw %%mm2, %%mm3 \n\t" \
777 "paddw %%mm5, %%mm4 \n\t" \
778 "psubw "U_OFFSET"("#c"), %%mm3 \n\t" \
779 "psubw "V_OFFSET"("#c"), %%mm4 \n\t" \
780 "movq %%mm3, %%mm2 \n\t" \
781 "movq %%mm4, %%mm5 \n\t" \
782 "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
783 "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
786 #define REAL_YSCALEYUV2RGB_YA(index, c, b1, b2) \
787 "movq ("#b1", "#index", 2), %%mm0 \n\t" \
788 "movq ("#b2", "#index", 2), %%mm1 \n\t" \
789 "movq 8("#b1", "#index", 2), %%mm6 \n\t" \
790 "movq 8("#b2", "#index", 2), %%mm7 \n\t" \
791 "psubw %%mm1, %%mm0 \n\t" \
792 "psubw %%mm7, %%mm6 \n\t" \
793 "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" \
794 "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" \
795 "psraw $4, %%mm1 \n\t" \
796 "psraw $4, %%mm7 \n\t" \
797 "paddw %%mm0, %%mm1 \n\t" \
798 "paddw %%mm6, %%mm7 \n\t" \
800 #define REAL_YSCALEYUV2RGB_COEFF(c) \
801 "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
802 "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
803 "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" \
804 "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" \
805 "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
806 "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
808 "paddw %%mm3, %%mm4 \n\t"\
809 "movq %%mm2, %%mm0 \n\t"\
810 "movq %%mm5, %%mm6 \n\t"\
811 "movq %%mm4, %%mm3 \n\t"\
812 "punpcklwd %%mm2, %%mm2 \n\t"\
813 "punpcklwd %%mm5, %%mm5 \n\t"\
814 "punpcklwd %%mm4, %%mm4 \n\t"\
815 "paddw %%mm1, %%mm2 \n\t"\
816 "paddw %%mm1, %%mm5 \n\t"\
817 "paddw %%mm1, %%mm4 \n\t"\
818 "punpckhwd %%mm0, %%mm0 \n\t"\
819 "punpckhwd %%mm6, %%mm6 \n\t"\
820 "punpckhwd %%mm3, %%mm3 \n\t"\
821 "paddw %%mm7, %%mm0 \n\t"\
822 "paddw %%mm7, %%mm6 \n\t"\
823 "paddw %%mm7, %%mm3 \n\t"\
825 "packuswb %%mm0, %%mm2 \n\t"\
826 "packuswb %%mm6, %%mm5 \n\t"\
827 "packuswb %%mm3, %%mm4 \n\t"\
829 #define YSCALEYUV2RGB_YA(index, c, b1, b2) REAL_YSCALEYUV2RGB_YA(index, c, b1, b2)
/* Two-row-blend YUV->RGB: chroma interpolation, luma interpolation from
 * buffers %0/%1, then the shared colorspace-coefficient stage. */
831 #define YSCALEYUV2RGB(index, c) \
832 REAL_YSCALEYUV2RGB_UV(index, c) \
833 REAL_YSCALEYUV2RGB_YA(index, c, %0, %1) \
834 REAL_YSCALEYUV2RGB_COEFF(c)
840 const int16_t *ubuf[2],
const int16_t *vbuf[2],
841 const int16_t *abuf[2],
uint8_t *dest,
842 int dstW,
int yalpha,
int uvalpha,
int y)
844 const int16_t *buf0 = buf[0], *buf1 = buf[1],
845 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
847 if (CONFIG_SWSCALE_ALPHA &&
c->alpPixBuf) {
848 const int16_t *abuf0 = abuf[0], *abuf1 = abuf[1];
853 "psraw $3, %%mm1 \n\t"
854 "psraw $3, %%mm7 \n\t"
855 "packuswb %%mm7, %%mm1 \n\t"
856 WRITEBGR32(%4, 8280(%5), %%r8, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
857 ::
"c" (buf0),
"d" (buf1),
"S" (ubuf0),
"D" (ubuf1),
"r" (dest),
859 "r" (abuf0),
"r" (abuf1)
863 c->u_temp=(intptr_t)abuf0;
864 c->v_temp=(intptr_t)abuf1;
867 "mov %4, %%"REG_b
" \n\t"
868 "push %%"REG_BP
" \n\t"
872 "mov "U_TEMP"(%5), %0 \n\t"
873 "mov "V_TEMP"(%5), %1 \n\t"
875 "psraw $3, %%mm1 \n\t"
876 "psraw $3, %%mm7 \n\t"
877 "packuswb %%mm7, %%mm1 \n\t"
880 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
881 "pop %%"REG_BP
" \n\t"
883 ::
"c" (buf0),
"d" (buf1),
"S" (ubuf0),
"D" (ubuf1),
"m" (dest),
890 "mov %4, %%"REG_b
" \n\t"
891 "push %%"REG_BP
" \n\t"
893 "pcmpeqd %%mm7, %%mm7 \n\t"
894 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
895 "pop %%"REG_BP
" \n\t"
897 ::
"c" (buf0),
"d" (buf1),
"S" (ubuf0),
"D" (ubuf1),
"m" (dest),
904 const int16_t *ubuf[2],
const int16_t *vbuf[2],
905 const int16_t *abuf[2],
uint8_t *dest,
906 int dstW,
int yalpha,
int uvalpha,
int y)
908 const int16_t *buf0 = buf[0], *buf1 = buf[1],
909 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
914 "mov %4, %%"REG_b
" \n\t"
915 "push %%"REG_BP
" \n\t"
917 "pxor %%mm7, %%mm7 \n\t"
919 "pop %%"REG_BP
" \n\t"
921 ::
"c" (buf0),
"d" (buf1),
"S" (ubuf0),
"D" (ubuf1),
"m" (dest),
927 const int16_t *ubuf[2],
const int16_t *vbuf[2],
928 const int16_t *abuf[2],
uint8_t *dest,
929 int dstW,
int yalpha,
int uvalpha,
int y)
931 const int16_t *buf0 = buf[0], *buf1 = buf[1],
932 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
937 "mov %4, %%"REG_b
" \n\t"
938 "push %%"REG_BP
" \n\t"
940 "pxor %%mm7, %%mm7 \n\t"
948 "pop %%"REG_BP
" \n\t"
950 ::
"c" (buf0),
"d" (buf1),
"S" (ubuf0),
"D" (ubuf1),
"m" (dest),
956 const int16_t *ubuf[2],
const int16_t *vbuf[2],
957 const int16_t *abuf[2],
uint8_t *dest,
958 int dstW,
int yalpha,
int uvalpha,
int y)
960 const int16_t *buf0 = buf[0], *buf1 = buf[1],
961 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
966 "mov %4, %%"REG_b
" \n\t"
967 "push %%"REG_BP
" \n\t"
969 "pxor %%mm7, %%mm7 \n\t"
977 "pop %%"REG_BP
" \n\t"
979 ::
"c" (buf0),
"d" (buf1),
"S" (ubuf0),
"D" (ubuf1),
"m" (dest),
984 #define REAL_YSCALEYUV2PACKED(index, c) \
985 "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
986 "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1 \n\t"\
987 "psraw $3, %%mm0 \n\t"\
988 "psraw $3, %%mm1 \n\t"\
989 "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c") \n\t"\
990 "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c") \n\t"\
991 "xor "#index", "#index" \n\t"\
994 "movq (%2, "#index"), %%mm2 \n\t" \
995 "movq (%3, "#index"), %%mm3 \n\t" \
996 "add "UV_OFF_BYTE"("#c"), "#index" \n\t" \
997 "movq (%2, "#index"), %%mm5 \n\t" \
998 "movq (%3, "#index"), %%mm4 \n\t" \
999 "sub "UV_OFF_BYTE"("#c"), "#index" \n\t" \
1000 "psubw %%mm3, %%mm2 \n\t" \
1001 "psubw %%mm4, %%mm5 \n\t" \
1002 "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
1003 "pmulhw %%mm0, %%mm2 \n\t" \
1004 "pmulhw %%mm0, %%mm5 \n\t" \
1005 "psraw $7, %%mm3 \n\t" \
1006 "psraw $7, %%mm4 \n\t" \
1007 "paddw %%mm2, %%mm3 \n\t" \
1008 "paddw %%mm5, %%mm4 \n\t" \
1009 "movq (%0, "#index", 2), %%mm0 \n\t" \
1010 "movq (%1, "#index", 2), %%mm1 \n\t" \
1011 "movq 8(%0, "#index", 2), %%mm6 \n\t" \
1012 "movq 8(%1, "#index", 2), %%mm7 \n\t" \
1013 "psubw %%mm1, %%mm0 \n\t" \
1014 "psubw %%mm7, %%mm6 \n\t" \
1015 "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" \
1016 "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" \
1017 "psraw $7, %%mm1 \n\t" \
1018 "psraw $7, %%mm7 \n\t" \
1019 "paddw %%mm0, %%mm1 \n\t" \
1020 "paddw %%mm6, %%mm7 \n\t" \
1022 #define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c)
1025 const int16_t *ubuf[2],
const int16_t *vbuf[2],
1026 const int16_t *abuf[2],
uint8_t *dest,
1027 int dstW,
int yalpha,
int uvalpha,
int y)
1029 const int16_t *buf0 = buf[0], *buf1 = buf[1],
1030 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
1035 "mov %4, %%"REG_b
" \n\t"
1036 "push %%"REG_BP
" \n\t"
1039 "pop %%"REG_BP
" \n\t"
1041 ::
"c" (buf0),
"d" (buf1),
"S" (ubuf0),
"D" (ubuf1),
"m" (dest),
1046 #define REAL_YSCALEYUV2RGB1(index, c) \
1047 "xor "#index", "#index" \n\t"\
1050 "movq (%2, "#index"), %%mm3 \n\t" \
1051 "add "UV_OFF_BYTE"("#c"), "#index" \n\t" \
1052 "movq (%2, "#index"), %%mm4 \n\t" \
1053 "sub "UV_OFF_BYTE"("#c"), "#index" \n\t" \
1054 "psraw $4, %%mm3 \n\t" \
1055 "psraw $4, %%mm4 \n\t" \
1056 "psubw "U_OFFSET"("#c"), %%mm3 \n\t" \
1057 "psubw "V_OFFSET"("#c"), %%mm4 \n\t" \
1058 "movq %%mm3, %%mm2 \n\t" \
1059 "movq %%mm4, %%mm5 \n\t" \
1060 "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
1061 "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
1063 "movq (%0, "#index", 2), %%mm1 \n\t" \
1064 "movq 8(%0, "#index", 2), %%mm7 \n\t" \
1065 "psraw $4, %%mm1 \n\t" \
1066 "psraw $4, %%mm7 \n\t" \
1067 "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
1068 "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
1069 "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" \
1070 "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" \
1071 "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
1072 "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
1074 "paddw %%mm3, %%mm4 \n\t"\
1075 "movq %%mm2, %%mm0 \n\t"\
1076 "movq %%mm5, %%mm6 \n\t"\
1077 "movq %%mm4, %%mm3 \n\t"\
1078 "punpcklwd %%mm2, %%mm2 \n\t"\
1079 "punpcklwd %%mm5, %%mm5 \n\t"\
1080 "punpcklwd %%mm4, %%mm4 \n\t"\
1081 "paddw %%mm1, %%mm2 \n\t"\
1082 "paddw %%mm1, %%mm5 \n\t"\
1083 "paddw %%mm1, %%mm4 \n\t"\
1084 "punpckhwd %%mm0, %%mm0 \n\t"\
1085 "punpckhwd %%mm6, %%mm6 \n\t"\
1086 "punpckhwd %%mm3, %%mm3 \n\t"\
1087 "paddw %%mm7, %%mm0 \n\t"\
1088 "paddw %%mm7, %%mm6 \n\t"\
1089 "paddw %%mm7, %%mm3 \n\t"\
1091 "packuswb %%mm0, %%mm2 \n\t"\
1092 "packuswb %%mm6, %%mm5 \n\t"\
1093 "packuswb %%mm3, %%mm4 \n\t"\
1095 #define YSCALEYUV2RGB1(index, c) REAL_YSCALEYUV2RGB1(index, c)
1098 #define REAL_YSCALEYUV2RGB1b(index, c) \
1099 "xor "#index", "#index" \n\t"\
1102 "movq (%2, "#index"), %%mm2 \n\t" \
1103 "movq (%3, "#index"), %%mm3 \n\t" \
1104 "add "UV_OFF_BYTE"("#c"), "#index" \n\t" \
1105 "movq (%2, "#index"), %%mm5 \n\t" \
1106 "movq (%3, "#index"), %%mm4 \n\t" \
1107 "sub "UV_OFF_BYTE"("#c"), "#index" \n\t" \
1108 "paddw %%mm2, %%mm3 \n\t" \
1109 "paddw %%mm5, %%mm4 \n\t" \
1110 "psrlw $5, %%mm3 \n\t" \
1111 "psrlw $5, %%mm4 \n\t" \
1112 "psubw "U_OFFSET"("#c"), %%mm3 \n\t" \
1113 "psubw "V_OFFSET"("#c"), %%mm4 \n\t" \
1114 "movq %%mm3, %%mm2 \n\t" \
1115 "movq %%mm4, %%mm5 \n\t" \
1116 "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
1117 "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
1119 "movq (%0, "#index", 2), %%mm1 \n\t" \
1120 "movq 8(%0, "#index", 2), %%mm7 \n\t" \
1121 "psraw $4, %%mm1 \n\t" \
1122 "psraw $4, %%mm7 \n\t" \
1123 "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
1124 "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
1125 "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" \
1126 "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" \
1127 "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
1128 "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
1130 "paddw %%mm3, %%mm4 \n\t"\
1131 "movq %%mm2, %%mm0 \n\t"\
1132 "movq %%mm5, %%mm6 \n\t"\
1133 "movq %%mm4, %%mm3 \n\t"\
1134 "punpcklwd %%mm2, %%mm2 \n\t"\
1135 "punpcklwd %%mm5, %%mm5 \n\t"\
1136 "punpcklwd %%mm4, %%mm4 \n\t"\
1137 "paddw %%mm1, %%mm2 \n\t"\
1138 "paddw %%mm1, %%mm5 \n\t"\
1139 "paddw %%mm1, %%mm4 \n\t"\
1140 "punpckhwd %%mm0, %%mm0 \n\t"\
1141 "punpckhwd %%mm6, %%mm6 \n\t"\
1142 "punpckhwd %%mm3, %%mm3 \n\t"\
1143 "paddw %%mm7, %%mm0 \n\t"\
1144 "paddw %%mm7, %%mm6 \n\t"\
1145 "paddw %%mm7, %%mm3 \n\t"\
1147 "packuswb %%mm0, %%mm2 \n\t"\
1148 "packuswb %%mm6, %%mm5 \n\t"\
1149 "packuswb %%mm3, %%mm4 \n\t"\
1151 #define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c)
/* Single-row alpha fetch: load 16 alpha words from %1, scale down by 7
 * bits, and pack to 8 unsigned bytes in mm7 for the BGRA writer. */
1153 #define REAL_YSCALEYUV2RGB1_ALPHA(index) \
1154 "movq (%1, "#index", 2), %%mm7 \n\t" \
1155 "movq 8(%1, "#index", 2), %%mm1 \n\t" \
1156 "psraw $7, %%mm7 \n\t" \
1157 "psraw $7, %%mm1 \n\t" \
1158 "packuswb %%mm1, %%mm7 \n\t"
1159 #define YSCALEYUV2RGB1_ALPHA(index) REAL_YSCALEYUV2RGB1_ALPHA(index)
1165 const int16_t *ubuf[2],
const int16_t *vbuf[2],
1166 const int16_t *abuf0,
uint8_t *dest,
1167 int dstW,
int uvalpha,
int y)
1169 const int16_t *ubuf0 = ubuf[0];
1170 const int16_t *buf1= buf0;
1172 if (uvalpha < 2048) {
1173 const int16_t *ubuf1 = ubuf[0];
1174 if (CONFIG_SWSCALE_ALPHA &&
c->alpPixBuf) {
1177 "mov %4, %%"REG_b
" \n\t"
1178 "push %%"REG_BP
" \n\t"
1181 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1182 "pop %%"REG_BP
" \n\t"
1184 ::
"c" (buf0),
"d" (abuf0),
"S" (ubuf0),
"D" (ubuf1),
"m" (dest),
1190 "mov %4, %%"REG_b
" \n\t"
1191 "push %%"REG_BP
" \n\t"
1193 "pcmpeqd %%mm7, %%mm7 \n\t"
1194 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1195 "pop %%"REG_BP
" \n\t"
1197 ::
"c" (buf0),
"d" (buf1),
"S" (ubuf0),
"D" (ubuf1),
"m" (dest),
1202 const int16_t *ubuf1 = ubuf[1];
1203 if (CONFIG_SWSCALE_ALPHA &&
c->alpPixBuf) {
1206 "mov %4, %%"REG_b
" \n\t"
1207 "push %%"REG_BP
" \n\t"
1210 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1211 "pop %%"REG_BP
" \n\t"
1213 ::
"c" (buf0),
"d" (abuf0),
"S" (ubuf0),
"D" (ubuf1),
"m" (dest),
1219 "mov %4, %%"REG_b
" \n\t"
1220 "push %%"REG_BP
" \n\t"
1222 "pcmpeqd %%mm7, %%mm7 \n\t"
1223 WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
1224 "pop %%"REG_BP
" \n\t"
1226 ::
"c" (buf0),
"d" (buf1),
"S" (ubuf0),
"D" (ubuf1),
"m" (dest),
1234 const int16_t *ubuf[2],
const int16_t *vbuf[2],
1235 const int16_t *abuf0,
uint8_t *dest,
1236 int dstW,
int uvalpha,
int y)
1238 const int16_t *ubuf0 = ubuf[0];
1239 const int16_t *buf1= buf0;
1241 if (uvalpha < 2048) {
1242 const int16_t *ubuf1 = ubuf[0];
1245 "mov %4, %%"REG_b
" \n\t"
1246 "push %%"REG_BP
" \n\t"
1248 "pxor %%mm7, %%mm7 \n\t"
1250 "pop %%"REG_BP
" \n\t"
1252 ::
"c" (buf0),
"d" (buf1),
"S" (ubuf0),
"D" (ubuf1),
"m" (dest),
1256 const int16_t *ubuf1 = ubuf[1];
1259 "mov %4, %%"REG_b
" \n\t"
1260 "push %%"REG_BP
" \n\t"
1262 "pxor %%mm7, %%mm7 \n\t"
1264 "pop %%"REG_BP
" \n\t"
1266 ::
"c" (buf0),
"d" (buf1),
"S" (ubuf0),
"D" (ubuf1),
"m" (dest),
1273 const int16_t *ubuf[2],
const int16_t *vbuf[2],
1274 const int16_t *abuf0,
uint8_t *dest,
1275 int dstW,
int uvalpha,
int y)
1277 const int16_t *ubuf0 = ubuf[0];
1278 const int16_t *buf1= buf0;
1280 if (uvalpha < 2048) {
1281 const int16_t *ubuf1 = ubuf[0];
1284 "mov %4, %%"REG_b
" \n\t"
1285 "push %%"REG_BP
" \n\t"
1287 "pxor %%mm7, %%mm7 \n\t"
1295 "pop %%"REG_BP
" \n\t"
1297 ::
"c" (buf0),
"d" (buf1),
"S" (ubuf0),
"D" (ubuf1),
"m" (dest),
1301 const int16_t *ubuf1 = ubuf[1];
1304 "mov %4, %%"REG_b
" \n\t"
1305 "push %%"REG_BP
" \n\t"
1307 "pxor %%mm7, %%mm7 \n\t"
1315 "pop %%"REG_BP
" \n\t"
1317 ::
"c" (buf0),
"d" (buf1),
"S" (ubuf0),
"D" (ubuf1),
"m" (dest),
1324 const int16_t *ubuf[2],
const int16_t *vbuf[2],
1325 const int16_t *abuf0,
uint8_t *dest,
1326 int dstW,
int uvalpha,
int y)
1328 const int16_t *ubuf0 = ubuf[0];
1329 const int16_t *buf1= buf0;
1331 if (uvalpha < 2048) {
1332 const int16_t *ubuf1 = ubuf[0];
1335 "mov %4, %%"REG_b
" \n\t"
1336 "push %%"REG_BP
" \n\t"
1338 "pxor %%mm7, %%mm7 \n\t"
1346 "pop %%"REG_BP
" \n\t"
1348 ::
"c" (buf0),
"d" (buf1),
"S" (ubuf0),
"D" (ubuf1),
"m" (dest),
1352 const int16_t *ubuf1 = ubuf[1];
1355 "mov %4, %%"REG_b
" \n\t"
1356 "push %%"REG_BP
" \n\t"
1358 "pxor %%mm7, %%mm7 \n\t"
1366 "pop %%"REG_BP
" \n\t"
1368 ::
"c" (buf0),
"d" (buf1),
"S" (ubuf0),
"D" (ubuf1),
"m" (dest),
1374 #define REAL_YSCALEYUV2PACKED1(index, c) \
1375 "xor "#index", "#index" \n\t"\
1378 "movq (%2, "#index"), %%mm3 \n\t" \
1379 "add "UV_OFF_BYTE"("#c"), "#index" \n\t" \
1380 "movq (%2, "#index"), %%mm4 \n\t" \
1381 "sub "UV_OFF_BYTE"("#c"), "#index" \n\t" \
1382 "psraw $7, %%mm3 \n\t" \
1383 "psraw $7, %%mm4 \n\t" \
1384 "movq (%0, "#index", 2), %%mm1 \n\t" \
1385 "movq 8(%0, "#index", 2), %%mm7 \n\t" \
1386 "psraw $7, %%mm1 \n\t" \
1387 "psraw $7, %%mm7 \n\t" \
1389 #define YSCALEYUV2PACKED1(index, c) REAL_YSCALEYUV2PACKED1(index, c)
1391 #define REAL_YSCALEYUV2PACKED1b(index, c) \
1392 "xor "#index", "#index" \n\t"\
1395 "movq (%2, "#index"), %%mm2 \n\t" \
1396 "movq (%3, "#index"), %%mm3 \n\t" \
1397 "add "UV_OFF_BYTE"("#c"), "#index" \n\t" \
1398 "movq (%2, "#index"), %%mm5 \n\t" \
1399 "movq (%3, "#index"), %%mm4 \n\t" \
1400 "sub "UV_OFF_BYTE"("#c"), "#index" \n\t" \
1401 "paddw %%mm2, %%mm3 \n\t" \
1402 "paddw %%mm5, %%mm4 \n\t" \
1403 "psrlw $8, %%mm3 \n\t" \
1404 "psrlw $8, %%mm4 \n\t" \
1405 "movq (%0, "#index", 2), %%mm1 \n\t" \
1406 "movq 8(%0, "#index", 2), %%mm7 \n\t" \
1407 "psraw $7, %%mm1 \n\t" \
1408 "psraw $7, %%mm7 \n\t"
1409 #define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c)
/* NOTE(review): extraction artifact, same shape as the fragment above. The
 * function header (return type and RENAME(...) name), the __asm__ volatile(
 * wrappers, most of the asm body, closing braces, and the declaration of
 * buf0 were dropped; stray original line numbers (1412, 1413, ...) are
 * fused into the text. Appears to be another one-input ("_1")
 * packed-output vertical scaler — presumably the consumer of the
 * YSCALEYUV2PACKED1/1b macros defined just above, chosen by the uvalpha
 * branch — TODO confirm against the original template. Code left
 * byte-identical. */
1412 const int16_t *ubuf[2],
const int16_t *vbuf[2],
1413 const int16_t *abuf0,
uint8_t *dest,
1414 int dstW,
int uvalpha,
int y)
1416 const int16_t *ubuf0 = ubuf[0];
1417 const int16_t *buf1= buf0;
/* unblended-chroma path: both ubuf pointers refer to plane 0 */
1419 if (uvalpha < 2048) {
1420 const int16_t *ubuf1 = ubuf[0];
1423 "mov %4, %%"REG_b
" \n\t"
1424 "push %%"REG_BP
" \n\t"
1427 "pop %%"REG_BP
" \n\t"
1429 ::
"c" (buf0),
"d" (buf1),
"S" (ubuf0),
"D" (ubuf1),
"m" (dest),
/* blended-chroma path: second chroma plane (ubuf[1]) is used */
1433 const int16_t *ubuf1 = ubuf[1];
1436 "mov %4, %%"REG_b
" \n\t"
1437 "push %%"REG_BP
" \n\t"
1440 "pop %%"REG_BP
" \n\t"
1442 ::
"c" (buf0),
"d" (buf1),
"S" (ubuf0),
"D" (ubuf1),
"m" (dest),
/* NOTE(review): extraction artifact. Fragment of the MMXEXT fast horizontal
 * LUMA scaler (grounded by c->hLumFilterPos / c->hLumFilter /
 * c->lumMmxextFilterCode): the function name line, parts of the parameter
 * list, the __asm__ wrappers and several asm lines were dropped, stray
 * original line numbers are fused into the text, and many asm string
 * literals are split mid-token across lines. It saves/restores a register
 * to memory operands %5/%6 around a run of generated filter code reached
 * through the mmxextFilterCode pointer — presumably via a dropped
 * "call" line; TODO confirm. Code left byte-identical. */
1448 #if COMPILE_TEMPLATE_MMXEXT
1450 int dstWidth,
const uint8_t *src,
1453 int32_t *filterPos =
c->hLumFilterPos;
1454 int16_t *
filter =
c->hLumFilter;
1455 void *mmxextFilterCode =
c->lumMmxextFilterCode;
1466 "mov %%"REG_b
", %5 \n\t"
1468 "mov -8(%%rsp), %%"REG_a
" \n\t"
1469 "mov %%"REG_a
", %6 \n\t"
1473 "mov -8(%%rsp), %%"REG_a
" \n\t"
1474 "mov %%"REG_a
", %5 \n\t"
/* load operands into fixed registers for the generated filter code */
1477 "pxor %%mm7, %%mm7 \n\t"
1478 "mov %0, %%"REG_c
" \n\t"
1479 "mov %1, %%"REG_D
" \n\t"
1480 "mov %2, %%"REG_d
" \n\t"
1481 "mov %3, %%"REG_b
" \n\t"
1482 "xor %%"REG_a
", %%"REG_a
" \n\t"
/* NOTE(review): two consecutive #defines of CALL_MMXEXT_FILTER_CODE — the
 * #if/#else that originally separated the two variants (and, judging by the
 * fused-line-number gaps 1489->1491 and 1498->1500, at least one asm line
 * per variant) appears to have been dropped by extraction. */
1488 #define CALL_MMXEXT_FILTER_CODE \
1489 "movl (%%"REG_b"), %%esi \n\t"\
1491 "movl (%%"REG_b", %%"REG_a"), %%esi \n\t"\
1492 "add %%"REG_S", %%"REG_c" \n\t"\
1493 "add %%"REG_a", %%"REG_D" \n\t"\
1494 "xor %%"REG_a", %%"REG_a" \n\t"\
1497 #define CALL_MMXEXT_FILTER_CODE \
1498 "movl (%%"REG_b"), %%esi \n\t"\
1500 "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\
1501 "add %%"REG_a", %%"REG_D" \n\t"\
1502 "xor %%"REG_a", %%"REG_a" \n\t"\
1506 CALL_MMXEXT_FILTER_CODE
1507 CALL_MMXEXT_FILTER_CODE
1508 CALL_MMXEXT_FILTER_CODE
1509 CALL_MMXEXT_FILTER_CODE
1510 CALL_MMXEXT_FILTER_CODE
1511 CALL_MMXEXT_FILTER_CODE
1512 CALL_MMXEXT_FILTER_CODE
1513 CALL_MMXEXT_FILTER_CODE
/* restore the saved register(s) from %5/%6 */
1516 "mov %5, %%"REG_b
" \n\t"
1518 "mov %6, %%"REG_a
" \n\t"
1519 "mov %%"REG_a
", -8(%%rsp) \n\t"
1523 "mov %5, %%"REG_a
" \n\t"
1524 "mov %%"REG_a
", -8(%%rsp) \n\t"
1527 ::
"m" (src),
"m" (
dst),
"m" (
filter),
"m" (filterPos),
1528 "m" (mmxextFilterCode)
1535 :
"%"REG_a,
"%"REG_c,
"%"REG_d,
"%"REG_S,
"%"REG_D
/* right-edge tail loop — the loop body (presumably replicating the last
 * source sample, as in the chroma variant below) was dropped by extraction */
1541 for (i=dstWidth-1; (i*xInc)>>16 >=
srcW-1; i--)
/* NOTE(review): extraction artifact. Fragment of the MMXEXT fast horizontal
 * CHROMA scaler (grounded by c->hChrFilterPos / c->hChrFilter /
 * c->chrMmxextFilterCode and the src1/src2, dst1/dst2 operand pairs): the
 * function name line, parts of the parameter list, the __asm__ wrappers and
 * several asm lines were dropped; stray original line numbers are fused
 * into the text. Same save/run/restore shape as the luma variant above,
 * but the generated filter code runs twice — once per chroma plane.
 * Code left byte-identical. */
1546 int dstWidth,
const uint8_t *src1,
1549 int32_t *filterPos = c->hChrFilterPos;
1550 int16_t *filter = c->hChrFilter;
1551 void *mmxextFilterCode = c->chrMmxextFilterCode;
1562 "mov %%"REG_b
", %7 \n\t"
1564 "mov -8(%%rsp), %%"REG_a
" \n\t"
1565 "mov %%"REG_a
", %8 \n\t"
1569 "mov -8(%%rsp), %%"REG_a
" \n\t"
1570 "mov %%"REG_a
", %7 \n\t"
/* first plane: operands %0..%3 into the fixed registers */
1573 "pxor %%mm7, %%mm7 \n\t"
1574 "mov %0, %%"REG_c
" \n\t"
1575 "mov %1, %%"REG_D
" \n\t"
1576 "mov %2, %%"REG_d
" \n\t"
1577 "mov %3, %%"REG_b
" \n\t"
1578 "xor %%"REG_a
", %%"REG_a
" \n\t"
1583 CALL_MMXEXT_FILTER_CODE
1584 CALL_MMXEXT_FILTER_CODE
1585 CALL_MMXEXT_FILTER_CODE
1586 CALL_MMXEXT_FILTER_CODE
/* second plane: reset index, switch source/dest to %5/%6 (src2/dst2) */
1587 "xor %%"REG_a
", %%"REG_a
" \n\t"
1588 "mov %5, %%"REG_c
" \n\t"
1589 "mov %6, %%"REG_D
" \n\t"
1594 CALL_MMXEXT_FILTER_CODE
1595 CALL_MMXEXT_FILTER_CODE
1596 CALL_MMXEXT_FILTER_CODE
1597 CALL_MMXEXT_FILTER_CODE
/* restore the saved register(s) from %7/%8 */
1600 "mov %7, %%"REG_b
" \n\t"
1602 "mov %8, %%"REG_a
" \n\t"
1603 "mov %%"REG_a
", -8(%%rsp) \n\t"
1607 "mov %7, %%"REG_a
" \n\t"
1608 "mov %%"REG_a
", -8(%%rsp) \n\t"
1611 ::
"m" (src1),
"m" (dst1),
"m" (
filter),
"m" (filterPos),
1612 "m" (mmxextFilterCode),
"m" (src2),
"m"(dst2)
1619 :
"%"REG_a,
"%"REG_c,
"%"REG_d,
"%"REG_S,
"%"REG_D
/* right-edge handling: replicate the last source sample (scaled by 128)
 * into the remaining destination entries of both planes */
1625 for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) {
1626 dst1[i] = src1[srcW-1]*128;
1627 dst2[i] = src2[srcW-1]*128;
1636 c->use_mmx_vfilter= 0;
1641 switch (c->dstFormat) {
1651 c->use_mmx_vfilter= 1;
1652 c->yuv2planeX =
RENAME(yuv2yuvX );
1654 switch (c->dstFormat) {
1665 switch (c->dstFormat) {
1667 c->yuv2packed1 =
RENAME(yuv2rgb32_1);
1668 c->yuv2packed2 =
RENAME(yuv2rgb32_2);
1671 c->yuv2packed1 =
RENAME(yuv2bgr24_1);
1672 c->yuv2packed2 =
RENAME(yuv2bgr24_2);
1675 c->yuv2packed1 =
RENAME(yuv2rgb555_1);
1676 c->yuv2packed2 =
RENAME(yuv2rgb555_2);
1679 c->yuv2packed1 =
RENAME(yuv2rgb565_1);
1680 c->yuv2packed2 =
RENAME(yuv2rgb565_2);
1683 c->yuv2packed1 =
RENAME(yuv2yuyv422_1);
1684 c->yuv2packed2 =
RENAME(yuv2yuyv422_2);
1692 if (c->srcBpc == 8 && c->dstBpc <= 14) {
1694 #if COMPILE_TEMPLATE_MMXEXT
1700 c->hyscale_fast =
NULL;
1701 c->hcscale_fast =
NULL;
1702 #if COMPILE_TEMPLATE_MMXEXT