33 void ff_avg_pixels4_mmxext(uint8_t *
dst,
const uint8_t *
src, ptrdiff_t
stride);
34 void ff_put_pixels4_l2_mmxext(uint8_t *
dst,
const uint8_t *
src1,
const uint8_t *
src2,
36 void ff_avg_pixels4_l2_mmxext(uint8_t *
dst,
const uint8_t *
src1,
const uint8_t *
src2,
39 ptrdiff_t dstStride, ptrdiff_t src1Stride,
int h);
41 ptrdiff_t dstStride, ptrdiff_t src1Stride);
43 ptrdiff_t dstStride, ptrdiff_t src1Stride,
int h);
45 ptrdiff_t dstStride, ptrdiff_t src1Stride);
/* Compatibility wrappers: accept the common 6-argument l2 signature and
 * drop the unused h (the 4-wide assembly always does four rows).
 * Fix: src1stride must be forwarded too -- the underlying functions take
 * five parameters, not four. */
#define ff_put_pixels4_l2_mmxext(dst, src1, src2, dststride, src1stride, h) \
    ff_put_pixels4_l2_mmxext((dst), (src1), (src2), (dststride), (src1stride))
#define ff_avg_pixels4_l2_mmxext(dst, src1, src2, dststride, src1stride, h) \
    ff_avg_pixels4_l2_mmxext((dst), (src1), (src2), (dststride), (src1stride))
/* On SSE2 the wide l2 copies reuse the mmxext entry points; the avg
 * variants additionally drop the unused h argument. */
#define ff_put_pixels8_l2_sse2  ff_put_pixels8_l2_mmxext
#define ff_avg_pixels8_l2_sse2(dst, src1, src2, dststride, src1stride, h) \
    ff_avg_pixels8_l2_mmxext((dst), (src1), (src2), (dststride), (src1stride))
#define ff_put_pixels16_l2_sse2 ff_put_pixels16_l2_mmxext
#define ff_avg_pixels16_l2_sse2(dst, src1, src2, dststride, src1stride, h) \
    ff_avg_pixels16_l2_mmxext((dst), (src1), (src2), (dststride), (src1stride))
/* Declares, for one OPNAME (put/avg), all assembly lowpass and helper
 * primitives used by the QPEL composition macros below. */
#define DEF_QPEL(OPNAME)\
void ff_ ## OPNAME ## _h264_qpel4_h_lowpass_mmxext(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride);\
void ff_ ## OPNAME ## _h264_qpel8_h_lowpass_ssse3(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride);\
void ff_ ## OPNAME ## _h264_qpel4_h_lowpass_l2_mmxext(uint8_t *dst, const uint8_t *src, const uint8_t *src2, ptrdiff_t dstStride, ptrdiff_t src2Stride);\
void ff_ ## OPNAME ## _h264_qpel8_h_lowpass_l2_sse2(uint8_t *dst, const uint8_t *src, const uint8_t *src2, ptrdiff_t dstStride, ptrdiff_t src2Stride);\
void ff_ ## OPNAME ## _h264_qpel16_h_lowpass_l2_sse2(uint8_t *dst, const uint8_t *src, const uint8_t *src2, ptrdiff_t dstStride, ptrdiff_t src2Stride);\
void ff_ ## OPNAME ## _h264_qpel8_h_lowpass_l2_ssse3(uint8_t *dst, const uint8_t *src, const uint8_t *src2, ptrdiff_t dstStride, ptrdiff_t src2Stride);\
void ff_ ## OPNAME ## _h264_qpel4_v_lowpass_mmxext(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride);\
void ff_ ## OPNAME ## _h264_qpel8or16_v_lowpass_sse2(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h);\
void ff_ ## OPNAME ## _h264_qpel4_hv_lowpass_h_mmxext(int16_t *tmp, uint8_t *dst, ptrdiff_t dstStride);\
void ff_ ## OPNAME ## _h264_qpel8or16_hv1_lowpass_op_sse2(const uint8_t *src, int16_t *tmp, ptrdiff_t srcStride, int size);\
void ff_ ## OPNAME ## _h264_qpel8_hv2_lowpass_sse2(uint8_t *dst, int16_t *tmp, ptrdiff_t dstStride);\
void ff_ ## OPNAME ## _h264_qpel16_hv2_lowpass_sse2(uint8_t *dst, int16_t *tmp, ptrdiff_t dstStride);\
void ff_ ## OPNAME ## _h264_qpel8_hv2_lowpass_ssse3(uint8_t *dst, int16_t *tmp, ptrdiff_t dstStride);\
void ff_ ## OPNAME ## _h264_qpel16_hv2_lowpass_ssse3(uint8_t *dst, int16_t *tmp, ptrdiff_t dstStride);\
void ff_ ## OPNAME ## _pixels4_l2_shift5_mmxext(uint8_t *dst, const int16_t *src16, const uint8_t *src8, ptrdiff_t dstStride);\
void ff_ ## OPNAME ## _pixels8_l2_shift5_sse2(uint8_t *dst, const int16_t *src16, const uint8_t *src8, ptrdiff_t dstStride);\
void ff_ ## OPNAME ## _pixels16_l2_shift5_sse2(uint8_t *dst, const int16_t *src16, const uint8_t *src8, ptrdiff_t dstStride);
76 void ff_put_h264_qpel4_hv_lowpass_v_mmxext(
const uint8_t *
src, int16_t *
tmp, ptrdiff_t srcStride);
/* (2,2) half-pel HV lowpass for 4x4: the shared vertical pass fills tmp,
 * then the per-OPNAME horizontal pass writes dst. src is rewound by two
 * rows and two columns to the filter origin. */
#define QPEL_H264(OPNAME, MMX)\
static av_always_inline void OPNAME ## h264_qpel4_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    src -= 2*srcStride+2;\
    ff_put_h264_qpel4_hv_lowpass_v_mmxext(src, tmp, srcStride);\
    ff_ ## OPNAME ## h264_qpel4_hv_lowpass_h_mmxext(tmp, dst, dstStride);\
}
/* Builds a 16-wide h_lowpass_l2 from four 8-wide calls (two per 8-row
 * half). src advances by dstStride: this signature carries no separate
 * stride for src.
 * NOTE(review): the src/dst advance lines were elided in the extraction
 * and are reconstructed here -- confirm against upstream. */
#define QPEL_H264_H16(OPNAME, EXT) \
static av_always_inline void ff_ ## OPNAME ## h264_qpel16_h_lowpass_l2_ ## EXT(uint8_t *dst, const uint8_t *src, const uint8_t *src2, ptrdiff_t dstStride, ptrdiff_t src2Stride)\
{\
    ff_ ## OPNAME ## h264_qpel8_h_lowpass_l2_ ## EXT(dst  , src  , src2  , dstStride, src2Stride);\
    ff_ ## OPNAME ## h264_qpel8_h_lowpass_l2_ ## EXT(dst+8, src+8, src2+8, dstStride, src2Stride);\
    src  += 8*dstStride;\
    dst  += 8*dstStride;\
    src2 += 8*src2Stride;\
    ff_ ## OPNAME ## h264_qpel8_h_lowpass_l2_ ## EXT(dst  , src  , src2  , dstStride, src2Stride);\
    ff_ ## OPNAME ## h264_qpel8_h_lowpass_l2_ ## EXT(dst+8, src+8, src2+8, dstStride, src2Stride);\
}
/* On x86-64 the 16-wide ssse3 h_lowpass_l2 functions exist directly in
 * assembly, so QPEL_H264_H16_XMM expands to nothing; elsewhere they are
 * composed in C via QPEL_H264_H16.
 * NOTE(review): the #if/#else directives were elided in the extraction
 * (the #endif survived) and are restored here -- confirm the condition. */
#if ARCH_X86_64
#define QPEL_H264_H16_XMM(OPNAME, MMX)

void ff_avg_h264_qpel16_h_lowpass_l2_ssse3(uint8_t *dst, const uint8_t *src,
                                           const uint8_t *src2,
                                           ptrdiff_t dstStride, ptrdiff_t src2Stride);
void ff_put_h264_qpel16_h_lowpass_l2_ssse3(uint8_t *dst, const uint8_t *src,
                                           const uint8_t *src2,
                                           ptrdiff_t dstStride, ptrdiff_t src2Stride);
#else // ARCH_X86_64
#define QPEL_H264_H16_XMM(OPNAME, EXT) QPEL_H264_H16(OPNAME, EXT)
#endif // ARCH_X86_64
/* 16-wide plain h_lowpass from four 8-wide calls, plus the matching
 * 16-wide l2 variant via QPEL_H264_H16_XMM.
 * NOTE(review): the src/dst advance lines were elided in the extraction
 * and are reconstructed here -- confirm against upstream. */
#define QPEL_H264_H_XMM(OPNAME, MMX)\
QPEL_H264_H16_XMM(OPNAME, MMX)\
static av_always_inline void ff_ ## OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    ff_ ## OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst  , src  , dstStride, srcStride);\
    ff_ ## OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    ff_ ## OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst  , src  , dstStride, srcStride);\
    ff_ ## OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
}
/* 8- and 16-wide vertical lowpass, both expressed via the shared
 * 8or16 assembly kernel (XMM2 flavour) with an explicit height. */
#define QPEL_H264_V_XMM(OPNAME, XMM, XMM2)\
static av_always_inline void ff_ ## OPNAME ## h264_qpel8_v_lowpass_ ## XMM(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    ff_ ## OPNAME ## h264_qpel8or16_v_lowpass_ ## XMM2(dst  , src  , dstStride, srcStride,  8);\
}\
static av_always_inline void ff_ ## OPNAME ## h264_qpel16_v_lowpass_ ## XMM(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    ff_ ## OPNAME ## h264_qpel8or16_v_lowpass_ ## XMM2(dst  , src  , dstStride, srcStride, 16);\
    ff_ ## OPNAME ## h264_qpel8or16_v_lowpass_ ## XMM2(dst+8, src+8, dstStride, srcStride, 16);\
}
141 src -= 2*srcStride+2;
143 ff_put_h264_qpel8or16_hv1_lowpass_op_sse2(
src,
tmp, srcStride,
size);
/* 8- and 16-wide HV lowpass: shared vertical pass into tmp, then the
 * per-OPNAME horizontal second pass into dst. */
#define QPEL_H264_HV_XMM(OPNAME, MMX)\
static av_always_inline void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    put_h264_qpel8or16_hv1_lowpass_sse2(tmp, src, srcStride, 8);\
    ff_ ## OPNAME ## h264_qpel8_hv2_lowpass_ ## MMX(dst, tmp, dstStride);\
}\
static av_always_inline void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    put_h264_qpel8or16_hv1_lowpass_sse2(tmp, src, srcStride, 16);\
    ff_ ## OPNAME ## h264_qpel16_hv2_lowpass_ ## MMX(dst, tmp, dstStride);\
}
/* Instantiates the vertical, horizontal and mixed MC functions for one
 * (OPNAME, SIZE, CPU) combination. */
#define H264_MC_V_H_HV(OPNAME, SIZE, MMX, ALIGN, SHIFT5_EXT) \
H264_MC_V(OPNAME, SIZE, MMX, ALIGN, SHIFT5_EXT)\
H264_MC_H(OPNAME, SIZE, MMX, ALIGN, SHIFT5_EXT)\
H264_MC_HV(OPNAME, SIZE, MMX, ALIGN, SHIFT5_EXT)
/* Horizontal-only quarter-pel positions: mc10/mc30 average the lowpass
 * result with the nearest full-pel column (src / src+1); mc20 is the pure
 * half-pel lowpass. */
#define H264_MC_H(OPNAME, SIZE, MMX, ALIGN, UNUSED) \
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    ff_ ## OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src, stride, stride);\
}\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    ff_ ## OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## MMX(dst, src, stride, stride);\
}\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    ff_ ## OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src+1, stride, stride);\
}
/* Vertical-only quarter-pel positions: mc01/mc03 average the vertical
 * lowpass (computed into a SIZE x SIZE temp) with the nearest full-pel
 * row; mc02 is the pure half-pel lowpass. */
#define H264_MC_V(OPNAME, SIZE, MMX, ALIGN, UNUSED) \
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*SIZE]);\
    ff_put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    ff_ ## OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, temp, stride, stride, SIZE);\
}\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    ff_ ## OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*SIZE]);\
    ff_put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    ff_ ## OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+stride, temp, stride, stride, SIZE);\
}
/* Mixed quarter-pel positions. mc11/mc31/mc13/mc33 blend a vertical
 * lowpass temp with the horizontal lowpass; mc22 is the full HV lowpass;
 * mc21/mc23 blend the HV result with the horizontal lowpass; mc12/mc32
 * blend the HV result with the intermediate vertical buffer via the
 * l2_shift5 assembly helpers (halfV+2 / halfV+3 select the column). */
#define H264_MC_HV(OPNAME, SIZE, MMX, ALIGN, SHIFT5_EXT) \
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*SIZE]);\
    ff_put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    ff_ ## OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, temp, stride, SIZE);\
}\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*SIZE]);\
    ff_put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src+1, SIZE, stride);\
    ff_ ## OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, temp, stride, SIZE);\
}\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*SIZE]);\
    ff_put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    ff_ ## OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, temp, stride, SIZE);\
}\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*SIZE]);\
    ff_put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src+1, SIZE, stride);\
    ff_ ## OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, temp, stride, SIZE);\
}\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    LOCAL_ALIGNED(ALIGN, uint16_t, temp, [SIZE*(SIZE<8?12:24)]);\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(dst, temp, src, stride, stride);\
}\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
    uint8_t * const halfHV= temp;\
    int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
    av_assert2(((uintptr_t)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, stride);\
    ff_ ## OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfHV, stride, SIZE);\
}\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
    uint8_t * const halfHV= temp;\
    int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
    av_assert2(((uintptr_t)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, stride);\
    ff_ ## OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfHV, stride, SIZE);\
}\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
    uint8_t * const halfHV= temp;\
    int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
    av_assert2(((uintptr_t)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, stride);\
    ff_ ## OPNAME ## pixels ## SIZE ## _l2_shift5_ ## SHIFT5_EXT(dst, halfV+2, halfHV, stride);\
}\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
    uint8_t * const halfHV= temp;\
    int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
    av_assert2(((uintptr_t)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, stride);\
    ff_ ## OPNAME ## pixels ## SIZE ## _l2_shift5_ ## SHIFT5_EXT(dst, halfV+3, halfHV, stride);\
}
/* Instantiates put and avg variants of a position macro. */
#define H264_MC(QPEL, SIZE, MMX, ALIGN, SHIFT5_EXT)\
QPEL(put_, SIZE, MMX, ALIGN, SHIFT5_EXT) \
QPEL(avg_, SIZE, MMX, ALIGN, SHIFT5_EXT)

/* Instantiates put and avg at both 8 and 16 pixels for one SIMD flavour. */
#define H264_MC_816(QPEL, XMM, SHIFT5_EXT)\
QPEL(put_,  8, XMM, 16, SHIFT5_EXT)\
QPEL(put_, 16, XMM, 16, SHIFT5_EXT)\
QPEL(avg_,  8, XMM, 16, SHIFT5_EXT)\
QPEL(avg_, 16, XMM, 16, SHIFT5_EXT)
287 QPEL_H264(put_, mmxext)
288 QPEL_H264(avg_, mmxext)
289 QPEL_H264_V_XMM(put_, sse2, sse2)
290 QPEL_H264_V_XMM(avg_, sse2, sse2)
291 QPEL_H264_HV_XMM(put_, sse2)
292 QPEL_H264_HV_XMM(avg_, sse2)
293 QPEL_H264_H_XMM(put_, ssse3)
294 QPEL_H264_H_XMM(avg_, ssse3)
295 QPEL_H264_V_XMM(put_, ssse3, sse2)
296 QPEL_H264_HV_XMM(put_, ssse3)
297 QPEL_H264_HV_XMM(avg_, ssse3)
299 H264_MC(H264_MC_V_H_HV, 4, mmxext, 8, mmxext)
300 H264_MC_816(H264_MC_V, sse2, sse2)
301 H264_MC_816(H264_MC_HV, sse2, sse2)
302 H264_MC_816(H264_MC_H, ssse3, sse2)
303 H264_MC_816(H264_MC_HV, ssse3, sse2)
/* Prototype generators for the 10-bit luma MC assembly functions. */
#define LUMA_MC_OP(OP, NUM, DEPTH, TYPE, OPT) \
void ff_ ## OP ## _h264_qpel ## NUM ## _ ## TYPE ## _ ## DEPTH ## _ ## OPT \
    (uint8_t *dst, const uint8_t *src, ptrdiff_t stride);

#define LUMA_MC_4(DEPTH, TYPE, OPT) \
    LUMA_MC_OP(put, 4, DEPTH, TYPE, OPT) \
    LUMA_MC_OP(avg, 4, DEPTH, TYPE, OPT)

#define LUMA_MC_816(DEPTH, TYPE, OPT) \
    LUMA_MC_OP(put,  8, DEPTH, TYPE, OPT) \
    LUMA_MC_OP(avg,  8, DEPTH, TYPE, OPT) \
    LUMA_MC_OP(put, 16, DEPTH, TYPE, OPT) \
    LUMA_MC_OP(avg, 16, DEPTH, TYPE, OPT)
321 LUMA_MC_4(10, mc00, mmxext)
322 LUMA_MC_4(10, mc10, mmxext)
323 LUMA_MC_4(10, mc20, mmxext)
324 LUMA_MC_4(10, mc30, mmxext)
325 LUMA_MC_4(10, mc01, mmxext)
326 LUMA_MC_4(10, mc11, mmxext)
327 LUMA_MC_4(10, mc21, mmxext)
328 LUMA_MC_4(10, mc31, mmxext)
329 LUMA_MC_4(10, mc02, mmxext)
330 LUMA_MC_4(10, mc12, mmxext)
331 LUMA_MC_4(10, mc22, mmxext)
332 LUMA_MC_4(10, mc32, mmxext)
333 LUMA_MC_4(10, mc03, mmxext)
334 LUMA_MC_4(10, mc13, mmxext)
335 LUMA_MC_4(10, mc23, mmxext)
336 LUMA_MC_4(10, mc33, mmxext)
338 LUMA_MC_816(10, mc00, sse2)
339 LUMA_MC_816(10, mc10, sse2)
340 LUMA_MC_816(10, mc10, ssse3_cache64)
341 LUMA_MC_816(10, mc20, sse2)
342 LUMA_MC_816(10, mc20, ssse3_cache64)
343 LUMA_MC_816(10, mc30, sse2)
344 LUMA_MC_816(10, mc30, ssse3_cache64)
345 LUMA_MC_816(10, mc01, sse2)
346 LUMA_MC_816(10, mc11, sse2)
347 LUMA_MC_816(10, mc21, sse2)
348 LUMA_MC_816(10, mc31, sse2)
349 LUMA_MC_816(10, mc02, sse2)
350 LUMA_MC_816(10, mc12, sse2)
351 LUMA_MC_816(10, mc22, sse2)
352 LUMA_MC_816(10, mc32, sse2)
353 LUMA_MC_816(10, mc03, sse2)
354 LUMA_MC_816(10, mc13, sse2)
355 LUMA_MC_816(10, mc23, sse2)
356 LUMA_MC_816(10, mc33, sse2)
/* Fills entries 1..15 of one qpel function table row (mc00 is handled
 * separately by SET_QPEL_FUNCS).
 * NOTE(review): the do {} while (0) wrapper lines were elided in the
 * extraction and are restored here -- confirm against upstream. */
#define SET_QPEL_FUNCS_1PP(PFX, IDX, SIZE, CPU, PREFIX)                      \
    do {                                                                     \
        c->PFX ## _pixels_tab[IDX][ 1] = PREFIX ## PFX ## SIZE ## _mc10_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 2] = PREFIX ## PFX ## SIZE ## _mc20_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 3] = PREFIX ## PFX ## SIZE ## _mc30_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 4] = PREFIX ## PFX ## SIZE ## _mc01_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 5] = PREFIX ## PFX ## SIZE ## _mc11_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 6] = PREFIX ## PFX ## SIZE ## _mc21_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 7] = PREFIX ## PFX ## SIZE ## _mc31_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 8] = PREFIX ## PFX ## SIZE ## _mc02_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 9] = PREFIX ## PFX ## SIZE ## _mc12_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][10] = PREFIX ## PFX ## SIZE ## _mc22_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][11] = PREFIX ## PFX ## SIZE ## _mc32_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][12] = PREFIX ## PFX ## SIZE ## _mc03_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][13] = PREFIX ## PFX ## SIZE ## _mc13_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][14] = PREFIX ## PFX ## SIZE ## _mc23_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][15] = PREFIX ## PFX ## SIZE ## _mc33_ ## CPU; \
    } while (0)

/* Fills all 16 entries of a qpel function table row, including mc00. */
#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX)                          \
    do {                                                                     \
        c->PFX ## _pixels_tab[IDX][ 0] = PREFIX ## PFX ## SIZE ## _mc00_ ## CPU; \
        SET_QPEL_FUNCS_1PP(PFX, IDX, SIZE, CPU, PREFIX);                     \
    } while (0)
/* Sets one (x, y) position for both put and avg at 16 and 8 wide.
 * NOTE(review): the do {} while (0) wrapper lines were elided in the
 * extraction and are restored here -- confirm against upstream. */
#define H264_QPEL_FUNCS(x, y, CPU)                                           \
    do {                                                                     \
        c->put_h264_qpel_pixels_tab[0][x + y * 4] = put_h264_qpel16_mc ## x ## y ## _ ## CPU; \
        c->put_h264_qpel_pixels_tab[1][x + y * 4] = put_h264_qpel8_mc  ## x ## y ## _ ## CPU; \
        c->avg_h264_qpel_pixels_tab[0][x + y * 4] = avg_h264_qpel16_mc ## x ## y ## _ ## CPU; \
        c->avg_h264_qpel_pixels_tab[1][x + y * 4] = avg_h264_qpel8_mc  ## x ## y ## _ ## CPU; \
    } while (0)

/* Same as above for the 10-bit (ff_-prefixed assembly) entry points. */
#define H264_QPEL_FUNCS_10(x, y, CPU)                                        \
    do {                                                                     \
        c->put_h264_qpel_pixels_tab[0][x + y * 4] = ff_put_h264_qpel16_mc ## x ## y ## _10_ ## CPU; \
        c->put_h264_qpel_pixels_tab[1][x + y * 4] = ff_put_h264_qpel8_mc  ## x ## y ## _10_ ## CPU; \
        c->avg_h264_qpel_pixels_tab[0][x + y * 4] = ff_avg_h264_qpel16_mc ## x ## y ## _10_ ## CPU; \
        c->avg_h264_qpel_pixels_tab[1][x + y * 4] = ff_avg_h264_qpel8_mc  ## x ## y ## _10_ ## CPU; \
    } while (0)
407 if (!high_bit_depth) {
411 c->avg_h264_qpel_pixels_tab[2][0] = ff_avg_pixels4_mmxext;
419 if (!high_bit_depth) {
448 if (!high_bit_depth) {