29 #define IDCT_ADD_FUNC(NUM, DEPTH, OPT) \
30 void ff_h264_idct ## NUM ## _add_ ## DEPTH ## _ ## OPT(uint8_t *dst, \
/* Declares the prototype of an assembler IDCT-add function that handles a
 * group of blocks in one call (e.g. idct_add16, idct8_add4). NUM is the
 * transform-size tag, REP the repetition suffix ("16", "8_add4", ...),
 * DEPTH the bit depth and OPT the SIMD instruction-set suffix. nnzc is the
 * per-sub-block non-zero-coefficient-count table (5 * 8 entries here;
 * presumably scan8-indexed as in h264dsp.h -- confirm against that header). */
#define IDCT_ADD_REP_FUNC(NUM, REP, DEPTH, OPT) \
void ff_h264_idct ## NUM ## _add ## REP ## _ ## DEPTH ## _ ## OPT \
    (uint8_t *dst, const int *block_offset, \
     int16_t *block, int stride, const uint8_t nnzc[5 * 8]);
/* Same as IDCT_ADD_REP_FUNC, but for functions that take an array of
 * destination plane pointers (uint8_t **dst, used by the idct_add8 chroma
 * variants) and the larger 15 * 8 nnzc table -- NOTE(review): sized for
 * 4:4:4 support, matching the add8 pointers in h264dsp.h; verify there. */
#define IDCT_ADD_REP_FUNC2(NUM, REP, DEPTH, OPT) \
void ff_h264_idct ## NUM ## _add ## REP ## _ ## DEPTH ## _ ## OPT \
    (uint8_t **dst, const int *block_offset, \
     int16_t *block, int stride, const uint8_t nnzc[15 * 8]);
87 int bidir,
int edges,
int step,
88 int mask_mv0,
int mask_mv1,
int field);
90 #define LF_FUNC(DIR, TYPE, DEPTH, OPT) \
91 void ff_deblock_ ## DIR ## _ ## TYPE ## _ ## DEPTH ## _ ## OPT(uint8_t *pix, \
96 #define LF_IFUNC(DIR, TYPE, DEPTH, OPT) \
97 void ff_deblock_ ## DIR ## _ ## TYPE ## _ ## DEPTH ## _ ## OPT(uint8_t *pix, \
/* Instantiates prototypes for the complete set of deblocking loop-filter
 * functions at one bit depth, for both sse2 and avx: horizontal (h) and
 * vertical (v) direction, luma / chroma / chroma422 planes, plus the
 * *_intra variants (declared via LF_IFUNC, which omits the tc0 argument).
 * The 'type' parameter is unused in this expansion (historical). */
#define LF_FUNCS(type, depth) \
LF_FUNC(h, luma, depth, sse2) \
LF_IFUNC(h, luma_intra, depth, sse2) \
LF_FUNC(v, luma, depth, sse2) \
LF_IFUNC(v, luma_intra, depth, sse2) \
LF_FUNC(h, chroma, depth, sse2) \
LF_IFUNC(h, chroma_intra, depth, sse2) \
LF_FUNC(h, chroma422, depth, sse2) \
LF_IFUNC(h, chroma422_intra, depth, sse2) \
LF_FUNC(v, chroma, depth, sse2) \
LF_IFUNC(v, chroma_intra, depth, sse2) \
LF_FUNC(h, luma, depth, avx) \
LF_IFUNC(h, luma_intra, depth, avx) \
LF_FUNC(v, luma, depth, avx) \
LF_IFUNC(v, luma_intra, depth, avx) \
LF_FUNC(h, chroma, depth, avx) \
LF_IFUNC(h, chroma_intra, depth, avx) \
LF_FUNC(h, chroma422, depth, avx) \
LF_IFUNC(h, chroma422_intra, depth, avx) \
LF_FUNC(v, chroma, depth, avx) \
LF_IFUNC(v, chroma_intra, depth, avx)
/* Declares the prototype of an 8-bit weighted-prediction function for a
 * block of width W pixels, implemented with instruction set OPT.
 * Matches the weight_pixels_tab function-pointer signature. */
#define H264_WEIGHT(W, OPT) \
void ff_h264_weight_ ## W ## _ ## OPT(uint8_t *dst, ptrdiff_t stride, \
                                      int height, int log2_denom, \
                                      int weight, int offset);
/* Declares the prototype of an 8-bit bidirectional weighted-prediction
 * (biweight) function for block width W and instruction set OPT; weightd
 * and weights are the per-reference (dst/src) weights. Matches the
 * biweight_pixels_tab function-pointer signature. */
#define H264_BIWEIGHT(W, OPT) \
void ff_h264_biweight_ ## W ## _ ## OPT(uint8_t *dst, uint8_t *src, \
                                        ptrdiff_t stride, int height, \
                                        int log2_denom, int weightd, \
                                        int weights, int offset);
/* Declares both the weight and biweight mmxext prototypes for width W. */
#define H264_BIWEIGHT_MMX(W) \
H264_WEIGHT(W, mmxext) \
H264_BIWEIGHT(W, mmxext)
/* Declares the sse2 weight/biweight prototypes for width W, plus an ssse3
 * biweight variant (no ssse3 plain-weight implementation exists). */
#define H264_BIWEIGHT_SSE(W) \
H264_WEIGHT(W, sse2) \
H264_BIWEIGHT(W, sse2) \
H264_BIWEIGHT(W, ssse3)
157 #define H264_WEIGHT_10(W, DEPTH, OPT) \
158 void ff_h264_weight_ ## W ## _ ## DEPTH ## _ ## OPT(uint8_t *dst, \
165 #define H264_BIWEIGHT_10(W, DEPTH, OPT) \
166 void ff_h264_biweight_ ## W ## _ ## DEPTH ## _ ## OPT(uint8_t *dst, \
/* Declares the high-bit-depth (DEPTH, e.g. 10) weight and biweight
 * prototypes for width W, for both the sse2 and sse4 instruction sets. */
#define H264_BIWEIGHT_10_SSE(W, DEPTH) \
H264_WEIGHT_10(W, DEPTH, sse2) \
H264_WEIGHT_10(W, DEPTH, sse4) \
H264_BIWEIGHT_10(W, DEPTH, sse2) \
H264_BIWEIGHT_10(W, DEPTH, sse4)
186 const
int chroma_format_idc)
195 if (chroma_format_idc <= 1) {
197 c->idct_add8 = ff_h264_idct_add8_422_8_mmx;
201 c->idct8_dc_add = ff_h264_idct8_dc_add_8_mmxext;
203 c->weight_pixels_tab[2] = ff_h264_weight_4_mmxext;
205 c->biweight_pixels_tab[2] = ff_h264_biweight_4_mmxext;
208 c->idct8_add = ff_h264_idct8_add_8_sse2;
210 c->idct_add16 = ff_h264_idct_add16_8_sse2;
211 c->idct8_add4 = ff_h264_idct8_add4_8_sse2;
212 if (chroma_format_idc <= 1)
213 c->idct_add8 = ff_h264_idct_add8_8_sse2;
214 c->idct_add16intra = ff_h264_idct_add16intra_8_sse2;
217 c->weight_pixels_tab[0] = ff_h264_weight_16_sse2;
218 c->weight_pixels_tab[1] = ff_h264_weight_8_sse2;
220 c->biweight_pixels_tab[0] = ff_h264_biweight_16_sse2;
221 c->biweight_pixels_tab[1] = ff_h264_biweight_8_sse2;
223 c->v_loop_filter_luma = ff_deblock_v_luma_8_sse2;
224 c->h_loop_filter_luma = ff_deblock_h_luma_8_sse2;
225 c->v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_sse2;
226 c->h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_sse2;
229 c->h_loop_filter_luma_mbaff = ff_deblock_h_luma_mbaff_8_sse2;
232 c->v_loop_filter_chroma = ff_deblock_v_chroma_8_sse2;
233 c->v_loop_filter_chroma_intra = ff_deblock_v_chroma_intra_8_sse2;
234 if (chroma_format_idc <= 1) {
235 c->h_loop_filter_chroma = ff_deblock_h_chroma_8_sse2;
236 c->h_loop_filter_chroma_intra = ff_deblock_h_chroma_intra_8_sse2;
238 c->h_loop_filter_chroma = ff_deblock_h_chroma422_8_sse2;
239 c->h_loop_filter_chroma_intra = ff_deblock_h_chroma422_intra_8_sse2;
242 c->idct_add = ff_h264_idct_add_8_sse2;
243 c->idct_dc_add = ff_h264_idct_dc_add_8_sse2;
246 c->biweight_pixels_tab[0] = ff_h264_biweight_16_ssse3;
247 c->biweight_pixels_tab[1] = ff_h264_biweight_8_ssse3;
250 c->v_loop_filter_luma = ff_deblock_v_luma_8_avx;
251 c->h_loop_filter_luma = ff_deblock_h_luma_8_avx;
252 c->v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_avx;
253 c->h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_avx;
255 c->h_loop_filter_luma_mbaff = ff_deblock_h_luma_mbaff_8_avx;
258 c->v_loop_filter_chroma = ff_deblock_v_chroma_8_avx;
259 c->v_loop_filter_chroma_intra = ff_deblock_v_chroma_intra_8_avx;
260 if (chroma_format_idc <= 1) {
261 c->h_loop_filter_chroma = ff_deblock_h_chroma_8_avx;
262 c->h_loop_filter_chroma_intra = ff_deblock_h_chroma_intra_8_avx;
264 c->h_loop_filter_chroma = ff_deblock_h_chroma422_8_avx;
265 c->h_loop_filter_chroma_intra = ff_deblock_h_chroma422_intra_8_avx;
268 c->idct_add = ff_h264_idct_add_8_avx;
269 c->idct_dc_add = ff_h264_idct_dc_add_8_avx;
273 c->idct_dc_add = ff_h264_idct_dc_add_10_mmxext;
276 c->idct_add = ff_h264_idct_add_10_sse2;
277 c->idct8_dc_add = ff_h264_idct8_dc_add_10_sse2;
279 c->idct_add16 = ff_h264_idct_add16_10_sse2;
280 if (chroma_format_idc <= 1) {
281 c->idct_add8 = ff_h264_idct_add8_10_sse2;
283 c->idct_add8 = ff_h264_idct_add8_422_10_sse2;
285 c->idct_add16intra = ff_h264_idct_add16intra_10_sse2;
286 #if HAVE_ALIGNED_STACK
287 c->idct8_add = ff_h264_idct8_add_10_sse2;
288 c->idct8_add4 = ff_h264_idct8_add4_10_sse2;
291 c->weight_pixels_tab[0] = ff_h264_weight_16_10_sse2;
292 c->weight_pixels_tab[1] = ff_h264_weight_8_10_sse2;
293 c->weight_pixels_tab[2] = ff_h264_weight_4_10_sse2;
295 c->biweight_pixels_tab[0] = ff_h264_biweight_16_10_sse2;
296 c->biweight_pixels_tab[1] = ff_h264_biweight_8_10_sse2;
297 c->biweight_pixels_tab[2] = ff_h264_biweight_4_10_sse2;
299 c->v_loop_filter_chroma = ff_deblock_v_chroma_10_sse2;
300 c->v_loop_filter_chroma_intra = ff_deblock_v_chroma_intra_10_sse2;
301 if (chroma_format_idc <= 1) {
302 c->h_loop_filter_chroma = ff_deblock_h_chroma_10_sse2;
304 c->h_loop_filter_chroma = ff_deblock_h_chroma422_10_sse2;
306 c->v_loop_filter_luma = ff_deblock_v_luma_10_sse2;
307 c->h_loop_filter_luma = ff_deblock_h_luma_10_sse2;
308 c->v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_sse2;
309 c->h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_sse2;
312 c->weight_pixels_tab[0] = ff_h264_weight_16_10_sse4;
313 c->weight_pixels_tab[1] = ff_h264_weight_8_10_sse4;
314 c->weight_pixels_tab[2] = ff_h264_weight_4_10_sse4;
316 c->biweight_pixels_tab[0] = ff_h264_biweight_16_10_sse4;
317 c->biweight_pixels_tab[1] = ff_h264_biweight_8_10_sse4;
318 c->biweight_pixels_tab[2] = ff_h264_biweight_4_10_sse4;
322 c->idct_add = ff_h264_idct_add_10_avx;
323 c->idct8_dc_add = ff_h264_idct8_dc_add_10_avx;
325 c->idct_add16 = ff_h264_idct_add16_10_avx;
326 if (chroma_format_idc <= 1) {
327 c->idct_add8 = ff_h264_idct_add8_10_avx;
329 c->idct_add8 = ff_h264_idct_add8_422_10_avx;
331 c->idct_add16intra = ff_h264_idct_add16intra_10_avx;
332 #if HAVE_ALIGNED_STACK
333 c->idct8_add = ff_h264_idct8_add_10_avx;
334 c->idct8_add4 = ff_h264_idct8_add4_10_avx;
337 c->v_loop_filter_chroma = ff_deblock_v_chroma_10_avx;
338 c->v_loop_filter_chroma_intra = ff_deblock_v_chroma_intra_10_avx;
339 if (chroma_format_idc <= 1) {
340 c->h_loop_filter_chroma = ff_deblock_h_chroma_10_avx;
342 c->h_loop_filter_chroma = ff_deblock_h_chroma422_10_avx;
344 c->v_loop_filter_luma = ff_deblock_v_luma_10_avx;
345 c->h_loop_filter_luma = ff_deblock_h_luma_10_avx;
346 c->v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_avx;
347 c->h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_avx;