00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
/*
 * PIXOP2(OPNAME, OP) expands to the family of packed pixel-averaging
 * helpers used by the half-/quarter-pel motion compensation code.
 *
 * Building blocks (defined elsewhere in this file / its headers):
 *   LP(p) / LPC(p)            aligned 32-bit store / load at p
 *   AV_RN32(p)                unaligned 32-bit load at p
 *   rnd_avg32 / no_rnd_avg32  byte-wise average of two packed words,
 *                             with / without rounding
 *   UNPACK + rnd_PACK / no_rnd_PACK
 *                             byte-wise average of four packed words
 *   OP(lhs, rhs)              plain store ("put") or average-into ("avg")
 *
 * Suffix conventions:
 *   _l2 / _l4   average of two / four source blocks
 *   _aligned    all sources 32-bit aligned
 *   _aligned2   src1 may be unaligned, src2 aligned
 *   _aligned1   src2 may be unaligned (implemented by swapping the
 *               arguments of the matching _aligned2 variant)
 *   _aligned0   (_l4 only) src1 may be unaligned, src2..src4 aligned
 *
 * Fix vs. previous revision: in OPNAME##_pixels16_l4_aligned and
 * OPNAME##_pixels16_l4_aligned0 the second 4-byte chunk was stored to
 * dst+8 instead of dst+4, leaving dst[4..7] unwritten and clobbering
 * dst[8..11] twice (the no_rnd variants show the intended pattern).
 */
#define PIXOP2(OPNAME, OP) \
\
static inline void OPNAME ## _pixels4_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
    do {\
        OP(LP(dst  ),rnd_avg32(LPC(src1  ),LPC(src2  )) ); \
        src1+=src_stride1; \
        src2+=src_stride2; \
        dst+=dst_stride; \
    } while(--h); \
}\
\
static inline void OPNAME ## _pixels4_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
    do {\
        OP(LP(dst  ),rnd_avg32(AV_RN32(src1  ),LPC(src2  )) ); \
        src1+=src_stride1; \
        src2+=src_stride2; \
        dst+=dst_stride; \
    } while(--h); \
}\
\
static inline void OPNAME ## _no_rnd_pixels16_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
    do {\
        OP(LP(dst   ),no_rnd_avg32(AV_RN32(src1   ),LPC(src2   )) ); \
        OP(LP(dst+4 ),no_rnd_avg32(AV_RN32(src1+4 ),LPC(src2+4 )) ); \
        OP(LP(dst+8 ),no_rnd_avg32(AV_RN32(src1+8 ),LPC(src2+8 )) ); \
        OP(LP(dst+12),no_rnd_avg32(AV_RN32(src1+12),LPC(src2+12)) ); \
        src1+=src_stride1; \
        src2+=src_stride2; \
        dst+=dst_stride; \
    } while(--h); \
}\
\
static inline void OPNAME ## _pixels16_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
    do {\
        OP(LP(dst   ),rnd_avg32(AV_RN32(src1   ),LPC(src2   )) ); \
        OP(LP(dst+4 ),rnd_avg32(AV_RN32(src1+4 ),LPC(src2+4 )) ); \
        OP(LP(dst+8 ),rnd_avg32(AV_RN32(src1+8 ),LPC(src2+8 )) ); \
        OP(LP(dst+12),rnd_avg32(AV_RN32(src1+12),LPC(src2+12)) ); \
        src1+=src_stride1; \
        src2+=src_stride2; \
        dst+=dst_stride; \
    } while(--h); \
}\
\
static inline void OPNAME ## _no_rnd_pixels8_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
    do { \
        OP(LP(dst  ),no_rnd_avg32(AV_RN32(src1  ),LPC(src2  )) ); \
        OP(LP(dst+4),no_rnd_avg32(AV_RN32(src1+4),LPC(src2+4)) ); \
        src1+=src_stride1; \
        src2+=src_stride2; \
        dst+=dst_stride; \
    } while(--h); \
}\
\
static inline void OPNAME ## _pixels8_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
    do {\
        OP(LP(dst  ),rnd_avg32(AV_RN32(src1  ),LPC(src2  )) ); \
        OP(LP(dst+4),rnd_avg32(AV_RN32(src1+4),LPC(src2+4)) ); \
        src1+=src_stride1; \
        src2+=src_stride2; \
        dst+=dst_stride; \
    } while(--h); \
}\
\
static inline void OPNAME ## _no_rnd_pixels8_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
    do {\
        OP(LP(dst  ),no_rnd_avg32(LPC(src1  ),LPC(src2  )) ); \
        OP(LP(dst+4),no_rnd_avg32(LPC(src1+4),LPC(src2+4)) ); \
        src1+=src_stride1; \
        src2+=src_stride2; \
        dst+=dst_stride; \
    } while(--h); \
}\
\
static inline void OPNAME ## _pixels8_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
    do {\
        OP(LP(dst  ),rnd_avg32(LPC(src1  ),LPC(src2  )) ); \
        OP(LP(dst+4),rnd_avg32(LPC(src1+4),LPC(src2+4)) ); \
        src1+=src_stride1; \
        src2+=src_stride2; \
        dst+=dst_stride; \
    } while(--h); \
}\
\
static inline void OPNAME ## _no_rnd_pixels16_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
    do {\
        OP(LP(dst   ),no_rnd_avg32(LPC(src1   ),LPC(src2   )) ); \
        OP(LP(dst+4 ),no_rnd_avg32(LPC(src1+4 ),LPC(src2+4 )) ); \
        OP(LP(dst+8 ),no_rnd_avg32(LPC(src1+8 ),LPC(src2+8 )) ); \
        OP(LP(dst+12),no_rnd_avg32(LPC(src1+12),LPC(src2+12)) ); \
        src1+=src_stride1; \
        src2+=src_stride2; \
        dst+=dst_stride; \
    } while(--h); \
}\
\
static inline void OPNAME ## _pixels16_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
    do {\
        OP(LP(dst   ),rnd_avg32(LPC(src1   ),LPC(src2   )) ); \
        OP(LP(dst+4 ),rnd_avg32(LPC(src1+4 ),LPC(src2+4 )) ); \
        OP(LP(dst+8 ),rnd_avg32(LPC(src1+8 ),LPC(src2+8 )) ); \
        OP(LP(dst+12),rnd_avg32(LPC(src1+12),LPC(src2+12)) ); \
        src1+=src_stride1; \
        src2+=src_stride2; \
        dst+=dst_stride; \
    } while(--h); \
}\
\
static inline void OPNAME ## _no_rnd_pixels16_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{ OPNAME ## _no_rnd_pixels16_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
\
static inline void OPNAME ## _pixels16_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{ OPNAME ## _pixels16_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
\
static inline void OPNAME ## _no_rnd_pixels8_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{ OPNAME ## _no_rnd_pixels8_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
\
static inline void OPNAME ## _pixels8_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{ OPNAME ## _pixels8_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
\
static inline void OPNAME ## _pixels8_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    do { \
        uint32_t a0,a1,a2,a3; \
        UNPACK(a0,a1,LPC(src1),LPC(src2)); \
        UNPACK(a2,a3,LPC(src3),LPC(src4)); \
        OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,LPC(src1+4),LPC(src2+4)); \
        UNPACK(a2,a3,LPC(src3+4),LPC(src4+4)); \
        OP(LP(dst+4),rnd_PACK(a0,a1,a2,a3)); \
        src1+=src_stride1;\
        src2+=src_stride2;\
        src3+=src_stride3;\
        src4+=src_stride4;\
        dst+=dst_stride;\
    } while(--h); \
} \
\
static inline void OPNAME ## _no_rnd_pixels8_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    do { \
        uint32_t a0,a1,a2,a3; \
        UNPACK(a0,a1,LPC(src1),LPC(src2)); \
        UNPACK(a2,a3,LPC(src3),LPC(src4)); \
        OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,LPC(src1+4),LPC(src2+4)); \
        UNPACK(a2,a3,LPC(src3+4),LPC(src4+4)); \
        OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
        src1+=src_stride1;\
        src2+=src_stride2;\
        src3+=src_stride3;\
        src4+=src_stride4;\
        dst+=dst_stride;\
    } while(--h); \
} \
\
static inline void OPNAME ## _pixels8_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    do { \
        uint32_t a0,a1,a2,a3; \
        UNPACK(a0,a1,AV_RN32(src1),LPC(src2)); \
        UNPACK(a2,a3,LPC(src3),LPC(src4)); \
        OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,AV_RN32(src1+4),LPC(src2+4)); \
        UNPACK(a2,a3,LPC(src3+4),LPC(src4+4)); \
        OP(LP(dst+4),rnd_PACK(a0,a1,a2,a3)); \
        src1+=src_stride1;\
        src2+=src_stride2;\
        src3+=src_stride3;\
        src4+=src_stride4;\
        dst+=dst_stride;\
    } while(--h); \
} \
\
static inline void OPNAME ## _no_rnd_pixels8_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    do { \
        uint32_t a0,a1,a2,a3; \
        UNPACK(a0,a1,AV_RN32(src1),LPC(src2)); \
        UNPACK(a2,a3,LPC(src3),LPC(src4)); \
        OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,AV_RN32(src1+4),LPC(src2+4)); \
        UNPACK(a2,a3,LPC(src3+4),LPC(src4+4)); \
        OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
        src1+=src_stride1;\
        src2+=src_stride2;\
        src3+=src_stride3;\
        src4+=src_stride4;\
        dst+=dst_stride;\
    } while(--h); \
} \
\
static inline void OPNAME ## _pixels16_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    do { \
        uint32_t a0,a1,a2,a3; \
        UNPACK(a0,a1,LPC(src1),LPC(src2)); \
        UNPACK(a2,a3,LPC(src3),LPC(src4)); \
        OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,LPC(src1+4),LPC(src2+4)); \
        UNPACK(a2,a3,LPC(src3+4),LPC(src4+4)); \
        OP(LP(dst+4),rnd_PACK(a0,a1,a2,a3)); /* was dst+8: dst[4..7] never written */ \
        UNPACK(a0,a1,LPC(src1+8),LPC(src2+8)); \
        UNPACK(a2,a3,LPC(src3+8),LPC(src4+8)); \
        OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,LPC(src1+12),LPC(src2+12)); \
        UNPACK(a2,a3,LPC(src3+12),LPC(src4+12)); \
        OP(LP(dst+12),rnd_PACK(a0,a1,a2,a3)); \
        src1+=src_stride1;\
        src2+=src_stride2;\
        src3+=src_stride3;\
        src4+=src_stride4;\
        dst+=dst_stride;\
    } while(--h); \
} \
\
static inline void OPNAME ## _no_rnd_pixels16_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    do { \
        uint32_t a0,a1,a2,a3; \
        UNPACK(a0,a1,LPC(src1),LPC(src2)); \
        UNPACK(a2,a3,LPC(src3),LPC(src4)); \
        OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,LPC(src1+4),LPC(src2+4)); \
        UNPACK(a2,a3,LPC(src3+4),LPC(src4+4)); \
        OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,LPC(src1+8),LPC(src2+8)); \
        UNPACK(a2,a3,LPC(src3+8),LPC(src4+8)); \
        OP(LP(dst+8),no_rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,LPC(src1+12),LPC(src2+12)); \
        UNPACK(a2,a3,LPC(src3+12),LPC(src4+12)); \
        OP(LP(dst+12),no_rnd_PACK(a0,a1,a2,a3)); \
        src1+=src_stride1;\
        src2+=src_stride2;\
        src3+=src_stride3;\
        src4+=src_stride4;\
        dst+=dst_stride;\
    } while(--h); \
} \
\
static inline void OPNAME ## _pixels16_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    do { \
        uint32_t a0,a1,a2,a3; \
        UNPACK(a0,a1,AV_RN32(src1),LPC(src2)); \
        UNPACK(a2,a3,LPC(src3),LPC(src4)); \
        OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,AV_RN32(src1+4),LPC(src2+4)); \
        UNPACK(a2,a3,LPC(src3+4),LPC(src4+4)); \
        OP(LP(dst+4),rnd_PACK(a0,a1,a2,a3)); /* was dst+8: dst[4..7] never written */ \
        UNPACK(a0,a1,AV_RN32(src1+8),LPC(src2+8)); \
        UNPACK(a2,a3,LPC(src3+8),LPC(src4+8)); \
        OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,AV_RN32(src1+12),LPC(src2+12)); \
        UNPACK(a2,a3,LPC(src3+12),LPC(src4+12)); \
        OP(LP(dst+12),rnd_PACK(a0,a1,a2,a3)); \
        src1+=src_stride1;\
        src2+=src_stride2;\
        src3+=src_stride3;\
        src4+=src_stride4;\
        dst+=dst_stride;\
    } while(--h); \
} \
\
static inline void OPNAME ## _no_rnd_pixels16_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    do { \
        uint32_t a0,a1,a2,a3; \
        UNPACK(a0,a1,AV_RN32(src1),LPC(src2)); \
        UNPACK(a2,a3,LPC(src3),LPC(src4)); \
        OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,AV_RN32(src1+4),LPC(src2+4)); \
        UNPACK(a2,a3,LPC(src3+4),LPC(src4+4)); \
        OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,AV_RN32(src1+8),LPC(src2+8)); \
        UNPACK(a2,a3,LPC(src3+8),LPC(src4+8)); \
        OP(LP(dst+8),no_rnd_PACK(a0,a1,a2,a3)); \
        UNPACK(a0,a1,AV_RN32(src1+12),LPC(src2+12)); \
        UNPACK(a2,a3,LPC(src3+12),LPC(src4+12)); \
        OP(LP(dst+12),no_rnd_PACK(a0,a1,a2,a3)); \
        src1+=src_stride1;\
        src2+=src_stride2;\
        src3+=src_stride3;\
        src4+=src_stride4;\
        dst+=dst_stride;\
    } while(--h); \
} \

/* OP bindings for PIXOP2: "avg" averages the computed word into the
 * destination with rounding (rnd_avg32), "put" plain-stores it.  The
 * macros are #undef'd immediately after instantiation so the names can
 * be rebound for the chroma-MC family below. */
#define op_avg(a, b) a = rnd_avg32(a,b)
#define op_put(a, b) a = b

PIXOP2(avg, op_avg)
PIXOP2(put, op_put)
#undef op_avg
#undef op_put
00322
/* Rounding byte averages of 2 and 4 samples ((sum + half) >> log2(n)).
 * Arguments are fully parenthesized so expression arguments (e.g.
 * avg2(x<<1, y)) evaluate with the intended precedence; each argument
 * is still evaluated exactly once. */
#define avg2(a,b) (((a)+(b)+1)>>1)
#define avg4(a,b,c,d) (((a)+(b)+(c)+(d)+2)>>2)
00325
00326
/* One-pass bilinear GMC ("gmc1") for an 8-pixel-wide block of height h:
 * each output sample is the (x16,y16)/16-weighted blend of the 2x2 source
 * neighbourhood, i.e.
 *   dst[x] = (A*src[x] + B*src[x+1] + C*src[x+stride] + D*src[x+stride+1]
 *             + rounder) >> 8
 * with A+B+C+D == 256. */
static void gmc1_c(uint8_t *dst, uint8_t *src, int stride, int h, int x16, int y16, int rounder)
{
    const int A = (16 - x16) * (16 - y16);
    const int B = (     x16) * (16 - y16);
    const int C = (16 - x16) * (     y16);
    const int D = (     x16) * (     y16);

    do {
        const uint8_t *top = src;          /* current source row    */
        const uint8_t *bot = src + stride; /* row below             */
        int x;

        for (x = 0; x < 8; x++)
            dst[x] = (A * top[x] + B * top[x + 1] +
                      C * bot[x] + D * bot[x + 1] + rounder) >> 8;

        dst += stride;
        src += stride;
    } while (--h);
}
00359
/**
 * Affine global motion compensation for an 8-pixel-wide block of height h.
 *
 * (ox,oy) is the fixed-point source coordinate of the block's top-left
 * sample; the coordinate advances by (dxx,dyx) per output column and by
 * (dxy,dyy) per output row.  After ">>16" the coordinate is in
 * 1/(1<<shift)-pel units: the low `shift` bits are the bilinear fractions
 * (frac_x, frac_y) and the remaining high bits the integer sample
 * position.  r is the rounding constant added before the final
 * ">>(shift*2)".  Out-of-range coordinates are clamped per axis to the
 * last valid sample, so border pixels degrade to 1-D interpolation or a
 * plain nearest-sample copy.
 */
static void gmc_c(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
                  int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
{
    int y, vx, vy;
    const int s= 1<<shift;  /* sub-pel scale; s-1 masks the fraction bits */

    /* convert to the largest valid sample index for the clip/range tests */
    width--;
    height--;

    for(y=0; y<h; y++){
        int x;

        vx= ox;
        vy= oy;
        for(x=0; x<8; x++){
            int src_x, src_y, frac_x, frac_y, index;

            src_x= vx>>16;
            src_y= vy>>16;
            frac_x= src_x&(s-1);
            frac_y= src_y&(s-1);
            src_x>>=shift;
            src_y>>=shift;

            /* unsigned compare folds the x<0 and x>width tests into one */
            if((unsigned)src_x < width){
                if((unsigned)src_y < height){
                    /* fully inside: 2-D bilinear blend of the 2x2 patch */
                    index= src_x + src_y*stride;
                    dst[y*stride + x]= ( (  src[index         ]*(s-frac_x)
                                          + src[index       +1]*   frac_x )*(s-frac_y)
                                       + (  src[index+stride  ]*(s-frac_x)
                                          + src[index+stride+1]*   frac_x )*   frac_y
                                       + r)>>(shift*2);
                }else{
                    /* y clamped: horizontal-only interpolation */
                    index= src_x + av_clip(src_y, 0, height)*stride;
                    dst[y*stride + x]= ( (  src[index         ]*(s-frac_x)
                                          + src[index       +1]*   frac_x )*s
                                       + r)>>(shift*2);
                }
            }else{
                if((unsigned)src_y < height){
                    /* x clamped: vertical-only interpolation */
                    index= av_clip(src_x, 0, width) + src_y*stride;
                    dst[y*stride + x]= ( (  src[index         ]*(s-frac_y)
                                          + src[index+stride  ]*   frac_y )*s
                                       + r)>>(shift*2);
                }else{
                    /* both clamped: nearest edge sample */
                    index= av_clip(src_x, 0, width) + av_clip(src_y, 0, height)*stride;
                    dst[y*stride + x]= src[index         ];
                }
            }

            vx+= dxx;
            vy+= dyx;
        }
        ox += dxy;
        oy += dyy;
    }
}
/*
 * H264_CHROMA_MC(OPNAME, OP) expands to 2-, 4- and 8-pixel-wide bilinear
 * chroma interpolators for sub-pel offset (x,y), 0 <= x,y < 8.
 * A/B/C/D are the bilinear weights of the four neighbouring samples
 * (they sum to 64); OP folds the 64-scaled weighted sum back into dst
 * (see the op_put/op_avg bindings that follow the macro).  The unrolled
 * body ping-pongs t0/t1 (top row) and t2/t3 (bottom row) so each source
 * sample is loaded exactly once per output row.
 */
#define H264_CHROMA_MC(OPNAME, OP)\
static void OPNAME ## h264_chroma_mc2_sh4(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y){\
    const int A=(8-x)*(8-y);\
    const int B=(  x)*(8-y);\
    const int C=(8-x)*(  y);\
    const int D=(  x)*(  y);\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
    \
    do {\
        int t0,t1,t2,t3; \
        uint8_t *s0 = src; \
        uint8_t *s1 = src+stride; \
        t0 = *s0++; t2 = *s1++; \
        t1 = *s0++; t3 = *s1++; \
        OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
        dst+= stride;\
        src+= stride;\
    }while(--h);\
}\
\
static void OPNAME ## h264_chroma_mc4_sh4(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y){\
    const int A=(8-x)*(8-y);\
    const int B=(  x)*(8-y);\
    const int C=(8-x)*(  y);\
    const int D=(  x)*(  y);\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
    \
    do {\
        int t0,t1,t2,t3; \
        uint8_t *s0 = src; \
        uint8_t *s1 = src+stride; \
        t0 = *s0++; t2 = *s1++; \
        t1 = *s0++; t3 = *s1++; \
        OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
        t1 = *s0++; t3 = *s1++; \
        OP(dst[2], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[3], (A*t1 + B*t0 + C*t3 + D*t2));\
        dst+= stride;\
        src+= stride;\
    }while(--h);\
}\
\
static void OPNAME ## h264_chroma_mc8_sh4(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y){\
    const int A=(8-x)*(8-y);\
    const int B=(  x)*(8-y);\
    const int C=(8-x)*(  y);\
    const int D=(  x)*(  y);\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
    \
    do {\
        int t0,t1,t2,t3; \
        uint8_t *s0 = src; \
        uint8_t *s1 = src+stride; \
        t0 = *s0++; t2 = *s1++; \
        t1 = *s0++; t3 = *s1++; \
        OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
        t1 = *s0++; t3 = *s1++; \
        OP(dst[2], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[3], (A*t1 + B*t0 + C*t3 + D*t2));\
        t1 = *s0++; t3 = *s1++; \
        OP(dst[4], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[5], (A*t1 + B*t0 + C*t3 + D*t2));\
        t1 = *s0++; t3 = *s1++; \
        OP(dst[6], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[7], (A*t1 + B*t0 + C*t3 + D*t2));\
        dst+= stride;\
        src+= stride;\
    }while(--h);\
}
00499
/* OP bindings for H264_CHROMA_MC: the weighted sum arrives scaled by 64,
 * so ((b) + 32) >> 6 rounds it back to pixel range; "avg" then blends it
 * with the existing destination pixel with rounding. */
#define op_avg(a, b) a = (((a)+(((b) + 32)>>6)+1)>>1)
#define op_put(a, b) a = (((b) + 32)>>6)

H264_CHROMA_MC(put_       , op_put)
H264_CHROMA_MC(avg_       , op_avg)
#undef op_avg
#undef op_put
00507
00508 #define QPEL_MC(r, OPNAME, RND, OP) \
00509 static void OPNAME ## mpeg4_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
00510 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
00511 do {\
00512 uint8_t *s = src; \
00513 int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
00514 src0= *s++;\
00515 src1= *s++;\
00516 src2= *s++;\
00517 src3= *s++;\
00518 src4= *s++;\
00519 OP(dst[0], (src0+src1)*20 - (src0+src2)*6 + (src1+src3)*3 - (src2+src4));\
00520 src5= *s++;\
00521 OP(dst[1], (src1+src2)*20 - (src0+src3)*6 + (src0+src4)*3 - (src1+src5));\
00522 src6= *s++;\
00523 OP(dst[2], (src2+src3)*20 - (src1+src4)*6 + (src0+src5)*3 - (src0+src6));\
00524 src7= *s++;\
00525 OP(dst[3], (src3+src4)*20 - (src2+src5)*6 + (src1+src6)*3 - (src0+src7));\
00526 src8= *s++;\
00527 OP(dst[4], (src4+src5)*20 - (src3+src6)*6 + (src2+src7)*3 - (src1+src8));\
00528 OP(dst[5], (src5+src6)*20 - (src4+src7)*6 + (src3+src8)*3 - (src2+src8));\
00529 OP(dst[6], (src6+src7)*20 - (src5+src8)*6 + (src4+src8)*3 - (src3+src7));\
00530 OP(dst[7], (src7+src8)*20 - (src6+src8)*6 + (src5+src7)*3 - (src4+src6));\
00531 dst+=dstStride;\
00532 src+=srcStride;\
00533 }while(--h);\
00534 }\
00535 \
00536 static void OPNAME ## mpeg4_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
00537 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
00538 int w=8;\
00539 do{\
00540 uint8_t *s = src, *d=dst;\
00541 int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
00542 src0 = *s; s+=srcStride; \
00543 src1 = *s; s+=srcStride; \
00544 src2 = *s; s+=srcStride; \
00545 src3 = *s; s+=srcStride; \
00546 src4 = *s; s+=srcStride; \
00547 OP(*d, (src0+src1)*20 - (src0+src2)*6 + (src1+src3)*3 - (src2+src4));d+=dstStride;\
00548 src5 = *s; s+=srcStride; \
00549 OP(*d, (src1+src2)*20 - (src0+src3)*6 + (src0+src4)*3 - (src1+src5));d+=dstStride;\
00550 src6 = *s; s+=srcStride; \
00551 OP(*d, (src2+src3)*20 - (src1+src4)*6 + (src0+src5)*3 - (src0+src6));d+=dstStride;\
00552 src7 = *s; s+=srcStride; \
00553 OP(*d, (src3+src4)*20 - (src2+src5)*6 + (src1+src6)*3 - (src0+src7));d+=dstStride;\
00554 src8 = *s; \
00555 OP(*d, (src4+src5)*20 - (src3+src6)*6 + (src2+src7)*3 - (src1+src8));d+=dstStride;\
00556 OP(*d, (src5+src6)*20 - (src4+src7)*6 + (src3+src8)*3 - (src2+src8));d+=dstStride;\
00557 OP(*d, (src6+src7)*20 - (src5+src8)*6 + (src4+src8)*3 - (src3+src7));d+=dstStride;\
00558 OP(*d, (src7+src8)*20 - (src6+src8)*6 + (src5+src7)*3 - (src4+src6));\
00559 dst++;\
00560 src++;\
00561 }while(--w);\
00562 }\
00563 \
00564 static void OPNAME ## mpeg4_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
00565 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
00566 do {\
00567 uint8_t *s = src;\
00568 int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
00569 int src9,src10,src11,src12,src13,src14,src15,src16;\
00570 src0= *s++;\
00571 src1= *s++;\
00572 src2= *s++;\
00573 src3= *s++;\
00574 src4= *s++;\
00575 OP(dst[ 0], (src0 +src1 )*20 - (src0 +src2 )*6 + (src1 +src3 )*3 - (src2 +src4 ));\
00576 src5= *s++;\
00577 OP(dst[ 1], (src1 +src2 )*20 - (src0 +src3 )*6 + (src0 +src4 )*3 - (src1 +src5 ));\
00578 src6= *s++;\
00579 OP(dst[ 2], (src2 +src3 )*20 - (src1 +src4 )*6 + (src0 +src5 )*3 - (src0 +src6 ));\
00580 src7= *s++;\
00581 OP(dst[ 3], (src3 +src4 )*20 - (src2 +src5 )*6 + (src1 +src6 )*3 - (src0 +src7 ));\
00582 src8= *s++;\
00583 OP(dst[ 4], (src4 +src5 )*20 - (src3 +src6 )*6 + (src2 +src7 )*3 - (src1 +src8 ));\
00584 src9= *s++;\
00585 OP(dst[ 5], (src5 +src6 )*20 - (src4 +src7 )*6 + (src3 +src8 )*3 - (src2 +src9 ));\
00586 src10= *s++;\
00587 OP(dst[ 6], (src6 +src7 )*20 - (src5 +src8 )*6 + (src4 +src9 )*3 - (src3 +src10));\
00588 src11= *s++;\
00589 OP(dst[ 7], (src7 +src8 )*20 - (src6 +src9 )*6 + (src5 +src10)*3 - (src4 +src11));\
00590 src12= *s++;\
00591 OP(dst[ 8], (src8 +src9 )*20 - (src7 +src10)*6 + (src6 +src11)*3 - (src5 +src12));\
00592 src13= *s++;\
00593 OP(dst[ 9], (src9 +src10)*20 - (src8 +src11)*6 + (src7 +src12)*3 - (src6 +src13));\
00594 src14= *s++;\
00595 OP(dst[10], (src10+src11)*20 - (src9 +src12)*6 + (src8 +src13)*3 - (src7 +src14));\
00596 src15= *s++;\
00597 OP(dst[11], (src11+src12)*20 - (src10+src13)*6 + (src9 +src14)*3 - (src8 +src15));\
00598 src16= *s++;\
00599 OP(dst[12], (src12+src13)*20 - (src11+src14)*6 + (src10+src15)*3 - (src9 +src16));\
00600 OP(dst[13], (src13+src14)*20 - (src12+src15)*6 + (src11+src16)*3 - (src10+src16));\
00601 OP(dst[14], (src14+src15)*20 - (src13+src16)*6 + (src12+src16)*3 - (src11+src15));\
00602 OP(dst[15], (src15+src16)*20 - (src14+src16)*6 + (src13+src15)*3 - (src12+src14));\
00603 dst+=dstStride;\
00604 src+=srcStride;\
00605 }while(--h);\
00606 }\
00607 \
00608 static void OPNAME ## mpeg4_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
00609 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
00610 int w=16;\
00611 do {\
00612 uint8_t *s = src, *d=dst;\
00613 int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
00614 int src9,src10,src11,src12,src13,src14,src15,src16;\
00615 src0 = *s; s+=srcStride; \
00616 src1 = *s; s+=srcStride; \
00617 src2 = *s; s+=srcStride; \
00618 src3 = *s; s+=srcStride; \
00619 src4 = *s; s+=srcStride; \
00620 OP(*d, (src0 +src1 )*20 - (src0 +src2 )*6 + (src1 +src3 )*3 - (src2 +src4 ));d+=dstStride;\
00621 src5 = *s; s+=srcStride; \
00622 OP(*d, (src1 +src2 )*20 - (src0 +src3 )*6 + (src0 +src4 )*3 - (src1 +src5 ));d+=dstStride;\
00623 src6 = *s; s+=srcStride; \
00624 OP(*d, (src2 +src3 )*20 - (src1 +src4 )*6 + (src0 +src5 )*3 - (src0 +src6 ));d+=dstStride;\
00625 src7 = *s; s+=srcStride; \
00626 OP(*d, (src3 +src4 )*20 - (src2 +src5 )*6 + (src1 +src6 )*3 - (src0 +src7 ));d+=dstStride;\
00627 src8 = *s; s+=srcStride; \
00628 OP(*d, (src4 +src5 )*20 - (src3 +src6 )*6 + (src2 +src7 )*3 - (src1 +src8 ));d+=dstStride;\
00629 src9 = *s; s+=srcStride; \
00630 OP(*d, (src5 +src6 )*20 - (src4 +src7 )*6 + (src3 +src8 )*3 - (src2 +src9 ));d+=dstStride;\
00631 src10 = *s; s+=srcStride; \
00632 OP(*d, (src6 +src7 )*20 - (src5 +src8 )*6 + (src4 +src9 )*3 - (src3 +src10));d+=dstStride;\
00633 src11 = *s; s+=srcStride; \
00634 OP(*d, (src7 +src8 )*20 - (src6 +src9 )*6 + (src5 +src10)*3 - (src4 +src11));d+=dstStride;\
00635 src12 = *s; s+=srcStride; \
00636 OP(*d, (src8 +src9 )*20 - (src7 +src10)*6 + (src6 +src11)*3 - (src5 +src12));d+=dstStride;\
00637 src13 = *s; s+=srcStride; \
00638 OP(*d, (src9 +src10)*20 - (src8 +src11)*6 + (src7 +src12)*3 - (src6 +src13));d+=dstStride;\
00639 src14 = *s; s+=srcStride; \
00640 OP(*d, (src10+src11)*20 - (src9 +src12)*6 + (src8 +src13)*3 - (src7 +src14));d+=dstStride;\
00641 src15 = *s; s+=srcStride; \
00642 OP(*d, (src11+src12)*20 - (src10+src13)*6 + (src9 +src14)*3 - (src8 +src15));d+=dstStride;\
00643 src16 = *s; \
00644 OP(*d, (src12+src13)*20 - (src11+src14)*6 + (src10+src15)*3 - (src9 +src16));d+=dstStride;\
00645 OP(*d, (src13+src14)*20 - (src12+src15)*6 + (src11+src16)*3 - (src10+src16));d+=dstStride;\
00646 OP(*d, (src14+src15)*20 - (src13+src16)*6 + (src12+src16)*3 - (src11+src15));d+=dstStride;\
00647 OP(*d, (src15+src16)*20 - (src14+src16)*6 + (src13+src15)*3 - (src12+src14));\
00648 dst++;\
00649 src++;\
00650 }while(--w);\
00651 }\
00652 \
00653 static void OPNAME ## qpel8_mc00_sh4 (uint8_t *dst, uint8_t *src, int stride){\
00654 OPNAME ## pixels8_c(dst, src, stride, 8);\
00655 }\
00656 \
00657 static void OPNAME ## qpel8_mc10_sh4(uint8_t *dst, uint8_t *src, int stride){\
00658 uint8_t half[64];\
00659 put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
00660 OPNAME ## pixels8_l2_aligned2(dst, src, half, stride, stride, 8, 8);\
00661 }\
00662 \
00663 static void OPNAME ## qpel8_mc20_sh4(uint8_t *dst, uint8_t *src, int stride){\
00664 OPNAME ## mpeg4_qpel8_h_lowpass(dst, src, stride, stride, 8);\
00665 }\
00666 \
00667 static void OPNAME ## qpel8_mc30_sh4(uint8_t *dst, uint8_t *src, int stride){\
00668 uint8_t half[64];\
00669 put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
00670 OPNAME ## pixels8_l2_aligned2(dst, src+1, half, stride, stride, 8, 8);\
00671 }\
00672 \
00673 static void OPNAME ## qpel8_mc01_sh4(uint8_t *dst, uint8_t *src, int stride){\
00674 uint8_t full[16*9];\
00675 uint8_t half[64];\
00676 copy_block9(full, src, 16, stride, 9);\
00677 put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
00678 OPNAME ## pixels8_l2_aligned(dst, full, half, stride, 16, 8, 8);\
00679 }\
00680 \
00681 static void OPNAME ## qpel8_mc02_sh4(uint8_t *dst, uint8_t *src, int stride){\
00682 uint8_t full[16*9];\
00683 copy_block9(full, src, 16, stride, 9);\
00684 OPNAME ## mpeg4_qpel8_v_lowpass(dst, full, stride, 16);\
00685 }\
00686 \
00687 static void OPNAME ## qpel8_mc03_sh4(uint8_t *dst, uint8_t *src, int stride){\
00688 uint8_t full[16*9];\
00689 uint8_t half[64];\
00690 copy_block9(full, src, 16, stride, 9);\
00691 put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
00692 OPNAME ## pixels8_l2_aligned(dst, full+16, half, stride, 16, 8, 8);\
00693 }\
00694 static void OPNAME ## qpel8_mc11_sh4(uint8_t *dst, uint8_t *src, int stride){\
00695 uint8_t full[16*9];\
00696 uint8_t halfH[72];\
00697 uint8_t halfHV[64];\
00698 copy_block9(full, src, 16, stride, 9);\
00699 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
00700 put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
00701 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
00702 OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
00703 }\
00704 static void OPNAME ## qpel8_mc31_sh4(uint8_t *dst, uint8_t *src, int stride){\
00705 uint8_t full[16*9];\
00706 uint8_t halfH[72];\
00707 uint8_t halfHV[64];\
00708 copy_block9(full, src, 16, stride, 9);\
00709 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
00710 put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9);\
00711 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
00712 OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
00713 }\
00714 static void OPNAME ## qpel8_mc13_sh4(uint8_t *dst, uint8_t *src, int stride){\
00715 uint8_t full[16*9];\
00716 uint8_t halfH[72];\
00717 uint8_t halfHV[64];\
00718 copy_block9(full, src, 16, stride, 9);\
00719 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
00720 put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
00721 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
00722 OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8);\
00723 }\
00724 static void OPNAME ## qpel8_mc33_sh4(uint8_t *dst, uint8_t *src, int stride){\
00725 uint8_t full[16*9];\
00726 uint8_t halfH[72];\
00727 uint8_t halfHV[64];\
00728 copy_block9(full, src, 16, stride, 9);\
00729 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
00730 put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9);\
00731 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
00732 OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8);\
00733 }\
00734 static void OPNAME ## qpel8_mc21_sh4(uint8_t *dst, uint8_t *src, int stride){\
00735 uint8_t halfH[72];\
00736 uint8_t halfHV[64];\
00737 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
00738 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
00739 OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
00740 }\
00741 static void OPNAME ## qpel8_mc23_sh4(uint8_t *dst, uint8_t *src, int stride){\
00742 uint8_t halfH[72];\
00743 uint8_t halfHV[64];\
00744 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
00745 put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
00746 OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8);\
00747 }\
00748 static void OPNAME ## qpel8_mc12_sh4(uint8_t *dst, uint8_t *src, int stride){\
00749 uint8_t full[16*9];\
00750 uint8_t halfH[72];\
00751 copy_block9(full, src, 16, stride, 9);\
00752 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
00753 put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
00754 OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
00755 }\
00756 static void OPNAME ## qpel8_mc32_sh4(uint8_t *dst, uint8_t *src, int stride){\
00757 uint8_t full[16*9];\
00758 uint8_t halfH[72];\
00759 copy_block9(full, src, 16, stride, 9);\
00760 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
00761 put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9);\
00762 OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
00763 }\
00764 static void OPNAME ## qpel8_mc22_sh4(uint8_t *dst, uint8_t *src, int stride){\
00765 uint8_t halfH[72];\
00766 put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
00767 OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
00768 }\
00769 static void OPNAME ## qpel16_mc00_sh4 (uint8_t *dst, uint8_t *src, int stride){\
00770 OPNAME ## pixels16_c(dst, src, stride, 16);\
00771 }\
00772 \
00773 static void OPNAME ## qpel16_mc10_sh4(uint8_t *dst, uint8_t *src, int stride){\
00774 uint8_t half[256];\
00775 put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
00776 OPNAME ## pixels16_l2_aligned2(dst, src, half, stride, stride, 16, 16);\
00777 }\
00778 \
00779 static void OPNAME ## qpel16_mc20_sh4(uint8_t *dst, uint8_t *src, int stride){\
00780 OPNAME ## mpeg4_qpel16_h_lowpass(dst, src, stride, stride, 16);\
00781 }\
00782 \
00783 static void OPNAME ## qpel16_mc30_sh4(uint8_t *dst, uint8_t *src, int stride){\
00784 uint8_t half[256];\
00785 put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
00786 OPNAME ## pixels16_l2_aligned2(dst, src+1, half, stride, stride, 16, 16);\
00787 }\
00788 \
00789 static void OPNAME ## qpel16_mc01_sh4(uint8_t *dst, uint8_t *src, int stride){\
00790 uint8_t full[24*17];\
00791 uint8_t half[256];\
00792 copy_block17(full, src, 24, stride, 17);\
00793 put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
00794 OPNAME ## pixels16_l2_aligned(dst, full, half, stride, 24, 16, 16);\
00795 }\
00796 \
00797 static void OPNAME ## qpel16_mc02_sh4(uint8_t *dst, uint8_t *src, int stride){\
00798 uint8_t full[24*17];\
00799 copy_block17(full, src, 24, stride, 17);\
00800 OPNAME ## mpeg4_qpel16_v_lowpass(dst, full, stride, 24);\
00801 }\
00802 \
00803 static void OPNAME ## qpel16_mc03_sh4(uint8_t *dst, uint8_t *src, int stride){\
00804 uint8_t full[24*17];\
00805 uint8_t half[256];\
00806 copy_block17(full, src, 24, stride, 17);\
00807 put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
00808 OPNAME ## pixels16_l2_aligned(dst, full+24, half, stride, 24, 16, 16);\
00809 }\
00810 static void OPNAME ## qpel16_mc11_sh4(uint8_t *dst, uint8_t *src, int stride){\
00811 uint8_t full[24*17];\
00812 uint8_t halfH[272];\
00813 uint8_t halfHV[256];\
00814 copy_block17(full, src, 24, stride, 17);\
00815 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
00816 put ## RND ## pixels16_l2_aligned(halfH, halfH, full, 16, 16, 24, 17);\
00817 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
00818 OPNAME ## pixels16_l2_aligned(dst, halfH, halfHV, stride, 16, 16, 16);\
00819 }\
00820 static void OPNAME ## qpel16_mc31_sh4(uint8_t *dst, uint8_t *src, int stride){\
00821 uint8_t full[24*17];\
00822 uint8_t halfH[272];\
00823 uint8_t halfHV[256];\
00824 copy_block17(full, src, 24, stride, 17);\
00825 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
00826 put ## RND ## pixels16_l2_aligned1(halfH, halfH, full+1, 16, 16, 24, 17);\
00827 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
00828 OPNAME ## pixels16_l2_aligned(dst, halfH, halfHV, stride, 16, 16, 16);\
00829 }\
00830 static void OPNAME ## qpel16_mc13_sh4(uint8_t *dst, uint8_t *src, int stride){\
00831 uint8_t full[24*17];\
00832 uint8_t halfH[272];\
00833 uint8_t halfHV[256];\
00834 copy_block17(full, src, 24, stride, 17);\
00835 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
00836 put ## RND ## pixels16_l2_aligned(halfH, halfH, full, 16, 16, 24, 17);\
00837 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
00838 OPNAME ## pixels16_l2_aligned(dst, halfH+16, halfHV, stride, 16, 16, 16);\
00839 }\
00840 static void OPNAME ## qpel16_mc33_sh4(uint8_t *dst, uint8_t *src, int stride){\
00841 uint8_t full[24*17];\
00842 uint8_t halfH[272];\
00843 uint8_t halfHV[256];\
00844 copy_block17(full, src, 24, stride, 17);\
00845 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
00846 put ## RND ## pixels16_l2_aligned1(halfH, halfH, full+1, 16, 16, 24, 17);\
00847 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
00848 OPNAME ## pixels16_l2_aligned(dst, halfH+16, halfHV, stride, 16, 16, 16);\
00849 }\
00850 static void OPNAME ## qpel16_mc21_sh4(uint8_t *dst, uint8_t *src, int stride){\
00851 uint8_t halfH[272];\
00852 uint8_t halfHV[256];\
00853 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
00854 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
00855 OPNAME ## pixels16_l2_aligned(dst, halfH, halfHV, stride, 16, 16, 16);\
00856 }\
00857 static void OPNAME ## qpel16_mc23_sh4(uint8_t *dst, uint8_t *src, int stride){\
00858 uint8_t halfH[272];\
00859 uint8_t halfHV[256];\
00860 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
00861 put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
00862 OPNAME ## pixels16_l2_aligned(dst, halfH+16, halfHV, stride, 16, 16, 16);\
00863 }\
00864 static void OPNAME ## qpel16_mc12_sh4(uint8_t *dst, uint8_t *src, int stride){\
00865 uint8_t full[24*17];\
00866 uint8_t halfH[272];\
00867 copy_block17(full, src, 24, stride, 17);\
00868 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
00869 put ## RND ## pixels16_l2_aligned(halfH, halfH, full, 16, 16, 24, 17);\
00870 OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
00871 }\
00872 static void OPNAME ## qpel16_mc32_sh4(uint8_t *dst, uint8_t *src, int stride){\
00873 uint8_t full[24*17];\
00874 uint8_t halfH[272];\
00875 copy_block17(full, src, 24, stride, 17);\
00876 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
00877 put ## RND ## pixels16_l2_aligned1(halfH, halfH, full+1, 16, 16, 24, 17);\
00878 OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
00879 }\
00880 static void OPNAME ## qpel16_mc22_sh4(uint8_t *dst, uint8_t *src, int stride){\
00881 uint8_t halfH[272];\
00882 put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
00883 OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
00884 }
00885
/* Pixel store primitives plugged into QPEL_MC as its OP argument.
 * 'b' is a 6-tap filter sum; it is scaled/rounded with (+16)>>5 (rounding)
 * or (+15)>>5 (no-rounding variant) and clipped through cm.
 * NOTE(review): cm = ff_cropTbl + MAX_NEG_CROP — presumably the standard
 * FFmpeg 0..255 clip table; confirm against ff_cropTbl's definition.
 * The avg forms round-average the clipped value with the existing dst pixel. */
#define op_avg(a, b) a = (((a)+cm[((b) + 16)>>5]+1)>>1)
#define op_avg_no_rnd(a, b) a = (((a)+cm[((b) + 15)>>5])>>1)
#define op_put(a, b) a = cm[((b) + 16)>>5]
#define op_put_no_rnd(a, b) a = cm[((b) + 15)>>5]

/* Instantiate the MPEG-4 quarter-pel MC function families:
 * put (rounding), put_no_rnd (truncating) and avg (rounding). */
QPEL_MC(0, put_ , _ , op_put)
QPEL_MC(1, put_no_rnd_, _no_rnd_, op_put_no_rnd)
QPEL_MC(0, avg_ , _ , op_avg)

#undef op_avg
#undef op_avg_no_rnd
#undef op_put
#undef op_put_no_rnd
00899
/*
 * H264_LOWPASS(OPNAME, OP, OP2) instantiates the H.264 6-tap
 * (1,-5,20,20,-5,1) quarter-pel lowpass filters, tuned for sh4:
 *   - OPNAME##h264_qpel_h_lowpass : horizontal pass, w x h output.
 *   - OPNAME##h264_qpel_v_lowpass : vertical pass, w x h output.
 *   - OPNAME##h264_qpel_hv_lowpass: horizontal pass into int16_t tmp[]
 *     (unclipped sums), then vertical pass over tmp; OP2 applies the
 *     wider 2-D rounding ((+512)>>10 in the instantiations below).
 *   - fixed-size wrappers for 4, 8 and 16 (the only w/h values used).
 * OP/OP2 are store macros (put or avg) that clip through cm.
 * The inner x/y loops are hand-unrolled, carrying the filter window in
 * scalar variables srcB..src18 so each input byte is loaded exactly once.
 * NOTE(review): cm = ff_cropTbl + MAX_NEG_CROP — presumably the standard
 * FFmpeg clip-to-0..255 table; confirm against ff_cropTbl's definition.
 */
#define H264_LOWPASS(OPNAME, OP, OP2) \
static inline void OPNAME ## h264_qpel_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,int w,int h){\
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
do {\
int srcB,srcA,src0,src1,src2,src3,src4,src5,src6;\
uint8_t *s = src-2;\
srcB = *s++;\
srcA = *s++;\
src0 = *s++;\
src1 = *s++;\
src2 = *s++;\
src3 = *s++;\
OP(dst[0], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
src4 = *s++;\
OP(dst[1], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
src5 = *s++;\
OP(dst[2], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
src6 = *s++;\
OP(dst[3], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
if (w>4) { /* 8-wide and wider: slide the window over columns 4..7 */ \
int src7,src8,src9,src10; \
src7 = *s++;\
OP(dst[4], (src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
src8 = *s++;\
OP(dst[5], (src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
src9 = *s++;\
OP(dst[6], (src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
src10 = *s++;\
OP(dst[7], (src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
if (w>8) { /* 16-wide: columns 8..15 */ \
int src11,src12,src13,src14,src15,src16,src17,src18; \
src11 = *s++;\
OP(dst[8] , (src8 +src9 )*20 - (src7 +src10)*5 + (src6 +src11));\
src12 = *s++;\
OP(dst[9] , (src9 +src10)*20 - (src8 +src11)*5 + (src7 +src12));\
src13 = *s++;\
OP(dst[10], (src10+src11)*20 - (src9 +src12)*5 + (src8 +src13));\
src14 = *s++;\
OP(dst[11], (src11+src12)*20 - (src10+src13)*5 + (src9 +src14));\
src15 = *s++;\
OP(dst[12], (src12+src13)*20 - (src11+src14)*5 + (src10+src15));\
src16 = *s++;\
OP(dst[13], (src13+src14)*20 - (src12+src15)*5 + (src11+src16));\
src17 = *s++;\
OP(dst[14], (src14+src15)*20 - (src13+src16)*5 + (src12+src17));\
src18 = *s++;\
OP(dst[15], (src15+src16)*20 - (src14+src17)*5 + (src13+src18));\
} \
} \
dst+=dstStride;\
src+=srcStride;\
}while(--h);\
}\
\
static inline void OPNAME ## h264_qpel_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,int w,int h){\
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
do{\
int srcB,srcA,src0,src1,src2,src3,src4,src5,src6;\
uint8_t *s = src-2*srcStride,*d=dst;\
srcB = *s; s+=srcStride;\
srcA = *s; s+=srcStride;\
src0 = *s; s+=srcStride;\
src1 = *s; s+=srcStride;\
src2 = *s; s+=srcStride;\
src3 = *s; s+=srcStride;\
OP(*d, (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));d+=dstStride;\
src4 = *s; s+=srcStride;\
OP(*d, (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));d+=dstStride;\
src5 = *s; s+=srcStride;\
OP(*d, (src2+src3)*20 - (src1+src4)*5 + (src0+src5));d+=dstStride;\
src6 = *s; s+=srcStride;\
OP(*d, (src3+src4)*20 - (src2+src5)*5 + (src1+src6));d+=dstStride;\
if (h>4) { /* 8-tall and taller: rows 4..7 */ \
int src7,src8,src9,src10; \
src7 = *s; s+=srcStride;\
OP(*d, (src4+src5)*20 - (src3+src6)*5 + (src2+src7));d+=dstStride;\
src8 = *s; s+=srcStride;\
OP(*d, (src5+src6)*20 - (src4+src7)*5 + (src3+src8));d+=dstStride;\
src9 = *s; s+=srcStride;\
OP(*d, (src6+src7)*20 - (src5+src8)*5 + (src4+src9));d+=dstStride;\
src10 = *s; s+=srcStride;\
OP(*d, (src7+src8)*20 - (src6+src9)*5 + (src5+src10));d+=dstStride;\
if (h>8) { /* 16-tall: rows 8..15 */ \
int src11,src12,src13,src14,src15,src16,src17,src18; \
src11 = *s; s+=srcStride;\
OP(*d , (src8 +src9 )*20 - (src7 +src10)*5 + (src6 +src11));d+=dstStride;\
src12 = *s; s+=srcStride;\
OP(*d , (src9 +src10)*20 - (src8 +src11)*5 + (src7 +src12));d+=dstStride;\
src13 = *s; s+=srcStride;\
OP(*d, (src10+src11)*20 - (src9 +src12)*5 + (src8 +src13));d+=dstStride;\
src14 = *s; s+=srcStride;\
OP(*d, (src11+src12)*20 - (src10+src13)*5 + (src9 +src14));d+=dstStride;\
src15 = *s; s+=srcStride;\
OP(*d, (src12+src13)*20 - (src11+src14)*5 + (src10+src15));d+=dstStride;\
src16 = *s; s+=srcStride;\
OP(*d, (src13+src14)*20 - (src12+src15)*5 + (src11+src16));d+=dstStride;\
src17 = *s; s+=srcStride;\
OP(*d, (src14+src15)*20 - (src13+src16)*5 + (src12+src17));d+=dstStride;\
src18 = *s; s+=srcStride;\
OP(*d, (src15+src16)*20 - (src14+src17)*5 + (src13+src18));d+=dstStride;\
} \
} \
dst++;\
src++;\
}while(--w);\
}\
\
static inline void OPNAME ## h264_qpel_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride,int w,int h){\
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
src -= 2*srcStride;\
i= h+5; /* horizontal pass needs 2 rows above and 3 below the block */ \
do {\
int srcB,srcA,src0,src1,src2,src3,src4,src5,src6;\
uint8_t *s = src-2;\
srcB = *s++;\
srcA = *s++;\
src0 = *s++;\
src1 = *s++;\
src2 = *s++;\
src3 = *s++;\
tmp[0] = ((src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
src4 = *s++;\
tmp[1] = ((src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
src5 = *s++;\
tmp[2] = ((src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
src6 = *s++;\
tmp[3] = ((src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
if (w>4) { \
int src7,src8,src9,src10; \
src7 = *s++;\
tmp[4] = ((src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
src8 = *s++;\
tmp[5] = ((src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
src9 = *s++;\
tmp[6] = ((src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
src10 = *s++;\
tmp[7] = ((src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
if (w>8) { \
int src11,src12,src13,src14,src15,src16,src17,src18; \
src11 = *s++;\
tmp[8] = ((src8 +src9 )*20 - (src7 +src10)*5 + (src6 +src11));\
src12 = *s++;\
tmp[9] = ((src9 +src10)*20 - (src8 +src11)*5 + (src7 +src12));\
src13 = *s++;\
tmp[10] = ((src10+src11)*20 - (src9 +src12)*5 + (src8 +src13));\
src14 = *s++;\
tmp[11] = ((src11+src12)*20 - (src10+src13)*5 + (src9 +src14));\
src15 = *s++;\
tmp[12] = ((src12+src13)*20 - (src11+src14)*5 + (src10+src15));\
src16 = *s++;\
tmp[13] = ((src13+src14)*20 - (src12+src15)*5 + (src11+src16));\
src17 = *s++;\
tmp[14] = ((src14+src15)*20 - (src13+src16)*5 + (src12+src17));\
src18 = *s++;\
tmp[15] = ((src15+src16)*20 - (src14+src17)*5 + (src13+src18));\
} \
} \
tmp+=tmpStride;\
src+=srcStride;\
}while(--i);\
tmp -= tmpStride*(h+5-2); /* rewind to row 2; vertical pass re-centres via s = tmp-2*tmpStride */ \
i = w; \
do {\
int tmpB,tmpA,tmp0,tmp1,tmp2,tmp3,tmp4,tmp5,tmp6;\
int16_t *s = tmp-2*tmpStride; \
uint8_t *d=dst;\
tmpB = *s; s+=tmpStride;\
tmpA = *s; s+=tmpStride;\
tmp0 = *s; s+=tmpStride;\
tmp1 = *s; s+=tmpStride;\
tmp2 = *s; s+=tmpStride;\
tmp3 = *s; s+=tmpStride;\
OP2(*d, (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));d+=dstStride;\
tmp4 = *s; s+=tmpStride;\
OP2(*d, (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));d+=dstStride;\
tmp5 = *s; s+=tmpStride;\
OP2(*d, (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));d+=dstStride;\
tmp6 = *s; s+=tmpStride;\
OP2(*d, (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));d+=dstStride;\
if (h>4) { \
int tmp7,tmp8,tmp9,tmp10; \
tmp7 = *s; s+=tmpStride;\
OP2(*d, (tmp4+tmp5)*20 - (tmp3+tmp6)*5 + (tmp2+tmp7));d+=dstStride;\
tmp8 = *s; s+=tmpStride;\
OP2(*d, (tmp5+tmp6)*20 - (tmp4+tmp7)*5 + (tmp3+tmp8));d+=dstStride;\
tmp9 = *s; s+=tmpStride;\
OP2(*d, (tmp6+tmp7)*20 - (tmp5+tmp8)*5 + (tmp4+tmp9));d+=dstStride;\
tmp10 = *s; s+=tmpStride;\
OP2(*d, (tmp7+tmp8)*20 - (tmp6+tmp9)*5 + (tmp5+tmp10));d+=dstStride;\
if (h>8) { \
int tmp11,tmp12,tmp13,tmp14,tmp15,tmp16,tmp17,tmp18; \
tmp11 = *s; s+=tmpStride;\
OP2(*d , (tmp8 +tmp9 )*20 - (tmp7 +tmp10)*5 + (tmp6 +tmp11));d+=dstStride;\
tmp12 = *s; s+=tmpStride;\
OP2(*d , (tmp9 +tmp10)*20 - (tmp8 +tmp11)*5 + (tmp7 +tmp12));d+=dstStride;\
tmp13 = *s; s+=tmpStride;\
OP2(*d, (tmp10+tmp11)*20 - (tmp9 +tmp12)*5 + (tmp8 +tmp13));d+=dstStride;\
tmp14 = *s; s+=tmpStride;\
OP2(*d, (tmp11+tmp12)*20 - (tmp10+tmp13)*5 + (tmp9 +tmp14));d+=dstStride;\
tmp15 = *s; s+=tmpStride;\
OP2(*d, (tmp12+tmp13)*20 - (tmp11+tmp14)*5 + (tmp10+tmp15));d+=dstStride;\
tmp16 = *s; s+=tmpStride;\
OP2(*d, (tmp13+tmp14)*20 - (tmp12+tmp15)*5 + (tmp11+tmp16));d+=dstStride;\
tmp17 = *s; s+=tmpStride;\
OP2(*d, (tmp14+tmp15)*20 - (tmp13+tmp16)*5 + (tmp12+tmp17));d+=dstStride;\
tmp18 = *s; s+=tmpStride;\
OP2(*d, (tmp15+tmp16)*20 - (tmp14+tmp17)*5 + (tmp13+tmp18));d+=dstStride;\
} \
} \
dst++;\
tmp++;\
}while(--i);\
}\
\
static void OPNAME ## h264_qpel4_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
OPNAME ## h264_qpel_h_lowpass(dst,src,dstStride,srcStride,4,4); \
}\
static void OPNAME ## h264_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
OPNAME ## h264_qpel_h_lowpass(dst,src,dstStride,srcStride,8,8); \
}\
static void OPNAME ## h264_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
OPNAME ## h264_qpel_h_lowpass(dst,src,dstStride,srcStride,16,16); \
}\
\
static void OPNAME ## h264_qpel4_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
OPNAME ## h264_qpel_v_lowpass(dst,src,dstStride,srcStride,4,4); \
}\
static void OPNAME ## h264_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
OPNAME ## h264_qpel_v_lowpass(dst,src,dstStride,srcStride,8,8); \
}\
static void OPNAME ## h264_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
OPNAME ## h264_qpel_v_lowpass(dst,src,dstStride,srcStride,16,16); \
}\
static void OPNAME ## h264_qpel4_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
OPNAME ## h264_qpel_hv_lowpass(dst,tmp,src,dstStride,tmpStride,srcStride,4,4); \
}\
static void OPNAME ## h264_qpel8_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
OPNAME ## h264_qpel_hv_lowpass(dst,tmp,src,dstStride,tmpStride,srcStride,8,8); \
}\
static void OPNAME ## h264_qpel16_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
OPNAME ## h264_qpel_hv_lowpass(dst,tmp,src,dstStride,tmpStride,srcStride,16,16); \
}\
01143
/*
 * H264_MC(OPNAME, SIZE) instantiates the 16 quarter-pel motion-compensation
 * entry points OPNAME##h264_qpelSIZE_mcXY_sh4 for one block size, where X,Y
 * in 0..3 are the horizontal/vertical quarter-pel phases. mc00 is a plain
 * copy; mcX0/mc0Y use one lowpass pass (averaging with the source for odd
 * phases); the diagonal cases combine two intermediates with the
 * pixelsSIZE_l2_aligned* averaging helpers. copy_blockSIZE first copies a
 * SIZE x (SIZE+5) source window (2 rows above, 3 below) into a stack buffer
 * so the vertical filter reads from an aligned, padded area; full_mid skips
 * those 2 extra top rows.
 */
#define H264_MC(OPNAME, SIZE) \
static void OPNAME ## h264_qpel ## SIZE ## _mc00_sh4 (uint8_t *dst, uint8_t *src, int stride){\
OPNAME ## pixels ## SIZE ## _c(dst, src, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc10_sh4(uint8_t *dst, uint8_t *src, int stride){\
uint8_t half[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _h_lowpass(half, src, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_aligned2(dst, src, half, stride, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_sh4(uint8_t *dst, uint8_t *src, int stride){\
OPNAME ## h264_qpel ## SIZE ## _h_lowpass(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_sh4(uint8_t *dst, uint8_t *src, int stride){\
uint8_t half[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _h_lowpass(half, src, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_aligned2(dst, src+1, half, stride, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc01_sh4(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)];\
uint8_t * const full_mid= full + SIZE*2;\
uint8_t half[SIZE*SIZE];\
copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
put_h264_qpel ## SIZE ## _v_lowpass(half, full_mid, SIZE, SIZE);\
OPNAME ## pixels ## SIZE ## _l2_aligned(dst, full_mid, half, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_sh4(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)];\
uint8_t * const full_mid= full + SIZE*2;\
copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
OPNAME ## h264_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_sh4(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)];\
uint8_t * const full_mid= full + SIZE*2;\
uint8_t half[SIZE*SIZE];\
copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
put_h264_qpel ## SIZE ## _v_lowpass(half, full_mid, SIZE, SIZE);\
OPNAME ## pixels ## SIZE ## _l2_aligned(dst, full_mid+SIZE, half, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc11_sh4(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)];\
uint8_t * const full_mid= full + SIZE*2;\
uint8_t halfH[SIZE*SIZE];\
uint8_t halfV[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_sh4(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)];\
uint8_t * const full_mid= full + SIZE*2;\
uint8_t halfH[SIZE*SIZE];\
uint8_t halfV[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_sh4(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)];\
uint8_t * const full_mid= full + SIZE*2;\
uint8_t halfH[SIZE*SIZE];\
uint8_t halfV[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_sh4(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)];\
uint8_t * const full_mid= full + SIZE*2;\
uint8_t halfH[SIZE*SIZE];\
uint8_t halfV[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_sh4(uint8_t *dst, uint8_t *src, int stride){\
int16_t tmp[SIZE*(SIZE+5)];\
OPNAME ## h264_qpel ## SIZE ## _hv_lowpass(dst, tmp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_sh4(uint8_t *dst, uint8_t *src, int stride){\
int16_t tmp[SIZE*(SIZE+5)];\
uint8_t halfH[SIZE*SIZE];\
uint8_t halfHV[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfHV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_sh4(uint8_t *dst, uint8_t *src, int stride){\
int16_t tmp[SIZE*(SIZE+5)];\
uint8_t halfH[SIZE*SIZE];\
uint8_t halfHV[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfHV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_sh4(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)];\
uint8_t * const full_mid= full + SIZE*2;\
int16_t tmp[SIZE*(SIZE+5)];\
uint8_t halfV[SIZE*SIZE];\
uint8_t halfHV[SIZE*SIZE];\
copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfV, halfHV, stride, SIZE, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_sh4(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)];\
uint8_t * const full_mid= full + SIZE*2;\
int16_t tmp[SIZE*(SIZE+5)];\
uint8_t halfV[SIZE*SIZE];\
uint8_t halfHV[SIZE*SIZE];\
copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfV, halfHV, stride, SIZE, SIZE, SIZE);\
}\
01280
/* Store macros for the H.264 lowpass instantiations:
 * op_put/op_avg round a 1-D 6-tap sum with (+16)>>5; op2_put/op2_avg round
 * the 2-D (hv) sum with (+512)>>10. The avg forms round-average the clipped
 * value with the existing dst pixel. All clip through cm (ff_cropTbl). */
#define op_avg(a, b) a = (((a)+cm[((b) + 16)>>5]+1)>>1)

#define op_put(a, b) a = cm[((b) + 16)>>5]
#define op2_avg(a, b) a = (((a)+cm[((b) + 512)>>10]+1)>>1)
#define op2_put(a, b) a = cm[((b) + 512)>>10]

/* Instantiate put/avg lowpass filters, then the 16 MC entry points for each
 * of the three H.264 luma block sizes. */
H264_LOWPASS(put_ , op_put, op2_put)
H264_LOWPASS(avg_ , op_avg, op2_avg)
H264_MC(put_, 4)
H264_MC(put_, 8)
H264_MC(put_, 16)
H264_MC(avg_, 4)
H264_MC(avg_, 8)
H264_MC(avg_, 16)

#undef op_avg
#undef op_put
#undef op2_avg
#undef op2_put
01300
01301 static void wmv2_mspel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){
01302 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
01303
01304 do{
01305 int src_1,src0,src1,src2,src3,src4,src5,src6,src7,src8,src9;
01306 uint8_t *s = src;
01307 src_1 = s[-1];
01308 src0 = *s++;
01309 src1 = *s++;
01310 src2 = *s++;
01311 dst[0]= cm[(9*(src0 + src1) - (src_1 + src2) + 8)>>4];
01312 src3 = *s++;
01313 dst[1]= cm[(9*(src1 + src2) - (src0 + src3) + 8)>>4];
01314 src4 = *s++;
01315 dst[2]= cm[(9*(src2 + src3) - (src1 + src4) + 8)>>4];
01316 src5 = *s++;
01317 dst[3]= cm[(9*(src3 + src4) - (src2 + src5) + 8)>>4];
01318 src6 = *s++;
01319 dst[4]= cm[(9*(src4 + src5) - (src3 + src6) + 8)>>4];
01320 src7 = *s++;
01321 dst[5]= cm[(9*(src5 + src6) - (src4 + src7) + 8)>>4];
01322 src8 = *s++;
01323 dst[6]= cm[(9*(src6 + src7) - (src5 + src8) + 8)>>4];
01324 src9 = *s++;
01325 dst[7]= cm[(9*(src7 + src8) - (src6 + src9) + 8)>>4];
01326 dst+=dstStride;
01327 src+=srcStride;
01328 }while(--h);
01329 }
01330
01331 static void wmv2_mspel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int w){
01332 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
01333
01334 do{
01335 int src_1,src0,src1,src2,src3,src4,src5,src6,src7,src8,src9;
01336 uint8_t *s = src,*d = dst;
01337 src_1 = *(s-srcStride);
01338 src0 = *s; s+=srcStride;
01339 src1 = *s; s+=srcStride;
01340 src2 = *s; s+=srcStride;
01341 *d= cm[(9*(src0 + src1) - (src_1 + src2) + 8)>>4]; d+=dstStride;
01342 src3 = *s; s+=srcStride;
01343 *d= cm[(9*(src1 + src2) - (src0 + src3) + 8)>>4]; d+=dstStride;
01344 src4 = *s; s+=srcStride;
01345 *d= cm[(9*(src2 + src3) - (src1 + src4) + 8)>>4]; d+=dstStride;
01346 src5 = *s; s+=srcStride;
01347 *d= cm[(9*(src3 + src4) - (src2 + src5) + 8)>>4]; d+=dstStride;
01348 src6 = *s; s+=srcStride;
01349 *d= cm[(9*(src4 + src5) - (src3 + src6) + 8)>>4]; d+=dstStride;
01350 src7 = *s; s+=srcStride;
01351 *d= cm[(9*(src5 + src6) - (src4 + src7) + 8)>>4]; d+=dstStride;
01352 src8 = *s; s+=srcStride;
01353 *d= cm[(9*(src6 + src7) - (src5 + src8) + 8)>>4]; d+=dstStride;
01354 src9 = *s;
01355 *d= cm[(9*(src7 + src8) - (src6 + src9) + 8)>>4]; d+=dstStride;
01356 src++;
01357 dst++;
01358 }while(--w);
01359 }
01360
/* mspel (0,0): integer-pel position — plain 8x8 copy, no filtering. */
static void put_mspel8_mc00_sh4 (uint8_t *dst, uint8_t *src, int stride){
put_pixels8_c(dst, src, stride, 8);
}
01364
/* mspel (1,0): quarter-pel left phase — average the source block with the
 * horizontally lowpass-filtered half-pel block. */
static void put_mspel8_mc10_sh4(uint8_t *dst, uint8_t *src, int stride){
uint8_t half[64]; /* 8x8 h-filtered intermediate */
wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
put_pixels8_l2_aligned2(dst, src, half, stride, stride, 8, 8);
}
01370
/* mspel (2,0): horizontal half-pel — single horizontal lowpass straight to dst. */
static void put_mspel8_mc20_sh4(uint8_t *dst, uint8_t *src, int stride){
wmv2_mspel8_h_lowpass(dst, src, stride, stride, 8);
}
01374
/* mspel (3,0): quarter-pel right phase — average src+1 with the
 * horizontally lowpass-filtered half-pel block. */
static void put_mspel8_mc30_sh4(uint8_t *dst, uint8_t *src, int stride){
uint8_t half[64]; /* 8x8 h-filtered intermediate */
wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
put_pixels8_l2_aligned2(dst, src+1, half, stride, stride, 8, 8);
}
01380
/* mspel (0,2): vertical half-pel — single vertical lowpass straight to dst. */
static void put_mspel8_mc02_sh4(uint8_t *dst, uint8_t *src, int stride){
wmv2_mspel8_v_lowpass(dst, src, stride, stride, 8);
}
01384
/* mspel (1,2): average of the vertically filtered block (halfV) and the
 * h-then-v filtered block (halfHV). halfH holds 11 h-filtered rows starting
 * one row above src (rows -1..9) so the vertical pass on halfH+8 (row 0)
 * has its row -1 and rows 8..9 neighbours available. */
static void put_mspel8_mc12_sh4(uint8_t *dst, uint8_t *src, int stride){
uint8_t halfH[88]; /* 8 x 11 h-filtered, row -1 first */
uint8_t halfV[64];
uint8_t halfHV[64];
wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
wmv2_mspel8_v_lowpass(halfV, src, 8, stride, 8);
wmv2_mspel8_v_lowpass(halfHV, halfH+8, 8, 8, 8);
put_pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);
}
/* mspel (3,2): same as mc12 but the vertical-only intermediate is taken
 * from src+1 (right-shifted phase). */
static void put_mspel8_mc32_sh4(uint8_t *dst, uint8_t *src, int stride){
uint8_t halfH[88]; /* 8 x 11 h-filtered, row -1 first */
uint8_t halfV[64];
uint8_t halfHV[64];
wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
wmv2_mspel8_v_lowpass(halfV, src+1, 8, stride, 8);
wmv2_mspel8_v_lowpass(halfHV, halfH+8, 8, 8, 8);
put_pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);
}
/* mspel (2,2): full half-pel diagonal — horizontal lowpass over 11 rows
 * (rows -1..9 of src), then vertical lowpass of halfH+8 (row 0) to dst. */
static void put_mspel8_mc22_sh4(uint8_t *dst, uint8_t *src, int stride){
uint8_t halfH[88]; /* 8 x 11 h-filtered, row -1 first */
wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
wmv2_mspel8_v_lowpass(dst, halfH+8, stride, 8, 8);
}