#include "libavutil/common.h"
#include "libavcodec/dsputil.h"

#include "libavutil/x86/asm.h"
#include "dsputil_mmx.h"

#if HAVE_INLINE_ASM

#define ROW_SHIFT 11
#define COL_SHIFT 6

#define round(bias) ((int)(((bias)+0.5) * (1<<ROW_SHIFT)))
#define rounder(bias) {round (bias), round (bias)}
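
/*
 * round() turns a fractional bias into the Q11 (1 << ROW_SHIFT) fixed-point
 * constant that the row pass adds before its ">> ROW_SHIFT"; rounder()
 * duplicates that value so a single movq/paddd applies it to both 32-bit
 * lanes of an MMX register.  For example, rounder(0) expands to
 * { 1024, 1024 }: (0 + 0.5) * 2048, i.e. the usual add-half-then-truncate
 * rounding term.
 */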


#if 0
/* Reference C implementation of the row IDCT; it is compiled out and kept
 * only to document what the MMXEXT and MMX row functions below compute. */
static inline void idct_row (int16_t * row, int offset,
                             int16_t * table, int32_t * rounder)
{
    int C1, C2, C3, C4, C5, C6, C7;
    int a0, a1, a2, a3, b0, b1, b2, b3;

    row += offset;

    C1 = table[1];
    C2 = table[2];
    C3 = table[3];
    C4 = table[4];
    C5 = table[5];
    C6 = table[6];
    C7 = table[7];

    /* even part: DC plus the even-frequency coefficients */
    a0 = C4*row[0] + C2*row[2] + C4*row[4] + C6*row[6] + *rounder;
    a1 = C4*row[0] + C6*row[2] - C4*row[4] - C2*row[6] + *rounder;
    a2 = C4*row[0] - C6*row[2] - C4*row[4] + C2*row[6] + *rounder;
    a3 = C4*row[0] - C2*row[2] + C4*row[4] - C6*row[6] + *rounder;

    /* odd part: the odd-frequency coefficients */
    b0 = C1*row[1] + C3*row[3] + C5*row[5] + C7*row[7];
    b1 = C3*row[1] - C7*row[3] - C1*row[5] - C5*row[7];
    b2 = C5*row[1] - C1*row[3] + C7*row[5] + C3*row[7];
    b3 = C7*row[1] - C5*row[3] + C3*row[5] - C1*row[7];

    /* butterfly: outputs 0..3 are a+b, outputs 7..4 the mirrored a-b */
    row[0] = (a0 + b0) >> ROW_SHIFT;
    row[1] = (a1 + b1) >> ROW_SHIFT;
    row[2] = (a2 + b2) >> ROW_SHIFT;
    row[3] = (a3 + b3) >> ROW_SHIFT;
    row[4] = (a3 - b3) >> ROW_SHIFT;
    row[5] = (a2 - b2) >> ROW_SHIFT;
    row[6] = (a1 - b1) >> ROW_SHIFT;
    row[7] = (a0 - b0) >> ROW_SHIFT;
}
#endif
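
/*
 * Worked example of the row transform above (a sketch, not part of the
 * build): if only row[0] is non-zero, every product involving rows 1..7
 * vanishes, so a0 = a1 = a2 = a3 = C4*row[0] + *rounder and b0..b3 = 0,
 * and all eight outputs collapse to (C4*row[0] + *rounder) >> ROW_SHIFT,
 * i.e. a flat row, as expected for a DC-only input.
 */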



#define mmxext_table(c1,c2,c3,c4,c5,c6,c7) { c4,  c2, -c4, -c2,   \
                                             c4,  c6,  c4,  c6,   \
                                             c1,  c3, -c1, -c5,   \
                                             c5,  c7,  c3, -c7,   \
                                             c4, -c6,  c4, -c6,   \
                                            -c4,  c2,  c4, -c2,   \
                                             c5, -c1,  c3, -c1,   \
                                             c7,  c3,  c7, -c5 }
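
/*
 * Layout note (an interpretation of the code below, not an original comment):
 * the constants are pre-interleaved in exactly the order the pmaddwd
 * instructions consume them, so each pmaddwd multiplies four int16
 * coefficient/sample pairs and sums them pairwise into two 32-bit dot
 * products - two partial row results per instruction.  The pshufw $0x4e in
 * the MMXEXT path swaps the dword halves of the input so the same packed
 * table serves both halves of the row.
 */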

static inline void mmxext_row_head (int16_t * const row, const int offset,
                                    const int16_t * const table)
{
    __asm__ volatile(
        "movq (%0), %%mm2 \n\t"

        "movq 8(%0), %%mm5 \n\t"
        "movq %%mm2, %%mm0 \n\t"

        "movq (%1), %%mm3 \n\t"
        "movq %%mm5, %%mm6 \n\t"

        "movq 8(%1), %%mm4 \n\t"
        "pmaddwd %%mm0, %%mm3 \n\t"

        "pshufw $0x4e, %%mm2, %%mm2 \n\t"
        :: "r" ((row+offset)), "r" (table)
    );
}

static inline void mmxext_row (const int16_t * const table,
                               const int32_t * const rounder)
{
    __asm__ volatile (
        "movq 16(%0), %%mm1 \n\t"
        "pmaddwd %%mm2, %%mm4 \n\t"

        "pmaddwd 32(%0), %%mm0 \n\t"
        "pshufw $0x4e, %%mm6, %%mm6 \n\t"

        "movq 24(%0), %%mm7 \n\t"
        "pmaddwd %%mm5, %%mm1 \n\t"

        "paddd (%1), %%mm3 \n\t"
        "pmaddwd %%mm6, %%mm7 \n\t"

        "pmaddwd 40(%0), %%mm2 \n\t"
        "paddd %%mm4, %%mm3 \n\t"

        "pmaddwd 48(%0), %%mm5 \n\t"
        "movq %%mm3, %%mm4 \n\t"

        "pmaddwd 56(%0), %%mm6 \n\t"
        "paddd %%mm7, %%mm1 \n\t"

        "paddd (%1), %%mm0 \n\t"
        "psubd %%mm1, %%mm3 \n\t"

        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm3 \n\t"
        "paddd %%mm4, %%mm1 \n\t"

        "paddd %%mm2, %%mm0 \n\t"
        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm1 \n\t"

        "paddd %%mm6, %%mm5 \n\t"
        "movq %%mm0, %%mm4 \n\t"

        "paddd %%mm5, %%mm0 \n\t"
        "psubd %%mm5, %%mm4 \n\t"
        : : "r" (table), "r" (rounder));
}
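
/*
 * Note on the row kernel above (an interpretation, not an original comment):
 * each pmaddwd forms four 16x16 products and sums them pairwise, so the
 * even-part and odd-part dot products of the reference idct_row are
 * accumulated two at a time; the paddd/psubd pairs then produce the a+b and
 * a-b butterflies, and psrad applies the ">> ROW_SHIFT".  The rounder is
 * added once per half via "paddd (%1), ...", matching the "+ *rounder"
 * terms of the reference code.
 */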

static inline void mmxext_row_tail (int16_t * const row, const int store)
{
    __asm__ volatile (
        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm0 \n\t"

        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm4 \n\t"

        "packssdw %%mm0, %%mm1 \n\t"

        "packssdw %%mm3, %%mm4 \n\t"

        "movq %%mm1, (%0) \n\t"
        "pshufw $0xb1, %%mm4, %%mm4 \n\t"

        "movq %%mm4, 8(%0) \n\t"
        :: "r" (row+store)
    );
}

static inline void mmxext_row_mid (int16_t * const row, const int store,
                                   const int offset,
                                   const int16_t * const table)
{
    __asm__ volatile (
        "movq (%0,%1), %%mm2 \n\t"
        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm0 \n\t"

        "movq 8(%0,%1), %%mm5 \n\t"
        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm4 \n\t"

        "packssdw %%mm0, %%mm1 \n\t"
        "movq %%mm5, %%mm6 \n\t"

        "packssdw %%mm3, %%mm4 \n\t"
        "movq %%mm2, %%mm0 \n\t"

        "movq %%mm1, (%0,%2) \n\t"
        "pshufw $0xb1, %%mm4, %%mm4\n\t"

        "movq (%3), %%mm3 \n\t"
        "movq %%mm4, 8(%0,%2) \n\t"

        "pmaddwd %%mm0, %%mm3 \n\t"

        "movq 8(%3), %%mm4 \n\t"
        "pshufw $0x4e, %%mm2, %%mm2\n\t"
        :: "r" (row), "r" ((x86_reg) (2*offset)), "r" ((x86_reg) (2*store)), "r" (table)
    );
}
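
/*
 * How the row stages fit together (summary inferred from declare_idct below):
 * the head stage loads one row and starts the first dot products, the row
 * stage finishes that row's arithmetic, and the mid stage both stores the
 * finished row (at "store") and loads the next one (at "offset"), so the
 * store of one row overlaps with the load of the next; the tail stage stores
 * the final row.  The mid stage scales offset and store by 2 because it
 * indexes with byte displacements, while head/tail use int16_t pointer
 * arithmetic directly.
 */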



#define mmx_table(c1,c2,c3,c4,c5,c6,c7) { c4,  c2,  c4,  c6,   \
                                          c4,  c6, -c4, -c2,   \
                                          c1,  c3,  c3, -c7,   \
                                          c5,  c7, -c1, -c5,   \
                                          c4, -c6,  c4, -c2,   \
                                         -c4,  c2,  c4, -c6,   \
                                          c5, -c1,  c7, -c5,   \
                                          c7,  c3,  c3, -c1 }
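
/*
 * The plain-MMX variant cannot use pshufw (an MMXEXT instruction), so it
 * emulates the dword swap with punpckldq/punpckhdq when loading and the word
 * swap with a pslld/psrld/por sequence when storing; mmx_table therefore
 * packs the same coefficients in a different order than mmxext_table so the
 * pmaddwd results still land in the right lanes.  This is a reading of the
 * code below rather than an original comment.
 */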

static inline void mmx_row_head (int16_t * const row, const int offset,
                                 const int16_t * const table)
{
    __asm__ volatile (
        "movq (%0), %%mm2 \n\t"

        "movq 8(%0), %%mm5 \n\t"
        "movq %%mm2, %%mm0 \n\t"

        "movq (%1), %%mm3 \n\t"
        "movq %%mm5, %%mm6 \n\t"

        "punpckldq %%mm0, %%mm0 \n\t"

        "movq 8(%1), %%mm4 \n\t"
        "pmaddwd %%mm0, %%mm3 \n\t"

        "movq 16(%1), %%mm1 \n\t"
        "punpckhdq %%mm2, %%mm2 \n\t"
        :: "r" ((row+offset)), "r" (table)
    );
}

static inline void mmx_row (const int16_t * const table,
                            const int32_t * const rounder)
{
    __asm__ volatile (
        "pmaddwd %%mm2, %%mm4 \n\t"
        "punpckldq %%mm5, %%mm5 \n\t"

        "pmaddwd 32(%0), %%mm0 \n\t"
        "punpckhdq %%mm6, %%mm6 \n\t"

        "movq 24(%0), %%mm7 \n\t"
        "pmaddwd %%mm5, %%mm1 \n\t"

        "paddd (%1), %%mm3 \n\t"
        "pmaddwd %%mm6, %%mm7 \n\t"

        "pmaddwd 40(%0), %%mm2 \n\t"
        "paddd %%mm4, %%mm3 \n\t"

        "pmaddwd 48(%0), %%mm5 \n\t"
        "movq %%mm3, %%mm4 \n\t"

        "pmaddwd 56(%0), %%mm6 \n\t"
        "paddd %%mm7, %%mm1 \n\t"

        "paddd (%1), %%mm0 \n\t"
        "psubd %%mm1, %%mm3 \n\t"

        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm3 \n\t"
        "paddd %%mm4, %%mm1 \n\t"

        "paddd %%mm2, %%mm0 \n\t"
        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm1 \n\t"

        "paddd %%mm6, %%mm5 \n\t"
        "movq %%mm0, %%mm7 \n\t"

        "paddd %%mm5, %%mm0 \n\t"
        "psubd %%mm5, %%mm7 \n\t"
        :: "r" (table), "r" (rounder)
    );
}

static inline void mmx_row_tail (int16_t * const row, const int store)
{
    __asm__ volatile (
        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm0 \n\t"

        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm7 \n\t"

        "packssdw %%mm0, %%mm1 \n\t"

        "packssdw %%mm3, %%mm7 \n\t"

        "movq %%mm1, (%0) \n\t"
        "movq %%mm7, %%mm4 \n\t"

        "pslld $16, %%mm7 \n\t"

        "psrld $16, %%mm4 \n\t"

        "por %%mm4, %%mm7 \n\t"

        "movq %%mm7, 8(%0) \n\t"
        :: "r" (row+store)
    );
}

static inline void mmx_row_mid (int16_t * const row, const int store,
                                const int offset, const int16_t * const table)
{
    __asm__ volatile (
        "movq (%0,%1), %%mm2 \n\t"
        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm0 \n\t"

        "movq 8(%0,%1), %%mm5 \n\t"
        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm7 \n\t"

        "packssdw %%mm0, %%mm1 \n\t"
        "movq %%mm5, %%mm6 \n\t"

        "packssdw %%mm3, %%mm7 \n\t"
        "movq %%mm2, %%mm0 \n\t"

        "movq %%mm1, (%0,%2) \n\t"
        "movq %%mm7, %%mm1 \n\t"

        "punpckldq %%mm0, %%mm0 \n\t"
        "psrld $16, %%mm7 \n\t"

        "movq (%3), %%mm3 \n\t"
        "pslld $16, %%mm1 \n\t"

        "movq 8(%3), %%mm4 \n\t"
        "por %%mm1, %%mm7 \n\t"

        "movq 16(%3), %%mm1 \n\t"
        "punpckhdq %%mm2, %%mm2 \n\t"

        "movq %%mm7, 8(%0,%2) \n\t"
        "pmaddwd %%mm0, %%mm3 \n\t"
        : : "r" (row), "r" ((x86_reg) (2*offset)), "r" ((x86_reg) (2*store)), "r" (table)
    );
}


#if 0
/* Reference C implementation of the column IDCT; compiled out, kept only to
 * document what the MMX column code below computes.  T1, T2, T3 and C4 are
 * the fixed-point constants defined in the MMX idct_col() further down. */
static inline void idct_col (int16_t * col, int offset)
{
/* fixed-point multiply: keep only the high 16 bits of the product,
 * matching what pmulhw does in the MMX version */
#define F(c,x) (((c) * (x)) >> 16)

/* saturate to the signed 16-bit range, as paddsw/psubsw do */
#define S(x) (((x)>32767) ? 32767 : ((x)<-32768) ? -32768 : (x))

    int16_t x0, x1, x2, x3, x4, x5, x6, x7;
    int16_t y0, y1, y2, y3, y4, y5, y6, y7;
    int16_t a0, a1, a2, a3, b0, b1, b2, b3;
    int16_t u04, v04, u26, v26, u17, v17, u35, v35, u12, v12;

    col += offset;

    x0 = col[0*8];
    x1 = col[1*8];
    x2 = col[2*8];
    x3 = col[3*8];
    x4 = col[4*8];
    x5 = col[5*8];
    x6 = col[6*8];
    x7 = col[7*8];

    u04 = S (x0 + x4);
    v04 = S (x0 - x4);
    u26 = S (F (T2, x6) + x2);
    v26 = S (F (T2, x2) - x6);

    a0 = S (u04 + u26);
    a1 = S (v04 + v26);
    a2 = S (v04 - v26);
    a3 = S (u04 - u26);

    u17 = S (F (T1, x7) + x1);
    v17 = S (F (T1, x1) - x7);
    u35 = S (F (T3, x5) + x3);
    v35 = S (F (T3, x3) - x5);

    b0 = S (u17 + u35);
    b3 = S (v17 - v35);
    u12 = S (u17 - u35);
    v12 = S (v17 + v35);
    u12 = S (2 * F (C4, u12));
    v12 = S (2 * F (C4, v12));
    b1 = S (u12 + v12);
    b2 = S (u12 - v12);

    y0 = S (a0 + b0) >> COL_SHIFT;
    y1 = S (a1 + b1) >> COL_SHIFT;
    y2 = S (a2 + b2) >> COL_SHIFT;
    y3 = S (a3 + b3) >> COL_SHIFT;

    y4 = S (a3 - b3) >> COL_SHIFT;
    y5 = S (a2 - b2) >> COL_SHIFT;
    y6 = S (a1 - b1) >> COL_SHIFT;
    y7 = S (a0 - b0) >> COL_SHIFT;

    col[0*8] = y0;
    col[1*8] = y1;
    col[2*8] = y2;
    col[3*8] = y3;
    col[4*8] = y4;
    col[5*8] = y5;
    col[6*8] = y6;
    col[7*8] = y7;
}
#endif


static inline void idct_col (int16_t * const col, const int offset)
{
#define T1 13036
#define T2 27146
#define T3 43790
#define C4 23170
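
    /*
     * These constants appear to be the usual fixed-point tangents/cosine of
     * this IDCT family: T1 = tan(pi/16), T2 = tan(2*pi/16) and
     * T3 = tan(3*pi/16) in Q16 (x 65536), and C4 = cos(pi/4) in Q15
     * (x 32768); e.g. 0.1989 * 65536 = 13036 and 0.7071 * 32768 = 23170.
     * They are applied with pmulhw below, which keeps the high 16 bits of
     * the signed 16x16 product.
     */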

    DECLARE_ALIGNED(8, static const short, t1_vector)[] = {
        T1,T1,T1,T1,
        T2,T2,T2,T2,
        T3,T3,T3,T3,
        C4,C4,C4,C4
    };

    __asm__ volatile (
        "movq (%0), %%mm0 \n\t"

        "movq 2*8(%1), %%mm1 \n\t"
        "movq %%mm0, %%mm2 \n\t"

        "movq 7*2*8(%1), %%mm4 \n\t"
        "pmulhw %%mm1, %%mm0 \n\t"

        "movq 16(%0), %%mm5 \n\t"
        "pmulhw %%mm4, %%mm2 \n\t"

        "movq 2*5*8(%1), %%mm6 \n\t"
        "movq %%mm5, %%mm7 \n\t"

        "movq 3*8*2(%1), %%mm3 \n\t"
        "psubsw %%mm4, %%mm0 \n\t"

        "movq 8(%0), %%mm4 \n\t"
        "pmulhw %%mm3, %%mm5 \n\t"

        "paddsw %%mm2, %%mm1 \n\t"
        "pmulhw %%mm6, %%mm7 \n\t"

        "movq %%mm4, %%mm2 \n\t"
        "paddsw %%mm3, %%mm5 \n\t"

        "pmulhw 2*8*2(%1), %%mm4 \n\t"
        "paddsw %%mm6, %%mm7 \n\t"

        "psubsw %%mm6, %%mm5 \n\t"
        "paddsw %%mm3, %%mm7 \n\t"

        "movq 6*8*2(%1), %%mm3 \n\t"
        "movq %%mm0, %%mm6 \n\t"

        "pmulhw %%mm3, %%mm2 \n\t"
        "psubsw %%mm5, %%mm0 \n\t"

        "psubsw %%mm3, %%mm4 \n\t"
        "paddsw %%mm6, %%mm5 \n\t"

        "movq %%mm0, 3*8*2(%1)\n\t"
        "movq %%mm1, %%mm6 \n\t"

        "paddsw 2*8*2(%1), %%mm2 \n\t"
        "paddsw %%mm7, %%mm6 \n\t"

        "psubsw %%mm7, %%mm1 \n\t"
        "movq %%mm1, %%mm7 \n\t"

        "movq 0*8(%1), %%mm3 \n\t"
        "paddsw %%mm5, %%mm1 \n\t"

        "movq 24(%0), %%mm0 \n\t"
        "psubsw %%mm5, %%mm7 \n\t"

        "movq %%mm6, 5*8*2(%1)\n\t"
        "pmulhw %%mm0, %%mm1 \n\t"

        "movq %%mm4, %%mm6 \n\t"
        "pmulhw %%mm0, %%mm7 \n\t"

        "movq 4*8*2(%1), %%mm5 \n\t"
        "movq %%mm3, %%mm0 \n\t"

        "psubsw %%mm5, %%mm3 \n\t"
        "paddsw %%mm5, %%mm0 \n\t"

        "paddsw %%mm3, %%mm4 \n\t"
        "movq %%mm0, %%mm5 \n\t"

        "psubsw %%mm6, %%mm3 \n\t"
        "paddsw %%mm2, %%mm5 \n\t"

        "paddsw %%mm1, %%mm1 \n\t"
        "psubsw %%mm2, %%mm0 \n\t"

        "paddsw %%mm7, %%mm7 \n\t"
        "movq %%mm3, %%mm2 \n\t"

        "movq %%mm4, %%mm6 \n\t"
        "paddsw %%mm7, %%mm3 \n\t"

        "psraw $" AV_STRINGIFY(COL_SHIFT) ", %%mm3\n\t"
        "paddsw %%mm1, %%mm4\n\t"

        "psraw $" AV_STRINGIFY(COL_SHIFT) ", %%mm4\n\t"
        "psubsw %%mm1, %%mm6 \n\t"

        "movq 5*8*2(%1), %%mm1 \n\t"
        "psubsw %%mm7, %%mm2 \n\t"

        "psraw $" AV_STRINGIFY(COL_SHIFT) ", %%mm6\n\t"
        "movq %%mm5, %%mm7 \n\t"

        "movq %%mm4, 1*8*2(%1)\n\t"
        "psraw $" AV_STRINGIFY(COL_SHIFT) ", %%mm2\n\t"

        "movq %%mm3, 2*8*2(%1)\n\t"
        "paddsw %%mm1, %%mm5 \n\t"

        "movq 3*8*2(%1), %%mm4 \n\t"
        "psubsw %%mm1, %%mm7 \n\t"

        "psraw $" AV_STRINGIFY(COL_SHIFT) ", %%mm5\n\t"
        "movq %%mm0, %%mm3 \n\t"

        "movq %%mm2, 5*8*2(%1)\n\t"
        "psubsw %%mm4, %%mm3 \n\t"

        "psraw $" AV_STRINGIFY(COL_SHIFT) ", %%mm7\n\t"
        "paddsw %%mm0, %%mm4 \n\t"

        "movq %%mm5, 0*8*2(%1)\n\t"
        "psraw $" AV_STRINGIFY(COL_SHIFT) ", %%mm3\n\t"

        "movq %%mm6, 6*8*2(%1)\n\t"
        "psraw $" AV_STRINGIFY(COL_SHIFT) ", %%mm4\n\t"

        "movq %%mm7, 7*8*2(%1)\n\t"

        "movq %%mm3, 4*8*2(%1)\n\t"

        "movq %%mm4, 3*8*2(%1)\n\t"
        :: "r" (t1_vector), "r" (col+offset)
    );

#undef T1
#undef T2
#undef T3
#undef C4
}
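
/*
 * Note (inferred, not from the original comments): idct_col processes four
 * columns at a time - one 16-bit sample per column in each MMX register -
 * which is why declare_idct() below calls it twice, with offsets 0 and 4.
 * pmulhw keeps the high 16 bits of each product (the ">> 16" of the
 * reference F() macro), paddsw/psubsw provide the saturation of S(), and
 * psraw applies the final ">> COL_SHIFT".
 */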


DECLARE_ALIGNED(8, static const int32_t, rounder0)[] =
    rounder ((1 << (COL_SHIFT - 1)) - 0.5);
DECLARE_ALIGNED(8, static const int32_t, rounder4)[] = rounder (0);
DECLARE_ALIGNED(8, static const int32_t, rounder1)[] =
    rounder (1.25683487303);
DECLARE_ALIGNED(8, static const int32_t, rounder7)[] =
    rounder (-0.25);
DECLARE_ALIGNED(8, static const int32_t, rounder2)[] =
    rounder (0.60355339059);
DECLARE_ALIGNED(8, static const int32_t, rounder6)[] =
    rounder (-0.25);
DECLARE_ALIGNED(8, static const int32_t, rounder3)[] =
    rounder (0.087788325588);
DECLARE_ALIGNED(8, static const int32_t, rounder5)[] =
    rounder (-0.441341716183);
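
/*
 * The biases above fold part of the column-pass rounding into the row pass
 * (note that rounder0 uses COL_SHIFT); with ROW_SHIFT = 11 and COL_SHIFT = 6,
 * rounder0 evaluates to round(31.5) = (31.5 + 0.5) * 2048 = { 65536, 65536 }.
 * The per-row fractional values appear to be tuned to reduce rounding error
 * for each row frequency; they are kept as-is from the original code.
 */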

#undef COL_SHIFT
#undef ROW_SHIFT

#define declare_idct(idct,table,idct_row_head,idct_row,idct_row_tail,idct_row_mid) \
void idct (int16_t * const block)                                  \
{                                                                  \
    DECLARE_ALIGNED(16, static const int16_t, table04)[] =         \
        table (22725, 21407, 19266, 16384, 12873, 8867, 4520);     \
    DECLARE_ALIGNED(16, static const int16_t, table17)[] =         \
        table (31521, 29692, 26722, 22725, 17855, 12299, 6270);    \
    DECLARE_ALIGNED(16, static const int16_t, table26)[] =         \
        table (29692, 27969, 25172, 21407, 16819, 11585, 5906);    \
    DECLARE_ALIGNED(16, static const int16_t, table35)[] =         \
        table (26722, 25172, 22654, 19266, 15137, 10426, 5315);    \
                                                                   \
    idct_row_head (block, 0*8, table04);                           \
    idct_row (table04, rounder0);                                  \
    idct_row_mid (block, 0*8, 4*8, table04);                       \
    idct_row (table04, rounder4);                                  \
    idct_row_mid (block, 4*8, 1*8, table17);                       \
    idct_row (table17, rounder1);                                  \
    idct_row_mid (block, 1*8, 7*8, table17);                       \
    idct_row (table17, rounder7);                                  \
    idct_row_mid (block, 7*8, 2*8, table26);                       \
    idct_row (table26, rounder2);                                  \
    idct_row_mid (block, 2*8, 6*8, table26);                       \
    idct_row (table26, rounder6);                                  \
    idct_row_mid (block, 6*8, 3*8, table35);                       \
    idct_row (table35, rounder3);                                  \
    idct_row_mid (block, 3*8, 5*8, table35);                       \
    idct_row (table35, rounder5);                                  \
    idct_row_tail (block, 5*8);                                    \
                                                                   \
    idct_col (block, 0);                                           \
    idct_col (block, 4);                                           \
}
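
/*
 * The table04 coefficients above match cos(k*pi/16) scaled by sqrt(2)*2^14
 * (e.g. cos(4*pi/16)*sqrt(2)*2^14 = 16384); the other three tables appear to
 * carry an additional per-row scale factor folded in from the 2-D transform.
 * The compiled-out sketch below (an illustration, not part of the build;
 * it assumes the common M_PI/M_SQRT2 constants from <math.h>) shows how the
 * table04 row can be reproduced under that assumption.
 */
#if 0
#include <math.h>
#include <stdio.h>

static void print_table04_coefficients(void)
{
    /* prints 22725 21407 19266 16384 12873 8867 4520 */
    for (int k = 1; k <= 7; k++)
        printf("%ld ", lrint(cos(k * M_PI / 16.0) * M_SQRT2 * 16384.0));
    printf("\n");
}
#endif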

declare_idct (ff_mmxext_idct, mmxext_table,
              mmxext_row_head, mmxext_row, mmxext_row_tail, mmxext_row_mid)

declare_idct (ff_mmx_idct, mmx_table,
              mmx_row_head, mmx_row, mmx_row_tail, mmx_row_mid)
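
/*
 * Usage sketch (an assumption about the callers, not part of the build):
 * both generated functions transform one 8x8 block of int16_t coefficients
 * in place.
 */
#if 0
static void idct_dc_example(void)
{
    DECLARE_ALIGNED(16, int16_t, block)[64] = { 0 };
    block[0] = 1023;     /* DC-only block */
    ff_mmx_idct(block);  /* block[] now holds the reconstructed samples */
}
#endif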

#endif /* HAVE_INLINE_ASM */