    t1 = vec_mradds (a1, vx7, vx1);                     \
    t8 = vec_mradds (a1, vx1, vec_subs (zero, vx7));    \
    t7 = vec_mradds (a2, vx5, vx3);                     \
    t3 = vec_mradds (ma2, vx3, vx5);                    \
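                                                        \
    /* 2nd stage */                                     \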
    t5 = vec_adds (vx0, vx4);                           \
    t0 = vec_subs (vx0, vx4);                           \
    t2 = vec_mradds (a0, vx6, vx2);                     \
    t4 = vec_mradds (a0, vx2, vec_subs (zero, vx6));    \
    t6 = vec_adds (t8, t3);                             \
    t3 = vec_subs (t8, t3);                             \
    t8 = vec_subs (t1, t7);                             \
    t1 = vec_adds (t1, t7);                             \
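                                                        \
    /* 3rd stage */                                     \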
    t7 = vec_adds (t5, t2);                             \
    t2 = vec_subs (t5, t2);                             \
    t5 = vec_adds (t0, t4);                             \
    t0 = vec_subs (t0, t4);                             \
    t4 = vec_subs (t8, t3);                             \
    t3 = vec_adds (t8, t3);                             \
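                                                        \
    /* 4th stage */                                     \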
    vy0 = vec_adds (t7, t1);                            \
    vy7 = vec_subs (t7, t1);                            \
    vy1 = vec_mradds (c4, t3, t5);                      \
    vy6 = vec_mradds (mc4, t3, t5);                     \
    vy2 = vec_mradds (c4, t4, t0);                      \
    vy5 = vec_mradds (mc4, t4, t0);                     \
    vy3 = vec_adds (t2, t6);                            \
    vy4 = vec_subs (t2, t6);
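
The workhorse in these butterflies is vec_mradds, which performs a
fixed-point multiply with rounding and a saturating add in a single
instruction. As a sketch of its per-lane behaviour (a scalar paraphrase
for illustration, not part of the original listing):

    /* scalar model of one 16-bit lane of vec_mradds (a, b, c) */
    static inline short mradds (short a, short b, short c)
    {
        int t = ((int) a * b + 0x4000) >> 15;   /* multiply and round */
        t += c;                                 /* accumulate */
        if (t > 32767) t = 32767;               /* saturate to 16 bits */
        if (t < -32768) t = -32768;
        return (short) t;
    }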
#define IDCT                                                            \
    vec_s16 vx0, vx1, vx2, vx3, vx4, vx5, vx6, vx7;                     \
    vec_s16 vy0, vy1, vy2, vy3, vy4, vy5, vy6, vy7;                     \
    vec_s16 a0, a1, a2, ma2, c4, mc4, zero, bias;                       \
    vec_s16 t0, t1, t2, t3, t4, t5, t6, t7, t8;                         \
    vec_u16 shift;                                                      \
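                                                                        \
    /* splat the multipliers and the rounding bias out of constants[0] */ \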
    c4 = vec_splat (constants[0], 0);                                   \
    a0 = vec_splat (constants[0], 1);                                   \
    a1 = vec_splat (constants[0], 2);                                   \
    a2 = vec_splat (constants[0], 3);                                   \
    mc4 = vec_splat (constants[0], 4);                                  \
    ma2 = vec_splat (constants[0], 5);                                  \
    bias = (vec_s16)vec_splat ((vec_s32)constants[0], 3);               \
                                                                        \
    zero = vec_splat_s16 (0);                                           \
    shift = vec_splat_u16 (4);                                          \
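                                                                        \
    /* load the eight input rows, shifted left by 4 and premultiplied   \
       by the per-row prescale factors in constants[1..4] */            \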
    vx0 = vec_mradds (vec_sl (block[0], shift), constants[1], zero);    \
    vx1 = vec_mradds (vec_sl (block[1], shift), constants[2], zero);    \
    vx2 = vec_mradds (vec_sl (block[2], shift), constants[3], zero);    \
    vx3 = vec_mradds (vec_sl (block[3], shift), constants[4], zero);    \
    vx4 = vec_mradds (vec_sl (block[4], shift), constants[1], zero);    \
    vx5 = vec_mradds (vec_sl (block[5], shift), constants[4], zero);    \
    vx6 = vec_mradds (vec_sl (block[6], shift), constants[3], zero);    \
    vx7 = vec_mradds (vec_sl (block[7], shift), constants[2], zero);    \
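                                                                        \
    /* first 1-D pass, operating on the columns */                      \
    IDCT_HALF                                                           \
                                                                        \
    /* transpose the 8x8 matrix of halfwords with merge operations */   \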
    vx0 = vec_mergeh (vy0, vy4);                                        \
    vx1 = vec_mergel (vy0, vy4);                                        \
    vx2 = vec_mergeh (vy1, vy5);                                        \
    vx3 = vec_mergel (vy1, vy5);                                        \
    vx4 = vec_mergeh (vy2, vy6);                                        \
    vx5 = vec_mergel (vy2, vy6);                                        \
    vx6 = vec_mergeh (vy3, vy7);                                        \
    vx7 = vec_mergel (vy3, vy7);                                        \
                                                                        \
    vy0 = vec_mergeh (vx0, vx4);                                        \
    vy1 = vec_mergel (vx0, vx4);                                        \
    vy2 = vec_mergeh (vx1, vx5);                                        \
    vy3 = vec_mergel (vx1, vx5);                                        \
    vy4 = vec_mergeh (vx2, vx6);                                        \
    vy5 = vec_mergel (vx2, vx6);                                        \
    vy6 = vec_mergeh (vx3, vx7);                                        \
    vy7 = vec_mergel (vx3, vx7);                                        \
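                                                                        \
    /* third merge pass; the rounding bias is folded into vx0 here and  \
       carries through to every output of the second pass */            \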
    vx0 = vec_adds (vec_mergeh (vy0, vy4), bias);                       \
    vx1 = vec_mergel (vy0, vy4);                                        \
    vx2 = vec_mergeh (vy1, vy5);                                        \
    vx3 = vec_mergel (vy1, vy5);                                        \
    vx4 = vec_mergeh (vy2, vy6);                                        \
    vx5 = vec_mergel (vy2, vy6);                                        \
    vx6 = vec_mergeh (vy3, vy7);                                        \
    vx7 = vec_mergel (vy3, vy7);                                        \
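                                                                        \
    /* second 1-D pass, this time along the rows */                     \
    IDCT_HALF                                                           \
                                                                        \
    /* shift out the scaling (a factor of 2^6) left by the two passes */ \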
    shift = vec_splat_u16 (6);                                          \
    vx0 = vec_sra (vy0, shift);                                         \
    vx1 = vec_sra (vy1, shift);                                         \
    vx2 = vec_sra (vy2, shift);                                         \
    vx3 = vec_sra (vy3, shift);                                         \
    vx4 = vec_sra (vy4, shift);                                         \
    vx5 = vec_sra (vy5, shift);                                         \
    vx6 = vec_sra (vy6, shift);                                         \
    vx7 = vec_sra (vy7, shift);
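
/* constants[0] packs the multipliers: c4 = cos(pi/4) in 1.15 fixed
 * point (23170 ~ 0.70711 * 32768), then a0 = tan(pi/8), a1 = tan(pi/16)
 * and a2 = tan(3*pi/16), followed by -c4 and -a2; the trailing {32, 31}
 * pair is the rounding bias, read back as one 32-bit splat.  The rows
 * constants[1..4] hold the per-row prescale factors. */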
static const vec_s16 constants[5] = {
    {23170, 13573,  6518, 21895, -23170, -21895,    32,    31},
    {16384, 22725, 21407, 19266,  16384,  19266, 21407, 22725},
    {22725, 31521, 29692, 26722,  22725,  26722, 29692, 31521},
    {21407, 29692, 27969, 25172,  21407,  25172, 27969, 29692},
    {19266, 26722, 25172, 22654,  19266,  22654, 25172, 26722}
};
 
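/* COPY saturates a row of 16-bit results to unsigned bytes and stores
 * the eight pixels with two 32-bit element stores (vec_ste writes a
 * single element, so both halves are stored explicitly) */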
#define COPY(dest,src)                                          \
    tmp = vec_packsu (src, src);                                \
    vec_ste ((vec_u32)tmp, 0, (unsigned int *)dest);            \
    vec_ste ((vec_u32)tmp, 4, (unsigned int *)dest);
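
/* build permute vectors that zero-extend eight (possibly unaligned)
 * bytes from dest into 16-bit lanes: vec_lvsl yields the alignment-
 * dependent byte indices, and merging them with 0xFF makes every other
 * index select a byte from the zero vector in the vec_perm below */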
    p0 = vec_lvsl (0, dest);
    p1 = vec_lvsl (stride, dest);
    p = vec_splat_u8 (-1);
    perm0 = vec_mergeh (p, p0);
    perm1 = vec_mergeh (p, p1);
 
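/* ADD widens eight dest pixels to 16 bits using the permute built
 * above, adds the IDCT output with saturation, packs back down to
 * unsigned bytes and stores the row with two element stores */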
#define ADD(dest,src,perm)                                              \
    tmp = vec_ld (0, dest);                                             \
    tmp2 = (vec_s16)vec_perm (tmp, (vec_u8)zero, perm);                 \
    tmp3 = vec_adds (tmp2, src);                                        \
    tmp = vec_packsu (tmp3, tmp3);                                      \
    vec_ste ((vec_u32)tmp, 0, (unsigned int *)dest);                    \
    vec_ste ((vec_u32)tmp, 4, (unsigned int *)dest);
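
/* last of the eight row adds; the rows before it are handled the same
 * way, stepping dest by stride and alternating perm0/perm1 to track
 * the two alignment patterns */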
    ADD (dest, vx7, perm1)