21 #ifndef AVUTIL_MIPS_GENERIC_MACROS_MSA_H
#define AVUTIL_MIPS_GENERIC_MACROS_MSA_H

#include <stdint.h>
#include <msa.h>
#include "config.h"
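/* ALLOC_ALIGNED(align) requests (2 * align)-byte alignment for a variable.
   LD_V / ST_V load and store a single MSA vector of the given element type;
   the typed wrappers below (LD_UB, LD_SH, ST_UB, ...) fix RTYPE. */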
29 #define ALLOC_ALIGNED(align) __attribute__ ((aligned((align) << 1)))
31 #define LD_V(RTYPE, psrc) *((RTYPE *)(psrc))
32 #define LD_UB(...) LD_V(v16u8, __VA_ARGS__)
33 #define LD_SB(...) LD_V(v16i8, __VA_ARGS__)
34 #define LD_UH(...) LD_V(v8u16, __VA_ARGS__)
35 #define LD_SH(...) LD_V(v8i16, __VA_ARGS__)
36 #define LD_UW(...) LD_V(v4u32, __VA_ARGS__)
37 #define LD_SW(...) LD_V(v4i32, __VA_ARGS__)
39 #define ST_V(RTYPE, in, pdst) *((RTYPE *)(pdst)) = (in)
40 #define ST_UB(...) ST_V(v16u8, __VA_ARGS__)
41 #define ST_SB(...) ST_V(v16i8, __VA_ARGS__)
42 #define ST_UH(...) ST_V(v8u16, __VA_ARGS__)
43 #define ST_SH(...) ST_V(v8i16, __VA_ARGS__)
44 #define ST_UW(...) ST_V(v4u32, __VA_ARGS__)
45 #define ST_SW(...) ST_V(v4i32, __VA_ARGS__)
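/* Scalar unaligned load/store helpers: LH, LW and LD read a halfword, word
   or doubleword, SH, SW and SD store one.  On MIPS r6 plain dereferences are
   used; on pre-r6 cores the unaligned-access instruction sequences (ulh,
   lwr/lwl, ldr/ldl, ush, usw) are issued through inline asm. */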
#if HAVE_MIPS32R6 || HAVE_MIPS64R6
    #define LH(psrc)                              \
    ( {                                           \
        uint16_t val_lh_m = *(uint16_t *)(psrc);  \
        val_lh_m;                                 \
    } )

    #define LW(psrc)                              \
    ( {                                           \
        uint32_t val_lw_m = *(uint32_t *)(psrc);  \
        val_lw_m;                                 \
    } )

    #if (__mips == 64)
        #define LD(psrc)                              \
        ( {                                           \
            uint64_t val_ld_m = *(uint64_t *)(psrc);  \
            val_ld_m;                                 \
        } )
    #else  // !(__mips == 64)
        #define LD(psrc)                                                    \
        ( {                                                                 \
            uint8_t *psrc_ld_m = (uint8_t *) (psrc);                        \
            uint32_t val0_ld_m, val1_ld_m;                                  \
            uint64_t val_ld_m = 0;                                          \
                                                                            \
            val0_ld_m = LW(psrc_ld_m);                                      \
            val1_ld_m = LW(psrc_ld_m + 4);                                  \
                                                                            \
            val_ld_m = (uint64_t) (val1_ld_m);                              \
            val_ld_m = (uint64_t) ((val_ld_m << 32) & 0xFFFFFFFF00000000);  \
            val_ld_m = (uint64_t) (val_ld_m | (uint64_t) val0_ld_m);        \
            val_ld_m;                                                       \
        } )
    #endif  // (__mips == 64)

    #define SH(val, pdst) *(uint16_t *)(pdst) = (val);
    #define SW(val, pdst) *(uint32_t *)(pdst) = (val);
    #define SD(val, pdst) *(uint64_t *)(pdst) = (val);
88 #else // !HAVE_MIPS32R6 && !HAVE_MIPS64R6
91 uint8_t *psrc_lh_m = (uint8_t *) (psrc); \
95 "ulh %[val_lh_m], %[psrc_lh_m] \n\t" \
97 : [val_lh_m] "=r" (val_lh_m) \
98 : [psrc_lh_m] "m" (*psrc_lh_m) \
106 uint8_t *psrc_lw_m = (uint8_t *) (psrc); \
110 "lwr %[val_lw_m], 0(%[psrc_lw_m]) \n\t" \
111 "lwl %[val_lw_m], 3(%[psrc_lw_m]) \n\t" \
113 : [val_lw_m] "=&r"(val_lw_m) \
114 : [psrc_lw_m] "r"(psrc_lw_m) \
123 uint8_t *psrc_ld_m = (uint8_t *) (psrc); \
124 uint64_t val_ld_m = 0; \
127 "ldr %[val_ld_m], 0(%[psrc_ld_m]) \n\t" \
128 "ldl %[val_ld_m], 7(%[psrc_ld_m]) \n\t" \
130 : [val_ld_m] "=&r" (val_ld_m) \
131 : [psrc_ld_m] "r" (psrc_ld_m) \
136 #else // !(__mips == 64)
139 uint8_t *psrc_ld_m = (uint8_t *) (psrc); \
140 uint32_t val0_ld_m, val1_ld_m; \
141 uint64_t val_ld_m = 0; \
143 val0_ld_m = LW(psrc_ld_m); \
144 val1_ld_m = LW(psrc_ld_m + 4); \
146 val_ld_m = (uint64_t) (val1_ld_m); \
147 val_ld_m = (uint64_t) ((val_ld_m << 32) & 0xFFFFFFFF00000000); \
148 val_ld_m = (uint64_t) (val_ld_m | (uint64_t) val0_ld_m); \
152 #endif // (__mips == 64)
154 #define SH(val, pdst) \
156 uint8_t *pdst_sh_m = (uint8_t *) (pdst); \
157 uint16_t val_sh_m = (val); \
160 "ush %[val_sh_m], %[pdst_sh_m] \n\t" \
162 : [pdst_sh_m] "=m" (*pdst_sh_m) \
163 : [val_sh_m] "r" (val_sh_m) \
167 #define SW(val, pdst) \
169 uint8_t *pdst_sw_m = (uint8_t *) (pdst); \
170 uint32_t val_sw_m = (val); \
173 "usw %[val_sw_m], %[pdst_sw_m] \n\t" \
175 : [pdst_sw_m] "=m" (*pdst_sw_m) \
176 : [val_sw_m] "r" (val_sw_m) \
180 #define SD(val, pdst) \
182 uint8_t *pdst_sd_m = (uint8_t *) (pdst); \
183 uint32_t val0_sd_m, val1_sd_m; \
185 val0_sd_m = (uint32_t) ((val) & 0x00000000FFFFFFFF); \
186 val1_sd_m = (uint32_t) (((val) >> 32) & 0x00000000FFFFFFFF); \
188 SW(val0_sd_m, pdst_sd_m); \
189 SW(val1_sd_m, pdst_sd_m + 4); \
191 #endif // HAVE_MIPS32R6 || HAVE_MIPS64R6
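/* Load or store several scalar words/doublewords separated by a stride:
   LW2/LW4 and LD2/LD4 read consecutive rows, SW4 and SD4 write them. */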
202 #define LW4(psrc, stride, out0, out1, out2, out3) \
205 out1 = LW((psrc) + stride); \
206 out2 = LW((psrc) + 2 * stride); \
207 out3 = LW((psrc) + 3 * stride); \
210 #define LW2(psrc, stride, out0, out1) \
213 out1 = LW((psrc) + stride); \
223 #define LD2(psrc, stride, out0, out1) \
226 out1 = LD((psrc) + stride); \
228 #define LD4(psrc, stride, out0, out1, out2, out3) \
230 LD2((psrc), stride, out0, out1); \
231 LD2((psrc) + 2 * stride, stride, out2, out3); \
241 #define SW4(in0, in1, in2, in3, pdst, stride) \
244 SW(in1, (pdst) + stride); \
245 SW(in2, (pdst) + 2 * stride); \
246 SW(in3, (pdst) + 3 * stride); \
256 #define SD4(in0, in1, in2, in3, pdst, stride) \
259 SD(in1, (pdst) + stride); \
260 SD(in2, (pdst) + 2 * stride); \
261 SD(in3, (pdst) + 3 * stride); \
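/* LD_V2 ... LD_V16: load multiple vectors with a stride between
   consecutive loads, plus the usual typed wrappers. */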
272 #define LD_V2(RTYPE, psrc, stride, out0, out1) \
274 out0 = LD_V(RTYPE, (psrc)); \
275 out1 = LD_V(RTYPE, (psrc) + stride); \
277 #define LD_UB2(...) LD_V2(v16u8, __VA_ARGS__)
278 #define LD_SB2(...) LD_V2(v16i8, __VA_ARGS__)
279 #define LD_UH2(...) LD_V2(v8u16, __VA_ARGS__)
280 #define LD_SH2(...) LD_V2(v8i16, __VA_ARGS__)
281 #define LD_SW2(...) LD_V2(v4i32, __VA_ARGS__)
283 #define LD_V3(RTYPE, psrc, stride, out0, out1, out2) \
285 LD_V2(RTYPE, (psrc), stride, out0, out1); \
286 out2 = LD_V(RTYPE, (psrc) + 2 * stride); \
288 #define LD_UB3(...) LD_V3(v16u8, __VA_ARGS__)
289 #define LD_SB3(...) LD_V3(v16i8, __VA_ARGS__)
291 #define LD_V4(RTYPE, psrc, stride, out0, out1, out2, out3) \
293 LD_V2(RTYPE, (psrc), stride, out0, out1); \
294 LD_V2(RTYPE, (psrc) + 2 * stride , stride, out2, out3); \
296 #define LD_UB4(...) LD_V4(v16u8, __VA_ARGS__)
297 #define LD_SB4(...) LD_V4(v16i8, __VA_ARGS__)
298 #define LD_UH4(...) LD_V4(v8u16, __VA_ARGS__)
299 #define LD_SH4(...) LD_V4(v8i16, __VA_ARGS__)
300 #define LD_SW4(...) LD_V4(v4i32, __VA_ARGS__)
302 #define LD_V5(RTYPE, psrc, stride, out0, out1, out2, out3, out4) \
304 LD_V4(RTYPE, (psrc), stride, out0, out1, out2, out3); \
305 out4 = LD_V(RTYPE, (psrc) + 4 * stride); \
307 #define LD_UB5(...) LD_V5(v16u8, __VA_ARGS__)
308 #define LD_SB5(...) LD_V5(v16i8, __VA_ARGS__)
310 #define LD_V6(RTYPE, psrc, stride, out0, out1, out2, out3, out4, out5) \
312 LD_V4(RTYPE, (psrc), stride, out0, out1, out2, out3); \
313 LD_V2(RTYPE, (psrc) + 4 * stride, stride, out4, out5); \
315 #define LD_UB6(...) LD_V6(v16u8, __VA_ARGS__)
316 #define LD_SB6(...) LD_V6(v16i8, __VA_ARGS__)
317 #define LD_UH6(...) LD_V6(v8u16, __VA_ARGS__)
318 #define LD_SH6(...) LD_V6(v8i16, __VA_ARGS__)
320 #define LD_V7(RTYPE, psrc, stride, \
321 out0, out1, out2, out3, out4, out5, out6) \
323 LD_V5(RTYPE, (psrc), stride, out0, out1, out2, out3, out4); \
324 LD_V2(RTYPE, (psrc) + 5 * stride, stride, out5, out6); \
326 #define LD_UB7(...) LD_V7(v16u8, __VA_ARGS__)
327 #define LD_SB7(...) LD_V7(v16i8, __VA_ARGS__)
329 #define LD_V8(RTYPE, psrc, stride, \
330 out0, out1, out2, out3, out4, out5, out6, out7) \
332 LD_V4(RTYPE, (psrc), stride, out0, out1, out2, out3); \
333 LD_V4(RTYPE, (psrc) + 4 * stride, stride, out4, out5, out6, out7); \
335 #define LD_UB8(...) LD_V8(v16u8, __VA_ARGS__)
336 #define LD_SB8(...) LD_V8(v16i8, __VA_ARGS__)
337 #define LD_UH8(...) LD_V8(v8u16, __VA_ARGS__)
338 #define LD_SH8(...) LD_V8(v8i16, __VA_ARGS__)
339 #define LD_SW8(...) LD_V8(v4i32, __VA_ARGS__)
341 #define LD_V16(RTYPE, psrc, stride, \
342 out0, out1, out2, out3, out4, out5, out6, out7, \
343 out8, out9, out10, out11, out12, out13, out14, out15) \
345 LD_V8(RTYPE, (psrc), stride, \
346 out0, out1, out2, out3, out4, out5, out6, out7); \
347 LD_V8(RTYPE, (psrc) + 8 * stride, stride, \
348 out8, out9, out10, out11, out12, out13, out14, out15); \
350 #define LD_SH16(...) LD_V16(v8i16, __VA_ARGS__)
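/* ST_V2 ... ST_V8: store multiple vectors with a stride between
   consecutive stores. */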
358 #define ST_V2(RTYPE, in0, in1, pdst, stride) \
360 ST_V(RTYPE, in0, (pdst)); \
361 ST_V(RTYPE, in1, (pdst) + stride); \
363 #define ST_UB2(...) ST_V2(v16u8, __VA_ARGS__)
364 #define ST_SB2(...) ST_V2(v16i8, __VA_ARGS__)
365 #define ST_UH2(...) ST_V2(v8u16, __VA_ARGS__)
366 #define ST_SH2(...) ST_V2(v8i16, __VA_ARGS__)
367 #define ST_SW2(...) ST_V2(v4i32, __VA_ARGS__)
369 #define ST_V4(RTYPE, in0, in1, in2, in3, pdst, stride) \
371 ST_V2(RTYPE, in0, in1, (pdst), stride); \
372 ST_V2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \
374 #define ST_UB4(...) ST_V4(v16u8, __VA_ARGS__)
375 #define ST_SB4(...) ST_V4(v16i8, __VA_ARGS__)
376 #define ST_SH4(...) ST_V4(v8i16, __VA_ARGS__)
377 #define ST_SW4(...) ST_V4(v4i32, __VA_ARGS__)
379 #define ST_V6(RTYPE, in0, in1, in2, in3, in4, in5, pdst, stride) \
381 ST_V4(RTYPE, in0, in1, in2, in3, (pdst), stride); \
382 ST_V2(RTYPE, in4, in5, (pdst) + 4 * stride, stride); \
384 #define ST_SH6(...) ST_V6(v8i16, __VA_ARGS__)
386 #define ST_V8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \
388 ST_V4(RTYPE, in0, in1, in2, in3, (pdst), stride); \
389 ST_V4(RTYPE, in4, in5, in6, in7, (pdst) + 4 * stride, stride); \
391 #define ST_UB8(...) ST_V8(v16u8, __VA_ARGS__)
392 #define ST_SH8(...) ST_V8(v8i16, __VA_ARGS__)
393 #define ST_SW8(...) ST_V8(v4i32, __VA_ARGS__)
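/* ST_H*, ST_W*, ST_D*: copy selected halfword/word/doubleword elements out
   of a vector (__msa_copy_u_h/w/d) and store them to rows separated by
   stride. */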
403 #define ST_H1(in, idx, pdst) \
406 out0_m = __msa_copy_u_h((v8i16) in, idx); \
407 SH(out0_m, (pdst)); \
409 #define ST_H2(in, idx0, idx1, pdst, stride) \
411 uint16_t out0_m, out1_m; \
412 out0_m = __msa_copy_u_h((v8i16) in, idx0); \
413 out1_m = __msa_copy_u_h((v8i16) in, idx1); \
414 SH(out0_m, (pdst)); \
415 SH(out1_m, (pdst) + stride); \
417 #define ST_H4(in, idx0, idx1, idx2, idx3, pdst, stride) \
419 uint16_t out0_m, out1_m, out2_m, out3_m; \
420 out0_m = __msa_copy_u_h((v8i16) in, idx0); \
421 out1_m = __msa_copy_u_h((v8i16) in, idx1); \
422 out2_m = __msa_copy_u_h((v8i16) in, idx2); \
423 out3_m = __msa_copy_u_h((v8i16) in, idx3); \
424 SH(out0_m, (pdst)); \
425 SH(out1_m, (pdst) + stride); \
426 SH(out2_m, (pdst) + 2 * stride); \
427 SH(out3_m, (pdst) + 3 * stride); \
429 #define ST_H8(in, idx0, idx1, idx2, idx3, idx4, idx5, \
430 idx6, idx7, pdst, stride) \
432 ST_H4(in, idx0, idx1, idx2, idx3, pdst, stride) \
433 ST_H4(in, idx4, idx5, idx6, idx7, (pdst) + 4*stride, stride) \
444 #define ST_W1(in, idx, pdst) \
447 out0_m = __msa_copy_u_w((v4i32) in, idx); \
448 SW(out0_m, (pdst)); \
450 #define ST_W2(in, idx0, idx1, pdst, stride) \
452 uint32_t out0_m, out1_m; \
453 out0_m = __msa_copy_u_w((v4i32) in, idx0); \
454 out1_m = __msa_copy_u_w((v4i32) in, idx1); \
455 SW(out0_m, (pdst)); \
456 SW(out1_m, (pdst) + stride); \
458 #define ST_W4(in, idx0, idx1, idx2, idx3, pdst, stride) \
460 uint32_t out0_m, out1_m, out2_m, out3_m; \
461 out0_m = __msa_copy_u_w((v4i32) in, idx0); \
462 out1_m = __msa_copy_u_w((v4i32) in, idx1); \
463 out2_m = __msa_copy_u_w((v4i32) in, idx2); \
464 out3_m = __msa_copy_u_w((v4i32) in, idx3); \
465 SW(out0_m, (pdst)); \
466 SW(out1_m, (pdst) + stride); \
467 SW(out2_m, (pdst) + 2*stride); \
468 SW(out3_m, (pdst) + 3*stride); \
470 #define ST_W8(in0, in1, idx0, idx1, idx2, idx3, \
471 idx4, idx5, idx6, idx7, pdst, stride) \
473 ST_W4(in0, idx0, idx1, idx2, idx3, pdst, stride) \
474 ST_W4(in1, idx4, idx5, idx6, idx7, pdst + 4*stride, stride) \
485 #define ST_D1(in, idx, pdst) \
488 out0_m = __msa_copy_u_d((v2i64) in, idx); \
489 SD(out0_m, (pdst)); \
491 #define ST_D2(in, idx0, idx1, pdst, stride) \
493 uint64_t out0_m, out1_m; \
494 out0_m = __msa_copy_u_d((v2i64) in, idx0); \
495 out1_m = __msa_copy_u_d((v2i64) in, idx1); \
496 SD(out0_m, (pdst)); \
497 SD(out1_m, (pdst) + stride); \
499 #define ST_D4(in0, in1, idx0, idx1, idx2, idx3, pdst, stride) \
501 uint64_t out0_m, out1_m, out2_m, out3_m; \
502 out0_m = __msa_copy_u_d((v2i64) in0, idx0); \
503 out1_m = __msa_copy_u_d((v2i64) in0, idx1); \
504 out2_m = __msa_copy_u_d((v2i64) in1, idx2); \
505 out3_m = __msa_copy_u_d((v2i64) in1, idx3); \
506 SD(out0_m, (pdst)); \
507 SD(out1_m, (pdst) + stride); \
508 SD(out2_m, (pdst) + 2 * stride); \
509 SD(out3_m, (pdst) + 3 * stride); \
511 #define ST_D8(in0, in1, in2, in3, idx0, idx1, idx2, idx3, \
512 idx4, idx5, idx6, idx7, pdst, stride) \
514 ST_D4(in0, in1, idx0, idx1, idx2, idx3, pdst, stride) \
515 ST_D4(in2, in3, idx4, idx5, idx6, idx7, pdst + 4 * stride, stride) \
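/* ST12x8_UB: store eight rows of a 12-byte-wide block as an 8-byte part
   plus a 4-byte part per row. */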
527 #define ST12x8_UB(in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \
529 uint64_t out0_m, out1_m, out2_m, out3_m; \
530 uint64_t out4_m, out5_m, out6_m, out7_m; \
531 uint32_t out8_m, out9_m, out10_m, out11_m; \
532 uint32_t out12_m, out13_m, out14_m, out15_m; \
533 uint8_t *pblk_12x8_m = (uint8_t *) (pdst); \
535 out0_m = __msa_copy_u_d((v2i64) in0, 0); \
536 out1_m = __msa_copy_u_d((v2i64) in1, 0); \
537 out2_m = __msa_copy_u_d((v2i64) in2, 0); \
538 out3_m = __msa_copy_u_d((v2i64) in3, 0); \
539 out4_m = __msa_copy_u_d((v2i64) in4, 0); \
540 out5_m = __msa_copy_u_d((v2i64) in5, 0); \
541 out6_m = __msa_copy_u_d((v2i64) in6, 0); \
542 out7_m = __msa_copy_u_d((v2i64) in7, 0); \
544 out8_m = __msa_copy_u_w((v4i32) in0, 2); \
545 out9_m = __msa_copy_u_w((v4i32) in1, 2); \
546 out10_m = __msa_copy_u_w((v4i32) in2, 2); \
547 out11_m = __msa_copy_u_w((v4i32) in3, 2); \
548 out12_m = __msa_copy_u_w((v4i32) in4, 2); \
549 out13_m = __msa_copy_u_w((v4i32) in5, 2); \
550 out14_m = __msa_copy_u_w((v4i32) in6, 2); \
551 out15_m = __msa_copy_u_w((v4i32) in7, 2); \
553 SD(out0_m, pblk_12x8_m); \
554 SW(out8_m, pblk_12x8_m + 8); \
555 pblk_12x8_m += stride; \
556 SD(out1_m, pblk_12x8_m); \
557 SW(out9_m, pblk_12x8_m + 8); \
558 pblk_12x8_m += stride; \
559 SD(out2_m, pblk_12x8_m); \
560 SW(out10_m, pblk_12x8_m + 8); \
561 pblk_12x8_m += stride; \
562 SD(out3_m, pblk_12x8_m); \
563 SW(out11_m, pblk_12x8_m + 8); \
564 pblk_12x8_m += stride; \
565 SD(out4_m, pblk_12x8_m); \
566 SW(out12_m, pblk_12x8_m + 8); \
567 pblk_12x8_m += stride; \
568 SD(out5_m, pblk_12x8_m); \
569 SW(out13_m, pblk_12x8_m + 8); \
570 pblk_12x8_m += stride; \
571 SD(out6_m, pblk_12x8_m); \
572 SW(out14_m, pblk_12x8_m + 8); \
573 pblk_12x8_m += stride; \
574 SD(out7_m, pblk_12x8_m); \
575 SW(out15_m, pblk_12x8_m + 8); \
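/* AVER_UB2/4: rounded average (aver_u_b) of unsigned byte vector pairs. */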
590 #define AVER_UB2(RTYPE, in0, in1, in2, in3, out0, out1) \
592 out0 = (RTYPE) __msa_aver_u_b((v16u8) in0, (v16u8) in1); \
593 out1 = (RTYPE) __msa_aver_u_b((v16u8) in2, (v16u8) in3); \
595 #define AVER_UB2_UB(...) AVER_UB2(v16u8, __VA_ARGS__)
597 #define AVER_UB4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
598 out0, out1, out2, out3) \
600 AVER_UB2(RTYPE, in0, in1, in2, in3, out0, out1) \
601 AVER_UB2(RTYPE, in4, in5, in6, in7, out2, out3) \
603 #define AVER_UB4_UB(...) AVER_UB4(v16u8, __VA_ARGS__)
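#if 0
/* Usage sketch (illustrative only, not part of the original header):
 * average two 16x4 byte blocks with the load, average and store macros
 * above.  The function name and arguments are hypothetical. */
static void avg_16x4_msa(uint8_t *src0, uint8_t *src1, uint8_t *dst,
                         int32_t stride)
{
    v16u8 a0, a1, a2, a3, b0, b1, b2, b3;

    LD_UB4(src0, stride, a0, a1, a2, a3);   /* four rows from each source */
    LD_UB4(src1, stride, b0, b1, b2, b3);
    AVER_UB4_UB(a0, b0, a1, b1, a2, b2, a3, b3, a0, a1, a2, a3);
    ST_UB4(a0, a1, a2, a3, dst, stride);    /* four averaged rows */
}
#endif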
612 #define SLDI_B(RTYPE, d, s, slide_val, out) \
614 out = (RTYPE) __msa_sldi_b((v16i8) d, (v16i8) s, slide_val); \
617 #define SLDI_B2(RTYPE, d0, s0, d1, s1, slide_val, out0, out1) \
619 SLDI_B(RTYPE, d0, s0, slide_val, out0) \
620 SLDI_B(RTYPE, d1, s1, slide_val, out1) \
622 #define SLDI_B2_UB(...) SLDI_B2(v16u8, __VA_ARGS__)
623 #define SLDI_B2_SB(...) SLDI_B2(v16i8, __VA_ARGS__)
624 #define SLDI_B2_SH(...) SLDI_B2(v8i16, __VA_ARGS__)
625 #define SLDI_B2_SW(...) SLDI_B2(v4i32, __VA_ARGS__)
627 #define SLDI_B3(RTYPE, d0, s0, d1, s1, d2, s2, slide_val, \
630 SLDI_B2(RTYPE, d0, s0, d1, s1, slide_val, out0, out1) \
631 SLDI_B(RTYPE, d2, s2, slide_val, out2) \
633 #define SLDI_B3_UB(...) SLDI_B3(v16u8, __VA_ARGS__)
634 #define SLDI_B3_SB(...) SLDI_B3(v16i8, __VA_ARGS__)
635 #define SLDI_B3_UH(...) SLDI_B3(v8u16, __VA_ARGS__)
637 #define SLDI_B4(RTYPE, d0, s0, d1, s1, d2, s2, d3, s3, \
638 slide_val, out0, out1, out2, out3) \
640 SLDI_B2(RTYPE, d0, s0, d1, s1, slide_val, out0, out1) \
641 SLDI_B2(RTYPE, d2, s2, d3, s3, slide_val, out2, out3) \
643 #define SLDI_B4_UB(...) SLDI_B4(v16u8, __VA_ARGS__)
644 #define SLDI_B4_SB(...) SLDI_B4(v16i8, __VA_ARGS__)
645 #define SLDI_B4_SH(...) SLDI_B4(v8i16, __VA_ARGS__)
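/* VSHF_B/H/W: shuffle elements of two source vectors under a control mask
   (vshf.b/h/w), two to four results per call. */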
656 #define VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) \
658 out0 = (RTYPE) __msa_vshf_b((v16i8) mask0, (v16i8) in1, (v16i8) in0); \
659 out1 = (RTYPE) __msa_vshf_b((v16i8) mask1, (v16i8) in3, (v16i8) in2); \
661 #define VSHF_B2_UB(...) VSHF_B2(v16u8, __VA_ARGS__)
662 #define VSHF_B2_SB(...) VSHF_B2(v16i8, __VA_ARGS__)
663 #define VSHF_B2_UH(...) VSHF_B2(v8u16, __VA_ARGS__)
664 #define VSHF_B2_SH(...) VSHF_B2(v8i16, __VA_ARGS__)
666 #define VSHF_B3(RTYPE, in0, in1, in2, in3, in4, in5, mask0, mask1, mask2, \
669 VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1); \
670 out2 = (RTYPE) __msa_vshf_b((v16i8) mask2, (v16i8) in5, (v16i8) in4); \
672 #define VSHF_B3_SB(...) VSHF_B3(v16i8, __VA_ARGS__)
674 #define VSHF_B4(RTYPE, in0, in1, mask0, mask1, mask2, mask3, \
675 out0, out1, out2, out3) \
677 VSHF_B2(RTYPE, in0, in1, in0, in1, mask0, mask1, out0, out1); \
678 VSHF_B2(RTYPE, in0, in1, in0, in1, mask2, mask3, out2, out3); \
680 #define VSHF_B4_SB(...) VSHF_B4(v16i8, __VA_ARGS__)
681 #define VSHF_B4_SH(...) VSHF_B4(v8i16, __VA_ARGS__)
692 #define VSHF_H2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) \
694 out0 = (RTYPE) __msa_vshf_h((v8i16) mask0, (v8i16) in1, (v8i16) in0); \
695 out1 = (RTYPE) __msa_vshf_h((v8i16) mask1, (v8i16) in3, (v8i16) in2); \
697 #define VSHF_H2_SH(...) VSHF_H2(v8i16, __VA_ARGS__)
699 #define VSHF_H3(RTYPE, in0, in1, in2, in3, in4, in5, mask0, mask1, mask2, \
702 VSHF_H2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1); \
703 out2 = (RTYPE) __msa_vshf_h((v8i16) mask2, (v8i16) in5, (v8i16) in4); \
705 #define VSHF_H3_SH(...) VSHF_H3(v8i16, __VA_ARGS__)
716 #define VSHF_W2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) \
718 out0 = (RTYPE) __msa_vshf_w((v4i32) mask0, (v4i32) in1, (v4i32) in0); \
719 out1 = (RTYPE) __msa_vshf_w((v4i32) mask1, (v4i32) in3, (v4i32) in2); \
721 #define VSHF_W2_SB(...) VSHF_W2(v16i8, __VA_ARGS__)
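/* DOTP_UB/SB/SH: dot product - adjacent element pairs of 'mult' and 'cnst'
   are multiplied and each pair of products is summed into a double-width
   result element. */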
735 #define DOTP_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
737 out0 = (RTYPE) __msa_dotp_u_h((v16u8) mult0, (v16u8) cnst0); \
738 out1 = (RTYPE) __msa_dotp_u_h((v16u8) mult1, (v16u8) cnst1); \
740 #define DOTP_UB2_UH(...) DOTP_UB2(v8u16, __VA_ARGS__)
742 #define DOTP_UB4(RTYPE, mult0, mult1, mult2, mult3, \
743 cnst0, cnst1, cnst2, cnst3, \
744 out0, out1, out2, out3) \
746 DOTP_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \
747 DOTP_UB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \
749 #define DOTP_UB4_UH(...) DOTP_UB4(v8u16, __VA_ARGS__)
763 #define DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
765 out0 = (RTYPE) __msa_dotp_s_h((v16i8) mult0, (v16i8) cnst0); \
766 out1 = (RTYPE) __msa_dotp_s_h((v16i8) mult1, (v16i8) cnst1); \
768 #define DOTP_SB2_SH(...) DOTP_SB2(v8i16, __VA_ARGS__)
770 #define DOTP_SB3(RTYPE, mult0, mult1, mult2, cnst0, cnst1, cnst2, \
773 DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \
774 out2 = (RTYPE) __msa_dotp_s_h((v16i8) mult2, (v16i8) cnst2); \
776 #define DOTP_SB3_SH(...) DOTP_SB3(v8i16, __VA_ARGS__)
778 #define DOTP_SB4(RTYPE, mult0, mult1, mult2, mult3, \
779 cnst0, cnst1, cnst2, cnst3, out0, out1, out2, out3) \
781 DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \
782 DOTP_SB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \
784 #define DOTP_SB4_SH(...) DOTP_SB4(v8i16, __VA_ARGS__)
798 #define DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
800 out0 = (RTYPE) __msa_dotp_s_w((v8i16) mult0, (v8i16) cnst0); \
801 out1 = (RTYPE) __msa_dotp_s_w((v8i16) mult1, (v8i16) cnst1); \
803 #define DOTP_SH2_SW(...) DOTP_SH2(v4i32, __VA_ARGS__)
805 #define DOTP_SH4(RTYPE, mult0, mult1, mult2, mult3, \
806 cnst0, cnst1, cnst2, cnst3, \
807 out0, out1, out2, out3) \
809 DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \
810 DOTP_SH2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \
812 #define DOTP_SH4_SW(...) DOTP_SH4(v4i32, __VA_ARGS__)
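/* DPADD_SB/UB/SH: like DOTP, but the double-width products are accumulated
   into the existing contents of the output vectors. */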
826 #define DPADD_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
828 out0 = (RTYPE) __msa_dpadd_s_h((v8i16) out0, \
829 (v16i8) mult0, (v16i8) cnst0); \
830 out1 = (RTYPE) __msa_dpadd_s_h((v8i16) out1, \
831 (v16i8) mult1, (v16i8) cnst1); \
833 #define DPADD_SB2_SH(...) DPADD_SB2(v8i16, __VA_ARGS__)
835 #define DPADD_SB4(RTYPE, mult0, mult1, mult2, mult3, \
836 cnst0, cnst1, cnst2, cnst3, out0, out1, out2, out3) \
838 DPADD_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \
839 DPADD_SB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \
841 #define DPADD_SB4_SH(...) DPADD_SB4(v8i16, __VA_ARGS__)
855 #define DPADD_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
857 out0 = (RTYPE) __msa_dpadd_u_h((v8u16) out0, \
858 (v16u8) mult0, (v16u8) cnst0); \
859 out1 = (RTYPE) __msa_dpadd_u_h((v8u16) out1, \
860 (v16u8) mult1, (v16u8) cnst1); \
862 #define DPADD_UB2_UH(...) DPADD_UB2(v8u16, __VA_ARGS__)
876 #define DPADD_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \
878 out0 = (RTYPE) __msa_dpadd_s_w((v4i32) out0, \
879 (v8i16) mult0, (v8i16) cnst0); \
880 out1 = (RTYPE) __msa_dpadd_s_w((v4i32) out1, \
881 (v8i16) mult1, (v8i16) cnst1); \
883 #define DPADD_SH2_SW(...) DPADD_SH2(v4i32, __VA_ARGS__)
885 #define DPADD_SH4(RTYPE, mult0, mult1, mult2, mult3, \
886 cnst0, cnst1, cnst2, cnst3, out0, out1, out2, out3) \
888 DPADD_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \
889 DPADD_SH2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \
891 #define DPADD_SH4_SW(...) DPADD_SH4(v4i32, __VA_ARGS__)
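/* MIN_UH2/4: unsigned halfword minimum against min_vec, in place. */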
901 #define MIN_UH2(RTYPE, in0, in1, min_vec) \
903 in0 = (RTYPE) __msa_min_u_h((v8u16) in0, min_vec); \
904 in1 = (RTYPE) __msa_min_u_h((v8u16) in1, min_vec); \
906 #define MIN_UH2_UH(...) MIN_UH2(v8u16, __VA_ARGS__)
908 #define MIN_UH4(RTYPE, in0, in1, in2, in3, min_vec) \
910 MIN_UH2(RTYPE, in0, in1, min_vec); \
911 MIN_UH2(RTYPE, in2, in3, min_vec); \
913 #define MIN_UH4_UH(...) MIN_UH4(v8u16, __VA_ARGS__)
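/* CLIP_SH and the CLIP_SH/SW ..._0_255 forms clamp halfword or word
   elements either to an arbitrary [min, max] range or to 0..255. */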
923 #define CLIP_SH(in, min, max) \
925 in = __msa_max_s_h((v8i16) min, (v8i16) in); \
926 in = __msa_min_s_h((v8i16) max, (v8i16) in); \
935 #define CLIP_SH_0_255(in) \
937 in = __msa_maxi_s_h((v8i16) in, 0); \
938 in = (v8i16) __msa_sat_u_h((v8u16) in, 7); \
941 #define CLIP_SH2_0_255(in0, in1) \
943 CLIP_SH_0_255(in0); \
944 CLIP_SH_0_255(in1); \
947 #define CLIP_SH4_0_255(in0, in1, in2, in3) \
949 CLIP_SH2_0_255(in0, in1); \
950 CLIP_SH2_0_255(in2, in3); \
953 #define CLIP_SH8_0_255(in0, in1, in2, in3, \
954 in4, in5, in6, in7) \
956 CLIP_SH4_0_255(in0, in1, in2, in3); \
957 CLIP_SH4_0_255(in4, in5, in6, in7); \
966 #define CLIP_SW_0_255(in) \
968 in = __msa_maxi_s_w((v4i32) in, 0); \
969 in = (v4i32) __msa_sat_u_w((v4u32) in, 7); \
972 #define CLIP_SW2_0_255(in0, in1) \
974 CLIP_SW_0_255(in0); \
975 CLIP_SW_0_255(in1); \
978 #define CLIP_SW4_0_255(in0, in1, in2, in3) \
980 CLIP_SW2_0_255(in0, in1); \
981 CLIP_SW2_0_255(in2, in3); \
984 #define CLIP_SW8_0_255(in0, in1, in2, in3, \
985 in4, in5, in6, in7) \
987 CLIP_SW4_0_255(in0, in1, in2, in3); \
988 CLIP_SW4_0_255(in4, in5, in6, in7); \
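/* HADD_SW_S32 and HADD_UH_U32 reduce one vector horizontally to a single
   32-bit sum. */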
#define HADD_SW_S32(in)                               \
( {                                                   \
    v2i64 res0_m, res1_m;                             \
    int32_t sum_m;                                    \
                                                      \
    res0_m = __msa_hadd_s_d((v4i32) in, (v4i32) in);  \
    res1_m = __msa_splati_d(res0_m, 1);               \
    res0_m = res0_m + res1_m;                         \
    sum_m = __msa_copy_s_w((v4i32) res0_m, 0);        \
    sum_m;                                            \
} )

#define HADD_UH_U32(in)                                  \
( {                                                      \
    v4u32 res_m;                                         \
    v2u64 res0_m, res1_m;                                \
    uint32_t sum_m;                                      \
                                                         \
    res_m = __msa_hadd_u_w((v8u16) in, (v8u16) in);      \
    res0_m = __msa_hadd_u_d(res_m, res_m);               \
    res1_m = (v2u64) __msa_splati_d((v2i64) res0_m, 1);  \
    res0_m = res0_m + res1_m;                            \
    sum_m = __msa_copy_u_w((v4i32) res0_m, 0);           \
    sum_m;                                               \
} )
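/* HADD_SB/UB and HSUB_UB: pairwise horizontal add (or subtract) of adjacent
   elements into double-width results. */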
1039 #define HADD_SB2(RTYPE, in0, in1, out0, out1) \
1041 out0 = (RTYPE) __msa_hadd_s_h((v16i8) in0, (v16i8) in0); \
1042 out1 = (RTYPE) __msa_hadd_s_h((v16i8) in1, (v16i8) in1); \
1044 #define HADD_SB2_SH(...) HADD_SB2(v8i16, __VA_ARGS__)
1046 #define HADD_SB4(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3) \
1048 HADD_SB2(RTYPE, in0, in1, out0, out1); \
1049 HADD_SB2(RTYPE, in2, in3, out2, out3); \
1051 #define HADD_SB4_UH(...) HADD_SB4(v8u16, __VA_ARGS__)
1052 #define HADD_SB4_SH(...) HADD_SB4(v8i16, __VA_ARGS__)
1062 #define HADD_UB2(RTYPE, in0, in1, out0, out1) \
1064 out0 = (RTYPE) __msa_hadd_u_h((v16u8) in0, (v16u8) in0); \
1065 out1 = (RTYPE) __msa_hadd_u_h((v16u8) in1, (v16u8) in1); \
1067 #define HADD_UB2_UH(...) HADD_UB2(v8u16, __VA_ARGS__)
1069 #define HADD_UB3(RTYPE, in0, in1, in2, out0, out1, out2) \
1071 HADD_UB2(RTYPE, in0, in1, out0, out1); \
1072 out2 = (RTYPE) __msa_hadd_u_h((v16u8) in2, (v16u8) in2); \
1074 #define HADD_UB3_UH(...) HADD_UB3(v8u16, __VA_ARGS__)
1076 #define HADD_UB4(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3) \
1078 HADD_UB2(RTYPE, in0, in1, out0, out1); \
1079 HADD_UB2(RTYPE, in2, in3, out2, out3); \
1081 #define HADD_UB4_UB(...) HADD_UB4(v16u8, __VA_ARGS__)
1082 #define HADD_UB4_UH(...) HADD_UB4(v8u16, __VA_ARGS__)
1083 #define HADD_UB4_SH(...) HADD_UB4(v8i16, __VA_ARGS__)
1093 #define HSUB_UB2(RTYPE, in0, in1, out0, out1) \
1095 out0 = (RTYPE) __msa_hsub_u_h((v16u8) in0, (v16u8) in0); \
1096 out1 = (RTYPE) __msa_hsub_u_h((v16u8) in1, (v16u8) in1); \
1098 #define HSUB_UB2_UH(...) HSUB_UB2(v8u16, __VA_ARGS__)
1099 #define HSUB_UB2_SH(...) HSUB_UB2(v8i16, __VA_ARGS__)
1101 #define HSUB_UB4(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3) \
1103 HSUB_UB2(RTYPE, in0, in1, out0, out1); \
1104 HSUB_UB2(RTYPE, in2, in3, out2, out3); \
1106 #define HSUB_UB4_UH(...) HSUB_UB4(v8u16, __VA_ARGS__)
1107 #define HSUB_UB4_SH(...) HSUB_UB4(v8i16, __VA_ARGS__)
1118 #define SAD_UB2_UH(in0, in1, ref0, ref1) \
1120 v16u8 diff0_m, diff1_m; \
1121 v8u16 sad_m = { 0 }; \
1123 diff0_m = __msa_asub_u_b((v16u8) in0, (v16u8) ref0); \
1124 diff1_m = __msa_asub_u_b((v16u8) in1, (v16u8) ref1); \
1126 sad_m += __msa_hadd_u_h((v16u8) diff0_m, (v16u8) diff0_m); \
1127 sad_m += __msa_hadd_u_h((v16u8) diff1_m, (v16u8) diff1_m); \
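/* INSERT_W2/W4 and INSERT_D2: insert general-purpose register values into
   selected word/doubleword elements of a vector. */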
1138 #define INSERT_W2(RTYPE, in0, in1, out) \
1140 out = (RTYPE) __msa_insert_w((v4i32) out, 0, in0); \
1141 out = (RTYPE) __msa_insert_w((v4i32) out, 1, in1); \
1143 #define INSERT_W2_UB(...) INSERT_W2(v16u8, __VA_ARGS__)
1144 #define INSERT_W2_SB(...) INSERT_W2(v16i8, __VA_ARGS__)
1146 #define INSERT_W4(RTYPE, in0, in1, in2, in3, out) \
1148 out = (RTYPE) __msa_insert_w((v4i32) out, 0, in0); \
1149 out = (RTYPE) __msa_insert_w((v4i32) out, 1, in1); \
1150 out = (RTYPE) __msa_insert_w((v4i32) out, 2, in2); \
1151 out = (RTYPE) __msa_insert_w((v4i32) out, 3, in3); \
1153 #define INSERT_W4_UB(...) INSERT_W4(v16u8, __VA_ARGS__)
1154 #define INSERT_W4_SB(...) INSERT_W4(v16i8, __VA_ARGS__)
1155 #define INSERT_W4_SH(...) INSERT_W4(v8i16, __VA_ARGS__)
1156 #define INSERT_W4_SW(...) INSERT_W4(v4i32, __VA_ARGS__)
1164 #define INSERT_D2(RTYPE, in0, in1, out) \
1166 out = (RTYPE) __msa_insert_d((v2i64) out, 0, in0); \
1167 out = (RTYPE) __msa_insert_d((v2i64) out, 1, in1); \
1169 #define INSERT_D2_UB(...) INSERT_D2(v16u8, __VA_ARGS__)
1170 #define INSERT_D2_SB(...) INSERT_D2(v16i8, __VA_ARGS__)
1171 #define INSERT_D2_SH(...) INSERT_D2(v8i16, __VA_ARGS__)
1172 #define INSERT_D2_SD(...) INSERT_D2(v2i64, __VA_ARGS__)
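/* ILVEV_*, ILVL_*, ILVR_* and ILVRL_*: interleave the even-indexed, left
   (upper) half or right (lower) half elements of vector pairs, at byte,
   halfword, word and doubleword granularity. */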
1183 #define ILVEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) \
1185 out0 = (RTYPE) __msa_ilvev_b((v16i8) in1, (v16i8) in0); \
1186 out1 = (RTYPE) __msa_ilvev_b((v16i8) in3, (v16i8) in2); \
1188 #define ILVEV_B2_UB(...) ILVEV_B2(v16u8, __VA_ARGS__)
1189 #define ILVEV_B2_SB(...) ILVEV_B2(v16i8, __VA_ARGS__)
1190 #define ILVEV_B2_SH(...) ILVEV_B2(v8i16, __VA_ARGS__)
1191 #define ILVEV_B2_SD(...) ILVEV_B2(v2i64, __VA_ARGS__)
1202 #define ILVEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) \
1204 out0 = (RTYPE) __msa_ilvev_h((v8i16) in1, (v8i16) in0); \
1205 out1 = (RTYPE) __msa_ilvev_h((v8i16) in3, (v8i16) in2); \
1207 #define ILVEV_H2_UB(...) ILVEV_H2(v16u8, __VA_ARGS__)
1208 #define ILVEV_H2_SH(...) ILVEV_H2(v8i16, __VA_ARGS__)
1209 #define ILVEV_H2_SW(...) ILVEV_H2(v4i32, __VA_ARGS__)
1220 #define ILVEV_W2(RTYPE, in0, in1, in2, in3, out0, out1) \
1222 out0 = (RTYPE) __msa_ilvev_w((v4i32) in1, (v4i32) in0); \
1223 out1 = (RTYPE) __msa_ilvev_w((v4i32) in3, (v4i32) in2); \
1225 #define ILVEV_W2_UB(...) ILVEV_W2(v16u8, __VA_ARGS__)
1226 #define ILVEV_W2_SB(...) ILVEV_W2(v16i8, __VA_ARGS__)
1227 #define ILVEV_W2_UH(...) ILVEV_W2(v8u16, __VA_ARGS__)
1228 #define ILVEV_W2_SD(...) ILVEV_W2(v2i64, __VA_ARGS__)
1239 #define ILVEV_D2(RTYPE, in0, in1, in2, in3, out0, out1) \
1241 out0 = (RTYPE) __msa_ilvev_d((v2i64) in1, (v2i64) in0); \
1242 out1 = (RTYPE) __msa_ilvev_d((v2i64) in3, (v2i64) in2); \
1244 #define ILVEV_D2_UB(...) ILVEV_D2(v16u8, __VA_ARGS__)
1245 #define ILVEV_D2_SB(...) ILVEV_D2(v16i8, __VA_ARGS__)
1246 #define ILVEV_D2_SW(...) ILVEV_D2(v4i32, __VA_ARGS__)
1257 #define ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1) \
1259 out0 = (RTYPE) __msa_ilvl_b((v16i8) in0, (v16i8) in1); \
1260 out1 = (RTYPE) __msa_ilvl_b((v16i8) in2, (v16i8) in3); \
1262 #define ILVL_B2_UB(...) ILVL_B2(v16u8, __VA_ARGS__)
1263 #define ILVL_B2_SB(...) ILVL_B2(v16i8, __VA_ARGS__)
1264 #define ILVL_B2_UH(...) ILVL_B2(v8u16, __VA_ARGS__)
1265 #define ILVL_B2_SH(...) ILVL_B2(v8i16, __VA_ARGS__)
1267 #define ILVL_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
1268 out0, out1, out2, out3) \
1270 ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
1271 ILVL_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
1273 #define ILVL_B4_UB(...) ILVL_B4(v16u8, __VA_ARGS__)
1274 #define ILVL_B4_SB(...) ILVL_B4(v16i8, __VA_ARGS__)
1275 #define ILVL_B4_UH(...) ILVL_B4(v8u16, __VA_ARGS__)
1276 #define ILVL_B4_SH(...) ILVL_B4(v8i16, __VA_ARGS__)
1287 #define ILVL_H2(RTYPE, in0, in1, in2, in3, out0, out1) \
1289 out0 = (RTYPE) __msa_ilvl_h((v8i16) in0, (v8i16) in1); \
1290 out1 = (RTYPE) __msa_ilvl_h((v8i16) in2, (v8i16) in3); \
1292 #define ILVL_H2_SH(...) ILVL_H2(v8i16, __VA_ARGS__)
1293 #define ILVL_H2_SW(...) ILVL_H2(v4i32, __VA_ARGS__)
1295 #define ILVL_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
1296 out0, out1, out2, out3) \
1298 ILVL_H2(RTYPE, in0, in1, in2, in3, out0, out1); \
1299 ILVL_H2(RTYPE, in4, in5, in6, in7, out2, out3); \
1301 #define ILVL_H4_SH(...) ILVL_H4(v8i16, __VA_ARGS__)
1302 #define ILVL_H4_SW(...) ILVL_H4(v4i32, __VA_ARGS__)
1313 #define ILVL_W2(RTYPE, in0, in1, in2, in3, out0, out1) \
1315 out0 = (RTYPE) __msa_ilvl_w((v4i32) in0, (v4i32) in1); \
1316 out1 = (RTYPE) __msa_ilvl_w((v4i32) in2, (v4i32) in3); \
1318 #define ILVL_W2_UB(...) ILVL_W2(v16u8, __VA_ARGS__)
1319 #define ILVL_W2_SB(...) ILVL_W2(v16i8, __VA_ARGS__)
1320 #define ILVL_W2_SH(...) ILVL_W2(v8i16, __VA_ARGS__)
1332 #define ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1) \
1334 out0 = (RTYPE) __msa_ilvr_b((v16i8) in0, (v16i8) in1); \
1335 out1 = (RTYPE) __msa_ilvr_b((v16i8) in2, (v16i8) in3); \
1337 #define ILVR_B2_UB(...) ILVR_B2(v16u8, __VA_ARGS__)
1338 #define ILVR_B2_SB(...) ILVR_B2(v16i8, __VA_ARGS__)
1339 #define ILVR_B2_UH(...) ILVR_B2(v8u16, __VA_ARGS__)
1340 #define ILVR_B2_SH(...) ILVR_B2(v8i16, __VA_ARGS__)
1341 #define ILVR_B2_SW(...) ILVR_B2(v4i32, __VA_ARGS__)
1343 #define ILVR_B3(RTYPE, in0, in1, in2, in3, in4, in5, out0, out1, out2) \
1345 ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
1346 out2 = (RTYPE) __msa_ilvr_b((v16i8) in4, (v16i8) in5); \
1348 #define ILVR_B3_UB(...) ILVR_B3(v16u8, __VA_ARGS__)
1349 #define ILVR_B3_SB(...) ILVR_B3(v16i8, __VA_ARGS__)
1350 #define ILVR_B3_UH(...) ILVR_B3(v8u16, __VA_ARGS__)
1351 #define ILVR_B3_SH(...) ILVR_B3(v8i16, __VA_ARGS__)
1353 #define ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
1354 out0, out1, out2, out3) \
1356 ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
1357 ILVR_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
1359 #define ILVR_B4_UB(...) ILVR_B4(v16u8, __VA_ARGS__)
1360 #define ILVR_B4_SB(...) ILVR_B4(v16i8, __VA_ARGS__)
1361 #define ILVR_B4_UH(...) ILVR_B4(v8u16, __VA_ARGS__)
1362 #define ILVR_B4_SH(...) ILVR_B4(v8i16, __VA_ARGS__)
1363 #define ILVR_B4_SW(...) ILVR_B4(v4i32, __VA_ARGS__)
1365 #define ILVR_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
1366 in8, in9, in10, in11, in12, in13, in14, in15, \
1367 out0, out1, out2, out3, out4, out5, out6, out7) \
1369 ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
1370 out0, out1, out2, out3); \
1371 ILVR_B4(RTYPE, in8, in9, in10, in11, in12, in13, in14, in15, \
1372 out4, out5, out6, out7); \
1374 #define ILVR_B8_UH(...) ILVR_B8(v8u16, __VA_ARGS__)
1375 #define ILVR_B8_SW(...) ILVR_B8(v4i32, __VA_ARGS__)
1387 #define ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1) \
1389 out0 = (RTYPE) __msa_ilvr_h((v8i16) in0, (v8i16) in1); \
1390 out1 = (RTYPE) __msa_ilvr_h((v8i16) in2, (v8i16) in3); \
1392 #define ILVR_H2_SH(...) ILVR_H2(v8i16, __VA_ARGS__)
1393 #define ILVR_H2_SW(...) ILVR_H2(v4i32, __VA_ARGS__)
1395 #define ILVR_H3(RTYPE, in0, in1, in2, in3, in4, in5, out0, out1, out2) \
1397 ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1); \
1398 out2 = (RTYPE) __msa_ilvr_h((v8i16) in4, (v8i16) in5); \
1400 #define ILVR_H3_SH(...) ILVR_H3(v8i16, __VA_ARGS__)
1402 #define ILVR_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
1403 out0, out1, out2, out3) \
1405 ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1); \
1406 ILVR_H2(RTYPE, in4, in5, in6, in7, out2, out3); \
1408 #define ILVR_H4_SH(...) ILVR_H4(v8i16, __VA_ARGS__)
1409 #define ILVR_H4_SW(...) ILVR_H4(v4i32, __VA_ARGS__)
1411 #define ILVR_W2(RTYPE, in0, in1, in2, in3, out0, out1) \
1413 out0 = (RTYPE) __msa_ilvr_w((v4i32) in0, (v4i32) in1); \
1414 out1 = (RTYPE) __msa_ilvr_w((v4i32) in2, (v4i32) in3); \
1416 #define ILVR_W2_UB(...) ILVR_W2(v16u8, __VA_ARGS__)
1417 #define ILVR_W2_SB(...) ILVR_W2(v16i8, __VA_ARGS__)
1418 #define ILVR_W2_SH(...) ILVR_W2(v8i16, __VA_ARGS__)
1420 #define ILVR_W4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
1421 out0, out1, out2, out3) \
1423 ILVR_W2(RTYPE, in0, in1, in2, in3, out0, out1); \
1424 ILVR_W2(RTYPE, in4, in5, in6, in7, out2, out3); \
1426 #define ILVR_W4_SB(...) ILVR_W4(v16i8, __VA_ARGS__)
1427 #define ILVR_W4_UB(...) ILVR_W4(v16u8, __VA_ARGS__)
1438 #define ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1) \
1440 out0 = (RTYPE) __msa_ilvr_d((v2i64) in0, (v2i64) in1); \
1441 out1 = (RTYPE) __msa_ilvr_d((v2i64) in2, (v2i64) in3); \
1443 #define ILVR_D2_UB(...) ILVR_D2(v16u8, __VA_ARGS__)
1444 #define ILVR_D2_SB(...) ILVR_D2(v16i8, __VA_ARGS__)
1445 #define ILVR_D2_SH(...) ILVR_D2(v8i16, __VA_ARGS__)
1447 #define ILVR_D3(RTYPE, in0, in1, in2, in3, in4, in5, out0, out1, out2) \
1449 ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1); \
1450 out2 = (RTYPE) __msa_ilvr_d((v2i64) in4, (v2i64) in5); \
1452 #define ILVR_D3_SB(...) ILVR_D3(v16i8, __VA_ARGS__)
1454 #define ILVR_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
1455 out0, out1, out2, out3) \
1457 ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1); \
1458 ILVR_D2(RTYPE, in4, in5, in6, in7, out2, out3); \
1460 #define ILVR_D4_SB(...) ILVR_D4(v16i8, __VA_ARGS__)
1461 #define ILVR_D4_UB(...) ILVR_D4(v16u8, __VA_ARGS__)
1472 #define ILVL_D2(RTYPE, in0, in1, in2, in3, out0, out1) \
1474 out0 = (RTYPE) __msa_ilvl_d((v2i64) in0, (v2i64) in1); \
1475 out1 = (RTYPE) __msa_ilvl_d((v2i64) in2, (v2i64) in3); \
1477 #define ILVL_D2_UB(...) ILVL_D2(v16u8, __VA_ARGS__)
1478 #define ILVL_D2_SB(...) ILVL_D2(v16i8, __VA_ARGS__)
1479 #define ILVL_D2_SH(...) ILVL_D2(v8i16, __VA_ARGS__)
1490 #define ILVRL_B2(RTYPE, in0, in1, out0, out1) \
1492 out0 = (RTYPE) __msa_ilvr_b((v16i8) in0, (v16i8) in1); \
1493 out1 = (RTYPE) __msa_ilvl_b((v16i8) in0, (v16i8) in1); \
1495 #define ILVRL_B2_UB(...) ILVRL_B2(v16u8, __VA_ARGS__)
1496 #define ILVRL_B2_SB(...) ILVRL_B2(v16i8, __VA_ARGS__)
1497 #define ILVRL_B2_UH(...) ILVRL_B2(v8u16, __VA_ARGS__)
1498 #define ILVRL_B2_SH(...) ILVRL_B2(v8i16, __VA_ARGS__)
1499 #define ILVRL_B2_SW(...) ILVRL_B2(v4i32, __VA_ARGS__)
1501 #define ILVRL_H2(RTYPE, in0, in1, out0, out1) \
1503 out0 = (RTYPE) __msa_ilvr_h((v8i16) in0, (v8i16) in1); \
1504 out1 = (RTYPE) __msa_ilvl_h((v8i16) in0, (v8i16) in1); \
1506 #define ILVRL_H2_UB(...) ILVRL_H2(v16u8, __VA_ARGS__)
1507 #define ILVRL_H2_SB(...) ILVRL_H2(v16i8, __VA_ARGS__)
1508 #define ILVRL_H2_SH(...) ILVRL_H2(v8i16, __VA_ARGS__)
1509 #define ILVRL_H2_SW(...) ILVRL_H2(v4i32, __VA_ARGS__)
1511 #define ILVRL_W2(RTYPE, in0, in1, out0, out1) \
1513 out0 = (RTYPE) __msa_ilvr_w((v4i32) in0, (v4i32) in1); \
1514 out1 = (RTYPE) __msa_ilvl_w((v4i32) in0, (v4i32) in1); \
1516 #define ILVRL_W2_UB(...) ILVRL_W2(v16u8, __VA_ARGS__)
1517 #define ILVRL_W2_SH(...) ILVRL_W2(v8i16, __VA_ARGS__)
1518 #define ILVRL_W2_SW(...) ILVRL_W2(v4i32, __VA_ARGS__)
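/* MAXI_SH*: per-element signed maximum with an immediate.  SAT_UH*, SAT_SH*
   and SAT_SW*: saturate each element to the given number of bits.  All
   operate in place. */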
1528 #define MAXI_SH2(RTYPE, in0, in1, max_val) \
1530 in0 = (RTYPE) __msa_maxi_s_h((v8i16) in0, max_val); \
1531 in1 = (RTYPE) __msa_maxi_s_h((v8i16) in1, max_val); \
1533 #define MAXI_SH2_UH(...) MAXI_SH2(v8u16, __VA_ARGS__)
1534 #define MAXI_SH2_SH(...) MAXI_SH2(v8i16, __VA_ARGS__)
1536 #define MAXI_SH4(RTYPE, in0, in1, in2, in3, max_val) \
1538 MAXI_SH2(RTYPE, in0, in1, max_val); \
1539 MAXI_SH2(RTYPE, in2, in3, max_val); \
1541 #define MAXI_SH4_UH(...) MAXI_SH4(v8u16, __VA_ARGS__)
1542 #define MAXI_SH4_SH(...) MAXI_SH4(v8i16, __VA_ARGS__)
1544 #define MAXI_SH8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, max_val) \
1546 MAXI_SH4(RTYPE, in0, in1, in2, in3, max_val); \
1547 MAXI_SH4(RTYPE, in4, in5, in6, in7, max_val); \
1549 #define MAXI_SH8_UH(...) MAXI_SH8(v8u16, __VA_ARGS__)
1550 #define MAXI_SH8_SH(...) MAXI_SH8(v8i16, __VA_ARGS__)
1562 #define SAT_UH2(RTYPE, in0, in1, sat_val) \
1564 in0 = (RTYPE) __msa_sat_u_h((v8u16) in0, sat_val); \
1565 in1 = (RTYPE) __msa_sat_u_h((v8u16) in1, sat_val); \
1567 #define SAT_UH2_UH(...) SAT_UH2(v8u16, __VA_ARGS__)
1568 #define SAT_UH2_SH(...) SAT_UH2(v8i16, __VA_ARGS__)
1570 #define SAT_UH4(RTYPE, in0, in1, in2, in3, sat_val) \
1572 SAT_UH2(RTYPE, in0, in1, sat_val); \
1573 SAT_UH2(RTYPE, in2, in3, sat_val); \
1575 #define SAT_UH4_UH(...) SAT_UH4(v8u16, __VA_ARGS__)
1576 #define SAT_UH4_SH(...) SAT_UH4(v8i16, __VA_ARGS__)
1578 #define SAT_UH8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, sat_val) \
1580 SAT_UH4(RTYPE, in0, in1, in2, in3, sat_val); \
1581 SAT_UH4(RTYPE, in4, in5, in6, in7, sat_val); \
1583 #define SAT_UH8_UH(...) SAT_UH8(v8u16, __VA_ARGS__)
1584 #define SAT_UH8_SH(...) SAT_UH8(v8i16, __VA_ARGS__)
1596 #define SAT_SH2(RTYPE, in0, in1, sat_val) \
1598 in0 = (RTYPE) __msa_sat_s_h((v8i16) in0, sat_val); \
1599 in1 = (RTYPE) __msa_sat_s_h((v8i16) in1, sat_val); \
1601 #define SAT_SH2_SH(...) SAT_SH2(v8i16, __VA_ARGS__)
1603 #define SAT_SH3(RTYPE, in0, in1, in2, sat_val) \
1605 SAT_SH2(RTYPE, in0, in1, sat_val); \
1606 in2 = (RTYPE) __msa_sat_s_h((v8i16) in2, sat_val); \
1608 #define SAT_SH3_SH(...) SAT_SH3(v8i16, __VA_ARGS__)
1610 #define SAT_SH4(RTYPE, in0, in1, in2, in3, sat_val) \
1612 SAT_SH2(RTYPE, in0, in1, sat_val); \
1613 SAT_SH2(RTYPE, in2, in3, sat_val); \
1615 #define SAT_SH4_SH(...) SAT_SH4(v8i16, __VA_ARGS__)
1627 #define SAT_SW2(RTYPE, in0, in1, sat_val) \
1629 in0 = (RTYPE) __msa_sat_s_w((v4i32) in0, sat_val); \
1630 in1 = (RTYPE) __msa_sat_s_w((v4i32) in1, sat_val); \
1632 #define SAT_SW2_SW(...) SAT_SW2(v4i32, __VA_ARGS__)
1634 #define SAT_SW4(RTYPE, in0, in1, in2, in3, sat_val) \
1636 SAT_SW2(RTYPE, in0, in1, sat_val); \
1637 SAT_SW2(RTYPE, in2, in3, sat_val); \
1639 #define SAT_SW4_SW(...) SAT_SW4(v4i32, __VA_ARGS__)
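/* SPLATI_H*, SPLATI_W*: replicate one selected element across a whole
   vector (splati.h/w). */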
1650 #define SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1) \
1652 out0 = (RTYPE) __msa_splati_h((v8i16) in, idx0); \
1653 out1 = (RTYPE) __msa_splati_h((v8i16) in, idx1); \
1655 #define SPLATI_H2_SB(...) SPLATI_H2(v16i8, __VA_ARGS__)
1656 #define SPLATI_H2_SH(...) SPLATI_H2(v8i16, __VA_ARGS__)
1658 #define SPLATI_H3(RTYPE, in, idx0, idx1, idx2, \
1661 SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1); \
1662 out2 = (RTYPE) __msa_splati_h((v8i16) in, idx2); \
1664 #define SPLATI_H3_SB(...) SPLATI_H3(v16i8, __VA_ARGS__)
1665 #define SPLATI_H3_SH(...) SPLATI_H3(v8i16, __VA_ARGS__)
1667 #define SPLATI_H4(RTYPE, in, idx0, idx1, idx2, idx3, \
1668 out0, out1, out2, out3) \
1670 SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1); \
1671 SPLATI_H2(RTYPE, in, idx2, idx3, out2, out3); \
1673 #define SPLATI_H4_SB(...) SPLATI_H4(v16i8, __VA_ARGS__)
1674 #define SPLATI_H4_SH(...) SPLATI_H4(v8i16, __VA_ARGS__)
1687 #define SPLATI_W2(RTYPE, in, stidx, out0, out1) \
1689 out0 = (RTYPE) __msa_splati_w((v4i32) in, stidx); \
1690 out1 = (RTYPE) __msa_splati_w((v4i32) in, (stidx+1)); \
1692 #define SPLATI_W2_SH(...) SPLATI_W2(v8i16, __VA_ARGS__)
1693 #define SPLATI_W2_SW(...) SPLATI_W2(v4i32, __VA_ARGS__)
1695 #define SPLATI_W4(RTYPE, in, out0, out1, out2, out3) \
1697 SPLATI_W2(RTYPE, in, 0, out0, out1); \
1698 SPLATI_W2(RTYPE, in, 2, out2, out3); \
1700 #define SPLATI_W4_SH(...) SPLATI_W4(v8i16, __VA_ARGS__)
1701 #define SPLATI_W4_SW(...) SPLATI_W4(v4i32, __VA_ARGS__)
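/* PCKEV_* and PCKOD_*: pack the even- (or odd-) indexed elements of two
   source vectors into one result vector. */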
1714 #define PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) \
1716 out0 = (RTYPE) __msa_pckev_b((v16i8) in0, (v16i8) in1); \
1717 out1 = (RTYPE) __msa_pckev_b((v16i8) in2, (v16i8) in3); \
1719 #define PCKEV_B2_SB(...) PCKEV_B2(v16i8, __VA_ARGS__)
1720 #define PCKEV_B2_UB(...) PCKEV_B2(v16u8, __VA_ARGS__)
1721 #define PCKEV_B2_SH(...) PCKEV_B2(v8i16, __VA_ARGS__)
1722 #define PCKEV_B2_SW(...) PCKEV_B2(v4i32, __VA_ARGS__)
1724 #define PCKEV_B3(RTYPE, in0, in1, in2, in3, in4, in5, out0, out1, out2) \
1726 PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
1727 out2 = (RTYPE) __msa_pckev_b((v16i8) in4, (v16i8) in5); \
1729 #define PCKEV_B3_UB(...) PCKEV_B3(v16u8, __VA_ARGS__)
1730 #define PCKEV_B3_SB(...) PCKEV_B3(v16i8, __VA_ARGS__)
1732 #define PCKEV_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
1733 out0, out1, out2, out3) \
1735 PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
1736 PCKEV_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
1738 #define PCKEV_B4_SB(...) PCKEV_B4(v16i8, __VA_ARGS__)
1739 #define PCKEV_B4_UB(...) PCKEV_B4(v16u8, __VA_ARGS__)
1740 #define PCKEV_B4_SH(...) PCKEV_B4(v8i16, __VA_ARGS__)
1741 #define PCKEV_B4_SW(...) PCKEV_B4(v4i32, __VA_ARGS__)
1754 #define PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) \
1756 out0 = (RTYPE) __msa_pckev_h((v8i16) in0, (v8i16) in1); \
1757 out1 = (RTYPE) __msa_pckev_h((v8i16) in2, (v8i16) in3); \
1759 #define PCKEV_H2_SH(...) PCKEV_H2(v8i16, __VA_ARGS__)
1760 #define PCKEV_H2_SW(...) PCKEV_H2(v4i32, __VA_ARGS__)
1762 #define PCKEV_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
1763 out0, out1, out2, out3) \
1765 PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1); \
1766 PCKEV_H2(RTYPE, in4, in5, in6, in7, out2, out3); \
1768 #define PCKEV_H4_SH(...) PCKEV_H4(v8i16, __VA_ARGS__)
1769 #define PCKEV_H4_SW(...) PCKEV_H4(v4i32, __VA_ARGS__)
1782 #define PCKEV_D2(RTYPE, in0, in1, in2, in3, out0, out1) \
1784 out0 = (RTYPE) __msa_pckev_d((v2i64) in0, (v2i64) in1); \
1785 out1 = (RTYPE) __msa_pckev_d((v2i64) in2, (v2i64) in3); \
1787 #define PCKEV_D2_UB(...) PCKEV_D2(v16u8, __VA_ARGS__)
1788 #define PCKEV_D2_SB(...) PCKEV_D2(v16i8, __VA_ARGS__)
1789 #define PCKEV_D2_SH(...) PCKEV_D2(v8i16, __VA_ARGS__)
1791 #define PCKEV_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
1792 out0, out1, out2, out3) \
1794 PCKEV_D2(RTYPE, in0, in1, in2, in3, out0, out1); \
1795 PCKEV_D2(RTYPE, in4, in5, in6, in7, out2, out3); \
1797 #define PCKEV_D4_UB(...) PCKEV_D4(v16u8, __VA_ARGS__)
1808 #define PCKOD_D2(RTYPE, in0, in1, in2, in3, out0, out1) \
1810 out0 = (RTYPE) __msa_pckod_d((v2i64) in0, (v2i64) in1); \
1811 out1 = (RTYPE) __msa_pckod_d((v2i64) in2, (v2i64) in3); \
1813 #define PCKOD_D2_UB(...) PCKOD_D2(v16u8, __VA_ARGS__)
1814 #define PCKOD_D2_SH(...) PCKOD_D2(v8i16, __VA_ARGS__)
1815 #define PCKOD_D2_SD(...) PCKOD_D2(v2i64, __VA_ARGS__)
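/* XORI_B*_128: XOR every byte with 128, flipping the sign bit to convert
   between signed and unsigned pixel representations in place. */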
1829 #define XORI_B2_128(RTYPE, in0, in1) \
1831 in0 = (RTYPE) __msa_xori_b((v16u8) in0, 128); \
1832 in1 = (RTYPE) __msa_xori_b((v16u8) in1, 128); \
1834 #define XORI_B2_128_UB(...) XORI_B2_128(v16u8, __VA_ARGS__)
1835 #define XORI_B2_128_SB(...) XORI_B2_128(v16i8, __VA_ARGS__)
1836 #define XORI_B2_128_SH(...) XORI_B2_128(v8i16, __VA_ARGS__)
1838 #define XORI_B3_128(RTYPE, in0, in1, in2) \
1840 XORI_B2_128(RTYPE, in0, in1); \
1841 in2 = (RTYPE) __msa_xori_b((v16u8) in2, 128); \
1843 #define XORI_B3_128_SB(...) XORI_B3_128(v16i8, __VA_ARGS__)
1845 #define XORI_B4_128(RTYPE, in0, in1, in2, in3) \
1847 XORI_B2_128(RTYPE, in0, in1); \
1848 XORI_B2_128(RTYPE, in2, in3); \
1850 #define XORI_B4_128_UB(...) XORI_B4_128(v16u8, __VA_ARGS__)
1851 #define XORI_B4_128_SB(...) XORI_B4_128(v16i8, __VA_ARGS__)
1852 #define XORI_B4_128_SH(...) XORI_B4_128(v8i16, __VA_ARGS__)
1854 #define XORI_B5_128(RTYPE, in0, in1, in2, in3, in4) \
1856 XORI_B3_128(RTYPE, in0, in1, in2); \
1857 XORI_B2_128(RTYPE, in3, in4); \
1859 #define XORI_B5_128_SB(...) XORI_B5_128(v16i8, __VA_ARGS__)
1861 #define XORI_B6_128(RTYPE, in0, in1, in2, in3, in4, in5) \
1863 XORI_B4_128(RTYPE, in0, in1, in2, in3); \
1864 XORI_B2_128(RTYPE, in4, in5); \
1866 #define XORI_B6_128_SB(...) XORI_B6_128(v16i8, __VA_ARGS__)
1868 #define XORI_B7_128(RTYPE, in0, in1, in2, in3, in4, in5, in6) \
1870 XORI_B4_128(RTYPE, in0, in1, in2, in3); \
1871 XORI_B3_128(RTYPE, in4, in5, in6); \
1873 #define XORI_B7_128_SB(...) XORI_B7_128(v16i8, __VA_ARGS__)
1875 #define XORI_B8_128(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7) \
1877 XORI_B4_128(RTYPE, in0, in1, in2, in3); \
1878 XORI_B4_128(RTYPE, in4, in5, in6, in7); \
1880 #define XORI_B8_128_SB(...) XORI_B8_128(v16i8, __VA_ARGS__)
1881 #define XORI_B8_128_UB(...) XORI_B8_128(v16u8, __VA_ARGS__)
1892 #define ADDS_SH2(RTYPE, in0, in1, in2, in3, out0, out1) \
1894 out0 = (RTYPE) __msa_adds_s_h((v8i16) in0, (v8i16) in1); \
1895 out1 = (RTYPE) __msa_adds_s_h((v8i16) in2, (v8i16) in3); \
1897 #define ADDS_SH2_SH(...) ADDS_SH2(v8i16, __VA_ARGS__)
1899 #define ADDS_SH4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
1900 out0, out1, out2, out3) \
1902 ADDS_SH2(RTYPE, in0, in1, in2, in3, out0, out1); \
1903 ADDS_SH2(RTYPE, in4, in5, in6, in7, out2, out3); \
1905 #define ADDS_SH4_UH(...) ADDS_SH4(v8u16, __VA_ARGS__)
1906 #define ADDS_SH4_SH(...) ADDS_SH4(v8i16, __VA_ARGS__)
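/* Element-wise shift helpers: SLLI, SRA, SRL, SRLR, SRAR and SRARI forms;
   SRLR, SRAR and the immediate SRARI round the result of the right shift. */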
1916 #define SLLI_2V(in0, in1, shift) \
1918 in0 = in0 << shift; \
1919 in1 = in1 << shift; \
1921 #define SLLI_4V(in0, in1, in2, in3, shift) \
1923 in0 = in0 << shift; \
1924 in1 = in1 << shift; \
1925 in2 = in2 << shift; \
1926 in3 = in3 << shift; \
1939 #define SRA_4V(in0, in1, in2, in3, shift) \
1941 in0 = in0 >> shift; \
1942 in1 = in1 >> shift; \
1943 in2 = in2 >> shift; \
1944 in3 = in3 >> shift; \
1957 #define SRL_H4(RTYPE, in0, in1, in2, in3, shift) \
1959 in0 = (RTYPE) __msa_srl_h((v8i16) in0, (v8i16) shift); \
1960 in1 = (RTYPE) __msa_srl_h((v8i16) in1, (v8i16) shift); \
1961 in2 = (RTYPE) __msa_srl_h((v8i16) in2, (v8i16) shift); \
1962 in3 = (RTYPE) __msa_srl_h((v8i16) in3, (v8i16) shift); \
1964 #define SRL_H4_UH(...) SRL_H4(v8u16, __VA_ARGS__)
1966 #define SRLR_H4(RTYPE, in0, in1, in2, in3, shift) \
1968 in0 = (RTYPE) __msa_srlr_h((v8i16) in0, (v8i16) shift); \
1969 in1 = (RTYPE) __msa_srlr_h((v8i16) in1, (v8i16) shift); \
1970 in2 = (RTYPE) __msa_srlr_h((v8i16) in2, (v8i16) shift); \
1971 in3 = (RTYPE) __msa_srlr_h((v8i16) in3, (v8i16) shift); \
1973 #define SRLR_H4_UH(...) SRLR_H4(v8u16, __VA_ARGS__)
1974 #define SRLR_H4_SH(...) SRLR_H4(v8i16, __VA_ARGS__)
1976 #define SRLR_H8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, shift) \
1978 SRLR_H4(RTYPE, in0, in1, in2, in3, shift); \
1979 SRLR_H4(RTYPE, in4, in5, in6, in7, shift); \
1981 #define SRLR_H8_UH(...) SRLR_H8(v8u16, __VA_ARGS__)
1982 #define SRLR_H8_SH(...) SRLR_H8(v8i16, __VA_ARGS__)
1995 #define SRAR_H2(RTYPE, in0, in1, shift) \
1997 in0 = (RTYPE) __msa_srar_h((v8i16) in0, (v8i16) shift); \
1998 in1 = (RTYPE) __msa_srar_h((v8i16) in1, (v8i16) shift); \
2000 #define SRAR_H2_UH(...) SRAR_H2(v8u16, __VA_ARGS__)
2001 #define SRAR_H2_SH(...) SRAR_H2(v8i16, __VA_ARGS__)
2003 #define SRAR_H3(RTYPE, in0, in1, in2, shift) \
2005 SRAR_H2(RTYPE, in0, in1, shift) \
2006 in2 = (RTYPE) __msa_srar_h((v8i16) in2, (v8i16) shift); \
2008 #define SRAR_H3_SH(...) SRAR_H3(v8i16, __VA_ARGS__)
2010 #define SRAR_H4(RTYPE, in0, in1, in2, in3, shift) \
2012 SRAR_H2(RTYPE, in0, in1, shift) \
2013 SRAR_H2(RTYPE, in2, in3, shift) \
2015 #define SRAR_H4_UH(...) SRAR_H4(v8u16, __VA_ARGS__)
2016 #define SRAR_H4_SH(...) SRAR_H4(v8i16, __VA_ARGS__)
2029 #define SRAR_W2(RTYPE, in0, in1, shift) \
2031 in0 = (RTYPE) __msa_srar_w((v4i32) in0, (v4i32) shift); \
2032 in1 = (RTYPE) __msa_srar_w((v4i32) in1, (v4i32) shift); \
2034 #define SRAR_W2_SW(...) SRAR_W2(v4i32, __VA_ARGS__)
2036 #define SRAR_W4(RTYPE, in0, in1, in2, in3, shift) \
2038 SRAR_W2(RTYPE, in0, in1, shift) \
2039 SRAR_W2(RTYPE, in2, in3, shift) \
2041 #define SRAR_W4_SW(...) SRAR_W4(v4i32, __VA_ARGS__)
2053 #define SRARI_H2(RTYPE, in0, in1, shift) \
2055 in0 = (RTYPE) __msa_srari_h((v8i16) in0, shift); \
2056 in1 = (RTYPE) __msa_srari_h((v8i16) in1, shift); \
2058 #define SRARI_H2_UH(...) SRARI_H2(v8u16, __VA_ARGS__)
2059 #define SRARI_H2_SH(...) SRARI_H2(v8i16, __VA_ARGS__)
2061 #define SRARI_H4(RTYPE, in0, in1, in2, in3, shift) \
2063 SRARI_H2(RTYPE, in0, in1, shift); \
2064 SRARI_H2(RTYPE, in2, in3, shift); \
2066 #define SRARI_H4_UH(...) SRARI_H4(v8u16, __VA_ARGS__)
2067 #define SRARI_H4_SH(...) SRARI_H4(v8i16, __VA_ARGS__)
2079 #define SRARI_W2(RTYPE, in0, in1, shift) \
2081 in0 = (RTYPE) __msa_srari_w((v4i32) in0, shift); \
2082 in1 = (RTYPE) __msa_srari_w((v4i32) in1, shift); \
2084 #define SRARI_W2_SW(...) SRARI_W2(v4i32, __VA_ARGS__)
2086 #define SRARI_W4(RTYPE, in0, in1, in2, in3, shift) \
2088 SRARI_W2(RTYPE, in0, in1, shift); \
2089 SRARI_W2(RTYPE, in2, in3, shift); \
2091 #define SRARI_W4_SH(...) SRARI_W4(v8i16, __VA_ARGS__)
2092 #define SRARI_W4_SW(...) SRARI_W4(v4i32, __VA_ARGS__)
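/* MUL2/4, ADD2/4 and SUB2/4: element-wise multiply, add and subtract of
   vector pairs. */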
#define MUL2(in0, in1, in2, in3, out0, out1)  \
{                                             \
    out0 = in0 * in1;                         \
    out1 = in2 * in3;                         \
}
#define MUL4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3)  \
{                                                                             \
    MUL2(in0, in1, in2, in3, out0, out1);                                     \
    MUL2(in4, in5, in6, in7, out2, out3);                                     \
}

#define ADD2(in0, in1, in2, in3, out0, out1)  \
{                                             \
    out0 = in0 + in1;                         \
    out1 = in2 + in3;                         \
}
#define ADD4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3)  \
{                                                                             \
    ADD2(in0, in1, in2, in3, out0, out1);                                     \
    ADD2(in4, in5, in6, in7, out2, out3);                                     \
}

#define SUB2(in0, in1, in2, in3, out0, out1)  \
{                                             \
    out0 = in0 - in1;                         \
    out1 = in2 - in3;                         \
}
#define SUB4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3)  \
{                                                                             \
    out0 = in0 - in1;                                                         \
    out1 = in2 - in3;                                                         \
    out2 = in4 - in5;                                                         \
    out3 = in6 - in7;                                                         \
}
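/* UNPCK_R_SB_SH, UNPCK_SB_SH, UNPCK_UB_SH and UNPCK_SH_SW widen byte or
   halfword elements to the next larger element size, sign- or zero-
   extending as the name indicates. */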
2156 #define UNPCK_R_SB_SH(in, out) \
2160 sign_m = __msa_clti_s_b((v16i8) in, 0); \
2161 out = (v8i16) __msa_ilvr_b(sign_m, (v16i8) in); \
2172 #define UNPCK_R_SH_SW(in, out) \
2176 sign_m = __msa_clti_s_h((v8i16) in, 0); \
2177 out = (v4i32) __msa_ilvr_h(sign_m, (v8i16) in); \
2191 #define UNPCK_SB_SH(in, out0, out1) \
2195 tmp_m = __msa_clti_s_b((v16i8) in, 0); \
2196 ILVRL_B2_SH(tmp_m, in, out0, out1); \
2206 #define UNPCK_UB_SH(in, out0, out1) \
2208 v16i8 zero_m = { 0 }; \
2210 ILVRL_B2_SH(zero_m, in, out0, out1); \
2224 #define UNPCK_SH_SW(in, out0, out1) \
2228 tmp_m = __msa_clti_s_h((v8i16) in, 0); \
2229 ILVRL_H2_SW(tmp_m, in, out0, out1); \
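#if 0
/* Usage sketch (illustrative only, not part of the original header):
 * add an 8x4 block of halfword residuals to an 8x4 block of byte
 * prediction held at dst, clip to 0..255 and store.  Names are
 * hypothetical; each LD_UB4 row load reads a full 16-byte vector. */
static void add_residual_8x4_msa(uint8_t *dst, int32_t stride, int16_t *res)
{
    uint64_t out0, out1, out2, out3;
    v8i16 res0, res1, res2, res3, pred0, pred1, pred2, pred3;
    v16u8 dst0, dst1, dst2, dst3;
    v16i8 zero = { 0 };

    LD_SH4(res, 8, res0, res1, res2, res3);
    LD_UB4(dst, stride, dst0, dst1, dst2, dst3);
    ILVR_B4_SH(zero, dst0, zero, dst1, zero, dst2, zero, dst3,
               pred0, pred1, pred2, pred3);               /* bytes to halfwords */
    ADD4(pred0, res0, pred1, res1, pred2, res2, pred3, res3,
         pred0, pred1, pred2, pred3);
    CLIP_SH4_0_255(pred0, pred1, pred2, pred3);
    PCKEV_B2_UB(pred1, pred0, pred3, pred2, dst0, dst1);  /* halfwords to bytes */
    out0 = __msa_copy_u_d((v2i64) dst0, 0);
    out1 = __msa_copy_u_d((v2i64) dst0, 1);
    out2 = __msa_copy_u_d((v2i64) dst1, 0);
    out3 = __msa_copy_u_d((v2i64) dst1, 1);
    SD4(out0, out1, out2, out3, dst, stride);
}
#endif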
#define SWAP(in0, in1)  \
{                       \
    in0 = in0 ^ in1;    \
    in1 = in0 ^ in1;    \
    in0 = in0 ^ in1;    \
}

#define BUTTERFLY_4(in0, in1, in2, in3, out0, out1, out2, out3)  \
{                                                                \
    out0 = in0 + in3;                                            \
    out1 = in1 + in2;                                            \
                                                                 \
    out2 = in1 - in2;                                            \
    out3 = in0 - in3;                                            \
}

#define BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7,          \
                    out0, out1, out2, out3, out4, out5, out6, out7)  \
{                                                                    \
    out0 = in0 + in7;                                                \
    out1 = in1 + in6;                                                \
    out2 = in2 + in5;                                                \
    out3 = in3 + in4;                                                \
                                                                     \
    out4 = in3 - in4;                                                \
    out5 = in2 - in5;                                                \
    out6 = in1 - in6;                                                \
    out7 = in0 - in7;                                                \
}
2282 #define BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, \
2283 in8, in9, in10, in11, in12, in13, in14, in15, \
2284 out0, out1, out2, out3, out4, out5, out6, out7, \
2285 out8, out9, out10, out11, out12, out13, out14, out15) \
2287 out0 = in0 + in15; \
2288 out1 = in1 + in14; \
2289 out2 = in2 + in13; \
2290 out3 = in3 + in12; \
2291 out4 = in4 + in11; \
    out5 = in5 + in10;                                              \
    out6 = in6 + in9;                                               \
    out7 = in7 + in8;                                               \
                                                                    \
    out8 = in7 - in8;                                               \
    out9 = in6 - in9;                                               \
    out10 = in5 - in10;                                             \
2299 out11 = in4 - in11; \
2300 out12 = in3 - in12; \
2301 out13 = in2 - in13; \
2302 out14 = in1 - in14; \
2303 out15 = in0 - in15; \
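/* TRANSPOSE*: in-register transposition of 4x4, 8x4, 8x8, 16x4 and 16x8
   blocks of byte, halfword or word elements. */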
2312 #define TRANSPOSE4x4_UB_UB(in0, in1, in2, in3, out0, out1, out2, out3) \
2314 v16i8 zero_m = { 0 }; \
2315 v16i8 s0_m, s1_m, s2_m, s3_m; \
2317 ILVR_D2_SB(in1, in0, in3, in2, s0_m, s1_m); \
2318 ILVRL_B2_SB(s1_m, s0_m, s2_m, s3_m); \
2320 out0 = (v16u8) __msa_ilvr_b(s3_m, s2_m); \
2321 out1 = (v16u8) __msa_sldi_b(zero_m, (v16i8) out0, 4); \
2322 out2 = (v16u8) __msa_sldi_b(zero_m, (v16i8) out1, 4); \
2323 out3 = (v16u8) __msa_sldi_b(zero_m, (v16i8) out2, 4); \
2332 #define TRANSPOSE8x4_UB(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
2333 out0, out1, out2, out3) \
2335 v16i8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
2337 ILVEV_W2_SB(in0, in4, in1, in5, tmp0_m, tmp1_m); \
2338 tmp2_m = __msa_ilvr_b(tmp1_m, tmp0_m); \
2339 ILVEV_W2_SB(in2, in6, in3, in7, tmp0_m, tmp1_m); \
2341 tmp3_m = __msa_ilvr_b(tmp1_m, tmp0_m); \
2342 ILVRL_H2_SB(tmp3_m, tmp2_m, tmp0_m, tmp1_m); \
2344 ILVRL_W2(RTYPE, tmp1_m, tmp0_m, out0, out2); \
2345 out1 = (RTYPE) __msa_ilvl_d((v2i64) out2, (v2i64) out0); \
2346 out3 = (RTYPE) __msa_ilvl_d((v2i64) out0, (v2i64) out2); \
2348 #define TRANSPOSE8x4_UB_UB(...) TRANSPOSE8x4_UB(v16u8, __VA_ARGS__)
2349 #define TRANSPOSE8x4_UB_UH(...) TRANSPOSE8x4_UB(v8u16, __VA_ARGS__)
2359 #define TRANSPOSE8x8_UB(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
2360 out0, out1, out2, out3, out4, out5, out6, out7) \
2362 v16i8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
2363 v16i8 tmp4_m, tmp5_m, tmp6_m, tmp7_m; \
2364 v16i8 zeros = { 0 }; \
2366 ILVR_B4_SB(in2, in0, in3, in1, in6, in4, in7, in5, \
2367 tmp0_m, tmp1_m, tmp2_m, tmp3_m); \
2368 ILVRL_B2_SB(tmp1_m, tmp0_m, tmp4_m, tmp5_m); \
2369 ILVRL_B2_SB(tmp3_m, tmp2_m, tmp6_m, tmp7_m); \
2370 ILVRL_W2(RTYPE, tmp6_m, tmp4_m, out0, out2); \
2371 ILVRL_W2(RTYPE, tmp7_m, tmp5_m, out4, out6); \
2372 SLDI_B4(RTYPE, zeros, out0, zeros, out2, zeros, out4, zeros, out6, \
2373 8, out1, out3, out5, out7); \
2375 #define TRANSPOSE8x8_UB_UB(...) TRANSPOSE8x8_UB(v16u8, __VA_ARGS__)
2376 #define TRANSPOSE8x8_UB_UH(...) TRANSPOSE8x8_UB(v8u16, __VA_ARGS__)
2385 #define TRANSPOSE16x4_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, \
2386 in8, in9, in10, in11, in12, in13, in14, in15, \
2387 out0, out1, out2, out3) \
2389 v2i64 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
2391 ILVEV_W2_SD(in0, in4, in8, in12, tmp0_m, tmp1_m); \
2392 out1 = (v16u8) __msa_ilvev_d(tmp1_m, tmp0_m); \
2394 ILVEV_W2_SD(in1, in5, in9, in13, tmp0_m, tmp1_m); \
2395 out3 = (v16u8) __msa_ilvev_d(tmp1_m, tmp0_m); \
2397 ILVEV_W2_SD(in2, in6, in10, in14, tmp0_m, tmp1_m); \
2399 tmp2_m = __msa_ilvev_d(tmp1_m, tmp0_m); \
2400 ILVEV_W2_SD(in3, in7, in11, in15, tmp0_m, tmp1_m); \
2402 tmp3_m = __msa_ilvev_d(tmp1_m, tmp0_m); \
2403 ILVEV_B2_SD(out1, out3, tmp2_m, tmp3_m, tmp0_m, tmp1_m); \
2404 out0 = (v16u8) __msa_ilvev_h((v8i16) tmp1_m, (v8i16) tmp0_m); \
2405 out2 = (v16u8) __msa_ilvod_h((v8i16) tmp1_m, (v8i16) tmp0_m); \
2407 tmp0_m = (v2i64) __msa_ilvod_b((v16i8) out3, (v16i8) out1); \
2408 tmp1_m = (v2i64) __msa_ilvod_b((v16i8) tmp3_m, (v16i8) tmp2_m); \
2409 out1 = (v16u8) __msa_ilvev_h((v8i16) tmp1_m, (v8i16) tmp0_m); \
2410 out3 = (v16u8) __msa_ilvod_h((v8i16) tmp1_m, (v8i16) tmp0_m); \
2420 #define TRANSPOSE16x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, \
2421 in8, in9, in10, in11, in12, in13, in14, in15, \
2422 out0, out1, out2, out3, out4, out5, out6, out7) \
2424 v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
2425 v16u8 tmp4_m, tmp5_m, tmp6_m, tmp7_m; \
2427 ILVEV_D2_UB(in0, in8, in1, in9, out7, out6); \
2428 ILVEV_D2_UB(in2, in10, in3, in11, out5, out4); \
2429 ILVEV_D2_UB(in4, in12, in5, in13, out3, out2); \
2430 ILVEV_D2_UB(in6, in14, in7, in15, out1, out0); \
2432 tmp0_m = (v16u8) __msa_ilvev_b((v16i8) out6, (v16i8) out7); \
2433 tmp4_m = (v16u8) __msa_ilvod_b((v16i8) out6, (v16i8) out7); \
2434 tmp1_m = (v16u8) __msa_ilvev_b((v16i8) out4, (v16i8) out5); \
2435 tmp5_m = (v16u8) __msa_ilvod_b((v16i8) out4, (v16i8) out5); \
2436 out5 = (v16u8) __msa_ilvev_b((v16i8) out2, (v16i8) out3); \
2437 tmp6_m = (v16u8) __msa_ilvod_b((v16i8) out2, (v16i8) out3); \
2438 out7 = (v16u8) __msa_ilvev_b((v16i8) out0, (v16i8) out1); \
2439 tmp7_m = (v16u8) __msa_ilvod_b((v16i8) out0, (v16i8) out1); \
2441 ILVEV_H2_UB(tmp0_m, tmp1_m, out5, out7, tmp2_m, tmp3_m); \
2442 out0 = (v16u8) __msa_ilvev_w((v4i32) tmp3_m, (v4i32) tmp2_m); \
2443 out4 = (v16u8) __msa_ilvod_w((v4i32) tmp3_m, (v4i32) tmp2_m); \
2445 tmp2_m = (v16u8) __msa_ilvod_h((v8i16) tmp1_m, (v8i16) tmp0_m); \
2446 tmp3_m = (v16u8) __msa_ilvod_h((v8i16) out7, (v8i16) out5); \
2447 out2 = (v16u8) __msa_ilvev_w((v4i32) tmp3_m, (v4i32) tmp2_m); \
2448 out6 = (v16u8) __msa_ilvod_w((v4i32) tmp3_m, (v4i32) tmp2_m); \
2450 ILVEV_H2_UB(tmp4_m, tmp5_m, tmp6_m, tmp7_m, tmp2_m, tmp3_m); \
2451 out1 = (v16u8) __msa_ilvev_w((v4i32) tmp3_m, (v4i32) tmp2_m); \
2452 out5 = (v16u8) __msa_ilvod_w((v4i32) tmp3_m, (v4i32) tmp2_m); \
2454 tmp2_m = (v16u8) __msa_ilvod_h((v8i16) tmp5_m, (v8i16) tmp4_m); \
2455 tmp3_m = (v16u8) __msa_ilvod_h((v8i16) tmp7_m, (v8i16) tmp6_m); \
2456 out3 = (v16u8) __msa_ilvev_w((v4i32) tmp3_m, (v4i32) tmp2_m); \
2457 out7 = (v16u8) __msa_ilvod_w((v4i32) tmp3_m, (v4i32) tmp2_m); \
2466 #define TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, out0, out1, out2, out3) \
2470 ILVR_H2_SH(in1, in0, in3, in2, s0_m, s1_m); \
2471 ILVRL_W2_SH(s1_m, s0_m, out0, out2); \
2472 out1 = (v8i16) __msa_ilvl_d((v2i64) out0, (v2i64) out0); \
2473 out3 = (v8i16) __msa_ilvl_d((v2i64) out0, (v2i64) out2); \
2482 #define TRANSPOSE8x8_H(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
2483 out0, out1, out2, out3, out4, out5, out6, out7) \
2486 v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
2487 v8i16 tmp4_m, tmp5_m, tmp6_m, tmp7_m; \
2489 ILVR_H2_SH(in6, in4, in7, in5, s0_m, s1_m); \
2490 ILVRL_H2_SH(s1_m, s0_m, tmp0_m, tmp1_m); \
2491 ILVL_H2_SH(in6, in4, in7, in5, s0_m, s1_m); \
2492 ILVRL_H2_SH(s1_m, s0_m, tmp2_m, tmp3_m); \
2493 ILVR_H2_SH(in2, in0, in3, in1, s0_m, s1_m); \
2494 ILVRL_H2_SH(s1_m, s0_m, tmp4_m, tmp5_m); \
2495 ILVL_H2_SH(in2, in0, in3, in1, s0_m, s1_m); \
2496 ILVRL_H2_SH(s1_m, s0_m, tmp6_m, tmp7_m); \
2497 PCKEV_D4(RTYPE, tmp0_m, tmp4_m, tmp1_m, tmp5_m, tmp2_m, tmp6_m, \
2498 tmp3_m, tmp7_m, out0, out2, out4, out6); \
2499 out1 = (RTYPE) __msa_pckod_d((v2i64) tmp0_m, (v2i64) tmp4_m); \
2500 out3 = (RTYPE) __msa_pckod_d((v2i64) tmp1_m, (v2i64) tmp5_m); \
2501 out5 = (RTYPE) __msa_pckod_d((v2i64) tmp2_m, (v2i64) tmp6_m); \
2502 out7 = (RTYPE) __msa_pckod_d((v2i64) tmp3_m, (v2i64) tmp7_m); \
2504 #define TRANSPOSE8x8_UH_UH(...) TRANSPOSE8x8_H(v8u16, __VA_ARGS__)
2505 #define TRANSPOSE8x8_SH_SH(...) TRANSPOSE8x8_H(v8i16, __VA_ARGS__)
2513 #define TRANSPOSE4x4_SW_SW(in0, in1, in2, in3, out0, out1, out2, out3) \
2515 v4i32 s0_m, s1_m, s2_m, s3_m; \
2517 ILVRL_W2_SW(in1, in0, s0_m, s1_m); \
2518 ILVRL_W2_SW(in3, in2, s2_m, s3_m); \
2520 out0 = (v4i32) __msa_ilvr_d((v2i64) s2_m, (v2i64) s0_m); \
2521 out1 = (v4i32) __msa_ilvl_d((v2i64) s2_m, (v2i64) s0_m); \
2522 out2 = (v4i32) __msa_ilvr_d((v2i64) s3_m, (v2i64) s1_m); \
2523 out3 = (v4i32) __msa_ilvl_d((v2i64) s3_m, (v2i64) s1_m); \
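/* AVE_ST*, AVER_ST* and AVER_DST_ST*: average vector pairs (ave is the
   truncating average, aver the rounding one) and store the result; the DST
   forms additionally average with the data already at pdst. */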
2540 #define AVE_ST8x4_UB(in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \
2542 uint64_t out0_m, out1_m, out2_m, out3_m; \
2543 v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
2545 tmp0_m = __msa_ave_u_b((v16u8) in0, (v16u8) in1); \
2546 tmp1_m = __msa_ave_u_b((v16u8) in2, (v16u8) in3); \
2547 tmp2_m = __msa_ave_u_b((v16u8) in4, (v16u8) in5); \
2548 tmp3_m = __msa_ave_u_b((v16u8) in6, (v16u8) in7); \
2550 out0_m = __msa_copy_u_d((v2i64) tmp0_m, 0); \
2551 out1_m = __msa_copy_u_d((v2i64) tmp1_m, 0); \
2552 out2_m = __msa_copy_u_d((v2i64) tmp2_m, 0); \
2553 out3_m = __msa_copy_u_d((v2i64) tmp3_m, 0); \
2554 SD4(out0_m, out1_m, out2_m, out3_m, pdst, stride); \
2571 #define AVE_ST16x4_UB(in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \
2573 v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
2575 tmp0_m = __msa_ave_u_b((v16u8) in0, (v16u8) in1); \
2576 tmp1_m = __msa_ave_u_b((v16u8) in2, (v16u8) in3); \
2577 tmp2_m = __msa_ave_u_b((v16u8) in4, (v16u8) in5); \
2578 tmp3_m = __msa_ave_u_b((v16u8) in6, (v16u8) in7); \
2580 ST_UB4(tmp0_m, tmp1_m, tmp2_m, tmp3_m, pdst, stride); \
2597 #define AVER_ST8x4_UB(in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \
2599 uint64_t out0_m, out1_m, out2_m, out3_m; \
2600 v16u8 tp0_m, tp1_m, tp2_m, tp3_m; \
2602 AVER_UB4_UB(in0, in1, in2, in3, in4, in5, in6, in7, \
2603 tp0_m, tp1_m, tp2_m, tp3_m); \
2605 out0_m = __msa_copy_u_d((v2i64) tp0_m, 0); \
2606 out1_m = __msa_copy_u_d((v2i64) tp1_m, 0); \
2607 out2_m = __msa_copy_u_d((v2i64) tp2_m, 0); \
2608 out3_m = __msa_copy_u_d((v2i64) tp3_m, 0); \
2609 SD4(out0_m, out1_m, out2_m, out3_m, pdst, stride); \
2626 #define AVER_ST16x4_UB(in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \
2628 v16u8 t0_m, t1_m, t2_m, t3_m; \
2630 AVER_UB4_UB(in0, in1, in2, in3, in4, in5, in6, in7, \
2631 t0_m, t1_m, t2_m, t3_m); \
2632 ST_UB4(t0_m, t1_m, t2_m, t3_m, pdst, stride); \
2650 #define AVER_DST_ST8x4_UB(in0, in1, in2, in3, in4, in5, in6, in7, \
2653 v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
2654 v16u8 dst0_m, dst1_m, dst2_m, dst3_m; \
2656 LD_UB4(pdst, stride, dst0_m, dst1_m, dst2_m, dst3_m); \
2657 AVER_UB4_UB(in0, in1, in2, in3, in4, in5, in6, in7, \
2658 tmp0_m, tmp1_m, tmp2_m, tmp3_m); \
2659 AVER_ST8x4_UB(dst0_m, tmp0_m, dst1_m, tmp1_m, \
2660 dst2_m, tmp2_m, dst3_m, tmp3_m, pdst, stride); \
2678 #define AVER_DST_ST16x4_UB(in0, in1, in2, in3, in4, in5, in6, in7, \
2681 v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
2682 v16u8 dst0_m, dst1_m, dst2_m, dst3_m; \
2684 LD_UB4(pdst, stride, dst0_m, dst1_m, dst2_m, dst3_m); \
2685 AVER_UB4_UB(in0, in1, in2, in3, in4, in5, in6, in7, \
2686 tmp0_m, tmp1_m, tmp2_m, tmp3_m); \
2687 AVER_ST16x4_UB(dst0_m, tmp0_m, dst1_m, tmp1_m, \
2688 dst2_m, tmp2_m, dst3_m, tmp3_m, pdst, stride); \
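/* ADDBLK_ST4x4_UB: add a 4x4 block of halfword residuals to the 4x4 bytes
   loaded from pdst, clip to 0..255 and store the result back. */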
2696 #define ADDBLK_ST4x4_UB(in0, in1, in2, in3, pdst, stride) \
2698 uint32_t src0_m, src1_m, src2_m, src3_m; \
2699 uint32_t out0_m, out1_m, out2_m, out3_m; \
2700 v8i16 inp0_m, inp1_m, res0_m, res1_m; \
2701 v16i8 dst0_m = { 0 }; \
2702 v16i8 dst1_m = { 0 }; \
2703 v16i8 zero_m = { 0 }; \
2705 ILVR_D2_SH(in1, in0, in3, in2, inp0_m, inp1_m) \
2706 LW4(pdst, stride, src0_m, src1_m, src2_m, src3_m); \
2707 INSERT_W2_SB(src0_m, src1_m, dst0_m); \
2708 INSERT_W2_SB(src2_m, src3_m, dst1_m); \
2709 ILVR_B2_SH(zero_m, dst0_m, zero_m, dst1_m, res0_m, res1_m); \
2710 ADD2(res0_m, inp0_m, res1_m, inp1_m, res0_m, res1_m); \
2711 CLIP_SH2_0_255(res0_m, res1_m); \
2712 PCKEV_B2_SB(res0_m, res0_m, res1_m, res1_m, dst0_m, dst1_m); \
2714 out0_m = __msa_copy_u_w((v4i32) dst0_m, 0); \
2715 out1_m = __msa_copy_u_w((v4i32) dst0_m, 1); \
2716 out2_m = __msa_copy_u_w((v4i32) dst1_m, 0); \
2717 out3_m = __msa_copy_u_w((v4i32) dst1_m, 1); \
2718 SW4(out0_m, out1_m, out2_m, out3_m, pdst, stride); \
2732 #define DPADD_SH3_SH(in0, in1, in2, coeff0, coeff1, coeff2) \
2736 out0_m = __msa_dotp_s_h((v16i8) in0, (v16i8) coeff0); \
2737 out0_m = __msa_dpadd_s_h(out0_m, (v16i8) in1, (v16i8) coeff1); \
2738 out0_m = __msa_dpadd_s_h(out0_m, (v16i8) in2, (v16i8) coeff2); \
2751 #define PCKEV_XORI128_UB(in0, in1) \
2754 out_m = (v16u8) __msa_pckev_b((v16i8) in1, (v16i8) in0); \
2755 out_m = (v16u8) __msa_xori_b((v16u8) out_m, 128); \
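/* CONVERT_UB_AVG_ST8x4_UB: pack four halfword vectors to bytes (removing the
   128 bias via PCKEV_XORI128_UB), average with dst0/dst1 and store four
   8-byte rows. */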
2763 #define CONVERT_UB_AVG_ST8x4_UB(in0, in1, in2, in3, \
2764 dst0, dst1, pdst, stride) \
2766 v16u8 tmp0_m, tmp1_m; \
2767 uint8_t *pdst_m = (uint8_t *) (pdst); \
2769 tmp0_m = PCKEV_XORI128_UB(in0, in1); \
2770 tmp1_m = PCKEV_XORI128_UB(in2, in3); \
2771 AVER_UB2_UB(tmp0_m, dst0, tmp1_m, dst1, tmp0_m, tmp1_m); \
2772 ST_D4(tmp0_m, tmp1_m, 0, 1, 0, 1, pdst_m, stride); \
2780 #define PCKEV_ST4x4_UB(in0, in1, in2, in3, pdst, stride) \
2782 uint32_t out0_m, out1_m, out2_m, out3_m; \
2783 v16i8 tmp0_m, tmp1_m; \
2785 PCKEV_B2_SB(in1, in0, in3, in2, tmp0_m, tmp1_m); \
2787 out0_m = __msa_copy_u_w((v4i32) tmp0_m, 0); \
2788 out1_m = __msa_copy_u_w((v4i32) tmp0_m, 2); \
2789 out2_m = __msa_copy_u_w((v4i32) tmp1_m, 0); \
2790 out3_m = __msa_copy_u_w((v4i32) tmp1_m, 2); \
2792 SW4(out0_m, out1_m, out2_m, out3_m, pdst, stride); \
2799 #define PCKEV_ST_SB(in0, in1, pdst) \
2802 tmp_m = __msa_pckev_b((v16i8) in1, (v16i8) in0); \
2803 ST_SB(tmp_m, (pdst)); \
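/* HORIZ_2TAP_FILT_UH: horizontal 2-tap filter kernel - shuffle the two
   inputs with 'mask', take the dot product with 'coeff', then apply a
   rounding right shift and saturation by 'shift'. */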
#define HORIZ_2TAP_FILT_UH(in0, in1, mask, coeff, shift)            \
( {                                                                 \
    v16i8 tmp0_m;                                                   \
    v8u16 tmp1_m;                                                   \
                                                                    \
    tmp0_m = __msa_vshf_b((v16i8) mask, (v16i8) in1, (v16i8) in0);  \
    tmp1_m = __msa_dotp_u_h((v16u8) tmp0_m, (v16u8) coeff);         \
    tmp1_m = (v8u16) __msa_srari_h((v8i16) tmp1_m, shift);          \
    tmp1_m = __msa_sat_u_h(tmp1_m, shift);                          \
                                                                    \
    tmp1_m;                                                         \
} )

#endif  /* AVUTIL_MIPS_GENERIC_MACROS_MSA_H */