28 uint32_t tp0, tp1, offset_val;
31 v8i16 src0_r, tmp0, wgt, denom, offset;
33 offset_val = (unsigned) offset_in << log2_denom;
35 wgt = __msa_fill_h(src_weight);
36 offset = __msa_fill_h(offset_val);
37 denom = __msa_fill_h(log2_denom);
39 LW2(data, stride, tp0, tp1);
41 src0_r = (v8i16) __msa_ilvr_b((v16i8) zero, (v16i8) src0);
43 tmp0 = __msa_adds_s_h(tmp0, offset);
44 tmp0 = __msa_maxi_s_h(tmp0, 0);
45 tmp0 = __msa_srlr_h(tmp0, denom);
46 tmp0 = (v8i16) __msa_sat_u_h((v8u16) tmp0, 7);
47 src0 = (v16u8) __msa_pckev_b((v16i8) tmp0, (v16i8) tmp0);
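/* The avc_wgt_* routines below implement H.264 explicit (unidirectional)
 * weighted prediction.  A scalar sketch of the per-pixel operation, roughly,
 * for log2_denom > 0:
 *
 *     out = clip255((in * weight + (offset << log2_denom) +
 *                    (1 << (log2_denom - 1))) >> log2_denom);
 *
 * The vector code folds the offset into the numerator up front
 * (offset_val = offset_in << log2_denom), clamps the intermediate to >= 0
 * with __msa_maxi_s_h, does the rounding shift with __msa_srlr_h and
 * saturates to 8 bits with __msa_sat_u_h before packing back to bytes. */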
54 uint32_t tp0, tp1, tp2, tp3, offset_val;
56 v8i16 src0_r, src1_r, tmp0, tmp1, wgt, denom, offset;
58 offset_val = (unsigned) offset_in << log2_denom;
60 wgt = __msa_fill_h(src_weight);
61 offset = __msa_fill_h(offset_val);
62 denom = __msa_fill_h(log2_denom);
64 LW4(data, stride, tp0, tp1, tp2, tp3);
67 MUL2(wgt, src0_r, wgt, src1_r, tmp0, tmp1);
68 ADDS_SH2_SH(tmp0, offset, tmp1, offset, tmp0, tmp1);
70 tmp0 = __msa_srlr_h(tmp0, denom);
71 tmp1 = __msa_srlr_h(tmp1, denom);
73 src0 = (v16u8) __msa_pckev_b((v16i8) tmp1, (v16i8) tmp0);
74 ST4x4_UB(src0, src0, 0, 1, 2, 3, data, stride);
80 uint32_t tp0, tp1, tp2, tp3, offset_val;
82 v8i16 src0_r, src1_r, src2_r, src3_r, tmp0, tmp1, tmp2, tmp3;
85 offset_val = (unsigned) offset_in << log2_denom;
87 wgt = __msa_fill_h(src_weight);
88 offset = __msa_fill_h(offset_val);
89 denom = __msa_fill_h(log2_denom);
91 LW4(data, stride, tp0, tp1, tp2, tp3);
93 LW4(data + 4 * stride, stride, tp0, tp1, tp2, tp3);
97 MUL4(wgt, src0_r, wgt, src1_r, wgt, src2_r, wgt, src3_r, tmp0, tmp1, tmp2,
99 ADDS_SH4_SH(tmp0, offset, tmp1, offset, tmp2, offset, tmp3, offset, tmp0,
112 uint64_t tp0, tp1, tp2, tp3;
114 v8i16 src0_r, src1_r, src2_r, src3_r, tmp0, tmp1, tmp2, tmp3;
117 offset_val = (unsigned) offset_in << log2_denom;
119 wgt = __msa_fill_h(src_weight);
120 offset = __msa_fill_h(offset_val);
121 denom = __msa_fill_h(log2_denom);
123 LD4(data, stride, tp0, tp1, tp2, tp3);
128 MUL4(wgt, src0_r, wgt, src1_r, wgt, src2_r, wgt, src3_r, tmp0, tmp1, tmp2,
130 ADDS_SH4_SH(tmp0, offset, tmp1, offset, tmp2, offset, tmp3, offset, tmp0,
143 uint64_t tp0, tp1, tp2, tp3;
144 v16u8 src0 = { 0 }, src1 = { 0 }, src2 = { 0 }, src3 = { 0 };
145 v8i16 src0_r, src1_r, src2_r, src3_r, src4_r, src5_r, src6_r, src7_r;
146 v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
149 offset_val = (unsigned) offset_in << log2_denom;
151 wgt = __msa_fill_h(src_weight);
152 offset = __msa_fill_h(offset_val);
153 denom = __msa_fill_h(log2_denom);
155 LD4(data, stride, tp0, tp1, tp2, tp3);
158 LD4(data + 4 * stride, stride, tp0, tp1, tp2, tp3);
165 MUL4(wgt, src0_r, wgt, src1_r, wgt, src2_r, wgt, src3_r, tmp0, tmp1, tmp2,
167 MUL4(wgt, src4_r, wgt, src5_r, wgt, src6_r, wgt, src7_r, tmp4, tmp5, tmp6,
169 ADDS_SH4_SH(tmp0, offset, tmp1, offset, tmp2, offset, tmp3, offset, tmp0,
171 ADDS_SH4_SH(tmp4, offset, tmp5, offset, tmp6, offset, tmp7, offset, tmp4,
173 MAXI_SH8_SH(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, 0);
174 SRLR_H8_SH(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, denom);
175 SAT_UH8_SH(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, 7);
176 PCKEV_B4_UB(tmp1, tmp0, tmp3, tmp2, tmp5, tmp4, tmp7, tmp6, src0, src1,
184 uint32_t offset_val, cnt;
185 uint64_t tp0, tp1, tp2, tp3;
186 v16u8 src0 = { 0 }, src1 = { 0 }, src2 = { 0 }, src3 = { 0 };
187 v8i16 src0_r, src1_r, src2_r, src3_r, src4_r, src5_r, src6_r, src7_r;
188 v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
191 offset_val = (unsigned) offset_in << log2_denom;
193 wgt = __msa_fill_h(src_weight);
194 offset = __msa_fill_h(offset_val);
195 denom = __msa_fill_h(log2_denom);
197 for (cnt = 2; cnt--;) {
198 LD4(data, stride, tp0, tp1, tp2, tp3);
201 LD4(data + 4 * stride, stride, tp0, tp1, tp2, tp3);
208 MUL4(wgt, src0_r, wgt, src1_r, wgt, src2_r, wgt, src3_r, tmp0, tmp1,
210 MUL4(wgt, src4_r, wgt, src5_r, wgt, src6_r, wgt, src7_r, tmp4, tmp5,
212 ADDS_SH4_SH(tmp0, offset, tmp1, offset, tmp2, offset, tmp3, offset,
213 tmp0, tmp1, tmp2, tmp3);
214 ADDS_SH4_SH(tmp4, offset, tmp5, offset, tmp6, offset, tmp7, offset,
215 tmp4, tmp5, tmp6, tmp7);
216 MAXI_SH8_SH(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, 0);
217 SRLR_H8_SH(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, denom);
218 SAT_UH8_SH(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, 7);
219 PCKEV_B4_UB(tmp1, tmp0, tmp3, tmp2, tmp5, tmp4, tmp7, tmp6, src0, src1,
231 v16i8 src_wgt, dst_wgt, wgt, vec0;
232 v16u8 src0 = { 0 }, dst0 = { 0 };
233 v8i16 tmp0, denom, offset, max255 = __msa_ldi_h(255);
235 offset_in = (unsigned) ((offset_in + 1) | 1) << log2_denom;
236 offset_in += (128 * (src_weight + dst_weight));
238 src_wgt = __msa_fill_b(src_weight);
239 dst_wgt = __msa_fill_b(dst_weight);
240 offset = __msa_fill_h(offset_in);
241 denom = __msa_fill_h(log2_denom + 1);
243 wgt = __msa_ilvev_b(dst_wgt, src_wgt);
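/* The avc_biwgt_* routines implement bidirectional (bi-weighted) prediction.
 * Both weights are packed into one vector with __msa_ilvev_b so that a single
 * signed dot-product, __msa_dpadd_s_h(offset, wgt, ilvr_b(dst, src)), yields
 * offset + src * src_weight + dst * dst_weight per output halfword.  The
 * rounding and offset terms are pre-folded into 'offset' above as
 * ((offset_in + 1) | 1) << log2_denom; the additional
 * 128 * (src_weight + dst_weight) bias compensates for the samples being
 * moved into signed-byte range before the signed dot product.  The sum is
 * then shifted right arithmetically by log2_denom + 1 and clipped to
 * [0, 255]. */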
245 LW2(src, stride, tp0, tp1);
247 LW2(dst, stride, tp0, tp1);
250 vec0 = (v16i8) __msa_ilvr_b((v16i8) dst0, (v16i8) src0);
251 tmp0 = __msa_dpadd_s_h(offset, wgt, vec0);
253 tmp0 = __msa_maxi_s_h(tmp0, 0);
254 tmp0 = __msa_min_s_h(max255, tmp0);
255 dst0 = (v16u8) __msa_pckev_b((v16i8) tmp0, (v16i8) tmp0);
263 uint32_t tp0, tp1, tp2, tp3;
264 v16i8 src_wgt, dst_wgt, wgt, vec0, vec1;
266 v8i16 tmp0, tmp1, denom, offset;
268 offset_in = (unsigned) ((offset_in + 1) | 1) << log2_denom;
269 offset_in += (128 * (src_weight + dst_weight));
271 src_wgt = __msa_fill_b(src_weight);
272 dst_wgt = __msa_fill_b(dst_weight);
273 offset = __msa_fill_h(offset_in);
274 denom = __msa_fill_h(log2_denom + 1);
276 wgt = __msa_ilvev_b(dst_wgt, src_wgt);
278 LW4(src, stride, tp0, tp1, tp2, tp3);
280 LW4(dst, stride, tp0, tp1, tp2, tp3);
284 tmp0 = __msa_dpadd_s_h(offset, wgt, vec0);
285 tmp1 = __msa_dpadd_s_h(offset, wgt, vec1);
289 dst0 = (v16u8) __msa_pckev_b((v16i8) tmp1, (v16i8) tmp0);
290 ST4x4_UB(dst0, dst0, 0, 1, 2, 3, dst, stride);
297 uint32_t tp0, tp1, tp2, tp3;
298 v16i8 src_wgt, dst_wgt, wgt, vec0, vec1, vec2, vec3;
300 v8i16 tmp0, tmp1, tmp2, tmp3, denom, offset;
302 offset_in = (unsigned) ((offset_in + 1) | 1) << log2_denom;
303 offset_in += (128 * (src_weight + dst_weight));
305 src_wgt = __msa_fill_b(src_weight);
306 dst_wgt = __msa_fill_b(dst_weight);
307 offset = __msa_fill_h(offset_in);
308 denom = __msa_fill_h(log2_denom + 1);
309 wgt = __msa_ilvev_b(dst_wgt, src_wgt);
311 LW4(src, stride, tp0, tp1, tp2, tp3);
314 LW4(src, stride, tp0, tp1, tp2, tp3);
316 LW4(dst, stride, tp0, tp1, tp2, tp3);
318 LW4(dst + 4 * stride, stride, tp0, tp1, tp2, tp3);
323 tmp0 = __msa_dpadd_s_h(offset, wgt, vec0);
324 tmp1 = __msa_dpadd_s_h(offset, wgt, vec1);
325 tmp2 = __msa_dpadd_s_h(offset, wgt, vec2);
326 tmp3 = __msa_dpadd_s_h(offset, wgt, vec3);
327 SRA_4V(tmp0, tmp1, tmp2, tmp3, denom);
337 uint64_t tp0, tp1, tp2, tp3;
338 v16i8 src_wgt, dst_wgt, wgt, vec0, vec1, vec2, vec3;
340 v8i16 tmp0, tmp1, tmp2, tmp3, denom, offset;
342 offset_in = (unsigned) ((offset_in + 1) | 1) << log2_denom;
343 offset_in += (128 * (src_weight + dst_weight));
345 src_wgt = __msa_fill_b(src_weight);
346 dst_wgt = __msa_fill_b(dst_weight);
347 offset = __msa_fill_h(offset_in);
348 denom = __msa_fill_h(log2_denom + 1);
350 wgt = __msa_ilvev_b(dst_wgt, src_wgt);
352 LD4(src, stride, tp0, tp1, tp2, tp3);
355 LD4(dst, stride, tp0, tp1, tp2, tp3);
361 tmp0 = __msa_dpadd_s_h(offset, wgt, vec0);
362 tmp1 = __msa_dpadd_s_h(offset, wgt, vec1);
363 tmp2 = __msa_dpadd_s_h(offset, wgt, vec2);
364 tmp3 = __msa_dpadd_s_h(offset, wgt, vec3);
365 SRA_4V(tmp0, tmp1, tmp2, tmp3, denom);
375 uint64_t tp0, tp1, tp2, tp3;
376 v16i8 src_wgt, dst_wgt, wgt, vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
377 v16u8 src0, src1, src2, src3, dst0, dst1, dst2, dst3;
378 v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, denom, offset;
380 offset_in = (unsigned) ((offset_in + 1) | 1) << log2_denom;
381 offset_in += (128 * (src_weight + dst_weight));
383 src_wgt = __msa_fill_b(src_weight);
384 dst_wgt = __msa_fill_b(dst_weight);
385 offset = __msa_fill_h(offset_in);
386 denom = __msa_fill_h(log2_denom + 1);
387 wgt = __msa_ilvev_b(dst_wgt, src_wgt);
389 LD4(src, stride, tp0, tp1, tp2, tp3);
392 LD4(src + 4 * stride, stride, tp0, tp1, tp2, tp3);
395 LD4(dst, stride, tp0, tp1, tp2, tp3);
398 LD4(dst + 4 * stride, stride, tp0, tp1, tp2, tp3);
406 tmp0 = __msa_dpadd_s_h(offset, wgt, vec0);
407 tmp1 = __msa_dpadd_s_h(offset, wgt, vec1);
408 tmp2 = __msa_dpadd_s_h(offset, wgt, vec2);
409 tmp3 = __msa_dpadd_s_h(offset, wgt, vec3);
410 tmp4 = __msa_dpadd_s_h(offset, wgt, vec4);
411 tmp5 = __msa_dpadd_s_h(offset, wgt, vec5);
412 tmp6 = __msa_dpadd_s_h(offset, wgt, vec6);
413 tmp7 = __msa_dpadd_s_h(offset, wgt, vec7);
414 SRA_4V(tmp0, tmp1, tmp2, tmp3, denom);
415 SRA_4V(tmp4, tmp5, tmp6, tmp7, denom);
420 ST8x8_UB(dst0, dst1, dst2, dst3, dst, stride);
428 uint64_t tp0, tp1, tp2, tp3;
429 v16i8 src_wgt, dst_wgt, wgt;
431 v16u8 dst0, dst1, dst2, dst3;
432 v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
433 v8i16 temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
436 offset_in = (unsigned) ((offset_in + 1) | 1) << log2_denom;
437 offset_in += (128 * (src_weight + dst_weight));
439 src_wgt = __msa_fill_b(src_weight);
440 dst_wgt = __msa_fill_b(dst_weight);
441 offset = __msa_fill_h(offset_in);
442 denom = __msa_fill_h(log2_denom + 1);
443 wgt = __msa_ilvev_b(dst_wgt, src_wgt);
445 for (cnt = 2; cnt--;) {
446 LD4(src, stride, tp0, tp1, tp2, tp3);
450 LD4(src, stride, tp0, tp1, tp2, tp3);
454 LD4(dst, stride, tp0, tp1, tp2, tp3);
457 LD4(dst + 4 * stride, stride, tp0, tp1, tp2, tp3);
462 ILVR_B4_SB(dst0, src0, dst1, src1, dst2, src2, dst3, src3,
463 vec0, vec2, vec4, vec6);
464 ILVL_B4_SB(dst0, src0, dst1, src1, dst2, src2, dst3, src3,
465 vec1, vec3, vec5, vec7);
467 temp0 = __msa_dpadd_s_h(offset, wgt, vec0);
468 temp1 = __msa_dpadd_s_h(offset, wgt, vec1);
469 temp2 = __msa_dpadd_s_h(offset, wgt, vec2);
470 temp3 = __msa_dpadd_s_h(offset, wgt, vec3);
471 temp4 = __msa_dpadd_s_h(offset, wgt, vec4);
472 temp5 = __msa_dpadd_s_h(offset, wgt, vec5);
473 temp6 = __msa_dpadd_s_h(offset, wgt, vec6);
474 temp7 = __msa_dpadd_s_h(offset, wgt, vec7);
476 SRA_4V(temp0, temp1, temp2, temp3, denom);
477 SRA_4V(temp4, temp5, temp6, temp7, denom);
480 PCKEV_B4_UB(temp1, temp0, temp3, temp2, temp5, temp4, temp7, temp6,
481 dst0, dst1, dst2, dst3);
482 ST8x8_UB(dst0, dst1, dst2, dst3, dst, stride);
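/* AVC_LPF_P0P1P2_OR_Q0Q1Q2 computes the H.264 strong (bS = 4) luma filter for
 * one side of the edge; with the arguments bound to the p side it evaluates
 *
 *     p0' = (p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4) >> 3
 *     p1' = (p2 + p1 + p0 + q0 + 2) >> 2
 *     p2' = (2*p3 + 3*p2 + p1 + p0 + q0 + 4) >> 3
 *
 * using 'threshold' = p0 + q0 + p1 as the shared partial sum and
 * __msa_srari_h for the rounding shifts.  The q side is handled by the same
 * macro with the p/q arguments swapped. */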
487 #define AVC_LPF_P0P1P2_OR_Q0Q1Q2(p3_or_q3_org_in, p0_or_q0_org_in, \
488 q3_or_p3_org_in, p1_or_q1_org_in, \
489 p2_or_q2_org_in, q1_or_p1_org_in, \
490 p0_or_q0_out, p1_or_q1_out, p2_or_q2_out) \
493 v8i16 const3 = __msa_ldi_h(3); \
495 threshold = (p0_or_q0_org_in) + (q3_or_p3_org_in); \
496 threshold += (p1_or_q1_org_in); \
498 (p0_or_q0_out) = threshold << 1; \
499 (p0_or_q0_out) += (p2_or_q2_org_in); \
500 (p0_or_q0_out) += (q1_or_p1_org_in); \
501 (p0_or_q0_out) = __msa_srari_h((p0_or_q0_out), 3); \
503 (p1_or_q1_out) = (p2_or_q2_org_in) + threshold; \
504 (p1_or_q1_out) = __msa_srari_h((p1_or_q1_out), 2); \
506 (p2_or_q2_out) = (p2_or_q2_org_in) * const3; \
507 (p2_or_q2_out) += (p3_or_q3_org_in); \
508 (p2_or_q2_out) += (p3_or_q3_org_in); \
509 (p2_or_q2_out) += threshold; \
510 (p2_or_q2_out) = __msa_srari_h((p2_or_q2_out), 3); \
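/* AVC_LPF_P0_OR_Q0 is the fallback in the strong-filter path when the p2
 * (or q2) condition fails: p0' = (2*p1 + p0 + q1 + 2) >> 2. */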
514 #define AVC_LPF_P0_OR_Q0(p0_or_q0_org_in, q1_or_p1_org_in, \
515 p1_or_q1_org_in, p0_or_q0_out) \
517 (p0_or_q0_out) = (p0_or_q0_org_in) + (q1_or_p1_org_in); \
518 (p0_or_q0_out) += (p1_or_q1_org_in); \
519 (p0_or_q0_out) += (p1_or_q1_org_in); \
520 (p0_or_q0_out) = __msa_srari_h((p0_or_q0_out), 2); \
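/* AVC_LPF_P1_OR_Q1 implements the normal-filter p1/q1 update:
 *     p1' = p1 + clip3(-tc0, tc0, (p2 + ((p0 + q0 + 1) >> 1) - 2*p1) >> 1)
 * __msa_aver_u_h provides the rounded (p0 + q0 + 1) >> 1 average and
 * __msa_ave_s_h supplies the final arithmetic halving. */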
523 #define AVC_LPF_P1_OR_Q1(p0_or_q0_org_in, q0_or_p0_org_in, \
524 p1_or_q1_org_in, p2_or_q2_org_in, \
525 negate_tc_in, tc_in, p1_or_q1_out) \
529 clip3 = (v8i16) __msa_aver_u_h((v8u16) p0_or_q0_org_in, \
530 (v8u16) q0_or_p0_org_in); \
531 temp = p1_or_q1_org_in << 1; \
532 clip3 = clip3 - temp; \
533 clip3 = __msa_ave_s_h(p2_or_q2_org_in, clip3); \
534 clip3 = CLIP_SH(clip3, negate_tc_in, tc_in); \
535 p1_or_q1_out = p1_or_q1_org_in + clip3; \
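/* AVC_LPF_P0Q0 applies the normal-filter p0/q0 update: the clipped delta
 *     delta = clip3(-tc, tc, ((q0 - p0) * 4 + (p1 - q1) + 4) >> 3)
 * is added to p0 and subtracted from q0, and both results are clamped to
 * [0, 255] with CLIP_SH2_0_255. */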
538 #define AVC_LPF_P0Q0(q0_or_p0_org_in, p0_or_q0_org_in, \
539 p1_or_q1_org_in, q1_or_p1_org_in, \
540 negate_threshold_in, threshold_in, \
541 p0_or_q0_out, q0_or_p0_out) \
543 v8i16 q0_sub_p0, p1_sub_q1, delta; \
545 q0_sub_p0 = q0_or_p0_org_in - p0_or_q0_org_in; \
546 p1_sub_q1 = p1_or_q1_org_in - q1_or_p1_org_in; \
549 delta = q0_sub_p0 + p1_sub_q1; \
552 delta = CLIP_SH(delta, negate_threshold_in, threshold_in); \
554 p0_or_q0_out = p0_or_q0_org_in + delta; \
555 q0_or_p0_out = q0_or_p0_org_in - delta; \
557 CLIP_SH2_0_255(p0_or_q0_out, q0_or_p0_out); \
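/* AVC_LPF_H_CHROMA_422 filters one vertical chroma edge of a 4:2:2 block:
 * four 4-byte rows are gathered from src - 2 and transposed so that p1, p0,
 * q0 and q1 each occupy one vector, the |p0 - q0| < alpha and
 * |p1 - p0|, |q1 - q0| < beta tests build the filter mask, and the usual
 * chroma delta, clip3(-tc, tc, ((q0 - p0) * 4 + (p1 - q1) + 4) >> 3), is
 * applied to p0/q0 where the mask is set. */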
560 #define AVC_LPF_H_CHROMA_422(src, stride, tc_val, alpha, beta, res) \
562 uint32_t load0, load1, load2, load3; \
563 v16u8 src0 = { 0 }; \
564 v16u8 src1 = { 0 }; \
565 v16u8 src2 = { 0 }; \
566 v16u8 src3 = { 0 }; \
567 v16u8 p0_asub_q0, p1_asub_p0, q1_asub_q0; \
568 v16u8 is_less_than, is_less_than_alpha, is_less_than_beta; \
569 v8i16 tc, q0_sub_p0, p1_sub_q1, delta; \
570 v8i16 res0_r, res1_r; \
571 v16i8 zeros = { 0 }; \
574 LW4((src - 2), stride, load0, load1, load2, load3); \
575 src0 = (v16u8) __msa_insert_w((v4i32) src0, 0, load0); \
576 src1 = (v16u8) __msa_insert_w((v4i32) src1, 0, load1); \
577 src2 = (v16u8) __msa_insert_w((v4i32) src2, 0, load2); \
578 src3 = (v16u8) __msa_insert_w((v4i32) src3, 0, load3); \
580 TRANSPOSE4x4_UB_UB(src0, src1, src2, src3, src0, src1, src2, src3); \
582 p0_asub_q0 = __msa_asub_u_b(src2, src1); \
583 p1_asub_p0 = __msa_asub_u_b(src1, src0); \
584 q1_asub_q0 = __msa_asub_u_b(src2, src3); \
586 tc = __msa_fill_h(tc_val); \
588 is_less_than_alpha = (p0_asub_q0 < alpha); \
589 is_less_than_beta = (p1_asub_p0 < beta); \
590 is_less_than = is_less_than_alpha & is_less_than_beta; \
591 is_less_than_beta = (q1_asub_q0 < beta); \
592 is_less_than = is_less_than_beta & is_less_than; \
594 ILVR_B2_SH(src2, src1, src0, src3, q0_sub_p0, p1_sub_q1); \
595 HSUB_UB2_SH(q0_sub_p0, p1_sub_q1, q0_sub_p0, p1_sub_q1); \
598 delta = q0_sub_p0 + p1_sub_q1; \
599 delta = __msa_srari_h(delta, 3); \
601 delta = CLIP_SH(delta, -tc, tc); \
603 ILVR_B2_SH(zeros, src1, zeros, src2, res0_r, res1_r); \
608 CLIP_SH2_0_255(res0_r, res1_r); \
609 PCKEV_B2_UB(res0_r, res0_r, res1_r, res1_r, res0, res1); \
611 res0 = __msa_bmnz_v(src1, res0, is_less_than); \
612 res1 = __msa_bmnz_v(src2, res1, is_less_than); \
614 res = (v16u8) __msa_ilvr_b((v16i8) res1, (v16i8) res0); \
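/* TRANSPOSE2x4_B_UB turns a 2x4 byte tile (two 4-byte rows in in0/in1) into
 * four vectors whose low bytes hold the columns: the rows are interleaved
 * with __msa_ilvr_b and successive column pairs are peeled off with 2-byte
 * element shifts. */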
617 #define TRANSPOSE2x4_B_UB(in0, in1, out0, out1, out2, out3) \
619 v16i8 zero_m = { 0 }; \
621 out0 = (v16u8) __msa_ilvr_b((v16i8) in1, (v16i8) in0); \
622 out1 = (v16u8) __msa_sldi_b(zero_m, (v16i8) out0, 2); \
623 SLDI_B2_0_UB(out1, out2, out2, out3, 2); \
626 #define AVC_LPF_H_2BYTE_CHROMA_422(src, stride, tc_val, alpha, beta, res) \
628 uint32_t load0, load1; \
629 v16u8 src0 = { 0 }; \
630 v16u8 src1 = { 0 }; \
631 v16u8 src2 = { 0 }; \
632 v16u8 src3 = { 0 }; \
633 v16u8 p0_asub_q0, p1_asub_p0, q1_asub_q0; \
634 v16u8 is_less_than, is_less_than_alpha, is_less_than_beta; \
635 v8i16 tc, q0_sub_p0, p1_sub_q1, delta, res0_r, res1_r; \
636 v16i8 zeros = { 0 }; \
639 load0 = LW(src - 2); \
640 load1 = LW(src - 2 + stride); \
642 src0 = (v16u8) __msa_insert_w((v4i32) src0, 0, load0); \
643 src1 = (v16u8) __msa_insert_w((v4i32) src1, 0, load1); \
645 TRANSPOSE2x4_B_UB(src0, src1, src0, src1, src2, src3); \
647 p0_asub_q0 = __msa_asub_u_b(src2, src1); \
648 p1_asub_p0 = __msa_asub_u_b(src1, src0); \
649 q1_asub_q0 = __msa_asub_u_b(src2, src3); \
651 tc = __msa_fill_h(tc_val); \
653 is_less_than_alpha = (p0_asub_q0 < alpha); \
654 is_less_than_beta = (p1_asub_p0 < beta); \
655 is_less_than = is_less_than_alpha & is_less_than_beta; \
656 is_less_than_beta = (q1_asub_q0 < beta); \
657 is_less_than = is_less_than_beta & is_less_than; \
659 ILVR_B2_SH(src2, src1, src0, src3, q0_sub_p0, p1_sub_q1); \
660 HSUB_UB2_SH(q0_sub_p0, p1_sub_q1, q0_sub_p0, p1_sub_q1); \
663 delta = q0_sub_p0 + p1_sub_q1; \
664 delta = __msa_srari_h(delta, 3); \
665 delta = CLIP_SH(delta, -tc, tc); \
667 ILVR_B2_SH(zeros, src1, zeros, src2, res0_r, res1_r); \
672 CLIP_SH2_0_255(res0_r, res1_r); \
673 PCKEV_B2_UB(res0_r, res0_r, res1_r, res1_r, res0, res1); \
675 res0 = __msa_bmnz_v(src1, res0, is_less_than); \
676 res1 = __msa_bmnz_v(src2, res1, is_less_than); \
678 res = (v16u8) __msa_ilvr_b((v16i8) res1, (v16i8) res0); \
686 v16u8 p0_asub_q0, p1_asub_p0, q1_asub_q0;
687 v16u8 is_less_than, is_less_than_beta, is_less_than_alpha;
688 v16u8 p1_org, p0_org, q0_org, q1_org;
690 LD_UB4(data - (img_width << 1), img_width, p1_org, p0_org, q0_org, q1_org);
692 p0_asub_q0 = __msa_asub_u_b(p0_org, q0_org);
693 p1_asub_p0 = __msa_asub_u_b(p1_org, p0_org);
694 q1_asub_q0 = __msa_asub_u_b(q1_org, q0_org);
696 is_less_than_alpha = (p0_asub_q0 < alpha_in);
697 is_less_than_beta = (p1_asub_p0 < beta_in);
698 is_less_than = is_less_than_beta & is_less_than_alpha;
699 is_less_than_beta = (q1_asub_q0 < beta_in);
700 is_less_than = is_less_than_beta & is_less_than;
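/* Filtering is applied only where the standard H.264 edge test passes:
 * |p0 - q0| < alpha  &&  |p1 - p0| < beta  &&  |q1 - q0| < beta.
 * The three comparisons above are combined into the is_less_than mask. */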
702 if (!__msa_test_bz_v(is_less_than)) {
703 v16u8 p2_asub_p0, q2_asub_q0, p0, q0, negate_is_less_than_beta;
709 v8i16 p1_org_r, p0_org_r, q0_org_r, q1_org_r;
710 v8i16 p1_org_l, p0_org_l, q0_org_l, q1_org_l;
711 v16u8 q2_org = LD_UB(data + (2 * img_width));
712 v16u8 p2_org = LD_UB(data - (3 * img_width));
713 v16u8 tmp_flag = (v16u8)__msa_fill_b((alpha_in >> 2) + 2);
719 tmp_flag = (p0_asub_q0 < tmp_flag);
721 p2_asub_p0 = __msa_asub_u_b(p2_org, p0_org);
722 is_less_than_beta = (p2_asub_p0 < beta_in);
723 is_less_than_beta = is_less_than_beta & tmp_flag;
724 negate_is_less_than_beta = __msa_xori_b(is_less_than_beta, 0xff);
725 is_less_than_beta = is_less_than_beta & is_less_than;
726 negate_is_less_than_beta = negate_is_less_than_beta & is_less_than;
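/* Strong filtering of the p side is selected per pixel where additionally
 * |p0 - q0| < (alpha >> 2) + 2 and |p2 - p0| < beta hold (is_less_than_beta);
 * elsewhere only the weaker p0 update is used (negate_is_less_than_beta).
 * The q side is handled symmetrically further down. */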
728 q1_org_r = (v8i16) __msa_ilvr_b(zero, (v16i8) q1_org);
729 q1_org_l = (v8i16) __msa_ilvl_b(zero, (v16i8) q1_org);
732 if (!__msa_test_bz_v(is_less_than_beta)) {
733 v8i16 p3_org_l, p3_org_r;
734 v16u8 p3_org = LD_UB(data - (img_width << 2));
741 ILVR_B2_SH(zero, p3_org, zero, p2_org, p3_org_r, p2_r);
743 p2_r, q1_org_r, p0_r, p1_r, p2_r);
745 ILVL_B2_SH(zero, p3_org, zero, p2_org, p3_org_l, p2_l);
747 p2_l, q1_org_l, p0_l, p1_l, p2_l);
749 PCKEV_B3_UB(p0_l, p0_r, p1_l, p1_r, p2_l, p2_r, p0, p1, p2);
751 p0_org = __msa_bmnz_v(p0_org, p0, is_less_than_beta);
752 p1_org = __msa_bmnz_v(p1_org, p1, is_less_than_beta);
753 p2_org = __msa_bmnz_v(p2_org, p2, is_less_than_beta);
755 ST_UB(p1_org, data - (2 * img_width));
756 ST_UB(p2_org, data - (3 * img_width));
763 p0 = (v16u8) __msa_pckev_b((v16i8) p0_l, (v16i8) p0_r);
764 p0_org = __msa_bmnz_v(p0_org, p0, negate_is_less_than_beta);
766 ST_UB(p0_org, data - img_width);
769 q2_asub_q0 = __msa_asub_u_b(q2_org, q0_org);
770 is_less_than_beta = (q2_asub_q0 < beta_in);
771 is_less_than_beta = is_less_than_beta & tmp_flag;
772 negate_is_less_than_beta = __msa_xori_b(is_less_than_beta, 0xff);
773 is_less_than_beta = is_less_than_beta & is_less_than;
774 negate_is_less_than_beta = negate_is_less_than_beta & is_less_than;
777 if (!__msa_test_bz_v(is_less_than_beta)) {
778 v8i16 q3_org_r, q3_org_l;
779 v16u8 q3_org = LD_UB(data + (3 * img_width));
786 ILVR_B2_SH(zero, q3_org, zero, q2_org, q3_org_r, q2_r);
788 q2_r, p1_org_r, q0_r, q1_r, q2_r);
790 ILVL_B2_SH(zero, q3_org, zero, q2_org, q3_org_l, q2_l);
792 q2_l, p1_org_l, q0_l, q1_l, q2_l);
794 PCKEV_B3_UB(q0_l, q0_r, q1_l, q1_r, q2_l, q2_r, q0, q1, q2);
795 q0_org = __msa_bmnz_v(q0_org, q0, is_less_than_beta);
796 q1_org = __msa_bmnz_v(q1_org, q1, is_less_than_beta);
797 q2_org = __msa_bmnz_v(q2_org, q2, is_less_than_beta);
799 ST_UB(q1_org, data + img_width);
800 ST_UB(q2_org, data + 2 * img_width);
807 q0 = (v16u8) __msa_pckev_b((v16i8) q0_l, (v16i8) q0_r);
808 q0_org = __msa_bmnz_v(q0_org, q0, negate_is_less_than_beta);
820 v16u8 alpha, beta, p0_asub_q0;
821 v16u8 is_less_than_alpha, is_less_than, is_less_than_beta;
822 v16u8 p3_org, p2_org, p1_org, p0_org, q0_org, q1_org, q2_org, q3_org;
823 v16u8 p1_asub_p0, q1_asub_q0;
827 v16u8 row0, row1, row2, row3, row4, row5, row6, row7;
828 v16u8 row8, row9, row10, row11, row12, row13, row14, row15;
830 LD_UB8(src, img_width, row0, row1, row2, row3, row4, row5, row6, row7);
831 LD_UB8(src + (8 * img_width), img_width,
832 row8, row9, row10, row11, row12, row13, row14, row15);
835 row4, row5, row6, row7,
836 row8, row9, row10, row11,
837 row12, row13, row14, row15,
838 p3_org, p2_org, p1_org, p0_org,
839 q0_org, q1_org, q2_org, q3_org);
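/* For a vertical edge the 16x8 block around the boundary is loaded row by
 * row and transposed so that p3_org .. q3_org each occupy one vector,
 * letting the same arithmetic as the horizontal-edge path be reused; the
 * filtered results are transposed back before being stored. */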
842 p0_asub_q0 = __msa_asub_u_b(p0_org, q0_org);
843 p1_asub_p0 = __msa_asub_u_b(p1_org, p0_org);
844 q1_asub_q0 = __msa_asub_u_b(q1_org, q0_org);
846 alpha = (v16u8) __msa_fill_b(alpha_in);
847 beta = (v16u8) __msa_fill_b(beta_in);
849 is_less_than_alpha = (p0_asub_q0 < alpha);
850 is_less_than_beta = (p1_asub_p0 < beta);
851 is_less_than = is_less_than_beta & is_less_than_alpha;
852 is_less_than_beta = (q1_asub_q0 < beta);
853 is_less_than = is_less_than_beta & is_less_than;
855 if (!__msa_test_bz_v(is_less_than)) {
861 v16u8 tmp_flag, p0, q0, p2_asub_p0, q2_asub_q0;
862 v16u8 negate_is_less_than_beta;
863 v8i16 p1_org_r, p0_org_r, q0_org_r, q1_org_r;
864 v8i16 p1_org_l, p0_org_l, q0_org_l, q1_org_l;
871 tmp_flag = alpha >> 2;
872 tmp_flag = tmp_flag + 2;
873 tmp_flag = (p0_asub_q0 < tmp_flag);
875 p2_asub_p0 = __msa_asub_u_b(p2_org, p0_org);
876 is_less_than_beta = (p2_asub_p0 < beta);
877 is_less_than_beta = tmp_flag & is_less_than_beta;
878 negate_is_less_than_beta = __msa_xori_b(is_less_than_beta, 0xff);
879 is_less_than_beta = is_less_than_beta & is_less_than;
880 negate_is_less_than_beta = negate_is_less_than_beta & is_less_than;
882 if (!__msa_test_bz_v(is_less_than_beta)) {
884 v8i16 p3_org_r, p3_org_l;
890 ILVR_B2_SH(zero, p3_org, zero, p2_org, p3_org_r, p2_r);
892 p2_r, q1_org_r, p0_r, p1_r, p2_r);
894 ILVL_B2_SH(zero, p3_org, zero, p2_org, p3_org_l, p2_l);
896 p2_l, q1_org_l, p0_l, p1_l, p2_l);
898 PCKEV_B3_UB(p0_l, p0_r, p1_l, p1_r, p2_l, p2_r, p0, p1, p2);
899 p0_org = __msa_bmnz_v(p0_org, p0, is_less_than_beta);
900 p1_org = __msa_bmnz_v(p1_org, p1, is_less_than_beta);
901 p2_org = __msa_bmnz_v(p2_org, p2, is_less_than_beta);
907 p0 = (v16u8) __msa_pckev_b((v16i8) p0_l, (v16i8) p0_r);
908 p0_org = __msa_bmnz_v(p0_org, p0, negate_is_less_than_beta);
910 q2_asub_q0 = __msa_asub_u_b(q2_org, q0_org);
911 is_less_than_beta = (q2_asub_q0 < beta);
913 is_less_than_beta = is_less_than_beta & tmp_flag;
914 negate_is_less_than_beta = __msa_xori_b(is_less_than_beta, 0xff);
916 is_less_than_beta = is_less_than_beta & is_less_than;
917 negate_is_less_than_beta = negate_is_less_than_beta & is_less_than;
919 if (!__msa_test_bz_v(is_less_than_beta)) {
921 v8i16 q3_org_r, q3_org_l;
927 ILVR_B2_SH(zero, q3_org, zero, q2_org, q3_org_r, q2_r);
929 q2_r, p1_org_r, q0_r, q1_r, q2_r);
931 ILVL_B2_SH(zero, q3_org, zero, q2_org, q3_org_l, q2_l);
933 q2_l, p1_org_l, q0_l, q1_l, q2_l);
935 PCKEV_B3_UB(q0_l, q0_r, q1_l, q1_r, q2_l, q2_r, q0, q1, q2);
936 q0_org = __msa_bmnz_v(q0_org, q0, is_less_than_beta);
937 q1_org = __msa_bmnz_v(q1_org, q1, is_less_than_beta);
938 q2_org = __msa_bmnz_v(q2_org, q2, is_less_than_beta);
944 q0 = (v16u8) __msa_pckev_b((v16i8) q0_l, (v16i8) q0_r);
945 q0_org = __msa_bmnz_v(q0_org, q0, negate_is_less_than_beta);
948 v8i16 tp0, tp1, tp2, tp3, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
958 ST4x4_UB(tmp3, tmp3, 0, 1, 2, 3, src, img_width);
959 ST2x4_UB(tmp2, 0, src + 4, img_width);
960 src += 4 * img_width;
961 ST4x4_UB(tmp4, tmp4, 0, 1, 2, 3, src, img_width);
962 ST2x4_UB(tmp2, 4, src + 4, img_width);
963 src += 4 * img_width;
965 ST4x4_UB(tmp6, tmp6, 0, 1, 2, 3, src, img_width);
966 ST2x4_UB(tmp5, 0, src + 4, img_width);
967 src += 4 * img_width;
968 ST4x4_UB(tmp7, tmp7, 0, 1, 2, 3, src, img_width);
969 ST2x4_UB(tmp5, 4, src + 4, img_width);
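/* The filtered columns are transposed back and written out as a 4-byte store
 * plus a 2-byte store per row (ST4x4_UB / ST2x4_UB), i.e. six consecutive
 * bytes covering p2 .. q2 around the vertical edge. */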
978 uint64_t load0, load1;
981 v8u16 src0_r, src1_r, src2_r, src3_r, src4_r, src5_r, src6_r, src7_r;
982 v8u16 dst0_r, dst1_r, dst4_r, dst5_r;
983 v8u16 dst2_x_r, dst2_y_r, dst3_x_r, dst3_y_r;
984 v16u8 dst0, dst1, dst4, dst5, dst2_x, dst2_y, dst3_x, dst3_y;
985 v8i16 tmp0, tmp1, tmp2, tmp3;
987 v16u8 p0_asub_q0, p1_asub_p0, q1_asub_q0, p2_asub_p0, q2_asub_q0;
988 v16u8 is_less_than, is_less_than_alpha, is_less_than_beta;
989 v16u8 is_less_than_beta1, is_less_than_beta2;
1000 load0 = LD(src - 4);
1001 load1 = LD(src + stride - 4);
1002 src0 = (v16i8) __msa_insert_d((v2i64) src0, 0, load0);
1003 src1 = (v16i8) __msa_insert_d((v2i64) src1, 0, load1);
1005 load0 = LD(src + (2 * stride) - 4);
1006 load1 = LD(src + (3 * stride) - 4);
1007 src2 = (v16i8) __msa_insert_d((v2i64) src2, 0, load0);
1008 src3 = (v16i8) __msa_insert_d((v2i64) src3, 0, load1);
1010 load0 = LD(src + (4 * stride) - 4);
1011 load1 = LD(src + (5 * stride) - 4);
1012 src4 = (v16i8) __msa_insert_d((v2i64) src4, 0, load0);
1013 src5 = (v16i8) __msa_insert_d((v2i64) src5, 0, load1);
1015 load0 = LD(src + (6 * stride) - 4);
1016 load1 = LD(src + (7 * stride) - 4);
1017 src6 = (v16i8) __msa_insert_d((v2i64) src6, 0, load0);
1018 src7 = (v16i8) __msa_insert_d((v2i64) src7, 0, load1);
1020 ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src7, src6,
1021 src0, src1, src2, src3);
1023 ILVR_H2_SH(src1, src0, src3, src2, tmp0, tmp2);
1024 ILVL_H2_SH(src1, src0, src3, src2, tmp1, tmp3);
1026 ILVR_W2_SB(tmp2, tmp0, tmp3, tmp1, src6, src3);
1027 ILVL_W2_SB(tmp2, tmp0, tmp3, tmp1, src1, src5);
1028 SLDI_B4_0_SB(src6, src1, src3, src5, src0, src2, src4, src7, 8);
1030 p0_asub_q0 = __msa_asub_u_b((v16u8) src2, (v16u8) src3);
1031 p1_asub_p0 = __msa_asub_u_b((v16u8) src1, (v16u8) src2);
1032 q1_asub_q0 = __msa_asub_u_b((v16u8) src4, (v16u8) src3);
1034 alpha = (v16u8) __msa_fill_b(alpha_in);
1035 beta = (v16u8) __msa_fill_b(beta_in);
1037 is_less_than_alpha = (p0_asub_q0 < alpha);
1038 is_less_than_beta = (p1_asub_p0 < beta);
1039 is_less_than = is_less_than_alpha & is_less_than_beta;
1040 is_less_than_beta = (q1_asub_q0 < beta);
1041 is_less_than = is_less_than & is_less_than_beta;
1046 is_less_than_alpha = (p0_asub_q0 < alpha);
1048 p2_asub_p0 = __msa_asub_u_b((v16u8) src0, (v16u8) src2);
1049 is_less_than_beta1 = (p2_asub_p0 < beta);
1050 q2_asub_q0 = __msa_asub_u_b((v16u8) src5, (v16u8) src3);
1051 is_less_than_beta2 = (q2_asub_q0 < beta);
1053 ILVR_B4_UH(zeros, src0, zeros, src1, zeros, src2, zeros, src3,
1054 src0_r, src1_r, src2_r, src3_r);
1055 ILVR_B4_UH(zeros, src4, zeros, src5, zeros, src6, zeros, src7,
1056 src4_r, src5_r, src6_r, src7_r);
1058 dst2_x_r = src1_r + src2_r + src3_r;
1059 dst2_x_r = src0_r + (2 * (dst2_x_r)) + src4_r;
1060 dst2_x_r = (v8u16) __msa_srari_h((v8i16) dst2_x_r, 3);
1061 dst1_r = src0_r + src1_r + src2_r + src3_r;
1062 dst1_r = (v8u16) __msa_srari_h((v8i16) dst1_r, 2);
1064 dst0_r = (2 * src6_r) + (3 * src0_r);
1065 dst0_r += src1_r + src2_r + src3_r;
1066 dst0_r = (v8u16) __msa_srari_h((v8i16) dst0_r, 3);
1067 dst2_y_r = (2 * src1_r) + src2_r + src4_r;
1068 dst2_y_r = (v8u16) __msa_srari_h((v8i16) dst2_y_r, 2);
1070 PCKEV_B2_UB(dst2_x_r, dst2_x_r, dst2_y_r, dst2_y_r, dst2_x, dst2_y);
1071 dst2_x = __msa_bmnz_v(dst2_y, dst2_x, is_less_than_beta1);
1073 dst3_x_r = src2_r + src3_r + src4_r;
1074 dst3_x_r = src1_r + (2 * dst3_x_r) + src5_r;
1075 dst3_x_r = (v8u16) __msa_srari_h((v8i16) dst3_x_r, 3);
1076 dst4_r = src2_r + src3_r + src4_r + src5_r;
1077 dst4_r = (v8u16) __msa_srari_h((v8i16) dst4_r, 2);
1079 dst5_r = (2 * src7_r) + (3 * src5_r);
1080 dst5_r += src4_r + src3_r + src2_r;
1081 dst5_r = (v8u16) __msa_srari_h((v8i16) dst5_r, 3);
1082 dst3_y_r = (2 * src4_r) + src3_r + src1_r;
1083 dst3_y_r = (v8u16) __msa_srari_h((v8i16) dst3_y_r, 2);
1085 PCKEV_B2_UB(dst3_x_r, dst3_x_r, dst3_y_r, dst3_y_r, dst3_x, dst3_y);
1086 dst3_x = __msa_bmnz_v(dst3_y, dst3_x, is_less_than_beta2);
1088 dst2_y_r = (2 * src1_r) + src2_r + src4_r;
1089 dst2_y_r = (v8u16) __msa_srari_h((v8i16) dst2_y_r, 2);
1090 dst3_y_r = (2 * src4_r) + src3_r + src1_r;
1091 dst3_y_r = (v8u16) __msa_srari_h((v8i16) dst3_y_r, 2);
1093 PCKEV_B2_UB(dst2_y_r, dst2_y_r, dst3_y_r, dst3_y_r, dst2_y, dst3_y);
1095 dst2_x = __msa_bmnz_v(dst2_y, dst2_x, is_less_than_alpha);
1096 dst3_x = __msa_bmnz_v(dst3_y, dst3_x, is_less_than_alpha);
1097 dst2_x = __msa_bmnz_v((v16u8) src2, dst2_x, is_less_than);
1098 dst3_x = __msa_bmnz_v((v16u8) src3, dst3_x, is_less_than);
1100 is_less_than = is_less_than_alpha & is_less_than;
1101 dst1 = (v16u8) __msa_pckev_b((v16i8) dst1_r, (v16i8) dst1_r);
1102 is_less_than_beta1 = is_less_than_beta1 & is_less_than;
1103 dst1 = __msa_bmnz_v((v16u8) src1, dst1, is_less_than_beta1);
1105 dst0 = (v16u8) __msa_pckev_b((v16i8) dst0_r, (v16i8) dst0_r);
1106 dst0 = __msa_bmnz_v((v16u8) src0, dst0, is_less_than_beta1);
1107 dst4 = (v16u8) __msa_pckev_b((v16i8) dst4_r, (v16i8) dst4_r);
1108 is_less_than_beta2 = is_less_than_beta2 & is_less_than;
1109 dst4 = __msa_bmnz_v((v16u8) src4, dst4, is_less_than_beta2);
1110 dst5 = (v16u8) __msa_pckev_b((v16i8) dst5_r, (v16i8) dst5_r);
1111 dst5 = __msa_bmnz_v((v16u8) src5, dst5, is_less_than_beta2);
1113 ILVR_B2_UB(dst1, dst0, dst3_x, dst2_x, dst0, dst1);
1114 dst2_x = (v16u8) __msa_ilvr_b((v16i8) dst5, (v16i8) dst4);
1118 ILVR_W2_UB(tmp2, tmp0, tmp3, tmp1, dst0, dst4);
1120 dst2_x = (v16u8) __msa_ilvl_w((v4i32) tmp2, (v4i32) tmp0);
1121 dst2_y = (v16u8) __msa_ilvl_w((v4i32) tmp3, (v4i32) tmp1);
1124 out0 = __msa_copy_u_w((v4i32) dst0, 0);
1125 out1 = __msa_copy_u_h((v8i16) dst0, 2);
1126 out2 = __msa_copy_u_w((v4i32) dst1, 0);
1127 out3 = __msa_copy_u_h((v8i16) dst1, 2);
1129 SW(out0, (src - 3));
1130 SH(out1, (src + 1));
1132 SW(out2, (src - 3));
1133 SH(out3, (src + 1));
1136 out0 = __msa_copy_u_w((v4i32) dst2_x, 0);
1137 out1 = __msa_copy_u_h((v8i16) dst2_x, 2);
1138 out2 = __msa_copy_u_w((v4i32) dst3_x, 0);
1139 out3 = __msa_copy_u_h((v8i16) dst3_x, 2);
1141 SW(out0, (src - 3));
1142 SH(out1, (src + 1));
1144 SW(out2, (src - 3));
1145 SH(out3, (src + 1));
1148 out0 = __msa_copy_u_w((v4i32) dst4, 0);
1149 out1 = __msa_copy_u_h((v8i16) dst4, 2);
1150 out2 = __msa_copy_u_w((v4i32) dst5, 0);
1151 out3 = __msa_copy_u_h((v8i16) dst5, 2);
1153 SW(out0, (src - 3));
1154 SH(out1, (src + 1));
1156 SW(out2, (src - 3));
1157 SH(out3, (src + 1));
1160 out0 = __msa_copy_u_w((v4i32) dst2_y, 0);
1161 out1 = __msa_copy_u_h((v8i16) dst2_y, 2);
1162 out2 = __msa_copy_u_w((v4i32) dst3_y, 0);
1163 out3 = __msa_copy_u_h((v8i16) dst3_y, 2);
1165 SW(out0, (src - 3));
1166 SH(out1, (src + 1));
1168 SW(out2, (src - 3));
1169 SH(out3, (src + 1));
1179 v8i16 p0_or_q0, q0_or_p0;
1180 v16u8 p1_or_q1_org, p0_or_q0_org, q0_or_p0_org, q1_or_p1_org;
1182 v16u8 p0_asub_q0, p1_asub_p0, q1_asub_q0;
1183 v16u8 is_less_than_alpha, is_less_than_beta;
1184 v8i16 p1_org_r, p0_org_r, q0_org_r, q1_org_r;
1186 alpha = (v16u8) __msa_fill_b(alpha_in);
1187 beta = (v16u8) __msa_fill_b(beta_in);
1189 LD_UB4(data_cb_or_cr - (img_width << 1), img_width,
1190 p1_or_q1_org, p0_or_q0_org, q0_or_p0_org, q1_or_p1_org);
1192 p0_asub_q0 = __msa_asub_u_b(p0_or_q0_org, q0_or_p0_org);
1193 p1_asub_p0 = __msa_asub_u_b(p1_or_q1_org, p0_or_q0_org);
1194 q1_asub_q0 = __msa_asub_u_b(q1_or_p1_org, q0_or_p0_org);
1196 is_less_than_alpha = (p0_asub_q0 < alpha);
1197 is_less_than_beta = (p1_asub_p0 < beta);
1198 is_less_than = is_less_than_beta & is_less_than_alpha;
1199 is_less_than_beta = (q1_asub_q0 < beta);
1200 is_less_than = is_less_than_beta & is_less_than;
1202 is_less_than = (v16u8) __msa_ilvr_d((v2i64) zero, (v2i64) is_less_than);
1204 if (!__msa_test_bz_v(is_less_than)) {
1205 ILVR_B4_SH(zero, p1_or_q1_org, zero, p0_or_q0_org, zero, q0_or_p0_org,
1206 zero, q1_or_p1_org, p1_org_r, p0_org_r, q0_org_r, q1_org_r);
1209 PCKEV_B2_SH(zero, p0_or_q0, zero, q0_or_p0, p0_or_q0, q0_or_p0);
1212 __msa_bmnz_v(p0_or_q0_org, (v16u8) p0_or_q0, is_less_than);
1214 __msa_bmnz_v(q0_or_p0_org, (v16u8) q0_or_p0, is_less_than);
1216 ST_UB(q0_or_p0_org, data_cb_or_cr);
1217 ST_UB(p0_or_q0_org, data_cb_or_cr - img_width);
1227 v16u8 alpha, beta, is_less_than;
1228 v8i16 p0_or_q0, q0_or_p0;
1229 v16u8 p1_or_q1_org, p0_or_q0_org, q0_or_p0_org, q1_or_p1_org;
1231 v16u8 p0_asub_q0, p1_asub_p0, q1_asub_q0;
1232 v16u8 is_less_than_alpha, is_less_than_beta;
1233 v8i16 p1_org_r, p0_org_r, q0_org_r, q1_org_r;
1236 v16u8 row0, row1, row2, row3, row4, row5, row6, row7;
1238 LD_UB8((data_cb_or_cr - 2), img_width,
1239 row0, row1, row2, row3, row4, row5, row6, row7);
1242 p1_or_q1_org, p0_or_q0_org,
1243 q0_or_p0_org, q1_or_p1_org);
1246 alpha = (v16u8) __msa_fill_b(alpha_in);
1247 beta = (v16u8) __msa_fill_b(beta_in);
1249 p0_asub_q0 = __msa_asub_u_b(p0_or_q0_org, q0_or_p0_org);
1250 p1_asub_p0 = __msa_asub_u_b(p1_or_q1_org, p0_or_q0_org);
1251 q1_asub_q0 = __msa_asub_u_b(q1_or_p1_org, q0_or_p0_org);
1253 is_less_than_alpha = (p0_asub_q0 < alpha);
1254 is_less_than_beta = (p1_asub_p0 < beta);
1255 is_less_than = is_less_than_beta & is_less_than_alpha;
1256 is_less_than_beta = (q1_asub_q0 < beta);
1257 is_less_than = is_less_than_beta & is_less_than;
1258 is_less_than = (v16u8) __msa_ilvr_d((v2i64) zero, (v2i64) is_less_than);
1260 if (!__msa_test_bz_v(is_less_than)) {
1261 ILVR_B4_SH(zero, p1_or_q1_org, zero, p0_or_q0_org, zero, q0_or_p0_org,
1262 zero, q1_or_p1_org, p1_org_r, p0_org_r, q0_org_r, q1_org_r);
1268 PCKEV_B2_SH(zero, p0_or_q0, zero, q0_or_p0, p0_or_q0, q0_or_p0);
1271 __msa_bmnz_v(p0_or_q0_org, (v16u8) p0_or_q0, is_less_than);
1273 __msa_bmnz_v(q0_or_p0_org, (v16u8) q0_or_p0, is_less_than);
1274 tmp1 = (v8i16) __msa_ilvr_b((v16i8) q0_or_p0_org, (v16i8) p0_or_q0_org);
1277 ST2x4_UB(tmp1, 0, data_cb_or_cr, img_width);
1278 data_cb_or_cr += 4 * img_width;
1279 ST2x4_UB(tmp1, 4, data_cb_or_cr, img_width);
1292 v16u8 tmp_vec, bs = { 0 };
1294 tmp_vec = (v16u8) __msa_fill_b(bs0);
1295 bs = (v16u8) __msa_insve_w((v4i32) bs, 0, (v4i32) tmp_vec);
1296 tmp_vec = (v16u8) __msa_fill_b(bs1);
1297 bs = (v16u8) __msa_insve_w((v4i32) bs, 1, (v4i32) tmp_vec);
1298 tmp_vec = (v16u8) __msa_fill_b(bs2);
1299 bs = (v16u8) __msa_insve_w((v4i32) bs, 2, (v4i32) tmp_vec);
1300 tmp_vec = (v16u8) __msa_fill_b(bs3);
1301 bs = (v16u8) __msa_insve_w((v4i32) bs, 3, (v4i32) tmp_vec);
1303 if (!__msa_test_bz_v(bs)) {
1305 v16u8 p3_org, p2_org, p1_org, p0_org, q0_org, q1_org, q2_org, q3_org;
1306 v16u8 p0_asub_q0, p1_asub_p0, q1_asub_q0, alpha, beta;
1307 v16u8 is_less_than, is_less_than_beta, is_less_than_alpha;
1308 v16u8 is_bs_greater_than0;
1312 tmp_vec = (v16u8) __msa_fill_b(tc0);
1313 tc = (v16u8) __msa_insve_w((v4i32) tc, 0, (v4i32) tmp_vec);
1314 tmp_vec = (v16u8) __msa_fill_b(tc1);
1315 tc = (v16u8) __msa_insve_w((v4i32) tc, 1, (v4i32) tmp_vec);
1316 tmp_vec = (v16u8) __msa_fill_b(tc2);
1317 tc = (v16u8) __msa_insve_w((v4i32) tc, 2, (v4i32) tmp_vec);
1318 tmp_vec = (v16u8) __msa_fill_b(tc3);
1319 tc = (v16u8) __msa_insve_w((v4i32) tc, 3, (v4i32) tmp_vec);
1321 is_bs_greater_than0 = (zero < bs);
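/* In the bS-based (inter) filters, bs0..bs3 and tc0..tc3 each cover one
 * group of four rows/columns of the edge (one 32-bit lane per group); the
 * masks built here restrict filtering to positions where bS > 0 and the
 * alpha/beta edge test passes. */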
1324 v16u8 row0, row1, row2, row3, row4, row5, row6, row7;
1325 v16u8 row8, row9, row10, row11, row12, row13, row14, row15;
1328 row0, row1, row2, row3, row4, row5, row6, row7);
1329 src += (8 * img_width);
1331 row8, row9, row10, row11, row12, row13, row14, row15);
1334 row8, row9, row10, row11,
1335 row12, row13, row14, row15,
1336 p3_org, p2_org, p1_org, p0_org,
1337 q0_org, q1_org, q2_org, q3_org);
1340 p0_asub_q0 = __msa_asub_u_b(p0_org, q0_org);
1341 p1_asub_p0 = __msa_asub_u_b(p1_org, p0_org);
1342 q1_asub_q0 = __msa_asub_u_b(q1_org, q0_org);
1344 alpha = (v16u8) __msa_fill_b(alpha_in);
1345 beta = (v16u8) __msa_fill_b(beta_in);
1347 is_less_than_alpha = (p0_asub_q0 < alpha);
1348 is_less_than_beta = (p1_asub_p0 < beta);
1349 is_less_than = is_less_than_beta & is_less_than_alpha;
1350 is_less_than_beta = (q1_asub_q0 < beta);
1351 is_less_than = is_less_than_beta & is_less_than;
1352 is_less_than = is_less_than & is_bs_greater_than0;
1354 if (!__msa_test_bz_v(is_less_than)) {
1355 v16i8 negate_tc, sign_negate_tc;
1356 v16u8 p0, q0, p2_asub_p0, q2_asub_q0;
1357 v8i16 tc_r, tc_l, negate_tc_r, i16_negatetc_l;
1358 v8i16 p1_org_r, p0_org_r, q0_org_r, q1_org_r;
1359 v8i16 p1_org_l, p0_org_l, q0_org_l, q1_org_l;
1360 v8i16 p0_r, q0_r, p0_l, q0_l;
1362 negate_tc = zero - (v16i8) tc;
1363 sign_negate_tc = __msa_clti_s_b(negate_tc, 0);
1365 ILVRL_B2_SH(sign_negate_tc, negate_tc, negate_tc_r, i16_negatetc_l);
1372 p2_asub_p0 = __msa_asub_u_b(p2_org, p0_org);
1373 is_less_than_beta = (p2_asub_p0 < beta);
1374 is_less_than_beta = is_less_than_beta & is_less_than;
1376 if (!__msa_test_bz_v(is_less_than_beta)) {
1380 v8i16 p2_org_r = (v8i16) __msa_ilvr_b(zero, (v16i8) p2_org);
1381 v8i16 p2_org_l = (v8i16) __msa_ilvl_b(zero, (v16i8) p2_org);
1384 negate_tc_r, tc_r, p1_r);
1386 i16_negatetc_l, tc_l, p1_l);
1388 p1 = (v16u8) __msa_pckev_b((v16i8) p1_l, (v16i8) p1_r);
1389 p1_org = __msa_bmnz_v(p1_org, p1, is_less_than_beta);
1391 is_less_than_beta = __msa_andi_b(is_less_than_beta, 1);
1392 tc = tc + is_less_than_beta;
1395 q2_asub_q0 = __msa_asub_u_b(q2_org, q0_org);
1396 is_less_than_beta = (q2_asub_q0 < beta);
1397 is_less_than_beta = is_less_than_beta & is_less_than;
1399 q1_org_r = (v8i16) __msa_ilvr_b(zero, (v16i8) q1_org);
1400 q1_org_l = (v8i16) __msa_ilvl_b(zero, (v16i8) q1_org);
1402 if (!__msa_test_bz_v(is_less_than_beta)) {
1406 v8i16 q2_org_r = (v8i16) __msa_ilvr_b(zero, (v16i8) q2_org);
1407 v8i16 q2_org_l = (v8i16) __msa_ilvl_b(zero, (v16i8) q2_org);
1410 negate_tc_r, tc_r, q1_r);
1412 i16_negatetc_l, tc_l, q1_l);
1414 q1 = (v16u8) __msa_pckev_b((v16i8) q1_l, (v16i8) q1_r);
1415 q1_org = __msa_bmnz_v(q1_org, q1, is_less_than_beta);
1417 is_less_than_beta = __msa_andi_b(is_less_than_beta, 1);
1418 tc = tc + is_less_than_beta;
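/* As in the spec, the clipping threshold grows by one for each side whose
 * second-neighbour test passed: the 0/1 mask derived with __msa_andi_b is
 * simply added to tc, so tc ends up as tc0, tc0 + 1 or tc0 + 2 per pixel. */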
1422 v8i16 threshold_r, negate_thresh_r;
1423 v8i16 threshold_l, negate_thresh_l;
1424 v16i8 negate_thresh, sign_negate_thresh;
1426 negate_thresh = zero - (v16i8) tc;
1427 sign_negate_thresh = __msa_clti_s_b(negate_thresh, 0);
1429 ILVR_B2_SH(zero, tc, sign_negate_thresh, negate_thresh,
1430 threshold_r, negate_thresh_r);
1433 negate_thresh_r, threshold_r, p0_r, q0_r);
1435 threshold_l = (v8i16) __msa_ilvl_b(zero, (v16i8) tc);
1436 negate_thresh_l = (v8i16) __msa_ilvl_b(sign_negate_thresh,
1440 negate_thresh_l, threshold_l, p0_l, q0_l);
1445 p0_org = __msa_bmnz_v(p0_org, p0, is_less_than);
1446 q0_org = __msa_bmnz_v(q0_org, q0, is_less_than);
1449 v16i8 tp0, tp1, tp2, tp3;
1451 v4i32 tmp3, tmp4, tmp6, tmp7;
1452 uint32_t out0, out2;
1453 uint16_t out1, out3;
1464 out0 = __msa_copy_u_w(tmp3, 0);
1465 out1 = __msa_copy_u_h(tmp2, 0);
1466 out2 = __msa_copy_u_w(tmp3, 1);
1467 out3 = __msa_copy_u_h(tmp2, 1);
1470 SH(out1, (src + 4));
1473 SH(out3, (src + 4));
1475 out0 = __msa_copy_u_w(tmp3, 2);
1476 out1 = __msa_copy_u_h(tmp2, 2);
1477 out2 = __msa_copy_u_w(tmp3, 3);
1478 out3 = __msa_copy_u_h(tmp2, 3);
1482 SH(out1, (src + 4));
1485 SH(out3, (src + 4));
1487 out0 = __msa_copy_u_w(tmp4, 0);
1488 out1 = __msa_copy_u_h(tmp2, 4);
1489 out2 = __msa_copy_u_w(tmp4, 1);
1490 out3 = __msa_copy_u_h(tmp2, 5);
1494 SH(out1, (src + 4));
1497 SH(out3, (src + 4));
1499 out0 = __msa_copy_u_w(tmp4, 2);
1500 out1 = __msa_copy_u_h(tmp2, 6);
1501 out2 = __msa_copy_u_w(tmp4, 3);
1502 out3 = __msa_copy_u_h(tmp2, 7);
1506 SH(out1, (src + 4));
1509 SH(out3, (src + 4));
1511 out0 = __msa_copy_u_w(tmp6, 0);
1512 out1 = __msa_copy_u_h(tmp5, 0);
1513 out2 = __msa_copy_u_w(tmp6, 1);
1514 out3 = __msa_copy_u_h(tmp5, 1);
1518 SH(out1, (src + 4));
1521 SH(out3, (src + 4));
1523 out0 = __msa_copy_u_w(tmp6, 2);
1524 out1 = __msa_copy_u_h(tmp5, 2);
1525 out2 = __msa_copy_u_w(tmp6, 3);
1526 out3 = __msa_copy_u_h(tmp5, 3);
1530 SH(out1, (src + 4));
1533 SH(out3, (src + 4));
1535 out0 = __msa_copy_u_w(tmp7, 0);
1536 out1 = __msa_copy_u_h(tmp5, 4);
1537 out2 = __msa_copy_u_w(tmp7, 1);
1538 out3 = __msa_copy_u_h(tmp5, 5);
1542 SH(out1, (src + 4));
1545 SH(out3, (src + 4));
1547 out0 = __msa_copy_u_w(tmp7, 2);
1548 out1 = __msa_copy_u_h(tmp5, 6);
1549 out2 = __msa_copy_u_w(tmp7, 3);
1550 out3 = __msa_copy_u_h(tmp5, 7);
1554 SH(out1, (src + 4));
1557 SH(out3, (src + 4));
1570 uint32_t image_width)
1575 tmp_vec = (v16u8) __msa_fill_b(bs0);
1576 bs = (v16u8) __msa_insve_w((v4i32) bs, 0, (v4i32) tmp_vec);
1577 tmp_vec = (v16u8) __msa_fill_b(bs1);
1578 bs = (v16u8) __msa_insve_w((v4i32) bs, 1, (v4i32) tmp_vec);
1579 tmp_vec = (v16u8) __msa_fill_b(bs2);
1580 bs = (v16u8) __msa_insve_w((v4i32) bs, 2, (v4i32) tmp_vec);
1581 tmp_vec = (v16u8) __msa_fill_b(bs3);
1582 bs = (v16u8) __msa_insve_w((v4i32) bs, 3, (v4i32) tmp_vec);
1584 if (!__msa_test_bz_v(bs)) {
1585 v16u8 alpha, beta, is_less_than, is_less_than_beta;
1586 v16u8 p0, q0, p2_org, p1_org, p0_org, q0_org, q1_org, q2_org;
1587 v16u8 p0_asub_q0, p1_asub_p0, q1_asub_q0;
1588 v16u8 is_less_than_alpha, is_bs_greater_than0;
1589 v8i16 p0_r, q0_r, p0_l, q0_l;
1590 v8i16 p1_org_r, p0_org_r, q0_org_r, q1_org_r;
1591 v8i16 p1_org_l, p0_org_l, q0_org_l, q1_org_l;
1595 tmp_vec = (v16u8) __msa_fill_b(tc0);
1596 tc = (v16i8) __msa_insve_w((v4i32) tc, 0, (v4i32) tmp_vec);
1597 tmp_vec = (v16u8) __msa_fill_b(tc1);
1598 tc = (v16i8) __msa_insve_w((v4i32) tc, 1, (v4i32) tmp_vec);
1599 tmp_vec = (v16u8) __msa_fill_b(tc2);
1600 tc = (v16i8) __msa_insve_w((v4i32) tc, 2, (v4i32) tmp_vec);
1601 tmp_vec = (v16u8) __msa_fill_b(tc3);
1602 tc = (v16i8) __msa_insve_w((v4i32) tc, 3, (v4i32) tmp_vec);
1604 alpha = (v16u8) __msa_fill_b(alpha_in);
1605 beta = (v16u8) __msa_fill_b(beta_in);
1607 LD_UB5(data - (3 * image_width), image_width,
1608 p2_org, p1_org, p0_org, q0_org, q1_org);
1610 is_bs_greater_than0 = ((v16u8) zero < bs);
1611 p0_asub_q0 = __msa_asub_u_b(p0_org, q0_org);
1612 p1_asub_p0 = __msa_asub_u_b(p1_org, p0_org);
1613 q1_asub_q0 = __msa_asub_u_b(q1_org, q0_org);
1615 is_less_than_alpha = (p0_asub_q0 < alpha);
1616 is_less_than_beta = (p1_asub_p0 < beta);
1617 is_less_than = is_less_than_beta & is_less_than_alpha;
1618 is_less_than_beta = (q1_asub_q0 < beta);
1619 is_less_than = is_less_than_beta & is_less_than;
1620 is_less_than = is_less_than & is_bs_greater_than0;
1622 if (!__msa_test_bz_v(is_less_than)) {
1623 v16i8 sign_negate_tc, negate_tc;
1624 v8i16 negate_tc_r, i16_negatetc_l, tc_l, tc_r;
1625 v16u8 p2_asub_p0, q2_asub_q0;
1627 q2_org = LD_UB(data + (2 * image_width));
1628 negate_tc = zero - tc;
1629 sign_negate_tc = __msa_clti_s_b(negate_tc, 0);
1631 ILVRL_B2_SH(sign_negate_tc, negate_tc, negate_tc_r, i16_negatetc_l);
1638 p2_asub_p0 = __msa_asub_u_b(p2_org, p0_org);
1639 is_less_than_beta = (p2_asub_p0 < beta);
1640 is_less_than_beta = is_less_than_beta & is_less_than;
1642 if (!__msa_test_bz_v(is_less_than_beta)) {
1646 v8i16 p2_org_r = (v8i16) __msa_ilvr_b(zero, (v16i8) p2_org);
1647 v8i16 p2_org_l = (v8i16) __msa_ilvl_b(zero, (v16i8) p2_org);
1650 negate_tc_r, tc_r, p1_r);
1652 i16_negatetc_l, tc_l, p1_l);
1654 p1 = (v16u8) __msa_pckev_b((v16i8) p1_l, (v16i8) p1_r);
1655 p1_org = __msa_bmnz_v(p1_org, p1, is_less_than_beta);
1656 ST_UB(p1_org, data - (2 * image_width));
1658 is_less_than_beta = __msa_andi_b(is_less_than_beta, 1);
1659 tc = tc + (v16i8) is_less_than_beta;
1662 q2_asub_q0 = __msa_asub_u_b(q2_org, q0_org);
1663 is_less_than_beta = (q2_asub_q0 < beta);
1664 is_less_than_beta = is_less_than_beta & is_less_than;
1666 q1_org_r = (v8i16) __msa_ilvr_b(zero, (v16i8) q1_org);
1667 q1_org_l = (v8i16) __msa_ilvl_b(zero, (v16i8) q1_org);
1669 if (!__msa_test_bz_v(is_less_than_beta)) {
1673 v8i16 q2_org_r = (v8i16) __msa_ilvr_b(zero, (v16i8) q2_org);
1674 v8i16 q2_org_l = (v8i16) __msa_ilvl_b(zero, (v16i8) q2_org);
1677 negate_tc_r, tc_r, q1_r);
1679 i16_negatetc_l, tc_l, q1_l);
1681 q1 = (v16u8) __msa_pckev_b((v16i8) q1_l, (v16i8) q1_r);
1682 q1_org = __msa_bmnz_v(q1_org, q1, is_less_than_beta);
1683 ST_UB(q1_org, data + image_width);
1685 is_less_than_beta = __msa_andi_b(is_less_than_beta, 1);
1686 tc = tc + (v16i8) is_less_than_beta;
1689 v16i8 negate_thresh, sign_negate_thresh;
1690 v8i16 threshold_r, threshold_l;
1691 v8i16 negate_thresh_l, negate_thresh_r;
1693 negate_thresh = zero - tc;
1694 sign_negate_thresh = __msa_clti_s_b(negate_thresh, 0);
1696 ILVR_B2_SH(zero, tc, sign_negate_thresh, negate_thresh,
1697 threshold_r, negate_thresh_r);
1699 negate_thresh_r, threshold_r, p0_r, q0_r);
1701 threshold_l = (v8i16) __msa_ilvl_b(zero, tc);
1702 negate_thresh_l = (v8i16) __msa_ilvl_b(sign_negate_thresh,
1705 negate_thresh_l, threshold_l, p0_l, q0_l);
1710 p0_org = __msa_bmnz_v(p0_org, p0, is_less_than);
1711 q0_org = __msa_bmnz_v(q0_org, q0, is_less_than);
1713 ST_UB(p0_org, (data - image_width));
1714 ST_UB(q0_org, data);
1724 uint32_t out0, out1, out2, out3;
1737 v8i16 src4, src5, src6, src7;
1738 v16u8 p0_asub_q0, p1_asub_p0, q1_asub_q0, p2_asub_p0, q2_asub_q0;
1739 v16u8 is_less_than, is_less_than_alpha, is_less_than_beta;
1740 v16u8 is_less_than_beta1, is_less_than_beta2;
1741 v8i16 tc, tc_orig_r, tc_plus1;
1742 v16u8 is_tc_orig1, is_tc_orig2, tc_orig = { 0 };
1743 v8i16 p0_ilvr_q0, p0_add_q0, q0_sub_p0, p1_sub_q1;
1744 v8u16 src2_r, src3_r;
1745 v8i16 p2_r, p1_r, q2_r, q1_r;
1746 v16u8 p2, q2, p0, q0;
1748 v16i8 zeros = { 0 };
1750 alpha = (v16u8) __msa_fill_b(alpha_in);
1751 beta = (v16u8) __msa_fill_b(beta_in);
1756 load = LD(data - 3);
1757 inp0 = (v16i8) __msa_insert_d((v2i64) inp0, 0, load);
1758 load = LD(data - 3 + stride);
1759 inp1 = (v16i8) __msa_insert_d((v2i64) inp1, 0, load);
1766 load = LD(data - 3);
1767 inp2 = (v16i8) __msa_insert_d((v2i64) inp2, 0, load);
1768 load = LD(data - 3 + stride);
1769 inp3 = (v16i8) __msa_insert_d((v2i64) inp3, 0, load);
1776 load = LD(data - 3);
1777 inp4 = (v16i8) __msa_insert_d((v2i64) inp4, 0, load);
1778 load = LD(data - 3 + stride);
1779 inp5 = (v16i8) __msa_insert_d((v2i64) inp5, 0, load);
1786 load = LD(data - 3);
1787 inp6 = (v16i8) __msa_insert_d((v2i64) inp6, 0, load);
1788 load = LD(data - 3 + stride);
1789 inp7 = (v16i8) __msa_insert_d((v2i64) inp7, 0, load);
1793 ILVR_B4_SB(inp1, inp0, inp3, inp2, inp5, inp4, inp7, inp6,
1794 src0, src1, src2, src3);
1796 ILVR_H2_SH(src1, src0, src3, src2, src4, src6);
1797 ILVL_H2_SH(src1, src0, src3, src2, src5, src7);
1799 src0 = (v16i8) __msa_ilvr_w((v4i32) src6, (v4i32) src4);
1800 src1 = __msa_sldi_b(zeros, (v16i8) src0, 8);
1801 src2 = (v16i8) __msa_ilvl_w((v4i32) src6, (v4i32) src4);
1802 src3 = __msa_sldi_b(zeros, (v16i8) src2, 8);
1803 src4 = (v8i16) __msa_ilvr_w((v4i32) src7, (v4i32) src5);
1804 src5 = (v8i16) __msa_sldi_b(zeros, (v16i8) src4, 8);
1806 p0_asub_q0 = __msa_asub_u_b((v16u8) src2, (v16u8) src3);
1807 p1_asub_p0 = __msa_asub_u_b((v16u8) src1, (v16u8) src2);
1808 q1_asub_q0 = __msa_asub_u_b((v16u8) src4, (v16u8) src3);
1809 p2_asub_p0 = __msa_asub_u_b((v16u8) src0, (v16u8) src2);
1810 q2_asub_q0 = __msa_asub_u_b((v16u8) src5, (v16u8) src3);
1812 is_less_than_alpha = (p0_asub_q0 < alpha);
1813 is_less_than_beta = (p1_asub_p0 < beta);
1814 is_less_than = is_less_than_alpha & is_less_than_beta;
1815 is_less_than_beta = (q1_asub_q0 < beta);
1816 is_less_than = is_less_than_beta & is_less_than;
1818 is_less_than_beta1 = (p2_asub_p0 < beta);
1819 is_less_than_beta2 = (q2_asub_q0 < beta);
1821 p0_ilvr_q0 = (v8i16) __msa_ilvr_b((v16i8) src3, (v16i8) src2);
1822 p0_add_q0 = (v8i16) __msa_hadd_u_h((v16u8) p0_ilvr_q0, (v16u8) p0_ilvr_q0);
1823 p0_add_q0 = __msa_srari_h(p0_add_q0, 1);
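/* The rounded average (p0 + q0 + 1) >> 1 computed above feeds the p1/q1
 * updates, the same formula as in AVC_LPF_P1_OR_Q1:
 *     p1' = p1 + clip3(-tc0, tc0, (p2 + ((p0 + q0 + 1) >> 1) - 2*p1) >> 1) */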
1825 ILVR_B2_SH(zeros, src0, zeros, src1, p2_r, p1_r);
1829 ILVR_B2_SH(zeros, src5, zeros, src4, q2_r, q1_r);
1835 tc_orig = (v16u8) __msa_insert_w((v4i32) tc_orig, 0, tc_val);
1836 tc_orig = (v16u8) __msa_ilvr_b((v16i8) tc_orig, (v16i8) tc_orig);
1837 is_tc_orig1 = tc_orig;
1838 is_tc_orig2 = tc_orig;
1839 tc_orig_r = (v8i16) __msa_ilvr_b(zeros, (v16i8) tc_orig);
1842 p2_r = CLIP_SH(p2_r, -tc_orig_r, tc_orig_r);
1843 q2_r = CLIP_SH(q2_r, -tc_orig_r, tc_orig_r);
1850 is_tc_orig1 = (zeros < is_tc_orig1);
1851 is_tc_orig2 = is_tc_orig1;
1852 is_tc_orig1 = is_less_than_beta1 & is_tc_orig1;
1853 is_tc_orig2 = is_less_than_beta2 & is_tc_orig2;
1854 is_tc_orig1 = is_less_than & is_tc_orig1;
1855 is_tc_orig2 = is_less_than & is_tc_orig2;
1857 p2 = __msa_bmnz_v((v16u8) src1, p2, is_tc_orig1);
1858 q2 = __msa_bmnz_v((v16u8) src4, q2, is_tc_orig2);
1860 q0_sub_p0 = __msa_hsub_u_h((v16u8) p0_ilvr_q0, (v16u8) p0_ilvr_q0);
1862 p1_sub_q1 = p1_r - q1_r;
1863 q0_sub_p0 += p1_sub_q1;
1864 q0_sub_p0 = __msa_srari_h(q0_sub_p0, 3);
1867 is_less_than_beta1 = (v16u8) __msa_ilvr_b((v16i8) is_less_than_beta1,
1868 (v16i8) is_less_than_beta1);
1869 tc = (v8i16) __msa_bmnz_v((v16u8) tc, (v16u8) tc_plus1, is_less_than_beta1);
1871 is_less_than_beta2 = (v16u8) __msa_ilvr_b((v16i8) is_less_than_beta2,
1872 (v16i8) is_less_than_beta2);
1873 tc = (v8i16) __msa_bmnz_v((v16u8) tc, (v16u8) tc_plus1, is_less_than_beta2);
1875 q0_sub_p0 = CLIP_SH(q0_sub_p0, -tc, tc);
1877 ILVR_B2_UH(zeros, src2, zeros, src3, src2_r, src3_r);
1878 src2_r += q0_sub_p0;
1879 src3_r -= q0_sub_p0;
1884 PCKEV_B2_UB(src2_r, src2_r, src3_r, src3_r, p0, q0);
1886 p0 = __msa_bmnz_v((v16u8) src2, p0, is_less_than);
1887 q0 = __msa_bmnz_v((v16u8) src3, q0, is_less_than);
1895 out0 = __msa_copy_u_w(dst0, 0);
1896 out1 = __msa_copy_u_w(dst0, 1);
1897 out2 = __msa_copy_u_w(dst0, 2);
1898 out3 = __msa_copy_u_w(dst0, 3);
1903 SW(out0, (data - 2));
1905 SW(out1, (data - 2));
1912 SW(out2, (data - 2));
1914 SW(out3, (data - 2));
1918 out0 = __msa_copy_u_w(dst1, 0);
1919 out1 = __msa_copy_u_w(dst1, 1);
1920 out2 = __msa_copy_u_w(dst1, 2);
1921 out3 = __msa_copy_u_w(dst1, 3);
1926 SW(out0, (data - 2));
1928 SW(out1, (data - 2));
1933 SW(out2, (data - 2));
1935 SW(out3, (data - 2));
1952 v16u8 p0, q0, p0_asub_q0, p1_asub_p0, q1_asub_q0;
1954 v16u8 is_less_than_beta, is_less_than_alpha, is_bs_greater_than0;
1956 v16u8 p1_org, p0_org, q0_org, q1_org;
1957 v8i16 p1_org_r, p0_org_r, q0_org_r, q1_org_r;
1958 v16i8 negate_tc, sign_negate_tc;
1959 v8i16 tc_r, negate_tc_r;
1962 tmp_vec = (v8i16) __msa_fill_b(bs0);
1963 bs = __msa_insve_h(bs, 0, tmp_vec);
1964 tmp_vec = (v8i16) __msa_fill_b(bs1);
1965 bs = __msa_insve_h(bs, 1, tmp_vec);
1966 tmp_vec = (v8i16) __msa_fill_b(bs2);
1967 bs = __msa_insve_h(bs, 2, tmp_vec);
1968 tmp_vec = (v8i16) __msa_fill_b(bs3);
1969 bs = __msa_insve_h(bs, 3, tmp_vec);
1971 if (!__msa_test_bz_v((v16u8) bs)) {
1972 tmp_vec = (v8i16) __msa_fill_b(tc0);
1973 tc = __msa_insve_h(tc, 0, tmp_vec);
1974 tmp_vec = (v8i16) __msa_fill_b(tc1);
1975 tc = __msa_insve_h(tc, 1, tmp_vec);
1976 tmp_vec = (v8i16) __msa_fill_b(tc2);
1977 tc = __msa_insve_h(tc, 2, tmp_vec);
1978 tmp_vec = (v8i16) __msa_fill_b(tc3);
1979 tc = __msa_insve_h(tc, 3, tmp_vec);
1981 is_bs_greater_than0 = (v16u8) (zero < (v16i8) bs);
1983 alpha = (v16u8) __msa_fill_b(alpha_in);
1984 beta = (v16u8) __msa_fill_b(beta_in);
1986 LD_UB4(data - (img_width << 1), img_width,
1987 p1_org, p0_org, q0_org, q1_org);
1989 p0_asub_q0 = __msa_asub_u_b(p0_org, q0_org);
1990 p1_asub_p0 = __msa_asub_u_b(p1_org, p0_org);
1991 q1_asub_q0 = __msa_asub_u_b(q1_org, q0_org);
1993 is_less_than_alpha = (p0_asub_q0 < alpha);
1994 is_less_than_beta = (p1_asub_p0 < beta);
1995 is_less_than = is_less_than_beta & is_less_than_alpha;
1996 is_less_than_beta = (q1_asub_q0 < beta);
1997 is_less_than = is_less_than_beta & is_less_than;
1998 is_less_than = is_less_than & is_bs_greater_than0;
2000 is_less_than = (v16u8) __msa_ilvr_d((v2i64) zero, (v2i64) is_less_than);
2002 if (!__msa_test_bz_v(is_less_than)) {
2003 negate_tc = zero - (v16i8) tc;
2004 sign_negate_tc = __msa_clti_s_b(negate_tc, 0);
2006 ILVR_B2_SH(zero, tc, sign_negate_tc, negate_tc, tc_r, negate_tc_r);
2008 ILVR_B4_SH(zero, p1_org, zero, p0_org, zero, q0_org, zero, q1_org,
2009 p1_org_r, p0_org_r, q0_org_r, q1_org_r);
2011 AVC_LPF_P0Q0(q0_org_r, p0_org_r, p1_org_r, q1_org_r, negate_tc_r,
2016 p0_org = __msa_bmnz_v(p0_org, p0, is_less_than);
2017 q0_org = __msa_bmnz_v(q0_org, q0, is_less_than);
2019 ST_UB(q0_org, data);
2020 ST_UB(p0_org, (data - img_width));
2036 v16u8 p0_asub_q0, p1_asub_p0, q1_asub_q0;
2037 v16u8 is_less_than, is_less_than_beta, is_less_than_alpha;
2041 v16u8 p1_org, p0_org, q0_org, q1_org;
2042 v8i16 p1_org_r, p0_org_r, q0_org_r, q1_org_r;
2043 v16u8 is_bs_greater_than0;
2044 v8i16 tc_r, negate_tc_r;
2045 v16i8 negate_tc, sign_negate_tc;
2047 v16u8 row0, row1, row2, row3, row4, row5, row6, row7;
2048 v8i16 tmp1, tmp_vec, bs = { 0 };
2051 tmp_vec = (v8i16) __msa_fill_b(bs0);
2052 bs = __msa_insve_h(bs, 0, tmp_vec);
2053 tmp_vec = (v8i16) __msa_fill_b(bs1);
2054 bs = __msa_insve_h(bs, 1, tmp_vec);
2055 tmp_vec = (v8i16) __msa_fill_b(bs2);
2056 bs = __msa_insve_h(bs, 2, tmp_vec);
2057 tmp_vec = (v8i16) __msa_fill_b(bs3);
2058 bs = __msa_insve_h(bs, 3, tmp_vec);
2060 if (!__msa_test_bz_v((v16u8) bs)) {
2061 tmp_vec = (v8i16) __msa_fill_b(tc0);
2062 tc = __msa_insve_h(tc, 0, tmp_vec);
2063 tmp_vec = (v8i16) __msa_fill_b(tc1);
2064 tc = __msa_insve_h(tc, 1, tmp_vec);
2065 tmp_vec = (v8i16) __msa_fill_b(tc2);
2066 tc = __msa_insve_h(tc, 2, tmp_vec);
2067 tmp_vec = (v8i16) __msa_fill_b(tc3);
2068 tc = __msa_insve_h(tc, 3, tmp_vec);
2070 is_bs_greater_than0 = (v16u8) (zero < (v16i8) bs);
2072 LD_UB8((data - 2), img_width,
2073 row0, row1, row2, row3, row4, row5, row6, row7);
2076 row4, row5, row6, row7,
2077 p1_org, p0_org, q0_org, q1_org);
2079 p0_asub_q0 = __msa_asub_u_b(p0_org, q0_org);
2080 p1_asub_p0 = __msa_asub_u_b(p1_org, p0_org);
2081 q1_asub_q0 = __msa_asub_u_b(q1_org, q0_org);
2083 alpha = (v16u8) __msa_fill_b(alpha_in);
2084 beta = (v16u8) __msa_fill_b(beta_in);
2086 is_less_than_alpha = (p0_asub_q0 < alpha);
2087 is_less_than_beta = (p1_asub_p0 < beta);
2088 is_less_than = is_less_than_beta & is_less_than_alpha;
2089 is_less_than_beta = (q1_asub_q0 < beta);
2090 is_less_than = is_less_than_beta & is_less_than;
2091 is_less_than = is_bs_greater_than0 & is_less_than;
2093 is_less_than = (v16u8) __msa_ilvr_d((v2i64) zero, (v2i64) is_less_than);
2095 if (!__msa_test_bz_v(is_less_than)) {
2096 ILVR_B4_SH(zero, p1_org, zero, p0_org, zero, q0_org, zero, q1_org,
2097 p1_org_r, p0_org_r, q0_org_r, q1_org_r);
2099 negate_tc = zero - (v16i8) tc;
2100 sign_negate_tc = __msa_clti_s_b(negate_tc, 0);
2102 ILVR_B2_SH(sign_negate_tc, negate_tc, zero, tc, negate_tc_r, tc_r);
2104 AVC_LPF_P0Q0(q0_org_r, p0_org_r, p1_org_r, q1_org_r, negate_tc_r,
2109 p0_org = __msa_bmnz_v(p0_org, p0, is_less_than);
2110 q0_org = __msa_bmnz_v(q0_org, q0, is_less_than);
2111 tmp1 = (v8i16) __msa_ilvr_b((v16i8) q0_org, (v16i8) p0_org);
2114 src += 4 * img_width;
2125 v16u8 alpha, beta, res;
2127 alpha = (v16u8) __msa_fill_b(alpha_in);
2128 beta = (v16u8) __msa_fill_b(beta_in);
2130 for (col = 0; col < 4; col++) {
2131 tc_val = (tc0[col] - 1) + 1;
2139 ST2x4_UB(res, 0, (src - 1), stride);
2151 v16u8 alpha, beta, res;
2153 alpha = (v16u8) __msa_fill_b(alpha_in);
2154 beta = (v16u8) __msa_fill_b(beta_in);
2156 for (col = 0; col < 4; col++) {
2157 tc_val = (tc0[col] - 1) + 1;
2166 out0 = __msa_copy_s_h((v8i16) res, 0);
2167 out1 = __msa_copy_s_h((v8i16) res, 1);
2169 SH(out0, (src - 1));
2171 SH(out1, (src - 1));
2177 int alpha, int beta, int8_t *tc)
2194 tc[0], tc[1], tc[2], tc[3],
2195 alpha, beta, img_width);
2199 int alpha, int beta, int8_t *tc)
2217 tc[0], tc[1], tc[2], tc[3],
2218 alpha, beta, img_width);
2222 int alpha, int beta, int8_t *tc)
2239 tc[0], tc[1], tc[2], tc[3],
2240 alpha, beta, img_width);
2244 int alpha, int beta, int8_t *tc)
2261 tc[0], tc[1], tc[2], tc[3],
2262 alpha, beta, img_width);
2266 int alpha, int beta)
2270 (unsigned int) img_width);
2274 int alpha, int beta)
2278 (unsigned int) img_width);
2282 int alpha, int beta)
2286 (unsigned int) img_width);
2290 int alpha, int beta)
2294 (unsigned int) img_width);
2332 int height, int log2_denom,
2333 int weight_src, int offset_in)
2335 uint32_t offset_val;
2337 v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
2338 v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
2339 v8i16 src0_l, src1_l, src2_l, src3_l, src0_r, src1_r, src2_r, src3_r;
2340 v8i16 src4_l, src5_l, src6_l, src7_l, src4_r, src5_r, src6_r, src7_r;
2341 v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
2342 v8i16 tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
2343 v8i16 wgt, denom, offset;
2345 offset_val = (unsigned) offset_in << log2_denom;
2347 wgt = __msa_fill_h(weight_src);
2348 offset = __msa_fill_h(offset_val);
2349 denom = __msa_fill_h(log2_denom);
2351 LD_UB8(src, stride, src0, src1, src2, src3, src4, src5, src6, src7);
2352 ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3, src0_r, src1_r,
2353 src2_r, src3_r);
2354 ILVL_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3, src0_l, src1_l,
2355 src2_l, src3_l);
2356 ILVR_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7, src4_r, src5_r,
2357 src6_r, src7_r);
2358 ILVL_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7, src4_l, src5_l,
2359 src6_l, src7_l);
2360 MUL4(wgt, src0_r, wgt, src0_l, wgt, src1_r, wgt, src1_l, tmp0, tmp1, tmp2,
2361 tmp3);
2362 MUL4(wgt, src2_r, wgt, src2_l, wgt, src3_r, wgt, src3_l, tmp4, tmp5, tmp6,
2363 tmp7);
2364 MUL4(wgt, src4_r, wgt, src4_l, wgt, src5_r, wgt, src5_l, tmp8, tmp9, tmp10,
2365 tmp11);
2366 MUL4(wgt, src6_r, wgt, src6_l, wgt, src7_r, wgt, src7_l, tmp12, tmp13,
2367 tmp14, tmp15);
2368 ADDS_SH4_SH(tmp0, offset, tmp1, offset, tmp2, offset, tmp3, offset, tmp0,
2369 tmp1, tmp2, tmp3);
2370 ADDS_SH4_SH(tmp4, offset, tmp5, offset, tmp6, offset, tmp7, offset, tmp4,
2371 tmp5, tmp6, tmp7);
2372 ADDS_SH4_SH(tmp8, offset, tmp9, offset, tmp10, offset, tmp11, offset, tmp8,
2373 tmp9, tmp10, tmp11);
2374 ADDS_SH4_SH(tmp12, offset, tmp13, offset, tmp14, offset, tmp15, offset,
2375 tmp12, tmp13, tmp14, tmp15);
2376 MAXI_SH8_SH(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, 0);
2377 MAXI_SH8_SH(tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, 0);
2378 SRLR_H8_SH(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, denom);
2379 SRLR_H8_SH(tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, denom);
2380 SAT_UH8_SH(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, 7);
2381 SAT_UH8_SH(tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, 7);
2382 PCKEV_B4_UB(tmp1, tmp0, tmp3, tmp2, tmp5, tmp4, tmp7, tmp6, dst0, dst1,
2383 dst2, dst3);
2384 PCKEV_B4_UB(tmp9, tmp8, tmp11, tmp10, tmp13, tmp12, tmp15, tmp14, dst4,
2385 dst5, dst6, dst7);
2386 ST_UB8(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, src, stride);
2390 LD_UB8(src, stride, src0, src1, src2, src3, src4, src5, src6, src7);
2391 ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3, src0_r,
2392 src1_r, src2_r, src3_r);
2393 ILVL_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3, src0_l,
2394 src1_l, src2_l, src3_l);
2395 ILVR_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7, src4_r,
2396 src5_r, src6_r, src7_r);
2397 ILVL_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7, src4_l,
2398 src5_l, src6_l, src7_l);
2399 MUL4(wgt, src0_r, wgt, src0_l, wgt, src1_r, wgt, src1_l, tmp0, tmp1,
2400 tmp2, tmp3);
2401 MUL4(wgt, src2_r, wgt, src2_l, wgt, src3_r, wgt, src3_l, tmp4, tmp5,
2402 tmp6, tmp7);
2403 MUL4(wgt, src4_r, wgt, src4_l, wgt, src5_r, wgt, src5_l, tmp8, tmp9,
2404 tmp10, tmp11);
2405 MUL4(wgt, src6_r, wgt, src6_l, wgt, src7_r, wgt, src7_l, tmp12, tmp13,
2406 tmp14, tmp15);
2407 ADDS_SH4_SH(tmp0, offset, tmp1, offset, tmp2, offset, tmp3, offset,
2408 tmp0, tmp1, tmp2, tmp3);
2409 ADDS_SH4_SH(tmp4, offset, tmp5, offset, tmp6, offset, tmp7, offset,
2410 tmp4, tmp5, tmp6, tmp7);
2411 ADDS_SH4_SH(tmp8, offset, tmp9, offset, tmp10, offset, tmp11, offset,
2412 tmp8, tmp9, tmp10, tmp11);
2413 ADDS_SH4_SH(tmp12, offset, tmp13, offset, tmp14, offset, tmp15, offset,
2414 tmp12, tmp13, tmp14, tmp15);
2415 MAXI_SH8_SH(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, 0);
2416 MAXI_SH8_SH(tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, 0);
2417 SRLR_H8_SH(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, denom);
2418 SRLR_H8_SH(tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, denom);
2419 SAT_UH8_SH(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, 7);
2420 SAT_UH8_SH(tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, 7);
2421 PCKEV_B4_UB(tmp1, tmp0, tmp3, tmp2, tmp5, tmp4, tmp7, tmp6, dst0, dst1,
2422 dst2, dst3);
2423 PCKEV_B4_UB(tmp9, tmp8, tmp11, tmp10, tmp13, tmp12, tmp15, tmp14, dst4,
2424 dst5, dst6, dst7);
2425 ST_UB8(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, src, stride);
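/* A minimal scalar sketch of what each lane of the kernel above computes,
 * written in the same operation order as the MSA intrinsics; the helper name
 * is illustrative only. */
static inline uint8_t wgt_pixel_scalar(uint8_t src, int weight,
                                       int offset, int log2_denom)
{
    int val = src * weight + (offset << log2_denom);        /* MUL4 + ADDS_SH4_SH */

    if (val < 0)
        val = 0;                                             /* MAXI_SH8_SH(.., 0) */
    if (log2_denom)
        val = (val + (1 << (log2_denom - 1))) >> log2_denom; /* SRLR (rounding shift) */
    return val > 255 ? 255 : val;                            /* SAT_UH8_SH(.., 7) */
}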
2430 int height, int log2_denom,
2431 int weight_src, int offset)
2435 } else if (8 == height) {
2443 int height, int log2_denom,
2444 int weight_src, int offset)
2448 } else if (4 == height) {
2457 int log2_denom, int weight_dst,
2458 int weight_src, int offset_in)
2460 v16i8 src_wgt, dst_wgt, wgt;
2461 v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
2462 v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
2463 v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
2464 v16i8 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
2465 v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
2466 v8i16 tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
2469 offset_in = (unsigned) ((offset_in + 1) | 1) << log2_denom;
2470 offset_in += (128 * (weight_src + weight_dst));
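/* ((offset_in + 1) | 1) << log2_denom folds the bi-prediction offset and the
 * rounding constant for the (log2_denom + 1) shift into a single bias; the
 * extra 128 * (weight_src + weight_dst) term compensates for the pixels
 * presumably being converted to signed bytes (XORI with 128) before the dot
 * product, in lines elided from this excerpt. */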
2472 src_wgt = __msa_fill_b(weight_src);
2473 dst_wgt = __msa_fill_b(weight_dst);
2474 offset = __msa_fill_h(offset_in);
2475 denom = __msa_fill_h(log2_denom + 1);
2477 wgt = __msa_ilvev_b(dst_wgt, src_wgt);
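/* Interleaving the two byte weights pairs each src byte with weight_src and
 * each dst byte with weight_dst in the ILVR/ILVL vectors below, so a single
 * __msa_dpadd_s_h(offset, wgt, vecN) accumulates
 * src * weight_src + dst * weight_dst per pixel on top of the bias above. */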
2479 LD_UB8(src, stride, src0, src1, src2, src3, src4, src5, src6, src7);
2481 LD_UB8(dst, stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7);
2484 ILVR_B4_SB(dst0, src0, dst1, src1, dst2, src2, dst3, src3, vec0, vec2, vec4,
2485 vec6);
2486 ILVL_B4_SB(dst0, src0, dst1, src1, dst2, src2, dst3, src3, vec1, vec3, vec5,
2487 vec7);
2488 ILVR_B4_SB(dst4, src4, dst5, src5, dst6, src6, dst7, src7, vec8, vec10,
2489 vec12, vec14);
2490 ILVL_B4_SB(dst4, src4, dst5, src5, dst6, src6, dst7, src7, vec9, vec11,
2491 vec13, vec15);
2492 tmp0 = __msa_dpadd_s_h(offset, wgt, vec0);
2493 tmp1 = __msa_dpadd_s_h(offset, wgt, vec1);
2494 tmp2 = __msa_dpadd_s_h(offset, wgt, vec2);
2495 tmp3 = __msa_dpadd_s_h(offset, wgt, vec3);
2496 tmp4 = __msa_dpadd_s_h(offset, wgt, vec4);
2497 tmp5 = __msa_dpadd_s_h(offset, wgt, vec5);
2498 tmp6 = __msa_dpadd_s_h(offset, wgt, vec6);
2499 tmp7 = __msa_dpadd_s_h(offset, wgt, vec7);
2500 tmp8 = __msa_dpadd_s_h(offset, wgt, vec8);
2501 tmp9 = __msa_dpadd_s_h(offset, wgt, vec9);
2502 tmp10 = __msa_dpadd_s_h(offset, wgt, vec10);
2503 tmp11 = __msa_dpadd_s_h(offset, wgt, vec11);
2504 tmp12 = __msa_dpadd_s_h(offset, wgt, vec12);
2505 tmp13 = __msa_dpadd_s_h(offset, wgt, vec13);
2506 tmp14 = __msa_dpadd_s_h(offset, wgt, vec14);
2507 tmp15 = __msa_dpadd_s_h(offset, wgt, vec15);
2508 SRA_4V(tmp0, tmp1, tmp2, tmp3, denom);
2509 SRA_4V(tmp4, tmp5, tmp6, tmp7, denom);
2510 SRA_4V(tmp8, tmp9, tmp10, tmp11, denom);
2511 SRA_4V(tmp12, tmp13, tmp14, tmp15, denom);
2516 PCKEV_B4_UB(tmp1, tmp0, tmp3, tmp2, tmp5, tmp4, tmp7, tmp6, dst0, dst1,
2517 dst2, dst3);
2518 PCKEV_B4_UB(tmp9, tmp8, tmp11, tmp10, tmp13, tmp12, tmp15, tmp14, dst4,
2519 dst5, dst6, dst7);
2520 ST_UB8(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst, stride);
2524 LD_UB8(src, stride, src0, src1, src2, src3, src4, src5, src6, src7);
2525 LD_UB8(dst, stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7);
2528 ILVR_B4_SB(dst0, src0, dst1, src1, dst2, src2, dst3, src3, vec0, vec2,
2529 vec4, vec6);
2530 ILVL_B4_SB(dst0, src0, dst1, src1, dst2, src2, dst3, src3, vec1, vec3,
2531 vec5, vec7);
2532 ILVR_B4_SB(dst4, src4, dst5, src5, dst6, src6, dst7, src7, vec8, vec10,
2533 vec12, vec14);
2534 ILVL_B4_SB(dst4, src4, dst5, src5, dst6, src6, dst7, src7, vec9, vec11,
2535 vec13, vec15);
2536 tmp0 = __msa_dpadd_s_h(offset, wgt, vec0);
2537 tmp1 = __msa_dpadd_s_h(offset, wgt, vec1);
2538 tmp2 = __msa_dpadd_s_h(offset, wgt, vec2);
2539 tmp3 = __msa_dpadd_s_h(offset, wgt, vec3);
2540 tmp4 = __msa_dpadd_s_h(offset, wgt, vec4);
2541 tmp5 = __msa_dpadd_s_h(offset, wgt, vec5);
2542 tmp6 = __msa_dpadd_s_h(offset, wgt, vec6);
2543 tmp7 = __msa_dpadd_s_h(offset, wgt, vec7);
2544 tmp8 = __msa_dpadd_s_h(offset, wgt, vec8);
2545 tmp9 = __msa_dpadd_s_h(offset, wgt, vec9);
2546 tmp10 = __msa_dpadd_s_h(offset, wgt, vec10);
2547 tmp11 = __msa_dpadd_s_h(offset, wgt, vec11);
2548 tmp12 = __msa_dpadd_s_h(offset, wgt, vec12);
2549 tmp13 = __msa_dpadd_s_h(offset, wgt, vec13);
2550 tmp14 = __msa_dpadd_s_h(offset, wgt, vec14);
2551 tmp15 = __msa_dpadd_s_h(offset, wgt, vec15);
2552 SRA_4V(tmp0, tmp1, tmp2, tmp3, denom);
2553 SRA_4V(tmp4, tmp5, tmp6, tmp7, denom);
2554 SRA_4V(tmp8, tmp9, tmp10, tmp11, denom);
2555 SRA_4V(tmp12, tmp13, tmp14, tmp15, denom);
2560 PCKEV_B4_UB(tmp1, tmp0, tmp3, tmp2, tmp5, tmp4, tmp7, tmp6, dst0, dst1,
2561 dst2, dst3);
2562 PCKEV_B4_UB(tmp9, tmp8, tmp11, tmp10, tmp13, tmp12, tmp15, tmp14, dst4,
2563 dst5, dst6, dst7);
2564 ST_UB8(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst, stride);
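/* For reference, a scalar sketch of the per-pixel bidirectional weighting the
 * kernel above performs, mirroring its arithmetic; the helper name is
 * illustrative, and 'offset' is the same combined offset parameter. */
static inline uint8_t biwgt_pixel_scalar(uint8_t src, uint8_t dst,
                                         int weight_src, int weight_dst,
                                         int offset, int log2_denom)
{
    int bias = (((offset + 1) | 1) << log2_denom)
             + 128 * (weight_src + weight_dst);
    /* src - 128 and dst - 128 correspond to the XORI-with-128 signed view
     * of the pixels used by the vector dot product */
    int val  = bias + (src - 128) * weight_src + (dst - 128) * weight_dst;

    val >>= log2_denom + 1;                       /* SRA_4V(..., denom)      */
    return val < 0 ? 0 : (val > 255 ? 255 : val); /* clip to 8 bits + PCKEV  */
}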
2570 int log2_denom, int weight_dst,
2571 int weight_src, int offset)
2576 } else if (8 == height) {
2587 int log2_denom, int weight_dst,
2588 int weight_src, int offset)
2593 } else if (4 == height) {
void ff_h264_h_loop_filter_luma_mbaff_msa(uint8_t *src, int32_t ystride, int32_t alpha, int32_t beta, int8_t *tc0)
static void avc_loopfilter_cb_or_cr_intra_edge_ver_msa(uint8_t *data_cb_or_cr, uint8_t alpha_in, uint8_t beta_in, uint32_t img_width)
#define XORI_B8_128_UB(...)
static void avc_loopfilter_cb_or_cr_inter_edge_ver_msa(uint8_t *data, uint8_t bs0, uint8_t bs1, uint8_t bs2, uint8_t bs3, uint8_t tc0, uint8_t tc1, uint8_t tc2, uint8_t tc3, uint8_t alpha_in, uint8_t beta_in, uint32_t img_width)
#define AVC_LPF_P1_OR_Q1(p0_or_q0_org_in, q0_or_p0_org_in, p1_or_q1_org_in, p2_or_q2_org_in, negate_tc_in, tc_in, p1_or_q1_out)
static void avc_h_loop_filter_chroma422_mbaff_msa(uint8_t *src, int32_t stride, int32_t alpha_in, int32_t beta_in, int8_t *tc0)
static void avc_h_loop_filter_luma_mbaff_msa(uint8_t *in, int32_t stride, int32_t alpha_in, int32_t beta_in, int8_t *tc0)
static void avc_loopfilter_cb_or_cr_intra_edge_hor_msa(uint8_t *data_cb_or_cr, uint8_t alpha_in, uint8_t beta_in, uint32_t img_width)
void ff_weight_h264_pixels4_8_msa(uint8_t *src, ptrdiff_t stride, int height, int log2_denom, int weight_src, int offset)
#define MUL2(in0, in1, in2, in3, out0, out1)
void ff_h264_h_lpf_chroma_inter_msa(uint8_t *data, int img_width, int alpha, int beta, int8_t *tc)
#define AVC_LPF_H_2BYTE_CHROMA_422(src, stride, tc_val, alpha, beta, res)
static void avc_wgt_8x16_msa(uint8_t *data, int32_t stride, int32_t log2_denom, int32_t src_weight, int32_t offset_in)
#define ST4x4_UB(in0, in1, idx0, idx1, idx2, idx3, pdst, stride)
#define TRANSPOSE16x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11, in12, in13, in14, in15, out0, out1, out2, out3, out4, out5, out6, out7)
static void avc_biwgt_4x2_msa(uint8_t *src, uint8_t *dst, int32_t stride, int32_t log2_denom, int32_t src_weight, int32_t dst_weight, int32_t offset_in)
#define SRA_4V(in0, in1, in2, in3, shift)
void ff_h264_h_lpf_luma_intra_msa(uint8_t *data, int img_width, int alpha, int beta)
#define XORI_B4_128_UB(...)
static void avc_wgt_4x8_msa(uint8_t *data, int32_t stride, int32_t log2_denom, int32_t src_weight, int32_t offset_in)
#define LD4(psrc, stride, out0, out1, out2, out3)
static void avc_wgt_8x8_msa(uint8_t *data, int32_t stride, int32_t log2_denom, int32_t src_weight, int32_t offset_in)
#define UNPCK_UB_SH(in, out0, out1)
void ff_h264_v_lpf_chroma_inter_msa(uint8_t *data, int img_width, int alpha, int beta, int8_t *tc)
void ff_weight_h264_pixels16_8_msa(uint8_t *src, ptrdiff_t stride, int height, int log2_denom, int weight_src, int offset_in)
void ff_h264_v_lpf_luma_intra_msa(uint8_t *data, int img_width, int alpha, int beta)
#define SLDI_B4_0_SB(...)
static void avc_loopfilter_luma_inter_edge_hor_msa(uint8_t *data, uint8_t bs0, uint8_t bs1, uint8_t bs2, uint8_t bs3, uint8_t tc0, uint8_t tc1, uint8_t tc2, uint8_t tc3, uint8_t alpha_in, uint8_t beta_in, uint32_t image_width)
#define CLIP_SH_0_255(in)
#define XORI_B2_128_UB(...)
#define AVC_LPF_P0_OR_Q0(p0_or_q0_org_in, q1_or_p1_org_in, p1_or_q1_org_in, p0_or_q0_out)
static void avc_loopfilter_luma_intra_edge_ver_msa(uint8_t *data, uint8_t alpha_in, uint8_t beta_in, uint32_t img_width)
#define CLIP_SH2_0_255(in0, in1)
static void avc_loopfilter_luma_intra_edge_hor_msa(uint8_t *data, uint8_t alpha_in, uint8_t beta_in, uint32_t img_width)
void ff_h264_h_lpf_chroma_intra_msa(uint8_t *data, int img_width, int alpha, int beta)
#define MUL4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3)
#define INSERT_W2_UB(...)
static void avc_wgt_4x2_msa(uint8_t *data, int32_t stride, int32_t log2_denom, int32_t src_weight, int32_t offset_in)
#define LW2(psrc, stride, out0, out1)
void ff_h264_h_loop_filter_chroma422_msa(uint8_t *src, int32_t ystride, int32_t alpha, int32_t beta, int8_t *tc0)
#define CLIP_SH4_0_255(in0, in1, in2, in3)
#define SLDI_B2_0_UB(...)
#define TRANSPOSE8x4_UB_UB(...)
static void avc_loopfilter_cb_or_cr_inter_edge_hor_msa(uint8_t *data, uint8_t bs0, uint8_t bs1, uint8_t bs2, uint8_t bs3, uint8_t tc0, uint8_t tc1, uint8_t tc2, uint8_t tc3, uint8_t alpha_in, uint8_t beta_in, uint32_t img_width)
static void avc_biwgt_4x8_msa(uint8_t *src, uint8_t *dst, int32_t stride, int32_t log2_denom, int32_t src_weight, int32_t dst_weight, int32_t offset_in)
static void avc_loopfilter_luma_inter_edge_ver_msa(uint8_t *data, uint8_t bs0, uint8_t bs1, uint8_t bs2, uint8_t bs3, uint8_t tc0, uint8_t tc1, uint8_t tc2, uint8_t tc3, uint8_t alpha_in, uint8_t beta_in, uint32_t img_width)
void ff_biweight_h264_pixels4_8_msa(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int height, int log2_denom, int weight_dst, int weight_src, int offset)
#define AVC_LPF_P0Q0(q0_or_p0_org_in, p0_or_q0_org_in, p1_or_q1_org_in, q1_or_p1_org_in, negate_threshold_in, threshold_in, p0_or_q0_out, q0_or_p0_out)
#define ST2x4_UB(in, stidx, pdst, stride)
void ff_h264_h_loop_filter_luma_mbaff_intra_msa(uint8_t *src, int32_t ystride, int32_t alpha, int32_t beta)
#define INSERT_W4_UB(...)
void ff_biweight_h264_pixels8_8_msa(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int height, int log2_denom, int weight_dst, int weight_src, int offset)
void ff_h264_v_lpf_chroma_intra_msa(uint8_t *data, int img_width, int alpha, int beta)
#define AVC_LPF_P0P1P2_OR_Q0Q1Q2(p3_or_q3_org_in, p0_or_q0_org_in, q3_or_p3_org_in, p1_or_q1_org_in, p2_or_q2_org_in, q1_or_p1_org_in, p0_or_q0_out, p1_or_q1_out, p2_or_q2_out)
#define CLIP_SH(in, min, max)
static void avc_wgt_4x4_msa(uint8_t *data, int32_t stride, int32_t log2_denom, int32_t src_weight, int32_t offset_in)
static void avc_biwgt_4x4_msa(uint8_t *src, uint8_t *dst, int32_t stride, int32_t log2_denom, int32_t src_weight, int32_t dst_weight, int32_t offset_in)
static void avc_biwgt_8x8_msa(uint8_t *src, uint8_t *dst, int32_t stride, int32_t log2_denom, int32_t src_weight, int32_t dst_weight, int32_t offset_in)
static void avc_h_loop_filter_luma_mbaff_intra_msa(uint8_t *src, int32_t stride, int32_t alpha_in, int32_t beta_in)
void ff_h264_v_lpf_luma_inter_msa(uint8_t *data, int img_width, int alpha, int beta, int8_t *tc)
void ff_h264_h_lpf_luma_inter_msa(uint8_t *data, int img_width, int alpha, int beta, int8_t *tc)
#define AVC_LPF_H_CHROMA_422(src, stride, tc_val, alpha, beta, res)
void ff_weight_h264_pixels8_8_msa(uint8_t *src, ptrdiff_t stride, int height, int log2_denom, int weight_src, int offset)
#define ST4x8_UB(in0, in1, pdst, stride)
#define INSERT_D2_UB(...)
#define LW4(psrc, stride, out0, out1, out2, out3)
#define ST8x4_UB(in0, in1, pdst, stride)
#define ST8x8_UB(in0, in1, in2, in3, pdst, stride)
void ff_h264_h_loop_filter_chroma422_mbaff_msa(uint8_t *src, int32_t ystride, int32_t alpha, int32_t beta, int8_t *tc0)
static void avc_h_loop_filter_chroma422_msa(uint8_t *src, int32_t stride, int32_t alpha_in, int32_t beta_in, int8_t *tc0)
static void avc_biwgt_8x4_msa(uint8_t *src, uint8_t *dst, int32_t stride, int32_t log2_denom, int32_t src_weight, int32_t dst_weight, int32_t offset_in)
void ff_biweight_h264_pixels16_8_msa(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int height, int log2_denom, int weight_dst, int weight_src, int offset_in)
#define ST4x2_UB(in, pdst, stride)
static void avc_biwgt_8x16_msa(uint8_t *src, uint8_t *dst, int32_t stride, int32_t log2_denom, int32_t src_weight, int32_t dst_weight, int32_t offset_in)
static void avc_wgt_8x4_msa(uint8_t *data, int32_t stride, int32_t log2_denom, int32_t src_weight, int32_t offset_in)