static const uint8_t h263_loop_filter_strength_msa[32] = {
    0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7,
    7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11, 11, 12, 12, 12
};
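/* Horizontal loop filter: filters across a vertical block edge.  Declared
 * below as
 *     static void h263_h_loop_filter_msa(uint8_t *src, int32_t stride, int32_t qscale)
 * its filtering strength is looked up from the table above by qscale (the
 * `strength` value used in the fills below).  Eight rows around the edge are
 * loaded and rearranged (the TRANSPOSE8x4_UB_UB helper listed at the end)
 * so that each vector holds one column of pixels, filtered, and written back
 * with 4-byte stores per row. */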
    v16u8 in0, in1, in2, in3, in4, in5, in6, in7;
    v8i16 temp0, temp1, temp2;
    v8i16 diff0, diff2, diff4, diff6, diff8;
    v8i16 d0, a_d0, str_x2, str;

    LD_UB8(src, stride, in0, in1, in2, in3, in4, in5, in6, in7);
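    /* Interleave the edge pixels pairwise and horizontally subtract the
     * unsigned bytes into 16-bit differences: with in0, in3, in2, in1
     * holding the four pixels p0, p1, p2, p3 straddling the edge (the same
     * register order the vertical filter below loads with LD_UB4), this
     * yields a_d0 = p0 - p3 and temp2 = p2 - p1. */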
    temp0 = (v8i16) __msa_ilvr_b((v16i8) in0, (v16i8) in1);
    a_d0 = __msa_hsub_u_h((v16u8) temp0, (v16u8) temp0);
    temp2 = (v8i16) __msa_ilvr_b((v16i8) in2, (v16i8) in3);
    temp2 = __msa_hsub_u_h((v16u8) temp2, (v16u8) temp2);
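    /* Build the filter delta d1 for the d < 0 half of the H.263 "ramp":
     * diff2 starts as d = diff0 / 8 (the -(-x >> 3) form gives the rounding
     * needed on the negative side), is zeroed where d < -2*strength, and is
     * replaced by -2*strength - d where d < -strength.  BMZ/BMNZ act as
     * bitwise selects under the comparison masks held in temp0. */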
    diff2 = -(-diff0 >> 3);
    str_x2 = __msa_fill_h(-(strength << 1));
    temp0 = (str_x2 <= diff2);
    diff2 = (v8i16) __msa_bmz_v((v16u8) diff2, (v16u8) temp0, (v16u8) temp0);
    temp2 = str_x2 - diff2;
    str = __msa_fill_h(-strength);
    temp0 = (diff2 < str);
    diff2 = (v8i16) __msa_bmnz_v((v16u8) diff2, (v16u8) temp2, (v16u8) temp0);
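    /* Same ramp for the d >= 0 half: diff4 (d) is zeroed where d > 2*strength
     * and replaced by 2*strength - d where d > strength.  The sign mask of
     * diff0 then selects between the two halves, leaving the final d1 in d0. */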
    str_x2 = __msa_fill_h(strength << 1);
    temp0 = (diff4 <= str_x2);
    diff4 = (v8i16) __msa_bmz_v((v16u8) diff4, (v16u8) temp0, (v16u8) temp0);
    temp2 = str_x2 - diff4;
    str = __msa_fill_h(strength);
    temp0 = (str < diff4);
    diff4 = (v8i16) __msa_bmnz_v((v16u8) diff4, (v16u8) temp2, (v16u8) temp0);
    temp0 = __msa_clti_s_h(diff0, 0);
    d0 = (v8i16) __msa_bmnz_v((v16u8) diff4, (v16u8) diff2, (v16u8) temp0);
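    /* Second-stage correction for the outer pixels: diff8 gathers the bound
     * derived from d1 (|d1| / 2 in the scalar form), diff6 the scaled
     * outer-pixel difference computed from a_d0 (as in the vertical filter
     * below).  The selects clip that correction against the bound from below
     * and above, and the sign of a_d0 picks the surviving branch, leaving
     * the correction d2 in diff6. */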
    diff8 = (v8i16) __msa_bmnz_v((v16u8) diff4, (v16u8) diff2, (v16u8) temp0);

    temp0 = (diff6 < temp2);
    diff6 = (v8i16) __msa_bmnz_v((v16u8) diff6, (v16u8) temp2, (v16u8) temp0);

    temp0 = (diff2 <= diff8);
    diff2 = (v8i16) __msa_bmz_v((v16u8) diff2, (v16u8) diff8, (v16u8) temp0);
    temp0 = __msa_clti_s_h(a_d0, 0);
    diff6 = (v8i16) __msa_bmz_v((v16u8) diff6, (v16u8) diff2, (v16u8) temp0);
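    /* Apply the deltas: the outer pixels in0 (p0) and in1 (p3) move by -/+ d2
     * (diff6), the inner pixels in3 (p1) and in2 (p2) by +/- d1 (d0).  The
     * xori-with-128 around the signed saturating add, and the unsigned
     * saturating subtract, keep the results in the 0..255 range. */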
    in0 = (v16u8) ((v16i8) in0 - (v16i8) diff6);
    in1 = (v16u8) ((v16i8) in1 + (v16i8) diff6);
    in3 = __msa_xori_b(in3, 128);
    in3 = (v16u8) __msa_adds_s_b((v16i8) in3, (v16i8) d0);
    in3 = __msa_xori_b(in3, 128);
    in2 = __msa_subsus_u_b(in2, (v16i8) d0);
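    /* Re-interleave the filtered pixels back into row order and write the
     * four filtered bytes of each of the eight rows back with 4-byte stores. */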
    in0 = (v16u8) __msa_ilvr_h(temp1, temp0);
    in3 = (v16u8) __msa_ilvl_h(temp1, temp0);
    ST4x4_UB(in0, in0, 0, 1, 2, 3, src, stride);

    ST4x4_UB(in3, in3, 0, 1, 2, 3, src, stride);
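/* Vertical loop filter: filters across a horizontal block edge.  Declared
 * below as
 *     static void h263_v_loop_filter_msa(uint8_t *src, int32_t stride, int32_t qscale)
 * No transpose is needed here: the four rows straddling the edge are loaded
 * directly, the filtering arithmetic mirrors the horizontal filter above
 * step for step, and each filtered row is written back as one 8-byte store. */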
    uint64_t res0, res1, res2, res3;
    v16u8 in0, in1, in2, in3;
    v8i16 temp0, temp2, diff0, diff2, diff4, diff6, diff8;
    v8i16 d0, a_d0, str_x2, str;
    LD_UB4(src, stride, in0, in3, in2, in1);
    temp0 = (v8i16) __msa_ilvr_b((v16i8) in0, (v16i8) in1);
    a_d0 = __msa_hsub_u_h((v16u8) temp0, (v16u8) temp0);
    temp2 = (v8i16) __msa_ilvr_b((v16i8) in2, (v16i8) in3);
    temp2 = __msa_hsub_u_h((v16u8) temp2, (v16u8) temp2);

    diff0 = a_d0 + temp2;
    diff2 = -(-diff0 >> 3);
    str_x2 = __msa_fill_h(-(strength << 1));
    temp0 = (str_x2 <= diff2);
    diff2 = (v8i16) __msa_bmz_v((v16u8) diff2, (v16u8) temp0, (v16u8) temp0);
    temp2 = str_x2 - diff2;
    str = __msa_fill_h(-strength);
    temp0 = (diff2 < str);
    diff2 = (v8i16) __msa_bmnz_v((v16u8) diff2, (v16u8) temp2, (v16u8) temp0);
    str_x2 = __msa_fill_h(strength << 1);
    temp0 = (diff4 <= str_x2);
    diff4 = (v8i16) __msa_bmz_v((v16u8) diff4, (v16u8) temp0, (v16u8) temp0);
    temp2 = str_x2 - diff4;
    str = __msa_fill_h(strength);
    temp0 = (str < diff4);
    diff4 = (v8i16) __msa_bmnz_v((v16u8) diff4, (v16u8) temp2, (v16u8) temp0);
    temp0 = __msa_clti_s_h(diff0, 0);
    d0 = (v8i16) __msa_bmnz_v((v16u8) diff4, (v16u8) diff2, (v16u8) temp0);

    diff8 = (v8i16) __msa_bmnz_v((v16u8) diff4, (v16u8) diff2, (v16u8) temp0);
    diff6 = (-a_d0) >> 2;

    temp0 = (diff6 < temp2);
    diff6 = (v8i16) __msa_bmnz_v((v16u8) diff6, (v16u8) temp2, (v16u8) temp0);

    temp0 = (diff2 <= diff8);
    diff2 = (v8i16) __msa_bmz_v((v16u8) diff2, (v16u8) diff8, (v16u8) temp0);
    temp0 = __msa_clti_s_h(a_d0, 0);
    diff6 = (v8i16) __msa_bmz_v((v16u8) diff6, (v16u8) diff2, (v16u8) temp0);
    in0 = (v16u8) ((v16i8) in0 - (v16i8) diff6);
    in1 = (v16u8) ((v16i8) in1 + (v16i8) diff6);
    in3 = __msa_xori_b(in3, 128);
    in3 = (v16u8) __msa_adds_s_b((v16i8) in3, (v16i8) d0);
    in3 = __msa_xori_b(in3, 128);
    in2 = __msa_subsus_u_b(in2, (v16i8) d0);
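    /* Copy the low 8 bytes of each filtered row into general-purpose
     * registers and store them back as four 8-byte stores. */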
    res0 = __msa_copy_u_d((v2i64) in0, 0);
    res1 = __msa_copy_u_d((v2i64) in3, 0);
    res2 = __msa_copy_u_d((v2i64) in2, 0);
    res3 = __msa_copy_u_d((v2i64) in1, 0);
    SD4(res0, res1, res2, res3, src, stride);
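/*
 * For orientation only: a scalar sketch of the per-pixel operation that the
 * two vector functions above implement, modeled on the C loop filter in
 * libavcodec/h263dsp.c.  The helper names clip_u8_ref and loop_filter_ref are
 * illustrative and do not exist in this file, and the rounding details are
 * simplified relative to the vector code.
 */
static int clip_u8_ref(int v)
{
    return v < 0 ? 0 : (v > 255 ? 255 : v);
}

static void loop_filter_ref(uint8_t *p0, uint8_t *p1, uint8_t *p2,
                            uint8_t *p3, int strength)
{
    /* Combined edge difference; the MSA code builds this in diff0. */
    int d = (*p0 - *p3 + 4 * (*p2 - *p1)) / 8;
    int d1, ad1, d2;

    /* Piecewise ramp: pass small differences, attenuate medium ones,
     * leave large ones (real edges) untouched. */
    if (d < -2 * strength)
        d1 = 0;
    else if (d < -strength)
        d1 = -2 * strength - d;
    else if (d < strength)
        d1 = d;
    else if (d < 2 * strength)
        d1 = 2 * strength - d;
    else
        d1 = 0;

    /* Inner pixels move by d1, saturated to 0..255 (the xori/adds_s_b and
     * subsus_u_b sequences in the vector code). */
    *p1 = clip_u8_ref(*p1 + d1);
    *p2 = clip_u8_ref(*p2 - d1);

    /* Outer pixels get a smaller correction bounded by |d1| / 2. */
    ad1 = (d1 < 0 ? -d1 : d1) >> 1;
    d2 = (*p0 - *p3) / 4;
    if (d2 < -ad1)
        d2 = -ad1;
    if (d2 > ad1)
        d2 = ad1;
    *p0 = clip_u8_ref(*p0 - d2);
    *p3 = clip_u8_ref(*p3 + d2);
}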
/* Related declarations and helper macros used above: */

void ff_h263_h_loop_filter_msa(uint8_t *src, int32_t stride, int32_t q_scale);
void ff_h263_v_loop_filter_msa(uint8_t *src, int32_t stride, int32_t q_scale);

static void h263_h_loop_filter_msa(uint8_t *src, int32_t stride, int32_t qscale);
static void h263_v_loop_filter_msa(uint8_t *src, int32_t stride, int32_t qscale);

static const uint8_t h263_loop_filter_strength_msa[32];

#define TRANSPOSE8x4_UB_UB(...)
#define ST4x4_UB(in0, in1, idx0, idx1, idx2, idx3, pdst, stride)
#define SD4(in0, in1, in2, in3, pdst, stride)