/*
 * Copyright (c) 2022 Loongson Technology Corporation Limited
 * Contributed by Lu Wang <wanglu@loongson.cn>
 *                Hao Chen <chenhao@loongson.cn>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/loongarch/loongson_intrinsics.h"
#include "hevcdsp_lsx.h"

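/*
 * Shuffle control bytes for __lsx_vshuf_b.  The first row gathers the
 * overlapping byte pairs (x[i], x[i + 1]) that feed the pairwise
 * dot-product instructions in the 8-wide paths; the other two rows do
 * the same for the 4-wide cases, with indices >= 16 selecting bytes
 * from the other shuffle operand.
 */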
static const uint8_t ff_hevc_mask_arr[16 * 3] __attribute__((aligned(0x40))) = {
    /* 8 width cases */
    0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
    /* 4 width cases */
    0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20,
    /* 4 width cases */
    8, 9, 9, 10, 10, 11, 11, 12, 24, 25, 25, 26, 26, 27, 27, 28
};

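/*
 * 8-tap horizontal (qpel) filter for 64-pixel-wide blocks.  Each loop
 * iteration handles one row as eight overlapping 16-byte loads, gathers
 * byte pairs through the shuffle masks, accumulates the four coefficient
 * pairs with dot-product-add, then rounds (>> 6), saturates and stores
 * 64 output pixels.
 */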
static av_always_inline
void common_hz_8t_64w_lsx(const uint8_t *src, int32_t src_stride,
                          uint8_t *dst, int32_t dst_stride,
                          const int8_t *filter, int32_t height)
{
    int32_t loop_cnt;
    __m128i mask0, mask1, mask2, mask3, out1, out2;
    __m128i src0, src1, src2, src3, src4, src5, src6, src7;
    __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    __m128i filt0, filt1, filt2, filt3;
    __m128i res0, res1, res2, res3;

    mask0 = __lsx_vld(ff_hevc_mask_arr, 0);
    src -= 3;

    /* rearranging filter */
    DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
              filt0, filt1, filt2, filt3);

    DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
    mask3 = __lsx_vaddi_bu(mask0, 6);

    for (loop_cnt = height; loop_cnt--;) {
        DUP4_ARG2(__lsx_vld, src, 0, src, 8, src, 16, src, 24,
                  src0, src1, src2, src3);
        DUP4_ARG2(__lsx_vld, src, 32, src, 40, src, 48, src, 56,
                  src4, src5, src6, src7);
        src += src_stride;

        DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask0, src1, src1, mask0,
                  vec0, vec1);
        DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask0, src3, src3, mask0,
                  vec2, vec3);
        DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec1, filt0, vec2, filt0,
                  vec3, filt0, res0, res1, res2, res3);
        DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask2, src1, src1, mask2,
                  vec0, vec1);
        DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask2, src3, src3, mask2,
                  vec2, vec3);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, res0, vec0, filt2, res1, vec1, filt2,
                  res2, vec2, filt2, res3, vec3, filt2, res0, res1, res2, res3);
        DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask1, src1, src1, mask1,
                  vec4, vec5);
        DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask1, src3, src3, mask1,
                  vec6, vec7);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, res0, vec4, filt1, res1, vec5, filt1,
                  res2, vec6, filt1, res3, vec7, filt1, res0, res1, res2, res3);
        DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask3, src1, src1, mask3,
                  vec4, vec5);
        DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask3, src3, src3, mask3,
                  vec6, vec7);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, res0, vec4, filt3, res1, vec5, filt3,
                  res2, vec6, filt3, res3, vec7, filt3, res0, res1, res2, res3);

        DUP2_ARG3(__lsx_vssrarni_bu_h, res1, res0, 6, res3, res2, 6,
                  out1, out2);
        __lsx_vst(out1, dst, 0);
        __lsx_vst(out2, dst, 16);

        DUP2_ARG3(__lsx_vshuf_b, src4, src4, mask0, src5, src5, mask0,
                  vec0, vec1);
        DUP2_ARG3(__lsx_vshuf_b, src6, src6, mask0, src7, src7, mask0,
                  vec2, vec3);
        DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec1, filt0, vec2, filt0,
                  vec3, filt0, res0, res1, res2, res3);
        DUP2_ARG3(__lsx_vshuf_b, src4, src4, mask2, src5, src5, mask2,
                  vec0, vec1);
        DUP2_ARG3(__lsx_vshuf_b, src6, src6, mask2, src7, src7, mask2,
                  vec2, vec3);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, res0, vec0, filt2, res1, vec1, filt2,
                  res2, vec2, filt2, res3, vec3, filt2, res0, res1, res2, res3);
        DUP2_ARG3(__lsx_vshuf_b, src4, src4, mask1, src5, src5, mask1,
                  vec4, vec5);
        DUP2_ARG3(__lsx_vshuf_b, src6, src6, mask1, src7, src7, mask1,
                  vec6, vec7);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, res0, vec4, filt1, res1, vec5, filt1,
                  res2, vec6, filt1, res3, vec7, filt1, res0, res1, res2, res3);
        DUP2_ARG3(__lsx_vshuf_b, src4, src4, mask3, src5, src5, mask3,
                  vec4, vec5);
        DUP2_ARG3(__lsx_vshuf_b, src6, src6, mask3, src7, src7, mask3,
                  vec6, vec7);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, res0, vec4, filt3, res1, vec5, filt3,
                  res2, vec6, filt3, res3, vec7, filt3, res0, res1, res2, res3);

        DUP2_ARG3(__lsx_vssrarni_bu_h, res1, res0, 6, res3, res2, 6,
                  out1, out2);
        __lsx_vst(out1, dst, 32);
        __lsx_vst(out2, dst, 48);
        dst += dst_stride;
    }
}

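/*
 * 8-tap vertical filter for 8-pixel-wide blocks.  Seven rows prime a
 * sliding window of byte-interleaved row pairs (srcNM_r); each iteration
 * filters four new rows, rounds/saturates them to bytes, and slides the
 * window down by four rows.
 */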
static av_always_inline
void common_vt_8t_8w_lsx(const uint8_t *src, int32_t src_stride,
                         uint8_t *dst, int32_t dst_stride,
                         const int8_t *filter, int32_t height)
{
    uint32_t loop_cnt;
    int32_t src_stride_2x = (src_stride << 1);
    int32_t dst_stride_2x = (dst_stride << 1);
    int32_t src_stride_4x = (src_stride << 2);
    int32_t dst_stride_4x = (dst_stride << 2);
    int32_t src_stride_3x = src_stride_2x + src_stride;
    int32_t dst_stride_3x = dst_stride_2x + dst_stride;

    __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
    __m128i src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r;
    __m128i src65_r, src87_r, src109_r, filt0, filt1, filt2, filt3;
    __m128i tmp0, tmp1;
    __m128i out0_r, out1_r, out2_r, out3_r;

    src -= src_stride_3x;
    DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
              filt0, filt1, filt2, filt3);

    src0 = __lsx_vld(src, 0);
    DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src1, src2);
    src3 = __lsx_vldx(src, src_stride_3x);
    src += src_stride_4x;
    src4 = __lsx_vld(src, 0);
    DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src5, src6);
    src += src_stride_3x;
    DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4, src2, src1,
              src10_r, src32_r, src54_r, src21_r);
    DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, src43_r, src65_r);

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        src7 = __lsx_vld(src, 0);
        DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src8, src9);
        src10 = __lsx_vldx(src, src_stride_3x);
        src += src_stride_4x;

        DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9, src8, src10,
                  src9, src76_r, src87_r, src98_r, src109_r);
        DUP4_ARG2(__lsx_vdp2_h_bu_b, src10_r, filt0, src21_r, filt0, src32_r,
                  filt0, src43_r, filt0, out0_r, out1_r, out2_r, out3_r);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, out0_r, src32_r, filt1, out1_r,
                  src43_r, filt1, out2_r, src54_r, filt1, out3_r, src65_r,
                  filt1, out0_r, out1_r, out2_r, out3_r);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, out0_r, src54_r, filt2, out1_r,
                  src65_r, filt2, out2_r, src76_r, filt2, out3_r, src87_r,
                  filt2, out0_r, out1_r, out2_r, out3_r);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, out0_r, src76_r, filt3, out1_r,
                  src87_r, filt3, out2_r, src98_r, filt3, out3_r, src109_r,
                  filt3, out0_r, out1_r, out2_r, out3_r);

        DUP2_ARG3(__lsx_vssrarni_bu_h, out1_r, out0_r, 6, out3_r, out2_r, 6,
                  tmp0, tmp1);
        __lsx_vstelm_d(tmp0, dst, 0, 0);
        __lsx_vstelm_d(tmp0, dst + dst_stride, 0, 1);
        __lsx_vstelm_d(tmp1, dst + dst_stride_2x, 0, 0);
        __lsx_vstelm_d(tmp1, dst + dst_stride_3x, 0, 1);
        dst += dst_stride_4x;

        src10_r = src54_r;
        src32_r = src76_r;
        src54_r = src98_r;
        src21_r = src65_r;
        src43_r = src87_r;
        src65_r = src109_r;
        src6 = src10;
    }
}

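/*
 * 8-tap vertical filter for widths that are multiples of 16.  Each
 * 16-pixel column stripe is processed as in the 8-wide case, but with
 * the low (_r) and high (_l) halves of the interleaved row pairs
 * filtered separately, four output rows per iteration.
 */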
static av_always_inline
void common_vt_8t_16w_lsx(const uint8_t *src, int32_t src_stride, uint8_t *dst,
                          int32_t dst_stride, const int8_t *filter,
                          int32_t height, int32_t width)
{
    const uint8_t *src_tmp;
    uint8_t *dst_tmp;
    uint32_t loop_cnt, cnt;
    const int32_t src_stride_2x = (src_stride << 1);
    const int32_t dst_stride_2x = (dst_stride << 1);
    const int32_t src_stride_4x = (src_stride << 2);
    const int32_t dst_stride_4x = (dst_stride << 2);
    const int32_t src_stride_3x = src_stride_2x + src_stride;
    const int32_t dst_stride_3x = dst_stride_2x + dst_stride;

    __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
    __m128i filt0, filt1, filt2, filt3;
    __m128i src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r;
    __m128i src65_r, src87_r, src109_r, src10_l, src32_l, src54_l, src76_l;
    __m128i src98_l, src21_l, src43_l, src65_l, src87_l, src109_l;
    __m128i tmp0, tmp1, tmp2, tmp3;
    __m128i out0_r, out1_r, out2_r, out3_r, out0_l, out1_l, out2_l, out3_l;

    src -= src_stride_3x;
    DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
              filt0, filt1, filt2, filt3);

    for (cnt = (width >> 4); cnt--;) {
        src_tmp = src;
        dst_tmp = dst;

        src0 = __lsx_vld(src_tmp, 0);
        DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x,
                  src1, src2);
        src3 = __lsx_vldx(src_tmp, src_stride_3x);
        src_tmp += src_stride_4x;
        src4 = __lsx_vld(src_tmp, 0);
        DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x,
                  src5, src6);
        src_tmp += src_stride_3x;
        DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4, src2, src1,
                  src10_r, src32_r, src54_r, src21_r);
        DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, src43_r, src65_r);
        DUP4_ARG2(__lsx_vilvh_b, src1, src0, src3, src2, src5, src4, src2, src1,
                  src10_l, src32_l, src54_l, src21_l);
        DUP2_ARG2(__lsx_vilvh_b, src4, src3, src6, src5, src43_l, src65_l);

        for (loop_cnt = (height >> 2); loop_cnt--;) {
            src7 = __lsx_vld(src_tmp, 0);
            DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x,
                      src8, src9);
            src10 = __lsx_vldx(src_tmp, src_stride_3x);
            src_tmp += src_stride_4x;
            DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9, src8, src10,
                      src9, src76_r, src87_r, src98_r, src109_r);
            DUP4_ARG2(__lsx_vilvh_b, src7, src6, src8, src7, src9, src8, src10,
                      src9, src76_l, src87_l, src98_l, src109_l);
            DUP4_ARG2(__lsx_vdp2_h_bu_b, src10_r, filt0, src21_r, filt0, src32_r,
                      filt0, src43_r, filt0, out0_r, out1_r, out2_r, out3_r);
            DUP4_ARG3(__lsx_vdp2add_h_bu_b, out0_r, src32_r, filt1, out1_r,
                      src43_r, filt1, out2_r, src54_r, filt1, out3_r, src65_r,
                      filt1, out0_r, out1_r, out2_r, out3_r);
            DUP4_ARG3(__lsx_vdp2add_h_bu_b, out0_r, src54_r, filt2, out1_r,
                      src65_r, filt2, out2_r, src76_r, filt2, out3_r, src87_r,
                      filt2, out0_r, out1_r, out2_r, out3_r);
            DUP4_ARG3(__lsx_vdp2add_h_bu_b, out0_r, src76_r, filt3, out1_r,
                      src87_r, filt3, out2_r, src98_r, filt3, out3_r, src109_r,
                      filt3, out0_r, out1_r, out2_r, out3_r);
            DUP4_ARG2(__lsx_vdp2_h_bu_b, src10_l, filt0, src21_l, filt0, src32_l,
                      filt0, src43_l, filt0, out0_l, out1_l, out2_l, out3_l);
            DUP4_ARG3(__lsx_vdp2add_h_bu_b, out0_l, src32_l, filt1, out1_l,
                      src43_l, filt1, out2_l, src54_l, filt1, out3_l, src65_l,
                      filt1, out0_l, out1_l, out2_l, out3_l);
            DUP4_ARG3(__lsx_vdp2add_h_bu_b, out0_l, src54_l, filt2, out1_l,
                      src65_l, filt2, out2_l, src76_l, filt2, out3_l, src87_l,
                      filt2, out0_l, out1_l, out2_l, out3_l);
            DUP4_ARG3(__lsx_vdp2add_h_bu_b, out0_l, src76_l, filt3, out1_l,
                      src87_l, filt3, out2_l, src98_l, filt3, out3_l, src109_l,
                      filt3, out0_l, out1_l, out2_l, out3_l);
            DUP4_ARG3(__lsx_vssrarni_bu_h, out0_l, out0_r, 6, out1_l, out1_r,
                      6, out2_l, out2_r, 6, out3_l, out3_r, 6,
                      tmp0, tmp1, tmp2, tmp3);
            __lsx_vst(tmp0, dst_tmp, 0);
            __lsx_vstx(tmp1, dst_tmp, dst_stride);
            __lsx_vstx(tmp2, dst_tmp, dst_stride_2x);
            __lsx_vstx(tmp3, dst_tmp, dst_stride_3x);
            dst_tmp += dst_stride_4x;

            src10_r = src54_r;
            src32_r = src76_r;
            src54_r = src98_r;
            src21_r = src65_r;
            src43_r = src87_r;
            src65_r = src109_r;
            src10_l = src54_l;
            src32_l = src76_l;
            src54_l = src98_l;
            src21_l = src65_l;
            src43_l = src87_l;
            src65_l = src109_l;
            src6 = src10;
        }

        src += 16;
        dst += 16;
    }
}

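/* The wider vertical cases are assembled from the kernels above. */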
static void common_vt_8t_24w_lsx(const uint8_t *src, int32_t src_stride,
                                 uint8_t *dst, int32_t dst_stride,
                                 const int8_t *filter, int32_t height)
{
    common_vt_8t_16w_lsx(src, src_stride, dst, dst_stride, filter, height, 16);
    common_vt_8t_8w_lsx(src + 16, src_stride, dst + 16, dst_stride, filter,
                        height);
}

static void common_vt_8t_32w_lsx(const uint8_t *src, int32_t src_stride,
                                 uint8_t *dst, int32_t dst_stride,
                                 const int8_t *filter, int32_t height)
{
    common_vt_8t_16w_lsx(src, src_stride, dst, dst_stride, filter, height, 32);
}

static void common_vt_8t_48w_lsx(const uint8_t *src, int32_t src_stride,
                                 uint8_t *dst, int32_t dst_stride,
                                 const int8_t *filter, int32_t height)
{
    common_vt_8t_16w_lsx(src, src_stride, dst, dst_stride, filter, height, 48);
}

static void common_vt_8t_64w_lsx(const uint8_t *src, int32_t src_stride,
                                 uint8_t *dst, int32_t dst_stride,
                                 const int8_t *filter, int32_t height)
{
    common_vt_8t_16w_lsx(src, src_stride, dst, dst_stride, filter, height, 64);
}

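/*
 * 2-D 8-tap (qpel) filter, horizontal pass first.  The width is walked
 * in 8-pixel stripes: seven priming rows are filtered horizontally into
 * 16-bit intermediates, then each iteration filters two more rows and
 * runs the 8-tap vertical pass on 32-bit accumulators, shifting the
 * intermediates down by 6, rounding by 6 again, clipping to [0, 255]
 * and packing the two rows to bytes.
 */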
static av_always_inline
void hevc_hv_8t_8x2_lsx(const uint8_t *src, int32_t src_stride, uint8_t *dst,
                        int32_t dst_stride, const int8_t *filter_x,
                        const int8_t *filter_y, int32_t height, int32_t width)
{
    uint32_t loop_cnt, cnt;
    const uint8_t *src_tmp;
    uint8_t *dst_tmp;
    const int32_t src_stride_2x = (src_stride << 1);
    const int32_t dst_stride_2x = (dst_stride << 1);
    const int32_t src_stride_4x = (src_stride << 2);
    const int32_t src_stride_3x = src_stride_2x + src_stride;

    __m128i out;
    __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8;
    __m128i filt0, filt1, filt2, filt3;
    __m128i filt_h0, filt_h1, filt_h2, filt_h3;
    __m128i mask1, mask2, mask3;
    __m128i filter_vec;
    __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    __m128i vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
    __m128i dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst8;
    __m128i dst0_r, dst0_l, dst1_r, dst1_l;
    __m128i dst10_r, dst32_r, dst54_r, dst76_r;
    __m128i dst10_l, dst32_l, dst54_l, dst76_l;
    __m128i dst21_r, dst43_r, dst65_r, dst87_r;
    __m128i dst21_l, dst43_l, dst65_l, dst87_l;
    __m128i mask0 = __lsx_vld(ff_hevc_mask_arr, 0);

    src -= (src_stride_3x + 3);
    DUP4_ARG2(__lsx_vldrepl_h, filter_x, 0, filter_x, 2, filter_x, 4,
              filter_x, 6, filt0, filt1, filt2, filt3);

    filter_vec = __lsx_vld(filter_y, 0);
    filter_vec = __lsx_vsllwil_h_b(filter_vec, 0);
    DUP4_ARG2(__lsx_vreplvei_w, filter_vec, 0, filter_vec, 1, filter_vec, 2,
              filter_vec, 3, filt_h0, filt_h1, filt_h2, filt_h3);

    DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
    mask3 = __lsx_vaddi_bu(mask0, 6);

    for (cnt = width >> 3; cnt--;) {
        src_tmp = src;
        dst_tmp = dst;

        src0 = __lsx_vld(src_tmp, 0);
        DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x,
                  src1, src2);
        src3 = __lsx_vldx(src_tmp, src_stride_3x);
        src_tmp += src_stride_4x;
        src4 = __lsx_vld(src_tmp, 0);
        DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x,
                  src5, src6);
        src_tmp += src_stride_3x;

        /* row 0 row 1 row 2 row 3 */
        DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask0, src0, src0, mask1, src0,
                  src0, mask2, src0, src0, mask3, vec0, vec1, vec2, vec3);
        DUP4_ARG3(__lsx_vshuf_b, src1, src1, mask0, src1, src1, mask1, src1,
                  src1, mask2, src1, src1, mask3, vec4, vec5, vec6, vec7);
        DUP4_ARG3(__lsx_vshuf_b, src2, src2, mask0, src2, src2, mask1, src2,
                  src2, mask2, src2, src2, mask3, vec8, vec9, vec10, vec11);
        DUP4_ARG3(__lsx_vshuf_b, src3, src3, mask0, src3, src3, mask1, src3,
                  src3, mask2, src3, src3, mask3, vec12, vec13, vec14, vec15);
        DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec4, filt0, vec8, filt0,
                  vec12, filt0, dst0, dst1, dst2, dst3);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec1, filt1, dst1, vec5, filt1,
                  dst2, vec9, filt1, dst3, vec13, filt1, dst0, dst1, dst2, dst3);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec2, filt2, dst1, vec6, filt2,
                  dst2, vec10, filt2, dst3, vec14, filt2, dst0, dst1, dst2, dst3);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec3, filt3, dst1, vec7, filt3,
                  dst2, vec11, filt3, dst3, vec15, filt3, dst0, dst1, dst2, dst3);

        DUP4_ARG3(__lsx_vshuf_b, src4, src4, mask0, src4, src4, mask1, src4,
                  src4, mask2, src4, src4, mask3, vec0, vec1, vec2, vec3);
        DUP4_ARG3(__lsx_vshuf_b, src5, src5, mask0, src5, src5, mask1, src5,
                  src5, mask2, src5, src5, mask3, vec4, vec5, vec6, vec7);
        DUP4_ARG3(__lsx_vshuf_b, src6, src6, mask0, src6, src6, mask1, src6,
                  src6, mask2, src6, src6, mask3, vec8, vec9, vec10, vec11);
        DUP2_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec4, filt0, dst4, dst5);
        dst6 = __lsx_vdp2_h_bu_b(vec8, filt0);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst4, vec1, filt1, dst5, vec5, filt1,
                  dst6, vec9, filt1, dst4, vec2, filt2, dst4, dst5, dst6, dst4);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst5, vec6, filt2, dst6, vec10, filt2,
                  dst4, vec3, filt3, dst5, vec7, filt3, dst5, dst6, dst4, dst5);
        dst6 = __lsx_vdp2add_h_bu_b(dst6, vec11, filt3);
        DUP4_ARG2(__lsx_vilvl_h, dst1, dst0, dst3, dst2, dst5, dst4, dst2,
                  dst1, dst10_r, dst32_r, dst54_r, dst21_r);
        DUP4_ARG2(__lsx_vilvh_h, dst1, dst0, dst3, dst2, dst5, dst4, dst2,
                  dst1, dst10_l, dst32_l, dst54_l, dst21_l);
        DUP2_ARG2(__lsx_vilvl_h, dst4, dst3, dst6, dst5, dst43_r, dst65_r);
        DUP2_ARG2(__lsx_vilvh_h, dst4, dst3, dst6, dst5, dst43_l, dst65_l);

        for (loop_cnt = height >> 1; loop_cnt--;) {
            src7 = __lsx_vld(src_tmp, 0);
            src8 = __lsx_vldx(src_tmp, src_stride);
            src_tmp += src_stride_2x;

            DUP4_ARG3(__lsx_vshuf_b, src7, src7, mask0, src7, src7, mask1, src7,
                      src7, mask2, src7, src7, mask3, vec0, vec1, vec2, vec3);
            dst7 = __lsx_vdp2_h_bu_b(vec0, filt0);
            DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst7, vec1, filt1, dst7, vec2,
                      filt2, dst7, dst7);
            dst7 = __lsx_vdp2add_h_bu_b(dst7, vec3, filt3);
            dst76_r = __lsx_vilvl_h(dst7, dst6);
            dst76_l = __lsx_vilvh_h(dst7, dst6);
            DUP2_ARG2(__lsx_vdp2_w_h, dst10_r, filt_h0, dst10_l, filt_h0,
                      dst0_r, dst0_l);
            DUP4_ARG3(__lsx_vdp2add_w_h, dst0_r, dst32_r, filt_h1, dst0_l,
                      dst32_l, filt_h1, dst0_r, dst54_r, filt_h2, dst0_l,
                      dst54_l, filt_h2, dst0_r, dst0_l, dst0_r, dst0_l);
            DUP2_ARG3(__lsx_vdp2add_w_h, dst0_r, dst76_r, filt_h3, dst0_l,
                      dst76_l, filt_h3, dst0_r, dst0_l);
            DUP2_ARG2(__lsx_vsrai_w, dst0_r, 6, dst0_l, 6, dst0_r, dst0_l);

            DUP4_ARG3(__lsx_vshuf_b, src8, src8, mask0, src8, src8, mask1, src8,
                      src8, mask2, src8, src8, mask3, vec0, vec1, vec2, vec3);
            dst8 = __lsx_vdp2_h_bu_b(vec0, filt0);
            DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst8, vec1, filt1, dst8, vec2,
                      filt2, dst8, dst8);
            dst8 = __lsx_vdp2add_h_bu_b(dst8, vec3, filt3);

            dst87_r = __lsx_vilvl_h(dst8, dst7);
            dst87_l = __lsx_vilvh_h(dst8, dst7);
            DUP2_ARG2(__lsx_vdp2_w_h, dst21_r, filt_h0, dst21_l, filt_h0,
                      dst1_r, dst1_l);
            DUP4_ARG3(__lsx_vdp2add_w_h, dst1_r, dst43_r, filt_h1, dst1_l,
                      dst43_l, filt_h1, dst1_r, dst65_r, filt_h2, dst1_l,
                      dst65_l, filt_h2, dst1_r, dst1_l, dst1_r, dst1_l);
            DUP2_ARG3(__lsx_vdp2add_w_h, dst1_r, dst87_r, filt_h3, dst1_l,
                      dst87_l, filt_h3, dst1_r, dst1_l);
            DUP2_ARG2(__lsx_vsrai_w, dst1_r, 6, dst1_l, 6, dst1_r, dst1_l);
            DUP4_ARG2(__lsx_vsrari_w, dst0_r, 6, dst0_l, 6, dst1_r, 6, dst1_l,
                      6, dst0_r, dst0_l, dst1_r, dst1_l);
            DUP4_ARG1(__lsx_vclip255_w, dst0_l, dst0_r, dst1_l, dst1_r,
                      dst0_l, dst0_r, dst1_l, dst1_r);
            DUP2_ARG2(__lsx_vpickev_h, dst0_l, dst0_r, dst1_l, dst1_r,
                      dst0, dst1);
            out = __lsx_vpickev_b(dst1, dst0);
            __lsx_vstelm_d(out, dst_tmp, 0, 0);
            __lsx_vstelm_d(out, dst_tmp + dst_stride, 0, 1);
            dst_tmp += dst_stride_2x;

            dst10_r = dst32_r;
            dst32_r = dst54_r;
            dst54_r = dst76_r;
            dst10_l = dst32_l;
            dst32_l = dst54_l;
            dst54_l = dst76_l;
            dst21_r = dst43_r;
            dst43_r = dst65_r;
            dst65_r = dst87_r;
            dst21_l = dst43_l;
            dst43_l = dst65_l;
            dst65_l = dst87_l;
            dst6 = dst8;
        }
        src += 8;
        dst += 8;
    }
}

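/* Width-specific hv entry points all forward to the 8-column stripe kernel. */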
static void hevc_hv_8t_8w_lsx(const uint8_t *src, int32_t src_stride, uint8_t *dst,
                              int32_t dst_stride, const int8_t *filter_x,
                              const int8_t *filter_y, int32_t height)
{
    hevc_hv_8t_8x2_lsx(src, src_stride, dst, dst_stride,
                       filter_x, filter_y, height, 8);
}

static void hevc_hv_8t_16w_lsx(const uint8_t *src, int32_t src_stride, uint8_t *dst,
                               int32_t dst_stride, const int8_t *filter_x,
                               const int8_t *filter_y, int32_t height)
{
    hevc_hv_8t_8x2_lsx(src, src_stride, dst, dst_stride,
                       filter_x, filter_y, height, 16);
}

static void hevc_hv_8t_24w_lsx(const uint8_t *src, int32_t src_stride, uint8_t *dst,
                               int32_t dst_stride, const int8_t *filter_x,
                               const int8_t *filter_y, int32_t height)
{
    hevc_hv_8t_8x2_lsx(src, src_stride, dst, dst_stride,
                       filter_x, filter_y, height, 24);
}

static void hevc_hv_8t_32w_lsx(const uint8_t *src, int32_t src_stride, uint8_t *dst,
                               int32_t dst_stride, const int8_t *filter_x,
                               const int8_t *filter_y, int32_t height)
{
    hevc_hv_8t_8x2_lsx(src, src_stride, dst, dst_stride,
                       filter_x, filter_y, height, 32);
}

static void hevc_hv_8t_48w_lsx(const uint8_t *src, int32_t src_stride, uint8_t *dst,
                               int32_t dst_stride, const int8_t *filter_x,
                               const int8_t *filter_y, int32_t height)
{
    hevc_hv_8t_8x2_lsx(src, src_stride, dst, dst_stride,
                       filter_x, filter_y, height, 48);
}

static void hevc_hv_8t_64w_lsx(const uint8_t *src, int32_t src_stride, uint8_t *dst,
                               int32_t dst_stride, const int8_t *filter_x,
                               const int8_t *filter_y, int32_t height)
{
    hevc_hv_8t_8x2_lsx(src, src_stride, dst, dst_stride,
                       filter_x, filter_y, height, 64);
}

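/*
 * Vertical 4-tap (epel) filter for 24-pixel-wide blocks, split into a
 * 16-pixel part (src) and an 8-pixel part (_src = src + 16), two output
 * rows per iteration.  The "2t" in the name refers to the two replicated
 * coefficient pairs consumed by each pairwise dot-product step.
 */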
static av_always_inline
void common_vt_2t_24w_lsx(const uint8_t *src, int32_t src_stride,
                          uint8_t *dst, int32_t dst_stride,
                          const int8_t *filter, int32_t height)
{
    uint32_t loop_cnt;
    int32_t src_stride_2x = (src_stride << 1);
    int32_t src_stride_3x = src_stride_2x + src_stride;
    const uint8_t *_src;

    __m128i src0, src1, src2, src3, src4, src6, src7, src8, src9, src10;
    __m128i filt0, filt1;
    __m128i src10_r, src32_r, src76_r, src98_r, src21_r, src43_r, src87_r;
    __m128i src109_r, src10_l, src32_l, src21_l, src43_l;
    __m128i out0_r, out1_r, out2_r, out3_r, out0_l, out1_l;
    __m128i out1, out2, out3, out4;

    src -= src_stride;
    DUP2_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filt0, filt1);
    _src = src + 16;

    /* 16 width */
    src0 = __lsx_vld(src, 0);
    DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src1, src2);
    DUP2_ARG2(__lsx_vilvl_b, src1, src0, src2, src1, src10_r, src21_r);
    DUP2_ARG2(__lsx_vilvh_b, src1, src0, src2, src1, src10_l, src21_l);

    /* 8 width */
    src6 = __lsx_vld(_src, 0);
    DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride_2x, src7, src8);
    src += src_stride_3x;
    _src += src_stride_3x;
    DUP2_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src76_r, src87_r);

    for (loop_cnt = height >> 1; loop_cnt--;) {
        /* 16 width */
        DUP2_ARG2(__lsx_vld, src, 0, _src, 0, src3, src9);
        DUP2_ARG2(__lsx_vldx, src, src_stride, _src, src_stride, src4, src10);
        DUP2_ARG2(__lsx_vilvl_b, src3, src2, src4, src3, src32_r, src43_r);
        DUP2_ARG2(__lsx_vilvh_b, src3, src2, src4, src3, src32_l, src43_l);

        /* 8 width */
        src += src_stride_2x;
        _src += src_stride_2x;
        DUP2_ARG2(__lsx_vilvl_b, src9, src8, src10, src9, src98_r, src109_r);

        /* 16 width */
        DUP4_ARG2(__lsx_vdp2_h_bu_b, src10_r, filt0, src10_l, filt0, src21_r,
                  filt0, src21_l, filt0, out0_r, out0_l, out1_r, out1_l);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, out0_r, src32_r, filt1, out0_l, src32_l,
                  filt1, out1_r, src43_r, filt1, out1_l, src43_l, filt1,
                  out0_r, out0_l, out1_r, out1_l);

        /* 8 width */
        DUP2_ARG2(__lsx_vdp2_h_bu_b, src76_r, filt0, src87_r, filt0,
                  out2_r, out3_r);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, out2_r, src98_r, filt1, out3_r,
                  src109_r, filt1, out2_r, out3_r);

        /* 16 + 8 width */
        DUP4_ARG3(__lsx_vssrarni_bu_h, out0_l, out0_r, 6, out2_r, out2_r, 6,
                  out1_l, out1_r, 6, out3_r, out3_r, 6, out1, out2, out3, out4);
        __lsx_vst(out1, dst, 0);
        __lsx_vstelm_d(out2, dst, 16, 0);
        dst += dst_stride;
        __lsx_vst(out3, dst, 0);
        __lsx_vstelm_d(out4, dst, 16, 0);
        dst += dst_stride;

        src10_r = src32_r;
        src21_r = src43_r;
        src10_l = src32_l;
        src21_l = src43_l;
        src2 = src4;
        src76_r = src98_r;
        src87_r = src109_r;
        src8 = src10;
    }
}

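/* Same scheme for 32-pixel-wide blocks: two independent 16-pixel halves. */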
static av_always_inline
void common_vt_2t_32w_lsx(const uint8_t *src, int32_t src_stride,
                          uint8_t *dst, int32_t dst_stride,
                          const int8_t *filter, int32_t height)
{
    uint32_t loop_cnt;
    int32_t src_stride_2x = (src_stride << 1);
    int32_t dst_stride_2x = (dst_stride << 1);
    int32_t src_stride_3x = src_stride_2x + src_stride;
    const uint8_t *_src;

    __m128i src0, src1, src2, src3, src4, src6, src7, src8, src9, src10;
    __m128i src10_r, src32_r, src76_r, src98_r;
    __m128i src21_r, src43_r, src87_r, src109_r;
    __m128i out0_r, out1_r, out2_r, out3_r, out0_l, out1_l, out2_l, out3_l;
    __m128i src10_l, src32_l, src76_l, src98_l;
    __m128i src21_l, src43_l, src87_l, src109_l;
    __m128i filt0, filt1;
    __m128i out1, out2;

    src -= src_stride;
    DUP2_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filt0, filt1);
    _src = src + 16;

    /* 16 width */
    src0 = __lsx_vld(src, 0);
    DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src1, src2);

    DUP2_ARG2(__lsx_vilvl_b, src1, src0, src2, src1, src10_r, src21_r);
    DUP2_ARG2(__lsx_vilvh_b, src1, src0, src2, src1, src10_l, src21_l);

    /* next 16 width */
    src6 = __lsx_vld(_src, 0);
    DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride_2x, src7, src8);
    src += src_stride_3x;
    _src += src_stride_3x;

    DUP2_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src76_r, src87_r);
    DUP2_ARG2(__lsx_vilvh_b, src7, src6, src8, src7, src76_l, src87_l);

    for (loop_cnt = (height >> 1); loop_cnt--;) {
        /* 16 width */
        DUP2_ARG2(__lsx_vld, src, 0, _src, 0, src3, src9);
        DUP2_ARG2(__lsx_vldx, src, src_stride, _src, src_stride, src4, src10);
        DUP2_ARG2(__lsx_vilvl_b, src3, src2, src4, src3, src32_r, src43_r);
        DUP2_ARG2(__lsx_vilvh_b, src3, src2, src4, src3, src32_l, src43_l);

        /* 16 width */
        DUP4_ARG2(__lsx_vdp2_h_bu_b, src10_r, filt0, src10_l, filt0, src21_r,
                  filt0, src21_l, filt0, out0_r, out0_l, out1_r, out1_l);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, out0_r, src32_r, filt1, out0_l, src32_l,
                  filt1, out1_r, src43_r, filt1, out1_l, src43_l, filt1,
                  out0_r, out0_l, out1_r, out1_l);

        DUP2_ARG3(__lsx_vssrarni_bu_h, out0_l, out0_r, 6, out1_l, out1_r, 6,
                  out1, out2);
        __lsx_vst(out1, dst, 0);
        __lsx_vstx(out2, dst, dst_stride);

        src10_r = src32_r;
        src21_r = src43_r;
        src10_l = src32_l;
        src21_l = src43_l;
        src2 = src4;

        /* next 16 width */
        src += src_stride_2x;
        _src += src_stride_2x;
        DUP2_ARG2(__lsx_vilvl_b, src9, src8, src10, src9, src98_r, src109_r);
        DUP2_ARG2(__lsx_vilvh_b, src9, src8, src10, src9, src98_l, src109_l);

        /* next 16 width */
        DUP4_ARG2(__lsx_vdp2_h_bu_b, src76_r, filt0, src76_l, filt0, src87_r,
                  filt0, src87_l, filt0, out2_r, out2_l, out3_r, out3_l);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, out2_r, src98_r, filt1, out2_l, src98_l,
                  filt1, out3_r, src109_r, filt1, out3_l, src109_l, filt1,
                  out2_r, out2_l, out3_r, out3_l);

        /* next 16 width */
        DUP2_ARG3(__lsx_vssrarni_bu_h, out2_l, out2_r, 6, out3_l, out3_r, 6,
                  out1, out2);
        __lsx_vst(out1, dst, 16);
        __lsx_vst(out2, dst + dst_stride, 16);

        dst += dst_stride_2x;

        src76_r = src98_r;
        src87_r = src109_r;
        src76_l = src98_l;
        src87_l = src109_l;
        src8 = src10;
    }
}

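/*
 * 2-D 4-tap (epel) filter for a single 8x2 block: five input rows are
 * filtered horizontally into 16-bit intermediates, which are interleaved
 * and filtered vertically, then narrowed (>> 6) and rounded/saturated
 * (>> 6 again) down to bytes.
 */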
static av_always_inline
void hevc_hv_4t_8x2_lsx(const uint8_t *src, int32_t src_stride, uint8_t *dst,
                        int32_t dst_stride, const int8_t *filter_x,
                        const int8_t *filter_y)
{
    const int32_t src_stride_2x = (src_stride << 1);
    const int32_t src_stride_4x = (src_stride << 2);
    const int32_t src_stride_3x = src_stride_2x + src_stride;
    __m128i out;
    __m128i src0, src1, src2, src3, src4;
    __m128i filt0, filt1;
    __m128i filt_h0, filt_h1, filter_vec;
    __m128i mask0 = __lsx_vld(ff_hevc_mask_arr, 0);
    __m128i mask1;
    __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9;
    __m128i dst0, dst1, dst2, dst3, dst4;
    __m128i dst0_r, dst0_l, dst1_r, dst1_l;
    __m128i dst10_r, dst32_r, dst21_r, dst43_r;
    __m128i dst10_l, dst32_l, dst21_l, dst43_l;
    __m128i out0_r, out1_r;

    src -= (src_stride + 1);
    DUP2_ARG2(__lsx_vldrepl_h, filter_x, 0, filter_x, 2, filt0, filt1);

    filter_vec = __lsx_vld(filter_y, 0);
    filter_vec = __lsx_vsllwil_h_b(filter_vec, 0);
    DUP2_ARG2(__lsx_vreplvei_w, filter_vec, 0, filter_vec, 1, filt_h0, filt_h1);

    mask1 = __lsx_vaddi_bu(mask0, 2);
    src0 = __lsx_vld(src, 0);
    DUP4_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src,
              src_stride_3x, src, src_stride_4x, src1, src2, src3, src4);

    DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask0, src0, src0, mask1, src1, src1,
              mask0, src1, src1, mask1, vec0, vec1, vec2, vec3);
    DUP4_ARG3(__lsx_vshuf_b, src2, src2, mask0, src2, src2, mask1, src3, src3,
              mask0, src3, src3, mask1, vec4, vec5, vec6, vec7);
    DUP2_ARG3(__lsx_vshuf_b, src4, src4, mask0, src4, src4, mask1, vec8, vec9);

    DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec2, filt0, vec4, filt0, vec6,
              filt0, dst0, dst1, dst2, dst3);
    dst4 = __lsx_vdp2_h_bu_b(vec8, filt0);
    DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec1, filt1, dst1, vec3, filt1, dst2,
              vec5, filt1, dst3, vec7, filt1, dst0, dst1, dst2, dst3);
    dst4 = __lsx_vdp2add_h_bu_b(dst4, vec9, filt1);
    DUP4_ARG2(__lsx_vilvl_h, dst1, dst0, dst2, dst1, dst3, dst2, dst4, dst3,
              dst10_r, dst21_r, dst32_r, dst43_r);
    DUP4_ARG2(__lsx_vilvh_h, dst1, dst0, dst2, dst1, dst3, dst2, dst4, dst3,
              dst10_l, dst21_l, dst32_l, dst43_l);
    DUP4_ARG2(__lsx_vdp2_w_h, dst10_r, filt_h0, dst10_l, filt_h0, dst21_r,
              filt_h0, dst21_l, filt_h0, dst0_r, dst0_l, dst1_r, dst1_l);
    DUP4_ARG3(__lsx_vdp2add_w_h, dst0_r, dst32_r, filt_h1, dst0_l, dst32_l,
              filt_h1, dst1_r, dst43_r, filt_h1, dst1_l, dst43_l, filt_h1,
              dst0_r, dst0_l, dst1_r, dst1_l);
    DUP2_ARG3(__lsx_vsrani_h_w, dst0_l, dst0_r, 6, dst1_l, dst1_r, 6,
              out0_r, out1_r);
    out = __lsx_vssrarni_bu_h(out1_r, out0_r, 6);
    __lsx_vstelm_d(out, dst, 0, 0);
    __lsx_vstelm_d(out, dst + dst_stride, 0, 1);
}

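/*
 * Fully unrolled 8x6 variant of the 2-D 4-tap filter: nine input rows
 * in, six output rows out, no loop.
 */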
static av_always_inline
void hevc_hv_4t_8x6_lsx(const uint8_t *src, int32_t src_stride, uint8_t *dst,
                        int32_t dst_stride, const int8_t *filter_x,
                        const int8_t *filter_y)
{
    const int32_t src_stride_2x = (src_stride << 1);
    const int32_t dst_stride_2x = (dst_stride << 1);
    const int32_t src_stride_4x = (src_stride << 2);
    const int32_t dst_stride_4x = (dst_stride << 2);
    const int32_t src_stride_3x = src_stride_2x + src_stride;
    const int32_t dst_stride_3x = dst_stride_2x + dst_stride;
    __m128i out0, out1, out2;
    __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8;
    __m128i filt0, filt1;
    __m128i filt_h0, filt_h1, filter_vec;
    __m128i mask0 = __lsx_vld(ff_hevc_mask_arr, 0);
    __m128i mask1;
    __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9;
    __m128i vec10, vec11, vec12, vec13, vec14, vec15, vec16, vec17;
    __m128i dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst8;
    __m128i dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;
    __m128i dst4_r, dst4_l, dst5_r, dst5_l;
    __m128i dst10_r, dst32_r, dst10_l, dst32_l;
    __m128i dst21_r, dst43_r, dst21_l, dst43_l;
    __m128i dst54_r, dst54_l, dst65_r, dst65_l;
    __m128i dst76_r, dst76_l, dst87_r, dst87_l;
    __m128i out0_r, out1_r, out2_r, out3_r, out4_r, out5_r;

    src -= (src_stride + 1);
    DUP2_ARG2(__lsx_vldrepl_h, filter_x, 0, filter_x, 2, filt0, filt1);

    filter_vec = __lsx_vld(filter_y, 0);
    filter_vec = __lsx_vsllwil_h_b(filter_vec, 0);
    DUP2_ARG2(__lsx_vreplvei_w, filter_vec, 0, filter_vec, 1, filt_h0, filt_h1);

    mask1 = __lsx_vaddi_bu(mask0, 2);

    src0 = __lsx_vld(src, 0);
    DUP4_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src,
              src_stride_3x, src, src_stride_4x, src1, src2, src3, src4);
    src += src_stride_4x;
    DUP4_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src,
              src_stride_3x, src, src_stride_4x, src5, src6, src7, src8);

    DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask0, src0, src0, mask1, src1, src1,
              mask0, src1, src1, mask1, vec0, vec1, vec2, vec3);
    DUP4_ARG3(__lsx_vshuf_b, src2, src2, mask0, src2, src2, mask1, src3, src3,
              mask0, src3, src3, mask1, vec4, vec5, vec6, vec7);
    DUP4_ARG3(__lsx_vshuf_b, src4, src4, mask0, src4, src4, mask1, src5, src5,
              mask0, src5, src5, mask1, vec8, vec9, vec10, vec11);
    DUP4_ARG3(__lsx_vshuf_b, src6, src6, mask0, src6, src6, mask1, src7, src7,
              mask0, src7, src7, mask1, vec12, vec13, vec14, vec15);
    DUP2_ARG3(__lsx_vshuf_b, src8, src8, mask0, src8, src8, mask1, vec16, vec17);

    DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec2, filt0, vec4, filt0, vec6,
              filt0, dst0, dst1, dst2, dst3);
    DUP4_ARG2(__lsx_vdp2_h_bu_b, vec8, filt0, vec10, filt0, vec12, filt0, vec14,
              filt0, dst4, dst5, dst6, dst7);
    dst8 = __lsx_vdp2_h_bu_b(vec16, filt0);
    DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec1, filt1, dst1, vec3, filt1, dst2,
              vec5, filt1, dst3, vec7, filt1, dst0, dst1, dst2, dst3);
    DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst4, vec9, filt1, dst5, vec11, filt1, dst6,
              vec13, filt1, dst7, vec15, filt1, dst4, dst5, dst6, dst7);
    dst8 = __lsx_vdp2add_h_bu_b(dst8, vec17, filt1);

    DUP4_ARG2(__lsx_vilvl_h, dst1, dst0, dst2, dst1, dst3, dst2, dst4, dst3,
              dst10_r, dst21_r, dst32_r, dst43_r);
    DUP4_ARG2(__lsx_vilvh_h, dst1, dst0, dst2, dst1, dst3, dst2, dst4, dst3,
              dst10_l, dst21_l, dst32_l, dst43_l);
    DUP4_ARG2(__lsx_vilvl_h, dst5, dst4, dst6, dst5, dst7, dst6, dst8, dst7,
              dst54_r, dst65_r, dst76_r, dst87_r);
    DUP4_ARG2(__lsx_vilvh_h, dst5, dst4, dst6, dst5, dst7, dst6, dst8, dst7,
              dst54_l, dst65_l, dst76_l, dst87_l);

    DUP4_ARG2(__lsx_vdp2_w_h, dst10_r, filt_h0, dst10_l, filt_h0, dst21_r,
              filt_h0, dst21_l, filt_h0, dst0_r, dst0_l, dst1_r, dst1_l);
    DUP4_ARG2(__lsx_vdp2_w_h, dst32_r, filt_h0, dst32_l, filt_h0, dst43_r,
              filt_h0, dst43_l, filt_h0, dst2_r, dst2_l, dst3_r, dst3_l);
    DUP4_ARG2(__lsx_vdp2_w_h, dst54_r, filt_h0, dst54_l, filt_h0, dst65_r,
              filt_h0, dst65_l, filt_h0, dst4_r, dst4_l, dst5_r, dst5_l);
    DUP4_ARG3(__lsx_vdp2add_w_h, dst0_r, dst32_r, filt_h1, dst0_l, dst32_l,
              filt_h1, dst1_r, dst43_r, filt_h1, dst1_l, dst43_l, filt_h1,
              dst0_r, dst0_l, dst1_r, dst1_l);
    DUP4_ARG3(__lsx_vdp2add_w_h, dst2_r, dst54_r, filt_h1, dst2_l, dst54_l,
              filt_h1, dst3_r, dst65_r, filt_h1, dst3_l, dst65_l, filt_h1,
              dst2_r, dst2_l, dst3_r, dst3_l);
    DUP4_ARG3(__lsx_vdp2add_w_h, dst4_r, dst76_r, filt_h1, dst4_l, dst76_l,
              filt_h1, dst5_r, dst87_r, filt_h1, dst5_l, dst87_l, filt_h1,
              dst4_r, dst4_l, dst5_r, dst5_l);

    DUP4_ARG3(__lsx_vsrani_h_w, dst0_l, dst0_r, 6, dst1_l, dst1_r, 6, dst2_l,
              dst2_r, 6, dst3_l, dst3_r, 6, out0_r, out1_r, out2_r, out3_r);
    DUP2_ARG3(__lsx_vsrani_h_w, dst4_l, dst4_r, 6, dst5_l, dst5_r, 6,
              out4_r, out5_r);
    DUP2_ARG3(__lsx_vssrarni_bu_h, out1_r, out0_r, 6, out3_r, out2_r, 6,
              out0, out1);
    out2 = __lsx_vssrarni_bu_h(out5_r, out4_r, 6);

    __lsx_vstelm_d(out0, dst, 0, 0);
    __lsx_vstelm_d(out0, dst + dst_stride, 0, 1);
    __lsx_vstelm_d(out1, dst + dst_stride_2x, 0, 0);
    __lsx_vstelm_d(out1, dst + dst_stride_3x, 0, 1);
    dst += dst_stride_4x;
    __lsx_vstelm_d(out2, dst, 0, 0);
    __lsx_vstelm_d(out2, dst + dst_stride, 0, 1);
}

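/*
 * Generic 2-D 4-tap kernel for widths that are multiples of 8:
 * width8mult 8-column stripes, four output rows per iteration, with a
 * three-row sliding window of horizontal intermediates.
 */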
static av_always_inline
void hevc_hv_4t_8mult_lsx(const uint8_t *src, int32_t src_stride, uint8_t *dst,
                          int32_t dst_stride, const int8_t *filter_x,
                          const int8_t *filter_y, int32_t height,
                          int32_t width8mult)
{
    uint32_t loop_cnt, cnt;
    const uint8_t *src_tmp;
    uint8_t *dst_tmp;
    const int32_t src_stride_2x = (src_stride << 1);
    const int32_t dst_stride_2x = (dst_stride << 1);
    const int32_t src_stride_4x = (src_stride << 2);
    const int32_t dst_stride_4x = (dst_stride << 2);
    const int32_t src_stride_3x = src_stride_2x + src_stride;
    const int32_t dst_stride_3x = dst_stride_2x + dst_stride;

    __m128i out0, out1;
    __m128i src0, src1, src2, src3, src4, src5, src6;
    __m128i filt0, filt1;
    __m128i filt_h0, filt_h1, filter_vec;
    __m128i mask0 = __lsx_vld(ff_hevc_mask_arr, 0);
    __m128i mask1;
    __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    __m128i dst0, dst1, dst2, dst3, dst4, dst5;
    __m128i dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;
    __m128i dst10_r, dst32_r, dst21_r, dst43_r;
    __m128i dst10_l, dst32_l, dst21_l, dst43_l;
    __m128i dst54_r, dst54_l, dst65_r, dst65_l, dst6;
    __m128i out0_r, out1_r, out2_r, out3_r;

    src -= (src_stride + 1);
    DUP2_ARG2(__lsx_vldrepl_h, filter_x, 0, filter_x, 2, filt0, filt1);

    filter_vec = __lsx_vld(filter_y, 0);
    filter_vec = __lsx_vsllwil_h_b(filter_vec, 0);
    DUP2_ARG2(__lsx_vreplvei_w, filter_vec, 0, filter_vec, 1, filt_h0, filt_h1);
    mask1 = __lsx_vaddi_bu(mask0, 2);

    for (cnt = width8mult; cnt--;) {
        src_tmp = src;
        dst_tmp = dst;

        src0 = __lsx_vld(src_tmp, 0);
        DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x,
                  src1, src2);
        src_tmp += src_stride_3x;

        DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask0, src0, src0, mask1,
                  vec0, vec1);
        DUP2_ARG3(__lsx_vshuf_b, src1, src1, mask0, src1, src1, mask1,
                  vec2, vec3);
        DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask0, src2, src2, mask1,
                  vec4, vec5);

        DUP2_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec2, filt0, dst0, dst1);
        dst2 = __lsx_vdp2_h_bu_b(vec4, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec1, filt1, dst1, vec3, filt1,
                  dst0, dst1);
        dst2 = __lsx_vdp2add_h_bu_b(dst2, vec5, filt1);

        DUP2_ARG2(__lsx_vilvl_h, dst1, dst0, dst2, dst1, dst10_r, dst21_r);
        DUP2_ARG2(__lsx_vilvh_h, dst1, dst0, dst2, dst1, dst10_l, dst21_l);

        for (loop_cnt = (height >> 2); loop_cnt--;) {
            src3 = __lsx_vld(src_tmp, 0);
            DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x,
                      src4, src5);
            src6 = __lsx_vldx(src_tmp, src_stride_3x);
            src_tmp += src_stride_4x;

            DUP4_ARG3(__lsx_vshuf_b, src3, src3, mask0, src3, src3, mask1, src4,
                      src4, mask0, src4, src4, mask1, vec0, vec1, vec2, vec3);
            DUP4_ARG3(__lsx_vshuf_b, src5, src5, mask0, src5, src5, mask1, src6,
                      src6, mask0, src6, src6, mask1, vec4, vec5, vec6, vec7);

            DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec2, filt0, vec4, filt0,
                      vec6, filt0, dst3, dst4, dst5, dst6);
            DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst3, vec1, filt1, dst4, vec3,
                      filt1, dst5, vec5, filt1, dst6, vec7, filt1,
                      dst3, dst4, dst5, dst6);

            DUP4_ARG2(__lsx_vilvl_h, dst3, dst2, dst4, dst3, dst5, dst4,
                      dst6, dst5, dst32_r, dst43_r, dst54_r, dst65_r);
            DUP4_ARG2(__lsx_vilvh_h, dst3, dst2, dst4, dst3, dst5, dst4,
                      dst6, dst5, dst32_l, dst43_l, dst54_l, dst65_l);

            DUP4_ARG2(__lsx_vdp2_w_h, dst10_r, filt_h0, dst10_l, filt_h0, dst21_r,
                      filt_h0, dst21_l, filt_h0, dst0_r, dst0_l, dst1_r, dst1_l);
            DUP4_ARG2(__lsx_vdp2_w_h, dst32_r, filt_h0, dst32_l, filt_h0, dst43_r,
                      filt_h0, dst43_l, filt_h0, dst2_r, dst2_l, dst3_r, dst3_l);
            DUP4_ARG3(__lsx_vdp2add_w_h, dst0_r, dst32_r, filt_h1, dst0_l,
                      dst32_l, filt_h1, dst1_r, dst43_r, filt_h1, dst1_l,
                      dst43_l, filt_h1, dst0_r, dst0_l, dst1_r, dst1_l);
            DUP4_ARG3(__lsx_vdp2add_w_h, dst2_r, dst54_r, filt_h1, dst2_l,
                      dst54_l, filt_h1, dst3_r, dst65_r, filt_h1, dst3_l,
                      dst65_l, filt_h1, dst2_r, dst2_l, dst3_r, dst3_l);

            DUP4_ARG3(__lsx_vsrani_h_w, dst0_l, dst0_r, 6, dst1_l, dst1_r, 6,
                      dst2_l, dst2_r, 6, dst3_l, dst3_r, 6, out0_r, out1_r,
                      out2_r, out3_r);
            DUP2_ARG3(__lsx_vssrarni_bu_h, out1_r, out0_r, 6, out3_r, out2_r,
                      6, out0, out1);
            __lsx_vstelm_d(out0, dst_tmp, 0, 0);
            __lsx_vstelm_d(out0, dst_tmp + dst_stride, 0, 1);
            __lsx_vstelm_d(out1, dst_tmp + dst_stride_2x, 0, 0);
            __lsx_vstelm_d(out1, dst_tmp + dst_stride_3x, 0, 1);
            dst_tmp += dst_stride_4x;

            dst10_r = dst54_r;
            dst10_l = dst54_l;
            dst21_r = dst65_r;
            dst21_l = dst65_l;
            dst2 = dst6;
        }
        src += 8;
        dst += 8;
    }
}

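/*
 * Height dispatch for 8-wide hv epel blocks: the 8x2 and 8x6 cases use
 * the unrolled kernels; any other multiple-of-4 height takes the
 * looping kernel.
 */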
static
void hevc_hv_4t_8w_lsx(const uint8_t *src, int32_t src_stride, uint8_t *dst,
                       int32_t dst_stride, const int8_t *filter_x,
                       const int8_t *filter_y, int32_t height)
{
    if (2 == height) {
        hevc_hv_4t_8x2_lsx(src, src_stride, dst, dst_stride,
                           filter_x, filter_y);
    } else if (6 == height) {
        hevc_hv_4t_8x6_lsx(src, src_stride, dst, dst_stride,
                           filter_x, filter_y);
    } else if (0 == (height & 0x03)) {
        hevc_hv_4t_8mult_lsx(src, src_stride, dst, dst_stride,
                             filter_x, filter_y, height, 1);
    }
}

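/*
 * 12-wide blocks are processed as an 8-wide stripe followed by a 4-wide
 * stripe at column 8; the second loop therefore stores 4 bytes per row
 * (__lsx_vstelm_w) instead of 8.
 */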
static av_always_inline
void hevc_hv_4t_12w_lsx(const uint8_t *src, int32_t src_stride, uint8_t *dst,
                        int32_t dst_stride, const int8_t *filter_x,
                        const int8_t *filter_y, int32_t height)
{
    uint32_t loop_cnt;
    const uint8_t *src_tmp;
    uint8_t *dst_tmp;
    const int32_t src_stride_2x = (src_stride << 1);
    const int32_t dst_stride_2x = (dst_stride << 1);
    const int32_t src_stride_4x = (src_stride << 2);
    const int32_t dst_stride_4x = (dst_stride << 2);
    const int32_t src_stride_3x = src_stride_2x + src_stride;
    const int32_t dst_stride_3x = dst_stride_2x + dst_stride;
    __m128i out0, out1;
    __m128i src0, src1, src2, src3, src4, src5, src6;
    __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    __m128i mask0, mask1;
    __m128i filt0, filt1, filt_h0, filt_h1, filter_vec, tmp0, tmp1, tmp2, tmp3;
    __m128i dsth0, dsth1, dsth2, dsth3, dsth4, dsth5, dsth6;
    __m128i dst10_r, dst32_r, dst54_r, dst21_r, dst43_r, dst65_r;
    __m128i dst10_l, dst32_l, dst54_l, dst21_l, dst43_l, dst65_l;
    __m128i dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;

    src -= (src_stride + 1);
    DUP2_ARG2(__lsx_vldrepl_h, filter_x, 0, filter_x, 2, filt0, filt1);

    filter_vec = __lsx_vld(filter_y, 0);
    filter_vec = __lsx_vsllwil_h_b(filter_vec, 0);
    DUP2_ARG2(__lsx_vreplvei_w, filter_vec, 0, filter_vec, 1, filt_h0, filt_h1);

    mask0 = __lsx_vld(ff_hevc_mask_arr, 0);
    mask1 = __lsx_vaddi_bu(mask0, 2);

    src_tmp = src;
    dst_tmp = dst;

    src0 = __lsx_vld(src_tmp, 0);
    DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x,
              src1, src2);
    src_tmp += src_stride_3x;

    DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask0, src0, src0, mask1, vec0, vec1);
    DUP2_ARG3(__lsx_vshuf_b, src1, src1, mask0, src1, src1, mask1, vec2, vec3);
    DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask0, src2, src2, mask1, vec4, vec5);

    DUP2_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec2, filt0, dsth0, dsth1);
    dsth2 = __lsx_vdp2_h_bu_b(vec4, filt0);
    DUP2_ARG3(__lsx_vdp2add_h_bu_b, dsth0, vec1, filt1, dsth1, vec3, filt1,
              dsth0, dsth1);
    dsth2 = __lsx_vdp2add_h_bu_b(dsth2, vec5, filt1);

    DUP2_ARG2(__lsx_vilvl_h, dsth1, dsth0, dsth2, dsth1, dst10_r, dst21_r);
    DUP2_ARG2(__lsx_vilvh_h, dsth1, dsth0, dsth2, dsth1, dst10_l, dst21_l);

    for (loop_cnt = height >> 2; loop_cnt--;) {
        src3 = __lsx_vld(src_tmp, 0);
        DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x,
                  src4, src5);
        src6 = __lsx_vldx(src_tmp, src_stride_3x);
        src_tmp += src_stride_4x;

        DUP4_ARG3(__lsx_vshuf_b, src3, src3, mask0, src3, src3, mask1, src4,
                  src4, mask0, src4, src4, mask1, vec0, vec1, vec2, vec3);
        DUP4_ARG3(__lsx_vshuf_b, src5, src5, mask0, src5, src5, mask1, src6,
                  src6, mask0, src6, src6, mask1, vec4, vec5, vec6, vec7);

        DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec2, filt0, vec4, filt0,
                  vec6, filt0, dsth3, dsth4, dsth5, dsth6);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dsth3, vec1, filt1, dsth4,
                  vec3, filt1, dsth5, vec5, filt1, dsth6, vec7, filt1,
                  dsth3, dsth4, dsth5, dsth6);

        DUP4_ARG2(__lsx_vilvl_h, dsth3, dsth2, dsth4, dsth3, dsth5, dsth4,
                  dsth6, dsth5, dst32_r, dst43_r, dst54_r, dst65_r);
        DUP4_ARG2(__lsx_vilvh_h, dsth3, dsth2, dsth4, dsth3, dsth5, dsth4,
                  dsth6, dsth5, dst32_l, dst43_l, dst54_l, dst65_l);

        DUP4_ARG2(__lsx_vdp2_w_h, dst10_r, filt_h0, dst10_l, filt_h0, dst21_r,
                  filt_h0, dst21_l, filt_h0, dst0_r, dst0_l, dst1_r, dst1_l);
        DUP4_ARG2(__lsx_vdp2_w_h, dst32_r, filt_h0, dst32_l, filt_h0, dst43_r,
                  filt_h0, dst43_l, filt_h0, dst2_r, dst2_l, dst3_r, dst3_l);
        DUP4_ARG3(__lsx_vdp2add_w_h, dst0_r, dst32_r, filt_h1, dst0_l, dst32_l,
                  filt_h1, dst1_r, dst43_r, filt_h1, dst1_l, dst43_l, filt_h1,
                  dst0_r, dst0_l, dst1_r, dst1_l);
        DUP4_ARG3(__lsx_vdp2add_w_h, dst2_r, dst54_r, filt_h1, dst2_l, dst54_l,
                  filt_h1, dst3_r, dst65_r, filt_h1, dst3_l, dst65_l, filt_h1,
                  dst2_r, dst2_l, dst3_r, dst3_l);

        DUP4_ARG3(__lsx_vsrani_h_w, dst0_l, dst0_r, 6, dst1_l, dst1_r, 6, dst2_l,
                  dst2_r, 6, dst3_l, dst3_r, 6, tmp0, tmp1, tmp2, tmp3);
        DUP2_ARG3(__lsx_vssrarni_bu_h, tmp1, tmp0, 6, tmp3, tmp2, 6,
                  out0, out1);

        __lsx_vstelm_d(out0, dst_tmp, 0, 0);
        __lsx_vstelm_d(out0, dst_tmp + dst_stride, 0, 1);
        __lsx_vstelm_d(out1, dst_tmp + dst_stride_2x, 0, 0);
        __lsx_vstelm_d(out1, dst_tmp + dst_stride_3x, 0, 1);
        dst_tmp += dst_stride_4x;

        dst10_r = dst54_r;
        dst10_l = dst54_l;
        dst21_r = dst65_r;
        dst21_l = dst65_l;
        dsth2 = dsth6;
    }

    src_tmp = src + 8;
    dst_tmp = dst + 8;

    src0 = __lsx_vld(src_tmp, 0);
    DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x,
              src1, src2);
    src_tmp += src_stride_3x;

    DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask0, src0, src0, mask1, vec0, vec1);
    DUP2_ARG3(__lsx_vshuf_b, src1, src1, mask0, src1, src1, mask1, vec2, vec3);
    DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask0, src2, src2, mask1, vec4, vec5);

    DUP2_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec2, filt0, dsth0, dsth1);
    dsth2 = __lsx_vdp2_h_bu_b(vec4, filt0);
    DUP2_ARG3(__lsx_vdp2add_h_bu_b, dsth0, vec1, filt1, dsth1, vec3, filt1,
              dsth0, dsth1);
    dsth2 = __lsx_vdp2add_h_bu_b(dsth2, vec5, filt1);

    DUP2_ARG2(__lsx_vilvl_h, dsth1, dsth0, dsth2, dsth1, dst10_r, dst21_r);
    DUP2_ARG2(__lsx_vilvh_h, dsth1, dsth0, dsth2, dsth1, dst10_l, dst21_l);

    for (loop_cnt = height >> 2; loop_cnt--;) {
        src3 = __lsx_vld(src_tmp, 0);
        DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x,
                  src4, src5);
        src6 = __lsx_vldx(src_tmp, src_stride_3x);
        src_tmp += src_stride_4x;

        DUP4_ARG3(__lsx_vshuf_b, src3, src3, mask0, src3, src3, mask1, src4,
                  src4, mask0, src4, src4, mask1, vec0, vec1, vec2, vec3);
        DUP4_ARG3(__lsx_vshuf_b, src5, src5, mask0, src5, src5, mask1, src6,
                  src6, mask0, src6, src6, mask1, vec4, vec5, vec6, vec7);

        DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec2, filt0, vec4, filt0,
                  vec6, filt0, dsth3, dsth4, dsth5, dsth6);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dsth3, vec1, filt1, dsth4,
                  vec3, filt1, dsth5, vec5, filt1, dsth6, vec7, filt1,
                  dsth3, dsth4, dsth5, dsth6);

        DUP4_ARG2(__lsx_vilvl_h, dsth3, dsth2, dsth4, dsth3, dsth5, dsth4,
                  dsth6, dsth5, dst32_r, dst43_r, dst54_r, dst65_r);
        DUP4_ARG2(__lsx_vilvh_h, dsth3, dsth2, dsth4, dsth3, dsth5, dsth4,
                  dsth6, dsth5, dst32_l, dst43_l, dst54_l, dst65_l);

        DUP4_ARG2(__lsx_vdp2_w_h, dst10_r, filt_h0, dst10_l, filt_h0, dst21_r,
                  filt_h0, dst21_l, filt_h0, dst0_r, dst0_l, dst1_r, dst1_l);
        DUP4_ARG2(__lsx_vdp2_w_h, dst32_r, filt_h0, dst32_l, filt_h0, dst43_r,
                  filt_h0, dst43_l, filt_h0, dst2_r, dst2_l, dst3_r, dst3_l);
        DUP4_ARG3(__lsx_vdp2add_w_h, dst0_r, dst32_r, filt_h1, dst0_l, dst32_l,
                  filt_h1, dst1_r, dst43_r, filt_h1, dst1_l, dst43_l, filt_h1,
                  dst0_r, dst0_l, dst1_r, dst1_l);
        DUP4_ARG3(__lsx_vdp2add_w_h, dst2_r, dst54_r, filt_h1, dst2_l, dst54_l,
                  filt_h1, dst3_r, dst65_r, filt_h1, dst3_l, dst65_l, filt_h1,
                  dst2_r, dst2_l, dst3_r, dst3_l);

        DUP4_ARG3(__lsx_vsrani_h_w, dst0_l, dst0_r, 6, dst1_l, dst1_r, 6, dst2_l,
                  dst2_r, 6, dst3_l, dst3_r, 6, tmp0, tmp1, tmp2, tmp3);
        DUP2_ARG3(__lsx_vssrarni_bu_h, tmp1, tmp0, 6, tmp3, tmp2, 6,
                  out0, out1);

        __lsx_vstelm_w(out0, dst_tmp, 0, 0);
        __lsx_vstelm_w(out0, dst_tmp + dst_stride, 0, 2);
        __lsx_vstelm_w(out1, dst_tmp + dst_stride_2x, 0, 0);
        __lsx_vstelm_w(out1, dst_tmp + dst_stride_3x, 0, 2);
        dst_tmp += dst_stride_4x;

        dst10_r = dst54_r;
        dst10_l = dst54_l;
        dst21_r = dst65_r;
        dst21_l = dst65_l;
        dsth2 = dsth6;
    }
}

static void hevc_hv_4t_16w_lsx(const uint8_t *src, int32_t src_stride, uint8_t *dst,
                               int32_t dst_stride, const int8_t *filter_x,
                               const int8_t *filter_y, int32_t height)
{
    hevc_hv_4t_8mult_lsx(src, src_stride, dst, dst_stride,
                         filter_x, filter_y, height, 2);
}

static void hevc_hv_4t_24w_lsx(const uint8_t *src, int32_t src_stride, uint8_t *dst,
                               int32_t dst_stride, const int8_t *filter_x,
                               const int8_t *filter_y, int32_t height)
{
    hevc_hv_4t_8mult_lsx(src, src_stride, dst, dst_stride,
                         filter_x, filter_y, height, 3);
}

static void hevc_hv_4t_32w_lsx(const uint8_t *src, int32_t src_stride, uint8_t *dst,
                               int32_t dst_stride, const int8_t *filter_x,
                               const int8_t *filter_y, int32_t height)
{
    hevc_hv_4t_8mult_lsx(src, src_stride, dst, dst_stride,
                         filter_x, filter_y, height, 4);
}

#define UNI_MC(PEL, DIR, WIDTH, TAP, DIR1, FILT_DIR)                          \
void ff_hevc_put_hevc_uni_##PEL##_##DIR##WIDTH##_8_lsx(uint8_t *dst,          \
                                                       ptrdiff_t dst_stride,  \
                                                       const uint8_t *src,    \
                                                       ptrdiff_t src_stride,  \
                                                       int height,            \
                                                       intptr_t mx,           \
                                                       intptr_t my,           \
                                                       int width)             \
{                                                                             \
    const int8_t *filter = ff_hevc_##PEL##_filters[FILT_DIR];                 \
                                                                              \
    common_##DIR1##_##TAP##t_##WIDTH##w_lsx(src, src_stride, dst, dst_stride, \
                                            filter, height);                  \
}

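/*
 * For example, UNI_MC(qpel, h, 64, 8, hz, mx) expands to
 * ff_hevc_put_hevc_uni_qpel_h64_8_lsx(), which picks the filter from
 * ff_hevc_qpel_filters[mx] and forwards to common_hz_8t_64w_lsx().
 */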
UNI_MC(qpel, h, 64, 8, hz, mx);

UNI_MC(qpel, v, 24, 8, vt, my);
UNI_MC(qpel, v, 32, 8, vt, my);
UNI_MC(qpel, v, 48, 8, vt, my);
UNI_MC(qpel, v, 64, 8, vt, my);

UNI_MC(epel, v, 24, 2, vt, my);
UNI_MC(epel, v, 32, 2, vt, my);

#undef UNI_MC

#define UNI_MC_HV(PEL, WIDTH, TAP)                                        \
void ff_hevc_put_hevc_uni_##PEL##_hv##WIDTH##_8_lsx(uint8_t *dst,         \
                                                    ptrdiff_t dst_stride, \
                                                    const uint8_t *src,   \
                                                    ptrdiff_t src_stride, \
                                                    int height,           \
                                                    intptr_t mx,          \
                                                    intptr_t my,          \
                                                    int width)            \
{                                                                         \
    const int8_t *filter_x = ff_hevc_##PEL##_filters[mx];                 \
    const int8_t *filter_y = ff_hevc_##PEL##_filters[my];                 \
                                                                          \
    hevc_hv_##TAP##t_##WIDTH##w_lsx(src, src_stride, dst, dst_stride,     \
                                    filter_x, filter_y, height);          \
}

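/*
 * For example, UNI_MC_HV(epel, 8, 4) expands to
 * ff_hevc_put_hevc_uni_epel_hv8_8_lsx(), which looks up the horizontal
 * and vertical filters from ff_hevc_epel_filters[mx] and [my] and
 * forwards to hevc_hv_4t_8w_lsx().
 */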
UNI_MC_HV(qpel, 8, 8);
UNI_MC_HV(qpel, 16, 8);
UNI_MC_HV(qpel, 24, 8);
UNI_MC_HV(qpel, 32, 8);
UNI_MC_HV(qpel, 48, 8);
UNI_MC_HV(qpel, 64, 8);

UNI_MC_HV(epel, 8, 4);
UNI_MC_HV(epel, 12, 4);
UNI_MC_HV(epel, 16, 4);
UNI_MC_HV(epel, 24, 4);
UNI_MC_HV(epel, 32, 4);

#undef UNI_MC_HV