vvc_intra_template.c
/*
 * VVC intra prediction DSP
 *
 * Copyright (C) 2021-2023 Nuomi
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "vvc_intra.h"

#define POS(x, y) src[(x) + stride * (y)]

static av_always_inline void FUNC(cclm_linear_pred)(VVCFrameContext *fc, const int x0, const int y0,
    const int w, const int h, const pixel* pdsy, const int *a, const int *b, const int *k)
{
    const VVCSPS *sps = fc->ps.sps;
    for (int i = 0; i < VVC_MAX_SAMPLE_ARRAYS - 1; i++) {
        const int c_idx = i + 1;
        const int x = x0 >> sps->hshift[c_idx];
        const int y = y0 >> sps->vshift[c_idx];
        const ptrdiff_t stride = fc->frame->linesize[c_idx] / sizeof(pixel);
        pixel *src = (pixel*)fc->frame->data[c_idx] + x + y * stride;
        for (int y = 0; y < h; y++) {
            for (int x = 0; x < w; x++) {
                const int dsy = pdsy[y * w + x];
                const int pred = ((dsy * a[i]) >> k[i]) + b[i];
                POS(x, y) = CLIP(pred);
            }
        }
    }
}
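
/*
 * Illustrative example of the CCLM linear mapping above (values are made up):
 * with a[i] = 512, k[i] = 10 and b[i] = 100, a down-sampled luma sample
 * dsy = 600 gives ((600 * 512) >> 10) + 100 = 300 + 100 = 400, which is then
 * clipped to the valid sample range by CLIP().
 */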

#define MAX_PICK_POS 4
#define TOP 0
#define LEFT 1

static av_always_inline void FUNC(cclm_get_params_default)(int *a, int *b, int *k)
{
    for (int i = 0; i < 2; i++) {
        a[i] = k[i] = 0;
        b[i] = 1 << (BIT_DEPTH - 1);
    }
}

static av_always_inline int FUNC(cclm_get_select_pos)(const VVCLocalContext *lc,
    const int x, const int y, const int w, const int h, const int avail_t, const int avail_l,
    int cnt[2], int pos[2][MAX_PICK_POS])
{
    const enum IntraPredMode mode = lc->cu->intra_pred_mode_c;
    const int num_is4 = !avail_t || !avail_l || mode != INTRA_LT_CCLM;
    int num_samp[2];

    if (mode == INTRA_LT_CCLM) {
        num_samp[TOP]  = avail_t ? w : 0;
        num_samp[LEFT] = avail_l ? h : 0;
    } else {
        num_samp[TOP]  = (avail_t && mode == INTRA_T_CCLM) ? ff_vvc_get_top_available(lc, x, y, w + FFMIN(w, h), 1) : 0;
        num_samp[LEFT] = (avail_l && mode == INTRA_L_CCLM) ? ff_vvc_get_left_available(lc, x, y, h + FFMIN(w, h), 1) : 0;
    }
    if (!num_samp[TOP] && !num_samp[LEFT]) {
        return 0;
    }
    for (int i = TOP; i <= LEFT; i++) {
        const int start = num_samp[i] >> (2 + num_is4);
        const int step  = FFMAX(1, num_samp[i] >> (1 + num_is4));
        cnt[i] = FFMIN(num_samp[i], (1 + num_is4) << 1);
        for (int c = 0; c < cnt[i]; c++)
            pos[i][c] = start + c * step;
    }
    return 1;
}
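
/*
 * Illustrative example of the position selection above (values are made up):
 * for INTRA_LT_CCLM with both edges available (num_is4 == 0) and 16 top
 * samples, cnt[TOP] = FFMIN(16, 2) = 2, start = 16 >> 2 = 4 and
 * step = 16 >> 1 = 8, so the top positions picked are 4 and 12.
 */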

static av_always_inline void FUNC(cclm_select_luma_444)(const pixel *src, const int step,
    const int cnt, const int pos[MAX_PICK_POS], pixel *sel_luma)
{
    for (int i = 0; i < cnt; i++)
        sel_luma[i] = src[pos[i] * step];
}

static av_always_inline void FUNC(cclm_select_luma)(const VVCFrameContext *fc,
    const int x0, const int y0, const int avail_t, const int avail_l, const int cnt[2], const int pos[2][MAX_PICK_POS],
    pixel *sel_luma)
{
    const VVCSPS *sps = fc->ps.sps;

    const int b_ctu_boundary = !av_mod_uintp2(y0, sps->ctb_log2_size_y);
    const int hs = sps->hshift[1];
    const int vs = sps->vshift[1];
    const ptrdiff_t stride = fc->frame->linesize[0] / sizeof(pixel);

    if (!hs && !vs) {
        const pixel* src = (pixel*)fc->frame->data[0] + x0 + y0 * stride;
        FUNC(cclm_select_luma_444)(src - avail_t * stride, 1, cnt[TOP], pos[TOP], sel_luma);
        FUNC(cclm_select_luma_444)(src - avail_l, stride, cnt[LEFT], pos[LEFT], sel_luma + cnt[TOP]);
    } else {
        // top
        if (vs && !b_ctu_boundary) {
            const pixel *source = (pixel *)fc->frame->data[0] + x0 + (y0 - 2) * stride;
            for (int i = 0; i < cnt[TOP]; i++) {
                const int x = pos[TOP][i] << hs;
                const pixel *src = source + x;
                const int has_left = x || avail_l;
                const pixel l = has_left ? POS(-1, 0) : POS(0, 0);
                if (sps->r->sps_chroma_vertical_collocated_flag) {
                    sel_luma[i] = (POS(0, -1) + l + 4 * POS(0, 0) + POS(1, 0) + POS(0, 1) + 4) >> 3;
                } else {
                    const pixel l1 = has_left ? POS(-1, 1) : POS(0, 1);
                    sel_luma[i] = (l + l1 + 2 * (POS(0, 0) + POS(0, 1)) + POS(1, 0) + POS(1, 1) + 4) >> 3;
                }
            }
        } else {
            const pixel *source = (pixel*)fc->frame->data[0] + x0 + (y0 - 1) * stride;
            for (int i = 0; i < cnt[TOP]; i++) {
                const int x = pos[TOP][i] << hs;
                const pixel *src = source + x;
                const int has_left = x || avail_l;
                const pixel l = has_left ? POS(-1, 0) : POS(0, 0);
                sel_luma[i] = (l + 2 * POS(0, 0) + POS(1, 0) + 2) >> 2;
            }
        }

        // left
        {
            const pixel *left;
            const pixel *source = (pixel *)fc->frame->data[0] + x0 + y0 * stride - (1 + hs) * avail_l;
            left = source - avail_l;

            for (int i = 0; i < cnt[LEFT]; i++) {
                const int y = pos[LEFT][i] << vs;
                const int offset = y * stride;
                const pixel *l = left + offset;
                const pixel *src = source + offset;
                pixel pred;
                if (!vs) {
                    pred = (*l + 2 * POS(0, 0) + POS(1, 0) + 2) >> 2;
                } else {
                    if (sps->r->sps_chroma_vertical_collocated_flag) {
                        const int has_top = y || avail_t;
                        const pixel t = has_top ? POS(0, -1) : POS(0, 0);
                        pred = (*l + t + 4 * POS(0, 0) + POS(1, 0) + POS(0, 1) + 4) >> 3;
                    } else {
                        pred = (*l + *(l + stride) + 2 * POS(0, 0) + 2 * POS(0, 1) + POS(1, 0) + POS(1, 1) + 4) >> 3;
                    }
                }
                sel_luma[i + cnt[TOP]] = pred;
            }
        }
    }
}

static av_always_inline void FUNC(cclm_select_chroma)(const VVCFrameContext *fc,
    const int x, const int y, const int cnt[2], const int pos[2][MAX_PICK_POS],
    pixel sel[][MAX_PICK_POS * 2])
{
    for (int c_idx = 1; c_idx < VVC_MAX_SAMPLE_ARRAYS; c_idx++) {
        const ptrdiff_t stride = fc->frame->linesize[c_idx] / sizeof(pixel);

        //top
        const pixel *src = (pixel*)fc->frame->data[c_idx] + x + (y - 1) * stride;
        for (int i = 0; i < cnt[TOP]; i++) {
            sel[c_idx][i] = src[pos[TOP][i]];
        }

        //left
        src = (pixel*)fc->frame->data[c_idx] + x - 1 + y * stride;
        for (int i = 0; i < cnt[LEFT]; i++) {
            sel[c_idx][i + cnt[TOP]] = src[pos[LEFT][i] * stride];
        }
    }
}

static av_always_inline int FUNC(cclm_select_samples)(const VVCLocalContext *lc,
    const int x0, const int y0, const int w, const int h, const int avail_t, const int avail_l,
    pixel sel[][MAX_PICK_POS * 2])
{
    const VVCFrameContext *fc = lc->fc;
    const VVCSPS *sps = fc->ps.sps;
    const int x = x0 >> sps->hshift[1];
    const int y = y0 >> sps->vshift[1];
    int cnt[2], pos[2][MAX_PICK_POS];

    if (!FUNC(cclm_get_select_pos)(lc, x, y, w, h, avail_t, avail_l, cnt, pos))
        return 0;

    FUNC(cclm_select_luma)(fc, x0, y0, avail_t, avail_l, cnt, pos, sel[LUMA]);
    FUNC(cclm_select_chroma)(fc, x, y, cnt, pos, sel);

    if (cnt[TOP] + cnt[LEFT] == 2) {
        for (int c_idx = 0; c_idx < VVC_MAX_SAMPLE_ARRAYS; c_idx++) {
            sel[c_idx][3] = sel[c_idx][0];
            sel[c_idx][2] = sel[c_idx][1];
            sel[c_idx][0] = sel[c_idx][1];
            sel[c_idx][1] = sel[c_idx][3];
        }
    }
    return 1;
}

static av_always_inline void FUNC(cclm_get_min_max)(
    const pixel sel[][MAX_PICK_POS * 2], int *min, int *max)
{
    int min_grp_idx[] = { 0, 2 };
    int max_grp_idx[] = { 1, 3 };

    if (sel[LUMA][min_grp_idx[0]] > sel[LUMA][min_grp_idx[1]])
        FFSWAP(int, min_grp_idx[0], min_grp_idx[1]);
    if (sel[LUMA][max_grp_idx[0]] > sel[LUMA][max_grp_idx[1]])
        FFSWAP(int, max_grp_idx[0], max_grp_idx[1]);
    if (sel[LUMA][min_grp_idx[0]] > sel[LUMA][max_grp_idx[1]]) {
        FFSWAP(int, min_grp_idx[0], max_grp_idx[0]);
        FFSWAP(int, min_grp_idx[1], max_grp_idx[1]);
    }
    if (sel[LUMA][min_grp_idx[1]] > sel[LUMA][max_grp_idx[0]])
        FFSWAP(int, min_grp_idx[1], max_grp_idx[0]);
    for (int c_idx = 0; c_idx < VVC_MAX_SAMPLE_ARRAYS; c_idx++) {
        max[c_idx] = (sel[c_idx][max_grp_idx[0]] + sel[c_idx][max_grp_idx[1]] + 1) >> 1;
        min[c_idx] = (sel[c_idx][min_grp_idx[0]] + sel[c_idx][min_grp_idx[1]] + 1) >> 1;
    }
}
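
/*
 * Note on the routine above: the compare/swap network partially sorts the four
 * selected samples by their luma value; the two smaller ones are averaged into
 * min[] and the two larger ones into max[], per sample array (luma, Cb, Cr).
 */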

static av_always_inline void FUNC(cclm_get_params)(const VVCLocalContext *lc,
    const int x0, const int y0, const int w, const int h, const int avail_t, const int avail_l,
    int *a, int *b, int *k)
{
    pixel sel[VVC_MAX_SAMPLE_ARRAYS][MAX_PICK_POS * 2];
    int max[VVC_MAX_SAMPLE_ARRAYS], min[VVC_MAX_SAMPLE_ARRAYS];
    int diff;

    if (!FUNC(cclm_select_samples)(lc, x0, y0, w, h, avail_t, avail_l, sel)) {
        FUNC(cclm_get_params_default)(a, b, k);
        return;
    }

    FUNC(cclm_get_min_max)(sel, min, max);

    diff = max[LUMA] - min[LUMA];
    if (diff == 0) {
        for (int i = 0; i < 2; i++) {
            a[i] = k[i] = 0;
            b[i] = min[i + 1];
        }
        return;
    }
    for (int i = 0; i < 2; i++) {
        const static int div_sig_table[] = {0, 7, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1, 1, 1, 1, 0};
        const int diffc = max[i + 1] - min[i + 1];
        int x = av_log2(diff);
        int y, v, sign, add;
        const int norm_diff = ((diff << 4) >> x) & 15;
        x += (norm_diff) ? 1 : 0;
        y = abs(diffc) > 0 ? av_log2(abs(diffc)) + 1 : 0;
        v = div_sig_table[norm_diff] | 8;
        add = (1 << y >> 1);
        a[i] = (diffc * v + add) >> y;
        k[i] = FFMAX(1, 3 + x - y);
        sign = a[i] < 0 ? -1 : (a[i] > 0);
        a[i] = ((3 + x - y) < 1) ? sign * 15 : a[i];
        b[i] = min[i + 1] - ((a[i] * min[0]) >> k[i]);
    }
}
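
/*
 * Worked example of the derivation above (values are made up): with
 * min[LUMA] = 100, max[LUMA] = 228 and, for Cb, min = 80, max = 144:
 * diff = 128 -> x = 7, norm_diff = 0; diffc = 64 -> y = 7,
 * v = div_sig_table[0] | 8 = 8, a = (64 * 8 + 64) >> 7 = 4,
 * k = FFMAX(1, 3 + 7 - 7) = 3 (a slope of 4 / 2^3 = 0.5) and
 * b = 80 - ((4 * 100) >> 3) = 30, so dsy = 100 maps to 80 and
 * dsy = 228 maps to 144, i.e. the two selected extreme points.
 */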

#undef TOP
#undef LEFT

static av_always_inline void FUNC(cclm_get_luma_rec_pixels)(const VVCFrameContext *fc,
    const int x0, const int y0, const int w, const int h, const int avail_t, const int avail_l,
    pixel *pdsy)
{
    const int hs = fc->ps.sps->hshift[1];
    const int vs = fc->ps.sps->vshift[1];
    const ptrdiff_t stride = fc->frame->linesize[0] / sizeof(pixel);
    const pixel *source = (pixel*)fc->frame->data[0] + x0 + y0 * stride;
    const pixel *left = source - avail_l;
    const pixel *top = source - avail_t * stride;

    const VVCSPS *sps = fc->ps.sps;
    if (!hs && !vs) {
        for (int i = 0; i < h; i++)
            memcpy(pdsy + i * w, source + i * stride, w * sizeof(pixel));
        return;
    }
    for (int i = 0; i < h; i++) {
        const pixel *src = source;
        const pixel *l = left;
        const pixel *t = top;
        if (!vs) {
            for (int j = 0; j < w; j++) {
                pixel pred = (*l + 2 * POS(0, 0) + POS(1, 0) + 2) >> 2;
                pdsy[i * w + j] = pred;
                src += 2;
                l = src - 1;
            }
        } else {
            if (sps->r->sps_chroma_vertical_collocated_flag) {
                for (int j = 0; j < w; j++) {
                    pixel pred = (*l + *t + 4 * POS(0, 0) + POS(1, 0) + POS(0, 1) + 4) >> 3;
                    pdsy[i * w + j] = pred;
                    src += 2;
                    t += 2;
                    l = src - 1;
                }
            } else {
                for (int j = 0; j < w; j++) {
                    pixel pred = (*l + *(l + stride) + 2 * POS(0, 0) + 2 * POS(0, 1) + POS(1, 0) + POS(1, 1) + 4) >> 3;
                    pdsy[i * w + j] = pred;
                    src += 2;
                    l = src - 1;
                }
            }
        }
        source += (stride << vs);
        left += (stride << vs);
        top = source - stride;
    }
}

static av_always_inline void FUNC(cclm_pred_default)(VVCFrameContext *fc,
    const int x, const int y, const int w, const int h, const int avail_t, const int avail_l)
{
    for (int c_idx = 1; c_idx < VVC_MAX_SAMPLE_ARRAYS; c_idx++) {
        const ptrdiff_t stride = fc->frame->linesize[c_idx] / sizeof(pixel);
        pixel *dst = (pixel*)fc->frame->data[c_idx] + x + y * stride;
        for (int i = 0; i < h; i++) {
            for (int j = 0; j < w; j++) {
                dst[j] = 1 << (BIT_DEPTH - 1);
            }
            dst += stride;
        }
    }
}

//8.4.5.2.14 Specification of INTRA_LT_CCLM, INTRA_L_CCLM and INTRA_T_CCLM intra prediction mode
static void FUNC(intra_cclm_pred)(const VVCLocalContext *lc, const int x0, const int y0,
    const int width, const int height)
{
    VVCFrameContext *fc = lc->fc;
    const VVCSPS *sps = fc->ps.sps;
    const int avail_t = ff_vvc_get_top_available(lc, x0, y0, 1, 0);
    const int avail_l = ff_vvc_get_left_available(lc, x0, y0, 1, 0);
    const int hs = sps->hshift[1];
    const int vs = sps->vshift[1];
    const int x = x0 >> hs;
    const int y = y0 >> vs;
    const int w = width >> hs;
    const int h = height >> vs;
    int a[2], b[2], k[2];
    pixel dsy[MAX_TB_SIZE * MAX_TB_SIZE];

    if (!avail_t && !avail_l) {
        FUNC(cclm_pred_default)(fc, x, y, w, h, avail_t, avail_l);
        return;
    }
    FUNC(cclm_get_luma_rec_pixels)(fc, x0, y0, w, h, avail_t, avail_l, dsy);
    FUNC(cclm_get_params)(lc, x0, y0, w, h, avail_t, avail_l, a, b, k);
    FUNC(cclm_linear_pred)(fc, x0, y0, w, h, dsy, a, b, k);
}
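
/*
 * Summary of the CCLM flow above: the collocated luma block is down-sampled to
 * the chroma grid (cclm_get_luma_rec_pixels), a linear model (a, b, k) is
 * derived per chroma component from neighbouring sample pairs (cclm_get_params),
 * and the model is applied to the down-sampled luma (cclm_linear_pred). With no
 * available neighbours, chroma falls back to the mid-level 1 << (BIT_DEPTH - 1).
 */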

static int FUNC(lmcs_sum_samples)(const pixel *start, ptrdiff_t stride, const int avail, const int target_size)
{
    const int size = FFMIN(avail, target_size);
    int sum = 0;
    for (int i = 0; i < size; i++) {
        sum += *start;
        start += stride;
    }
    sum += *(start - stride) * (target_size - size);
    return sum;
}
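
/*
 * Note on the helper above: when fewer than target_size neighbouring samples
 * are available inside the picture, the last available one is repeated so that
 * the sum always covers target_size samples.
 */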

// 8.7.5.3 Picture reconstruction with luma dependent chroma residual scaling process for chroma samples
static int FUNC(lmcs_derive_chroma_scale)(VVCLocalContext *lc, const int x0, const int y0)
{
    VVCFrameContext *fc = lc->fc;
    const VVCLMCS *lmcs = &fc->ps.lmcs;
    const int size_y = FFMIN(fc->ps.sps->ctb_size_y, 64);

    const int x = x0 & ~(size_y - 1);
    const int y = y0 & ~(size_y - 1);
    if (lc->lmcs.x_vpdu != x || lc->lmcs.y_vpdu != y) {
        int cnt = 0, luma = 0, i;
        const pixel *src = (const pixel *)(fc->frame->data[LUMA] + y * fc->frame->linesize[LUMA] + (x << fc->ps.sps->pixel_shift));
        const ptrdiff_t stride = fc->frame->linesize[LUMA] / sizeof(pixel);
        const int avail_t = ff_vvc_get_top_available (lc, x, y, 1, 0);
        const int avail_l = ff_vvc_get_left_available(lc, x, y, 1, 0);
        if (avail_l) {
            luma += FUNC(lmcs_sum_samples)(src - 1, stride, fc->ps.pps->height - y, size_y);
            cnt = size_y;
        }
        if (avail_t) {
            luma += FUNC(lmcs_sum_samples)(src - stride, 1, fc->ps.pps->width - x, size_y);
            cnt += size_y;
        }
        if (cnt)
            luma = (luma + (cnt >> 1)) >> av_log2(cnt);
        else
            luma = 1 << (BIT_DEPTH - 1);

        for (i = lmcs->min_bin_idx; i <= lmcs->max_bin_idx; i++) {
            if (luma < lmcs->pivot[i + 1])
                break;
        }
        i = FFMIN(i, LMCS_MAX_BIN_SIZE - 1);

        lc->lmcs.chroma_scale = lmcs->chroma_scale_coeff[i];
        lc->lmcs.x_vpdu = x;
        lc->lmcs.y_vpdu = y;
    }
    return lc->lmcs.chroma_scale;
}

// 8.7.5.3 Picture reconstruction with luma dependent chroma residual scaling process for chroma samples
static void FUNC(lmcs_scale_chroma)(VVCLocalContext *lc, int *dst, const int *coeff,
    const int width, const int height, const int x0_cu, const int y0_cu)
{
    const int chroma_scale = FUNC(lmcs_derive_chroma_scale)(lc, x0_cu, y0_cu);

    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            const int c = av_clip_intp2(*coeff, BIT_DEPTH);

            if (c > 0)
                *dst = (c * chroma_scale + (1 << 10)) >> 11;
            else
                *dst = -((-c * chroma_scale + (1 << 10)) >> 11);
            coeff++;
            dst++;
        }
    }
}
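
/*
 * Illustrative example of the scaling above: the 1 << 10 rounding term and the
 * shift by 11 treat chroma_scale as a fixed-point factor where 2048 stands for
 * 1.0, so with chroma_scale = 2048 a residual c = 5 maps to
 * (5 * 2048 + 1024) >> 11 = 5, and with chroma_scale = 1024 it maps to
 * (5 * 1024 + 1024) >> 11 = 3 (2.5 rounded to nearest).
 */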

static av_always_inline void FUNC(ref_filter)(const pixel *left, const pixel *top,
    pixel *filtered_left, pixel *filtered_top, const int left_size, const int top_size,
    const int unfilter_last_one)
{
    filtered_left[-1] = filtered_top[-1] = (left[0] + 2 * left[-1] + top[0] + 2) >> 2;
    for (int i = 0; i < left_size - unfilter_last_one; i++) {
        filtered_left[i] = (left[i - 1] + 2 * left[i] + left[i + 1] + 2) >> 2;
    }
    for (int i = 0; i < top_size - unfilter_last_one; i++) {
        filtered_top[i] = (top[i - 1] + 2 * top[i] + top[i + 1] + 2) >> 2;
    }
    if (unfilter_last_one) {
        filtered_top[top_size - 1] = top[top_size - 1];
        filtered_left[left_size - 1] = left[left_size - 1];
    }
}

static av_always_inline void FUNC(prepare_intra_edge_params)(const VVCLocalContext *lc,
    IntraEdgeParams* edge, const pixel *src, const ptrdiff_t stride,
    const int x, int y, int w, int h, int c_idx, const int is_intra_mip,
    const int mode, const int ref_idx, const int need_pdpc)
{
#define EXTEND(ptr, val, len) \
do { \
    for (i = 0; i < (len); i++) \
        *(ptr + i) = val; \
} while (0)
    const CodingUnit *cu = lc->cu;
    const int ref_filter_flag = is_intra_mip ? 0 : ff_vvc_ref_filter_flag_derive(mode);
    const int filter_flag = !ref_idx && w * h > 32 && !c_idx &&
        cu->isp_split_type == ISP_NO_SPLIT && ref_filter_flag;
    int cand_up_left = lc->na.cand_up_left;
    pixel *left = (pixel*)edge->left_array + MAX_TB_SIZE + 3;
    pixel *top = (pixel*)edge->top_array + MAX_TB_SIZE + 3;
    pixel *filtered_left = (pixel*)edge->filtered_left_array + MAX_TB_SIZE + 3;
    pixel *filtered_top = (pixel*)edge->filtered_top_array + MAX_TB_SIZE + 3;
    const int ref_line = ref_idx == 3 ? -4 : (-1 - ref_idx);
    int left_size, top_size, unfilter_left_size, unfilter_top_size;
    int left_available, top_available;
    int refw, refh;
    int intra_pred_angle, inv_angle;
    int i;

    if (is_intra_mip || mode == INTRA_PLANAR) {
        left_size = h + 1;
        top_size = w + 1;
        unfilter_left_size = left_size + filter_flag;
        unfilter_top_size = top_size + filter_flag;
    } else if (mode == INTRA_DC) {
        unfilter_left_size = left_size = h;
        unfilter_top_size = top_size = w;
    } else if (mode == INTRA_VERT) {
        //we may need 1 pixel to predict the top left.
        unfilter_left_size = left_size = need_pdpc ? h : 1;
        unfilter_top_size = top_size = w;
    } else if (mode == INTRA_HORZ) {
        unfilter_left_size = left_size = h;
        //even need_pdpc == 0, we may need 1 pixel to predict the top left.
        unfilter_top_size = top_size = need_pdpc ? w : 1;
    } else {
        if (cu->isp_split_type == ISP_NO_SPLIT || c_idx) {
            refw = w * 2;
            refh = h * 2;
        } else {
            refw = cu->cb_width + w;
            refh = cu->cb_height + h;
        }
        intra_pred_angle = ff_vvc_intra_pred_angle_derive(mode);
        inv_angle = ff_vvc_intra_inv_angle_derive(intra_pred_angle);
        unfilter_top_size = top_size = refw;
        unfilter_left_size = left_size = refh;
    }

    left_available = ff_vvc_get_left_available(lc, x, y, unfilter_left_size, c_idx);
    for (i = 0; i < left_available; i++)
        left[i] = POS(ref_line, i);

    top_available = ff_vvc_get_top_available(lc, x, y, unfilter_top_size, c_idx);
    memcpy(top, src + ref_line * stride, top_available * sizeof(pixel));

    for (int i = -1; i >= ref_line; i--) {
        if (cand_up_left) {
            left[i] = POS(ref_line, i);
            top[i] = POS(i, ref_line);
        } else if (left_available) {
            left[i] = top[i] = left[0];
        } else if (top_available) {
            left[i] = top[i] = top[0];
        } else {
            left[i] = top[i] = 1 << (BIT_DEPTH - 1);
        }
    }

    EXTEND(top + top_available, top[top_available - 1], unfilter_top_size - top_available);
    EXTEND(left + left_available, left[left_available - 1], unfilter_left_size - left_available);

    if (ref_filter_flag) {
        if (!ref_idx && w * h > 32 && !c_idx && cu->isp_split_type == ISP_NO_SPLIT) {
            const int unfilter_last_one = left_size == unfilter_left_size;
            FUNC(ref_filter)(left, top, filtered_left, filtered_top, unfilter_left_size, unfilter_top_size, unfilter_last_one);
            left = filtered_left;
            top = filtered_top;
        }
    }
    if (!is_intra_mip && mode != INTRA_PLANAR && mode != INTRA_DC) {
        if (ref_filter_flag || ref_idx || cu->isp_split_type != ISP_NO_SPLIT) {
            edge->filter_flag = 0;
        } else {
            const int min_dist_ver_hor = FFMIN(abs(mode - 50), abs(mode - 18));
            const int intra_hor_ver_dist_thres[] = {24, 14, 2, 0, 0};
            const int ntbs = (av_log2(w) + av_log2(h)) >> 1;
            edge->filter_flag = min_dist_ver_hor > intra_hor_ver_dist_thres[ntbs - 2];
        }

        if (mode != INTRA_VERT && mode != INTRA_HORZ) {
            if (mode >= INTRA_DIAG) {
                if (intra_pred_angle < 0) {
                    pixel *p = top - (ref_idx + 1);
                    for (int x = -h; x < 0; x++) {
                        const int idx = -1 - ref_idx + FFMIN((x * inv_angle + 256) >> 9, h);
                        p[x] = left[idx];
                    }
                } else {
                    for (int i = refw; i <= refw + FFMAX(1, w / h) * ref_idx + 1; i++)
                        top[i] = top[refw - 1];
                }
            } else {
                if (intra_pred_angle < 0) {
                    pixel *p = left - (ref_idx + 1);
                    for (int x = -w; x < 0; x++) {
                        const int idx = -1 - ref_idx + FFMIN((x * inv_angle + 256) >> 9, w);
                        p[x] = top[idx];
                    }
                } else {
                    for (int i = refh; i <= refh + FFMAX(1, h / w) * ref_idx + 1; i++)
                        left[i] = left[refh - 1];
                }
            }
        }
    }
    edge->left = (uint8_t*)left;
    edge->top = (uint8_t*)top;
}

//8.4.1 General decoding process for coding units coded in intra prediction mode
static void FUNC(intra_pred)(const VVCLocalContext *lc, int x0, int y0,
    const int width, const int height, int c_idx)
{
    VVCFrameContext *fc = lc->fc;
    const VVCSPS *sps = fc->ps.sps;
    const VVCPPS *pps = fc->ps.pps;
    const CodingUnit *cu = lc->cu;
    const int log2_min_cb_size = sps->min_cb_log2_size_y;
    const int min_cb_width = pps->min_cb_width;
    const int x_cb = x0 >> log2_min_cb_size;
    const int y_cb = y0 >> log2_min_cb_size;

    const int hshift = fc->ps.sps->hshift[c_idx];
    const int vshift = fc->ps.sps->vshift[c_idx];
    const int x = x0 >> hshift;
    const int y = y0 >> vshift;
    const int w = width >> hshift;
    const int h = height >> vshift;
    const ptrdiff_t stride = fc->frame->linesize[c_idx] / sizeof(pixel);

    const int pred_mode = c_idx ? cu->intra_pred_mode_c : cu->intra_pred_mode_y;
    const int mode = ff_vvc_wide_angle_mode_mapping(cu, w, h, c_idx, pred_mode);

    const int intra_mip_flag = SAMPLE_CTB(fc->tab.imf, x_cb, y_cb);
    const int is_intra_mip = intra_mip_flag && (!c_idx || cu->mip_chroma_direct_flag);
    const int ref_idx = c_idx ? 0 : cu->intra_luma_ref_idx;
    const int need_pdpc = ff_vvc_need_pdpc(w, h, cu->bdpcm_flag[c_idx], mode, ref_idx);

    pixel *src = (pixel*)fc->frame->data[c_idx] + x + y * stride;
    IntraEdgeParams edge;

    FUNC(prepare_intra_edge_params)(lc, &edge, src, stride, x, y, w, h, c_idx, is_intra_mip, mode, ref_idx, need_pdpc);

    if (is_intra_mip) {
        int intra_mip_transposed_flag = SAMPLE_CTB(fc->tab.imtf, x_cb, y_cb);
        int intra_mip_mode = SAMPLE_CTB(fc->tab.imm, x_cb, y_cb);

        fc->vvcdsp.intra.pred_mip((uint8_t *)src, edge.top, edge.left,
            w, h, stride, intra_mip_mode, intra_mip_transposed_flag);
    } else if (mode == INTRA_PLANAR) {
        fc->vvcdsp.intra.pred_planar((uint8_t *)src, edge.top, edge.left, w, h, stride);
    } else if (mode == INTRA_DC) {
        fc->vvcdsp.intra.pred_dc((uint8_t *)src, edge.top, edge.left, w, h, stride);
    } else if (mode == INTRA_VERT) {
        fc->vvcdsp.intra.pred_v((uint8_t *)src, edge.top, w, h, stride);
    } else if (mode == INTRA_HORZ) {
        fc->vvcdsp.intra.pred_h((uint8_t *)src, edge.left, w, h, stride);
    } else {
        if (mode >= INTRA_DIAG) {
            fc->vvcdsp.intra.pred_angular_v((uint8_t *)src, edge.top, edge.left,
                w, h, stride, c_idx, mode, ref_idx,
                edge.filter_flag, need_pdpc);
        } else {
            fc->vvcdsp.intra.pred_angular_h((uint8_t *)src, edge.top, edge.left,
                w, h, stride, c_idx, mode, ref_idx,
                edge.filter_flag, need_pdpc);
        }
    }
    if (need_pdpc) {
        //8.4.5.2.15 Position-dependent intra prediction sample filtering process
        if (!is_intra_mip && (mode == INTRA_PLANAR || mode == INTRA_DC ||
            mode == INTRA_VERT || mode == INTRA_HORZ)) {
            const int scale = (av_log2(w) + av_log2(h) - 2) >> 2;
            const pixel *left = (pixel*)edge.left;
            const pixel *top = (pixel*)edge.top;
            for (int y = 0; y < h; y++) {
                for (int x = 0; x < w; x++) {
                    int l, t, wl, wt, pred;
                    pixel val;
                    if (mode == INTRA_PLANAR || mode == INTRA_DC) {
                        l = left[y];
                        t = top[x];
                        wl = 32 >> FFMIN((x << 1) >> scale, 31);
                        wt = 32 >> FFMIN((y << 1) >> scale, 31);
                    } else {
                        l = left[y] - left[-1] + POS(x, y);
                        t = top[x] - top[-1] + POS(x, y);
                        wl = (mode == INTRA_VERT) ? (32 >> FFMIN((x << 1) >> scale, 31)) : 0;
                        wt = (mode == INTRA_HORZ) ? (32 >> FFMIN((y << 1) >> scale, 31)) : 0;
                    }
                    val = POS(x, y);
                    pred = val + ((wl * (l - val) + wt * (t - val) + 32) >> 6);
                    POS(x, y) = CLIP(pred);
                }
            }
        }
    }
}
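
/*
 * Note on the PDPC pass above: for PLANAR/DC/VERT/HORZ the prediction is
 * blended with the unfiltered left and top references; the weight
 * wl = 32 >> ((x << 1) >> scale) starts at 32 and decays toward 0 with the
 * distance from the block edge. E.g. for a 16x16 block,
 * scale = (4 + 4 - 2) >> 2 = 1, so wl is 32, 16, 8, 4, ... along a row.
 */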

//8.4.5.2.11 Specification of INTRA_PLANAR intra prediction mode
static av_always_inline void FUNC(pred_planar)(uint8_t *_src, const uint8_t *_top,
    const uint8_t *_left, const int w, const int h, const ptrdiff_t stride)
{
    int x, y;
    pixel *src = (pixel *)_src;
    const pixel *top = (const pixel *)_top;
    const pixel *left = (const pixel *)_left;
    const int logw = av_log2(w);
    const int logh = av_log2(h);
    const int size = w * h;
    const int shift = (logw + logh + 1);
    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++) {
            const int pred_v = ((h - 1 - y) * top[x] + (y + 1) * left[h]) << logw;
            const int pred_h = ((w - 1 - x) * left[y] + (x + 1) * top[w]) << logh;
            const int pred = (pred_v + pred_h + size) >> shift;
            POS(x, y) = pred;
        }
    }
}
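
/*
 * Worked example of the planar formula above for a 4x4 block (made-up samples):
 * logw = logh = 2, shift = 5, size = 16. At (0, 0) with top[0] = 100,
 * left[4] = 60, left[0] = 80 and top[4] = 120:
 * pred_v = (3 * 100 + 1 * 60) << 2 = 1440,
 * pred_h = (3 * 80 + 1 * 120) << 2 = 1440,
 * pred = (1440 + 1440 + 16) >> 5 = 90.
 */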

//8.4.5.2.3 MIP boundary sample downsampling process
static av_always_inline void FUNC(mip_downsampling)(int *reduced, const int boundary_size,
    const pixel *ref, const int n_tb_s)
{
    const int b_dwn = n_tb_s / boundary_size;
    const int log2 = av_log2(b_dwn);

    if (boundary_size == n_tb_s) {
        for (int i = 0; i < n_tb_s; i++)
            reduced[i] = ref[i];
        return;
    }
    for (int i = 0; i < boundary_size; i++) {
        int r;
        r = *ref++;
        for (int j = 1; j < b_dwn; j++)
            r += *ref++;
        reduced[i] = (r + (1 << (log2 - 1))) >> log2;
    }
}
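
/*
 * Illustrative example of the downsampling above: a 16-sample edge reduced to
 * boundary_size = 4 gives b_dwn = 4 and log2 = 2, so each reduced value is the
 * rounded average of 4 consecutive reference samples: (r + 2) >> 2.
 */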

static av_always_inline void FUNC(mip_reduced_pred)(pixel *src, const ptrdiff_t stride,
    const int up_hor, const int up_ver, const int pred_size, const int *reduced, const int reduced_size,
    const int ow, const int temp0, const uint8_t *matrix, int is_transposed)
{
    src = &POS(up_hor - 1, up_ver - 1);
    for (int y = 0; y < pred_size; y++) {
        for (int x = 0; x < pred_size; x++) {
            int pred = 0;
            for (int i = 0; i < reduced_size; i++)
                pred += reduced[i] * matrix[i];
            matrix += reduced_size;
            pred = ((pred + ow) >> 6) + temp0;
            pred = av_clip(pred, 0, (1 << BIT_DEPTH) - 1);
            if (is_transposed)
                POS(y * up_hor, x * up_ver) = pred;
            else
                POS(x * up_hor, y * up_ver) = pred;
        }
    }
}

static av_always_inline void FUNC(mip_upsampling_1d)(pixel *dst, const int dst_step, const int dst_stride, const int dst_height, const int factor,
    const pixel *boundary, const int boundary_step, const int pred_size)
{
    for (int i = 0; i < dst_height; i++) {
        const pixel *before = boundary;
        const pixel *after = dst - dst_step;
        pixel *d = dst;
        for (int j = 0; j < pred_size; j++) {
            after += dst_step * factor;
            for (int k = 1; k < factor; k++) {
                int mid = (factor - k) * (*before) + k * (*after);
                *d = (mid + factor / 2) / factor;
                d += dst_step;
            }
            before = after;
            d += dst_step;
        }
        boundary += boundary_step;
        dst += dst_stride;
    }
}

//8.4.5.2.2 Matrix-based intra sample prediction
static av_always_inline void FUNC(pred_mip)(uint8_t *_src, const uint8_t *_top,
    const uint8_t *_left, const int w, const int h, const ptrdiff_t stride,
    int mode_id, int is_transposed)
{
    pixel *src = (pixel *)_src;
    const pixel *top = (const pixel *)_top;
    const pixel *left = (const pixel *)_left;

    const int size_id = ff_vvc_get_mip_size_id(w, h);
    static const int boundary_sizes[] = {2, 4, 4};
    static const int pred_sizes[] = {4, 4, 8};
    const int boundary_size = boundary_sizes[size_id];
    const int pred_size = pred_sizes[size_id];
    const int in_size = 2 * boundary_size - ((size_id == 2) ? 1 : 0);
    const uint8_t *matrix = ff_vvc_get_mip_matrix(size_id, mode_id);
    const int up_hor = w / pred_size;
    const int up_ver = h / pred_size;

    int reduced[16];
    int *red_t = reduced;
    int *red_l = reduced + boundary_size;
    int off = 1, ow = 0;
    int temp0;

    if (is_transposed) {
        FFSWAP(int*, red_t, red_l);
    }
    FUNC(mip_downsampling)(red_t, boundary_size, top, w);
    FUNC(mip_downsampling)(red_l, boundary_size, left, h);

    temp0 = reduced[0];
    if (size_id != 2) {
        off = 0;
        ow = (1 << (BIT_DEPTH - 1)) - temp0;
    } else {
        ow = reduced[1] - temp0;
    }
    reduced[0] = ow;
    for (int i = 1; i < in_size; i++) {
        reduced[i] = reduced[i + off] - temp0;
        ow += reduced[i];
    }
    ow = 32 - 32 * ow;

    FUNC(mip_reduced_pred)(src, stride, up_hor, up_ver, pred_size, reduced, in_size, ow, temp0, matrix, is_transposed);
    if (up_hor > 1 || up_ver > 1) {
        if (up_hor > 1)
            FUNC(mip_upsampling_1d)(&POS(0, up_ver - 1), 1, up_ver * stride, pred_size, up_hor, left + up_ver - 1, up_ver, pred_size);
        if (up_ver > 1)
            FUNC(mip_upsampling_1d)(src, stride, 1, w, up_ver, top, 1, pred_size);
    }
}
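
/*
 * Summary of the MIP flow above: the top and left references are reduced to a
 * small boundary vector (mip_downsampling), the vector is offset relative to
 * its first sample (or the mid-level value, depending on size_id) and
 * multiplied by the trained matrix to form a pred_size x pred_size core
 * prediction (mip_reduced_pred), and, when the block is larger than the core,
 * the result is linearly interpolated horizontally and/or vertically
 * (mip_upsampling_1d).
 */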

static av_always_inline pixel FUNC(pred_dc_val)(const pixel *top, const pixel *left,
    const int w, const int h)
{
    pixel dc_val;
    int sum = 0;
    unsigned int offset = (w == h) ? (w << 1) : FFMAX(w, h);
    const int shift = av_log2(offset);
    offset >>= 1;
    if (w >= h) {
        for (int i = 0; i < w; i++)
            sum += top[i];
    }
    if (w <= h) {
        for (int i = 0; i < h; i++)
            sum += left[i];
    }
    dc_val = (sum + offset) >> shift;
    return dc_val;
}
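
/*
 * Illustrative example of the DC value above: for a square 8x8 block both
 * edges contribute, offset starts at 16 so shift = 4 and offset becomes 8,
 * giving dc = (sum of 16 samples + 8) >> 4; for a 16x8 block only the 16 top
 * samples are summed (w >= h), also with shift = 4.
 */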

//8.4.5.2.12 Specification of INTRA_DC intra prediction mode
static av_always_inline void FUNC(pred_dc)(uint8_t *_src, const uint8_t *_top,
    const uint8_t *_left, const int w, const int h, const ptrdiff_t stride)
{
    int x, y;
    pixel *src = (pixel *)_src;
    const pixel *top = (const pixel *)_top;
    const pixel *left = (const pixel *)_left;
    const pixel dc = FUNC(pred_dc_val)(top, left, w, h);
    const pixel4 a = PIXEL_SPLAT_X4(dc);
    for (y = 0; y < h; y++) {
        pixel *s = src;
        for (x = 0; x < w; x += 4) {
            AV_WN4P(s, a);
            s += 4;
        }
        src += stride;
    }
}

static av_always_inline void FUNC(pred_v)(uint8_t *_src, const uint8_t *_top,
    const int w, const int h, const ptrdiff_t stride)
{
    pixel *src = (pixel *)_src;
    const pixel *top = (const pixel *)_top;
    for (int y = 0; y < h; y++) {
        memcpy(src, top, sizeof(pixel) * w);
        src += stride;
    }
}

static void FUNC(pred_h)(uint8_t *_src, const uint8_t *_left, const int w, const int h,
    const ptrdiff_t stride)
{
    pixel *src = (pixel *)_src;
    const pixel *left = (const pixel *)_left;
    for (int y = 0; y < h; y++) {
        const pixel4 a = PIXEL_SPLAT_X4(left[y]);
        for (int x = 0; x < w; x += 4) {
            AV_WN4P(&POS(x, y), a);
        }
    }
}

#define INTRA_LUMA_FILTER(p)   CLIP((p[0] * f[0] + p[1] * f[1] + p[2] * f[2] + p[3] * f[3] + 32) >> 6)
#define INTRA_CHROMA_FILTER(p) (((32 - fact) * p[1] + fact * p[2] + 16) >> 5)

//8.4.5.2.13 Specification of INTRA_ANGULAR2..INTRA_ANGULAR66 intra prediction modes
static void FUNC(pred_angular_v)(uint8_t *_src, const uint8_t *_top, const uint8_t *_left,
    const int w, const int h, const ptrdiff_t stride, const int c_idx, const int mode,
    const int ref_idx, const int filter_flag, const int need_pdpc)
{
    pixel *src = (pixel *)_src;
    const pixel *left = (const pixel *)_left;
    const pixel *top = (const pixel *)_top - (1 + ref_idx);
    const int intra_pred_angle = ff_vvc_intra_pred_angle_derive(mode);
    int pos = (1 + ref_idx) * intra_pred_angle;
    const int dp = intra_pred_angle;
    const int is_luma = !c_idx;
    int nscale, inv_angle;

    if (need_pdpc) {
        inv_angle = ff_vvc_intra_inv_angle_derive(intra_pred_angle);
        nscale = ff_vvc_nscale_derive(w, h, mode);
    }

    for (int y = 0; y < h; y++) {
        const int idx = (pos >> 5) + ref_idx;
        const int fact = pos & 31;
        if (!fact && (!is_luma || !filter_flag)) {
            for (int x = 0; x < w; x++) {
                const pixel *p = top + x + idx + 1;
                POS(x, y) = *p;
            }
        } else {
            if (!c_idx) {
                const int8_t *f = ff_vvc_intra_luma_filter[filter_flag][fact];
                for (int x = 0; x < w; x++) {
                    const pixel *p = top + x + idx;
                    POS(x, y) = INTRA_LUMA_FILTER(p);
                }
            } else {
                for (int x = 0; x < w; x++) {
                    const pixel *p = top + x + idx;
                    POS(x, y) = INTRA_CHROMA_FILTER(p);
                }
            }
        }
        if (need_pdpc) {
            int inv_angle_sum = 256 + inv_angle;
            for (int x = 0; x < FFMIN(w, 3 << nscale); x++) {
                const pixel l = left[y + (inv_angle_sum >> 9)];
                const pixel val = POS(x, y);
                const int wl = 32 >> ((x << 1) >> nscale);
                const int pred = val + (((l - val) * wl + 32) >> 6);
                POS(x, y) = CLIP(pred);
                inv_angle_sum += inv_angle;
            }
        }
        pos += dp;
    }
}
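
/*
 * Note on the angular loop above: pos accumulates the prediction angle in
 * 1/32-sample units per row, idx = pos >> 5 is the integer reference offset
 * and fact = pos & 31 the fractional phase; e.g. intra_pred_angle = 18 with
 * ref_idx = 0 gives idx = 0, fact = 18 for the first row. Luma uses a 4-tap
 * filter whose coefficients sum to 64 (hence the + 32 and >> 6), chroma a
 * 2-tap linear one.
 */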

//8.4.5.2.13 Specification of INTRA_ANGULAR2..INTRA_ANGULAR66 intra prediction modes
static void FUNC(pred_angular_h)(uint8_t *_src, const uint8_t *_top, const uint8_t *_left,
    const int w, const int h, const ptrdiff_t stride, const int c_idx, const int mode,
    const int ref_idx, const int filter_flag, const int need_pdpc)
{
    pixel *src = (pixel *)_src;
    const pixel *left = (const pixel *)_left - (1 + ref_idx);
    const pixel *top = (const pixel *)_top;
    const int is_luma = !c_idx;
    const int intra_pred_angle = ff_vvc_intra_pred_angle_derive(mode);
    const int dp = intra_pred_angle;
    int nscale = 0, inv_angle, inv_angle_sum;

    if (need_pdpc) {
        inv_angle = ff_vvc_intra_inv_angle_derive(intra_pred_angle);
        inv_angle_sum = 256 + inv_angle;
        nscale = ff_vvc_nscale_derive(w, h, mode);
    }

    for (int y = 0; y < h; y++) {
        int pos = (1 + ref_idx) * intra_pred_angle;
        int wt;
        if (need_pdpc)
            wt = (32 >> FFMIN(31, (y * 2) >> nscale));

        for (int x = 0; x < w; x++) {
            const int idx = (pos >> 5) + ref_idx;
            const int fact = pos & 31;
            const pixel *p = left + y + idx;
            int pred;
            if (!fact && (!is_luma || !filter_flag)) {
                pred = p[1];
            } else {
                if (!c_idx) {
                    const int8_t *f = ff_vvc_intra_luma_filter[filter_flag][fact];
                    pred = INTRA_LUMA_FILTER(p);
                } else {
                    pred = INTRA_CHROMA_FILTER(p);
                }
            }
            if (need_pdpc) {
                if (y < (3 << nscale)) {
                    const pixel t = top[x + (inv_angle_sum >> 9)];
                    pred = CLIP(pred + (((t - pred) * wt + 32) >> 6));
                }
            }
            POS(x, y) = pred;
            pos += dp;
        }
        if (need_pdpc)
            inv_angle_sum += inv_angle;
    }
}

static void FUNC(ff_vvc_intra_dsp_init)(VVCIntraDSPContext *const intra)
{
    intra->lmcs_scale_chroma = FUNC(lmcs_scale_chroma);
    intra->intra_cclm_pred = FUNC(intra_cclm_pred);
    intra->intra_pred = FUNC(intra_pred);
    intra->pred_planar = FUNC(pred_planar);
    intra->pred_mip = FUNC(pred_mip);
    intra->pred_dc = FUNC(pred_dc);
    intra->pred_v = FUNC(pred_v);
    intra->pred_h = FUNC(pred_h);
    intra->pred_angular_v = FUNC(pred_angular_v);
    intra->pred_angular_h = FUNC(pred_angular_h);
}