vf_nnedi.c
/*
 * Copyright (C) 2010-2011 Kevin Stone
 * Copyright (C) 2016 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <float.h>

#include "libavutil/common.h"
#include "libavutil/file_open.h"
#include "libavutil/float_dsp.h"
#include "libavutil/imgutils.h"
#include "libavutil/mem_internal.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"

static const size_t NNEDI_WEIGHTS_SIZE = 13574928;
static const uint8_t NNEDI_XDIM[] = { 8, 16, 32, 48, 8, 16, 32 };
static const uint8_t NNEDI_YDIM[] = { 6, 6, 6, 6, 4, 4, 4 };
static const uint16_t NNEDI_NNS[] = { 16, 32, 64, 128, 256 };

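/*
 * The filter carries two kinds of network weights: a small "prescreener"
 * network that cheaply classifies each missing pixel, and a bank of larger
 * "predictor" networks (selected by the etype/nns/nsize options) that
 * synthesize the pixels the prescreener could not dismiss.
 */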
typedef struct PrescreenerCoefficients {
    DECLARE_ALIGNED(32, float, kernel_l0)[4][16 * 4];
    DECLARE_ALIGNED(32, float, bias_l0)[4];

    DECLARE_ALIGNED(32, float, kernel_l1)[4][4];
    DECLARE_ALIGNED(32, float, bias_l1)[4];

    DECLARE_ALIGNED(32, float, kernel_l2)[4][8];
    DECLARE_ALIGNED(32, float, bias_l2)[4];
} PrescreenerCoefficients;

typedef struct PredictorCoefficients {
    int xdim, ydim, nns, nsize;
    float *data;
    float *softmax_q1;
    float *elliott_q1;
    float *softmax_bias_q1;
    float *elliott_bias_q1;
    float *softmax_q2;
    float *elliott_q2;
    float *softmax_bias_q2;
    float *elliott_bias_q2;
} PredictorCoefficients;

typedef struct NNEDIContext {
    const AVClass *class;

    char *weights_file;

    AVFrame *prev;
    int eof;
    int64_t pts;

    AVFloatDSPContext *fdsp;
    int depth;
    int nb_planes;
    int nb_threads;
    int linesize[4];
    int planewidth[4];
    int planeheight[4];
    int field_n;

    PrescreenerCoefficients prescreener[4];
    PredictorCoefficients coeffs[2][5][7];

    float half;
    float in_scale;
    float out_scale;

    // Parameters
    int deint;
    int field;
    int process_plane;
    int nsize;
    int nnsparam;
    int qual;
    int etype;
    int pscrn;

    int input_size;
    uint8_t **prescreen_buf;
    float **input_buf;
    float **output_buf;

    void (*read)(const uint8_t *src, float *dst,
                 int src_stride, int dst_stride,
                 int width, int height, float scale);
    void (*write)(const float *src, uint8_t *dst,
                  int src_stride, int dst_stride,
                  int width, int height, int depth, float scale);
    void (*prescreen[2])(AVFilterContext *ctx,
                         const void *src, ptrdiff_t src_stride,
                         uint8_t *prescreen, int N,
                         const PrescreenerCoefficients *const coeffs);
} NNEDIContext;

#define OFFSET(x) offsetof(NNEDIContext, x)
#define RFLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption nnedi_options[] = {
    {"weights", "set weights file", OFFSET(weights_file), AV_OPT_TYPE_STRING, {.str="nnedi3_weights.bin"}, 0, 0, FLAGS },
    {"deint", "set which frames to deinterlace", OFFSET(deint), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, RFLAGS, .unit = "deint" },
    {"all", "deinterlace all frames", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, RFLAGS, .unit = "deint" },
    {"interlaced", "only deinterlace frames marked as interlaced", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, RFLAGS, .unit = "deint" },
    {"field", "set mode of operation", OFFSET(field), AV_OPT_TYPE_INT, {.i64=-1}, -2, 3, RFLAGS, .unit = "field" },
    {"af", "use frame flags, both fields", 0, AV_OPT_TYPE_CONST, {.i64=-2}, 0, 0, RFLAGS, .unit = "field" },
    {"a", "use frame flags, single field", 0, AV_OPT_TYPE_CONST, {.i64=-1}, 0, 0, RFLAGS, .unit = "field" },
    {"t", "use top field only", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, RFLAGS, .unit = "field" },
    {"b", "use bottom field only", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, RFLAGS, .unit = "field" },
    {"tf", "use both fields, top first", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, RFLAGS, .unit = "field" },
    {"bf", "use both fields, bottom first", 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, RFLAGS, .unit = "field" },
    {"planes", "set which planes to process", OFFSET(process_plane), AV_OPT_TYPE_INT, {.i64=7}, 0, 15, RFLAGS },
    {"nsize", "set size of local neighborhood around each pixel, used by the predictor neural network", OFFSET(nsize), AV_OPT_TYPE_INT, {.i64=6}, 0, 6, RFLAGS, .unit = "nsize" },
    {"s8x6", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, RFLAGS, .unit = "nsize" },
    {"s16x6", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, RFLAGS, .unit = "nsize" },
    {"s32x6", NULL, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, RFLAGS, .unit = "nsize" },
    {"s48x6", NULL, 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, RFLAGS, .unit = "nsize" },
    {"s8x4", NULL, 0, AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, RFLAGS, .unit = "nsize" },
    {"s16x4", NULL, 0, AV_OPT_TYPE_CONST, {.i64=5}, 0, 0, RFLAGS, .unit = "nsize" },
    {"s32x4", NULL, 0, AV_OPT_TYPE_CONST, {.i64=6}, 0, 0, RFLAGS, .unit = "nsize" },
    {"nns", "set number of neurons in predictor neural network", OFFSET(nnsparam), AV_OPT_TYPE_INT, {.i64=1}, 0, 4, RFLAGS, .unit = "nns" },
    {"n16", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, RFLAGS, .unit = "nns" },
    {"n32", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, RFLAGS, .unit = "nns" },
    {"n64", NULL, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, RFLAGS, .unit = "nns" },
    {"n128", NULL, 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, RFLAGS, .unit = "nns" },
    {"n256", NULL, 0, AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, RFLAGS, .unit = "nns" },
    {"qual", "set quality", OFFSET(qual), AV_OPT_TYPE_INT, {.i64=1}, 1, 2, RFLAGS, .unit = "qual" },
    {"fast", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, RFLAGS, .unit = "qual" },
    {"slow", NULL, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, RFLAGS, .unit = "qual" },
    {"etype", "set which set of weights to use in the predictor", OFFSET(etype), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, RFLAGS, .unit = "etype" },
    {"a", "weights trained to minimize absolute error", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, RFLAGS, .unit = "etype" },
    {"abs","weights trained to minimize absolute error", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, RFLAGS, .unit = "etype" },
    {"s", "weights trained to minimize squared error", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, RFLAGS, .unit = "etype" },
    {"mse","weights trained to minimize squared error", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, RFLAGS, .unit = "etype" },
    {"pscrn", "set prescreening", OFFSET(pscrn), AV_OPT_TYPE_INT, {.i64=2}, 0, 4, RFLAGS, .unit = "pscrn" },
    {"none", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, RFLAGS, .unit = "pscrn" },
    {"original", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, RFLAGS, .unit = "pscrn" },
    {"new", NULL, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, RFLAGS, .unit = "pscrn" },
    {"new2", NULL, 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, RFLAGS, .unit = "pscrn" },
    {"new3", NULL, 0, AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, RFLAGS, .unit = "pscrn" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(nnedi);

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    const NNEDIContext *const s = ctx->priv;

    outlink->time_base = av_mul_q(ctx->inputs[0]->time_base, (AVRational){1, 2});
    outlink->w = ctx->inputs[0]->w;
    outlink->h = ctx->inputs[0]->h;

    if (s->field == -2 || s->field > 1)
        outlink->frame_rate = av_mul_q(ctx->inputs[0]->frame_rate,
                                       (AVRational){2, 1});

    return 0;
}

static const enum AVPixelFormat pix_fmts[] = {
    AV_PIX_FMT_GRAY8,
    AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY14, AV_PIX_FMT_GRAY16,
    AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
    AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
    AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
    AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
    AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
    AV_PIX_FMT_YUVJ411P,
    AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
    AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
    AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
    AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV440P10, AV_PIX_FMT_YUV444P10,
    AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV440P12, AV_PIX_FMT_YUV444P12,
    AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
    AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
    AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
    AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA444P12, AV_PIX_FMT_YUVA444P16,
    AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA422P16,
    AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA420P16,
    AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
    AV_PIX_FMT_NONE
};

static float dot_dsp(const NNEDIContext *const s, const float *kernel, const float *input,
                     int n, float scale, float bias)
{
    float sum, y;

    sum = s->fdsp->scalarproduct_float(kernel, input, n);

    y = sum * scale + bias + 1e-20f;

    return y;
}

static float elliott(float x)
{
    return x / (1.0f + fabsf(x));
}

static void transform_elliott(float *input, int size)
{
    for (int i = 0; i < size; i++)
        input[i] = elliott(input[i]);
}

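/*
 * Original prescreener (pscrn=original): for every missing pixel, feed a
 * 12x4 float window through a tiny 3-layer network. A nonzero value in
 * prescreen[] marks the pixel as "easy": it will be filled by the cheap
 * 4-tap cubic interpolation instead of the predictor network.
 */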
static void process_old(AVFilterContext *ctx,
                        const void *src, ptrdiff_t src_stride,
                        uint8_t *prescreen, int N,
                        const PrescreenerCoefficients *const m_data)
{
    NNEDIContext *s = ctx->priv;
    const float *src_p = src;

    // Adjust source pointer to point to top-left of filter window.
    const float *window = src_p - 2 * src_stride - 5;

    for (int j = 0; j < N; j++) {
        LOCAL_ALIGNED_32(float, input, [48]);
        float state[12];

        for (int i = 0; i < 4; i++)
            memcpy(input + i * 12, window + i * src_stride + j, 12 * sizeof(float));

        // Layer 0.
        for (int n = 0; n < 4; n++)
            state[n] = dot_dsp(s, m_data->kernel_l0[n], input, 48, 1.0f, m_data->bias_l0[n]);
        transform_elliott(state + 1, 3);

        // Layer 1.
        for (int n = 0; n < 4; n++)
            state[n + 4] = dot_dsp(s, m_data->kernel_l1[n], state, 4, 1.0f, m_data->bias_l1[n]);
        transform_elliott(state + 4, 3);

        // Layer 2.
        for (int n = 0; n < 4; n++)
            state[n + 8] = dot_dsp(s, m_data->kernel_l2[n], state, 8, 1.0f, m_data->bias_l2[n]);

        prescreen[j] = FFMAX(state[10], state[11]) <= FFMAX(state[8], state[9]) ? 255 : 0;
    }
}

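/*
 * Newer prescreener (pscrn=new*): classifies four output pixels at a time
 * from a 16x4 window using two layers; state[n + 4] > 0 marks a pixel as
 * safe to interpolate cubically.
 */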
static void process_new(AVFilterContext *ctx,
                        const void *src, ptrdiff_t src_stride,
                        uint8_t *prescreen, int N,
                        const PrescreenerCoefficients *const m_data)
{
    NNEDIContext *s = ctx->priv;
    const float *src_p = src;

    // Adjust source pointer to point to top-left of filter window.
    const float *window = src_p - 2 * src_stride - 6;

    for (int j = 0; j < N; j += 4) {
        LOCAL_ALIGNED_32(float, input, [64]);
        float state[8];

        for (int i = 0; i < 4; i++)
            memcpy(input + i * 16, window + i * src_stride + j, 16 * sizeof(float));

        for (int n = 0; n < 4; n++)
            state[n] = dot_dsp(s, m_data->kernel_l0[n], input, 64, 1.0f, m_data->bias_l0[n]);
        transform_elliott(state, 4);

        for (int n = 0; n < 4; n++)
            state[n + 4] = dot_dsp(s, m_data->kernel_l1[n], state, 4, 1.0f, m_data->bias_l1[n]);

        for (int n = 0; n < 4; n++)
            prescreen[j + n] = state[n + 4] > 0.f;
    }
}

static int filter_offset(int nn, const PredictorCoefficients *const model)
{
    return nn * model->nsize;
}

static const float *softmax_q1_filter(int nn,
                                      const PredictorCoefficients *const model)
{
    return model->softmax_q1 + filter_offset(nn, model);
}

static const float *elliott_q1_filter(int nn,
                                      const PredictorCoefficients *const model)
{
    return model->elliott_q1 + filter_offset(nn, model);
}

static const float *softmax_q2_filter(int nn,
                                      const PredictorCoefficients *const model)
{
    return model->softmax_q2 + filter_offset(nn, model);
}

static const float *elliott_q2_filter(int nn,
                                      const PredictorCoefficients *const model)
{
    return model->elliott_q2 + filter_offset(nn, model);
}

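/*
 * Copy the xdim*ydim neighborhood into buf and return its statistics:
 * mstd[0] = mean, mstd[1] = standard deviation, mstd[2] = 1/stddev
 * (0 for flat areas); mstd[3] is zeroed and later accumulates the
 * prediction.
 */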
static void gather_input(const float *src, ptrdiff_t src_stride,
                         float *buf, float mstd[4],
                         const PredictorCoefficients *const model)
{
    const float scale = 1.f / model->nsize;
    float sum = 0.f;
    float sum_sq = 0.f;
    float tmp;

    for (int i = 0; i < model->ydim; i++) {
        memcpy(buf, src, model->xdim * sizeof(float));

        for (int j = 0; j < model->xdim; j++) {
            const float val = src[j];

            sum += val;
            sum_sq += val * val;
        }

        src += src_stride;
        buf += model->xdim;
    }

    mstd[0] = sum * scale;
    mstd[3] = 0.f;

    tmp = sum_sq * scale - mstd[0] * mstd[0];
    if (tmp < FLT_EPSILON) {
        mstd[1] = 0.0f;
        mstd[2] = 0.0f;
    } else {
        mstd[1] = sqrtf(tmp);
        mstd[2] = 1.0f / mstd[1];
    }
}

static float softmax_exp(float x)
{
    return expf(av_clipf(x, -80.f, 80.f));
}

static void transform_softmax_exp(float *input, int size)
{
    for (int i = 0; i < size; i++)
        input[i] = softmax_exp(input[i]);
}

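/*
 * Weighted average of elliott-activated outputs with softmax weights:
 * mstd[3] += 5 * sum(softmax[i] * elliott(el[i])) / sum(softmax[i]),
 * rescaled by the local standard deviation and re-centered on the mean.
 */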
static void wae5(const float *softmax, const float *el,
                 int n, float mstd[4])
{
    float vsum = 0.0f, wsum = 0.0f;

    for (int i = 0; i < n; i++) {
        vsum += softmax[i] * elliott(el[i]);
        wsum += softmax[i];
    }

    if (wsum > 1e-10f)
        mstd[3] += (5.0f * vsum) / wsum * mstd[1] + mstd[0];
    else
        mstd[3] += mstd[0];
}

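/*
 * Full predictor network. For every pixel not caught by the prescreener,
 * normalize the local window by its mean/stddev, evaluate nns softmax
 * neurons and nns elliott neurons, and combine them with wae5(). With
 * use_q2 (qual=slow), a second, independently trained set of weights is
 * evaluated and the two predictions are averaged.
 */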
static void predictor(AVFilterContext *ctx,
                      const void *src, ptrdiff_t src_stride, void *dst,
                      const uint8_t *prescreen, int N,
                      const PredictorCoefficients *const model, int use_q2)
{
    const NNEDIContext *const s = ctx->priv;
    const float *src_p = src;
    float *dst_p = dst;

    // Adjust source pointer to point to top-left of filter window.
    const float *window = src_p - (model->ydim / 2) * src_stride - (model->xdim / 2 - 1);
    const int filter_size = model->nsize;
    const int nns = model->nns;

    for (int i = 0; i < N; i++) {
        LOCAL_ALIGNED_32(float, input, [48 * 6]);
        float activation[256 * 2];
        float mstd[4];
        float scale;

        if (prescreen[i])
            continue;

        gather_input(window + i, src_stride, input, mstd, model);
        scale = mstd[2];

        for (int nn = 0; nn < nns; nn++)
            activation[nn] = dot_dsp(s, softmax_q1_filter(nn, model), input, filter_size, scale, model->softmax_bias_q1[nn]);

        for (int nn = 0; nn < nns; nn++)
            activation[nns + nn] = dot_dsp(s, elliott_q1_filter(nn, model), input, filter_size, scale, model->elliott_bias_q1[nn]);

        transform_softmax_exp(activation, nns);
        wae5(activation, activation + nns, nns, mstd);

        if (use_q2) {
            for (int nn = 0; nn < nns; nn++)
                activation[nn] = dot_dsp(s, softmax_q2_filter(nn, model), input, filter_size, scale, model->softmax_bias_q2[nn]);

            for (int nn = 0; nn < nns; nn++)
                activation[nns + nn] = dot_dsp(s, elliott_q2_filter(nn, model), input, filter_size, scale, model->elliott_bias_q2[nn]);

            transform_softmax_exp(activation, nns);
            wae5(activation, activation + nns, nns, mstd);
        }

        dst_p[i] = mstd[3] * (use_q2 ? 0.5f : 1.f);
    }
}

static void read_bytes(const uint8_t *src, float *dst,
                       int src_stride, int dst_stride,
                       int width, int height, float scale)
{
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < 32; x++)
            dst[-x - 1] = src[x];

        for (int x = 0; x < width; x++)
            dst[x] = src[x];

        for (int x = 0; x < 32; x++)
            dst[width + x] = src[width - x - 1];

        dst += dst_stride;
        src += src_stride;
    }
}

static void read_words(const uint8_t *srcp, float *dst,
                       int src_stride, int dst_stride,
                       int width, int height, float scale)
{
    const uint16_t *src = (const uint16_t *)srcp;

    src_stride /= 2;

    for (int y = 0; y < height; y++) {
        for (int x = 0; x < 32; x++)
            dst[-x - 1] = src[x] * scale;

        for (int x = 0; x < width; x++)
            dst[x] = src[x] * scale;

        for (int x = 0; x < 32; x++)
            dst[width + x] = src[width - x - 1] * scale;

        dst += dst_stride;
        src += src_stride;
    }
}

static void write_bytes(const float *src, uint8_t *dst,
                        int src_stride, int dst_stride,
                        int width, int height, int depth,
                        float scale)
{
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++)
            dst[x] = av_clip_uint8(src[x]);

        dst += dst_stride;
        src += src_stride;
    }
}

static void write_words(const float *src, uint8_t *dstp,
                        int src_stride, int dst_stride,
                        int width, int height, int depth,
                        float scale)
{
    uint16_t *dst = (uint16_t *)dstp;

    dst_stride /= 2;

    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++)
            dst[x] = av_clip_uintp2_c(src[x] * scale, depth);

        dst += dst_stride;
        src += src_stride;
    }
}

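/*
 * Cheap fallback for prescreened pixels: a 4-tap cubic filter with weights
 * (-3, 19, 19, -3)/32 applied vertically to the two field lines above and
 * the two below the missing line.
 */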
static void interpolation(const void *src, ptrdiff_t src_stride,
                          void *dst, const uint8_t *prescreen, int n)
{
    const float *src_p = src;
    float *dst_p = dst;
    const float *window = src_p - 2 * src_stride;

    for (int i = 0; i < n; i++) {
        float accum = 0.0f;

        if (!prescreen[i])
            continue;

        accum += (-3.0f / 32.0f) * window[0 * src_stride + i];
        accum += (19.0f / 32.0f) * window[1 * src_stride + i];
        accum += (19.0f / 32.0f) * window[2 * src_stride + i];
        accum += (-3.0f / 32.0f) * window[3 * src_stride + i];

        dst_p[i] = accum;
    }
}

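/*
 * Per-slice worker: for each processed plane, copy the lines of the kept
 * field, mirror-pad and convert the neighboring field lines to float, then
 * prescreen, predict and interpolate the missing lines before converting
 * the result back to the output pixel format.
 */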
static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    const NNEDIContext *const s = ctx->priv;
    AVFrame *out = arg;
    AVFrame *in = s->prev;
    const float in_scale = s->in_scale;
    const float out_scale = s->out_scale;
    const int depth = s->depth;
    const int interlaced = !!(in->flags & AV_FRAME_FLAG_INTERLACED);
    const int tff = s->field_n == (s->field < 0 ? interlaced ? !!(in->flags & AV_FRAME_FLAG_TOP_FIELD_FIRST) : 1 :
                                   (s->field & 1) ^ 1);

    for (int p = 0; p < s->nb_planes; p++) {
        const int height = s->planeheight[p];
        const int width = s->planewidth[p];
        const int slice_start = 2 * ((height / 2 * jobnr) / nb_jobs);
        const int slice_end = 2 * ((height / 2 * (jobnr+1)) / nb_jobs);
        const uint8_t *src_data = in->data[p];
        uint8_t *dst_data = out->data[p];
        uint8_t *dst = out->data[p] + slice_start * out->linesize[p];
        const int src_linesize = in->linesize[p];
        const int dst_linesize = out->linesize[p];
        uint8_t *prescreen_buf = s->prescreen_buf[jobnr];
        float *srcbuf = s->input_buf[jobnr];
        const int srcbuf_stride = width + 64;
        float *dstbuf = s->output_buf[jobnr];
        const int dstbuf_stride = width;
        const int slice_height = (slice_end - slice_start) / 2;
        const int last_slice = slice_end == height;
        const uint8_t *in_line;
        uint8_t *out_line;
        int y_out;

        if (!(s->process_plane & (1 << p))) {
            av_image_copy_plane(dst, out->linesize[p],
                                in->data[p] + slice_start * in->linesize[p],
                                in->linesize[p],
                                s->linesize[p], slice_end - slice_start);
            continue;
        }

        // Copy the field that is present in the source.
        y_out = slice_start + (tff ^ (slice_start & 1));
        in_line = src_data + (y_out * src_linesize);
        out_line = dst_data + (y_out * dst_linesize);

        while (y_out < slice_end) {
            memcpy(out_line, in_line, s->linesize[p]);
            y_out += 2;
            in_line += src_linesize * 2;
            out_line += dst_linesize * 2;
        }

        y_out = slice_start + ((!tff) ^ (slice_start & 1));

        s->read(src_data + FFMAX(y_out - 5, tff) * src_linesize,
                srcbuf + 32,
                src_linesize * 2, srcbuf_stride,
                width, 1, in_scale);
        srcbuf += srcbuf_stride;

        s->read(src_data + FFMAX(y_out - 3, tff) * src_linesize,
                srcbuf + 32,
                src_linesize * 2, srcbuf_stride,
                width, 1, in_scale);
        srcbuf += srcbuf_stride;

        s->read(src_data + FFMAX(y_out - 1, tff) * src_linesize,
                srcbuf + 32,
                src_linesize * 2, srcbuf_stride,
                width, 1, in_scale);
        srcbuf += srcbuf_stride;

        in_line = src_data + FFMIN(y_out + 1, height - 1 - !tff) * src_linesize;
        out_line = dst_data + (y_out * dst_linesize);

        s->read(in_line, srcbuf + 32, src_linesize * 2, srcbuf_stride,
                width, slice_height - last_slice, in_scale);

        y_out += (slice_height - last_slice) * 2;

        s->read(src_data + FFMIN(y_out + 1, height - 1 - !tff) * src_linesize,
                srcbuf + 32 + srcbuf_stride * (slice_height - last_slice),
                src_linesize * 2, srcbuf_stride,
                width, 1, in_scale);

        s->read(src_data + FFMIN(y_out + 3, height - 1 - !tff) * src_linesize,
                srcbuf + 32 + srcbuf_stride * (slice_height + 1 - last_slice),
                src_linesize * 2, srcbuf_stride,
                width, 1, in_scale);

        s->read(src_data + FFMIN(y_out + 5, height - 1 - !tff) * src_linesize,
                srcbuf + 32 + srcbuf_stride * (slice_height + 2 - last_slice),
                src_linesize * 2, srcbuf_stride,
                width, 1, in_scale);

        for (int y = 0; y < slice_end - slice_start; y += 2) {
            if (s->pscrn > 0)
                s->prescreen[s->pscrn > 1](ctx, srcbuf + (y / 2) * srcbuf_stride + 32,
                                           srcbuf_stride, prescreen_buf, width,
                                           &s->prescreener[s->pscrn - 1]);

            predictor(ctx,
                      srcbuf + (y / 2) * srcbuf_stride + 32,
                      srcbuf_stride,
                      dstbuf + (y / 2) * dstbuf_stride,
                      prescreen_buf, width,
                      &s->coeffs[s->etype][s->nnsparam][s->nsize], s->qual == 2);

            if (s->pscrn > 0)
                interpolation(srcbuf + (y / 2) * srcbuf_stride + 32,
                              srcbuf_stride,
                              dstbuf + (y / 2) * dstbuf_stride,
                              prescreen_buf, width);
        }

        s->write(dstbuf, out_line, dstbuf_stride, dst_linesize * 2,
                 width, slice_height, depth, out_scale);
    }

    return 0;
}

static int get_frame(AVFilterContext *ctx, int is_second)
{
    NNEDIContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *dst;

    dst = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!dst)
        return AVERROR(ENOMEM);
    av_frame_copy_props(dst, s->prev);
#if FF_API_INTERLACED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    dst->interlaced_frame = 0;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    dst->flags &= ~AV_FRAME_FLAG_INTERLACED;
    dst->pts = s->pts;

    ff_filter_execute(ctx, filter_slice, dst, NULL,
                      FFMIN(s->planeheight[1] / 2, s->nb_threads));

    if (s->field == -2 || s->field > 1)
        s->field_n = !s->field_n;

    return ff_filter_frame(outlink, dst);
}

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    NNEDIContext *s = ctx->priv;
    int ret;

    if (!s->prev) {
        s->prev = in;
        return 0;
    }

    if ((s->deint && !(s->prev->flags & AV_FRAME_FLAG_INTERLACED)) || ctx->is_disabled) {
        s->prev->pts *= 2;
        ret = ff_filter_frame(ctx->outputs[0], s->prev);
        s->prev = in;
        return ret;
    }

    s->pts = s->prev->pts * 2;
    ret = get_frame(ctx, 0);
    if (ret < 0 || (s->field > -2 && s->field < 2)) {
        av_frame_free(&s->prev);
        s->prev = in;
        return ret;
    }

    s->pts = s->prev->pts + in->pts;
    ret = get_frame(ctx, 1);
    av_frame_free(&s->prev);
    s->prev = in;
    return ret;
}

static int request_frame(AVFilterLink *link)
{
    AVFilterContext *ctx = link->src;
    NNEDIContext *s = ctx->priv;
    int ret;

    if (s->eof)
        return AVERROR_EOF;

    ret = ff_request_frame(ctx->inputs[0]);

    if (ret == AVERROR_EOF && s->prev) {
        AVFrame *next = av_frame_clone(s->prev);

        if (!next)
            return AVERROR(ENOMEM);

        next->pts = s->prev->pts + av_rescale_q(1, av_inv_q(ctx->outputs[0]->frame_rate),
                                                ctx->outputs[0]->time_base);
        s->eof = 1;

        ret = filter_frame(ctx->inputs[0], next);
    } else if (ret < 0) {
        return ret;
    }

    return ret;
}

static void copy_weights(float *dst, int n, const float **data)
{
    memcpy(dst, *data, n * sizeof(float));
    *data += n;
}

static float *allocate(float **ptr, int size)
{
    float *ret = *ptr;

    *ptr += size;

    return ret;
}

static int allocate_model(PredictorCoefficients *coeffs, int xdim, int ydim, int nns)
{
    int filter_size = nns * xdim * ydim;
    int bias_size = nns;
    float *data;

    data = av_calloc(filter_size + bias_size, 4 * sizeof(float));
    if (!data)
        return AVERROR(ENOMEM);

    coeffs->data = data;
    coeffs->xdim = xdim;
    coeffs->ydim = ydim;
    coeffs->nsize = xdim * ydim;
    coeffs->nns = nns;

    coeffs->softmax_q1 = allocate(&data, filter_size);
    coeffs->elliott_q1 = allocate(&data, filter_size);
    coeffs->softmax_bias_q1 = allocate(&data, bias_size);
    coeffs->elliott_bias_q1 = allocate(&data, bias_size);

    coeffs->softmax_q2 = allocate(&data, filter_size);
    coeffs->elliott_q2 = allocate(&data, filter_size);
    coeffs->softmax_bias_q2 = allocate(&data, bias_size);
    coeffs->elliott_bias_q2 = allocate(&data, bias_size);

    return 0;
}

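/*
 * Parse nnedi3_weights.bin (already loaded into bdata): first the original
 * prescreener, then the three "new" prescreeners (whose layer-0/1 kernels
 * are stored shuffled), then 2 (etype) x 5 (nns) x 7 (nsize) predictor
 * models, each as softmax/elliott filters followed by their biases.
 */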
static int read_weights(AVFilterContext *ctx, const float *bdata)
{
    NNEDIContext *s = ctx->priv;
    int ret;

    copy_weights(&s->prescreener[0].kernel_l0[0][0], 4 * 48, &bdata);
    copy_weights(s->prescreener[0].bias_l0, 4, &bdata);

    copy_weights(&s->prescreener[0].kernel_l1[0][0], 4 * 4, &bdata);
    copy_weights(s->prescreener[0].bias_l1, 4, &bdata);

    copy_weights(&s->prescreener[0].kernel_l2[0][0], 4 * 8, &bdata);
    copy_weights(s->prescreener[0].bias_l2, 4, &bdata);

    for (int i = 0; i < 3; i++) {
        PrescreenerCoefficients *data = &s->prescreener[i + 1];
        float kernel_l0_shuffled[4 * 64];
        float kernel_l1_shuffled[4 * 4];

        copy_weights(kernel_l0_shuffled, 4 * 64, &bdata);
        copy_weights(data->bias_l0, 4, &bdata);

        copy_weights(kernel_l1_shuffled, 4 * 4, &bdata);
        copy_weights(data->bias_l1, 4, &bdata);

        for (int n = 0; n < 4; n++) {
            for (int k = 0; k < 64; k++)
                data->kernel_l0[n][k] = kernel_l0_shuffled[(k / 8) * 32 + n * 8 + k % 8];
            for (int k = 0; k < 4; k++)
                data->kernel_l1[n][k] = kernel_l1_shuffled[k * 4 + n];
        }
    }

    for (int m = 0; m < 2; m++) {
        // Grouping by neuron count.
        for (int i = 0; i < 5; i++) {
            const int nns = NNEDI_NNS[i];

            // Grouping by window size.
            for (int j = 0; j < 7; j++) {
                PredictorCoefficients *model = &s->coeffs[m][i][j];
                const int xdim = NNEDI_XDIM[j];
                const int ydim = NNEDI_YDIM[j];
                const int filter_size = xdim * ydim;

                ret = allocate_model(model, xdim, ydim, nns);
                if (ret < 0)
                    return ret;

                // Quality 1 model. NNS[i] * (XDIM[j] * YDIM[j]) * 2 coefficients.
                copy_weights(model->softmax_q1, nns * filter_size, &bdata);
                copy_weights(model->elliott_q1, nns * filter_size, &bdata);

                // Quality 1 model bias. NNS[i] * 2 coefficients.
                copy_weights(model->softmax_bias_q1, nns, &bdata);
                copy_weights(model->elliott_bias_q1, nns, &bdata);

                // Quality 2 model. NNS[i] * (XDIM[j] * YDIM[j]) * 2 coefficients.
                copy_weights(model->softmax_q2, nns * filter_size, &bdata);
                copy_weights(model->elliott_q2, nns * filter_size, &bdata);

                // Quality 2 model bias. NNS[i] * 2 coefficients.
                copy_weights(model->softmax_bias_q2, nns, &bdata);
                copy_weights(model->elliott_bias_q2, nns, &bdata);
            }
        }
    }

    return 0;
}

static float mean(const float *input, int size)
{
    float sum = 0.f;

    for (int i = 0; i < size; i++)
        sum += input[i];

    return sum / size;
}

static void transform(float *input, int size, float mean, float half)
{
    for (int i = 0; i < size; i++)
        input[i] = (input[i] - mean) / half;
}

static void subtract_mean_old(PrescreenerCoefficients *coeffs, float half)
{
    for (int n = 0; n < 4; n++) {
        float m = mean(coeffs->kernel_l0[n], 48);

        transform(coeffs->kernel_l0[n], 48, m, half);
    }
}

static void subtract_mean_new(PrescreenerCoefficients *coeffs, float half)
{
    for (int n = 0; n < 4; n++) {
        float m = mean(coeffs->kernel_l0[n], 64);

        transform(coeffs->kernel_l0[n], 64, m, half);
    }
}

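/*
 * Remove DC components from the predictor weights: subtract each filter's
 * own mean and, for the softmax filters, the pointwise mean across all
 * neurons (plus the mean bias), so the filters respond to local detail
 * rather than the absolute level of the window.
 */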
static void subtract_mean_predictor(PredictorCoefficients *model)
{
    const int filter_size = model->nsize;
    const int nns = model->nns;
    const float scale = 1.f / nns;

    double softmax_means[256]; // Average of individual softmax filters.
    double elliott_means[256]; // Average of individual elliott filters.
    double mean_filter[48 * 6] = { 0 }; // Pointwise average of all softmax filters.
    double mean_bias;

    // Quality 1.
    for (int nn = 0; nn < nns; nn++) {
        softmax_means[nn] = mean(model->softmax_q1 + nn * filter_size, filter_size);
        elliott_means[nn] = mean(model->elliott_q1 + nn * filter_size, filter_size);

        for (int k = 0; k < filter_size; k++)
            mean_filter[k] += model->softmax_q1[nn * filter_size + k] - softmax_means[nn];
    }

    for (int k = 0; k < filter_size; k++)
        mean_filter[k] *= scale;

    mean_bias = mean(model->softmax_bias_q1, nns);

    for (int nn = 0; nn < nns; nn++) {
        for (int k = 0; k < filter_size; k++) {
            model->softmax_q1[nn * filter_size + k] -= softmax_means[nn] + mean_filter[k];
            model->elliott_q1[nn * filter_size + k] -= elliott_means[nn];
        }
        model->softmax_bias_q1[nn] -= mean_bias;
    }

    // Quality 2.
    memset(mean_filter, 0, sizeof(mean_filter));

    for (int nn = 0; nn < nns; nn++) {
        softmax_means[nn] = mean(model->softmax_q2 + nn * filter_size, filter_size);
        elliott_means[nn] = mean(model->elliott_q2 + nn * filter_size, filter_size);

        for (int k = 0; k < filter_size; k++) {
            mean_filter[k] += model->softmax_q2[nn * filter_size + k] - softmax_means[nn];
        }
    }

    for (int k = 0; k < filter_size; k++)
        mean_filter[k] *= scale;

    mean_bias = mean(model->softmax_bias_q2, nns);

    for (int nn = 0; nn < nns; nn++) {
        for (int k = 0; k < filter_size; k++) {
            model->softmax_q2[nn * filter_size + k] -= softmax_means[nn] + mean_filter[k];
            model->elliott_q2[nn * filter_size + k] -= elliott_means[nn];
        }

        model->softmax_bias_q2[nn] -= mean_bias;
    }
}

static av_cold int init(AVFilterContext *ctx)
{
    NNEDIContext *s = ctx->priv;
    FILE *weights_file = NULL;
    int64_t weights_size;
    float *bdata;
    size_t bytes_read;
    int ret = 0;

    weights_file = avpriv_fopen_utf8(s->weights_file, "rb");
    if (!weights_file) {
        av_log(ctx, AV_LOG_ERROR, "No weights file provided, aborting!\n");
        return AVERROR(EINVAL);
    }

    if (fseek(weights_file, 0, SEEK_END)) {
        av_log(ctx, AV_LOG_ERROR, "Couldn't seek to the end of weights file.\n");
        fclose(weights_file);
        return AVERROR(EINVAL);
    }

    weights_size = ftell(weights_file);

    if (weights_size == -1) {
        fclose(weights_file);
        av_log(ctx, AV_LOG_ERROR, "Couldn't get size of weights file.\n");
        return AVERROR(EINVAL);
    } else if (weights_size != NNEDI_WEIGHTS_SIZE) {
        fclose(weights_file);
        av_log(ctx, AV_LOG_ERROR, "Unexpected weights file size.\n");
        return AVERROR(EINVAL);
    }

    if (fseek(weights_file, 0, SEEK_SET)) {
        fclose(weights_file);
        av_log(ctx, AV_LOG_ERROR, "Couldn't seek to the start of weights file.\n");
        return AVERROR(EINVAL);
    }

    bdata = av_malloc(NNEDI_WEIGHTS_SIZE);
    if (!bdata) {
        fclose(weights_file);
        return AVERROR(ENOMEM);
    }

    bytes_read = fread(bdata, 1, NNEDI_WEIGHTS_SIZE, weights_file);
    if (bytes_read != NNEDI_WEIGHTS_SIZE) {
        fclose(weights_file);
        ret = AVERROR_INVALIDDATA;
        av_log(ctx, AV_LOG_ERROR, "Couldn't read weights file.\n");
        goto fail;
    }

    fclose(weights_file);

    s->fdsp = avpriv_float_dsp_alloc(0);
    if (!s->fdsp) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    ret = read_weights(ctx, bdata);
    if (ret < 0)
        goto fail;

fail:
    av_free(bdata);
    return ret;
}

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    NNEDIContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int ret;

    s->depth = desc->comp[0].depth;
    s->nb_threads = ff_filter_get_nb_threads(ctx);
    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
    if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
        return ret;

    s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0] = s->planewidth[3] = inlink->w;
    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->half = ((1 << 8) - 1) / 2.f;
    s->out_scale = 1 << (s->depth - 8);
    s->in_scale = 1.f / s->out_scale;

    switch (s->depth) {
    case 8:
        s->read = read_bytes;
        s->write = write_bytes;
        break;
    default:
        s->read = read_words;
        s->write = write_words;
        break;
    }

    subtract_mean_old(&s->prescreener[0], s->half);
    subtract_mean_new(&s->prescreener[1], s->half);
    subtract_mean_new(&s->prescreener[2], s->half);
    subtract_mean_new(&s->prescreener[3], s->half);

    s->prescreen[0] = process_old;
    s->prescreen[1] = process_new;

    for (int i = 0; i < 2; i++) {
        for (int j = 0; j < 5; j++) {
            for (int k = 0; k < 7; k++)
                subtract_mean_predictor(&s->coeffs[i][j][k]);
        }
    }

    s->input_size = (s->planewidth[0] + 64) * (s->planeheight[0] + 6);
    s->input_buf = av_calloc(s->nb_threads, sizeof(*s->input_buf));
    if (!s->input_buf)
        return AVERROR(ENOMEM);

    for (int i = 0; i < s->nb_threads; i++) {
        s->input_buf[i] = av_calloc(s->input_size, sizeof(**s->input_buf));
        if (!s->input_buf[i])
            return AVERROR(ENOMEM);
    }

    s->output_buf = av_calloc(s->nb_threads, sizeof(*s->output_buf));
    if (!s->output_buf)
        return AVERROR(ENOMEM);

    for (int i = 0; i < s->nb_threads; i++) {
        s->output_buf[i] = av_calloc(s->input_size, sizeof(**s->output_buf));
        if (!s->output_buf[i])
            return AVERROR(ENOMEM);
    }

    s->prescreen_buf = av_calloc(s->nb_threads, sizeof(*s->prescreen_buf));
    if (!s->prescreen_buf)
        return AVERROR(ENOMEM);

    for (int i = 0; i < s->nb_threads; i++) {
        s->prescreen_buf[i] = av_calloc(s->planewidth[0], sizeof(**s->prescreen_buf));
        if (!s->prescreen_buf[i])
            return AVERROR(ENOMEM);
    }

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    NNEDIContext *s = ctx->priv;

    for (int i = 0; i < s->nb_threads && s->prescreen_buf; i++)
        av_freep(&s->prescreen_buf[i]);

    av_freep(&s->prescreen_buf);

    for (int i = 0; i < s->nb_threads && s->input_buf; i++)
        av_freep(&s->input_buf[i]);

    av_freep(&s->input_buf);

    for (int i = 0; i < s->nb_threads && s->output_buf; i++)
        av_freep(&s->output_buf[i]);

    av_freep(&s->output_buf);
    av_freep(&s->fdsp);

    for (int i = 0; i < 2; i++) {
        for (int j = 0; j < 5; j++) {
            for (int k = 0; k < 7; k++) {
                av_freep(&s->coeffs[i][j][k].data);
            }
        }
    }

    av_frame_free(&s->prev);
}

static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
};

static const AVFilterPad outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
};

const AVFilter ff_vf_nnedi = {
    .name          = "nnedi",
    .description   = NULL_IF_CONFIG_SMALL("Apply neural network edge directed interpolation intra-only deinterlacer."),
    .priv_size     = sizeof(NNEDIContext),
    .priv_class    = &nnedi_class,
    .init          = init,
    .uninit        = uninit,
    FILTER_INPUTS(inputs),
    FILTER_OUTPUTS(outputs),
    FILTER_PIXFMTS_ARRAY(pix_fmts),
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
    .process_command = ff_filter_process_command,
};