vf_convolve.c
/*
 * Copyright (c) 2017 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config_components.h"

#include <float.h>

#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/tx.h"

#include "avfilter.h"
#include "framesync.h"
#include "internal.h"

#define MAX_THREADS 16

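/*
 * Per-plane FFT state: a forward and an inverse transform context is kept
 * for every worker thread (up to MAX_THREADS), together with padded complex
 * buffers for the main input, the impulse/secondary input and the
 * intermediate horizontal/vertical passes.
 */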
typedef struct ConvolveContext {
    const AVClass *class;
    FFFrameSync fs;

    AVTXContext *fft[4][MAX_THREADS];
    AVTXContext *ifft[4][MAX_THREADS];

    av_tx_fn tx_fn[4];
    av_tx_fn itx_fn[4];

    int fft_len[4];
    int planewidth[4];
    int planeheight[4];

    int primarywidth[4];
    int primaryheight[4];

    int secondarywidth[4];
    int secondaryheight[4];

    AVComplexFloat *fft_hdata_in[4];
    AVComplexFloat *fft_vdata_in[4];
    AVComplexFloat *fft_hdata_out[4];
    AVComplexFloat *fft_vdata_out[4];
    AVComplexFloat *fft_hdata_impulse_in[4];
    AVComplexFloat *fft_vdata_impulse_in[4];
    AVComplexFloat *fft_hdata_impulse_out[4];
    AVComplexFloat *fft_vdata_impulse_out[4];

    int depth;
    int planes;
    int impulse;
    float noise;
    int nb_planes;
    int got_impulse[4];

    void (*get_input)(struct ConvolveContext *s, AVComplexFloat *fft_hdata,
                      AVFrame *in, int w, int h, int n, int plane, float scale);

    void (*get_output)(struct ConvolveContext *s, AVComplexFloat *input, AVFrame *out,
                       int w, int h, int n, int plane, float scale);
    void (*prepare_impulse)(AVFilterContext *ctx, AVFrame *impulsepic, int plane);

    int (*filter)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
} ConvolveContext;

#define OFFSET(x) offsetof(ConvolveContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption convolve_options[] = {
    { "planes", "set planes to convolve", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=7}, 0, 15, FLAGS },
    { "impulse", "when to process impulses", OFFSET(impulse), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS, "impulse" },
    { "first", "process only first impulse, ignore rest", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "impulse" },
    { "all", "process all impulses", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "impulse" },
    { "noise", "set noise", OFFSET(noise), AV_OPT_TYPE_FLOAT, {.dbl=0.0000001}, 0, 1, FLAGS },
    { NULL },
};

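/*
 * Example use (illustrative only, not part of the original source):
 * convolve the first input with a kernel image supplied as the second input:
 *
 *   ffmpeg -i input.mkv -i kernel.png \
 *          -filter_complex "[0:v][1:v]convolve=planes=7:impulse=first" output.mkv
 */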
static const enum AVPixelFormat pixel_fmts_fftfilt[] = {
    AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
    AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
    AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
    AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
    AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
    AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
    AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
    AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
    AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
    AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
    AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
    AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
    AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
    AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
    AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
    AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY14, AV_PIX_FMT_GRAY16,
    AV_PIX_FMT_NONE
};

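/*
 * Per-plane setup: the transform size is the smallest power of two that is
 * >= max(width, height) of the plane (1 << av_log2(2 * n - 1)), and eight
 * fft_len x fft_len complex buffers are allocated per plane.
 */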
static int config_input(AVFilterLink *inlink)
{
    ConvolveContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    const int w = inlink->w;
    const int h = inlink->h;

    s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(w, desc->log2_chroma_w);
    s->planewidth[0] = s->planewidth[3] = w;
    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = h;

    s->nb_planes = desc->nb_components;
    s->depth = desc->comp[0].depth;

    for (int i = 0; i < s->nb_planes; i++) {
        int w = s->planewidth[i];
        int h = s->planeheight[i];
        int n = FFMAX(w, h);

        s->fft_len[i] = 1 << (av_log2(2 * n - 1));

        if (!(s->fft_hdata_in[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(AVComplexFloat))))
            return AVERROR(ENOMEM);

        if (!(s->fft_hdata_out[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(AVComplexFloat))))
            return AVERROR(ENOMEM);

        if (!(s->fft_vdata_in[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(AVComplexFloat))))
            return AVERROR(ENOMEM);

        if (!(s->fft_vdata_out[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(AVComplexFloat))))
            return AVERROR(ENOMEM);

        if (!(s->fft_hdata_impulse_in[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(AVComplexFloat))))
            return AVERROR(ENOMEM);

        if (!(s->fft_vdata_impulse_in[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(AVComplexFloat))))
            return AVERROR(ENOMEM);

        if (!(s->fft_hdata_impulse_out[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(AVComplexFloat))))
            return AVERROR(ENOMEM);

        if (!(s->fft_vdata_impulse_out[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(AVComplexFloat))))
            return AVERROR(ENOMEM);
    }

    return 0;
}

static int config_input_impulse(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;

    if (ctx->inputs[0]->w != ctx->inputs[1]->w ||
        ctx->inputs[0]->h != ctx->inputs[1]->h) {
        av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must be same.\n");
        return AVERROR(EINVAL);
    }

    return 0;
}

typedef struct ThreadData {
    AVComplexFloat *hdata_in, *vdata_in;
    AVComplexFloat *hdata_out, *vdata_out;
    int plane, n;
} ThreadData;

static int fft_horizontal(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolveContext *s = ctx->priv;
    ThreadData *td = arg;
    AVComplexFloat *hdata_in = td->hdata_in;
    AVComplexFloat *hdata_out = td->hdata_out;
    const int plane = td->plane;
    const int n = td->n;
    int start = (n * jobnr) / nb_jobs;
    int end = (n * (jobnr+1)) / nb_jobs;
    int y;

    for (y = start; y < end; y++) {
        s->tx_fn[plane](s->fft[plane][jobnr], hdata_out + y * n, hdata_in + y * n, sizeof(AVComplexFloat));
    }

    return 0;
}

#define SQR(x) ((x) * (x))

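/*
 * Input loader used by xcorrelate: subtracts the plane mean, divides by the
 * standard deviation and zero-pads the normalized samples into the n x n
 * transform grid.
 */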
static void get_zeropadded_input(ConvolveContext *s,
                                 AVComplexFloat *fft_hdata,
                                 AVFrame *in, int w, int h,
                                 int n, int plane, float scale)
{
    float sum = 0.f;
    float mean, dev;
    int y, x;

    if (s->depth == 8) {
        for (y = 0; y < h; y++) {
            const uint8_t *src = in->data[plane] + in->linesize[plane] * y;

            for (x = 0; x < w; x++)
                sum += src[x];
        }

        mean = sum / (w * h);
        sum = 0.f;
        for (y = 0; y < h; y++) {
            const uint8_t *src = in->data[plane] + in->linesize[plane] * y;

            for (x = 0; x < w; x++)
                sum += SQR(src[x] - mean);
        }

        dev = sqrtf(sum / (w * h));
        scale /= dev;
        for (y = 0; y < h; y++) {
            const uint8_t *src = in->data[plane] + in->linesize[plane] * y;

            for (x = 0; x < w; x++) {
                fft_hdata[y * n + x].re = (src[x] - mean) * scale;
                fft_hdata[y * n + x].im = 0;
            }

            for (x = w; x < n; x++) {
                fft_hdata[y * n + x].re = 0;
                fft_hdata[y * n + x].im = 0;
            }
        }

        for (y = h; y < n; y++) {
            for (x = 0; x < n; x++) {
                fft_hdata[y * n + x].re = 0;
                fft_hdata[y * n + x].im = 0;
            }
        }
    } else {
        for (y = 0; y < h; y++) {
            const uint16_t *src = (const uint16_t *)(in->data[plane] + in->linesize[plane] * y);

            for (x = 0; x < w; x++)
                sum += src[x];
        }

        mean = sum / (w * h);
        sum = 0.f;
        for (y = 0; y < h; y++) {
            const uint16_t *src = (const uint16_t *)(in->data[plane] + in->linesize[plane] * y);

            for (x = 0; x < w; x++)
                sum += SQR(src[x] - mean);
        }

        dev = sqrtf(sum / (w * h));
        scale /= dev;
        for (y = 0; y < h; y++) {
            const uint16_t *src = (const uint16_t *)(in->data[plane] + in->linesize[plane] * y);

            for (x = 0; x < w; x++) {
                fft_hdata[y * n + x].re = (src[x] - mean) * scale;
                fft_hdata[y * n + x].im = 0;
            }

            for (x = w; x < n; x++) {
                fft_hdata[y * n + x].re = 0;
                fft_hdata[y * n + x].im = 0;
            }
        }

        for (y = h; y < n; y++) {
            for (x = 0; x < n; x++) {
                fft_hdata[y * n + x].re = 0;
                fft_hdata[y * n + x].im = 0;
            }
        }
    }
}

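/*
 * Input loader used by convolve/deconvolve: the plane is centred in the
 * n x n grid and the borders are padded by replicating the outermost
 * samples, which softens the discontinuities that circular convolution
 * would otherwise wrap into the picture.
 */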
static void get_input(ConvolveContext *s, AVComplexFloat *fft_hdata,
                      AVFrame *in, int w, int h, int n, int plane, float scale)
{
    const int iw = (n - w) / 2, ih = (n - h) / 2;
    int y, x;

    if (s->depth == 8) {
        for (y = 0; y < h; y++) {
            const uint8_t *src = in->data[plane] + in->linesize[plane] * y;

            for (x = 0; x < w; x++) {
                fft_hdata[(y + ih) * n + iw + x].re = src[x] * scale;
                fft_hdata[(y + ih) * n + iw + x].im = 0;
            }

            for (x = 0; x < iw; x++) {
                fft_hdata[(y + ih) * n + x].re = fft_hdata[(y + ih) * n + iw].re;
                fft_hdata[(y + ih) * n + x].im = 0;
            }

            for (x = n - iw; x < n; x++) {
                fft_hdata[(y + ih) * n + x].re = fft_hdata[(y + ih) * n + n - iw - 1].re;
                fft_hdata[(y + ih) * n + x].im = 0;
            }
        }

        for (y = 0; y < ih; y++) {
            for (x = 0; x < n; x++) {
                fft_hdata[y * n + x].re = fft_hdata[ih * n + x].re;
                fft_hdata[y * n + x].im = 0;
            }
        }

        for (y = n - ih; y < n; y++) {
            for (x = 0; x < n; x++) {
                fft_hdata[y * n + x].re = fft_hdata[(n - ih - 1) * n + x].re;
                fft_hdata[y * n + x].im = 0;
            }
        }
    } else {
        for (y = 0; y < h; y++) {
            const uint16_t *src = (const uint16_t *)(in->data[plane] + in->linesize[plane] * y);

            for (x = 0; x < w; x++) {
                fft_hdata[(y + ih) * n + iw + x].re = src[x] * scale;
                fft_hdata[(y + ih) * n + iw + x].im = 0;
            }

            for (x = 0; x < iw; x++) {
                fft_hdata[(y + ih) * n + x].re = fft_hdata[(y + ih) * n + iw].re;
                fft_hdata[(y + ih) * n + x].im = 0;
            }

            for (x = n - iw; x < n; x++) {
                fft_hdata[(y + ih) * n + x].re = fft_hdata[(y + ih) * n + n - iw - 1].re;
                fft_hdata[(y + ih) * n + x].im = 0;
            }
        }

        for (y = 0; y < ih; y++) {
            for (x = 0; x < n; x++) {
                fft_hdata[y * n + x].re = fft_hdata[ih * n + x].re;
                fft_hdata[y * n + x].im = 0;
            }
        }

        for (y = n - ih; y < n; y++) {
            for (x = 0; x < n; x++) {
                fft_hdata[y * n + x].re = fft_hdata[(n - ih - 1) * n + x].re;
                fft_hdata[y * n + x].im = 0;
            }
        }
    }
}

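/*
 * The 2D transform is separable: fft_horizontal() runs the row FFTs, then
 * fft_vertical() transposes the result and runs the column FFTs as rows;
 * ifft_vertical() and ifft_horizontal() undo the two passes in reverse order.
 */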
static int fft_vertical(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolveContext *s = ctx->priv;
    ThreadData *td = arg;
    AVComplexFloat *hdata = td->hdata_out;
    AVComplexFloat *vdata_in = td->vdata_in;
    AVComplexFloat *vdata_out = td->vdata_out;
    const int plane = td->plane;
    const int n = td->n;
    int start = (n * jobnr) / nb_jobs;
    int end = (n * (jobnr+1)) / nb_jobs;
    int y, x;

    for (y = start; y < end; y++) {
        for (x = 0; x < n; x++) {
            vdata_in[y * n + x].re = hdata[x * n + y].re;
            vdata_in[y * n + x].im = hdata[x * n + y].im;
        }

        s->tx_fn[plane](s->fft[plane][jobnr], vdata_out + y * n, vdata_in + y * n, sizeof(AVComplexFloat));
    }

    return 0;
}

static int ifft_vertical(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolveContext *s = ctx->priv;
    ThreadData *td = arg;
    AVComplexFloat *hdata = td->hdata_out;
    AVComplexFloat *vdata_out = td->vdata_out;
    AVComplexFloat *vdata_in = td->vdata_in;
    const int plane = td->plane;
    const int n = td->n;
    int start = (n * jobnr) / nb_jobs;
    int end = (n * (jobnr+1)) / nb_jobs;
    int y, x;

    for (y = start; y < end; y++) {
        s->itx_fn[plane](s->ifft[plane][jobnr], vdata_out + y * n, vdata_in + y * n, sizeof(AVComplexFloat));

        for (x = 0; x < n; x++) {
            hdata[x * n + y].re = vdata_out[y * n + x].re;
            hdata[x * n + y].im = vdata_out[y * n + x].im;
        }
    }

    return 0;
}

static int ifft_horizontal(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolveContext *s = ctx->priv;
    ThreadData *td = arg;
    AVComplexFloat *hdata_out = td->hdata_out;
    AVComplexFloat *hdata_in = td->hdata_in;
    const int plane = td->plane;
    const int n = td->n;
    int start = (n * jobnr) / nb_jobs;
    int end = (n * (jobnr+1)) / nb_jobs;
    int y;

    for (y = start; y < end; y++) {
        s->itx_fn[plane](s->ifft[plane][jobnr], hdata_out + y * n, hdata_in + y * n, sizeof(AVComplexFloat));
    }

    return 0;
}

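/*
 * Output writer for xcorrelate: the correlation surface is scaled by a fixed
 * gain of 16 times the maximum pixel value and clipped into the frame.
 */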
static void get_xoutput(ConvolveContext *s, AVComplexFloat *input, AVFrame *out,
                        int w, int h, int n, int plane, float scale)
{
    const int imax = (1 << s->depth) - 1;

    scale *= imax * 16;
    if (s->depth == 8) {
        for (int y = 0; y < h; y++) {
            uint8_t *dst = out->data[plane] + y * out->linesize[plane];
            for (int x = 0; x < w; x++)
                dst[x] = av_clip_uint8(input[y * n + x].re * scale);
        }
    } else {
        for (int y = 0; y < h; y++) {
            uint16_t *dst = (uint16_t *)(out->data[plane] + y * out->linesize[plane]);
            for (int x = 0; x < w; x++)
                dst[x] = av_clip(input[y * n + x].re * scale, 0, imax);
        }
    }
}

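/*
 * Output writer for convolve/deconvolve: the four quadrants of the inverse
 * transform are swapped (an fftshift) while being clipped and written back
 * to the frame, so the result lines up with the input after convolution
 * with the centred impulse.
 */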
static void get_output(ConvolveContext *s, AVComplexFloat *input, AVFrame *out,
                       int w, int h, int n, int plane, float scale)
{
    const int max = (1 << s->depth) - 1;
    const int hh = h / 2;
    const int hw = w / 2;
    int y, x;

    if (s->depth == 8) {
        for (y = 0; y < hh; y++) {
            uint8_t *dst = out->data[plane] + (y + hh) * out->linesize[plane] + hw;
            for (x = 0; x < hw; x++)
                dst[x] = av_clip_uint8(input[y * n + x].re * scale);
        }
        for (y = 0; y < hh; y++) {
            uint8_t *dst = out->data[plane] + (y + hh) * out->linesize[plane];
            for (x = 0; x < hw; x++)
                dst[x] = av_clip_uint8(input[y * n + n - hw + x].re * scale);
        }
        for (y = 0; y < hh; y++) {
            uint8_t *dst = out->data[plane] + y * out->linesize[plane] + hw;
            for (x = 0; x < hw; x++)
                dst[x] = av_clip_uint8(input[(n - hh + y) * n + x].re * scale);
        }
        for (y = 0; y < hh; y++) {
            uint8_t *dst = out->data[plane] + y * out->linesize[plane];
            for (x = 0; x < hw; x++)
                dst[x] = av_clip_uint8(input[(n - hh + y) * n + n - hw + x].re * scale);
        }
    } else {
        for (y = 0; y < hh; y++) {
            uint16_t *dst = (uint16_t *)(out->data[plane] + (y + hh) * out->linesize[plane] + hw * 2);
            for (x = 0; x < hw; x++)
                dst[x] = av_clip(input[y * n + x].re * scale, 0, max);
        }
        for (y = 0; y < hh; y++) {
            uint16_t *dst = (uint16_t *)(out->data[plane] + (y + hh) * out->linesize[plane]);
            for (x = 0; x < hw; x++)
                dst[x] = av_clip(input[y * n + n - hw + x].re * scale, 0, max);
        }
        for (y = 0; y < hh; y++) {
            uint16_t *dst = (uint16_t *)(out->data[plane] + y * out->linesize[plane] + hw * 2);
            for (x = 0; x < hw; x++)
                dst[x] = av_clip(input[(n - hh + y) * n + x].re * scale, 0, max);
        }
        for (y = 0; y < hh; y++) {
            uint16_t *dst = (uint16_t *)(out->data[plane] + y * out->linesize[plane]);
            for (x = 0; x < hw; x++)
                dst[x] = av_clip(input[(n - hh + y) * n + n - hw + x].re * scale, 0, max);
        }
    }
}

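/*
 * convolve: pointwise product of the two spectra, i.e. circular convolution
 * in the spatial domain; the small "noise" constant is added to the real
 * part of the impulse spectrum.
 */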
static int complex_multiply(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolveContext *s = ctx->priv;
    ThreadData *td = arg;
    AVComplexFloat *input = td->hdata_in;
    AVComplexFloat *filter = td->vdata_in;
    const float noise = s->noise;
    const int n = td->n;
    int start = (n * jobnr) / nb_jobs;
    int end = (n * (jobnr+1)) / nb_jobs;
    int y, x;

    for (y = start; y < end; y++) {
        int yn = y * n;

        for (x = 0; x < n; x++) {
            float re, im, ire, iim;

            re = input[yn + x].re;
            im = input[yn + x].im;
            ire = filter[yn + x].re + noise;
            iim = filter[yn + x].im;

            input[yn + x].re = ire * re - iim * im;
            input[yn + x].im = iim * re + ire * im;
        }
    }

    return 0;
}

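/*
 * xcorrelate: multiply by the complex conjugate of the secondary spectrum,
 * which yields cross-correlation after the inverse transform; the 1/(n*n)
 * factor is folded into the conjugated spectrum here.
 */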
static int complex_xcorrelate(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ThreadData *td = arg;
    AVComplexFloat *input = td->hdata_in;
    AVComplexFloat *filter = td->vdata_in;
    const int n = td->n;
    const float scale = 1.f / (n * n);
    int start = (n * jobnr) / nb_jobs;
    int end = (n * (jobnr+1)) / nb_jobs;

    for (int y = start; y < end; y++) {
        int yn = y * n;

        for (int x = 0; x < n; x++) {
            float re, im, ire, iim;

            re = input[yn + x].re;
            im = input[yn + x].im;
            ire = filter[yn + x].re * scale;
            iim = -filter[yn + x].im * scale;

            input[yn + x].re = ire * re - iim * im;
            input[yn + x].im = iim * re + ire * im;
        }
    }

    return 0;
}

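/*
 * deconvolve: regularized spectral division, input * conj(filter) /
 * (|filter|^2 + noise); the noise term keeps the division stable where the
 * impulse spectrum is close to zero.
 */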
static int complex_divide(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolveContext *s = ctx->priv;
    ThreadData *td = arg;
    AVComplexFloat *input = td->hdata_in;
    AVComplexFloat *filter = td->vdata_in;
    const float noise = s->noise;
    const int n = td->n;
    int start = (n * jobnr) / nb_jobs;
    int end = (n * (jobnr+1)) / nb_jobs;
    int y, x;

    for (y = start; y < end; y++) {
        int yn = y * n;

        for (x = 0; x < n; x++) {
            float re, im, ire, iim, div;

            re = input[yn + x].re;
            im = input[yn + x].im;
            ire = filter[yn + x].re;
            iim = filter[yn + x].im;
            div = ire * ire + iim * iim + noise;

            input[yn + x].re = (ire * re + iim * im) / div;
            input[yn + x].im = (ire * im - iim * re) / div;
        }
    }

    return 0;
}

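/*
 * The impulse (kernel) frame is normalized so that its samples sum to 1,
 * transformed once and cached; got_impulse[] marks planes whose spectrum is
 * already available when impulse=first is selected.
 */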
static void prepare_impulse(AVFilterContext *ctx, AVFrame *impulsepic, int plane)
{
    ConvolveContext *s = ctx->priv;
    const int n = s->fft_len[plane];
    const int w = s->secondarywidth[plane];
    const int h = s->secondaryheight[plane];
    ThreadData td;
    float total = 0;

    if (s->depth == 8) {
        for (int y = 0; y < h; y++) {
            const uint8_t *src = (const uint8_t *)(impulsepic->data[plane] + y * impulsepic->linesize[plane]);
            for (int x = 0; x < w; x++) {
                total += src[x];
            }
        }
    } else {
        for (int y = 0; y < h; y++) {
            const uint16_t *src = (const uint16_t *)(impulsepic->data[plane] + y * impulsepic->linesize[plane]);
            for (int x = 0; x < w; x++) {
                total += src[x];
            }
        }
    }
    total = FFMAX(1, total);

    s->get_input(s, s->fft_hdata_impulse_in[plane], impulsepic, w, h, n, plane, 1.f / total);

    td.n = n;
    td.plane = plane;
    td.hdata_in = s->fft_hdata_impulse_in[plane];
    td.vdata_in = s->fft_vdata_impulse_in[plane];
    td.hdata_out = s->fft_hdata_impulse_out[plane];
    td.vdata_out = s->fft_vdata_impulse_out[plane];

    ff_filter_execute(ctx, fft_horizontal, &td, NULL,
                      FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));
    ff_filter_execute(ctx, fft_vertical, &td, NULL,
                      FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));

    s->got_impulse[plane] = 1;
}

static void prepare_secondary(AVFilterContext *ctx, AVFrame *secondary, int plane)
{
    ConvolveContext *s = ctx->priv;
    const int n = s->fft_len[plane];
    ThreadData td;

    s->get_input(s, s->fft_hdata_impulse_in[plane], secondary,
                 s->secondarywidth[plane],
                 s->secondaryheight[plane],
                 n, plane, 1.f);

    td.n = n;
    td.plane = plane;
    td.hdata_in = s->fft_hdata_impulse_in[plane];
    td.vdata_in = s->fft_vdata_impulse_in[plane];
    td.hdata_out = s->fft_hdata_impulse_out[plane];
    td.vdata_out = s->fft_vdata_impulse_out[plane];

    ff_filter_execute(ctx, fft_horizontal, &td, NULL,
                      FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));
    ff_filter_execute(ctx, fft_vertical, &td, NULL,
                      FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));

    s->got_impulse[plane] = 1;
}

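/*
 * Per-frame driver: for every selected plane, load and transform the main
 * input, (re)compute the impulse spectrum when needed, apply the per-filter
 * spectral operation, run the inverse transforms and write the plane back
 * into the main picture, which is then forwarded downstream.
 */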
static int do_convolve(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    AVFilterLink *outlink = ctx->outputs[0];
    ConvolveContext *s = ctx->priv;
    AVFrame *mainpic = NULL, *impulsepic = NULL;
    int ret, plane;

    ret = ff_framesync_dualinput_get(fs, &mainpic, &impulsepic);
    if (ret < 0)
        return ret;
    if (!impulsepic)
        return ff_filter_frame(outlink, mainpic);

    for (plane = 0; plane < s->nb_planes; plane++) {
        AVComplexFloat *filter = s->fft_vdata_impulse_out[plane];
        AVComplexFloat *input = s->fft_vdata_out[plane];
        const int n = s->fft_len[plane];
        const int w = s->primarywidth[plane];
        const int h = s->primaryheight[plane];
        const int ow = s->planewidth[plane];
        const int oh = s->planeheight[plane];
        ThreadData td;

        if (!(s->planes & (1 << plane))) {
            continue;
        }

        td.plane = plane, td.n = n;
        s->get_input(s, s->fft_hdata_in[plane], mainpic, w, h, n, plane, 1.f);

        td.hdata_in = s->fft_hdata_in[plane];
        td.vdata_in = s->fft_vdata_in[plane];
        td.hdata_out = s->fft_hdata_out[plane];
        td.vdata_out = s->fft_vdata_out[plane];

        ff_filter_execute(ctx, fft_horizontal, &td, NULL,
                          FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));
        ff_filter_execute(ctx, fft_vertical, &td, NULL,
                          FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));

        if ((!s->impulse && !s->got_impulse[plane]) || s->impulse) {
            s->prepare_impulse(ctx, impulsepic, plane);
        }

        td.hdata_in = input;
        td.vdata_in = filter;

        ff_filter_execute(ctx, s->filter, &td, NULL,
                          FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));

        td.hdata_in = s->fft_hdata_out[plane];
        td.vdata_in = s->fft_vdata_out[plane];
        td.hdata_out = s->fft_hdata_in[plane];
        td.vdata_out = s->fft_vdata_in[plane];

        ff_filter_execute(ctx, ifft_vertical, &td, NULL,
                          FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));

        td.hdata_out = s->fft_hdata_out[plane];
        td.hdata_in = s->fft_hdata_in[plane];

        ff_filter_execute(ctx, ifft_horizontal, &td, NULL,
                          FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));

        s->get_output(s, s->fft_hdata_out[plane], mainpic, ow, oh, n, plane, 1.f / (n * n));
    }

    return ff_filter_frame(outlink, mainpic);
}

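/*
 * Output/link configuration: primary and secondary plane sizes are taken
 * from the two inputs, framesync is set up in dualinput mode, and a
 * forward/inverse transform pair is created per plane for each of the
 * MAX_THREADS worker slots.
 */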
static int config_output(AVFilterLink *outlink)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
    AVFilterContext *ctx = outlink->src;
    ConvolveContext *s = ctx->priv;
    AVFilterLink *mainlink = ctx->inputs[0];
    AVFilterLink *secondlink = ctx->inputs[1];
    int ret, i, j;

    s->primarywidth[1] = s->primarywidth[2] = AV_CEIL_RSHIFT(mainlink->w, desc->log2_chroma_w);
    s->primarywidth[0] = s->primarywidth[3] = mainlink->w;
    s->primaryheight[1] = s->primaryheight[2] = AV_CEIL_RSHIFT(mainlink->h, desc->log2_chroma_h);
    s->primaryheight[0] = s->primaryheight[3] = mainlink->h;

    s->secondarywidth[1] = s->secondarywidth[2] = AV_CEIL_RSHIFT(secondlink->w, desc->log2_chroma_w);
    s->secondarywidth[0] = s->secondarywidth[3] = secondlink->w;
    s->secondaryheight[1] = s->secondaryheight[2] = AV_CEIL_RSHIFT(secondlink->h, desc->log2_chroma_h);
    s->secondaryheight[0] = s->secondaryheight[3] = secondlink->h;

    s->fs.on_event = do_convolve;
    ret = ff_framesync_init_dualinput(&s->fs, ctx);
    if (ret < 0)
        return ret;
    outlink->w = mainlink->w;
    outlink->h = mainlink->h;
    outlink->time_base = mainlink->time_base;
    outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
    outlink->frame_rate = mainlink->frame_rate;

    if ((ret = ff_framesync_configure(&s->fs)) < 0)
        return ret;

    for (i = 0; i < s->nb_planes; i++) {
        for (j = 0; j < MAX_THREADS; j++) {
            float scale = 1.f;

            ret = av_tx_init(&s->fft[i][j], &s->tx_fn[i], AV_TX_FLOAT_FFT, 0, s->fft_len[i], &scale, 0);
            if (ret < 0)
                return ret;
            ret = av_tx_init(&s->ifft[i][j], &s->itx_fn[i], AV_TX_FLOAT_FFT, 1, s->fft_len[i], &scale, 0);
            if (ret < 0)
                return ret;
        }
    }

    return 0;
}

static int activate(AVFilterContext *ctx)
{
    ConvolveContext *s = ctx->priv;
    return ff_framesync_activate(&s->fs);
}

static av_cold int init(AVFilterContext *ctx)
{
    ConvolveContext *s = ctx->priv;

    if (!strcmp(ctx->filter->name, "convolve")) {
        s->filter = complex_multiply;
        s->prepare_impulse = prepare_impulse;
        s->get_input = get_input;
        s->get_output = get_output;
    } else if (!strcmp(ctx->filter->name, "xcorrelate")) {
        s->filter = complex_xcorrelate;
        s->prepare_impulse = prepare_secondary;
        s->get_input = get_zeropadded_input;
        s->get_output = get_xoutput;
    } else if (!strcmp(ctx->filter->name, "deconvolve")) {
        s->filter = complex_divide;
        s->prepare_impulse = prepare_impulse;
        s->get_input = get_input;
        s->get_output = get_output;
    } else {
        return AVERROR_BUG;
    }

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    ConvolveContext *s = ctx->priv;
    int i, j;

    for (i = 0; i < 4; i++) {
        av_freep(&s->fft_hdata_in[i]);
        av_freep(&s->fft_vdata_in[i]);
        av_freep(&s->fft_hdata_out[i]);
        av_freep(&s->fft_vdata_out[i]);
        av_freep(&s->fft_hdata_impulse_in[i]);
        av_freep(&s->fft_vdata_impulse_in[i]);
        av_freep(&s->fft_hdata_impulse_out[i]);
        av_freep(&s->fft_vdata_impulse_out[i]);

        for (j = 0; j < MAX_THREADS; j++) {
            av_tx_uninit(&s->fft[i][j]);
            av_tx_uninit(&s->ifft[i][j]);
        }
    }

    ff_framesync_uninit(&s->fs);
}

static const AVFilterPad convolve_inputs[] = {
    {
        .name = "main",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
    },{
        .name = "impulse",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input_impulse,
    },
};

static const AVFilterPad convolve_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
};

FRAMESYNC_AUXILIARY_FUNCS(convolve, ConvolveContext, fs)

#if CONFIG_CONVOLVE_FILTER

FRAMESYNC_DEFINE_PURE_CLASS(convolve, "convolve", convolve, convolve_options);

const AVFilter ff_vf_convolve = {
    .name = "convolve",
    .description = NULL_IF_CONFIG_SMALL("Convolve first video stream with second video stream."),
    .preinit = convolve_framesync_preinit,
    .init = init,
    .uninit = uninit,
    .activate = activate,
    .priv_size = sizeof(ConvolveContext),
    .priv_class = &convolve_class,
    FILTER_INPUTS(convolve_inputs),
    FILTER_OUTPUTS(convolve_outputs),
    FILTER_PIXFMTS_ARRAY(pixel_fmts_fftfilt),
    .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};

#endif /* CONFIG_CONVOLVE_FILTER */

#if CONFIG_DECONVOLVE_FILTER

static const AVOption deconvolve_options[] = {
    { "planes", "set planes to deconvolve", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=7}, 0, 15, FLAGS },
    { "impulse", "when to process impulses", OFFSET(impulse), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS, "impulse" },
    { "first", "process only first impulse, ignore rest", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "impulse" },
    { "all", "process all impulses", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "impulse" },
    { "noise", "set noise", OFFSET(noise), AV_OPT_TYPE_FLOAT, {.dbl=0.0000001}, 0, 1, FLAGS },
    { NULL },
};

FRAMESYNC_DEFINE_PURE_CLASS(deconvolve, "deconvolve", convolve, deconvolve_options);

const AVFilter ff_vf_deconvolve = {
    .name = "deconvolve",
    .description = NULL_IF_CONFIG_SMALL("Deconvolve first video stream with second video stream."),
    .preinit = convolve_framesync_preinit,
    .init = init,
    .uninit = uninit,
    .activate = activate,
    .priv_size = sizeof(ConvolveContext),
    .priv_class = &deconvolve_class,
    FILTER_INPUTS(convolve_inputs),
    FILTER_OUTPUTS(convolve_outputs),
    FILTER_PIXFMTS_ARRAY(pixel_fmts_fftfilt),
    .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};

#endif /* CONFIG_DECONVOLVE_FILTER */

#if CONFIG_XCORRELATE_FILTER

static const AVOption xcorrelate_options[] = {
    { "planes", "set planes to cross-correlate", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=7}, 0, 15, FLAGS },
    { "secondary", "when to process secondary frame", OFFSET(impulse), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS, "impulse" },
    { "first", "process only first secondary frame, ignore rest", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "impulse" },
    { "all", "process all secondary frames", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "impulse" },
    { NULL },
};

FRAMESYNC_DEFINE_PURE_CLASS(xcorrelate, "xcorrelate", convolve, xcorrelate_options);

static int config_input_secondary(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;

    if (ctx->inputs[0]->w <= ctx->inputs[1]->w ||
        ctx->inputs[0]->h <= ctx->inputs[1]->h) {
        av_log(ctx, AV_LOG_ERROR, "Width and height of second input videos must be less than first input.\n");
        return AVERROR(EINVAL);
    }

    return 0;
}

static const AVFilterPad xcorrelate_inputs[] = {
    {
        .name = "primary",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
    },{
        .name = "secondary",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input_secondary,
    },
};

static const AVFilterPad xcorrelate_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
};

const AVFilter ff_vf_xcorrelate = {
    .name = "xcorrelate",
    .description = NULL_IF_CONFIG_SMALL("Cross-correlate first video stream with second video stream."),
    .preinit = convolve_framesync_preinit,
    .init = init,
    .uninit = uninit,
    .activate = activate,
    .priv_size = sizeof(ConvolveContext),
    .priv_class = &xcorrelate_class,
    FILTER_INPUTS(xcorrelate_inputs),
    FILTER_OUTPUTS(xcorrelate_outputs),
    FILTER_PIXFMTS_ARRAY(pixel_fmts_fftfilt),
    .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};

#endif /* CONFIG_XCORRELATE_FILTER */