FFmpeg
vf_vif.c
/*
 * Copyright (c) 2017 Ronald S. Bultje <rsbultje@gmail.com>
 * Copyright (c) 2017 Ashish Pratap Singh <ashk43712@gmail.com>
 * Copyright (c) 2021 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Calculate VIF between two input videos.
 */
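
/*
 * Example (illustrative, not part of the upstream source): feed the
 * distorted video as the first input and the reference as the second, e.g.
 *
 *     ffmpeg -i distorted.mp4 -i reference.mp4 -lavfi vif -f null -
 *
 * Per-frame scores are exported as frame metadata under
 * lavfi.vif.scale.0 .. lavfi.vif.scale.3, and a per-scale summary is
 * logged when the filter is uninitialized.
 */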

#include <float.h>

#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "framesync.h"
#include "internal.h"

#define NUM_DATA_BUFS 13

typedef struct VIFContext {
    const AVClass *class;
    FFFrameSync fs;
    const AVPixFmtDescriptor *desc;
    int width;
    int height;
    int nb_threads;
    float factor;
    float *data_buf[NUM_DATA_BUFS];
    float **temp;
    float *ref_data;
    float *main_data;
    double vif_sum[4];
    double vif_min[4];
    double vif_max[4];
    uint64_t nb_frames;
} VIFContext;

#define OFFSET(x) offsetof(VIFContext, x)

static const AVOption vif_options[] = {
    { NULL }
};

FRAMESYNC_DEFINE_CLASS(vif, VIFContext, fs);

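/*
 * Per-scale 1-D smoothing kernels used by vif_filter1d(). One kernel per
 * scale, applied separably (vertical pass, then horizontal pass); the
 * widths are 17, 9, 5 and 3 taps, and each symmetric row of coefficients
 * sums to ~1 (Gaussian-shaped windows, as in the reference VIF model).
 */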
static const uint8_t vif_filter1d_width1[4] = { 17, 9, 5, 3 };

static const float vif_filter1d_table[4][17] =
{
    {
        0.00745626912, 0.0142655009, 0.0250313189, 0.0402820669, 0.0594526194,
        0.0804751068, 0.0999041125, 0.113746084, 0.118773937, 0.113746084,
        0.0999041125, 0.0804751068, 0.0594526194, 0.0402820669, 0.0250313189,
        0.0142655009, 0.00745626912
    },
    {
        0.0189780835, 0.0558981746, 0.120920904, 0.192116052, 0.224173605,
        0.192116052, 0.120920904, 0.0558981746, 0.0189780835
    },
    {
        0.054488685, 0.244201347, 0.402619958, 0.244201347, 0.054488685
    },
    {
        0.166378498, 0.667243004, 0.166378498
    }
};

typedef struct ThreadData {
    const float *filter;
    const float *src;
    float *dst;
    int w, h;
    int src_stride;
    int dst_stride;
    int filter_width;
    float **temp;
} ThreadData;

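/*
 * 2x decimation: keep the top-left sample of every 2x2 block. dst_stride
 * is given in samples at the source resolution, so the effective
 * destination row stride is dst_stride / 2.
 */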
static void vif_dec2(const float *src, float *dst, int w, int h,
                     int src_stride, int dst_stride)
{
    const int dst_px_stride = dst_stride / 2;

    for (int i = 0; i < h / 2; i++) {
        for (int j = 0; j < w / 2; j++)
            dst[i * dst_px_stride + j] = src[(i * 2) * src_stride + (j * 2)];
    }
}

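/*
 * Accumulate the two sides of the VIF ratio over one scale. For each
 * position, with local variances sigma1_sq, sigma2_sq, covariance sigma12,
 * gain g = sigma12 / sigma1_sq and residual sv_sq = sigma2_sq - g * sigma12:
 *
 *   num += log2(1 + g^2 * sigma1_sq / (sv_sq + sigma_nsq))
 *   den += log2(1 + sigma1_sq / sigma_nsq)
 *
 * sigma_nsq models the additive noise of the visual channel; the final
 * per-scale score is num / den.
 */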
static void vif_statistic(const float *mu1_sq, const float *mu2_sq,
                          const float *mu1_mu2, const float *xx_filt,
                          const float *yy_filt, const float *xy_filt,
                          float *num, float *den, int w, int h)
{
    static const float sigma_nsq = 2;
    float mu1_sq_val, mu2_sq_val, mu1_mu2_val, xx_filt_val, yy_filt_val, xy_filt_val;
    float sigma1_sq, sigma2_sq, sigma12, g, sv_sq, eps = 1.0e-10f;
    float gain_limit = 100.f;
    float num_val, den_val;
    float accum_num = 0.0f;
    float accum_den = 0.0f;

    for (int i = 0; i < h; i++) {
        float accum_inner_num = 0.f;
        float accum_inner_den = 0.f;

        for (int j = 0; j < w; j++) {
            mu1_sq_val = mu1_sq[i * w + j];
            mu2_sq_val = mu2_sq[i * w + j];
            mu1_mu2_val = mu1_mu2[i * w + j];
            xx_filt_val = xx_filt[i * w + j];
            yy_filt_val = yy_filt[i * w + j];
            xy_filt_val = xy_filt[i * w + j];

            sigma1_sq = xx_filt_val - mu1_sq_val;
            sigma2_sq = yy_filt_val - mu2_sq_val;
            sigma12 = xy_filt_val - mu1_mu2_val;

            sigma1_sq = FFMAX(sigma1_sq, 0.0f);
            sigma2_sq = FFMAX(sigma2_sq, 0.0f);
            sigma12 = FFMAX(sigma12, 0.0f);

            g = sigma12 / (sigma1_sq + eps);
            sv_sq = sigma2_sq - g * sigma12;

            if (sigma1_sq < eps) {
                g = 0.0f;
                sv_sq = sigma2_sq;
                sigma1_sq = 0.0f;
            }

            if (sigma2_sq < eps) {
                g = 0.0f;
                sv_sq = 0.0f;
            }

            if (g < 0.0f) {
                sv_sq = sigma2_sq;
                g = 0.0f;
            }
            sv_sq = FFMAX(sv_sq, eps);

            g = FFMIN(g, gain_limit);

            num_val = log2f(1.0f + g * g * sigma1_sq / (sv_sq + sigma_nsq));
            den_val = log2f(1.0f + sigma1_sq / sigma_nsq);

            if (isnan(den_val))
                num_val = den_val = 1.f;

            accum_inner_num += num_val;
            accum_inner_den += den_val;
        }

        accum_num += accum_inner_num;
        accum_den += accum_inner_den;
    }

    num[0] = accum_num;
    den[0] = accum_den;
}

static void vif_xx_yy_xy(const float *x, const float *y, float *xx, float *yy,
                         float *xy, int w, int h)
{
    for (int i = 0; i < h; i++) {
        for (int j = 0; j < w; j++) {
            float xval = x[j];
            float yval = y[j];
            float xxval = xval * xval;
            float yyval = yval * yval;
            float xyval = xval * yval;

            xx[j] = xxval;
            yy[j] = yyval;
            xy[j] = xyval;
        }

        xx += w;
        yy += w;
        xy += w;
        x += w;
        y += w;
    }
}

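/*
 * Separable 1-D convolution used for all smoothing steps: a vertical pass
 * into the per-thread scratch row, then a horizontal pass into dst.
 * Samples outside the image are mirrored. The work is sliced over rows,
 * so each job only writes rows in [slice_start, slice_end).
 */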
static int vif_filter1d(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ThreadData *td = arg;
    const float *filter = td->filter;
    const float *src = td->src;
    float *dst = td->dst;
    int w = td->w;
    int h = td->h;
    int src_stride = td->src_stride;
    int dst_stride = td->dst_stride;
    int filt_w = td->filter_width;
    float *temp = td->temp[jobnr];
    const int slice_start = (h * jobnr) / nb_jobs;
    const int slice_end = (h * (jobnr+1)) / nb_jobs;

    for (int i = slice_start; i < slice_end; i++) {
        /** Vertical pass. */
        for (int j = 0; j < w; j++) {
            float sum = 0.f;

            if (i >= filt_w / 2 && i < h - filt_w / 2 - 1) {
                for (int filt_i = 0; filt_i < filt_w; filt_i++) {
                    const float filt_coeff = filter[filt_i];
                    float img_coeff;
                    int ii = i - filt_w / 2 + filt_i;

                    img_coeff = src[ii * src_stride + j];
                    sum += filt_coeff * img_coeff;
                }
            } else {
                for (int filt_i = 0; filt_i < filt_w; filt_i++) {
                    const float filt_coeff = filter[filt_i];
                    int ii = i - filt_w / 2 + filt_i;
                    float img_coeff;

                    ii = ii < 0 ? -ii : (ii >= h ? 2 * h - ii - 1 : ii);

                    img_coeff = src[ii * src_stride + j];
                    sum += filt_coeff * img_coeff;
                }
            }

            temp[j] = sum;
        }

        /** Horizontal pass. */
        for (int j = 0; j < w; j++) {
            float sum = 0.f;

            if (j >= filt_w / 2 && j < w - filt_w / 2 - 1) {
                for (int filt_j = 0; filt_j < filt_w; filt_j++) {
                    const float filt_coeff = filter[filt_j];
                    int jj = j - filt_w / 2 + filt_j;
                    float img_coeff;

                    img_coeff = temp[jj];
                    sum += filt_coeff * img_coeff;
                }
            } else {
                for (int filt_j = 0; filt_j < filt_w; filt_j++) {
                    const float filt_coeff = filter[filt_j];
                    int jj = j - filt_w / 2 + filt_j;
                    float img_coeff;

                    jj = jj < 0 ? -jj : (jj >= w ? 2 * w - jj - 1 : jj);

                    img_coeff = temp[jj];
                    sum += filt_coeff * img_coeff;
                }
            }

            dst[i * dst_stride + j] = sum;
        }
    }

    return 0;
}

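/*
 * Compute the four per-scale VIF scores for one frame pair. For scales
 * 1..3 the previous scale is first smoothed with that scale's kernel and
 * decimated by 2 in both directions. At every scale the local means
 * (mu1, mu2), second moments and cross moment are filtered, and
 * vif_statistic() reduces them to a num/den pair; score[scale] = num / den.
 */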
static int compute_vif2(AVFilterContext *ctx,
                        const float *ref, const float *main, int w, int h,
                        int ref_stride, int main_stride, float *score,
                        float *const data_buf[NUM_DATA_BUFS], float **temp,
                        int gnb_threads)
{
    ThreadData td;
    float *ref_scale = data_buf[0];
    float *main_scale = data_buf[1];
    float *ref_sq = data_buf[2];
    float *main_sq = data_buf[3];
    float *ref_main = data_buf[4];
    float *mu1 = data_buf[5];
    float *mu2 = data_buf[6];
    float *mu1_sq = data_buf[7];
    float *mu2_sq = data_buf[8];
    float *mu1_mu2 = data_buf[9];
    float *ref_sq_filt = data_buf[10];
    float *main_sq_filt = data_buf[11];
    float *ref_main_filt = data_buf[12];

    const float *curr_ref_scale = ref;
    const float *curr_main_scale = main;
    int curr_ref_stride = ref_stride;
    int curr_main_stride = main_stride;

    float num = 0.f;
    float den = 0.f;

    for (int scale = 0; scale < 4; scale++) {
        const float *filter = vif_filter1d_table[scale];
        int filter_width = vif_filter1d_width1[scale];
        const int nb_threads = FFMIN(h, gnb_threads);
        int buf_valid_w = w;
        int buf_valid_h = h;

        td.filter = filter;
        td.filter_width = filter_width;

        if (scale > 0) {
            td.src = curr_ref_scale;
            td.dst = mu1;
            td.w = w;
            td.h = h;
            td.src_stride = curr_ref_stride;
            td.dst_stride = w;
            td.temp = temp;
            ff_filter_execute(ctx, vif_filter1d, &td, NULL, nb_threads);

            td.src = curr_main_scale;
            td.dst = mu2;
            td.src_stride = curr_main_stride;
            ff_filter_execute(ctx, vif_filter1d, &td, NULL, nb_threads);

            vif_dec2(mu1, ref_scale, buf_valid_w, buf_valid_h, w, w);
            vif_dec2(mu2, main_scale, buf_valid_w, buf_valid_h, w, w);

            w = buf_valid_w / 2;
            h = buf_valid_h / 2;

            buf_valid_w = w;
            buf_valid_h = h;

            curr_ref_scale = ref_scale;
            curr_main_scale = main_scale;

            curr_ref_stride = w;
            curr_main_stride = w;
        }

        td.src = curr_ref_scale;
        td.dst = mu1;
        td.w = w;
        td.h = h;
        td.src_stride = curr_ref_stride;
        td.dst_stride = w;
        td.temp = temp;
        ff_filter_execute(ctx, vif_filter1d, &td, NULL, nb_threads);

        td.src = curr_main_scale;
        td.dst = mu2;
        td.src_stride = curr_main_stride;
        ff_filter_execute(ctx, vif_filter1d, &td, NULL, nb_threads);

        vif_xx_yy_xy(mu1, mu2, mu1_sq, mu2_sq, mu1_mu2, w, h);

        vif_xx_yy_xy(curr_ref_scale, curr_main_scale, ref_sq, main_sq, ref_main, w, h);

        td.src = ref_sq;
        td.dst = ref_sq_filt;
        td.src_stride = w;
        ff_filter_execute(ctx, vif_filter1d, &td, NULL, nb_threads);

        td.src = main_sq;
        td.dst = main_sq_filt;
        td.src_stride = w;
        ff_filter_execute(ctx, vif_filter1d, &td, NULL, nb_threads);

        td.src = ref_main;
        td.dst = ref_main_filt;
        ff_filter_execute(ctx, vif_filter1d, &td, NULL, nb_threads);

        vif_statistic(mu1_sq, mu2_sq, mu1_mu2, ref_sq_filt, main_sq_filt,
                      ref_main_filt, &num, &den, w, h);

        score[scale] = den <= FLT_EPSILON ? 1.f : num / den;
    }

    return 0;
}

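/*
 * Expand the first plane of both inputs to float. factor rescales higher
 * bit depths to the 8-bit range (factor = 1 / 2^(depth - 8)) and 128 is
 * subtracted so samples are roughly zero-centred.
 */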
#define offset_fn(type, bits) \
static void offset_##bits##bit(VIFContext *s, \
                               const AVFrame *ref, \
                               AVFrame *main, int stride) \
{ \
    int w = s->width; \
    int h = s->height; \
    \
    int ref_stride = ref->linesize[0]; \
    int main_stride = main->linesize[0]; \
    \
    const type *ref_ptr = (const type *) ref->data[0]; \
    const type *main_ptr = (const type *) main->data[0]; \
    \
    const float factor = s->factor; \
    \
    float *ref_ptr_data = s->ref_data; \
    float *main_ptr_data = s->main_data; \
    \
    for (int i = 0; i < h; i++) { \
        for (int j = 0; j < w; j++) { \
            ref_ptr_data[j] = ref_ptr[j] * factor - 128.f; \
            main_ptr_data[j] = main_ptr[j] * factor - 128.f; \
        } \
        ref_ptr += ref_stride / sizeof(type); \
        ref_ptr_data += w; \
        main_ptr += main_stride / sizeof(type); \
        main_ptr_data += w; \
    } \
}

offset_fn(uint8_t, 8)
offset_fn(uint16_t, 16)

static void set_meta(AVDictionary **metadata, const char *key, float d)
{
    char value[257];
    snprintf(value, sizeof(value), "%f", d);
    av_dict_set(metadata, key, value, 0);
}

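/*
 * Score one frame pair: convert the first plane of both frames to float,
 * run compute_vif2(), attach the four per-scale scores as frame metadata
 * and update the running min/max/sum used for the summary in uninit().
 */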
static AVFrame *do_vif(AVFilterContext *ctx, AVFrame *main, const AVFrame *ref)
{
    VIFContext *s = ctx->priv;
    AVDictionary **metadata = &main->metadata;
    float score[4];

    s->factor = 1.f / (1 << (s->desc->comp[0].depth - 8));
    if (s->desc->comp[0].depth <= 8) {
        offset_8bit(s, ref, main, s->width);
    } else {
        offset_16bit(s, ref, main, s->width);
    }

    compute_vif2(ctx, s->ref_data, s->main_data,
                 s->width, s->height, s->width, s->width,
                 score, s->data_buf, s->temp, s->nb_threads);

    set_meta(metadata, "lavfi.vif.scale.0", score[0]);
    set_meta(metadata, "lavfi.vif.scale.1", score[1]);
    set_meta(metadata, "lavfi.vif.scale.2", score[2]);
    set_meta(metadata, "lavfi.vif.scale.3", score[3]);

    for (int i = 0; i < 4; i++) {
        s->vif_min[i] = FFMIN(s->vif_min[i], score[i]);
        s->vif_max[i] = FFMAX(s->vif_max[i], score[i]);
        s->vif_sum[i] += score[i];
    }

    s->nb_frames++;

    return main;
}

static const enum AVPixelFormat pix_fmts[] = {
    AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10,
    AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY14, AV_PIX_FMT_GRAY16,
    AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
    AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
    AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
#define PF(suf) AV_PIX_FMT_YUV420##suf, AV_PIX_FMT_YUV422##suf, AV_PIX_FMT_YUV444##suf
    PF(P9), PF(P10), PF(P12), PF(P14), PF(P16),
    AV_PIX_FMT_NONE
};

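/*
 * Only the first (luma/gray) plane is compared. All intermediate buffers
 * are allocated here at full plane resolution; higher scales simply reuse
 * the leading part of the same buffers after decimation.
 */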
static int config_input_ref(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    VIFContext *s = ctx->priv;

    if (ctx->inputs[0]->w != ctx->inputs[1]->w ||
        ctx->inputs[0]->h != ctx->inputs[1]->h) {
        av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must be the same.\n");
        return AVERROR(EINVAL);
    }

    s->desc = av_pix_fmt_desc_get(inlink->format);
    s->width = ctx->inputs[0]->w;
    s->height = ctx->inputs[0]->h;
    s->nb_threads = ff_filter_get_nb_threads(ctx);

    for (int i = 0; i < 4; i++) {
        s->vif_min[i] = DBL_MAX;
        s->vif_max[i] = -DBL_MAX;
    }

    for (int i = 0; i < NUM_DATA_BUFS; i++) {
        if (!(s->data_buf[i] = av_calloc(s->width, s->height * sizeof(float))))
            return AVERROR(ENOMEM);
    }

    if (!(s->ref_data = av_calloc(s->width, s->height * sizeof(float))))
        return AVERROR(ENOMEM);

    if (!(s->main_data = av_calloc(s->width, s->height * sizeof(float))))
        return AVERROR(ENOMEM);

    if (!(s->temp = av_calloc(s->nb_threads, sizeof(s->temp[0]))))
        return AVERROR(ENOMEM);

    for (int i = 0; i < s->nb_threads; i++) {
        if (!(s->temp[i] = av_calloc(s->width, sizeof(float))))
            return AVERROR(ENOMEM);
    }

    return 0;
}

static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    VIFContext *s = fs->opaque;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out_frame, *main_frame = NULL, *ref_frame = NULL;
    int ret;

    ret = ff_framesync_dualinput_get(fs, &main_frame, &ref_frame);
    if (ret < 0)
        return ret;

    if (ctx->is_disabled || !ref_frame) {
        out_frame = main_frame;
    } else {
        out_frame = do_vif(ctx, main_frame, ref_frame);
    }

    out_frame->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);

    return ff_filter_frame(outlink, out_frame);
}

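/*
 * Output is configured from the main input; both inputs go through
 * framesync with EXT_STOP, so the stream ends as soon as either input
 * ends, and the main input (higher sync level) drives the output frames.
 */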
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    VIFContext *s = ctx->priv;
    AVFilterLink *mainlink = ctx->inputs[0];
    FFFrameSyncIn *in;
    int ret;

    outlink->w = mainlink->w;
    outlink->h = mainlink->h;
    outlink->time_base = mainlink->time_base;
    outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
    outlink->frame_rate = mainlink->frame_rate;
    if ((ret = ff_framesync_init(&s->fs, ctx, 2)) < 0)
        return ret;

    in = s->fs.in;
    in[0].time_base = mainlink->time_base;
    in[1].time_base = ctx->inputs[1]->time_base;
    in[0].sync = 2;
    in[0].before = EXT_STOP;
    in[0].after = EXT_STOP;
    in[1].sync = 1;
    in[1].before = EXT_STOP;
    in[1].after = EXT_STOP;
    s->fs.opaque = s;
    s->fs.on_event = process_frame;

    return ff_framesync_configure(&s->fs);
}

static int activate(AVFilterContext *ctx)
{
    VIFContext *s = ctx->priv;
    return ff_framesync_activate(&s->fs);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    VIFContext *s = ctx->priv;

    if (s->nb_frames > 0) {
        for (int i = 0; i < 4; i++)
            av_log(ctx, AV_LOG_INFO, "VIF scale=%d average:%f min:%f max:%f\n",
                   i, s->vif_sum[i] / s->nb_frames, s->vif_min[i], s->vif_max[i]);
    }

    for (int i = 0; i < NUM_DATA_BUFS; i++)
        av_freep(&s->data_buf[i]);

    av_freep(&s->ref_data);
    av_freep(&s->main_data);

    for (int i = 0; i < s->nb_threads && s->temp; i++)
        av_freep(&s->temp[i]);

    av_freep(&s->temp);

    ff_framesync_uninit(&s->fs);
}

static const AVFilterPad vif_inputs[] = {
    {
        .name         = "main",
        .type         = AVMEDIA_TYPE_VIDEO,
    },{
        .name         = "reference",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input_ref,
    },
};

static const AVFilterPad vif_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
    },
};

const AVFilter ff_vf_vif = {
    .name          = "vif",
    .description   = NULL_IF_CONFIG_SMALL("Calculate the VIF between two video streams."),
    .preinit       = vif_framesync_preinit,
    .uninit        = uninit,
    .priv_size     = sizeof(VIFContext),
    .priv_class    = &vif_class,
    .activate      = activate,
    FILTER_INPUTS(vif_inputs),
    FILTER_OUTPUTS(vif_outputs),
    FILTER_PIXFMTS_ARRAY(pix_fmts),
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL |
                     AVFILTER_FLAG_SLICE_THREADS |
                     AVFILTER_FLAG_METADATA_ONLY,
};