FFmpeg
af_dynaudnorm.c
1 /*
2  * Dynamic Audio Normalizer
3  * Copyright (c) 2015 LoRd_MuldeR <mulder2@gmx.de>. Some rights reserved.
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Dynamic Audio Normalizer
25  */
26 
27 #include <float.h>
28 
29 #include "libavutil/avassert.h"
30 #include "libavutil/channel_layout.h"
31 #include "libavutil/eval.h"
32 #include "libavutil/mem.h"
33 #include "libavutil/opt.h"
34 
35 #define MIN_FILTER_SIZE 3
36 #define MAX_FILTER_SIZE 301
37 
38 #define FF_BUFQUEUE_SIZE (MAX_FILTER_SIZE + 1)
39 #include "bufferqueue.h"
40 
41 #include "audio.h"
42 #include "avfilter.h"
43 #include "filters.h"
44 
45 static const char * const var_names[] = {
46  "ch", ///< the value of the current channel
47  "sn", ///< number of samples
48  "nb_channels",
49  "t", ///< timestamp expressed in seconds
50  "sr", ///< sample rate
51  "p", ///< peak value
52  NULL
53 };
54 
55 enum var_name {
56  VAR_CH,
57  VAR_SN,
58  VAR_NB_CHANNELS,
59  VAR_T,
60  VAR_SR,
61  VAR_P,
62  VAR_VARS_NB
63 };
64 
65 typedef struct local_gain {
66  double max_gain;
67  double threshold;
68 } local_gain;
69 
70 typedef struct cqueue {
71  double *elements;
72  int size;
73  int max_size;
74  int nb_elements;
75 } cqueue;
76 
77 typedef struct DynamicAudioNormalizerContext {
78  const AVClass *class;
79 
80  struct FFBufQueue queue;
81 
82  int frame_len;
83  int frame_len_msec;
84  int filter_size;
85  int dc_correction;
86  int channels_coupled;
87  int alt_boundary_mode;
88  double overlap;
89  char *expr_str;
90 
91  double peak_value;
92  double max_amplification;
93  double target_rms;
94  double compress_factor;
95  double threshold;
96  double *prev_amplification_factor;
97  double *dc_correction_value;
98  double *compress_threshold;
99  double *weights;
100 
101  int channels;
102  int sample_advance;
103  int eof;
104  char *channels_to_filter;
105  AVChannelLayout ch_layout;
106  int64_t pts;
107 
108  cqueue **gain_history_original;
109  cqueue **gain_history_minimum;
110  cqueue **gain_history_smoothed;
111  cqueue **threshold_history;
112 
113  cqueue *is_enabled;
114 
115  AVFrame *window;
116 
117  AVExpr *expr;
118  double var_values[VAR_VARS_NB];
119 } DynamicAudioNormalizerContext;
120 
121 typedef struct ThreadData {
122  AVFrame *in, *out;
123  int enabled;
124 } ThreadData;
125 
126 #define OFFSET(x) offsetof(DynamicAudioNormalizerContext, x)
127 #define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
128 
129 static const AVOption dynaudnorm_options[] = {
130  { "framelen", "set the frame length in msec", OFFSET(frame_len_msec), AV_OPT_TYPE_INT, {.i64 = 500}, 10, 8000, FLAGS },
131  { "f", "set the frame length in msec", OFFSET(frame_len_msec), AV_OPT_TYPE_INT, {.i64 = 500}, 10, 8000, FLAGS },
132  { "gausssize", "set the filter size", OFFSET(filter_size), AV_OPT_TYPE_INT, {.i64 = 31}, 3, 301, FLAGS },
133  { "g", "set the filter size", OFFSET(filter_size), AV_OPT_TYPE_INT, {.i64 = 31}, 3, 301, FLAGS },
134  { "peak", "set the peak value", OFFSET(peak_value), AV_OPT_TYPE_DOUBLE, {.dbl = 0.95}, 0.0, 1.0, FLAGS },
135  { "p", "set the peak value", OFFSET(peak_value), AV_OPT_TYPE_DOUBLE, {.dbl = 0.95}, 0.0, 1.0, FLAGS },
136  { "maxgain", "set the max amplification", OFFSET(max_amplification), AV_OPT_TYPE_DOUBLE, {.dbl = 10.0}, 1.0, 100.0, FLAGS },
137  { "m", "set the max amplification", OFFSET(max_amplification), AV_OPT_TYPE_DOUBLE, {.dbl = 10.0}, 1.0, 100.0, FLAGS },
138  { "targetrms", "set the target RMS", OFFSET(target_rms), AV_OPT_TYPE_DOUBLE, {.dbl = 0.0}, 0.0, 1.0, FLAGS },
139  { "r", "set the target RMS", OFFSET(target_rms), AV_OPT_TYPE_DOUBLE, {.dbl = 0.0}, 0.0, 1.0, FLAGS },
140  { "coupling", "set channel coupling", OFFSET(channels_coupled), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, FLAGS },
141  { "n", "set channel coupling", OFFSET(channels_coupled), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, FLAGS },
142  { "correctdc", "set DC correction", OFFSET(dc_correction), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
143  { "c", "set DC correction", OFFSET(dc_correction), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
144  { "altboundary", "set alternative boundary mode", OFFSET(alt_boundary_mode), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
145  { "b", "set alternative boundary mode", OFFSET(alt_boundary_mode), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
146  { "compress", "set the compress factor", OFFSET(compress_factor), AV_OPT_TYPE_DOUBLE, {.dbl = 0.0}, 0.0, 30.0, FLAGS },
147  { "s", "set the compress factor", OFFSET(compress_factor), AV_OPT_TYPE_DOUBLE, {.dbl = 0.0}, 0.0, 30.0, FLAGS },
148  { "threshold", "set the threshold value", OFFSET(threshold), AV_OPT_TYPE_DOUBLE, {.dbl = 0.0}, 0.0, 1.0, FLAGS },
149  { "t", "set the threshold value", OFFSET(threshold), AV_OPT_TYPE_DOUBLE, {.dbl = 0.0}, 0.0, 1.0, FLAGS },
150  { "channels", "set channels to filter", OFFSET(channels_to_filter),AV_OPT_TYPE_STRING, {.str="all"}, 0, 0, FLAGS },
151  { "h", "set channels to filter", OFFSET(channels_to_filter),AV_OPT_TYPE_STRING, {.str="all"}, 0, 0, FLAGS },
152  { "overlap", "set the frame overlap", OFFSET(overlap), AV_OPT_TYPE_DOUBLE, {.dbl=.0}, 0.0, 1.0, FLAGS },
153  { "o", "set the frame overlap", OFFSET(overlap), AV_OPT_TYPE_DOUBLE, {.dbl=.0}, 0.0, 1.0, FLAGS },
154  { "curve", "set the custom peak mapping curve",OFFSET(expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
155  { "v", "set the custom peak mapping curve",OFFSET(expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
156  { NULL }
157 };
158 
159 AVFILTER_DEFINE_CLASS(dynaudnorm);
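
For reference, the short and long option names above map directly to filter arguments on the command line; a typical invocation (assuming a standard ffmpeg build with this filter enabled) would be -af dynaudnorm=f=500:g=31:p=0.95, which selects 500 ms frames, a 31-frame Gaussian window and a target peak of 0.95 (the defaults shown in the table).
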
160 
161 static av_cold int init(AVFilterContext *ctx)
162 {
163  DynamicAudioNormalizerContext *s = ctx->priv;
164 
165  if (!(s->filter_size & 1)) {
166  av_log(ctx, AV_LOG_WARNING, "filter size %d is invalid. Changing to an odd value.\n", s->filter_size);
167  s->filter_size |= 1;
168  }
169 
170  return 0;
171 }
172 
173 static inline int frame_size(int sample_rate, int frame_len_msec)
174 {
175  const int frame_size = lrint((double)sample_rate * (frame_len_msec / 1000.0));
176  return frame_size + (frame_size % 2);
177 }
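
As an illustration of the rounding above (a standalone sketch, not part of this filter), the helper below reproduces frame_size() and prints the even sample counts it yields for two hypothetical inputs:

    #include <math.h>
    #include <stdio.h>

    /* Same arithmetic as frame_size() above: convert milliseconds to samples,
     * round to the nearest integer, then force the count to be even. */
    static int example_frame_size(int sample_rate, int frame_len_msec)
    {
        const int n = lrint((double)sample_rate * (frame_len_msec / 1000.0));
        return n + (n % 2);
    }

    int main(void)
    {
        printf("%d\n", example_frame_size(44100, 500)); /* 22050, already even      */
        printf("%d\n", example_frame_size(44100, 53));  /* 2337, bumped up to 2338  */
        return 0;
    }
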
178 
179 static cqueue *cqueue_create(int size, int max_size)
180 {
181  cqueue *q;
182 
183  if (max_size < size)
184  return NULL;
185 
186  q = av_malloc(sizeof(cqueue));
187  if (!q)
188  return NULL;
189 
190  q->max_size = max_size;
191  q->size = size;
192  q->nb_elements = 0;
193 
194  q->elements = av_malloc_array(max_size, sizeof(double));
195  if (!q->elements) {
196  av_free(q);
197  return NULL;
198  }
199 
200  return q;
201 }
202 
203 static void cqueue_free(cqueue *q)
204 {
205  if (q)
206  av_free(q->elements);
207  av_free(q);
208 }
209 
210 static int cqueue_size(cqueue *q)
211 {
212  return q->nb_elements;
213 }
214 
215 static int cqueue_empty(cqueue *q)
216 {
217  return q->nb_elements <= 0;
218 }
219 
220 static int cqueue_enqueue(cqueue *q, double element)
221 {
222  av_assert2(q->nb_elements < q->max_size);
223 
224  q->elements[q->nb_elements] = element;
225  q->nb_elements++;
226 
227  return 0;
228 }
229 
230 static double cqueue_peek(cqueue *q, int index)
231 {
232  av_assert2(index < q->nb_elements);
233  return q->elements[index];
234 }
235 
236 static int cqueue_dequeue(cqueue *q, double *element)
237 {
238  av_assert2(q->nb_elements > 0);
239 
240  *element = q->elements[0];
241  memmove(&q->elements[0], &q->elements[1], (q->nb_elements - 1) * sizeof(double));
242  q->nb_elements--;
243 
244  return 0;
245 }
246 
247 static int cqueue_pop(cqueue *q)
248 {
249  av_assert2(q->nb_elements > 0);
250 
251  memmove(&q->elements[0], &q->elements[1], (q->nb_elements - 1) * sizeof(double));
252  q->nb_elements--;
253 
254  return 0;
255 }
256 
257 static void cqueue_resize(cqueue *q, int new_size)
258 {
259  av_assert2(q->max_size >= new_size);
260  av_assert2(MIN_FILTER_SIZE <= new_size);
261 
262  if (new_size > q->nb_elements) {
263  const int side = (new_size - q->nb_elements) / 2;
264 
265  memmove(q->elements + side, q->elements, sizeof(double) * q->nb_elements);
266  for (int i = 0; i < side; i++)
267  q->elements[i] = q->elements[side];
268  q->nb_elements = new_size - 1 - side;
269  } else {
270  int count = (q->size - new_size + 1) / 2;
271 
272  while (count-- > 0)
273  cqueue_pop(q);
274  }
275 
276  q->size = new_size;
277 }
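
A brief usage sketch of the cqueue helpers above (illustrative only, not part of the filter; error checking omitted): the queue is a flat array where enqueue appends at the tail and dequeue/pop shift the remaining elements toward the front.

    static void cqueue_demo(void)
    {
        cqueue *q = cqueue_create(3, MAX_FILTER_SIZE);
        double v;

        cqueue_enqueue(q, 0.5);
        cqueue_enqueue(q, 0.8);
        cqueue_enqueue(q, 0.7);

        v = cqueue_peek(q, 0); /* oldest element, 0.5, stays in the queue       */
        cqueue_dequeue(q, &v); /* removes the oldest element and returns 0.5    */
        cqueue_pop(q);         /* drops 0.8 without reading it; 0.7 remains     */

        cqueue_free(q);
    }
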
278 
279 static void init_gaussian_filter(DynamicAudioNormalizerContext *s)
280 {
281  double total_weight = 0.0;
282  const double sigma = (((s->filter_size / 2.0) - 1.0) / 3.0) + (1.0 / 3.0);
283  double adjust;
284 
285  // Pre-compute constants
286  const int offset = s->filter_size / 2;
287  const double c1 = 1.0 / (sigma * sqrt(2.0 * M_PI));
288  const double c2 = 2.0 * sigma * sigma;
289 
290  // Compute weights
291  for (int i = 0; i < s->filter_size; i++) {
292  const int x = i - offset;
293 
294  s->weights[i] = c1 * exp(-x * x / c2);
295  total_weight += s->weights[i];
296  }
297 
298  // Adjust weights
299  adjust = 1.0 / total_weight;
300  for (int i = 0; i < s->filter_size; i++) {
301  s->weights[i] *= adjust;
302  }
303 }
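
The kernel above is a discrete Gaussian of odd length filter_size, normalized so the weights sum to 1. A standalone sketch (independent of the filter context) reproducing the same computation for a hypothetical 7-tap window:

    #include <math.h>
    #include <stdio.h>

    #ifndef M_PI
    #define M_PI 3.14159265358979323846
    #endif

    int main(void)
    {
        const int filter_size = 7;
        const double sigma = (((filter_size / 2.0) - 1.0) / 3.0) + (1.0 / 3.0);
        const int offset = filter_size / 2;
        const double c1 = 1.0 / (sigma * sqrt(2.0 * M_PI));
        const double c2 = 2.0 * sigma * sigma;
        double weights[7], total = 0.0;

        for (int i = 0; i < filter_size; i++) {
            const int x = i - offset;
            weights[i] = c1 * exp(-x * x / c2);
            total += weights[i];
        }
        /* The normalized weights are symmetric, peak at the centre tap
         * and sum to 1.0. */
        for (int i = 0; i < filter_size; i++)
            printf("%f\n", weights[i] / total);
        return 0;
    }
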
304 
305 static av_cold void uninit(AVFilterContext *ctx)
306 {
307  DynamicAudioNormalizerContext *s = ctx->priv;
308 
309  av_freep(&s->prev_amplification_factor);
310  av_freep(&s->dc_correction_value);
311  av_freep(&s->compress_threshold);
312 
313  for (int c = 0; c < s->channels; c++) {
314  if (s->gain_history_original)
315  cqueue_free(s->gain_history_original[c]);
316  if (s->gain_history_minimum)
317  cqueue_free(s->gain_history_minimum[c]);
318  if (s->gain_history_smoothed)
319  cqueue_free(s->gain_history_smoothed[c]);
320  if (s->threshold_history)
321  cqueue_free(s->threshold_history[c]);
322  }
323 
324  av_freep(&s->gain_history_original);
325  av_freep(&s->gain_history_minimum);
326  av_freep(&s->gain_history_smoothed);
327  av_freep(&s->threshold_history);
328 
329  cqueue_free(s->is_enabled);
330  s->is_enabled = NULL;
331 
332  av_freep(&s->weights);
333 
334  av_channel_layout_uninit(&s->ch_layout);
335 
336  ff_bufqueue_discard_all(&s->queue);
337 
338  av_frame_free(&s->window);
339  av_expr_free(s->expr);
340  s->expr = NULL;
341 }
342 
343 static int config_input(AVFilterLink *inlink)
344 {
345  AVFilterContext *ctx = inlink->dst;
346  DynamicAudioNormalizerContext *s = ctx->priv;
347  int ret = 0;
348 
349  uninit(ctx);
350 
351  s->channels = inlink->ch_layout.nb_channels;
352  s->frame_len = frame_size(inlink->sample_rate, s->frame_len_msec);
353  av_log(ctx, AV_LOG_DEBUG, "frame len %d\n", s->frame_len);
354 
355  s->prev_amplification_factor = av_malloc_array(inlink->ch_layout.nb_channels, sizeof(*s->prev_amplification_factor));
356  s->dc_correction_value = av_calloc(inlink->ch_layout.nb_channels, sizeof(*s->dc_correction_value));
357  s->compress_threshold = av_calloc(inlink->ch_layout.nb_channels, sizeof(*s->compress_threshold));
358  s->gain_history_original = av_calloc(inlink->ch_layout.nb_channels, sizeof(*s->gain_history_original));
359  s->gain_history_minimum = av_calloc(inlink->ch_layout.nb_channels, sizeof(*s->gain_history_minimum));
360  s->gain_history_smoothed = av_calloc(inlink->ch_layout.nb_channels, sizeof(*s->gain_history_smoothed));
361  s->threshold_history = av_calloc(inlink->ch_layout.nb_channels, sizeof(*s->threshold_history));
362  s->weights = av_malloc_array(MAX_FILTER_SIZE, sizeof(*s->weights));
363  s->is_enabled = cqueue_create(s->filter_size, MAX_FILTER_SIZE);
364  if (!s->prev_amplification_factor || !s->dc_correction_value ||
365  !s->compress_threshold ||
366  !s->gain_history_original || !s->gain_history_minimum ||
367  !s->gain_history_smoothed || !s->threshold_history ||
368  !s->is_enabled || !s->weights)
369  return AVERROR(ENOMEM);
370 
371  for (int c = 0; c < inlink->ch_layout.nb_channels; c++) {
372  s->prev_amplification_factor[c] = 1.0;
373 
374  s->gain_history_original[c] = cqueue_create(s->filter_size, MAX_FILTER_SIZE);
375  s->gain_history_minimum[c] = cqueue_create(s->filter_size, MAX_FILTER_SIZE);
376  s->gain_history_smoothed[c] = cqueue_create(s->filter_size, MAX_FILTER_SIZE);
377  s->threshold_history[c] = cqueue_create(s->filter_size, MAX_FILTER_SIZE);
378 
379  if (!s->gain_history_original[c] || !s->gain_history_minimum[c] ||
380  !s->gain_history_smoothed[c] || !s->threshold_history[c])
381  return AVERROR(ENOMEM);
382  }
383 
384  init_gaussian_filter(s);
385 
386  s->window = ff_get_audio_buffer(ctx->outputs[0], s->frame_len * 2);
387  if (!s->window)
388  return AVERROR(ENOMEM);
389  s->sample_advance = FFMAX(1, lrint(s->frame_len * (1. - s->overlap)));
390 
391  s->var_values[VAR_SR] = inlink->sample_rate;
392  s->var_values[VAR_NB_CHANNELS] = s->channels;
393 
394  if (s->expr_str)
395  ret = av_expr_parse(&s->expr, s->expr_str, var_names, NULL, NULL,
396  NULL, NULL, 0, ctx);
397  return ret;
398 }
399 
400 static inline double fade(double prev, double next, int pos, int length)
401 {
402  const double step_size = 1.0 / length;
403  const double f0 = 1.0 - (step_size * (pos + 1.0));
404  const double f1 = 1.0 - f0;
405  return f0 * prev + f1 * next;
406 }
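
fade() is a plain linear crossfade across the frame: with prev = 1.0, next = 2.0 and length = 4, positions 0 through 3 produce 1.25, 1.50, 1.75 and 2.00, so the last sample of the frame carries the new value in full.
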
407 
408 static inline double pow_2(const double value)
409 {
410  return value * value;
411 }
412 
413 static inline double bound(const double threshold, const double val)
414 {
415  const double CONST = 0.8862269254527580136490837416705725913987747280611935; //sqrt(PI) / 2.0
416  return erf(CONST * (val / threshold)) * threshold;
417 }
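
bound() is a smooth limiter built on erf(): inputs well below the threshold pass through almost unchanged, while larger inputs saturate at the threshold instead of clipping. A standalone sketch (not part of the filter) evaluating it for a few hypothetical values:

    #include <math.h>
    #include <stdio.h>

    static double example_bound(const double threshold, const double val)
    {
        const double k = 0.8862269254527580; /* sqrt(PI) / 2.0, as above */
        return erf(k * (val / threshold)) * threshold;
    }

    int main(void)
    {
        printf("%f\n", example_bound(1.0, 0.1)); /* ~0.10, nearly linear     */
        printf("%f\n", example_bound(1.0, 1.0)); /* ~0.79, already squeezed  */
        printf("%f\n", example_bound(1.0, 5.0)); /* ~1.00, clamped smoothly  */
        return 0;
    }
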
418 
419 static double find_peak_magnitude(AVFrame *frame, int channel)
420 {
421  double max = DBL_EPSILON;
422 
423  if (channel == -1) {
424  for (int c = 0; c < frame->ch_layout.nb_channels; c++) {
425  double *data_ptr = (double *)frame->extended_data[c];
426 
427  for (int i = 0; i < frame->nb_samples; i++)
428  max = fmax(max, fabs(data_ptr[i]));
429  }
430  } else {
431  double *data_ptr = (double *)frame->extended_data[channel];
432 
433  for (int i = 0; i < frame->nb_samples; i++)
434  max = fmax(max, fabs(data_ptr[i]));
435  }
436 
437  return max;
438 }
439 
440 static double compute_frame_rms(AVFrame *frame, int channel)
441 {
442  double rms_value = 0.0;
443 
444  if (channel == -1) {
445  for (int c = 0; c < frame->ch_layout.nb_channels; c++) {
446  const double *data_ptr = (double *)frame->extended_data[c];
447 
448  for (int i = 0; i < frame->nb_samples; i++) {
449  rms_value += pow_2(data_ptr[i]);
450  }
451  }
452 
453  rms_value /= frame->nb_samples * frame->ch_layout.nb_channels;
454  } else {
455  const double *data_ptr = (double *)frame->extended_data[channel];
456  for (int i = 0; i < frame->nb_samples; i++) {
457  rms_value += pow_2(data_ptr[i]);
458  }
459 
460  rms_value /= frame->nb_samples;
461  }
462 
463  return fmax(sqrt(rms_value), DBL_EPSILON);
464 }
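
In other words, compute_frame_rms() returns sqrt(sum(x[i]^2) / N), the root of the mean squared sample value over one channel or over all channels, clamped below by DBL_EPSILON so that the later division target_rms / rms in get_max_local_gain() stays finite.
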
465 
466 static local_gain get_max_local_gain(DynamicAudioNormalizerContext *s, AVFrame *frame,
467  int channel)
468 {
469  const double peak_magnitude = find_peak_magnitude(frame, channel);
470  const double maximum_gain = s->peak_value / peak_magnitude;
471  const double rms_gain = s->target_rms > DBL_EPSILON ? (s->target_rms / compute_frame_rms(frame, channel)) : DBL_MAX;
472  double target_gain = DBL_MAX;
473  local_gain gain;
474 
475  if (s->expr_str) {
476  double var_values[VAR_VARS_NB];
477 
478  memcpy(var_values, s->var_values, sizeof(var_values));
479 
480  var_values[VAR_CH] = channel;
481  var_values[VAR_P] = peak_magnitude;
482 
483  target_gain = av_expr_eval(s->expr, var_values, s) / peak_magnitude;
484  }
485 
486  gain.threshold = peak_magnitude > s->threshold;
487  gain.max_gain = bound(s->max_amplification, fmin(target_gain, fmin(maximum_gain, rms_gain)));
488 
489  return gain;
490 }
491 
492 static double minimum_filter(cqueue *q)
493 {
494  double min = DBL_MAX;
495 
496  for (int i = 0; i < cqueue_size(q); i++) {
497  min = fmin(min, cqueue_peek(q, i));
498  }
499 
500  return min;
501 }
502 
503 static double gaussian_filter(DynamicAudioNormalizerContext *s, cqueue *q, cqueue *tq)
504 {
505  const double *weights = s->weights;
506  double result = 0.0, tsum = 0.0;
507 
508  for (int i = 0; i < cqueue_size(q); i++) {
509  double tq_item = cqueue_peek(tq, i);
510  double q_item = cqueue_peek(q, i);
511 
512  tsum += tq_item * weights[i];
513  result += tq_item * weights[i] * q_item;
514  }
515 
516  if (tsum == 0.0)
517  result = 1.0;
518 
519  return result;
520 }
521 
522 static void update_gain_history(DynamicAudioNormalizerContext *s, int channel,
523  local_gain gain)
524 {
525  if (cqueue_empty(s->gain_history_original[channel])) {
526  const int pre_fill_size = s->filter_size / 2;
527  const double initial_value = s->alt_boundary_mode ? gain.max_gain : fmin(1.0, gain.max_gain);
528 
529  s->prev_amplification_factor[channel] = initial_value;
530 
531  while (cqueue_size(s->gain_history_original[channel]) < pre_fill_size) {
532  cqueue_enqueue(s->gain_history_original[channel], initial_value);
533  cqueue_enqueue(s->threshold_history[channel], gain.threshold);
534  }
535  }
536 
537  cqueue_enqueue(s->gain_history_original[channel], gain.max_gain);
538 
539  while (cqueue_size(s->gain_history_original[channel]) >= s->filter_size) {
540  double minimum;
541 
542  if (cqueue_empty(s->gain_history_minimum[channel])) {
543  const int pre_fill_size = s->filter_size / 2;
544  double initial_value = s->alt_boundary_mode ? cqueue_peek(s->gain_history_original[channel], 0) : 1.0;
545  int input = pre_fill_size;
546 
547  while (cqueue_size(s->gain_history_minimum[channel]) < pre_fill_size) {
548  input++;
549  initial_value = fmin(initial_value, cqueue_peek(s->gain_history_original[channel], input));
550  cqueue_enqueue(s->gain_history_minimum[channel], initial_value);
551  }
552  }
553 
554  minimum = minimum_filter(s->gain_history_original[channel]);
555 
556  cqueue_enqueue(s->gain_history_minimum[channel], minimum);
557 
558  cqueue_enqueue(s->threshold_history[channel], gain.threshold);
559 
560  cqueue_pop(s->gain_history_original[channel]);
561  }
562 
563  while (cqueue_size(s->gain_history_minimum[channel]) >= s->filter_size) {
564  double smoothed, limit;
565 
566  smoothed = gaussian_filter(s, s->gain_history_minimum[channel], s->threshold_history[channel]);
567  limit = cqueue_peek(s->gain_history_original[channel], 0);
568  smoothed = fmin(smoothed, limit);
569 
570  cqueue_enqueue(s->gain_history_smoothed[channel], smoothed);
571 
572  cqueue_pop(s->gain_history_minimum[channel]);
573  cqueue_pop(s->threshold_history[channel]);
574  }
575 }
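
Roughly, each new frame gain passes through three stages here: the raw per-frame maximum gain is pushed into gain_history_original, a sliding minimum over that history feeds gain_history_minimum, and the Gaussian-weighted combination of those minima (weighted by threshold_history) becomes the smoothed gain that is eventually applied; the half-window pre-fill keeps the lookahead delay constant for the very first frames.
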
576 
577 static int update_gain_histories(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
578 {
579  DynamicAudioNormalizerContext *s = ctx->priv;
580  AVFrame *analyze_frame = arg;
581  const int channels = s->channels;
582  const int start = (channels * jobnr) / nb_jobs;
583  const int end = (channels * (jobnr+1)) / nb_jobs;
584 
585  for (int c = start; c < end; c++)
586  update_gain_history(s, c, get_max_local_gain(s, analyze_frame, c));
587 
588  return 0;
589 }
590 
591 static inline double update_value(double new, double old, double aggressiveness)
592 {
593  av_assert0((aggressiveness >= 0.0) && (aggressiveness <= 1.0));
594  return aggressiveness * new + (1.0 - aggressiveness) * old;
595 }
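
update_value() is a one-pole smoother; with aggressiveness 0.1 (the factor used for DC correction below), new = 0.2 and old = 0.0 give 0.1 * 0.2 + 0.9 * 0.0 = 0.02, so the running estimate moves only a tenth of the way toward each new frame average.
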
596 
597 static int bypass_channel(DynamicAudioNormalizerContext *s, AVFrame *frame, int ch)
598 {
599  enum AVChannel channel = av_channel_layout_channel_from_index(&frame->ch_layout, ch);
600 
601  return av_channel_layout_index_from_channel(&s->ch_layout, channel) < 0;
602 }
603 
604 static void perform_dc_correction(DynamicAudioNormalizerContext *s, AVFrame *frame)
605 {
606  const double diff = 1.0 / frame->nb_samples;
607  int is_first_frame = cqueue_empty(s->gain_history_original[0]);
608 
609  for (int c = 0; c < s->channels; c++) {
610  const int bypass = bypass_channel(s, frame, c);
611  double *dst_ptr = (double *)frame->extended_data[c];
612  double current_average_value = 0.0;
613  double prev_value;
614 
615  for (int i = 0; i < frame->nb_samples; i++)
616  current_average_value += dst_ptr[i] * diff;
617 
618  prev_value = is_first_frame ? current_average_value : s->dc_correction_value[c];
619  s->dc_correction_value[c] = is_first_frame ? current_average_value : update_value(current_average_value, s->dc_correction_value[c], 0.1);
620 
621  for (int i = 0; i < frame->nb_samples && !bypass; i++) {
622  dst_ptr[i] -= fade(prev_value, s->dc_correction_value[c], i, frame->nb_samples);
623  }
624  }
625 }
626 
627 static double setup_compress_thresh(double threshold)
628 {
629  if ((threshold > DBL_EPSILON) && (threshold < (1.0 - DBL_EPSILON))) {
630  double current_threshold = threshold;
631  double step_size = 1.0;
632 
633  while (step_size > DBL_EPSILON) {
634  while ((llrint((current_threshold + step_size) * (UINT64_C(1) << 63)) >
635  llrint(current_threshold * (UINT64_C(1) << 63))) &&
636  (bound(current_threshold + step_size, 1.0) <= threshold)) {
637  current_threshold += step_size;
638  }
639 
640  step_size /= 2.0;
641  }
642 
643  return current_threshold;
644  } else {
645  return threshold;
646  }
647 }
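
setup_compress_thresh() searches, by repeatedly halving the step size, for the largest threshold whose erf-based bound of a full-scale sample still stays at or below the requested value. A simplified standalone sketch of the same idea (using a plain floating-point progress check where the code above uses a fixed-point llrint() comparison to guarantee the loop advances):

    #include <float.h>
    #include <math.h>
    #include <stdio.h>

    static double example_bound(const double threshold, const double val)
    {
        const double k = 0.8862269254527580; /* sqrt(PI) / 2.0 */
        return erf(k * (val / threshold)) * threshold;
    }

    static double example_setup_compress_thresh(double threshold)
    {
        double current = threshold, step = 1.0;

        if (threshold <= DBL_EPSILON || threshold >= 1.0 - DBL_EPSILON)
            return threshold;

        while (step > DBL_EPSILON) {
            /* Grow the threshold while a limited full-scale sample
             * still does not exceed the requested threshold. */
            while (current + step > current &&
                   example_bound(current + step, 1.0) <= threshold)
                current += step;
            step /= 2.0;
        }
        return current;
    }

    int main(void)
    {
        /* Prints a value slightly above 0.5 whose soft limit of a
         * full-scale sample is approximately the requested 0.5. */
        printf("%f\n", example_setup_compress_thresh(0.5));
        return 0;
    }
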
648 
649 static double compute_frame_std_dev(DynamicAudioNormalizerContext *s,
650  AVFrame *frame, int channel)
651 {
652  double variance = 0.0;
653 
654  if (channel == -1) {
655  for (int c = 0; c < s->channels; c++) {
656  const double *data_ptr = (double *)frame->extended_data[c];
657 
658  for (int i = 0; i < frame->nb_samples; i++) {
659  variance += pow_2(data_ptr[i]); // Assume that MEAN is *zero*
660  }
661  }
662  variance /= (s->channels * frame->nb_samples) - 1;
663  } else {
664  const double *data_ptr = (double *)frame->extended_data[channel];
665 
666  for (int i = 0; i < frame->nb_samples; i++) {
667  variance += pow_2(data_ptr[i]); // Assume that MEAN is *zero*
668  }
669  variance /= frame->nb_samples - 1;
670  }
671 
672  return fmax(sqrt(variance), DBL_EPSILON);
673 }
674 
675 static void perform_compression(DynamicAudioNormalizerContext *s, AVFrame *frame)
676 {
677  int is_first_frame = cqueue_empty(s->gain_history_original[0]);
678 
679  if (s->channels_coupled) {
680  const double standard_deviation = compute_frame_std_dev(s, frame, -1);
681  const double current_threshold = fmin(1.0, s->compress_factor * standard_deviation);
682 
683  const double prev_value = is_first_frame ? current_threshold : s->compress_threshold[0];
684  double prev_actual_thresh, curr_actual_thresh;
685  s->compress_threshold[0] = is_first_frame ? current_threshold : update_value(current_threshold, s->compress_threshold[0], (1.0/3.0));
686 
687  prev_actual_thresh = setup_compress_thresh(prev_value);
688  curr_actual_thresh = setup_compress_thresh(s->compress_threshold[0]);
689 
690  for (int c = 0; c < s->channels; c++) {
691  double *const dst_ptr = (double *)frame->extended_data[c];
692  const int bypass = bypass_channel(s, frame, c);
693 
694  if (bypass)
695  continue;
696 
697  for (int i = 0; i < frame->nb_samples; i++) {
698  const double localThresh = fade(prev_actual_thresh, curr_actual_thresh, i, frame->nb_samples);
699  dst_ptr[i] = copysign(bound(localThresh, fabs(dst_ptr[i])), dst_ptr[i]);
700  }
701  }
702  } else {
703  for (int c = 0; c < s->channels; c++) {
704  const int bypass = bypass_channel(s, frame, c);
705  const double standard_deviation = compute_frame_std_dev(s, frame, c);
706  const double current_threshold = setup_compress_thresh(fmin(1.0, s->compress_factor * standard_deviation));
707  const double prev_value = is_first_frame ? current_threshold : s->compress_threshold[c];
708  double prev_actual_thresh, curr_actual_thresh;
709  double *dst_ptr;
710 
711  s->compress_threshold[c] = is_first_frame ? current_threshold : update_value(current_threshold, s->compress_threshold[c], 1.0/3.0);
712 
713  prev_actual_thresh = setup_compress_thresh(prev_value);
714  curr_actual_thresh = setup_compress_thresh(s->compress_threshold[c]);
715 
716  dst_ptr = (double *)frame->extended_data[c];
717  for (int i = 0; i < frame->nb_samples && !bypass; i++) {
718  const double localThresh = fade(prev_actual_thresh, curr_actual_thresh, i, frame->nb_samples);
719  dst_ptr[i] = copysign(bound(localThresh, fabs(dst_ptr[i])), dst_ptr[i]);
720  }
721  }
722  }
723 }
724 
725 static int analyze_frame(AVFilterContext *ctx, AVFilterLink *outlink, AVFrame **frame)
726 {
727  FilterLink *outl = ff_filter_link(outlink);
728  DynamicAudioNormalizerContext *s = ctx->priv;
729  AVFrame *analyze_frame;
730 
731  if (s->dc_correction || s->compress_factor > DBL_EPSILON) {
732  int ret;
733 
734  if (!av_frame_is_writable(*frame)) {
735  AVFrame *out = ff_get_audio_buffer(outlink, (*frame)->nb_samples);
736 
737  if (!out) {
738  av_frame_free(frame);
739  return AVERROR(ENOMEM);
740  }
741  ret = av_frame_copy_props(out, *frame);
742  if (ret < 0) {
743  av_frame_free(frame);
744  av_frame_free(&out);
745  return ret;
746  }
747  ret = av_frame_copy(out, *frame);
748  if (ret < 0) {
749  av_frame_free(frame);
750  av_frame_free(&out);
751  return ret;
752  }
753 
754  av_frame_free(frame);
755  *frame = out;
756  }
757  }
758 
759  if (s->dc_correction)
760  perform_dc_correction(s, *frame);
761 
762  if (s->compress_factor > DBL_EPSILON)
763  perform_compression(s, *frame);
764 
765  if (s->frame_len != s->sample_advance) {
766  const int offset = s->frame_len - s->sample_advance;
767 
768  for (int c = 0; c < s->channels; c++) {
769  double *src = (double *)s->window->extended_data[c];
770 
771  memmove(src, &src[s->sample_advance], offset * sizeof(double));
772  memcpy(&src[offset], (*frame)->extended_data[c], (*frame)->nb_samples * sizeof(double));
773  memset(&src[offset + (*frame)->nb_samples], 0, (s->sample_advance - (*frame)->nb_samples) * sizeof(double));
774  }
775 
776  analyze_frame = s->window;
777  } else {
778  av_samples_copy(s->window->extended_data, (*frame)->extended_data, 0, 0,
779  FFMIN(s->frame_len, (*frame)->nb_samples), (*frame)->ch_layout.nb_channels, (*frame)->format);
780  analyze_frame = *frame;
781  }
782 
783  s->var_values[VAR_SN] = outl->sample_count_in;
784  s->var_values[VAR_T] = s->var_values[VAR_SN] * (double)1/outlink->sample_rate;
785 
786  if (s->channels_coupled) {
787  const local_gain gain = get_max_local_gain(s, analyze_frame, -1);
788  for (int c = 0; c < s->channels; c++)
789  update_gain_history(s, c, gain);
790  } else {
791  ff_filter_execute(ctx, update_gain_histories, analyze_frame, NULL,
792  FFMIN(s->channels, ff_filter_get_nb_threads(ctx)));
793  }
794 
795  return 0;
796 }
797 
798 static void amplify_channel(DynamicAudioNormalizerContext *s, AVFrame *in,
799  AVFrame *frame, int enabled, int c)
800 {
801  const int bypass = bypass_channel(s, frame, c);
802  const double *src_ptr = (const double *)in->extended_data[c];
803  double *dst_ptr = (double *)frame->extended_data[c];
804  double current_amplification_factor;
805 
806  cqueue_dequeue(s->gain_history_smoothed[c], &current_amplification_factor);
807 
808  for (int i = 0; i < frame->nb_samples && enabled && !bypass; i++) {
809  const double amplification_factor = fade(s->prev_amplification_factor[c],
810  current_amplification_factor, i,
811  frame->nb_samples);
812 
813  dst_ptr[i] = src_ptr[i] * amplification_factor;
814  }
815 
816  s->prev_amplification_factor[c] = current_amplification_factor;
817 }
818 
819 static int amplify_channels(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
820 {
821  DynamicAudioNormalizerContext *s = ctx->priv;
822  ThreadData *td = arg;
823  AVFrame *out = td->out;
824  AVFrame *in = td->in;
825  const int enabled = td->enabled;
826  const int channels = s->channels;
827  const int start = (channels * jobnr) / nb_jobs;
828  const int end = (channels * (jobnr+1)) / nb_jobs;
829 
830  for (int ch = start; ch < end; ch++)
831  amplify_channel(s, in, out, enabled, ch);
832 
833  return 0;
834 }
835 
836 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
837 {
838  AVFilterContext *ctx = inlink->dst;
839  DynamicAudioNormalizerContext *s = ctx->priv;
840  AVFilterLink *outlink = ctx->outputs[0];
841  ThreadData td;
842  int ret;
843 
844  while (((s->queue.available >= s->filter_size) ||
845  (s->eof && s->queue.available)) &&
846  !cqueue_empty(s->gain_history_smoothed[0])) {
847  AVFrame *in = ff_bufqueue_get(&s->queue);
848  AVFrame *out;
849  double is_enabled;
850 
851  cqueue_dequeue(s->is_enabled, &is_enabled);
852 
853  if (av_frame_is_writable(in)) {
854  out = in;
855  } else {
856  out = ff_get_audio_buffer(outlink, in->nb_samples);
857  if (!out) {
858  av_frame_free(&in);
859  return AVERROR(ENOMEM);
860  }
861  av_frame_copy_props(out, in);
862  }
863 
864  td.in = in;
865  td.out = out;
866  td.enabled = is_enabled > 0.;
867  ff_filter_execute(ctx, amplify_channels, &td, NULL,
868  FFMIN(s->channels, ff_filter_get_nb_threads(ctx)));
869 
870  s->pts = out->pts + av_rescale_q(out->nb_samples, av_make_q(1, outlink->sample_rate),
871  outlink->time_base);
872  if (out != in)
873  av_frame_free(&in);
874  ret = ff_filter_frame(outlink, out);
875  if (ret < 0)
876  return ret;
877  }
878 
879  ret = analyze_frame(ctx, outlink, &in);
880  if (ret < 0)
881  return ret;
882  if (!s->eof) {
883  ff_bufqueue_add(ctx, &s->queue, in);
884  cqueue_enqueue(s->is_enabled, !ctx->is_disabled);
885  } else {
886  av_frame_free(&in);
887  }
888 
889  return 1;
890 }
891 
892 static int flush_buffer(DynamicAudioNormalizerContext *s, AVFilterLink *inlink,
893  AVFilterLink *outlink)
894 {
895  AVFrame *out = ff_get_audio_buffer(outlink, s->sample_advance);
896 
897  if (!out)
898  return AVERROR(ENOMEM);
899 
900  for (int c = 0; c < s->channels; c++) {
901  double *dst_ptr = (double *)out->extended_data[c];
902 
903  for (int i = 0; i < out->nb_samples; i++) {
904  dst_ptr[i] = s->alt_boundary_mode ? DBL_EPSILON : ((s->target_rms > DBL_EPSILON) ? fmin(s->peak_value, s->target_rms) : s->peak_value);
905  if (s->dc_correction) {
906  dst_ptr[i] *= ((i % 2) == 1) ? -1 : 1;
907  dst_ptr[i] += s->dc_correction_value[c];
908  }
909  }
910  }
911 
912  return filter_frame(inlink, out);
913 }
914 
915 static int flush(AVFilterLink *outlink)
916 {
917  AVFilterContext *ctx = outlink->src;
918  AVFilterLink *inlink = ctx->inputs[0];
919  DynamicAudioNormalizerContext *s = ctx->priv;
920 
921  while (s->eof && cqueue_empty(s->gain_history_smoothed[0])) {
922  for (int c = 0; c < s->channels; c++)
923  update_gain_history(s, c, (local_gain){ cqueue_peek(s->gain_history_original[c], 0), 1.0});
924  }
925 
926  return flush_buffer(s, inlink, outlink);
927 }
928 
929 static int activate(AVFilterContext *ctx)
930 {
931  AVFilterLink *inlink = ctx->inputs[0];
932  AVFilterLink *outlink = ctx->outputs[0];
933  DynamicAudioNormalizerContext *s = ctx->priv;
934  AVFrame *in = NULL;
935  int ret = 0, status;
936  int64_t pts;
937 
938  ret = av_channel_layout_copy(&s->ch_layout, &inlink->ch_layout);
939  if (ret < 0)
940  return ret;
941  if (strcmp(s->channels_to_filter, "all"))
942  av_channel_layout_from_string(&s->ch_layout, s->channels_to_filter);
943 
944  FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
945 
946  if (!s->eof) {
947  ret = ff_inlink_consume_samples(inlink, s->sample_advance, s->sample_advance, &in);
948  if (ret < 0)
949  return ret;
950  if (ret > 0) {
951  ret = filter_frame(inlink, in);
952  if (ret <= 0)
953  return ret;
954  }
955 
956  if (ff_inlink_check_available_samples(inlink, s->sample_advance) > 0) {
957  ff_filter_set_ready(ctx, 10);
958  return 0;
959  }
960  }
961 
962  if (!s->eof && ff_inlink_acknowledge_status(inlink, &status, &pts)) {
963  if (status == AVERROR_EOF)
964  s->eof = 1;
965  }
966 
967  if (s->eof && s->queue.available)
968  return flush(outlink);
969 
970  if (s->eof && !s->queue.available) {
971  ff_outlink_set_status(outlink, AVERROR_EOF, s->pts);
972  return 0;
973  }
974 
975  if (!s->eof)
976  FF_FILTER_FORWARD_WANTED(outlink, inlink);
977 
978  return FFERROR_NOT_READY;
979 }
980 
981 static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
982  char *res, int res_len, int flags)
983 {
984  DynamicAudioNormalizerContext *s = ctx->priv;
985  AVFilterLink *inlink = ctx->inputs[0];
986  int prev_filter_size = s->filter_size;
987  int ret;
988 
989  ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);
990  if (ret < 0)
991  return ret;
992 
993  s->filter_size |= 1;
994  if (prev_filter_size != s->filter_size) {
995  init_gaussian_filter(s);
996 
997  for (int c = 0; c < s->channels; c++) {
998  cqueue_resize(s->gain_history_original[c], s->filter_size);
999  cqueue_resize(s->gain_history_minimum[c], s->filter_size);
1000  cqueue_resize(s->threshold_history[c], s->filter_size);
1001  }
1002  }
1003 
1004  s->frame_len = frame_size(inlink->sample_rate, s->frame_len_msec);
1005  s->sample_advance = FFMAX(1, lrint(s->frame_len * (1. - s->overlap)));
1006  if (s->expr_str) {
1007  ret = av_expr_parse(&s->expr, s->expr_str, var_names, NULL, NULL,
1008  NULL, NULL, 0, ctx);
1009  if (ret < 0)
1010  return ret;
1011  }
1012  return 0;
1013 }
1014 
1015 static const AVFilterPad avfilter_af_dynaudnorm_inputs[] = {
1016  {
1017  .name = "default",
1018  .type = AVMEDIA_TYPE_AUDIO,
1019  .config_props = config_input,
1020  },
1021 };
1022 
1023 const AVFilter ff_af_dynaudnorm = {
1024  .name = "dynaudnorm",
1025  .description = NULL_IF_CONFIG_SMALL("Dynamic Audio Normalizer."),
1026  .priv_size = sizeof(DynamicAudioNormalizerContext),
1027  .init = init,
1028  .uninit = uninit,
1029  .activate = activate,
1030  FILTER_INPUTS(avfilter_af_dynaudnorm_inputs),
1031  FILTER_OUTPUTS(ff_audio_default_filterpad),
1032  FILTER_SINGLE_SAMPLEFMT(AV_SAMPLE_FMT_DBLP),
1033  .priv_class = &dynaudnorm_class,
1034  .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL |
1035  AVFILTER_FLAG_SLICE_THREADS,
1036  .process_command = process_command,
1037 };