FFmpeg: af_afade.c
1 /*
2  * Copyright (c) 2013-2015 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * fade audio filter
24  */
25 
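The two filters built in this file are normally driven from a filtergraph; as a usage sketch (the option names t, st, d, c1 and c2 are the ones declared in the option tables further down, the concrete values are illustrative):

/* afade=t=in:st=0:d=5            fade the first 5 seconds in
 * afade=t=out:st=25:d=5          fade out over 5 seconds starting at 25 s
 * acrossfade=d=5:c1=tri:c2=tri   crossfade two inputs over 5 seconds */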
26 #include "config_components.h"
27 
28 #include "libavutil/opt.h"
29 #include "audio.h"
30 #include "avfilter.h"
31 #include "filters.h"
32 #include "internal.h"
33 
34 typedef struct AudioFadeContext {
35  const AVClass *class;
36  int type;
37  int curve, curve2;
38  int64_t nb_samples;
39  int64_t start_sample;
40  int64_t duration;
41  int64_t start_time;
42  double silence;
43  double unity;
44  int overlap;
45  int status[2];
46  int passthrough;
47  int64_t pts;
48 
49  void (*fade_samples)(uint8_t **dst, uint8_t * const *src,
50  int nb_samples, int channels, int direction,
51  int64_t start, int64_t range, int curve,
52  double silence, double unity);
53  void (*scale_samples)(uint8_t **dst, uint8_t * const *src,
54  int nb_samples, int channels, double unity);
55  void (*crossfade_samples)(uint8_t **dst, uint8_t * const *cf0,
56  uint8_t * const *cf1,
57  int nb_samples, int channels,
58  int curve0, int curve1);
59 } AudioFadeContext;
60 
61 enum CurveType { NONE = -1, TRI, QSIN, ESIN, HSIN, LOG, IPAR, QUA, CUB, SQU, CBR, PAR, EXP, IQSIN, IHSIN, DESE, DESI, LOSI, SINC, ISINC, QUAT, QUATR, QSIN2, HSIN2, NB_CURVES };
62 
63 #define OFFSET(x) offsetof(AudioFadeContext, x)
64 #define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
65 #define TFLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
66 
67  static const enum AVSampleFormat sample_fmts[] = {
68  AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16P,
69  AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32P,
70  AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP,
71  AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBLP,
72  AV_SAMPLE_FMT_NONE
73  };
74 
75 static double fade_gain(int curve, int64_t index, int64_t range, double silence, double unity)
76 {
77 #define CUBE(a) ((a)*(a)*(a))
78  double gain;
79 
80  gain = av_clipd(1.0 * index / range, 0, 1.0);
81 
82  switch (curve) {
83  case QSIN:
84  gain = sin(gain * M_PI / 2.0);
85  break;
86  case IQSIN:
87  /* 0.6... = 2 / M_PI */
88  gain = 0.6366197723675814 * asin(gain);
89  break;
90  case ESIN:
91  gain = 1.0 - cos(M_PI / 4.0 * (CUBE(2.0*gain - 1) + 1));
92  break;
93  case HSIN:
94  gain = (1.0 - cos(gain * M_PI)) / 2.0;
95  break;
96  case IHSIN:
97  /* 0.3... = 1 / M_PI */
98  gain = 0.3183098861837907 * acos(1 - 2 * gain);
99  break;
100  case EXP:
101  /* -11.5... = 5*ln(0.1) */
102  gain = exp(-11.512925464970227 * (1 - gain));
103  break;
104  case LOG:
105  gain = av_clipd(1 + 0.2 * log10(gain), 0, 1.0);
106  break;
107  case PAR:
108  gain = 1 - sqrt(1 - gain);
109  break;
110  case IPAR:
111  gain = (1 - (1 - gain) * (1 - gain));
112  break;
113  case QUA:
114  gain *= gain;
115  break;
116  case CUB:
117  gain = CUBE(gain);
118  break;
119  case SQU:
120  gain = sqrt(gain);
121  break;
122  case CBR:
123  gain = cbrt(gain);
124  break;
125  case DESE:
126  gain = gain <= 0.5 ? cbrt(2 * gain) / 2: 1 - cbrt(2 * (1 - gain)) / 2;
127  break;
128  case DESI:
129  gain = gain <= 0.5 ? CUBE(2 * gain) / 2: 1 - CUBE(2 * (1 - gain)) / 2;
130  break;
131  case LOSI: {
132  const double a = 1. / (1. - 0.787) - 1;
133  double A = 1. / (1.0 + exp(0 -((gain-0.5) * a * 2.0)));
134  double B = 1. / (1.0 + exp(a));
135  double C = 1. / (1.0 + exp(0-a));
136  gain = (A - B) / (C - B);
137  }
138  break;
139  case SINC:
140  gain = gain >= 1.0 ? 1.0 : sin(M_PI * (1.0 - gain)) / (M_PI * (1.0 - gain));
141  break;
142  case ISINC:
143  gain = gain <= 0.0 ? 0.0 : 1.0 - sin(M_PI * gain) / (M_PI * gain);
144  break;
145  case QUAT:
146  gain = gain * gain * gain * gain;
147  break;
148  case QUATR:
149  gain = pow(gain, 0.25);
150  break;
151  case QSIN2:
152  gain = sin(gain * M_PI / 2.0) * sin(gain * M_PI / 2.0);
153  break;
154  case HSIN2:
155  gain = pow((1.0 - cos(gain * M_PI)) / 2.0, 2.0);
156  break;
157  case NONE:
158  gain = 1.0;
159  break;
160  }
161 
162  return silence + (unity - silence) * gain;
163 }
164 
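A quick numeric check of fade_gain() (illustrative only; TRI is not handled by the switch, so it keeps the clipped linear ramp):

double mid_tri  = fade_gain(TRI,  500, 1000, 0.0, 1.0); /* 0.5                       */
double mid_qsin = fade_gain(QSIN, 500, 1000, 0.0, 1.0); /* sin(M_PI/4)    ~= 0.7071  */
double mid_hsin = fade_gain(HSIN, 500, 1000, 0.0, 1.0); /* (1-cos(M_PI/2))/2 = 0.5   */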
165 #define FADE_PLANAR(name, type) \
166 static void fade_samples_## name ##p(uint8_t **dst, uint8_t * const *src, \
167  int nb_samples, int channels, int dir, \
168  int64_t start, int64_t range,int curve,\
169  double silence, double unity) \
170 { \
171  int i, c; \
172  \
173  for (i = 0; i < nb_samples; i++) { \
174  double gain = fade_gain(curve, start + i * dir,range,silence,unity);\
175  for (c = 0; c < channels; c++) { \
176  type *d = (type *)dst[c]; \
177  const type *s = (type *)src[c]; \
178  \
179  d[i] = s[i] * gain; \
180  } \
181  } \
182 }
183 
184 #define FADE(name, type) \
185 static void fade_samples_## name (uint8_t **dst, uint8_t * const *src, \
186  int nb_samples, int channels, int dir, \
187  int64_t start, int64_t range, int curve, \
188  double silence, double unity) \
189 { \
190  type *d = (type *)dst[0]; \
191  const type *s = (type *)src[0]; \
192  int i, c, k = 0; \
193  \
194  for (i = 0; i < nb_samples; i++) { \
195  double gain = fade_gain(curve, start + i * dir,range,silence,unity);\
196  for (c = 0; c < channels; c++, k++) \
197  d[k] = s[k] * gain; \
198  } \
199 }
200 
201 FADE_PLANAR(dbl, double)
202 FADE_PLANAR(flt, float)
203 FADE_PLANAR(s16, int16_t)
204 FADE_PLANAR(s32, int32_t)
205 
206 FADE(dbl, double)
207 FADE(flt, float)
208 FADE(s16, int16_t)
209 FADE(s32, int32_t)
210 
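A hypothetical call to one of the generated fade helpers (the frame and nb names are assumptions, not from this file): fade in the first nb samples of a 2-channel planar float frame, in place, with the linear curve:

fade_samples_fltp(frame->extended_data, frame->extended_data,
                  nb, 2, /* dir */ 1, /* start */ 0, /* range */ nb,
                  TRI, /* silence */ 0.0, /* unity */ 1.0);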
211 #define SCALE_PLANAR(name, type) \
212 static void scale_samples_## name ##p(uint8_t **dst, uint8_t * const *src, \
213  int nb_samples, int channels, \
214  double gain) \
215 { \
216  int i, c; \
217  \
218  for (i = 0; i < nb_samples; i++) { \
219  for (c = 0; c < channels; c++) { \
220  type *d = (type *)dst[c]; \
221  const type *s = (type *)src[c]; \
222  \
223  d[i] = s[i] * gain; \
224  } \
225  } \
226 }
227 
228 #define SCALE(name, type) \
229 static void scale_samples_## name (uint8_t **dst, uint8_t * const *src, \
230  int nb_samples, int channels, double gain)\
231 { \
232  type *d = (type *)dst[0]; \
233  const type *s = (type *)src[0]; \
234  int i, c, k = 0; \
235  \
236  for (i = 0; i < nb_samples; i++) { \
237  for (c = 0; c < channels; c++, k++) \
238  d[k] = s[k] * gain; \
239  } \
240 }
241 
242 SCALE_PLANAR(dbl, double)
243 SCALE_PLANAR(flt, float)
244 SCALE_PLANAR(s16, int16_t)
245 SCALE_PLANAR(s32, int32_t)
246 
247 SCALE(dbl, double)
248 SCALE(flt, float)
249 SCALE(s16, int16_t)
250 SCALE(s32, int32_t)
251 
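The scale helpers apply one constant gain to every sample; they are used below for the all-silence and all-unity regions of a fade. A hypothetical in-place call (frame and nb again assumed) halving a 2-channel planar float frame:

scale_samples_fltp(frame->extended_data, frame->extended_data, nb, 2, 0.5);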
252 static int config_output(AVFilterLink *outlink)
253 {
254  AVFilterContext *ctx = outlink->src;
255  AudioFadeContext *s = ctx->priv;
256 
257  switch (outlink->format) {
258  case AV_SAMPLE_FMT_DBL: s->fade_samples = fade_samples_dbl;
259  s->scale_samples = scale_samples_dbl;
260  break;
261  case AV_SAMPLE_FMT_DBLP: s->fade_samples = fade_samples_dblp;
262  s->scale_samples = scale_samples_dblp;
263  break;
264  case AV_SAMPLE_FMT_FLT: s->fade_samples = fade_samples_flt;
265  s->scale_samples = scale_samples_flt;
266  break;
267  case AV_SAMPLE_FMT_FLTP: s->fade_samples = fade_samples_fltp;
268  s->scale_samples = scale_samples_fltp;
269  break;
270  case AV_SAMPLE_FMT_S16: s->fade_samples = fade_samples_s16;
271  s->scale_samples = scale_samples_s16;
272  break;
273  case AV_SAMPLE_FMT_S16P: s->fade_samples = fade_samples_s16p;
274  s->scale_samples = scale_samples_s16p;
275  break;
276  case AV_SAMPLE_FMT_S32: s->fade_samples = fade_samples_s32;
277  s->scale_samples = scale_samples_s32;
278  break;
279  case AV_SAMPLE_FMT_S32P: s->fade_samples = fade_samples_s32p;
280  s->scale_samples = scale_samples_s32p;
281  break;
282  }
283 
284  if (s->duration)
285  s->nb_samples = av_rescale(s->duration, outlink->sample_rate, AV_TIME_BASE);
286  s->duration = 0;
287  if (s->start_time)
288  s->start_sample = av_rescale(s->start_time, outlink->sample_rate, AV_TIME_BASE);
289  s->start_time = 0;
290 
291  return 0;
292 }
293 
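A worked example of the rescaling above: a duration of 3 seconds arrives as 3 * AV_TIME_BASE = 3000000 microseconds, so for a 48 kHz output

    nb_samples = av_rescale(3000000, 48000, AV_TIME_BASE) = 144000

i.e. exactly 3 seconds of samples; start_time is converted to start_sample the same way.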
294 #if CONFIG_AFADE_FILTER
295 
296 static const AVOption afade_options[] = {
297  { "type", "set the fade direction", OFFSET(type), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, TFLAGS, "type" },
298  { "t", "set the fade direction", OFFSET(type), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, TFLAGS, "type" },
299  { "in", "fade-in", 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, TFLAGS, "type" },
300  { "out", "fade-out", 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, TFLAGS, "type" },
301  { "start_sample", "set number of first sample to start fading", OFFSET(start_sample), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, TFLAGS },
302  { "ss", "set number of first sample to start fading", OFFSET(start_sample), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, TFLAGS },
303  { "nb_samples", "set number of samples for fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT64, {.i64 = 44100}, 1, INT64_MAX, TFLAGS },
304  { "ns", "set number of samples for fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT64, {.i64 = 44100}, 1, INT64_MAX, TFLAGS },
305  { "start_time", "set time to start fading", OFFSET(start_time), AV_OPT_TYPE_DURATION, {.i64 = 0 }, 0, INT64_MAX, TFLAGS },
306  { "st", "set time to start fading", OFFSET(start_time), AV_OPT_TYPE_DURATION, {.i64 = 0 }, 0, INT64_MAX, TFLAGS },
307  { "duration", "set fade duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0 }, 0, INT64_MAX, TFLAGS },
308  { "d", "set fade duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0 }, 0, INT64_MAX, TFLAGS },
309  { "curve", "set fade curve type", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, NONE, NB_CURVES - 1, TFLAGS, "curve" },
310  { "c", "set fade curve type", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, NONE, NB_CURVES - 1, TFLAGS, "curve" },
311  { "nofade", "no fade; keep audio as-is", 0, AV_OPT_TYPE_CONST, {.i64 = NONE }, 0, 0, TFLAGS, "curve" },
312  { "tri", "linear slope", 0, AV_OPT_TYPE_CONST, {.i64 = TRI }, 0, 0, TFLAGS, "curve" },
313  { "qsin", "quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = QSIN }, 0, 0, TFLAGS, "curve" },
314  { "esin", "exponential sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = ESIN }, 0, 0, TFLAGS, "curve" },
315  { "hsin", "half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = HSIN }, 0, 0, TFLAGS, "curve" },
316  { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64 = LOG }, 0, 0, TFLAGS, "curve" },
317  { "ipar", "inverted parabola", 0, AV_OPT_TYPE_CONST, {.i64 = IPAR }, 0, 0, TFLAGS, "curve" },
318  { "qua", "quadratic", 0, AV_OPT_TYPE_CONST, {.i64 = QUA }, 0, 0, TFLAGS, "curve" },
319  { "cub", "cubic", 0, AV_OPT_TYPE_CONST, {.i64 = CUB }, 0, 0, TFLAGS, "curve" },
320  { "squ", "square root", 0, AV_OPT_TYPE_CONST, {.i64 = SQU }, 0, 0, TFLAGS, "curve" },
321  { "cbr", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64 = CBR }, 0, 0, TFLAGS, "curve" },
322  { "par", "parabola", 0, AV_OPT_TYPE_CONST, {.i64 = PAR }, 0, 0, TFLAGS, "curve" },
323  { "exp", "exponential", 0, AV_OPT_TYPE_CONST, {.i64 = EXP }, 0, 0, TFLAGS, "curve" },
324  { "iqsin", "inverted quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = IQSIN}, 0, 0, TFLAGS, "curve" },
325  { "ihsin", "inverted half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = IHSIN}, 0, 0, TFLAGS, "curve" },
326  { "dese", "double-exponential seat", 0, AV_OPT_TYPE_CONST, {.i64 = DESE }, 0, 0, TFLAGS, "curve" },
327  { "desi", "double-exponential sigmoid", 0, AV_OPT_TYPE_CONST, {.i64 = DESI }, 0, 0, TFLAGS, "curve" },
328  { "losi", "logistic sigmoid", 0, AV_OPT_TYPE_CONST, {.i64 = LOSI }, 0, 0, TFLAGS, "curve" },
329  { "sinc", "sine cardinal function", 0, AV_OPT_TYPE_CONST, {.i64 = SINC }, 0, 0, TFLAGS, "curve" },
330  { "isinc", "inverted sine cardinal function", 0, AV_OPT_TYPE_CONST, {.i64 = ISINC}, 0, 0, TFLAGS, "curve" },
331  { "quat", "quartic", 0, AV_OPT_TYPE_CONST, {.i64 = QUAT }, 0, 0, TFLAGS, "curve" },
332  { "quatr", "quartic root", 0, AV_OPT_TYPE_CONST, {.i64 = QUATR}, 0, 0, TFLAGS, "curve" },
333  { "qsin2", "squared quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = QSIN2}, 0, 0, TFLAGS, "curve" },
334  { "hsin2", "squared half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = HSIN2}, 0, 0, TFLAGS, "curve" },
335  { "silence", "set the silence gain", OFFSET(silence), AV_OPT_TYPE_DOUBLE, {.dbl = 0 }, 0, 1, TFLAGS },
336  { "unity", "set the unity gain", OFFSET(unity), AV_OPT_TYPE_DOUBLE, {.dbl = 1 }, 0, 1, TFLAGS },
337  { NULL }
338 };
339 
340 AVFILTER_DEFINE_CLASS(afade);
341 
342 static av_cold int init(AVFilterContext *ctx)
343 {
344  AudioFadeContext *s = ctx->priv;
345 
346  if (INT64_MAX - s->nb_samples < s->start_sample)
347  return AVERROR(EINVAL);
348 
349  return 0;
350 }
351 
352 static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
353 {
354  AudioFadeContext *s = inlink->dst->priv;
355  AVFilterLink *outlink = inlink->dst->outputs[0];
356  int nb_samples = buf->nb_samples;
357  AVFrame *out_buf;
358  int64_t cur_sample = av_rescale_q(buf->pts, inlink->time_base, (AVRational){1, inlink->sample_rate});
359 
360  if (s->unity == 1.0 &&
361  ((!s->type && (s->start_sample + s->nb_samples < cur_sample)) ||
362  ( s->type && (cur_sample + nb_samples < s->start_sample))))
363  return ff_filter_frame(outlink, buf);
364 
365  if (av_frame_is_writable(buf)) {
366  out_buf = buf;
367  } else {
368  out_buf = ff_get_audio_buffer(outlink, nb_samples);
369  if (!out_buf)
370  return AVERROR(ENOMEM);
371  av_frame_copy_props(out_buf, buf);
372  }
373 
374  if ((!s->type && (cur_sample + nb_samples < s->start_sample)) ||
375  ( s->type && (s->start_sample + s->nb_samples < cur_sample))) {
376  if (s->silence == 0.) {
377  av_samples_set_silence(out_buf->extended_data, 0, nb_samples,
378  out_buf->ch_layout.nb_channels, out_buf->format);
379  } else {
380  s->scale_samples(out_buf->extended_data, buf->extended_data,
381  nb_samples, buf->ch_layout.nb_channels,
382  s->silence);
383  }
384  } else if (( s->type && (cur_sample + nb_samples < s->start_sample)) ||
385  (!s->type && (s->start_sample + s->nb_samples < cur_sample))) {
386  s->scale_samples(out_buf->extended_data, buf->extended_data,
387  nb_samples, buf->ch_layout.nb_channels,
388  s->unity);
389  } else {
390  int64_t start;
391 
392  if (!s->type)
393  start = cur_sample - s->start_sample;
394  else
395  start = s->start_sample + s->nb_samples - cur_sample;
396 
397  s->fade_samples(out_buf->extended_data, buf->extended_data,
398  nb_samples, buf->ch_layout.nb_channels,
399  s->type ? -1 : 1, start,
400  s->nb_samples, s->curve, s->silence, s->unity);
401  }
402 
403  if (buf != out_buf)
404  av_frame_free(&buf);
405 
406  return ff_filter_frame(outlink, out_buf);
407 }
408 
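To make the branches above concrete, consider a fade-in (type 0) with start_sample = 44100 and nb_samples = 44100 (an assumed configuration, not from this file):

/* frame ends before sample 44100   -> first branch: silenced, or scaled by the
 *                                     silence gain if it is non-zero
 * frame starts after sample 88200  -> second branch: scaled by unity (with
 *                                     unity == 1.0 the early return above
 *                                     already passes it through untouched)
 * frame overlaps [44100, 88200)    -> faded, with start = cur_sample -
 *                                     start_sample indexing into the curve */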
409 static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
410  char *res, int res_len, int flags)
411 {
412  int ret;
413 
414  ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);
415  if (ret < 0)
416  return ret;
417 
418  return config_output(ctx->outputs[0]);
419 }
420 
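Because the afade options use TFLAGS (which includes AV_OPT_FLAG_RUNTIME_PARAM), they can be changed while the graph is running. A minimal sketch using the public send-command API (the graph variable and the target instance name "afade" are assumptions):

/* Sketch only: switch the fade curve of a running afade instance to "log". */
int err = avfilter_graph_send_command(graph, "afade", "curve", "log",
                                      NULL, 0, 0);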
421 static const AVFilterPad avfilter_af_afade_inputs[] = {
422  {
423  .name = "default",
424  .type = AVMEDIA_TYPE_AUDIO,
425  .filter_frame = filter_frame,
426  },
427 };
428 
429 static const AVFilterPad avfilter_af_afade_outputs[] = {
430  {
431  .name = "default",
432  .type = AVMEDIA_TYPE_AUDIO,
433  .config_props = config_output,
434  },
435 };
436 
437 const AVFilter ff_af_afade = {
438  .name = "afade",
439  .description = NULL_IF_CONFIG_SMALL("Fade in/out input audio."),
440  .priv_size = sizeof(AudioFadeContext),
441  .init = init,
442  FILTER_INPUTS(avfilter_af_afade_inputs),
443  FILTER_OUTPUTS(avfilter_af_afade_outputs),
444  FILTER_SAMPLEFMTS_ARRAY(sample_fmts),
445  .priv_class = &afade_class,
446  .process_command = process_command,
447  .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
448 };
449 
450 #endif /* CONFIG_AFADE_FILTER */
451 
452 #if CONFIG_ACROSSFADE_FILTER
453 
454 static const AVOption acrossfade_options[] = {
455  { "nb_samples", "set number of samples for cross fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 44100}, 1, INT32_MAX/10, FLAGS },
456  { "ns", "set number of samples for cross fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 44100}, 1, INT32_MAX/10, FLAGS },
457  { "duration", "set cross fade duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0 }, 0, 60000000, FLAGS },
458  { "d", "set cross fade duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0 }, 0, 60000000, FLAGS },
459  { "overlap", "overlap 1st stream end with 2nd stream start", OFFSET(overlap), AV_OPT_TYPE_BOOL, {.i64 = 1 }, 0, 1, FLAGS },
460  { "o", "overlap 1st stream end with 2nd stream start", OFFSET(overlap), AV_OPT_TYPE_BOOL, {.i64 = 1 }, 0, 1, FLAGS },
461  { "curve1", "set fade curve type for 1st stream", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, NONE, NB_CURVES - 1, FLAGS, "curve" },
462  { "c1", "set fade curve type for 1st stream", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, NONE, NB_CURVES - 1, FLAGS, "curve" },
463  { "nofade", "no fade; keep audio as-is", 0, AV_OPT_TYPE_CONST, {.i64 = NONE }, 0, 0, FLAGS, "curve" },
464  { "tri", "linear slope", 0, AV_OPT_TYPE_CONST, {.i64 = TRI }, 0, 0, FLAGS, "curve" },
465  { "qsin", "quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = QSIN }, 0, 0, FLAGS, "curve" },
466  { "esin", "exponential sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = ESIN }, 0, 0, FLAGS, "curve" },
467  { "hsin", "half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = HSIN }, 0, 0, FLAGS, "curve" },
468  { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64 = LOG }, 0, 0, FLAGS, "curve" },
469  { "ipar", "inverted parabola", 0, AV_OPT_TYPE_CONST, {.i64 = IPAR }, 0, 0, FLAGS, "curve" },
470  { "qua", "quadratic", 0, AV_OPT_TYPE_CONST, {.i64 = QUA }, 0, 0, FLAGS, "curve" },
471  { "cub", "cubic", 0, AV_OPT_TYPE_CONST, {.i64 = CUB }, 0, 0, FLAGS, "curve" },
472  { "squ", "square root", 0, AV_OPT_TYPE_CONST, {.i64 = SQU }, 0, 0, FLAGS, "curve" },
473  { "cbr", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64 = CBR }, 0, 0, FLAGS, "curve" },
474  { "par", "parabola", 0, AV_OPT_TYPE_CONST, {.i64 = PAR }, 0, 0, FLAGS, "curve" },
475  { "exp", "exponential", 0, AV_OPT_TYPE_CONST, {.i64 = EXP }, 0, 0, FLAGS, "curve" },
476  { "iqsin", "inverted quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = IQSIN}, 0, 0, FLAGS, "curve" },
477  { "ihsin", "inverted half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = IHSIN}, 0, 0, FLAGS, "curve" },
478  { "dese", "double-exponential seat", 0, AV_OPT_TYPE_CONST, {.i64 = DESE }, 0, 0, FLAGS, "curve" },
479  { "desi", "double-exponential sigmoid", 0, AV_OPT_TYPE_CONST, {.i64 = DESI }, 0, 0, FLAGS, "curve" },
480  { "losi", "logistic sigmoid", 0, AV_OPT_TYPE_CONST, {.i64 = LOSI }, 0, 0, FLAGS, "curve" },
481  { "sinc", "sine cardinal function", 0, AV_OPT_TYPE_CONST, {.i64 = SINC }, 0, 0, FLAGS, "curve" },
482  { "isinc", "inverted sine cardinal function", 0, AV_OPT_TYPE_CONST, {.i64 = ISINC}, 0, 0, FLAGS, "curve" },
483  { "quat", "quartic", 0, AV_OPT_TYPE_CONST, {.i64 = QUAT }, 0, 0, FLAGS, "curve" },
484  { "quatr", "quartic root", 0, AV_OPT_TYPE_CONST, {.i64 = QUATR}, 0, 0, FLAGS, "curve" },
485  { "qsin2", "squared quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = QSIN2}, 0, 0, FLAGS, "curve" },
486  { "hsin2", "squared half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = HSIN2}, 0, 0, FLAGS, "curve" },
487  { "curve2", "set fade curve type for 2nd stream", OFFSET(curve2), AV_OPT_TYPE_INT, {.i64 = TRI }, NONE, NB_CURVES - 1, FLAGS, "curve" },
488  { "c2", "set fade curve type for 2nd stream", OFFSET(curve2), AV_OPT_TYPE_INT, {.i64 = TRI }, NONE, NB_CURVES - 1, FLAGS, "curve" },
489  { NULL }
490 };
491 
492 AVFILTER_DEFINE_CLASS(acrossfade);
493 
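As a usage sketch (the option names d, c1 and c2 are the ones declared just above; the values are illustrative), a typical invocation crossfades two inputs over 5 seconds with matching curves:

/* acrossfade=d=5:c1=qsin:c2=qsin */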
494 #define CROSSFADE_PLANAR(name, type) \
495 static void crossfade_samples_## name ##p(uint8_t **dst, uint8_t * const *cf0, \
496  uint8_t * const *cf1, \
497  int nb_samples, int channels, \
498  int curve0, int curve1) \
499 { \
500  int i, c; \
501  \
502  for (i = 0; i < nb_samples; i++) { \
503  double gain0 = fade_gain(curve0, nb_samples - 1 - i, nb_samples,0.,1.);\
504  double gain1 = fade_gain(curve1, i, nb_samples, 0., 1.); \
505  for (c = 0; c < channels; c++) { \
506  type *d = (type *)dst[c]; \
507  const type *s0 = (type *)cf0[c]; \
508  const type *s1 = (type *)cf1[c]; \
509  \
510  d[i] = s0[i] * gain0 + s1[i] * gain1; \
511  } \
512  } \
513 }
514 
515 #define CROSSFADE(name, type) \
516 static void crossfade_samples_## name (uint8_t **dst, uint8_t * const *cf0, \
517  uint8_t * const *cf1, \
518  int nb_samples, int channels, \
519  int curve0, int curve1) \
520 { \
521  type *d = (type *)dst[0]; \
522  const type *s0 = (type *)cf0[0]; \
523  const type *s1 = (type *)cf1[0]; \
524  int i, c, k = 0; \
525  \
526  for (i = 0; i < nb_samples; i++) { \
527  double gain0 = fade_gain(curve0, nb_samples - 1-i,nb_samples,0.,1.);\
528  double gain1 = fade_gain(curve1, i, nb_samples, 0., 1.); \
529  for (c = 0; c < channels; c++, k++) \
530  d[k] = s0[k] * gain0 + s1[k] * gain1; \
531  } \
532 }
533 
534 CROSSFADE_PLANAR(dbl, double)
535 CROSSFADE_PLANAR(flt, float)
536 CROSSFADE_PLANAR(s16, int16_t)
537 CROSSFADE_PLANAR(s32, int32_t)
538 
539 CROSSFADE(dbl, double)
540 CROSSFADE(flt, float)
541 CROSSFADE(s16, int16_t)
542 CROSSFADE(s32, int32_t)
543 
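Note how the two gains relate at crossfade sample i: the outgoing stream is weighted by fade_gain(curve0, nb_samples - 1 - i, nb_samples, 0., 1.) and the incoming one by fade_gain(curve1, i, nb_samples, 0., 1.). For the default linear tri/tri pair the weights sum to (nb_samples - 1 - i)/nb_samples + i/nb_samples = (nb_samples - 1)/nb_samples, i.e. an essentially constant overall gain of ~1 across the whole crossfade.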
544 static int check_input(AVFilterLink *inlink)
545 {
546  const int queued_samples = ff_inlink_queued_samples(inlink);
547 
548  return ff_inlink_check_available_samples(inlink, queued_samples + 1) == 1;
549 }
550 
551 static int activate(AVFilterContext *ctx)
552 {
553  AudioFadeContext *s = ctx->priv;
554  AVFilterLink *outlink = ctx->outputs[0];
555  AVFrame *in = NULL, *out, *cf[2] = { NULL };
556  int ret = 0, nb_samples, status;
557  int64_t pts;
558 
559  FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);
560 
561  if (s->passthrough && s->status[0]) {
562  ret = ff_inlink_consume_frame(ctx->inputs[1], &in);
563  if (ret > 0) {
564  in->pts = s->pts;
565  s->pts += av_rescale_q(in->nb_samples,
566  (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
567  return ff_filter_frame(outlink, in);
568  } else if (ret < 0) {
569  return ret;
570  } else if (ff_inlink_acknowledge_status(ctx->inputs[1], &status, &pts)) {
571  ff_outlink_set_status(outlink, status, pts);
572  return 0;
573  } else if (!ret) {
574  if (ff_outlink_frame_wanted(outlink)) {
575  ff_inlink_request_frame(ctx->inputs[1]);
576  return 0;
577  }
578  }
579  }
580 
581  nb_samples = ff_inlink_queued_samples(ctx->inputs[0]);
582  if (nb_samples > s->nb_samples) {
583  nb_samples -= s->nb_samples;
584  s->passthrough = 1;
585  ret = ff_inlink_consume_samples(ctx->inputs[0], nb_samples, nb_samples, &in);
586  if (ret < 0)
587  return ret;
588  in->pts = s->pts;
589  s->pts += av_rescale_q(in->nb_samples,
590  (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
591  return ff_filter_frame(outlink, in);
592  } else if (s->status[0] && nb_samples >= s->nb_samples &&
593  ff_inlink_queued_samples(ctx->inputs[1]) >= s->nb_samples) {
594  if (s->overlap) {
595  out = ff_get_audio_buffer(outlink, s->nb_samples);
596  if (!out)
597  return AVERROR(ENOMEM);
598 
599  ret = ff_inlink_consume_samples(ctx->inputs[0], s->nb_samples, s->nb_samples, &cf[0]);
600  if (ret < 0) {
601  av_frame_free(&out);
602  return ret;
603  }
604 
605  ret = ff_inlink_consume_samples(ctx->inputs[1], s->nb_samples, s->nb_samples, &cf[1]);
606  if (ret < 0) {
607  av_frame_free(&out);
608  return ret;
609  }
610 
611  s->crossfade_samples(out->extended_data, cf[0]->extended_data,
612  cf[1]->extended_data,
613  s->nb_samples, out->ch_layout.nb_channels,
614  s->curve, s->curve2);
615  out->pts = s->pts;
616  s->pts += av_rescale_q(s->nb_samples,
617  (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
618  s->passthrough = 1;
619  av_frame_free(&cf[0]);
620  av_frame_free(&cf[1]);
621  return ff_filter_frame(outlink, out);
622  } else {
623  out = ff_get_audio_buffer(outlink, s->nb_samples);
624  if (!out)
625  return AVERROR(ENOMEM);
626 
627  ret = ff_inlink_consume_samples(ctx->inputs[0], s->nb_samples, s->nb_samples, &cf[0]);
628  if (ret < 0) {
629  av_frame_free(&out);
630  return ret;
631  }
632 
633  s->fade_samples(out->extended_data, cf[0]->extended_data, s->nb_samples,
634  outlink->ch_layout.nb_channels, -1, s->nb_samples - 1, s->nb_samples, s->curve, 0., 1.);
635  out->pts = s->pts;
636  s->pts += av_rescale_q(s->nb_samples,
637  (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
638  av_frame_free(&cf[0]);
639  ret = ff_filter_frame(outlink, out);
640  if (ret < 0)
641  return ret;
642 
643  out = ff_get_audio_buffer(outlink, s->nb_samples);
644  if (!out)
645  return AVERROR(ENOMEM);
646 
647  ret = ff_inlink_consume_samples(ctx->inputs[1], s->nb_samples, s->nb_samples, &cf[1]);
648  if (ret < 0) {
649  av_frame_free(&out);
650  return ret;
651  }
652 
653  s->fade_samples(out->extended_data, cf[1]->extended_data, s->nb_samples,
654  outlink->ch_layout.nb_channels, 1, 0, s->nb_samples, s->curve2, 0., 1.);
655  out->pts = s->pts;
656  s->pts += av_rescale_q(s->nb_samples,
657  (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
658  s->passthrough = 1;
659  av_frame_free(&cf[1]);
660  return ff_filter_frame(outlink, out);
661  }
662  } else if (ff_outlink_frame_wanted(outlink)) {
663  if (!s->status[0] && check_input(ctx->inputs[0]))
664  s->status[0] = AVERROR_EOF;
665  s->passthrough = !s->status[0];
666  if (check_input(ctx->inputs[1])) {
667  s->status[1] = AVERROR_EOF;
668  ff_outlink_set_status(outlink, AVERROR_EOF, AV_NOPTS_VALUE);
669  return 0;
670  }
671  if (!s->status[0])
672  ff_inlink_request_frame(ctx->inputs[0]);
673  else
674  ff_inlink_request_frame(ctx->inputs[1]);
675  return 0;
676  }
677 
678  return ret;
679 }
680 
681 static int acrossfade_config_output(AVFilterLink *outlink)
682 {
683  AVFilterContext *ctx = outlink->src;
684  AudioFadeContext *s = ctx->priv;
685 
686  outlink->time_base = ctx->inputs[0]->time_base;
687 
688  switch (outlink->format) {
689  case AV_SAMPLE_FMT_DBL: s->crossfade_samples = crossfade_samples_dbl; break;
690  case AV_SAMPLE_FMT_DBLP: s->crossfade_samples = crossfade_samples_dblp; break;
691  case AV_SAMPLE_FMT_FLT: s->crossfade_samples = crossfade_samples_flt; break;
692  case AV_SAMPLE_FMT_FLTP: s->crossfade_samples = crossfade_samples_fltp; break;
693  case AV_SAMPLE_FMT_S16: s->crossfade_samples = crossfade_samples_s16; break;
694  case AV_SAMPLE_FMT_S16P: s->crossfade_samples = crossfade_samples_s16p; break;
695  case AV_SAMPLE_FMT_S32: s->crossfade_samples = crossfade_samples_s32; break;
696  case AV_SAMPLE_FMT_S32P: s->crossfade_samples = crossfade_samples_s32p; break;
697  }
698 
699  config_output(outlink);
700 
701  return 0;
702 }
703 
704 static AVFrame *get_audio_buffer(AVFilterLink *inlink, int nb_samples)
705 {
706  AVFilterContext *ctx = inlink->dst;
707  AudioFadeContext *s = ctx->priv;
708 
709  return s->passthrough ?
710  ff_null_get_audio_buffer (inlink, nb_samples) :
711  ff_default_get_audio_buffer(inlink, nb_samples);
712 }
713 
714 static const AVFilterPad avfilter_af_acrossfade_inputs[] = {
715  {
716  .name = "crossfade0",
717  .type = AVMEDIA_TYPE_AUDIO,
718  .get_buffer.audio = get_audio_buffer,
719  },
720  {
721  .name = "crossfade1",
722  .type = AVMEDIA_TYPE_AUDIO,
723  .get_buffer.audio = get_audio_buffer,
724  },
725 };
726 
727 static const AVFilterPad avfilter_af_acrossfade_outputs[] = {
728  {
729  .name = "default",
730  .type = AVMEDIA_TYPE_AUDIO,
731  .config_props = acrossfade_config_output,
732  },
733 };
734 
735 const AVFilter ff_af_acrossfade = {
736  .name = "acrossfade",
737  .description = NULL_IF_CONFIG_SMALL("Cross fade two input audio streams."),
738  .priv_size = sizeof(AudioFadeContext),
739  .activate = activate,
740  .priv_class = &acrossfade_class,
741  FILTER_INPUTS(avfilter_af_acrossfade_inputs),
742  FILTER_OUTPUTS(avfilter_af_acrossfade_outputs),
743  FILTER_SAMPLEFMTS_ARRAY(sample_fmts),
744 };
745 
746 #endif /* CONFIG_ACROSSFADE_FILTER */