FFmpeg
avf_showcwt.c
1 /*
2  * Copyright (c) 2022 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <float.h>
22 #include <math.h>
23 
24 #include "libavutil/tx.h"
25 #include "libavutil/avassert.h"
26 #include "libavutil/avstring.h"
28 #include "libavutil/float_dsp.h"
29 #include "libavutil/cpu.h"
30 #include "libavutil/opt.h"
31 #include "libavutil/parseutils.h"
32 #include "audio.h"
33 #include "formats.h"
34 #include "video.h"
35 #include "avfilter.h"
36 #include "filters.h"
37 #include "internal.h"
38 
39 enum FrequencyScale {
40  FSCALE_LINEAR,
41  FSCALE_LOG,
42  FSCALE_BARK,
43  FSCALE_MEL,
44  FSCALE_ERBS,
45  FSCALE_SQRT,
46  FSCALE_CBRT,
47  FSCALE_QDRT,
48  FSCALE_FM,
49  NB_FSCALE
50 };
51 
52 enum IntensityScale {
53  ISCALE_LOG,
54  ISCALE_LINEAR,
55  ISCALE_SQRT,
56  ISCALE_CBRT,
57  ISCALE_QDRT,
58  NB_ISCALE
59 };
60 
61 enum DirectionMode {
62  DIRECTION_LR,
63  DIRECTION_RL,
64  DIRECTION_UD,
65  DIRECTION_DU,
66  NB_DIRECTION
67 };
68 
69 enum SlideMode {
70  SLIDE_REPLACE,
71  SLIDE_SCROLL,
72  SLIDE_FRAME,
73  NB_SLIDE
74 };
75 
76 typedef struct ShowCWTContext {
77  const AVClass *class;
78  int w, h;
79  int mode;
80  char *rate_str;
86  int pos;
87  int64_t in_pts;
88  int64_t old_pts;
89  int64_t eof_pts;
92  unsigned *index;
108  int pps;
109  int eof;
110  int slide;
123  float deviation;
124  float bar_ratio;
125  int bar_size;
127  float rotation;
128 
129  AVFloatDSPContext *fdsp;
130 } ShowCWTContext;
131 
132 #define OFFSET(x) offsetof(ShowCWTContext, x)
133 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
134 
135 static const AVOption showcwt_options[] = {
136  { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "640x512"}, 0, 0, FLAGS },
137  { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "640x512"}, 0, 0, FLAGS },
138  { "rate", "set video rate", OFFSET(rate_str), AV_OPT_TYPE_STRING, {.str = "25"}, 0, 0, FLAGS },
139  { "r", "set video rate", OFFSET(rate_str), AV_OPT_TYPE_STRING, {.str = "25"}, 0, 0, FLAGS },
140  { "scale", "set frequency scale", OFFSET(frequency_scale), AV_OPT_TYPE_INT, {.i64=0}, 0, NB_FSCALE-1, FLAGS, .unit = "scale" },
141  { "linear", "linear", 0, AV_OPT_TYPE_CONST,{.i64=FSCALE_LINEAR}, 0, 0, FLAGS, .unit = "scale" },
142  { "log", "logarithmic", 0, AV_OPT_TYPE_CONST,{.i64=FSCALE_LOG}, 0, 0, FLAGS, .unit = "scale" },
143  { "bark", "bark", 0, AV_OPT_TYPE_CONST,{.i64=FSCALE_BARK}, 0, 0, FLAGS, .unit = "scale" },
144  { "mel", "mel", 0, AV_OPT_TYPE_CONST,{.i64=FSCALE_MEL}, 0, 0, FLAGS, .unit = "scale" },
145  { "erbs", "erbs", 0, AV_OPT_TYPE_CONST,{.i64=FSCALE_ERBS}, 0, 0, FLAGS, .unit = "scale" },
146  { "sqrt", "sqrt", 0, AV_OPT_TYPE_CONST,{.i64=FSCALE_SQRT}, 0, 0, FLAGS, .unit = "scale" },
147  { "cbrt", "cbrt", 0, AV_OPT_TYPE_CONST,{.i64=FSCALE_CBRT}, 0, 0, FLAGS, .unit = "scale" },
148  { "qdrt", "qdrt", 0, AV_OPT_TYPE_CONST,{.i64=FSCALE_QDRT}, 0, 0, FLAGS, .unit = "scale" },
149  { "fm", "fm", 0, AV_OPT_TYPE_CONST,{.i64=FSCALE_FM}, 0, 0, FLAGS, .unit = "scale" },
150  { "iscale", "set intensity scale", OFFSET(intensity_scale),AV_OPT_TYPE_INT, {.i64=0}, 0, NB_ISCALE-1, FLAGS, .unit = "iscale" },
151  { "linear", "linear", 0, AV_OPT_TYPE_CONST,{.i64=ISCALE_LINEAR}, 0, 0, FLAGS, .unit = "iscale" },
152  { "log", "logarithmic", 0, AV_OPT_TYPE_CONST,{.i64=ISCALE_LOG}, 0, 0, FLAGS, .unit = "iscale" },
153  { "sqrt", "sqrt", 0, AV_OPT_TYPE_CONST,{.i64=ISCALE_SQRT}, 0, 0, FLAGS, .unit = "iscale" },
154  { "cbrt", "cbrt", 0, AV_OPT_TYPE_CONST,{.i64=ISCALE_CBRT}, 0, 0, FLAGS, .unit = "iscale" },
155  { "qdrt", "qdrt", 0, AV_OPT_TYPE_CONST,{.i64=ISCALE_QDRT}, 0, 0, FLAGS, .unit = "iscale" },
156  { "min", "set minimum frequency", OFFSET(minimum_frequency), AV_OPT_TYPE_FLOAT, {.dbl = 20.}, 1, 192000, FLAGS },
157  { "max", "set maximum frequency", OFFSET(maximum_frequency), AV_OPT_TYPE_FLOAT, {.dbl = 20000.}, 1, 192000, FLAGS },
158  { "imin", "set minimum intensity", OFFSET(minimum_intensity), AV_OPT_TYPE_FLOAT, {.dbl = 0.}, 0, 1, FLAGS },
159  { "imax", "set maximum intensity", OFFSET(maximum_intensity), AV_OPT_TYPE_FLOAT, {.dbl = 1.}, 0, 1, FLAGS },
160  { "logb", "set logarithmic basis", OFFSET(logarithmic_basis), AV_OPT_TYPE_FLOAT, {.dbl = 0.0001}, 0, 1, FLAGS },
161  { "deviation", "set frequency deviation", OFFSET(deviation), AV_OPT_TYPE_FLOAT, {.dbl = 1.}, 0, 100, FLAGS },
162  { "pps", "set pixels per second", OFFSET(pps), AV_OPT_TYPE_INT, {.i64 = 64}, 1, 1024, FLAGS },
163  { "mode", "set output mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 4, FLAGS, .unit = "mode" },
164  { "magnitude", "magnitude", 0, AV_OPT_TYPE_CONST,{.i64=0}, 0, 0, FLAGS, .unit = "mode" },
165  { "phase", "phase", 0, AV_OPT_TYPE_CONST,{.i64=1}, 0, 0, FLAGS, .unit = "mode" },
166  { "magphase", "magnitude+phase", 0, AV_OPT_TYPE_CONST,{.i64=2}, 0, 0, FLAGS, .unit = "mode" },
167  { "channel", "color per channel", 0, AV_OPT_TYPE_CONST,{.i64=3}, 0, 0, FLAGS, .unit = "mode" },
168  { "stereo", "stereo difference", 0, AV_OPT_TYPE_CONST,{.i64=4}, 0, 0, FLAGS, .unit = "mode" },
169  { "slide", "set slide mode", OFFSET(slide), AV_OPT_TYPE_INT, {.i64=0}, 0, NB_SLIDE-1, FLAGS, .unit = "slide" },
170  { "replace", "replace", 0, AV_OPT_TYPE_CONST,{.i64=SLIDE_REPLACE},0, 0, FLAGS, .unit = "slide" },
171  { "scroll", "scroll", 0, AV_OPT_TYPE_CONST,{.i64=SLIDE_SCROLL}, 0, 0, FLAGS, .unit = "slide" },
172  { "frame", "frame", 0, AV_OPT_TYPE_CONST,{.i64=SLIDE_FRAME}, 0, 0, FLAGS, .unit = "slide" },
173  { "direction", "set direction mode", OFFSET(direction), AV_OPT_TYPE_INT, {.i64=0}, 0, NB_DIRECTION-1, FLAGS, .unit = "direction" },
174  { "lr", "left to right", 0, AV_OPT_TYPE_CONST,{.i64=DIRECTION_LR}, 0, 0, FLAGS, .unit = "direction" },
175  { "rl", "right to left", 0, AV_OPT_TYPE_CONST,{.i64=DIRECTION_RL}, 0, 0, FLAGS, .unit = "direction" },
176  { "ud", "up to down", 0, AV_OPT_TYPE_CONST,{.i64=DIRECTION_UD}, 0, 0, FLAGS, .unit = "direction" },
177  { "du", "down to up", 0, AV_OPT_TYPE_CONST,{.i64=DIRECTION_DU}, 0, 0, FLAGS, .unit = "direction" },
178  { "bar", "set bargraph ratio", OFFSET(bar_ratio), AV_OPT_TYPE_FLOAT, {.dbl = 0.}, 0, 1, FLAGS },
179  { "rotation", "set color rotation", OFFSET(rotation), AV_OPT_TYPE_FLOAT, {.dbl = 0}, -1, 1, FLAGS },
180  { NULL }
181 };
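/* Editorial note (not part of the upstream source): the options above map
 * one-to-one onto the showcwt filter string.  A typical invocation, with
 * placeholder file names, might look like:
 *
 *   ffmpeg -i input.wav -filter_complex \
 *     "showcwt=s=960x540:scale=bark:iscale=log:min=30:max=16000:pps=80:direction=lr" \
 *     -c:v libx264 output.mkv
 *
 * The values shown are only examples; defaults are taken from the AVOption
 * table above.
 */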
182 
183 AVFILTER_DEFINE_CLASS(showcwt);
184 
185 static av_cold void uninit(AVFilterContext *ctx)
186 {
187  ShowCWTContext *s = ctx->priv;
188 
189  av_freep(&s->frequency_band);
190  av_freep(&s->kernel_start);
191  av_freep(&s->kernel_stop);
192  av_freep(&s->index);
193 
194  av_frame_free(&s->cache);
195  av_frame_free(&s->outpicref);
196  av_frame_free(&s->fft_in);
197  av_frame_free(&s->fft_out);
198  av_frame_free(&s->dst_x);
199  av_frame_free(&s->src_x);
200  av_frame_free(&s->ifft_in);
201  av_frame_free(&s->ifft_out);
202  av_frame_free(&s->ch_out);
203  av_frame_free(&s->over);
204  av_frame_free(&s->bh_out);
205 
206  if (s->fft) {
207  for (int n = 0; n < s->nb_threads; n++)
208  av_tx_uninit(&s->fft[n]);
209  av_freep(&s->fft);
210  }
211 
212  if (s->ifft) {
213  for (int n = 0; n < s->nb_threads; n++)
214  av_tx_uninit(&s->ifft[n]);
215  av_freep(&s->ifft);
216  }
217 
218  if (s->kernel) {
219  for (int n = 0; n < s->frequency_band_count; n++)
220  av_freep(&s->kernel[n]);
221  }
222  av_freep(&s->kernel);
223 
224  av_freep(&s->fdsp);
225 }
226 
227 static int query_formats(AVFilterContext *ctx)
228 {
231  AVFilterLink *inlink = ctx->inputs[0];
232  AVFilterLink *outlink = ctx->outputs[0];
235  int ret;
236 
237  formats = ff_make_format_list(sample_fmts);
238  if ((ret = ff_formats_ref(formats, &inlink->outcfg.formats)) < 0)
239  return ret;
240 
241  layouts = ff_all_channel_counts();
242  if ((ret = ff_channel_layouts_ref(layouts, &inlink->outcfg.channel_layouts)) < 0)
243  return ret;
244 
245  formats = ff_all_samplerates();
246  if ((ret = ff_formats_ref(formats, &inlink->outcfg.samplerates)) < 0)
247  return ret;
248 
249  formats = ff_make_format_list(pix_fmts);
250  if ((ret = ff_formats_ref(formats, &outlink->incfg.formats)) < 0)
251  return ret;
252 
253  return 0;
254 }
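/* Editorial note: the sample_fmts and pix_fmts arrays referenced above are
 * elided in this listing.  Judging from the rest of the file (planar float
 * input, three or four 8-bit full-range planes written per pixel), they
 * presumably list AV_SAMPLE_FMT_FLTP for the audio input and 4:4:4 YUV
 * formats such as AV_PIX_FMT_YUVJ444P / AV_PIX_FMT_YUVA444P for the video
 * output; treat this as an assumption, not a quote of the source.
 */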
255 
256 static float frequency_band(float *frequency_band,
257  int frequency_band_count,
258  float frequency_range,
259  float frequency_offset,
260  int frequency_scale, float deviation)
261 {
262  float ret = 0.f;
263 
264  deviation = sqrtf(deviation / (4.f * M_PI)); // Heisenberg Gabor Limit
265  for (int y = 0; y < frequency_band_count; y++) {
266  float frequency = frequency_range * (1.f - (float)y / frequency_band_count) + frequency_offset;
267  float frequency_derivative = frequency_range / frequency_band_count;
268 
269  switch (frequency_scale) {
270  case FSCALE_LOG:
271  frequency = powf(2.f, frequency);
272  frequency_derivative *= logf(2.f) * frequency;
273  break;
274  case FSCALE_BARK:
275  frequency = 600.f * sinhf(frequency / 6.f);
276  frequency_derivative *= sqrtf(frequency * frequency + 360000.f) / 6.f;
277  break;
278  case FSCALE_MEL:
279  frequency = 700.f * (powf(10.f, frequency / 2595.f) - 1.f);
280  frequency_derivative *= (frequency + 700.f) * logf(10.f) / 2595.f;
281  break;
282  case FSCALE_ERBS:
283  frequency = 676170.4f / (47.06538f - expf(frequency * 0.08950404f)) - 14678.49f;
284  frequency_derivative *= (frequency * frequency + 14990.4f * frequency + 4577850.f) / 160514.f;
285  break;
286  case FSCALE_SQRT:
287  frequency = frequency * frequency;
288  frequency_derivative *= 2.f * sqrtf(frequency);
289  break;
290  case FSCALE_CBRT:
291  frequency = frequency * frequency * frequency;
292  frequency_derivative *= 3.f * powf(frequency, 2.f / 3.f);
293  break;
294  case FSCALE_QDRT:
295  frequency = frequency * frequency * frequency * frequency;
296  frequency_derivative *= 4.f * powf(frequency, 3.f / 4.f);
297  break;
298  case FSCALE_FM:
299  frequency = 2.f * powf(frequency, 3.f / 2.f) / 3.f;
300  frequency_derivative *= sqrtf(frequency);
301  break;
302  }
303 
304  frequency_band[y*2 ] = frequency;
305  frequency_band[y*2+1] = frequency_derivative * deviation;
306 
307  ret = 1.f / (frequency_derivative * deviation);
308  }
309 
310  return ret;
311 }
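/* Worked example (editorial, not in the original file): each iteration stores
 * a centre frequency and a bandwidth estimate for one band.  For FSCALE_MEL
 * the inverse mapping and its derivative are the usual mel formulas, e.g. for
 * a position of 1000 on the mel axis:
 *
 *   float hz  = 700.f * (powf(10.f, 1000.f / 2595.f) - 1.f);  // ~1000 Hz
 *   float dhz = (hz + 700.f) * logf(10.f) / 2595.f;           // d(Hz)/d(mel)
 *
 * which is what the FSCALE_MEL branch evaluates per band before scaling the
 * bandwidth by the Heisenberg-Gabor factor derived from the deviation option.
 */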
312 
313 static float remap_log(ShowCWTContext *s, float value, int iscale, float log_factor)
314 {
315  const float max = s->maximum_intensity;
316  const float min = s->minimum_intensity;
317  float ret;
318 
319  value += min;
320 
321  switch (iscale) {
322  case ISCALE_LINEAR:
323  ret = max - expf(value / log_factor);
324  break;
325  case ISCALE_LOG:
326  value = logf(value) * log_factor;
327  ret = max - av_clipf(value, 0.f, 1.f);
328  break;
329  case ISCALE_SQRT:
330  value = max - expf(value / log_factor);
331  ret = sqrtf(value);
332  break;
333  case ISCALE_CBRT:
334  value = max - expf(value / log_factor);
335  ret = cbrtf(value);
336  break;
337  case ISCALE_QDRT:
338  value = max - expf(value / log_factor);
339  ret = powf(value, 0.25f);
340  break;
341  }
342 
343  return av_clipf(ret, 0.f, 1.f);
344 }
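/* Worked example (editorial, default options assumed): draw() passes
 * log_factor = 1/logf(logb).  With the defaults logb = 0.0001, imin = 0 and
 * imax = 1, the ISCALE_LOG branch maps a magnitude of 0.01 to
 *
 *   logf(0.01f) * (1.f / logf(0.0001f)) ~= 0.5,  ret = 1.f - 0.5f = 0.5f
 *
 * i.e. mid intensity, because 0.01 lies halfway between 1.0 and the 1e-4
 * floor on a logarithmic scale.
 */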
345 
346 static int run_channel_cwt_prepare(AVFilterContext *ctx, void *arg, int jobnr, int ch)
347 {
348  ShowCWTContext *s = ctx->priv;
349  const int hop_size = s->hop_size;
350  AVFrame *fin = arg;
351  float *cache = (float *)s->cache->extended_data[ch];
352  AVComplexFloat *src = (AVComplexFloat *)s->fft_in->extended_data[ch];
353  AVComplexFloat *dst = (AVComplexFloat *)s->fft_out->extended_data[ch];
354  const int offset = (s->input_padding_size - hop_size) >> 1;
355 
356  if (fin) {
357  const float *input = (const float *)fin->extended_data[ch];
358  const int offset = s->hop_size - fin->nb_samples;
359 
360  memmove(cache, &cache[fin->nb_samples], offset * sizeof(float));
361  memcpy(&cache[offset], input, fin->nb_samples * sizeof(float));
362  }
363 
364  if (fin && s->hop_index + fin->nb_samples < hop_size)
365  return 0;
366 
367  memset(src, 0, sizeof(float) * s->fft_size);
368  for (int n = 0; n < hop_size; n++)
369  src[n+offset].re = cache[n];
370 
371  s->tx_fn(s->fft[jobnr], dst, src, sizeof(*src));
372 
373  return 0;
374 }
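/* Editorial note: run_channel_cwt_prepare() keeps a per-channel sliding cache
 * of hop_size samples.  New input is appended at the tail while older samples
 * are shifted towards the head; once a full hop is available it is copied
 * into the middle of the zero-padded FFT input (real parts only) and the
 * forward transform for this job's thread is run.
 */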
375 
376 #define DRAW_BAR_COLOR(x) \
377 do { \
378  if (Y <= ht) { \
379  dstY[x] = 0; \
380  dstU[x] = 128; \
381  dstV[x] = 128; \
382  } else { \
383  float mul = (Y - ht) * bh[0]; \
384  dstY[x] = av_clip_uint8(lrintf(Y * mul * 255.f)); \
385  dstU[x] = av_clip_uint8(lrintf((U-0.5f) * 128.f + 128)); \
386  dstV[x] = av_clip_uint8(lrintf((V-0.5f) * 128.f + 128)); \
387  } \
388 } while (0)
389 
390 static void draw_bar(ShowCWTContext *s, int y,
391  float Y, float U, float V)
392 {
393  float *bh = ((float *)s->bh_out->extended_data[0]) + y;
394  const ptrdiff_t ylinesize = s->outpicref->linesize[0];
395  const ptrdiff_t ulinesize = s->outpicref->linesize[1];
396  const ptrdiff_t vlinesize = s->outpicref->linesize[2];
397  const int direction = s->direction;
398  const int sono_size = s->sono_size;
399  const int bar_size = s->bar_size;
400  const float rcp_bar_h = 1.f / bar_size;
401  uint8_t *dstY, *dstU, *dstV;
402  const int w = s->w;
403 
404  bh[0] = 1.f / (Y + 0.0001f);
405  switch (direction) {
406  case DIRECTION_LR:
407  dstY = s->outpicref->data[0] + y * ylinesize;
408  dstU = s->outpicref->data[1] + y * ulinesize;
409  dstV = s->outpicref->data[2] + y * vlinesize;
410  for (int x = 0; x < bar_size; x++) {
411  float ht = (bar_size - x) * rcp_bar_h;
412  DRAW_BAR_COLOR(x);
413  }
414  break;
415  case DIRECTION_RL:
416  dstY = s->outpicref->data[0] + y * ylinesize;
417  dstU = s->outpicref->data[1] + y * ulinesize;
418  dstV = s->outpicref->data[2] + y * vlinesize;
419  for (int x = 0; x < bar_size; x++) {
420  float ht = x * rcp_bar_h;
421  DRAW_BAR_COLOR(w - bar_size + x);
422  }
423  break;
424  case DIRECTION_UD:
425  dstY = s->outpicref->data[0] + w - 1 - y;
426  dstU = s->outpicref->data[1] + w - 1 - y;
427  dstV = s->outpicref->data[2] + w - 1 - y;
428  for (int x = 0; x < bar_size; x++) {
429  float ht = (bar_size - x) * rcp_bar_h;
430  DRAW_BAR_COLOR(0);
431  dstY += ylinesize;
432  dstU += ulinesize;
433  dstV += vlinesize;
434  }
435  break;
436  case DIRECTION_DU:
437  dstY = s->outpicref->data[0] + w - 1 - y + ylinesize * sono_size;
438  dstU = s->outpicref->data[1] + w - 1 - y + ulinesize * sono_size;
439  dstV = s->outpicref->data[2] + w - 1 - y + vlinesize * sono_size;
440  for (int x = 0; x < bar_size; x++) {
441  float ht = x * rcp_bar_h;
442  DRAW_BAR_COLOR(0);
443  dstY += ylinesize;
444  dstU += ulinesize;
445  dstV += vlinesize;
446  }
447  break;
448  }
449 }
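/* Editorial note: draw_bar() renders one row/column of the optional bargraph.
 * bh[0] caches 1/(Y + 1e-4) so DRAW_BAR_COLOR can scale the luma by roughly
 * (Y - ht) / Y: pixels whose threshold ht exceeds the band intensity stay
 * black, and the bar ramps from the band's own brightness where ht is near
 * zero down to black where ht reaches Y.
 */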
450 
451 static int draw(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
452 {
453  ShowCWTContext *s = ctx->priv;
454  const ptrdiff_t ylinesize = s->outpicref->linesize[0];
455  const ptrdiff_t ulinesize = s->outpicref->linesize[1];
456  const ptrdiff_t vlinesize = s->outpicref->linesize[2];
457  const ptrdiff_t alinesize = s->outpicref->linesize[3];
458  const float log_factor = 1.f/logf(s->logarithmic_basis);
459  const int count = s->frequency_band_count;
460  const int start = (count * jobnr) / nb_jobs;
461  const int end = (count * (jobnr+1)) / nb_jobs;
462  const int nb_channels = s->nb_channels;
463  const int iscale = s->intensity_scale;
464  const int ihop_index = s->ihop_index;
465  const int ihop_size = s->ihop_size;
466  const float rotation = s->rotation;
467  const int direction = s->direction;
468  uint8_t *dstY, *dstU, *dstV, *dstA;
469  const int sono_size = s->sono_size;
470  const int bar_size = s->bar_size;
471  const int mode = s->mode;
472  const int w_1 = s->w - 1;
473  const int x = s->pos;
474  float Y, U, V;
475 
476  for (int y = start; y < end; y++) {
477  const AVComplexFloat *src = ((const AVComplexFloat *)s->ch_out->extended_data[y]) +
478  0 * ihop_size + ihop_index;
479 
480  if (sono_size <= 0)
481  goto skip;
482 
483  switch (direction) {
484  case DIRECTION_LR:
485  case DIRECTION_RL:
486  dstY = s->outpicref->data[0] + y * ylinesize;
487  dstU = s->outpicref->data[1] + y * ulinesize;
488  dstV = s->outpicref->data[2] + y * vlinesize;
489  dstA = s->outpicref->data[3] ? s->outpicref->data[3] + y * alinesize : NULL;
490  break;
491  case DIRECTION_UD:
492  case DIRECTION_DU:
493  dstY = s->outpicref->data[0] + x * ylinesize + w_1 - y;
494  dstU = s->outpicref->data[1] + x * ulinesize + w_1 - y;
495  dstV = s->outpicref->data[2] + x * vlinesize + w_1 - y;
496  dstA = s->outpicref->data[3] ? s->outpicref->data[3] + x * alinesize + w_1 - y : NULL;
497  break;
498  }
499 
500  switch (s->slide) {
501  case SLIDE_REPLACE:
502  case SLIDE_FRAME:
503  /* nothing to do here */
504  break;
505  case SLIDE_SCROLL:
506  switch (s->direction) {
507  case DIRECTION_RL:
508  memmove(dstY, dstY + 1, w_1);
509  memmove(dstU, dstU + 1, w_1);
510  memmove(dstV, dstV + 1, w_1);
511  if (dstA != NULL)
512  memmove(dstA, dstA + 1, w_1);
513  break;
514  case DIRECTION_LR:
515  memmove(dstY + 1, dstY, w_1);
516  memmove(dstU + 1, dstU, w_1);
517  memmove(dstV + 1, dstV, w_1);
518  if (dstA != NULL)
519  memmove(dstA + 1, dstA, w_1);
520  break;
521  }
522  break;
523  }
524 
525  if (direction == DIRECTION_RL ||
526  direction == DIRECTION_LR) {
527  dstY += x;
528  dstU += x;
529  dstV += x;
530  if (dstA != NULL)
531  dstA += x;
532  }
533 skip:
534 
535  switch (mode) {
536  case 4:
537  {
538  const AVComplexFloat *src2 = (nb_channels > 1) ? src + ihop_size: src;
539  float z, u, v;
540 
541  z = hypotf(src[0].re + src2[0].re, src[0].im + src2[0].im);
542  u = hypotf(src[0].re, src[0].im);
543  v = hypotf(src2[0].re, src2[0].im);
544 
545  z = remap_log(s, z, iscale, log_factor);
546  u = remap_log(s, u, iscale, log_factor);
547  v = remap_log(s, v, iscale, log_factor);
548 
549  Y = z;
550  U = sinf((v - u) * M_PI_2);
551  V = sinf((u - v) * M_PI_2);
552 
553  u = U * cosf(rotation * M_PI) - V * sinf(rotation * M_PI);
554  v = U * sinf(rotation * M_PI) + V * cosf(rotation * M_PI);
555 
556  U = 0.5f + 0.5f * z * u;
557  V = 0.5f + 0.5f * z * v;
558 
559  if (sono_size > 0) {
560  dstY[0] = av_clip_uint8(lrintf(Y * 255.f));
561  dstU[0] = av_clip_uint8(lrintf(U * 255.f));
562  dstV[0] = av_clip_uint8(lrintf(V * 255.f));
563  if (dstA)
564  dstA[0] = dstY[0];
565  }
566 
567  if (bar_size > 0)
568  draw_bar(s, y, Y, U, V);
569  }
570  break;
571  case 3:
572  {
573  const int nb_channels = s->nb_channels;
574  const float yf = 1.f / nb_channels;
575 
576  Y = 0.f;
577  U = V = 0.5f;
578  for (int ch = 0; ch < nb_channels; ch++) {
579  const AVComplexFloat *srcn = src + ihop_size * ch;
580  float z;
581 
582  z = hypotf(srcn[0].re, srcn[0].im);
583  z = remap_log(s, z, iscale, log_factor);
584 
585  Y += z * yf;
586  U += z * yf * sinf(2.f * M_PI * (ch * yf + rotation));
587  V += z * yf * cosf(2.f * M_PI * (ch * yf + rotation));
588  }
589 
590  if (sono_size > 0) {
591  dstY[0] = av_clip_uint8(lrintf(Y * 255.f));
592  dstU[0] = av_clip_uint8(lrintf(U * 255.f));
593  dstV[0] = av_clip_uint8(lrintf(V * 255.f));
594  if (dstA)
595  dstA[0] = dstY[0];
596  }
597 
598  if (bar_size > 0)
599  draw_bar(s, y, Y, U, V);
600  }
601  break;
602  case 2:
603  Y = hypotf(src[0].re, src[0].im);
604  Y = remap_log(s, Y, iscale, log_factor);
605  U = atan2f(src[0].im, src[0].re);
606  U = 0.5f + 0.5f * U * Y / M_PI;
607  V = 1.f - U;
608 
609  if (sono_size > 0) {
610  dstY[0] = av_clip_uint8(lrintf(Y * 255.f));
611  dstU[0] = av_clip_uint8(lrintf(U * 255.f));
612  dstV[0] = av_clip_uint8(lrintf(V * 255.f));
613  if (dstA)
614  dstA[0] = dstY[0];
615  }
616 
617  if (bar_size > 0)
618  draw_bar(s, y, Y, U, V);
619  break;
620  case 1:
621  Y = atan2f(src[0].im, src[0].re);
622  Y = 0.5f + 0.5f * Y / M_PI;
623 
624  if (sono_size > 0) {
625  dstY[0] = av_clip_uint8(lrintf(Y * 255.f));
626  if (dstA)
627  dstA[0] = dstY[0];
628  }
629 
630  if (bar_size > 0)
631  draw_bar(s, y, Y, 0.5f, 0.5f);
632  break;
633  case 0:
634  Y = hypotf(src[0].re, src[0].im);
635  Y = remap_log(s, Y, iscale, log_factor);
636 
637  if (sono_size > 0) {
638  dstY[0] = av_clip_uint8(lrintf(Y * 255.f));
639  if (dstA)
640  dstA[0] = dstY[0];
641  }
642 
643  if (bar_size > 0)
644  draw_bar(s, y, Y, 0.5f, 0.5f);
645  break;
646  }
647  }
648 
649  return 0;
650 }
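/* Editorial summary of the mode switch above (not part of the original
 * source): mode 0 writes the remapped magnitude to luma only; mode 1 the
 * phase; mode 2 puts magnitude in luma and phase (weighted by magnitude) in
 * the chroma pair; mode 3 averages per-channel magnitudes into luma and
 * spreads the channels around the chroma circle, offset by the rotation
 * option; mode 4 keys the chroma on the left/right magnitude difference,
 * again rotated by rotation*pi.
 */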
651 
652 static int run_channel_cwt(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
653 {
654  ShowCWTContext *s = ctx->priv;
655  const int ch = *(int *)arg;
656  const AVComplexFloat *fft_out = (const AVComplexFloat *)s->fft_out->extended_data[ch];
657  AVComplexFloat *isrc = (AVComplexFloat *)s->ifft_in->extended_data[jobnr];
658  AVComplexFloat *idst = (AVComplexFloat *)s->ifft_out->extended_data[jobnr];
659  const int output_padding_size = s->output_padding_size;
660  const int input_padding_size = s->input_padding_size;
661  const float scale = 1.f / input_padding_size;
662  const int ihop_size = s->ihop_size;
663  const int count = s->frequency_band_count;
664  const int start = (count * jobnr) / nb_jobs;
665  const int end = (count * (jobnr+1)) / nb_jobs;
666 
667  for (int y = start; y < end; y++) {
668  AVComplexFloat *chout = ((AVComplexFloat *)s->ch_out->extended_data[y]) + ch * ihop_size;
669  AVComplexFloat *over = ((AVComplexFloat *)s->over->extended_data[ch]) + y * ihop_size;
670  AVComplexFloat *dstx = (AVComplexFloat *)s->dst_x->extended_data[jobnr];
671  AVComplexFloat *srcx = (AVComplexFloat *)s->src_x->extended_data[jobnr];
672  const AVComplexFloat *kernel = s->kernel[y];
673  const unsigned *index = (const unsigned *)s->index;
674  const int kernel_start = s->kernel_start[y];
675  const int kernel_stop = s->kernel_stop[y];
676  const int kernel_range = kernel_stop - kernel_start + 1;
677  int offset;
678 
679  if (kernel_start >= 0) {
680  offset = 0;
681  memcpy(srcx, fft_out + kernel_start, sizeof(*fft_out) * kernel_range);
682  } else {
683  offset = -kernel_start;
684  memcpy(srcx+offset, fft_out, sizeof(*fft_out) * (kernel_range-offset));
685  memcpy(srcx, fft_out+input_padding_size-offset, sizeof(*fft_out)*offset);
686  }
687 
688  s->fdsp->vector_fmul_scalar((float *)srcx, (const float *)srcx, scale, FFALIGN(kernel_range * 2, 4));
689  s->fdsp->vector_fmul((float *)dstx, (const float *)srcx,
690  (const float *)kernel, FFALIGN(kernel_range * 2, 16));
691 
692  memset(isrc, 0, sizeof(*isrc) * output_padding_size);
693  if (offset == 0) {
694  const unsigned *kindex = index + kernel_start;
695  for (int i = 0; i < kernel_range; i++) {
696  const unsigned n = kindex[i];
697 
698  isrc[n].re += dstx[i].re;
699  isrc[n].im += dstx[i].im;
700  }
701  } else {
702  for (int i = 0; i < kernel_range; i++) {
703  const unsigned n = (i-kernel_start) & (output_padding_size-1);
704 
705  isrc[n].re += dstx[i].re;
706  isrc[n].im += dstx[i].im;
707  }
708  }
709 
710  s->itx_fn(s->ifft[jobnr], idst, isrc, sizeof(*isrc));
711 
712  memcpy(chout, idst, sizeof(*chout) * ihop_size);
713  for (int n = 0; n < ihop_size; n++) {
714  chout[n].re += over[n].re;
715  chout[n].im += over[n].im;
716  }
717  memcpy(over, idst + ihop_size, sizeof(*over) * ihop_size);
718  }
719 
720  return 0;
721 }
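/* Editorial note: for every frequency band this routine multiplies the
 * forward-FFT spectrum by the band's kernel over [kernel_start, kernel_stop],
 * folds the product into an output_padding_size-point buffer (the index[]
 * table supplies the wrap-around), runs the small inverse FFT for this
 * thread, then overlap-adds the first ihop_size samples with the tail kept
 * in "over" from the previous hop and saves the new tail for the next one.
 */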
722 
723 static int compute_kernel(AVFilterContext *ctx)
724 {
725  ShowCWTContext *s = ctx->priv;
726  const int size = s->input_padding_size;
727  const int output_sample_count = s->output_sample_count;
728  const int fsize = s->frequency_band_count;
729  int *kernel_start = s->kernel_start;
730  int *kernel_stop = s->kernel_stop;
731  unsigned *index = s->index;
732  int range_min = INT_MAX;
733  int range_max = 0, ret = 0;
734  float *tkernel;
735 
736  tkernel = av_malloc_array(size, sizeof(*tkernel));
737  if (!tkernel)
738  return AVERROR(ENOMEM);
739 
740  for (int y = 0; y < fsize; y++) {
741  AVComplexFloat *kernel = s->kernel[y];
742  int start = INT_MIN, stop = INT_MAX;
743  const float frequency = s->frequency_band[y*2];
744  const float deviation = 1.f / (s->frequency_band[y*2+1] *
745  output_sample_count);
746  const int a = FFMAX(frequency-12.f*sqrtf(1.f/deviation)-0.5f, -size);
747  const int b = FFMIN(frequency+12.f*sqrtf(1.f/deviation)-0.5f, size+a);
748  const int range = -a;
749 
750  memset(tkernel, 0, size * sizeof(*tkernel));
751  for (int n = a; n < b; n++) {
752  float ff, f = n+0.5f-frequency;
753 
754  ff = expf(-f*f*deviation);
755  tkernel[n+range] = ff;
756  }
757 
758  for (int n = a; n < b; n++) {
759  if (tkernel[n+range] != 0.f) {
760  if (tkernel[n+range] > FLT_MIN)
761  av_log(ctx, AV_LOG_DEBUG, "out of range kernel %g\n", tkernel[n+range]);
762  start = n;
763  break;
764  }
765  }
766 
767  for (int n = b; n >= a; n--) {
768  if (tkernel[n+range] != 0.f) {
769  if (tkernel[n+range] > FLT_MIN)
770  av_log(ctx, AV_LOG_DEBUG, "out of range kernel %g\n", tkernel[n+range]);
771  stop = n;
772  break;
773  }
774  }
775 
776  if (start == INT_MIN || stop == INT_MAX) {
777  ret = AVERROR(EINVAL);
778  break;
779  }
780 
781  kernel_start[y] = start;
782  kernel_stop[y] = stop;
783 
784  kernel = av_calloc(FFALIGN(stop-start+1, 16), sizeof(*kernel));
785  if (!kernel) {
786  ret = AVERROR(ENOMEM);
787  break;
788  }
789 
790  for (int n = 0; n <= stop - start; n++) {
791  kernel[n].re = tkernel[n+range+start];
792  kernel[n].im = tkernel[n+range+start];
793  }
794 
795  range_min = FFMIN(range_min, stop+1-start);
796  range_max = FFMAX(range_max, stop+1-start);
797 
798  s->kernel[y] = kernel;
799  }
800 
801  for (int n = 0; n < size; n++)
802  index[n] = n & (s->output_padding_size - 1);
803 
804  av_log(ctx, AV_LOG_DEBUG, "range_min: %d\n", range_min);
805  av_log(ctx, AV_LOG_DEBUG, "range_max: %d\n", range_max);
806 
807  av_freep(&tkernel);
808 
809  return ret;
810 }
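/* Editorial note: each kernel computed above is a Gaussian window sampled in
 * the frequency domain, expf(-f*f*deviation) centred on the band's frequency
 * and truncated where it underflows to zero in single precision;
 * kernel_start and kernel_stop record the surviving support so
 * run_channel_cwt() only touches that slice of the spectrum.
 */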
811 
812 static int config_output(AVFilterLink *outlink)
813 {
814  AVFilterContext *ctx = outlink->src;
815  AVFilterLink *inlink = ctx->inputs[0];
816  ShowCWTContext *s = ctx->priv;
817  const float limit_frequency = inlink->sample_rate * 0.5f;
818  float maximum_frequency = fminf(s->maximum_frequency, limit_frequency);
819  float minimum_frequency = s->minimum_frequency;
820  float scale = 1.f, factor;
821  int ret;
822 
823  if (minimum_frequency >= maximum_frequency) {
824  av_log(ctx, AV_LOG_ERROR, "min frequency (%f) >= (%f) max frequency\n",
825  minimum_frequency, maximum_frequency);
826  return AVERROR(EINVAL);
827  }
828 
829  uninit(ctx);
830 
831  s->fdsp = avpriv_float_dsp_alloc(0);
832  if (!s->fdsp)
833  return AVERROR(ENOMEM);
834 
835  switch (s->direction) {
836  case DIRECTION_LR:
837  case DIRECTION_RL:
838  s->bar_size = s->w * s->bar_ratio;
839  s->sono_size = s->w - s->bar_size;
840  s->frequency_band_count = s->h;
841  break;
842  case DIRECTION_UD:
843  case DIRECTION_DU:
844  s->bar_size = s->h * s->bar_ratio;
845  s->sono_size = s->h - s->bar_size;
846  s->frequency_band_count = s->w;
847  break;
848  }
849 
850  switch (s->frequency_scale) {
851  case FSCALE_LOG:
852  minimum_frequency = logf(minimum_frequency) / logf(2.f);
853  maximum_frequency = logf(maximum_frequency) / logf(2.f);
854  break;
855  case FSCALE_BARK:
856  minimum_frequency = 6.f * asinhf(minimum_frequency / 600.f);
857  maximum_frequency = 6.f * asinhf(maximum_frequency / 600.f);
858  break;
859  case FSCALE_MEL:
860  minimum_frequency = 2595.f * log10f(1.f + minimum_frequency / 700.f);
861  maximum_frequency = 2595.f * log10f(1.f + maximum_frequency / 700.f);
862  break;
863  case FSCALE_ERBS:
864  minimum_frequency = 11.17268f * logf(1.f + (46.06538f * minimum_frequency) / (minimum_frequency + 14678.49f));
865  maximum_frequency = 11.17268f * logf(1.f + (46.06538f * maximum_frequency) / (maximum_frequency + 14678.49f));
866  break;
867  case FSCALE_SQRT:
868  minimum_frequency = sqrtf(minimum_frequency);
869  maximum_frequency = sqrtf(maximum_frequency);
870  break;
871  case FSCALE_CBRT:
872  minimum_frequency = cbrtf(minimum_frequency);
873  maximum_frequency = cbrtf(maximum_frequency);
874  break;
875  case FSCALE_QDRT:
876  minimum_frequency = powf(minimum_frequency, 0.25f);
877  maximum_frequency = powf(maximum_frequency, 0.25f);
878  break;
879  case FSCALE_FM:
880  minimum_frequency = powf(9.f * (minimum_frequency * minimum_frequency) / 4.f, 1.f / 3.f);
881  maximum_frequency = powf(9.f * (maximum_frequency * maximum_frequency) / 4.f, 1.f / 3.f);
882  break;
883  }
884 
885  s->frequency_band = av_calloc(s->frequency_band_count,
886  sizeof(*s->frequency_band) * 2);
887  if (!s->frequency_band)
888  return AVERROR(ENOMEM);
889 
890  s->nb_consumed_samples = inlink->sample_rate *
891  frequency_band(s->frequency_band,
892  s->frequency_band_count, maximum_frequency - minimum_frequency,
893  minimum_frequency, s->frequency_scale, s->deviation);
894  s->nb_consumed_samples = FFMIN(s->nb_consumed_samples, 65536);
895 
896  s->nb_threads = FFMIN(s->frequency_band_count, ff_filter_get_nb_threads(ctx));
897  s->nb_channels = inlink->ch_layout.nb_channels;
898  s->old_pts = AV_NOPTS_VALUE;
899  s->eof_pts = AV_NOPTS_VALUE;
900 
901  s->input_sample_count = 1 << (32 - ff_clz(s->nb_consumed_samples));
902  s->input_padding_size = 1 << (32 - ff_clz(s->input_sample_count));
903  s->output_sample_count = FFMAX(1, av_rescale(s->input_sample_count, s->pps, inlink->sample_rate));
904  s->output_padding_size = 1 << (32 - ff_clz(s->output_sample_count));
905 
906  s->hop_size = s->input_sample_count;
907  s->ihop_size = s->output_padding_size >> 1;
908 
909  outlink->w = s->w;
910  outlink->h = s->h;
911  outlink->sample_aspect_ratio = (AVRational){1,1};
912 
913  s->fft_size = FFALIGN(s->input_padding_size, av_cpu_max_align());
914  s->ifft_size = FFALIGN(s->output_padding_size, av_cpu_max_align());
915 
916  s->fft = av_calloc(s->nb_threads, sizeof(*s->fft));
917  if (!s->fft)
918  return AVERROR(ENOMEM);
919 
920  for (int n = 0; n < s->nb_threads; n++) {
921  ret = av_tx_init(&s->fft[n], &s->tx_fn, AV_TX_FLOAT_FFT, 0, s->input_padding_size, &scale, 0);
922  if (ret < 0)
923  return ret;
924  }
925 
926  s->ifft = av_calloc(s->nb_threads, sizeof(*s->ifft));
927  if (!s->ifft)
928  return AVERROR(ENOMEM);
929 
930  for (int n = 0; n < s->nb_threads; n++) {
931  ret = av_tx_init(&s->ifft[n], &s->itx_fn, AV_TX_FLOAT_FFT, 1, s->output_padding_size, &scale, 0);
932  if (ret < 0)
933  return ret;
934  }
935 
936  s->outpicref = ff_get_video_buffer(outlink, outlink->w, outlink->h);
937  s->fft_in = ff_get_audio_buffer(inlink, s->fft_size * 2);
938  s->fft_out = ff_get_audio_buffer(inlink, s->fft_size * 2);
939  s->dst_x = av_frame_alloc();
940  s->src_x = av_frame_alloc();
941  s->kernel = av_calloc(s->frequency_band_count, sizeof(*s->kernel));
942  s->cache = ff_get_audio_buffer(inlink, s->hop_size);
943  s->over = ff_get_audio_buffer(inlink, s->frequency_band_count * 2 * s->ihop_size);
944  s->bh_out = ff_get_audio_buffer(inlink, s->frequency_band_count);
945  s->ifft_in = av_frame_alloc();
946  s->ifft_out = av_frame_alloc();
947  s->ch_out = av_frame_alloc();
948  s->index = av_calloc(s->input_padding_size, sizeof(*s->index));
949  s->kernel_start = av_calloc(s->frequency_band_count, sizeof(*s->kernel_start));
950  s->kernel_stop = av_calloc(s->frequency_band_count, sizeof(*s->kernel_stop));
951  if (!s->outpicref || !s->fft_in || !s->fft_out || !s->src_x || !s->dst_x || !s->over ||
952  !s->ifft_in || !s->ifft_out || !s->kernel_start || !s->kernel_stop || !s->ch_out ||
953  !s->cache || !s->index || !s->bh_out || !s->kernel)
954  return AVERROR(ENOMEM);
955 
956  s->ch_out->format = inlink->format;
957  s->ch_out->nb_samples = 2 * s->ihop_size * inlink->ch_layout.nb_channels;
958  s->ch_out->ch_layout.nb_channels = s->frequency_band_count;
959  ret = av_frame_get_buffer(s->ch_out, 0);
960  if (ret < 0)
961  return ret;
962 
963  s->ifft_in->format = inlink->format;
964  s->ifft_in->nb_samples = s->ifft_size * 2;
965  s->ifft_in->ch_layout.nb_channels = s->nb_threads;
966  ret = av_frame_get_buffer(s->ifft_in, 0);
967  if (ret < 0)
968  return ret;
969 
970  s->ifft_out->format = inlink->format;
971  s->ifft_out->nb_samples = s->ifft_size * 2;
972  s->ifft_out->ch_layout.nb_channels = s->nb_threads;
973  ret = av_frame_get_buffer(s->ifft_out, 0);
974  if (ret < 0)
975  return ret;
976 
977  s->src_x->format = inlink->format;
978  s->src_x->nb_samples = s->fft_size * 2;
979  s->src_x->ch_layout.nb_channels = s->nb_threads;
980  ret = av_frame_get_buffer(s->src_x, 0);
981  if (ret < 0)
982  return ret;
983 
984  s->dst_x->format = inlink->format;
985  s->dst_x->nb_samples = s->fft_size * 2;
986  s->dst_x->ch_layout.nb_channels = s->nb_threads;
987  ret = av_frame_get_buffer(s->dst_x, 0);
988  if (ret < 0)
989  return ret;
990 
991  s->outpicref->sample_aspect_ratio = (AVRational){1,1};
992 
993  for (int y = 0; y < outlink->h; y++) {
994  memset(s->outpicref->data[0] + y * s->outpicref->linesize[0], 0, outlink->w);
995  memset(s->outpicref->data[1] + y * s->outpicref->linesize[1], 128, outlink->w);
996  memset(s->outpicref->data[2] + y * s->outpicref->linesize[2], 128, outlink->w);
997  if (s->outpicref->data[3])
998  memset(s->outpicref->data[3] + y * s->outpicref->linesize[3], 0, outlink->w);
999  }
1000 
1001  s->outpicref->color_range = AVCOL_RANGE_JPEG;
1002 
1003  factor = s->input_padding_size / (float)inlink->sample_rate;
1004  for (int n = 0; n < s->frequency_band_count; n++) {
1005  s->frequency_band[2*n ] *= factor;
1006  s->frequency_band[2*n+1] *= factor;
1007  }
1008 
1009  av_log(ctx, AV_LOG_DEBUG, "factor: %f\n", factor);
1010  av_log(ctx, AV_LOG_DEBUG, "nb_consumed_samples: %d\n", s->nb_consumed_samples);
1011  av_log(ctx, AV_LOG_DEBUG, "hop_size: %d\n", s->hop_size);
1012  av_log(ctx, AV_LOG_DEBUG, "ihop_size: %d\n", s->ihop_size);
1013  av_log(ctx, AV_LOG_DEBUG, "input_sample_count: %d\n", s->input_sample_count);
1014  av_log(ctx, AV_LOG_DEBUG, "input_padding_size: %d\n", s->input_padding_size);
1015  av_log(ctx, AV_LOG_DEBUG, "output_sample_count: %d\n", s->output_sample_count);
1016  av_log(ctx, AV_LOG_DEBUG, "output_padding_size: %d\n", s->output_padding_size);
1017 
1018  switch (s->direction) {
1019  case DIRECTION_LR:
1020  case DIRECTION_UD:
1021  s->pos = s->bar_size;
1022  break;
1023  case DIRECTION_RL:
1024  case DIRECTION_DU:
1025  s->pos = s->sono_size;
1026  break;
1027  }
1028 
1029  s->auto_frame_rate = av_make_q(inlink->sample_rate, s->hop_size);
1030  if (strcmp(s->rate_str, "auto")) {
1031  ret = av_parse_video_rate(&s->frame_rate, s->rate_str);
1032  } else {
1033  s->frame_rate = s->auto_frame_rate;
1034  }
1035  outlink->frame_rate = s->frame_rate;
1036  outlink->time_base = av_inv_q(outlink->frame_rate);
1037 
1038  ret = compute_kernel(ctx);
1039  if (ret < 0)
1040  return ret;
1041 
1042  return 0;
1043 }
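/* Editorial note on the sizes chosen above: nb_consumed_samples follows the
 * narrowest (lowest-frequency) band and is capped at 65536; hop_size equals
 * input_sample_count (the next power of two above it) and the forward FFT
 * length input_padding_size is twice that.  output_sample_count rescales the
 * hop to the requested pixels per second, output_padding_size rounds it up
 * to a power of two for the inverse FFT, and ihop_size (half of that) is the
 * number of output columns produced per hop of input.
 */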
1044 
1045 static int output_frame(AVFilterContext *ctx)
1046 {
1047  AVFilterLink *outlink = ctx->outputs[0];
1048  AVFilterLink *inlink = ctx->inputs[0];
1049  ShowCWTContext *s = ctx->priv;
1050  const int nb_planes = 3 + (s->outpicref->data[3] != NULL);
1051  int ret;
1052 
1053  switch (s->slide) {
1054  case SLIDE_SCROLL:
1055  switch (s->direction) {
1056  case DIRECTION_UD:
1057  for (int p = 0; p < nb_planes; p++) {
1058  ptrdiff_t linesize = s->outpicref->linesize[p];
1059 
1060  for (int y = s->h - 1; y > s->bar_size; y--) {
1061  uint8_t *dst = s->outpicref->data[p] + y * linesize;
1062 
1063  memmove(dst, dst - linesize, s->w);
1064  }
1065  }
1066  break;
1067  case DIRECTION_DU:
1068  for (int p = 0; p < nb_planes; p++) {
1069  ptrdiff_t linesize = s->outpicref->linesize[p];
1070 
1071  for (int y = 0; y < s->sono_size; y++) {
1072  uint8_t *dst = s->outpicref->data[p] + y * linesize;
1073 
1074  memmove(dst, dst + linesize, s->w);
1075  }
1076  }
1077  break;
1078  }
1079  break;
1080  }
1081 
1082  ff_filter_execute(ctx, draw, NULL, NULL, s->nb_threads);
1083 
1084  switch (s->slide) {
1085  case SLIDE_REPLACE:
1086  case SLIDE_FRAME:
1087  switch (s->direction) {
1088  case DIRECTION_LR:
1089  s->pos++;
1090  if (s->pos >= s->w) {
1091  s->pos = s->bar_size;
1092  s->new_frame = 1;
1093  }
1094  break;
1095  case DIRECTION_RL:
1096  s->pos--;
1097  if (s->pos < 0) {
1098  s->pos = s->sono_size;
1099  s->new_frame = 1;
1100  }
1101  break;
1102  case DIRECTION_UD:
1103  s->pos++;
1104  if (s->pos >= s->h) {
1105  s->pos = s->bar_size;
1106  s->new_frame = 1;
1107  }
1108  break;
1109  case DIRECTION_DU:
1110  s->pos--;
1111  if (s->pos < 0) {
1112  s->pos = s->sono_size;
1113  s->new_frame = 1;
1114  }
1115  break;
1116  }
1117  break;
1118  case SLIDE_SCROLL:
1119  switch (s->direction) {
1120  case DIRECTION_UD:
1121  case DIRECTION_LR:
1122  s->pos = s->bar_size;
1123  break;
1124  case DIRECTION_RL:
1125  case DIRECTION_DU:
1126  s->pos = s->sono_size;
1127  break;
1128  }
1129  break;
1130  }
1131 
1132  if (s->slide == SLIDE_FRAME && s->eof) {
1133  switch (s->direction) {
1134  case DIRECTION_LR:
1135  for (int p = 0; p < nb_planes; p++) {
1136  ptrdiff_t linesize = s->outpicref->linesize[p];
1137  const int size = s->w - s->pos;
1138  const int fill = p > 0 && p < 3 ? 128 : 0;
1139  const int x = s->pos;
1140 
1141  for (int y = 0; y < s->h; y++) {
1142  uint8_t *dst = s->outpicref->data[p] + y * linesize + x;
1143 
1144  memset(dst, fill, size);
1145  }
1146  }
1147  break;
1148  case DIRECTION_RL:
1149  for (int p = 0; p < nb_planes; p++) {
1150  ptrdiff_t linesize = s->outpicref->linesize[p];
1151  const int size = s->w - s->pos;
1152  const int fill = p > 0 && p < 3 ? 128 : 0;
1153 
1154  for (int y = 0; y < s->h; y++) {
1155  uint8_t *dst = s->outpicref->data[p] + y * linesize;
1156 
1157  memset(dst, fill, size);
1158  }
1159  }
1160  break;
1161  case DIRECTION_UD:
1162  for (int p = 0; p < nb_planes; p++) {
1163  ptrdiff_t linesize = s->outpicref->linesize[p];
1164  const int fill = p > 0 && p < 3 ? 128 : 0;
1165 
1166  for (int y = s->pos; y < s->h; y++) {
1167  uint8_t *dst = s->outpicref->data[p] + y * linesize;
1168 
1169  memset(dst, fill, s->w);
1170  }
1171  }
1172  break;
1173  case DIRECTION_DU:
1174  for (int p = 0; p < nb_planes; p++) {
1175  ptrdiff_t linesize = s->outpicref->linesize[p];
1176  const int fill = p > 0 && p < 3 ? 128 : 0;
1177 
1178  for (int y = s->h - s->pos; y >= 0; y--) {
1179  uint8_t *dst = s->outpicref->data[p] + y * linesize;
1180 
1181  memset(dst, fill, s->w);
1182  }
1183  }
1184  break;
1185  }
1186  }
1187 
1188  s->new_frame = s->slide == SLIDE_FRAME && (s->new_frame || s->eof);
1189 
1190  if (s->slide != SLIDE_FRAME || s->new_frame == 1) {
1191  int64_t pts_offset = s->new_frame ? 0LL : av_rescale(s->ihop_index, s->hop_size, s->ihop_size);
1192  const int offset = (s->input_padding_size - s->hop_size) >> 1;
1193 
1194  pts_offset = av_rescale_q(pts_offset - offset, av_make_q(1, inlink->sample_rate), inlink->time_base);
1195  s->outpicref->pts = av_rescale_q(s->in_pts + pts_offset, inlink->time_base, outlink->time_base);
1196  s->outpicref->duration = 1;
1197  }
1198 
1199  s->ihop_index++;
1200  if (s->ihop_index >= s->ihop_size)
1201  s->ihop_index = s->hop_index = 0;
1202 
1203  if (s->slide == SLIDE_FRAME && s->new_frame == 0)
1204  return 1;
1205 
1206  if (s->old_pts < s->outpicref->pts) {
1207  AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
1208  if (!out)
1209  return AVERROR(ENOMEM);
1210  ret = av_frame_copy_props(out, s->outpicref);
1211  if (ret < 0)
1212  goto fail;
1213  ret = av_frame_copy(out, s->outpicref);
1214  if (ret < 0)
1215  goto fail;
1216  s->old_pts = s->outpicref->pts;
1217  s->new_frame = 0;
1218  ret = ff_filter_frame(outlink, out);
1219  if (ret <= 0)
1220  return ret;
1221 fail:
1222  av_frame_free(&out);
1223  return ret;
1224  }
1225 
1226  return 1;
1227 }
1228 
1229 static int run_channels_cwt_prepare(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
1230 {
1231  ShowCWTContext *s = ctx->priv;
1232  const int count = s->nb_channels;
1233  const int start = (count * jobnr) / nb_jobs;
1234  const int end = (count * (jobnr+1)) / nb_jobs;
1235 
1236  for (int ch = start; ch < end; ch++)
1237  run_channel_cwt_prepare(ctx, arg, jobnr, ch);
1238 
1239  return 0;
1240 }
1241 
1242 static int activate(AVFilterContext *ctx)
1243 {
1244  AVFilterLink *inlink = ctx->inputs[0];
1245  AVFilterLink *outlink = ctx->outputs[0];
1246  ShowCWTContext *s = ctx->priv;
1247  int ret = 0, status;
1248  int64_t pts;
1249 
1250  FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
1251 
1252  if (s->outpicref) {
1253  AVFrame *fin = NULL;
1254 
1255  if (s->hop_index < s->hop_size) {
1256  if (!s->eof) {
1257  ret = ff_inlink_consume_samples(inlink, 1, s->hop_size - s->hop_index, &fin);
1258  if (ret < 0)
1259  return ret;
1260  }
1261 
1262  if (ret > 0 || s->eof) {
1263  ff_filter_execute(ctx, run_channels_cwt_prepare, fin, NULL,
1264  FFMIN(s->nb_threads, s->nb_channels));
1265  if (fin) {
1266  if (s->hop_index == 0) {
1267  s->in_pts = fin->pts;
1268  if (s->old_pts == AV_NOPTS_VALUE)
1269  s->old_pts = av_rescale_q(s->in_pts, inlink->time_base, outlink->time_base) - 1;
1270  }
1271  s->hop_index += fin->nb_samples;
1272  av_frame_free(&fin);
1273  } else {
1274  s->hop_index = s->hop_size;
1275  }
1276  }
1277  }
1278 
1279  if (s->hop_index >= s->hop_size || s->ihop_index > 0) {
1280  for (int ch = 0; ch < s->nb_channels && s->ihop_index == 0; ch++) {
1281  ff_filter_execute(ctx, run_channel_cwt, (void *)&ch, NULL,
1282  s->nb_threads);
1283  }
1284 
1285  ret = output_frame(ctx);
1286  if (ret != 1)
1287  return ret;
1288  }
1289  }
1290 
1291  if (s->eof) {
1292  if (s->slide == SLIDE_FRAME)
1293  ret = output_frame(ctx);
1294  ff_outlink_set_status(outlink, AVERROR_EOF, s->eof_pts);
1295  return ret;
1296  }
1297 
1298  if (!s->eof && ff_inlink_acknowledge_status(inlink, &status, &pts)) {
1299  if (status == AVERROR_EOF) {
1300  s->eof = 1;
1301  ff_filter_set_ready(ctx, 10);
1302  s->eof_pts = av_rescale_q(pts, inlink->time_base, outlink->time_base);
1303  return 0;
1304  }
1305  }
1306 
1307  if (ff_inlink_queued_samples(inlink) > 0 || s->ihop_index ||
1308  s->hop_index >= s->hop_size || s->eof) {
1309  ff_filter_set_ready(ctx, 10);
1310  return 0;
1311  }
1312 
1313  if (ff_outlink_frame_wanted(outlink)) {
1314  ff_inlink_request_frame(inlink);
1315  return 0;
1316  }
1317 
1318  return FFERROR_NOT_READY;
1319 }
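/* Editorial note: activate() accumulates up to hop_size input samples per hop
 * (run_channels_cwt_prepare), runs the per-band/per-channel synthesis once
 * per hop (run_channel_cwt, gated on ihop_index == 0), and then calls
 * output_frame() on successive activations, once for each of the ihop_size
 * columns produced by that hop, before consuming the next hop of audio.  On
 * EOF the pending state is flushed and the output status is set from the
 * rescaled input timestamp.
 */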
1320 
1321 static const AVFilterPad showcwt_outputs[] = {
1322  {
1323  .name = "default",
1324  .type = AVMEDIA_TYPE_VIDEO,
1325  .config_props = config_output,
1326  },
1327 };
1328 
1329 const AVFilter ff_avf_showcwt = {
1330  .name = "showcwt",
1331  .description = NULL_IF_CONFIG_SMALL("Convert input audio to a CWT (Continuous Wavelet Transform) spectrum video output."),
1332  .uninit = uninit,
1333  .priv_size = sizeof(ShowCWTContext),
1334  FILTER_INPUTS(ff_audio_default_filterpad),
1335  FILTER_OUTPUTS(showcwt_outputs),
1336  FILTER_QUERY_FUNC(query_formats),
1337  .activate = activate,
1338  .priv_class = &showcwt_class,
1339  .flags = AVFILTER_FLAG_SLICE_THREADS,
1340 };
ShowCWTContext::dst_x
AVFrame * dst_x
Definition: avf_showcwt.c:98
formats
formats
Definition: signature.h:48
ff_get_video_buffer
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:112
FSCALE_LINEAR
@ FSCALE_LINEAR
Definition: avf_showcwt.c:40
ISCALE_LINEAR
@ ISCALE_LINEAR
Definition: avf_showcwt.c:54
ff_get_audio_buffer
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
Definition: audio.c:97
IntensityScale
IntensityScale
Definition: avf_showcwt.c:52
AV_SAMPLE_FMT_FLTP
@ AV_SAMPLE_FMT_FLTP
float, planar
Definition: samplefmt.h:66
AVFilterChannelLayouts
A list of supported channel layouts.
Definition: formats.h:85
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
ShowCWTContext::frequency_scale
int frequency_scale
Definition: avf_showcwt.c:120
FSCALE_FM
@ FSCALE_FM
Definition: avf_showcwt.c:48
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_make_format_list
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:435
DIRECTION_LR
@ DIRECTION_LR
Definition: avf_showcwt.c:62
out
FILE * out
Definition: movenc.c:54
run_channels_cwt_prepare
static int run_channels_cwt_prepare(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: avf_showcwt.c:1229
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:258
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:250
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1018
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:947
ff_channel_layouts_ref
int ff_channel_layouts_ref(AVFilterChannelLayouts *f, AVFilterChannelLayouts **ref)
Add *ref as a new reference to f.
Definition: formats.c:673
layouts
enum MovChannelLayoutTag * layouts
Definition: mov_chan.c:261
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
FSCALE_ERBS
@ FSCALE_ERBS
Definition: avf_showcwt.c:44
FFERROR_NOT_READY
return FFERROR_NOT_READY
Definition: filter_design.txt:204
AVTXContext
Definition: tx_priv.h:235
atan2f
#define atan2f(y, x)
Definition: libm.h:45
ShowCWTContext::input_sample_count
int input_sample_count
Definition: avf_showcwt.c:116
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
ShowCWTContext::rate_str
char * rate_str
Definition: avf_showcwt.c:80
ShowCWTContext::itx_fn
av_tx_fn itx_fn
Definition: avf_showcwt.c:84
ShowCWTContext::bar_size
int bar_size
Definition: avf_showcwt.c:125
ff_clz
#define ff_clz
Definition: intmath.h:143
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:130
ff_all_channel_counts
AVFilterChannelLayouts * ff_all_channel_counts(void)
Construct an AVFilterChannelLayouts coding for any channel layout, with known or unknown disposition.
Definition: formats.c:621
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:344
FSCALE_SQRT
@ FSCALE_SQRT
Definition: avf_showcwt.c:45
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:456
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:683
FSCALE_LOG
@ FSCALE_LOG
Definition: avf_showcwt.c:41
M_PI_2
#define M_PI_2
Definition: mathematics.h:73
ISCALE_LOG
@ ISCALE_LOG
Definition: avf_showcwt.c:53
ShowCWTContext::bh_out
AVFrame * bh_out
Definition: avf_showcwt.c:104
AVOption
AVOption.
Definition: opt.h:346
ShowCWTContext::slide
int slide
Definition: avf_showcwt.c:110
b
#define b
Definition: input.c:41
showcwt_options
static const AVOption showcwt_options[]
Definition: avf_showcwt.c:135
FILTER_QUERY_FUNC
#define FILTER_QUERY_FUNC(func)
Definition: internal.h:159
ShowCWTContext::nb_threads
int nb_threads
Definition: avf_showcwt.c:105
expf
#define expf(x)
Definition: libm.h:283
FLAGS
#define FLAGS
Definition: avf_showcwt.c:133
float.h
AVComplexFloat
Definition: tx.h:27
max
#define max(a, b)
Definition: cuda_runtime.h:33
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:170
output_frame
static int output_frame(AVFilterContext *ctx)
Definition: avf_showcwt.c:1045
video.h
FF_FILTER_FORWARD_STATUS_BACK
#define FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink)
Forward the status on an output link to an input link.
Definition: filters.h:199
ShowCWTContext::index
unsigned * index
Definition: avf_showcwt.c:92
ShowCWTContext::ifft_size
int ifft_size
Definition: avf_showcwt.c:85
av_tx_init
av_cold int av_tx_init(AVTXContext **ctx, av_tx_fn *tx, enum AVTXType type, int inv, int len, const void *scale, uint64_t flags)
Initialize a transform context with the given configuration (i)MDCTs with an odd length are currently...
Definition: tx.c:902
DIRECTION_RL
@ DIRECTION_RL
Definition: avf_showcwt.c:63
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:64
formats.h
SLIDE_REPLACE
@ SLIDE_REPLACE
Definition: avf_showcwt.c:70
AVComplexFloat::im
float im
Definition: tx.h:28
ISCALE_QDRT
@ ISCALE_QDRT
Definition: avf_showcwt.c:57
ff_avf_showcwt
const AVFilter ff_avf_showcwt
Definition: avf_showcwt.c:1329
cosf
#define cosf(x)
Definition: libm.h:78
fail
#define fail()
Definition: checkasm.h:179
log10f
#define log10f(x)
Definition: libm.h:414
NB_FSCALE
@ NB_FSCALE
Definition: avf_showcwt.c:49
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: avf_showcwt.c:185
pts
static int64_t pts
Definition: transcode_aac.c:643
ShowCWTContext::kernel_start
int * kernel_start
Definition: avf_showcwt.c:93
ShowCWTContext::output_sample_count
int output_sample_count
Definition: avf_showcwt.c:116
FSCALE_CBRT
@ FSCALE_CBRT
Definition: avf_showcwt.c:46
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:33
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:118
ShowCWTContext::frequency_band
float * frequency_band
Definition: avf_showcwt.c:90
avassert.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
av_tx_fn
void(* av_tx_fn)(AVTXContext *s, void *out, void *in, ptrdiff_t stride)
Function pointer to a function to perform the transform.
Definition: tx.h:151
float
float
Definition: af_crystalizer.c:121
ff_outlink_set_status
static void ff_outlink_set_status(AVFilterLink *link, int status, int64_t pts)
Set the status field of a link from the source filter.
Definition: filters.h:189
ff_inlink_request_frame
void ff_inlink_request_frame(AVFilterLink *link)
Mark that a frame is wanted on the link.
Definition: avfilter.c:1571
s
#define s(width, name)
Definition: cbs_vp9.c:198
ShowCWTContext::logarithmic_basis
float logarithmic_basis
Definition: avf_showcwt.c:118
ShowCWTContext::eof_pts
int64_t eof_pts
Definition: avf_showcwt.c:89
ShowCWTContext::frequency_band_count
int frequency_band_count
Definition: avf_showcwt.c:117
ff_formats_ref
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:678
ShowCWTContext::nb_channels
int nb_channels
Definition: avf_showcwt.c:106
fminf
float fminf(float, float)
filters.h
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
AV_TX_FLOAT_FFT
@ AV_TX_FLOAT_FFT
Standard complex to complex FFT with sample data type of AVComplexFloat, AVComplexDouble or AVComplex...
Definition: tx.h:47
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
ctx
AVFormatContext * ctx
Definition: movenc.c:48
ShowCWTContext::maximum_frequency
float maximum_frequency
Definition: avf_showcwt.c:121
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
ShowCWTContext::direction
int direction
Definition: avf_showcwt.c:112
ShowCWTContext
Definition: avf_showcwt.c:76
ShowCWTContext::fft_in
AVFrame * fft_in
Definition: avf_showcwt.c:96
fsize
static int64_t fsize(FILE *f)
Definition: audiomatch.c:29
ShowCWTContext::ifft_out
AVFrame * ifft_out
Definition: avf_showcwt.c:101
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: internal.h:182
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
ShowCWTContext::fft
AVTXContext ** fft
Definition: avf_showcwt.c:83
arg
const char * arg
Definition: jacosubdec.c:67
ShowCWTContext::deviation
float deviation
Definition: avf_showcwt.c:123
if
if(ret)
Definition: filter_design.txt:179
ShowCWTContext::ch_out
AVFrame * ch_out
Definition: avf_showcwt.c:102
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
ff_inlink_consume_samples
int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max, AVFrame **rframe)
Take samples from the link's FIFO and update the link's stats.
Definition: avfilter.c:1465
NULL
#define NULL
Definition: coverity.c:32
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:679
ShowCWTContext::rotation
float rotation
Definition: avf_showcwt.c:127
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
ShowCWTContext::bar_ratio
float bar_ratio
Definition: avf_showcwt.c:124
AV_OPT_TYPE_IMAGE_SIZE
@ AV_OPT_TYPE_IMAGE_SIZE
offset must point to two consecutive integers
Definition: opt.h:245
ShowCWTContext::ihop_size
int ihop_size
Definition: avf_showcwt.c:113
V
#define V
Definition: avdct.c:30
ShowCWTContext::outpicref
AVFrame * outpicref
Definition: avf_showcwt.c:95
parseutils.h
ShowCWTContext::ihop_index
int ihop_index
Definition: avf_showcwt.c:114
ShowCWTContext::cache
AVFrame * cache
Definition: avf_showcwt.c:94
ff_audio_default_filterpad
const AVFilterPad ff_audio_default_filterpad[1]
An AVFilterPad array whose only entry has name "default" and is of type AVMEDIA_TYPE_AUDIO.
Definition: audio.c:33
sqrtf
static __device__ float sqrtf(float a)
Definition: cuda_runtime.h:184
av_cpu_max_align
size_t av_cpu_max_align(void)
Get the maximum data alignment that may be required by FFmpeg.
Definition: cpu.c:268
ShowCWTContext::kernel
AVComplexFloat ** kernel
Definition: avf_showcwt.c:91
ShowCWTContext::h
int h
Definition: avf_showcwt.c:78
sinf
#define sinf(x)
Definition: libm.h:419
av_clipf
av_clipf
Definition: af_crystalizer.c:121
DIRECTION_DU
@ DIRECTION_DU
Definition: avf_showcwt.c:65
compute_kernel
static int compute_kernel(AVFilterContext *ctx)
Definition: avf_showcwt.c:723
ISCALE_CBRT
@ ISCALE_CBRT
Definition: avf_showcwt.c:56
ff_inlink_acknowledge_status
int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
Test and acknowledge the change of status on the link.
Definition: avfilter.c:1392
index
int index
Definition: gxfenc.c:89
float_dsp.h
ShowCWTContext::fft_out
AVFrame * fft_out
Definition: avf_showcwt.c:97
ShowCWTContext::over
AVFrame * over
Definition: avf_showcwt.c:103
f
f
Definition: af_crystalizer.c:121
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: vvc_intra.c:291
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:106
powf
#define powf(x, y)
Definition: libm.h:50
pps
static int FUNC() pps(CodedBitstreamContext *ctx, RWContext *rw, H264RawPPS *current)
Definition: cbs_h264_syntax_template.c:404
av_frame_copy
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
Definition: frame.c:890
cpu.h
ISCALE_SQRT
@ ISCALE_SQRT
Definition: avf_showcwt.c:55
ShowCWTContext::src_x
AVFrame * src_x
Definition: avf_showcwt.c:99
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
run_channel_cwt
static int run_channel_cwt(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: avf_showcwt.c:652
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
size
int size
Definition: twinvq_data.h:10344
av_make_q
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
ShowCWTContext::in_pts
int64_t in_pts
Definition: avf_showcwt.c:87
AVComplexFloat::re
float re
Definition: tx.h:28
ShowCWTContext::maximum_intensity
float maximum_intensity
Definition: avf_showcwt.c:122
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
ShowCWTContext::intensity_scale
int intensity_scale
Definition: avf_showcwt.c:119
DIRECTION_UD
@ DIRECTION_UD
Definition: avf_showcwt.c:64
AVFloatDSPContext
Definition: float_dsp.h:22
range
enum AVColorRange range
Definition: mediacodec_wrapper.c:2557
ShowCWTContext::mode
int mode
Definition: avf_showcwt.c:79
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: avf_showcwt.c:227
a
The reader does not expect b to be semantically unsigned here, and if the code is changed, by maybe adding a cast, a division or another operation, the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type but it holds a signed int (to use the same example, SUINT a).
Definition: undefined.txt:41
FrequencyScale
FrequencyScale
Definition: avf_showcwt.c:39
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:174
ShowCWTContext::minimum_intensity
float minimum_intensity
Definition: avf_showcwt.c:122
draw_bar
static void draw_bar(ShowCWTContext *s, int y, float Y, float U, float V)
Definition: avf_showcwt.c:390
frequency_band
static float frequency_band(float *frequency_band, int frequency_band_count, float frequency_range, float frequency_offset, int frequency_scale, float deviation)
Definition: avf_showcwt.c:256
offset
It's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just let it be.
Definition: writing_filters.txt:86
ShowCWTContext::ifft
AVTXContext ** ifft
Definition: avf_showcwt.c:83
input
Test the status of the outputs and forward it to the corresponding inputs, and return FFERROR_NOT_READY if the filter stores internally one or a few frames for some input.
Definition: filter_design.txt:172
M_PI
#define M_PI
Definition: mathematics.h:67
Y
#define Y
Definition: boxblur.h:37
av_tx_uninit
av_cold void av_tx_uninit(AVTXContext **ctx)
Frees a context and sets *ctx to NULL, does nothing when *ctx == NULL.
Definition: tx.c:294
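A minimal lifecycle sketch for the libavutil/tx.h API, with an arbitrary transform length of 512: create a float FFT context, run it through the returned callback, then free it with av_tx_uninit().
#include "libavutil/tx.h"
AVTXContext *fft = NULL;
av_tx_fn tx_fn;
float scale = 1.f;
int ret = av_tx_init(&fft, &tx_fn, AV_TX_FLOAT_FFT, 0, 512, &scale, 0);
if (ret < 0)
    return ret;
/* tx_fn(fft, dst, src, sizeof(AVComplexFloat));  dst/src are caller-provided buffers */
av_tx_uninit(&fft);   /* frees the context and sets fft back to NULL */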
NB_DIRECTION
@ NB_DIRECTION
Definition: avf_showcwt.c:66
draw
static int draw(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: avf_showcwt.c:451
internal.h
AV_OPT_TYPE_FLOAT
@ AV_OPT_TYPE_FLOAT
Definition: opt.h:238
ShowCWTContext::nb_consumed_samples
int nb_consumed_samples
Definition: avf_showcwt.c:107
av_parse_video_rate
int av_parse_video_rate(AVRational *rate, const char *arg)
Parse str and store the detected values in *rate.
Definition: parseutils.c:181
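A hedged example using the filter's default rate string; an NTSC-style string such as "30000/1001" is parsed the same way.
AVRational rate;
int ret = av_parse_video_rate(&rate, "25");
if (ret < 0)
    return ret;   /* the string is not a valid video rate */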
ShowCWTContext::output_padding_size
int output_padding_size
Definition: avf_showcwt.c:115
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:424
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
ShowCWTContext::kernel_stop
int * kernel_stop
Definition: avf_showcwt.c:93
run_channel_cwt_prepare
static int run_channel_cwt_prepare(AVFilterContext *ctx, void *arg, int jobnr, int ch)
Definition: avf_showcwt.c:346
AVFrame::extended_data
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:405
DirectionMode
DirectionMode
Definition: avf_showcwt.c:61
src2
const pixel * src2
Definition: h264pred_template.c:422
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
NB_SLIDE
@ NB_SLIDE
Definition: avf_showcwt.c:73
ff_filter_get_nb_threads
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:825
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
value
It's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just let it take its default value.
Definition: writing_filters.txt:86
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
cbrtf
static av_always_inline float cbrtf(float x)
Definition: libm.h:61
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(showcwt)
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
ShowCWTContext::tx_fn
av_tx_fn tx_fn
Definition: avf_showcwt.c:84
ShowCWTContext::sono_size
int sono_size
Definition: avf_showcwt.c:126
OFFSET
#define OFFSET(x)
Definition: avf_showcwt.c:132
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:39
ff_inlink_queued_samples
int ff_inlink_queued_samples(AVFilterLink *link)
Definition: avfilter.c:1420
av_rescale
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:129
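A small worked example with arbitrary numbers: converting one second of 44100 Hz samples into 90 kHz clock ticks computes 44100 * 90000 / 44100 with rounding to nearest.
int64_t ticks = av_rescale(44100, 90000, 44100);   /* -> 90000 */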
ShowCWTContext::minimum_frequency
float minimum_frequency
Definition: avf_showcwt.c:121
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:262
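A hedged allocation example; the nb_bands count variable and the error path are placeholders, not this filter's exact code.
int *stops = av_calloc(nb_bands, sizeof(*stops));  /* zeroed array, NULL on overflow or failure */
if (!stops)
    return AVERROR(ENOMEM);
/* ... */
av_freep(&stops);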
activate
static int activate(AVFilterContext *ctx)
Definition: avf_showcwt.c:1242
ShowCWTContext::auto_frame_rate
AVRational auto_frame_rate
Definition: avf_showcwt.c:81
ShowCWTContext::hop_index
int hop_index
Definition: avf_showcwt.c:114
AVFilter
Filter definition.
Definition: avfilter.h:166
ret
ret
Definition: filter_design.txt:187
ShowCWTContext::frame_rate
AVRational frame_rate
Definition: avf_showcwt.c:82
NB_ISCALE
@ NB_ISCALE
Definition: avf_showcwt.c:58
ShowCWTContext::fdsp
AVFloatDSPContext * fdsp
Definition: avf_showcwt.c:129
ShowCWTContext::pos
int pos
Definition: avf_showcwt.c:86
config_output
static int config_output(AVFilterLink *outlink)
Definition: avf_showcwt.c:812
U
#define U(x)
Definition: vpx_arith.h:37
FSCALE_QDRT
@ FSCALE_QDRT
Definition: avf_showcwt.c:47
SLIDE_SCROLL
@ SLIDE_SCROLL
Definition: avf_showcwt.c:71
ShowCWTContext::fft_size
int fft_size
Definition: avf_showcwt.c:85
ShowCWTContext::pps
int pps
Definition: avf_showcwt.c:108
ff_all_samplerates
AVFilterFormats * ff_all_samplerates(void)
Definition: formats.c:606
status
ov_status_e status
Definition: dnn_backend_openvino.c:120
channel_layout.h
mode
mode
Definition: ebur128.h:83
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:235
avfilter.h
DRAW_BAR_COLOR
#define DRAW_BAR_COLOR(x)
Definition: avf_showcwt.c:376
av_clip_uint8
#define av_clip_uint8
Definition: common.h:104
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
AVFilterContext
An instance of a filter.
Definition: avfilter.h:407
factor
static const int factor[16]
Definition: vf_pp7.c:78
ShowCWTContext::old_pts
int64_t old_pts
Definition: avf_showcwt.c:88
ShowCWTContext::hop_size
int hop_size
Definition: avf_showcwt.c:113
ShowCWTContext::eof
int eof
Definition: avf_showcwt.c:109
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concurrently.
Definition: avfilter.h:117
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
audio.h
AVFilterFormatsConfig::formats
AVFilterFormats * formats
List of supported formats (pixel or sample).
Definition: avfilter.h:510
ShowCWTContext::w
int w
Definition: avf_showcwt.c:78
FSCALE_BARK
@ FSCALE_BARK
Definition: avf_showcwt.c:42
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: internal.h:183
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
avpriv_float_dsp_alloc
av_cold AVFloatDSPContext * avpriv_float_dsp_alloc(int bit_exact)
Allocate a float DSP context.
Definition: float_dsp.c:135
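A minimal sketch of the usual lifecycle: allocate the DSP context once at init (bit_exact = 0 here), call through its function pointers, and release it with av_freep(); see float_dsp.h for the alignment and length constraints of each routine.
AVFloatDSPContext *fdsp = avpriv_float_dsp_alloc(0);
if (!fdsp)
    return AVERROR(ENOMEM);
/* ... e.g. fdsp->vector_fmul(dst, src0, src1, len); ... */
av_freep(&fdsp);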
ShowCWTContext::new_frame
int new_frame
Definition: avf_showcwt.c:111
ShowCWTContext::input_padding_size
int input_padding_size
Definition: avf_showcwt.c:115
ShowCWTContext::ifft_in
AVFrame * ifft_in
Definition: avf_showcwt.c:100
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
h
h
Definition: vp9dsp_template.c:2038
ff_outlink_frame_wanted
The definition of that something depends on the semantics of the filter. The callback must examine the status of the filter's links and proceed accordingly. The status of output links is stored in the status_in and status_out fields and is tested by the ff_outlink_frame_wanted() function; if it returns true, a frame is wanted on that output.
FSCALE_MEL
@ FSCALE_MEL
Definition: avf_showcwt.c:43
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:239
ff_filter_execute
static av_always_inline int ff_filter_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg, int *ret, int nb_jobs)
Definition: internal.h:134
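A hedged sketch of the usual slice-threading call: dispatch a worker such as run_channel_cwt() across up to one job per thread. The job count s->frequency_band_count and the NULL arg/ret pointers are illustrative, not necessarily what this filter passes.
ff_filter_execute(ctx, run_channel_cwt, NULL, NULL,
                  FFMIN(s->frequency_band_count, ff_filter_get_nb_threads(ctx)));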
showcwt_outputs
static const AVFilterPad showcwt_outputs[]
Definition: avf_showcwt.c:1321
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:244
remap_log
static float remap_log(ShowCWTContext *s, float value, int iscale, float log_factor)
Definition: avf_showcwt.c:313
skip
static void BS_FUNC() skip(BSCTX *bc, unsigned int n)
Skip n bits in the buffer.
Definition: bitstream_template.h:375
SlideMode
SlideMode
Definition: avf_ahistogram.c:33
ff_filter_set_ready
void ff_filter_set_ready(AVFilterContext *filter, unsigned priority)
Mark a filter ready and schedule it for activation.
Definition: avfilter.c:234
tx.h
min
float min
Definition: vorbis_enc_data.h:429
SLIDE_FRAME
@ SLIDE_FRAME
Definition: avf_showcwt.c:72