30 #define MEASURE_ALL UINT_MAX
31 #define MEASURE_NONE 0
33 #define MEASURE_DC_OFFSET (1 << 0)
34 #define MEASURE_MIN_LEVEL (1 << 1)
35 #define MEASURE_MAX_LEVEL (1 << 2)
36 #define MEASURE_MIN_DIFFERENCE (1 << 3)
37 #define MEASURE_MAX_DIFFERENCE (1 << 4)
38 #define MEASURE_MEAN_DIFFERENCE (1 << 5)
39 #define MEASURE_RMS_DIFFERENCE (1 << 6)
40 #define MEASURE_PEAK_LEVEL (1 << 7)
41 #define MEASURE_RMS_LEVEL (1 << 8)
42 #define MEASURE_RMS_PEAK (1 << 9)
43 #define MEASURE_RMS_TROUGH (1 << 10)
44 #define MEASURE_CREST_FACTOR (1 << 11)
45 #define MEASURE_FLAT_FACTOR (1 << 12)
46 #define MEASURE_PEAK_COUNT (1 << 13)
47 #define MEASURE_BIT_DEPTH (1 << 14)
48 #define MEASURE_DYNAMIC_RANGE (1 << 15)
49 #define MEASURE_ZERO_CROSSINGS (1 << 16)
50 #define MEASURE_ZERO_CROSSINGS_RATE (1 << 17)
51 #define MEASURE_NUMBER_OF_SAMPLES (1 << 18)
52 #define MEASURE_NUMBER_OF_NANS (1 << 19)
53 #define MEASURE_NUMBER_OF_INFS (1 << 20)
54 #define MEASURE_NUMBER_OF_DENORMALS (1 << 21)
56 #define MEASURE_MINMAXPEAK (MEASURE_MIN_LEVEL | MEASURE_MAX_LEVEL | MEASURE_PEAK_LEVEL)
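The measurement selectors above are single-bit flags, so a set of requested measurements is simply their bitwise OR, and membership is tested with a bitwise AND (line 501 below uses & ~MEASURE_MINMAXPEAK in exactly this way). A minimal standalone sketch of the pattern, with the flag values copied from the listing and an arbitrary request:

    #include <stdio.h>

    /* Redefined locally for the sketch; same values as in the listing above. */
    #define MEASURE_MIN_LEVEL  (1 << 1)
    #define MEASURE_MAX_LEVEL  (1 << 2)
    #define MEASURE_PEAK_LEVEL (1 << 7)
    #define MEASURE_MINMAXPEAK (MEASURE_MIN_LEVEL | MEASURE_MAX_LEVEL | MEASURE_PEAK_LEVEL)

    int main(void)
    {
        unsigned measure = MEASURE_MAX_LEVEL | MEASURE_PEAK_LEVEL; /* arbitrary request */

        if (measure & MEASURE_PEAK_LEVEL)
            printf("peak level requested\n");
        if (measure & ~MEASURE_MINMAXPEAK)
            printf("something beyond min/max/peak requested\n"); /* not printed here */
        return 0;
    }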
97 #define OFFSET(x) offsetof(AudioStatsContext, x)
98 #define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
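OFFSET() records the byte offset of a field inside the filter's private context so the generic AVOption machinery can write parsed option values straight into that field, and FLAGS marks the options as audio filtering parameters. A hedged sketch of how an option entry typically consumes these macros; it assumes the context's double time_constant field referenced later in this listing, and the option name, help text, default and range here are illustrative, not necessarily the filter's actual table:

    static const AVOption astats_options_sketch[] = {
        /* name      help (illustrative)         field                  type                default          min   max  flags */
        { "length", "window length in seconds",  OFFSET(time_constant), AV_OPT_TYPE_DOUBLE, { .dbl = 0.05 }, 0.01, 10,  FLAGS },
        { NULL }
    };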
173 for (c = 0; c < s->nb_channels; c++) {
191 p->imask = 0xFFFFFFFFFFFFFFFF;
212 s->tc_samples = 5 * s->time_constant * outlink->sample_rate + .5;
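Line 212 sizes a window of five time constants, rounded to the nearest whole sample. A quick standalone check with assumed values (a 0.05 s time constant and a 44100 Hz output rate, neither taken from this listing):

    #include <stdio.h>

    int main(void)
    {
        double time_constant = 0.05;  /* hypothetical "length" setting */
        int    sample_rate   = 44100; /* hypothetical output sample rate */
        int    tc_samples    = 5 * time_constant * sample_rate + .5;

        printf("tc_samples = %d\n", tc_samples); /* prints 11025 */
        return 0;
    }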
228 unsigned result = s->maxbitdepth;
258 } else if (d == p->min) {
261 } else if (p->last == p->min) {
274 } else if (d == p->max) {
277 } else if (p->last == p->max) {
308 int type = fpclassify(d);
317 int type = fpclassify(d);
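Lines 308 and 317 classify each float or double sample so NaNs, infinities and denormals can be counted. A self-contained illustration of the same fpclassify() dispatch on made-up values:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        const double samples[] = { 0.25, NAN, INFINITY, 1e-320 }; /* 1e-320 is subnormal */
        unsigned nb_nans = 0, nb_infs = 0, nb_denormals = 0;

        for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            switch (fpclassify(samples[i])) {
            case FP_NAN:       nb_nans++;      break;
            case FP_INFINITE:  nb_infs++;      break;
            case FP_SUBNORMAL: nb_denormals++; break;
            }
        }
        printf("NaNs: %u, Infs: %u, denormals: %u\n", nb_nans, nb_infs, nb_denormals);
        return 0;
    }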
325 static void set_meta(AVDictionary **metadata, int chan, const char *key, const char *fmt, double val)
332 snprintf(key2, sizeof(key2), "lavfi.astats.%d.%s", chan, key);
334 snprintf(key2, sizeof(key2), "lavfi.astats.%s", key);
338 #define LINEAR_TO_DB(x) (log10(x) * 20)
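LINEAR_TO_DB() converts a linear amplitude ratio to decibels via 20*log10(x): a ratio of 0.5 is about -6.02 dB and 1.0 is exactly 0 dB. A standalone check:

    #include <math.h>
    #include <stdio.h>

    #define LINEAR_TO_DB(x) (log10(x) * 20)

    int main(void)
    {
        printf("%f dB\n", LINEAR_TO_DB(0.5)); /* about -6.020600 */
        printf("%f dB\n", LINEAR_TO_DB(1.0)); /* 0.000000 */
        return 0;
    }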
342 uint64_t mask = 0, imask = 0xFFFFFFFFFFFFFFFF, min_count = 0, max_count = 0, nb_samples = 0;
343 uint64_t nb_nans = 0, nb_infs = 0, nb_denormals = 0;
344 double min_runs = 0, max_runs = 0,
345 min = DBL_MAX, max = -DBL_MAX, min_diff = DBL_MAX, max_diff = 0,
346 nmin = DBL_MAX, nmax = -DBL_MAX,
352 min_sigma_x2 = DBL_MAX,
353 max_sigma_x2 = -DBL_MAX;
357 for (c = 0; c < s->nb_channels; c++) {
385 if (fabs(p->sigma_x) > fabs(max_sigma_x))
418 set_meta(metadata, c + 1, "Bit_depth", "%f", depth.num);
419 set_meta(metadata, c + 1, "Bit_depth2", "%f", depth.den);
436 set_meta(metadata, 0, "Overall.DC_offset", "%f", max_sigma_x / (nb_samples / s->nb_channels));
438 set_meta(metadata, 0, "Overall.Min_level", "%f", min);
440 set_meta(metadata, 0, "Overall.Max_level", "%f", max);
442 set_meta(metadata, 0, "Overall.Min_difference", "%f", min_diff);
444 set_meta(metadata, 0, "Overall.Max_difference", "%f", max_diff);
446 set_meta(metadata, 0, "Overall.Mean_difference", "%f", diff1_sum / (nb_samples - s->nb_channels));
448 set_meta(metadata, 0, "Overall.RMS_difference", "%f", sqrt(diff1_sum_x2 / (nb_samples - s->nb_channels)));
458 set_meta(metadata, 0, "Overall.Flat_factor", "%f", LINEAR_TO_DB((min_runs + max_runs) / (min_count + max_count)));
460 set_meta(metadata, 0, "Overall.Peak_count", "%f", (float)(min_count + max_count) / (double)s->nb_channels);
463 set_meta(metadata, 0, "Overall.Bit_depth", "%f", depth.num);
464 set_meta(metadata, 0, "Overall.Bit_depth2", "%f", depth.den);
467 set_meta(metadata, 0, "Overall.Number_of_samples", "%f", nb_samples / s->nb_channels);
469 set_meta(metadata, 0, "Number of NaNs", "%f", nb_nans / (float)s->nb_channels);
471 set_meta(metadata, 0, "Number of Infs", "%f", nb_infs / (float)s->nb_channels);
473 set_meta(metadata, 0, "Number of denormals", "%f", nb_denormals / (float)s->nb_channels);
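The set_meta() calls above publish each statistic on the outgoing frame under keys of the form lavfi.astats.<channel>.<name> (per channel) or lavfi.astats.<name> for the overall values. A hedged sketch of how a downstream consumer could read one of these keys with the libavutil dictionary API; it assumes frame is an AVFrame that has already passed through astats with metadata injection enabled:

    #include <stdio.h>
    #include <libavutil/dict.h>
    #include <libavutil/frame.h>

    static void print_overall_dc_offset(const AVFrame *frame)
    {
        const AVDictionaryEntry *e =
            av_dict_get(frame->metadata, "lavfi.astats.Overall.DC_offset", NULL, 0);

        if (e)
            printf("Overall DC offset: %s\n", e->value); /* values are stored as strings */
    }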
476 #define UPDATE_STATS_P(type, update_func, update_float, channel_func) \
477 for (int c = 0; c < channels; c++) { \
478 ChannelStats *p = &s->chstats[c]; \
479 const type *src = (const type *)data[c]; \
480 const type * const srcend = src + samples; \
481 for (; src < srcend; src++) { \
488 #define UPDATE_STATS_I(type, update_func, update_float, channel_func) \
489 for (int c = 0; c < channels; c++) { \
490 ChannelStats *p = &s->chstats[c]; \
491 const type *src = (const type *)data[0]; \
492 const type * const srcend = src + samples * channels; \
493 for (src += c; src < srcend; src += channels) { \
500 #define UPDATE_STATS(planar, type, sample, normalizer_suffix, int_sample) \
501 if ((s->measure_overall | s->measure_perchannel) & ~MEASURE_MINMAXPEAK) { \
502 UPDATE_STATS_##planar(type, update_stat(s, p, sample, sample normalizer_suffix, int_sample), s->is_float ? update_float_stat(s, p, sample) : s->is_double ? update_double_stat(s, p, sample) : (void)NULL, ); \
504 UPDATE_STATS_##planar(type, update_minmax(s, p, sample), , p->nmin = p->min normalizer_suffix; p->nmax = p->max normalizer_suffix;); \
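UPDATE_STATS_P and UPDATE_STATS_I differ only in how they walk the input: planar formats keep each channel in its own data[c] plane, while packed formats interleave every channel in data[0] and the inner loop steps by the channel count. A standalone sketch of the two traversal patterns with made-up values:

    #include <stdio.h>

    int main(void)
    {
        enum { CHANNELS = 2, SAMPLES = 4 };
        const float left[SAMPLES]  = {  0.1f,  0.2f,  0.3f,  0.4f };
        const float right[SAMPLES] = { -0.1f, -0.2f, -0.3f, -0.4f };
        const float *planar[CHANNELS] = { left, right };
        const float interleaved[CHANNELS * SAMPLES] =
            { 0.1f, -0.1f, 0.2f, -0.2f, 0.3f, -0.3f, 0.4f, -0.4f };

        /* UPDATE_STATS_P style: one contiguous run per channel plane */
        for (int c = 0; c < CHANNELS; c++)
            for (const float *src = planar[c]; src < planar[c] + SAMPLES; src++)
                printf("planar      ch%d: % .1f\n", c, *src);

        /* UPDATE_STATS_I style: start at the channel offset, step by the channel count */
        for (int c = 0; c < CHANNELS; c++)
            for (const float *src = interleaved + c; src < interleaved + CHANNELS * SAMPLES; src += CHANNELS)
                printf("interleaved ch%d: % .1f\n", c, *src);
        return 0;
    }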
515 if (s->reset_count > 0) {
516 if (s->nb_frames >= s->reset_count) {
565 uint64_t mask = 0, imask = 0xFFFFFFFFFFFFFFFF, min_count = 0, max_count = 0, nb_samples = 0;
566 uint64_t nb_nans = 0, nb_infs = 0, nb_denormals = 0;
567 double min_runs = 0, max_runs = 0,
568 min = DBL_MAX, max = -DBL_MAX, min_diff = DBL_MAX, max_diff = 0,
569 nmin = DBL_MAX, nmax = -DBL_MAX,
575 min_sigma_x2 = DBL_MAX,
576 max_sigma_x2 = -DBL_MAX;
580 for (c = 0; c < s->nb_channels; c++) {
608 if (fabs(p->sigma_x) > fabs(max_sigma_x))
673 av_log(ctx, AV_LOG_INFO, "RMS difference: %f\n", sqrt(diff1_sum_x2 / (nb_samples - s->nb_channels)));
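The divisor nb_samples - s->nb_channels used for the mean and RMS differences reflects that each channel has one fewer sample-to-sample difference than it has samples. A tiny standalone check with one channel and three made-up samples (so two differences):

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        const double samples[] = { 0.0, 0.5, 0.25 }; /* one channel, three samples */
        const int nb_samples = 3, nb_channels = 1;
        double diff1_sum_x2 = 0.0;

        for (int i = 1; i < nb_samples; i++) {
            double d = samples[i] - samples[i - 1];
            diff1_sum_x2 += d * d;
        }
        /* differences 0.5 and -0.25 -> sqrt((0.25 + 0.0625) / 2) ~= 0.395 */
        printf("RMS difference: %f\n", sqrt(diff1_sum_x2 / (nb_samples - nb_channels)));
        return 0;
    }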
681 if (min_sigma_x2 != 1)
733 .priv_class = &astats_class,