    if (map->src.prim != map->dst.prim ||
        map->src.trc != map->dst.trc)

    switch (map->intent) {
 
static const float hull(float I)
{
    return ((I - 6.0f) * I + 9.0f) * I;
}
 
    const float Imax = pq_oetf(Lw);
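pq_oetf here is the SMPTE ST 2084 (PQ) inverse EOTF, used to turn the display's white level Lw into an encoded intensity ceiling Imax. As a reference, a stand-alone sketch of that transfer follows; the function name is only for this sketch, the constants are the standard PQ m1/m2/c1/c2/c3 values with luminance normalized against the 10000 cd/m^2 PQ reference peak.

#include <math.h>

/* Sketch of the SMPTE ST 2084 (PQ) OETF / inverse EOTF.
 * x is absolute luminance in cd/m^2; the result is a PQ signal in [0,1]. */
static float pq_oetf_sketch(float x)
{
    const float m1 = 2610.0f / 16384.0f;
    const float m2 = 2523.0f / 4096.0f * 128.0f;
    const float c1 = 3424.0f / 4096.0f;
    const float c2 = 2413.0f / 4096.0f * 32.0f;
    const float c3 = 2392.0f / 4096.0f * 32.0f;
    float y = powf(fmaxf(x / 10000.0f, 0.0f), m1);
    return powf((c1 + c2 * y) / (1.0f + c3 * y), m2);
}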
 
        .I = 0.4000f * Lp + 0.4000f * Mp + 0.2000f * Sp,
        .P = 4.4550f * Lp - 4.8510f * Mp + 0.3960f * Sp,
        .T = 0.8056f * Lp + 0.3572f * Mp - 1.1628f * Sp,

    const float Lp = c.I + 0.0975689f * c.P + 0.205226f * c.T;
    const float Mp = c.I - 0.1138760f * c.P + 0.133217f * c.T;
    const float Sp = c.I + 0.0326151f * c.P - 0.676887f * c.T;
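The first three rows above convert nonlinear L'M'S' to IPT; the Lp/Mp/Sp lines apply the inverse matrix. A self-contained sketch that treats both as plain 3x3 matrices and round-trips one sample; the helper and array names are only for this sketch.

#include <stdio.h>

/* Hypothetical helper: multiply a 3x3 matrix with a 3-vector. */
static void mat3_apply(const float m[3][3], const float in[3], float out[3])
{
    for (int i = 0; i < 3; i++)
        out[i] = m[i][0] * in[0] + m[i][1] * in[1] + m[i][2] * in[2];
}

int main(void)
{
    /* LMS' -> IPT rows and the inverse rows quoted above. */
    static const float lms2ipt[3][3] = {
        { 0.4000f,  0.4000f,  0.2000f },
        { 4.4550f, -4.8510f,  0.3960f },
        { 0.8056f,  0.3572f, -1.1628f },
    };
    static const float ipt2lms[3][3] = {
        { 1.0f,  0.0975689f,  0.205226f },
        { 1.0f, -0.1138760f,  0.133217f },
        { 1.0f,  0.0326151f, -0.676887f },
    };
    const float lms[3] = { 0.3f, 0.5f, 0.2f };
    float ipt[3], back[3];

    mat3_apply(lms2ipt, lms, ipt);   /* forward: LMS' -> IPT */
    mat3_apply(ipt2lms, ipt, back);  /* inverse: IPT -> LMS' */
    printf("round trip: %f %f %f\n", back[0], back[1], back[2]);
    return 0;
}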
 
    const float min_rgb = gamut.Lb - 1e-4f;
    const float max_rgb = gamut.Lw + 1e-2f;
    const float Lp = c.I + 0.0975689f * c.P + 0.205226f * c.T;
    const float Mp = c.I - 0.1138760f * c.P + 0.133217f * c.T;
    const float Sp = c.I + 0.0326151f * c.P - 0.676887f * c.T;
    if (Lp < gamut.Imin || Lp > gamut.Imax ||
        Mp < gamut.Imin || Mp > gamut.Imax ||
        Sp < gamut.Imin || Sp > gamut.Imax)

        return rgb.R >= min_rgb && rgb.R <= max_rgb &&
               rgb.G >= min_rgb && rgb.G <= max_rgb &&
               rgb.B >= min_rgb && rgb.B <= max_rgb;
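Putting those fragments together, the gamut test first rejects values whose LMS components leave the encodable intensity range, then checks the decoded RGB against the display's black and white levels with a small tolerance. A self-contained sketch of the RGB half of that test; the struct name and parameters are assumptions for this sketch.

#include <stdbool.h>

/* Hypothetical RGB triple, just for this sketch. */
typedef struct { float R, G, B; } RGBf;

/* True if all channels lie in [black level - eps, peak + eps]; the slack
 * mirrors the 1e-4 / 1e-2 tolerances quoted above. */
static bool in_display_range(RGBf rgb, float Lb, float Lw)
{
    const float min_rgb = Lb - 1e-4f;
    const float max_rgb = Lw + 1e-2f;
    return rgb.R >= min_rgb && rgb.R <= max_rgb &&
           rgb.G >= min_rgb && rgb.G <= max_rgb &&
           rgb.B >= min_rgb && rgb.B <= max_rgb;
}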
 
        return (ICh) { .I = gamut.Imin, .C = 0, .h = h };
    else if (I >= gamut.Imax)
        return (ICh) { .I = gamut.Imax, .C = 0, .h = h };

        ICh res = { .I = I, .C = (Cmin + Cmax) / 2, .h = h };

            res.C = (Cmin + Cmax) / 2;
        } while (Cmax - Cmin > maxDI);
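desat_bounded (see the index at the end of this page) narrows the chroma interval [Cmin, Cmax] by bisection until it is tighter than a tolerance, keeping intensity and hue fixed. A generic sketch of that bisection, with a placeholder in-gamut predicate and tolerance.

#include <stdbool.h>

/* Bisection on chroma C at fixed intensity I and hue h: shrink [Cmin, Cmax]
 * until the bracket is smaller than tol. in_gamut() is a placeholder. */
static float max_chroma_bisect(float I, float h, float Cmin, float Cmax, float tol,
                               bool (*in_gamut)(float I, float C, float h))
{
    float C = (Cmin + Cmax) / 2.0f;
    do {
        if (in_gamut(I, C, h))
            Cmin = C;   /* midpoint still representable: search higher chroma */
        else
            Cmax = C;   /* midpoint clips: search lower chroma */
        C = (Cmin + Cmax) / 2.0f;
    } while (Cmax - Cmin > tol);
    return C;
}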
 
    static const float invphi  = 0.6180339887498948f;
    static const float invphi2 = 0.38196601125010515f;

    ICh lo = { .I = gamut.Imin, .h = hue };
    ICh hi = { .I = gamut.Imax, .h = hue };
    float de = hi.I - lo.I;
    ICh a = { .I = lo.I + invphi2 * de };
    ICh b = { .I = lo.I + invphi  * de };

            a.I = lo.I + invphi2 * de;

            b.I = lo.I + invphi * de;

    return a.C > b.C ? a : b;
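invphi and invphi2 are 1/phi and 1/phi^2, which identifies saturate() as a golden-section search over intensity for the point of maximum chroma on the gamut boundary at a fixed hue. A generic golden-section maximizer follows; the objective f is a placeholder standing in for "maximum chroma attainable at this intensity".

/* Golden-section search for the maximum of a unimodal function f on [lo, hi]. */
static float golden_max(float lo, float hi, float tol, float (*f)(float))
{
    const float invphi  = 0.6180339887498948f;   /* 1 / phi   */
    const float invphi2 = 0.38196601125010515f;  /* 1 / phi^2 */
    float de = hi - lo;
    float a = lo + invphi2 * de, fa = f(a);
    float b = lo + invphi  * de, fb = f(b);

    while (de > tol) {
        if (fa > fb) {            /* maximum lies in [lo, b] */
            hi = b; b = a; fb = fa;
            de = hi - lo;
            a = lo + invphi2 * de; fa = f(a);
        } else {                  /* maximum lies in [a, hi] */
            lo = a; a = b; fa = fb;
            de = hi - lo;
            b = lo + invphi * de; fb = f(b);
        }
    }
    return fa > fb ? a : b;
}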
 
    if (x <= j || peak <= 1.0)

    a = -j*j * (peak - 1.0f) / (j*j - 2.0f * j + peak);
    b = (j*j - 2.0f * j * peak + peak) / fmaxf(1e-6f, peak - 1.0f);

    return scale * (x + a) / (x + b) * target;
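The a and b coefficients define a Mobius-style (rational) segment that joins the linear region below the knee j to an asymptote at the source peak, a common soft-clip construction in tone mapping. A self-contained sketch of such a curve, assuming the input is already normalized to the target range; the scale term here is the usual continuity constant of this construction and is not copied from the listing.

#include <math.h>

/* Sketch of a Mobius-style soft clip: identity below the knee j, then a
 * rational segment approaching 1.0 as x approaches peak. */
static float softclip_sketch(float x, float j, float peak)
{
    float a, b, scale;

    if (x <= j || peak <= 1.0f)
        return x;   /* nothing to compress */

    a = -j * j * (peak - 1.0f) / (j * j - 2.0f * j + peak);
    b = (j * j - 2.0f * j * peak + peak) / fmaxf(1e-6f, peak - 1.0f);
    scale = (b * b + 2.0f * b * j + j * j) / (b - a);   /* continuity at x = j */

    return scale * (x + a) / (x + b);
}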
 
    const float Imin = gamut.Imin;
    const float Irel = fmaxf((ich.I - Imin) / (gamut.peak.I - Imin), 0.0f);

    float lo = 0.0f, hi = 1.0f, x = 0.5f;

    if (ipt.I <= gamut.Imin)

        x = (lo + hi) / 2.0f;
    } while (hi - lo > maxDI);
 
                             float dst_max, float dst_min,
                             float *out_src_knee, float *out_dst_knee)

    const float src_knee_min = fmixf(src_min, src_max, min_knee);
    const float src_knee_max = fmixf(src_min, src_max, max_knee);
    const float dst_knee_min = fmixf(dst_min, dst_max, min_knee);
    const float dst_knee_max = fmixf(dst_min, dst_max, max_knee);
    float src_knee, target, adapted, tuning, adaptation, dst_knee;

    src_knee = src_avg ? src_avg : fmixf(src_min, src_max, def_knee);
    src_knee = av_clipf(src_knee, src_knee_min, src_knee_max);

    target  = (src_knee - src_min) / (src_max - src_min);
    adapted = fmixf(dst_min, dst_max, target);

    dst_knee = fmixf(src_knee, adapted, adaptation);
    dst_knee = av_clipf(dst_knee, dst_knee_min, dst_knee_max);

    *out_src_knee = src_knee;
    *out_dst_knee = dst_knee;
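fmixf(a, b, t) is a linear interpolation, a + (b - a) * t, and av_clipf clamps to a range; the knee is the source brightness that is pinned onto the destination curve. A minimal sketch of the same interpolate-and-clamp pattern; the helpers and the tuning constants standing in for the PERCEPTUAL_* defines are placeholders for this sketch.

static float lerpf(float a, float b, float t) { return a + (b - a) * t; }
static float clampf(float x, float lo, float hi)
{
    return x < lo ? lo : (x > hi ? hi : x);
}

/* Sketch of the knee-picking pattern: lerp to candidate knees, then clamp. */
static void pick_knee_sketch(float src_max, float src_min, float src_avg,
                             float dst_max, float dst_min,
                             float *src_knee, float *dst_knee)
{
    const float min_knee = 0.1f, max_knee = 0.8f;   /* placeholder bounds   */
    const float def_knee = 0.4f, adaptation = 0.4f; /* placeholder defaults */
    float knee, target, adapted;

    knee = src_avg ? src_avg : lerpf(src_min, src_max, def_knee);
    knee = clampf(knee, lerpf(src_min, src_max, min_knee),
                        lerpf(src_min, src_max, max_knee));

    /* Map the knee's relative position in the source range onto the
     * destination range, then blend between "keep source brightness"
     * and "fully adapt to the destination". */
    target  = (knee - src_min) / (src_max - src_min);
    adapted = lerpf(dst_min, dst_max, target);

    *src_knee = knee;
    *dst_knee = clampf(lerpf(knee, adapted, adaptation),
                       lerpf(dst_min, dst_max, min_knee),
                       lerpf(dst_min, dst_max, max_knee));
}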
 
    const float dst_min = ctx->dst.Imin;
    const float dst_max = ctx->dst.Imax;
    const float src_min = ctx->src.Imin;
    const float src_max = dynamic ? ctx->src.Imax_frame : ctx->src.Imax;
    const float src_avg = dynamic ? ctx->src.Iavg_frame : 0.0f;
    float slope, ratio, in_min, in_max, out_min, out_max, t;

    switch (ctx->map.intent) {

                         &ctx->src_knee, &ctx->dst_knee);

        slope = (ctx->dst_knee - dst_min) / (ctx->src_knee - src_min);

        ratio = src_max / dst_max - 1.0f;

        in_min  = src_min - ctx->src_knee;
        in_max  = src_max - ctx->src_knee;
        out_min = dst_min - ctx->dst_knee;
        out_max = dst_max - ctx->dst_knee;

        ctx->Pa = (out_min - slope * in_min) / (in_min * in_min);

        t = 2 * in_max * in_max;
        ctx->Qa = (slope * in_max - out_max) / (in_max * t);
        ctx->Qb = -3 * (slope * in_max - out_max) / t;

        ctx->I_scale  = (dst_max - dst_min) / (src_max - src_min);
        ctx->I_offset = dst_min - src_min * ctx->I_scale;

        ctx->I_scale  = src_max / (src_max - src_min) /
                        (dst_max / (dst_max - dst_min));
        ctx->I_offset = dst_min - src_min * ctx->I_scale;

        ctx->I_offset = 0.0f;
 
    float I = ipt.I, desat;

        const float Pa = ctx->Pa, Pb = ctx->Pb;
        const float Qa = ctx->Qa, Qb = ctx->Qb, Qc = ctx->Qc;
        I = I > 0 ? ((Qa * I + Qb) * I + Qc) * I : (Pa * I + Pb) * I;

        I = ctx->I_scale * I + ctx->I_offset;
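In the perceptual path the curve is evaluated in knee-relative coordinates: the branch below the knee is a quadratic Pa*I^2 + Pb*I and the branch above is a cubic Qa*I^3 + Qb*I^2 + Qc*I, both written in Horner form, with Pa/Qa/Qb/Qc derived from the slope and endpoint constraints set up in tone_map_setup. A small sketch evaluating such a two-piece curve; the struct, the knee shift and the coefficient values are placeholders for this sketch, not the exact library layout.

/* Sketch: evaluate a two-piece polynomial tone curve expressed relative to
 * the knee point, quadratic below the knee and cubic above it. */
typedef struct {
    float src_knee, dst_knee;
    float Pa, Pb;         /* below-knee quadratic */
    float Qa, Qb, Qc;     /* above-knee cubic     */
} ToneCurveSketch;

static float tone_curve_eval(const ToneCurveSketch *c, float I)
{
    float x = I - c->src_knee;   /* knee-relative input */
    float y = x > 0 ? ((c->Qa * x + c->Qb) * x + c->Qc) * x
                    : (c->Pa * x + c->Pb) * x;
    return y + c->dst_knee;      /* shift back so the knee maps to dst_knee */
}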
 
    const float maxC = fmaxf(ctx->tmp.peak.C, ctx->dst.peak.C);

    switch (ctx->map.intent) {
 
    const int slice_stride = ctx.size_input * ctx.size_input;

    const int output_slice_h = (ctx.size_output_PT + nb_jobs - 1) / nb_jobs;
    const int output_start   = jobnr * output_slice_h;
    const int output_stride  = ctx.size_output_PT * ctx.size_output_I;
    const int output_end     = FFMIN((jobnr + 1) * output_slice_h, ctx.size_output_PT);

    const float I_scale   = 1.0f / (ctx.src.Imax - ctx.src.Imin);
    const float I_offset  = -ctx.src.Imin * I_scale;
    const float PT_offset = (float) (1 << 15) / (UINT16_MAX - 1);

    const float input_scale     = 1.0f / (ctx.size_input - 1);
    const float output_scale_PT = 1.0f / (ctx.size_output_PT - 1);
    const float output_scale_I  = (ctx.tmp.Imax - ctx.tmp.Imin) /
                                  (ctx.size_output_I - 1);
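The output_slice_h / output_start / output_end triplet is the usual pattern for splitting N rows of work across nb_jobs workers: ceil-divide to get the slice height, then clamp the last slice with FFMIN. A tiny generic sketch of that partitioning.

/* Sketch of the slice partitioning pattern: give job `jobnr` the half-open
 * range [start, end) out of `rows` total rows, clamping the last slice. */
static void slice_bounds(int rows, int nb_jobs, int jobnr, int *start, int *end)
{
    int slice_h = (rows + nb_jobs - 1) / nb_jobs;  /* ceil(rows / nb_jobs) */
    *start = jobnr * slice_h;
    *end   = (jobnr + 1) * slice_h;
    if (*end > rows)
        *end = rows;
    if (*start > rows)
        *start = rows;
}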
 
        const float B = input_scale * Bx;
        for (int Gx = 0; Gx < ctx.size_input; Gx++) {
            const float G = input_scale * Gx;
            for (int Rx = 0; Rx < ctx.size_input; Rx++) {
                double c[3] = { input_scale * Rx, G, B };

                    ipt = ctx.adapt_colors(&ctx, ipt);
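The input LUT is populated by walking a size_input^3 grid of normalized R, G, B coordinates (index / (size - 1)); each lattice point is converted to IPT, run through adapt_colors, and stored. A stripped-down sketch of that grid walk, with a placeholder per-entry callback instead of the real conversion.

/* Sketch of a 3D LUT grid walk: visit every (R,G,B) lattice point of an
 * N*N*N table, passing normalized coordinates in [0,1] to a callback.
 * fill() is a placeholder for "convert to IPT, adapt, store". */
static void walk_rgb_lut(int size,
                         void (*fill)(int idx, float r, float g, float b))
{
    const float scale = 1.0f / (size - 1);
    int idx = 0;
    for (int Bx = 0; Bx < size; Bx++)
        for (int Gx = 0; Gx < size; Gx++)
            for (int Rx = 0; Rx < size; Rx++)
                fill(idx++, scale * Rx, scale * Gx, scale * Bx);
}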
 
    for (int Tx = output_start; Tx < output_end; Tx++) {
        const float T = output_scale_PT * Tx - PT_offset;
        for (int Px = 0; Px < ctx.size_output_PT; Px++) {
            const float P = output_scale_PT * Px - PT_offset;

            for (int Ix = 0; Ix < ctx.size_output_I; Ix++) {
                const float I = output_scale_I * Ix + ctx.tmp.Imin;
 
                              int size_input, int size_I, int size_PT,

        .size_input     = size_input,
        .size_output_I  = size_I,
        .size_output_PT = size_PT,

    switch (ctx.map.intent) {
    default: return AVERROR(EINVAL);

    ctx.tmp.Imin = ctx.dst.Imin;
    ctx.tmp.Imax = ctx.dst.Imax;

    num_slices = (ctx.size_input + ctx.slice_size - 1) / ctx.slice_size;
 
    const float src_scale  = (ctx.src.Imax - ctx.src.Imin) / (size - 1);
    const float src_offset = ctx.src.Imin;
    const float dst_scale  = 1.0f / (ctx.dst.Imax - ctx.dst.Imin);
    const float dst_offset = -ctx.dst.Imin * dst_scale;

    for (int i = 0; i < size; i++) {
        const float I = src_scale * i + src_offset;
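ff_sws_tone_map_generate (see the index below) builds a 1D LUT over the source intensity range: each entry samples a source I, applies the tone curve, and renormalizes the result into the destination range. A compact sketch of that pattern, with a placeholder curve and a plain 16-bit quantizer in place of the library's rounding helper.

#include <stdint.h>
#include <math.h>

/* Sketch of 1D tone-map LUT generation: sample the source intensity range,
 * apply a tone curve, normalize into the destination range, quantize to
 * 16 bits. tone_curve() is a placeholder for the real curve. */
static void generate_tone_lut(uint16_t *lut, int size,
                              float src_min, float src_max,
                              float dst_min, float dst_max,
                              float (*tone_curve)(float I))
{
    const float src_scale = (src_max - src_min) / (size - 1);
    const float dst_scale = 1.0f / (dst_max - dst_min);

    for (int i = 0; i < size; i++) {
        float I      = src_min + src_scale * i;  /* source intensity sample */
        float mapped = tone_curve(I);            /* tone-mapped intensity   */
        float norm   = (mapped - dst_min) * dst_scale;
        if (norm < 0.0f) norm = 0.0f;
        if (norm > 1.0f) norm = 1.0f;
        lut[i] = (uint16_t) lrintf(norm * UINT16_MAX);
    }
}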
 
  
static const float hull(float I)
static void generate_slice(void *priv, int jobnr, int threadnr, int nb_jobs, int nb_threads)
#define PERCEPTUAL_CONTRAST
#define PERCEPTUAL_KNEE_MIN
@ SWS_INTENT_SATURATION
Saturation mapping.
AVWhitepointCoefficients wp
Struct that contains both white point location and primaries location, providing the complete descrip...
av_csp_eotf_function eotf
static IPT saturation(const CmsCtx *ctx, IPT ipt)
static ICh saturate(float hue, Gamut gamut)
SwsMatrix3x3 lms2encoding
void avpriv_slicethread_execute(AVSliceThread *ctx, int nb_jobs, int execute_main)
Execute slice threading.
static float smoothstepf(float edge0, float edge1, float x)
void ff_sws_matrix3x3_apply(const SwsMatrix3x3 *mat, float vec[3])
#define PERCEPTUAL_KNEE_DEF
#define PERCEPTUAL_STRENGTH
struct AVSliceThread AVSliceThread
static IPT clip_gamma(IPT ipt, float gamma, Gamut gamut)
AVPrimaryCoefficients gamut
static av_always_inline RGB ipt2rgb(IPT c, const SwsMatrix3x3 lms2rgb)
static av_always_inline IPT tone_map_apply(const CmsCtx *ctx, IPT ipt)
static int slice_end(AVCodecContext *avctx, AVFrame *pict, int *got_output)
Handle slice ends.
int avpriv_slicethread_create(AVSliceThread **pctx, void *priv, void(*worker_func)(void *priv, int jobnr, int threadnr, int nb_jobs, int nb_threads), void(*main_func)(void *priv), int nb_threads)
Create slice threading context.
static av_always_inline void update_hue_peaks(CmsCtx *ctx, float P, float T)
enum AVColorTransferCharacteristic trc
static const float lms2rgb[3][3]
IPT(* adapt_colors)(const CmsCtx *ctx, IPT ipt)
int ff_sws_color_map_generate_dynamic(v3u16_t *input, v3u16_t *output, int size_input, int size_I, int size_PT, const SwsColorMap *map)
Generates a split pair of 3DLUTS, going to IPT and back, allowing an arbitrary dynamic EETF to be nes...
void ff_sws_tone_map_generate(v2u16_t *lut, int size, const SwsColorMap *map)
Generate a 1D LUT of size size adapting intensity (I) levels from the source to the destination color...
const AVColorPrimariesDesc * av_csp_primaries_desc_from_id(enum AVColorPrimaries prm)
Retrieves a complete gamut description from an enum constant describing the color primaries.
av_csp_eotf_function eotf_inv
static double av_q2d(AVRational a)
Convert an AVRational to a double.
float fminf(float, float)
static float pq_eotf(float x)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
SwsMatrix3x3 ff_sws_ipt_rgb2lms(const AVColorPrimariesDesc *prim)
int ff_sws_color_map_generate_static(v3u16_t *lut, int size, const SwsColorMap *map)
Generates a single end-to-end color mapping 3DLUT embedding a static tone mapping curve.
static IPT absolute(const CmsCtx *ctx, IPT ipt)
SwsMatrix3x3 ff_sws_ipt_lms2rgb(const AVColorPrimariesDesc *prim)
@ SWS_INTENT_PERCEPTUAL
Perceptual tone mapping.
static bool ingamut(IPT c, Gamut gamut)
static __device__ float sqrtf(float a)
#define COLORIMETRIC_GAMMA
@ SWS_INTENT_ABSOLUTE_COLORIMETRIC
Absolute colorimetric clipping.
Struct containing chromaticity x and y values for the standard CIE 1931 chromaticity definition.
SwsMatrix3x3 ff_sws_get_adaptation(const AVPrimaryCoefficients *prim, AVWhitepointCoefficients from, AVWhitepointCoefficients to)
static IPT relative(const CmsCtx *ctx, IPT ipt)
float fmaxf(float, float)
#define PERCEPTUAL_ADAPTATION
#define PERCEPTUAL_KNEE_MAX
bool ff_prim_superset(const AVPrimaryCoefficients *a, const AVPrimaryCoefficients *b)
Returns true if 'b' is entirely contained in 'a'.
static float softclip(float value, float source, float target)
static av_always_inline ICh ipt2ich(IPT c)
#define i(width, name, range_min, range_max)
static ICh mix_exp(ICh c, float x, float gamma, float base)
Something like fmixf(base, c, x) but follows an exponential curve, note that this can be used to exte...
static ICh desat_bounded(float I, float h, float Cmin, float Cmax, Gamut gamut)
static void st2094_pick_knee(float src_max, float src_min, float src_avg, float dst_max, float dst_min, float *out_src_knee, float *out_dst_knee)
Helper function to pick a knee point based on the HDR10+ brightness metadata and scene brightness a...
void(* av_csp_eotf_function)(double Lw, double Lb, double c[3])
Function pointer representing an ITU EOTF transfer for a given reference display configuration.
static float scale_gamma(float gamma, ICh ich, Gamut gamut)
Drop gamma for colors approaching black and achromatic to avoid numerical instabilities,...
static int slice_start(SliceContext *sc, VVCContext *s, VVCFrameContext *fc, const CodedBitstreamUnit *unit, const int is_first_slice)
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
static const float rgb2lms[3][3]
@ SWS_INTENT_RELATIVE_COLORIMETRIC
Relative colorimetric clipping.
static IPT perceptual(const CmsCtx *ctx, IPT ipt)
static const float maxDelta
SwsMatrix3x3 encoding2lms
bool ff_sws_color_map_noop(const SwsColorMap *map)
Returns true if the given color map is a semantic no-op - that is, the overall RGB end to end transfo...
static void tone_map_setup(CmsCtx *ctx, bool dynamic)
static float pq_oetf(float x)
static Gamut gamut_from_colorspace(SwsColor fmt)
static av_always_inline av_const uint16_t av_round16f(float x)
const VDPAUPixFmtMap * map
#define PERCEPTUAL_DEADZONE
static void scale(int *out, const int *in, const int w, const int h, const int shift)
AVPrimaryCoefficients prim
static av_always_inline IPT ich2ipt(ICh c)
av_csp_eotf_function av_csp_itu_eotf(enum AVColorTransferCharacteristic trc)
Returns the ITU EOTF corresponding to a given TRC.
static const SheerTable rgb[2]
void avpriv_slicethread_free(AVSliceThread **pctx)
Destroy slice threading context.
float(* tone_map)(const CmsCtx *ctx, float I)
static av_always_inline IPT rgb2ipt(RGB c, const SwsMatrix3x3 rgb2lms)
enum AVColorPrimaries prim
av_csp_eotf_function av_csp_itu_eotf_inv(enum AVColorTransferCharacteristic trc)
Returns the mathematical inverse of the corresponding EOTF.
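The av_csp_itu_eotf / av_csp_eotf_function entries above describe transfer functions parametrized by a reference display (Lw, Lb) that linearize a triple in place. As a usage illustration, a small sketch follows; the header path and the 1000 / 0.005 cd/m^2 display levels are assumptions for this sketch, not values taken from the listing.

#include <libavutil/csp.h>
#include <libavutil/pixfmt.h>

/* Sketch: fetch the ITU EOTF for PQ (SMPTE ST 2084) and linearize one
 * encoded RGB triple in place, for an example 1000-nit display. */
static void linearize_pq_sample(double rgb[3])
{
    av_csp_eotf_function eotf = av_csp_itu_eotf(AVCOL_TRC_SMPTE2084);
    if (eotf)
        eotf(1000.0, 0.005, rgb);  /* rgb now holds linear light in cd/m^2 */
}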