FFmpeg
vf_overlay.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2010 Stefano Sabatini
3  * Copyright (c) 2010 Baptiste Coudurier
4  * Copyright (c) 2007 Bobby Bingham
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * overlay one video on top of another
26  */
27 
28 #include "avfilter.h"
29 #include "formats.h"
30 #include "libavutil/common.h"
31 #include "libavutil/eval.h"
32 #include "libavutil/avstring.h"
33 #include "libavutil/pixdesc.h"
34 #include "libavutil/imgutils.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/timestamp.h"
38 #include "filters.h"
39 #include "drawutils.h"
40 #include "framesync.h"
41 #include "video.h"
42 #include "vf_overlay.h"
43 
/* Per-job argument bundle handed to the slice-threaded blend callbacks
 * via ff_filter_execute(); see do_blend(). */
44 typedef struct ThreadData {
45  AVFrame *dst, *src; ///< dst: main frame blended into in place; src: overlay frame
46 } ThreadData;
47 
/* Names of the variables available in the x/y position expressions,
 * parsed with av_expr_parse(); entry order corresponds to the VAR_*
 * indices used with s->var_values[]. */
48 static const char *const var_names[] = {
49  "main_w", "W", ///< width of the main video
50  "main_h", "H", ///< height of the main video
51  "overlay_w", "w", ///< width of the overlay video
52  "overlay_h", "h", ///< height of the overlay video
53  "hsub", ///< horizontal chroma subsampling factor (1 << log2_chroma_w)
54  "vsub", ///< vertical chroma subsampling factor (1 << log2_chroma_h)
55  "x", ///< computed x position of the overlay
56  "y", ///< computed y position of the overlay
57  "n", ///< number of the input frame, starting from 0
58  "t", ///< timestamp expressed in seconds
59  NULL ///< sentinel terminating the name list
60 };
61 
62 #define MAIN 0
63 #define OVERLAY 1
64 
65 #define R 0
66 #define G 1
67 #define B 2
68 #define A 3
69 
70 #define Y 0
71 #define U 1
72 #define V 2
73 
74 enum EvalMode {
78 };
79 
81 {
82  OverlayContext *s = ctx->priv;
83 
84  ff_framesync_uninit(&s->fs);
85  av_expr_free(s->x_pexpr); s->x_pexpr = NULL;
86  av_expr_free(s->y_pexpr); s->y_pexpr = NULL;
87 }
88 
/**
 * Convert an evaluated coordinate to an integer pixel position aligned
 * to the chroma subsampling grid.
 *
 * @param d          coordinate value from expression evaluation
 * @param chroma_sub log2 of the chroma subsampling factor for this axis
 * @return INT_MAX when d is NaN (expression not yet resolvable),
 *         otherwise d truncated toward zero with the low chroma bits cleared
 */
static inline int normalize_xy(double d, int chroma_sub)
{
    const int align_mask = (1 << chroma_sub) - 1;

    /* NaN marks a coordinate that could not be evaluated yet */
    if (isnan(d))
        return INT_MAX;

    return ((int)d) & ~align_mask;
}
95 
97 {
98  OverlayContext *s = ctx->priv;
99 
100  s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, NULL);
101  s->var_values[VAR_Y] = av_expr_eval(s->y_pexpr, s->var_values, NULL);
102  /* It is necessary if x is expressed from y */
103  s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, NULL);
104  s->x = normalize_xy(s->var_values[VAR_X], s->hsub);
105  s->y = normalize_xy(s->var_values[VAR_Y], s->vsub);
106 }
107 
108 static int set_expr(AVExpr **pexpr, const char *expr, const char *option, void *log_ctx)
109 {
110  int ret;
111  AVExpr *old = NULL;
112 
113  if (*pexpr)
114  old = *pexpr;
115  ret = av_expr_parse(pexpr, expr, var_names,
116  NULL, NULL, NULL, NULL, 0, log_ctx);
117  if (ret < 0) {
118  av_log(log_ctx, AV_LOG_ERROR,
119  "Error when evaluating the expression '%s' for %s\n",
120  expr, option);
121  *pexpr = old;
122  return ret;
123  }
124 
125  av_expr_free(old);
126  return 0;
127 }
128 
129 static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
130  char *res, int res_len, int flags)
131 {
132  OverlayContext *s = ctx->priv;
133  int ret;
134 
135  if (!strcmp(cmd, "x"))
136  ret = set_expr(&s->x_pexpr, args, cmd, ctx);
137  else if (!strcmp(cmd, "y"))
138  ret = set_expr(&s->y_pexpr, args, cmd, ctx);
139  else
140  ret = AVERROR(ENOSYS);
141 
142  if (ret < 0)
143  return ret;
144 
145  if (s->eval_mode == EVAL_MODE_INIT) {
146  eval_expr(ctx);
147  av_log(ctx, AV_LOG_VERBOSE, "x:%f xi:%d y:%f yi:%d\n",
148  s->var_values[VAR_X], s->x,
149  s->var_values[VAR_Y], s->y);
150  }
151  return ret;
152 }
153 
154 static const enum AVPixelFormat alpha_pix_fmts[] = {
159 };
160 
162  AVFilterFormatsConfig **cfg_in,
163  AVFilterFormatsConfig **cfg_out)
164 {
165  const OverlayContext *s = ctx->priv;
166 
167  /* overlay formats contains alpha, for avoiding conversion with alpha information loss */
168  static const enum AVPixelFormat main_pix_fmts_yuv420[] = {
172  };
173  static const enum AVPixelFormat overlay_pix_fmts_yuv420[] = {
175  };
176 
177  static const enum AVPixelFormat main_pix_fmts_yuv420p10[] = {
180  };
181  static const enum AVPixelFormat overlay_pix_fmts_yuv420p10[] = {
183  };
184 
185  static const enum AVPixelFormat main_pix_fmts_yuv422[] = {
187  };
188  static const enum AVPixelFormat overlay_pix_fmts_yuv422[] = {
190  };
191 
192  static const enum AVPixelFormat main_pix_fmts_yuv422p10[] = {
194  };
195  static const enum AVPixelFormat overlay_pix_fmts_yuv422p10[] = {
197  };
198 
199  static const enum AVPixelFormat main_pix_fmts_yuv444[] = {
201  };
202  static const enum AVPixelFormat overlay_pix_fmts_yuv444[] = {
204  };
205 
206  static const enum AVPixelFormat main_pix_fmts_yuv444p10[] = {
208  };
209  static const enum AVPixelFormat overlay_pix_fmts_yuv444p10[] = {
211  };
212 
213  static const enum AVPixelFormat main_pix_fmts_gbrp[] = {
215  };
216  static const enum AVPixelFormat overlay_pix_fmts_gbrp[] = {
218  };
219 
220  static const enum AVPixelFormat main_pix_fmts_rgb[] = {
225  };
226  static const enum AVPixelFormat overlay_pix_fmts_rgb[] = {
230  };
231 
232  const enum AVPixelFormat *main_formats, *overlay_formats;
234  int ret;
235 
236  switch (s->format) {
238  main_formats = main_pix_fmts_yuv420;
239  overlay_formats = overlay_pix_fmts_yuv420;
240  break;
242  main_formats = main_pix_fmts_yuv420p10;
243  overlay_formats = overlay_pix_fmts_yuv420p10;
244  break;
246  main_formats = main_pix_fmts_yuv422;
247  overlay_formats = overlay_pix_fmts_yuv422;
248  break;
250  main_formats = main_pix_fmts_yuv422p10;
251  overlay_formats = overlay_pix_fmts_yuv422p10;
252  break;
254  main_formats = main_pix_fmts_yuv444;
255  overlay_formats = overlay_pix_fmts_yuv444;
256  break;
258  main_formats = main_pix_fmts_yuv444p10;
259  overlay_formats = overlay_pix_fmts_yuv444p10;
260  break;
261  case OVERLAY_FORMAT_RGB:
262  main_formats = main_pix_fmts_rgb;
263  overlay_formats = overlay_pix_fmts_rgb;
264  break;
265  case OVERLAY_FORMAT_GBRP:
266  main_formats = main_pix_fmts_gbrp;
267  overlay_formats = overlay_pix_fmts_gbrp;
268  break;
269  case OVERLAY_FORMAT_AUTO:
270  return ff_set_common_formats_from_list2(ctx, cfg_in, cfg_out, alpha_pix_fmts);
271  default:
272  av_assert0(0);
273  }
274 
275  formats = ff_make_format_list(main_formats);
276  if ((ret = ff_formats_ref(formats, &cfg_in[MAIN]->formats)) < 0 ||
277  (ret = ff_formats_ref(formats, &cfg_out[MAIN]->formats)) < 0)
278  return ret;
279 
280  return ff_formats_ref(ff_make_format_list(overlay_formats),
281  &cfg_in[OVERLAY]->formats);
282 }
283 
285 {
286  AVFilterContext *ctx = inlink->dst;
287  OverlayContext *s = inlink->dst->priv;
288  int ret;
289  const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
290 
291  av_image_fill_max_pixsteps(s->overlay_pix_step, NULL, pix_desc);
292 
293  /* Finish the configuration by evaluating the expressions
294  now when both inputs are configured. */
295  s->var_values[VAR_MAIN_W ] = s->var_values[VAR_MW] = ctx->inputs[MAIN ]->w;
296  s->var_values[VAR_MAIN_H ] = s->var_values[VAR_MH] = ctx->inputs[MAIN ]->h;
297  s->var_values[VAR_OVERLAY_W] = s->var_values[VAR_OW] = ctx->inputs[OVERLAY]->w;
298  s->var_values[VAR_OVERLAY_H] = s->var_values[VAR_OH] = ctx->inputs[OVERLAY]->h;
299  s->var_values[VAR_HSUB] = 1<<pix_desc->log2_chroma_w;
300  s->var_values[VAR_VSUB] = 1<<pix_desc->log2_chroma_h;
301  s->var_values[VAR_X] = NAN;
302  s->var_values[VAR_Y] = NAN;
303  s->var_values[VAR_N] = 0;
304  s->var_values[VAR_T] = NAN;
305 
306  if ((ret = set_expr(&s->x_pexpr, s->x_expr, "x", ctx)) < 0 ||
307  (ret = set_expr(&s->y_pexpr, s->y_expr, "y", ctx)) < 0)
308  return ret;
309 
310  s->overlay_is_packed_rgb =
311  ff_fill_rgba_map(s->overlay_rgba_map, inlink->format) >= 0;
312  s->overlay_has_alpha = ff_fmt_is_in(inlink->format, alpha_pix_fmts);
313 
314  if (s->eval_mode == EVAL_MODE_INIT) {
315  eval_expr(ctx);
316  av_log(ctx, AV_LOG_VERBOSE, "x:%f xi:%d y:%f yi:%d\n",
317  s->var_values[VAR_X], s->x,
318  s->var_values[VAR_Y], s->y);
319  }
320 
322  "main w:%d h:%d fmt:%s overlay w:%d h:%d fmt:%s\n",
323  ctx->inputs[MAIN]->w, ctx->inputs[MAIN]->h,
324  av_get_pix_fmt_name(ctx->inputs[MAIN]->format),
325  ctx->inputs[OVERLAY]->w, ctx->inputs[OVERLAY]->h,
326  av_get_pix_fmt_name(ctx->inputs[OVERLAY]->format));
327  return 0;
328 }
329 
330 static int config_output(AVFilterLink *outlink)
331 {
332  AVFilterContext *ctx = outlink->src;
333  OverlayContext *s = ctx->priv;
334  int ret;
335 
336  if ((ret = ff_framesync_init_dualinput(&s->fs, ctx)) < 0)
337  return ret;
338 
339  outlink->w = ctx->inputs[MAIN]->w;
340  outlink->h = ctx->inputs[MAIN]->h;
341  outlink->time_base = ctx->inputs[MAIN]->time_base;
342 
343  return ff_framesync_configure(&s->fs);
344 }
345 
346 // divide by 255 and round to nearest
347 // apply a fast variant: (X+127)/255 = ((X+127)*257+257)>>16 = ((X+128)*257)>>16
348 #define FAST_DIV255(x) ((((x) + 128) * 257) >> 16)
349 
350 // calculate the unpremultiplied alpha, applying the general equation:
351 // alpha = alpha_overlay / ( (alpha_main + alpha_overlay) - (alpha_main * alpha_overlay) )
352 // (((x) << 16) - ((x) << 9) + (x)) is a faster version of: 255 * 255 * x
353 // ((((x) + (y)) << 8) - ((x) + (y)) - (y) * (x)) is a faster version of: 255 * (x + y)
354 #define UNPREMULTIPLY_ALPHA(x, y) ((((x) << 16) - ((x) << 9) + (x)) / ((((x) + (y)) << 8) - ((x) + (y)) - (y) * (x)))
355 
356 /**
357  * Blend image in src to destination buffer dst at position (x, y).
358  */
359 
361  AVFrame *dst, const AVFrame *src,
362  int main_has_alpha, int x, int y,
363  int is_straight, int jobnr, int nb_jobs)
364 {
365  OverlayContext *s = ctx->priv;
366  int i, imax, j, jmax;
367  const int src_w = src->width;
368  const int src_h = src->height;
369  const int dst_w = dst->width;
370  const int dst_h = dst->height;
371  uint8_t alpha; ///< the amount of overlay to blend on to main
372  const int dr = s->main_rgba_map[R];
373  const int dg = s->main_rgba_map[G];
374  const int db = s->main_rgba_map[B];
375  const int da = s->main_rgba_map[A];
376  const int dstep = s->main_pix_step[0];
377  const int sr = s->overlay_rgba_map[R];
378  const int sg = s->overlay_rgba_map[G];
379  const int sb = s->overlay_rgba_map[B];
380  const int sa = s->overlay_rgba_map[A];
381  const int sstep = s->overlay_pix_step[0];
382  int slice_start, slice_end;
383  uint8_t *S, *sp, *d, *dp;
384 
385  i = FFMAX(-y, 0);
386  imax = FFMIN3(-y + dst_h, FFMIN(src_h, dst_h), y + src_h);
387 
388  slice_start = i + (imax * jobnr) / nb_jobs;
389  slice_end = i + (imax * (jobnr+1)) / nb_jobs;
390 
391  sp = src->data[0] + (slice_start) * src->linesize[0];
392  dp = dst->data[0] + (y + slice_start) * dst->linesize[0];
393 
394  for (i = slice_start; i < slice_end; i++) {
395  j = FFMAX(-x, 0);
396  S = sp + j * sstep;
397  d = dp + (x+j) * dstep;
398 
399  for (jmax = FFMIN(-x + dst_w, src_w); j < jmax; j++) {
400  alpha = S[sa];
401 
402  // if the main channel has an alpha channel, alpha has to be calculated
403  // to create an un-premultiplied (straight) alpha value
404  if (main_has_alpha && alpha != 0 && alpha != 255) {
405  uint8_t alpha_d = d[da];
406  alpha = UNPREMULTIPLY_ALPHA(alpha, alpha_d);
407  }
408 
409  switch (alpha) {
410  case 0:
411  break;
412  case 255:
413  d[dr] = S[sr];
414  d[dg] = S[sg];
415  d[db] = S[sb];
416  break;
417  default:
418  // main_value = main_value * (1 - alpha) + overlay_value * alpha
419  // since alpha is in the range 0-255, the result must divided by 255
420  d[dr] = is_straight ? FAST_DIV255(d[dr] * (255 - alpha) + S[sr] * alpha) :
421  FFMIN(FAST_DIV255(d[dr] * (255 - alpha)) + S[sr], 255);
422  d[dg] = is_straight ? FAST_DIV255(d[dg] * (255 - alpha) + S[sg] * alpha) :
423  FFMIN(FAST_DIV255(d[dg] * (255 - alpha)) + S[sg], 255);
424  d[db] = is_straight ? FAST_DIV255(d[db] * (255 - alpha) + S[sb] * alpha) :
425  FFMIN(FAST_DIV255(d[db] * (255 - alpha)) + S[sb], 255);
426  }
427  if (main_has_alpha) {
428  switch (alpha) {
429  case 0:
430  break;
431  case 255:
432  d[da] = S[sa];
433  break;
434  default:
435  // apply alpha compositing: main_alpha += (1-main_alpha) * overlay_alpha
436  d[da] += FAST_DIV255((255 - d[da]) * S[sa]);
437  }
438  }
439  d += dstep;
440  S += sstep;
441  }
442  dp += dst->linesize[0];
443  sp += src->linesize[0];
444  }
445 }
446 
447 #define DEFINE_BLEND_PLANE(depth, nbits) \
448 static av_always_inline void blend_plane_##depth##_##nbits##bits(AVFilterContext *ctx, \
449  AVFrame *dst, const AVFrame *src, \
450  int src_w, int src_h, \
451  int dst_w, int dst_h, \
452  int i, int hsub, int vsub, \
453  int x, int y, \
454  int main_has_alpha, \
455  int dst_plane, \
456  int dst_offset, \
457  int dst_step, \
458  int straight, \
459  int yuv, \
460  int jobnr, \
461  int nb_jobs) \
462 { \
463  OverlayContext *octx = ctx->priv; \
464  int src_wp = AV_CEIL_RSHIFT(src_w, hsub); \
465  int src_hp = AV_CEIL_RSHIFT(src_h, vsub); \
466  int dst_wp = AV_CEIL_RSHIFT(dst_w, hsub); \
467  int dst_hp = AV_CEIL_RSHIFT(dst_h, vsub); \
468  int yp = y>>vsub; \
469  int xp = x>>hsub; \
470  uint##depth##_t *s, *sp, *d, *dp, *dap, *a, *da, *ap; \
471  int jmax, j, k, kmax; \
472  int slice_start, slice_end; \
473  const uint##depth##_t max = (1 << nbits) - 1; \
474  const uint##depth##_t mid = (1 << (nbits -1)) ; \
475  int bytes = depth / 8; \
476  \
477  dst_step /= bytes; \
478  j = FFMAX(-yp, 0); \
479  jmax = FFMIN3(-yp + dst_hp, FFMIN(src_hp, dst_hp), yp + src_hp); \
480  \
481  slice_start = j + (jmax * jobnr) / nb_jobs; \
482  slice_end = j + (jmax * (jobnr+1)) / nb_jobs; \
483  \
484  sp = (uint##depth##_t *)(src->data[i] + (slice_start) * src->linesize[i]); \
485  dp = (uint##depth##_t *)(dst->data[dst_plane] \
486  + (yp + slice_start) * dst->linesize[dst_plane] \
487  + dst_offset); \
488  ap = (uint##depth##_t *)(src->data[3] + (slice_start << vsub) * src->linesize[3]); \
489  dap = (uint##depth##_t *)(dst->data[3] + ((yp + slice_start) << vsub) * dst->linesize[3]); \
490  \
491  for (j = slice_start; j < slice_end; j++) { \
492  k = FFMAX(-xp, 0); \
493  d = dp + (xp+k) * dst_step; \
494  s = sp + k; \
495  a = ap + (k<<hsub); \
496  da = dap + ((xp+k) << hsub); \
497  kmax = FFMIN(-xp + dst_wp, src_wp); \
498  \
499  if (nbits == 8 && ((vsub && j+1 < src_hp) || !vsub) && octx->blend_row[i]) { \
500  int c = octx->blend_row[i]((uint8_t*)d, (uint8_t*)da, (uint8_t*)s, \
501  (uint8_t*)a, kmax - k, src->linesize[3]); \
502  \
503  s += c; \
504  d += dst_step * c; \
505  da += (1 << hsub) * c; \
506  a += (1 << hsub) * c; \
507  k += c; \
508  } \
509  for (; k < kmax; k++) { \
510  int alpha_v, alpha_h, alpha; \
511  \
512  /* average alpha for color components, improve quality */ \
513  if (hsub && vsub && j+1 < src_hp && k+1 < src_wp) { \
514  alpha = (a[0] + a[src->linesize[3]] + \
515  a[1] + a[src->linesize[3]+1]) >> 2; \
516  } else if (hsub || vsub) { \
517  alpha_h = hsub && k+1 < src_wp ? \
518  (a[0] + a[1]) >> 1 : a[0]; \
519  alpha_v = vsub && j+1 < src_hp ? \
520  (a[0] + a[src->linesize[3]]) >> 1 : a[0]; \
521  alpha = (alpha_v + alpha_h) >> 1; \
522  } else \
523  alpha = a[0]; \
524  /* if the main channel has an alpha channel, alpha has to be calculated */ \
525  /* to create an un-premultiplied (straight) alpha value */ \
526  if (main_has_alpha && alpha != 0 && alpha != max) { \
527  /* average alpha for color components, improve quality */ \
528  uint8_t alpha_d; \
529  if (hsub && vsub && j+1 < src_hp && k+1 < src_wp) { \
530  alpha_d = (da[0] + da[dst->linesize[3]] + \
531  da[1] + da[dst->linesize[3]+1]) >> 2; \
532  } else if (hsub || vsub) { \
533  alpha_h = hsub && k+1 < src_wp ? \
534  (da[0] + da[1]) >> 1 : da[0]; \
535  alpha_v = vsub && j+1 < src_hp ? \
536  (da[0] + da[dst->linesize[3]]) >> 1 : da[0]; \
537  alpha_d = (alpha_v + alpha_h) >> 1; \
538  } else \
539  alpha_d = da[0]; \
540  alpha = UNPREMULTIPLY_ALPHA(alpha, alpha_d); \
541  } \
542  if (straight) { \
543  if (nbits > 8) \
544  *d = (*d * (max - alpha) + *s * alpha) / max; \
545  else \
546  *d = FAST_DIV255(*d * (255 - alpha) + *s * alpha); \
547  } else { \
548  if (nbits > 8) { \
549  if (i && yuv) \
550  *d = av_clip((*d * (max - alpha) + *s * alpha) / max + *s - mid, -mid, mid) + mid; \
551  else \
552  *d = av_clip_uintp2((*d * (max - alpha) + *s * alpha) / max + *s - (16<<(nbits-8)),\
553  nbits);\
554  } else { \
555  if (i && yuv) \
556  *d = av_clip(FAST_DIV255((*d - mid) * (max - alpha)) + *s - mid, -mid, mid) + mid; \
557  else \
558  *d = av_clip_uint8(FAST_DIV255(*d * (255 - alpha)) + *s - 16); \
559  } \
560  } \
561  s++; \
562  d += dst_step; \
563  da += 1 << hsub; \
564  a += 1 << hsub; \
565  } \
566  dp += dst->linesize[dst_plane] / bytes; \
567  sp += src->linesize[i] / bytes; \
568  ap += (1 << vsub) * src->linesize[3] / bytes; \
569  dap += (1 << vsub) * dst->linesize[3] / bytes; \
570  } \
571 }
572 DEFINE_BLEND_PLANE(8, 8)
573 DEFINE_BLEND_PLANE(16, 10)
574 
575 #define DEFINE_ALPHA_COMPOSITE(depth, nbits) \
576 static inline void alpha_composite_##depth##_##nbits##bits(const AVFrame *src, const AVFrame *dst, \
577  int src_w, int src_h, \
578  int dst_w, int dst_h, \
579  int x, int y, \
580  int jobnr, int nb_jobs) \
581 { \
582  uint##depth##_t alpha; /* the amount of overlay to blend on to main */ \
583  uint##depth##_t *s, *sa, *d, *da; \
584  int i, imax, j, jmax; \
585  int slice_start, slice_end; \
586  const uint##depth##_t max = (1 << nbits) - 1; \
587  int bytes = depth / 8; \
588  \
589  imax = FFMIN3(-y + dst_h, FFMIN(src_h, dst_h), y + src_h); \
590  i = FFMAX(-y, 0); \
591  \
592  slice_start = i + (imax * jobnr) / nb_jobs; \
593  slice_end = i + ((imax * (jobnr+1)) / nb_jobs); \
594  \
595  sa = (uint##depth##_t *)(src->data[3] + (slice_start) * src->linesize[3]); \
596  da = (uint##depth##_t *)(dst->data[3] + (y + slice_start) * dst->linesize[3]); \
597  \
598  for (i = slice_start; i < slice_end; i++) { \
599  j = FFMAX(-x, 0); \
600  s = sa + j; \
601  d = da + x+j; \
602  \
603  for (jmax = FFMIN(-x + dst_w, src_w); j < jmax; j++) { \
604  alpha = *s; \
605  if (alpha != 0 && alpha != max) { \
606  uint8_t alpha_d = *d; \
607  alpha = UNPREMULTIPLY_ALPHA(alpha, alpha_d); \
608  } \
609  if (alpha == max) \
610  *d = *s; \
611  else if (alpha > 0) { \
612  /* apply alpha compositing: main_alpha += (1-main_alpha) * overlay_alpha */ \
613  if (nbits > 8) \
614  *d += (max - *d) * *s / max; \
615  else \
616  *d += FAST_DIV255((max - *d) * *s); \
617  } \
618  d += 1; \
619  s += 1; \
620  } \
621  da += dst->linesize[3] / bytes; \
622  sa += src->linesize[3] / bytes; \
623  } \
624 }
627 
628 #define DEFINE_BLEND_SLICE_YUV(depth, nbits) \
629 static av_always_inline void blend_slice_yuv_##depth##_##nbits##bits(AVFilterContext *ctx, \
630  AVFrame *dst, const AVFrame *src, \
631  int hsub, int vsub, \
632  int main_has_alpha, \
633  int x, int y, \
634  int is_straight, \
635  int jobnr, int nb_jobs) \
636 { \
637  OverlayContext *s = ctx->priv; \
638  const int src_w = src->width; \
639  const int src_h = src->height; \
640  const int dst_w = dst->width; \
641  const int dst_h = dst->height; \
642  \
643  blend_plane_##depth##_##nbits##bits(ctx, dst, src, src_w, src_h, dst_w, dst_h, 0, 0, 0, \
644  x, y, main_has_alpha, s->main_desc->comp[0].plane, s->main_desc->comp[0].offset, \
645  s->main_desc->comp[0].step, is_straight, 1, jobnr, nb_jobs); \
646  blend_plane_##depth##_##nbits##bits(ctx, dst, src, src_w, src_h, dst_w, dst_h, 1, hsub, vsub, \
647  x, y, main_has_alpha, s->main_desc->comp[1].plane, s->main_desc->comp[1].offset, \
648  s->main_desc->comp[1].step, is_straight, 1, jobnr, nb_jobs); \
649  blend_plane_##depth##_##nbits##bits(ctx, dst, src, src_w, src_h, dst_w, dst_h, 2, hsub, vsub, \
650  x, y, main_has_alpha, s->main_desc->comp[2].plane, s->main_desc->comp[2].offset, \
651  s->main_desc->comp[2].step, is_straight, 1, jobnr, nb_jobs); \
652  \
653  if (main_has_alpha) \
654  alpha_composite_##depth##_##nbits##bits(src, dst, src_w, src_h, dst_w, dst_h, x, y, \
655  jobnr, nb_jobs); \
656 }
659 
661  AVFrame *dst, const AVFrame *src,
662  int hsub, int vsub,
663  int main_has_alpha,
664  int x, int y,
665  int is_straight,
666  int jobnr,
667  int nb_jobs)
668 {
669  OverlayContext *s = ctx->priv;
670  const int src_w = src->width;
671  const int src_h = src->height;
672  const int dst_w = dst->width;
673  const int dst_h = dst->height;
674 
675  blend_plane_8_8bits(ctx, dst, src, src_w, src_h, dst_w, dst_h, 0, 0, 0, x, y, main_has_alpha,
676  s->main_desc->comp[1].plane, s->main_desc->comp[1].offset, s->main_desc->comp[1].step, is_straight, 0,
677  jobnr, nb_jobs);
678  blend_plane_8_8bits(ctx, dst, src, src_w, src_h, dst_w, dst_h, 1, hsub, vsub, x, y, main_has_alpha,
679  s->main_desc->comp[2].plane, s->main_desc->comp[2].offset, s->main_desc->comp[2].step, is_straight, 0,
680  jobnr, nb_jobs);
681  blend_plane_8_8bits(ctx, dst, src, src_w, src_h, dst_w, dst_h, 2, hsub, vsub, x, y, main_has_alpha,
682  s->main_desc->comp[0].plane, s->main_desc->comp[0].offset, s->main_desc->comp[0].step, is_straight, 0,
683  jobnr, nb_jobs);
684 
685  if (main_has_alpha)
686  alpha_composite_8_8bits(src, dst, src_w, src_h, dst_w, dst_h, x, y, jobnr, nb_jobs);
687 }
688 
689 #define DEFINE_BLEND_SLICE_PLANAR_FMT(format_, blend_slice_fn_suffix_, hsub_, vsub_, main_has_alpha_, direct_) \
690 static int blend_slice_##format_(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) \
691 { \
692  OverlayContext *s = ctx->priv; \
693  ThreadData *td = arg; \
694  blend_slice_##blend_slice_fn_suffix_(ctx, td->dst, td->src, \
695  hsub_, vsub_, main_has_alpha_, \
696  s->x, s->y, direct_, \
697  jobnr, nb_jobs); \
698  return 0; \
699 }
700 
701 // FMT FN H V A D
702 DEFINE_BLEND_SLICE_PLANAR_FMT(yuv420, yuv_8_8bits, 1, 1, 0, 1)
703 DEFINE_BLEND_SLICE_PLANAR_FMT(yuva420, yuv_8_8bits, 1, 1, 1, 1)
704 DEFINE_BLEND_SLICE_PLANAR_FMT(yuv420p10, yuv_16_10bits, 1, 1, 0, 1)
705 DEFINE_BLEND_SLICE_PLANAR_FMT(yuva420p10, yuv_16_10bits, 1, 1, 1, 1)
706 DEFINE_BLEND_SLICE_PLANAR_FMT(yuv422p10, yuv_16_10bits, 1, 0, 0, 1)
707 DEFINE_BLEND_SLICE_PLANAR_FMT(yuva422p10, yuv_16_10bits, 1, 0, 1, 1)
708 DEFINE_BLEND_SLICE_PLANAR_FMT(yuv422, yuv_8_8bits, 1, 0, 0, 1)
709 DEFINE_BLEND_SLICE_PLANAR_FMT(yuva422, yuv_8_8bits, 1, 0, 1, 1)
710 DEFINE_BLEND_SLICE_PLANAR_FMT(yuv444, yuv_8_8bits, 0, 0, 0, 1)
711 DEFINE_BLEND_SLICE_PLANAR_FMT(yuva444, yuv_8_8bits, 0, 0, 1, 1)
712 DEFINE_BLEND_SLICE_PLANAR_FMT(yuv444p10, yuv_16_10bits, 0, 0, 0, 1)
713 DEFINE_BLEND_SLICE_PLANAR_FMT(yuva444p10, yuv_16_10bits, 0, 0, 1, 1)
714 DEFINE_BLEND_SLICE_PLANAR_FMT(gbrp, planar_rgb, 0, 0, 0, 1)
715 DEFINE_BLEND_SLICE_PLANAR_FMT(gbrap, planar_rgb, 0, 0, 1, 1)
716 DEFINE_BLEND_SLICE_PLANAR_FMT(yuv420_pm, yuv_8_8bits, 1, 1, 0, 0)
717 DEFINE_BLEND_SLICE_PLANAR_FMT(yuva420_pm, yuv_8_8bits, 1, 1, 1, 0)
718 DEFINE_BLEND_SLICE_PLANAR_FMT(yuv422_pm, yuv_8_8bits, 1, 0, 0, 0)
719 DEFINE_BLEND_SLICE_PLANAR_FMT(yuva422_pm, yuv_8_8bits, 1, 0, 1, 0)
720 DEFINE_BLEND_SLICE_PLANAR_FMT(yuv444_pm, yuv_8_8bits, 0, 0, 0, 0)
721 DEFINE_BLEND_SLICE_PLANAR_FMT(yuva444_pm, yuv_8_8bits, 0, 0, 1, 0)
722 DEFINE_BLEND_SLICE_PLANAR_FMT(gbrp_pm, planar_rgb, 0, 0, 0, 0)
723 DEFINE_BLEND_SLICE_PLANAR_FMT(gbrap_pm, planar_rgb, 0, 0, 1, 0)
724 
725 #define DEFINE_BLEND_SLICE_PACKED_FMT(format_, blend_slice_fn_suffix_, main_has_alpha_, direct_) \
726 static int blend_slice_##format_(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) \
727 { \
728  OverlayContext *s = ctx->priv; \
729  ThreadData *td = arg; \
730  blend_slice_packed_##blend_slice_fn_suffix_(ctx, td->dst, td->src, \
731  main_has_alpha_, \
732  s->x, s->y, direct_, \
733  jobnr, nb_jobs); \
734  return 0; \
735 }
736 
737 // FMT FN A D
740 DEFINE_BLEND_SLICE_PACKED_FMT(rgb_pm, rgb, 0, 0)
741 DEFINE_BLEND_SLICE_PACKED_FMT(rgba_pm, rgb, 1, 0)
742 
744 {
745  OverlayContext *s = inlink->dst->priv;
746  const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
747 
748  av_image_fill_max_pixsteps(s->main_pix_step, NULL, pix_desc);
749 
750  s->hsub = pix_desc->log2_chroma_w;
751  s->vsub = pix_desc->log2_chroma_h;
752 
753  s->main_desc = pix_desc;
754 
755  s->main_is_packed_rgb =
756  ff_fill_rgba_map(s->main_rgba_map, inlink->format) >= 0;
757  s->main_has_alpha = ff_fmt_is_in(inlink->format, alpha_pix_fmts);
758  switch (s->format) {
760  s->blend_slice = s->main_has_alpha ? blend_slice_yuva420 : blend_slice_yuv420;
761  break;
763  s->blend_slice = s->main_has_alpha ? blend_slice_yuva420p10 : blend_slice_yuv420p10;
764  break;
766  s->blend_slice = s->main_has_alpha ? blend_slice_yuva422 : blend_slice_yuv422;
767  break;
769  s->blend_slice = s->main_has_alpha ? blend_slice_yuva422p10 : blend_slice_yuv422p10;
770  break;
772  s->blend_slice = s->main_has_alpha ? blend_slice_yuva444 : blend_slice_yuv444;
773  break;
775  s->blend_slice = s->main_has_alpha ? blend_slice_yuva444p10 : blend_slice_yuv444p10;
776  break;
777  case OVERLAY_FORMAT_RGB:
778  s->blend_slice = s->main_has_alpha ? blend_slice_rgba : blend_slice_rgb;
779  break;
780  case OVERLAY_FORMAT_GBRP:
781  s->blend_slice = s->main_has_alpha ? blend_slice_gbrap : blend_slice_gbrp;
782  break;
783  case OVERLAY_FORMAT_AUTO:
784  switch (inlink->format) {
785  case AV_PIX_FMT_YUVA420P:
786  s->blend_slice = blend_slice_yuva420;
787  break;
789  s->blend_slice = blend_slice_yuva420p10;
790  break;
791  case AV_PIX_FMT_YUVA422P:
792  s->blend_slice = blend_slice_yuva422;
793  break;
795  s->blend_slice = blend_slice_yuva422p10;
796  break;
797  case AV_PIX_FMT_YUVA444P:
798  s->blend_slice = blend_slice_yuva444;
799  break;
801  s->blend_slice = blend_slice_yuva444p10;
802  break;
803  case AV_PIX_FMT_ARGB:
804  case AV_PIX_FMT_RGBA:
805  case AV_PIX_FMT_BGRA:
806  case AV_PIX_FMT_ABGR:
807  s->blend_slice = blend_slice_rgba;
808  break;
809  case AV_PIX_FMT_GBRAP:
810  s->blend_slice = blend_slice_gbrap;
811  break;
812  default:
813  av_assert0(0);
814  break;
815  }
816  break;
817  }
818 
819  if (!s->alpha_format)
820  goto end;
821 
822  switch (s->format) {
824  s->blend_slice = s->main_has_alpha ? blend_slice_yuva420_pm : blend_slice_yuv420_pm;
825  break;
827  s->blend_slice = s->main_has_alpha ? blend_slice_yuva422_pm : blend_slice_yuv422_pm;
828  break;
830  s->blend_slice = s->main_has_alpha ? blend_slice_yuva444_pm : blend_slice_yuv444_pm;
831  break;
832  case OVERLAY_FORMAT_RGB:
833  s->blend_slice = s->main_has_alpha ? blend_slice_rgba_pm : blend_slice_rgb_pm;
834  break;
835  case OVERLAY_FORMAT_GBRP:
836  s->blend_slice = s->main_has_alpha ? blend_slice_gbrap_pm : blend_slice_gbrp_pm;
837  break;
838  case OVERLAY_FORMAT_AUTO:
839  switch (inlink->format) {
840  case AV_PIX_FMT_YUVA420P:
841  s->blend_slice = blend_slice_yuva420_pm;
842  break;
843  case AV_PIX_FMT_YUVA422P:
844  s->blend_slice = blend_slice_yuva422_pm;
845  break;
846  case AV_PIX_FMT_YUVA444P:
847  s->blend_slice = blend_slice_yuva444_pm;
848  break;
849  case AV_PIX_FMT_ARGB:
850  case AV_PIX_FMT_RGBA:
851  case AV_PIX_FMT_BGRA:
852  case AV_PIX_FMT_ABGR:
853  s->blend_slice = blend_slice_rgba_pm;
854  break;
855  case AV_PIX_FMT_GBRAP:
856  s->blend_slice = blend_slice_gbrap_pm;
857  break;
858  default:
859  av_assert0(0);
860  break;
861  }
862  break;
863  }
864 
865 end:
866 #if ARCH_X86
867  ff_overlay_init_x86(s, s->format, inlink->format,
868  s->alpha_format, s->main_has_alpha);
869 #endif
870 
871  return 0;
872 }
873 
874 static int do_blend(FFFrameSync *fs)
875 {
876  AVFilterContext *ctx = fs->parent;
877  AVFrame *mainpic, *second;
878  OverlayContext *s = ctx->priv;
879  AVFilterLink *inlink = ctx->inputs[0];
881  int ret;
882 
883  ret = ff_framesync_dualinput_get_writable(fs, &mainpic, &second);
884  if (ret < 0)
885  return ret;
886  if (!second)
887  return ff_filter_frame(ctx->outputs[0], mainpic);
888 
889  if (s->eval_mode == EVAL_MODE_FRAME) {
890 
891  s->var_values[VAR_N] = inl->frame_count_out;
892  s->var_values[VAR_T] = mainpic->pts == AV_NOPTS_VALUE ?
893  NAN : mainpic->pts * av_q2d(inlink->time_base);
894 
895  s->var_values[VAR_OVERLAY_W] = s->var_values[VAR_OW] = second->width;
896  s->var_values[VAR_OVERLAY_H] = s->var_values[VAR_OH] = second->height;
897  s->var_values[VAR_MAIN_W ] = s->var_values[VAR_MW] = mainpic->width;
898  s->var_values[VAR_MAIN_H ] = s->var_values[VAR_MH] = mainpic->height;
899 
900  eval_expr(ctx);
901  av_log(ctx, AV_LOG_DEBUG, "n:%f t:%f x:%f xi:%d y:%f yi:%d\n",
902  s->var_values[VAR_N], s->var_values[VAR_T],
903  s->var_values[VAR_X], s->x,
904  s->var_values[VAR_Y], s->y);
905  }
906 
907  if (s->x < mainpic->width && s->x + second->width >= 0 &&
908  s->y < mainpic->height && s->y + second->height >= 0) {
909  ThreadData td;
910 
911  td.dst = mainpic;
912  td.src = second;
913  ff_filter_execute(ctx, s->blend_slice, &td, NULL, FFMIN(FFMAX(1, FFMIN3(s->y + second->height, FFMIN(second->height, mainpic->height), mainpic->height - s->y)),
915  }
916  return ff_filter_frame(ctx->outputs[0], mainpic);
917 }
918 
920 {
921  OverlayContext *s = ctx->priv;
922 
923  s->fs.on_event = do_blend;
924  return 0;
925 }
926 
928 {
929  OverlayContext *s = ctx->priv;
930  return ff_framesync_activate(&s->fs);
931 }
932 
933 #define OFFSET(x) offsetof(OverlayContext, x)
934 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
935 #define TFLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
936 
937 static const AVOption overlay_options[] = {
938  { "x", "set the x expression", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str = "0"}, 0, 0, TFLAGS },
939  { "y", "set the y expression", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str = "0"}, 0, 0, TFLAGS },
940  { "eof_action", "Action to take when encountering EOF from secondary input ",
941  OFFSET(fs.opt_eof_action), AV_OPT_TYPE_INT, { .i64 = EOF_ACTION_REPEAT },
942  EOF_ACTION_REPEAT, EOF_ACTION_PASS, .flags = FLAGS, .unit = "eof_action" },
943  { "repeat", "Repeat the previous frame.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_REPEAT }, .flags = FLAGS, .unit = "eof_action" },
944  { "endall", "End both streams.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_ENDALL }, .flags = FLAGS, .unit = "eof_action" },
945  { "pass", "Pass through the main input.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_PASS }, .flags = FLAGS, .unit = "eof_action" },
946  { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_FRAME}, 0, EVAL_MODE_NB-1, FLAGS, .unit = "eval" },
947  { "init", "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT}, .flags = FLAGS, .unit = "eval" },
948  { "frame", "eval expressions per-frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
949  { "shortest", "force termination when the shortest input terminates", OFFSET(fs.opt_shortest), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
950  { "format", "set output format", OFFSET(format), AV_OPT_TYPE_INT, {.i64=OVERLAY_FORMAT_YUV420}, 0, OVERLAY_FORMAT_NB-1, FLAGS, .unit = "format" },
951  { "yuv420", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV420}, .flags = FLAGS, .unit = "format" },
952  { "yuv420p10", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV420P10}, .flags = FLAGS, .unit = "format" },
953  { "yuv422", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV422}, .flags = FLAGS, .unit = "format" },
954  { "yuv422p10", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV422P10}, .flags = FLAGS, .unit = "format" },
955  { "yuv444", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV444}, .flags = FLAGS, .unit = "format" },
956  { "yuv444p10", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV444P10}, .flags = FLAGS, .unit = "format" },
957  { "rgb", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_RGB}, .flags = FLAGS, .unit = "format" },
958  { "gbrp", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_GBRP}, .flags = FLAGS, .unit = "format" },
959  { "auto", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_AUTO}, .flags = FLAGS, .unit = "format" },
960  { "repeatlast", "repeat overlay of the last overlay frame", OFFSET(fs.opt_repeatlast), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, FLAGS },
961  { "alpha", "alpha format", OFFSET(alpha_format), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, .unit = "alpha_format" },
962  { "straight", "", 0, AV_OPT_TYPE_CONST, {.i64=0}, .flags = FLAGS, .unit = "alpha_format" },
963  { "premultiplied", "", 0, AV_OPT_TYPE_CONST, {.i64=1}, .flags = FLAGS, .unit = "alpha_format" },
964  { NULL }
965 };
966 
968 
970  {
971  .name = "main",
972  .type = AVMEDIA_TYPE_VIDEO,
973  .config_props = config_input_main,
974  },
975  {
976  .name = "overlay",
977  .type = AVMEDIA_TYPE_VIDEO,
978  .config_props = config_input_overlay,
979  },
980 };
981 
983  {
984  .name = "default",
985  .type = AVMEDIA_TYPE_VIDEO,
986  .config_props = config_output,
987  },
988 };
989 
991  .p.name = "overlay",
992  .p.description = NULL_IF_CONFIG_SMALL("Overlay a video source on top of the input."),
993  .p.priv_class = &overlay_class,
996  .preinit = overlay_framesync_preinit,
997  .init = init,
998  .uninit = uninit,
999  .priv_size = sizeof(OverlayContext),
1000  .activate = activate,
1005 };
flags
const SwsFlags flags[]
Definition: swscale.c:61
formats
formats
Definition: signature.h:47
VAR_MAIN_H
@ VAR_MAIN_H
Definition: vf_drawtext.c:129
ff_framesync_configure
int ff_framesync_configure(FFFrameSync *fs)
Configure a frame sync structure.
Definition: framesync.c:137
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
blend_slice_packed_rgb
static av_always_inline void blend_slice_packed_rgb(AVFilterContext *ctx, AVFrame *dst, const AVFrame *src, int main_has_alpha, int x, int y, int is_straight, int jobnr, int nb_jobs)
Blend image in src to destination buffer dst at position (x, y).
Definition: vf_overlay.c:360
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
VAR_OH
@ VAR_OH
Definition: scale_eval.c:46
set_expr
static int set_expr(AVExpr **pexpr, const char *expr, const char *option, void *log_ctx)
Definition: vf_overlay.c:108
OVERLAY
#define OVERLAY
Definition: vf_overlay.c:63
ff_make_format_list
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:435
ff_framesync_uninit
void ff_framesync_uninit(FFFrameSync *fs)
Free all memory currently allocated.
Definition: framesync.c:301
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1053
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3341
avfilter_vf_overlay_outputs
static const AVFilterPad avfilter_vf_overlay_outputs[]
Definition: vf_overlay.c:982
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
VAR_Y
@ VAR_Y
Definition: vf_blend.c:54
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: filters.h:262
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:410
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:512
AVFrame::width
int width
Definition: frame.h:482
do_blend
static int do_blend(FFFrameSync *fs)
Definition: vf_overlay.c:874
AV_PIX_FMT_YUVA420P10
#define AV_PIX_FMT_YUVA420P10
Definition: pixfmt.h:573
AVOption
AVOption.
Definition: opt.h:429
EOF_ACTION_ENDALL
@ EOF_ACTION_ENDALL
Definition: framesync.h:28
query_formats
static int query_formats(const AVFilterContext *ctx, AVFilterFormatsConfig **cfg_in, AVFilterFormatsConfig **cfg_out)
Definition: vf_overlay.c:161
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:528
VAR_HSUB
@ VAR_HSUB
Definition: boxblur.c:41
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:225
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:76
AV_PIX_FMT_BGRA
@ AV_PIX_FMT_BGRA
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:102
mathematics.h
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:203
FFFrameSync
Frame sync structure.
Definition: framesync.h:168
video.h
VAR_MAIN_W
@ VAR_MAIN_W
Definition: vf_drawtext.c:130
AV_PIX_FMT_YUVA422P10
#define AV_PIX_FMT_YUVA422P10
Definition: pixfmt.h:574
VAR_X
@ VAR_X
Definition: vf_blend.c:54
hsub
static void hsub(htype *dst, const htype *src, int bins)
Definition: vf_median.c:74
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:64
formats.h
av_expr_parse
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:710
var_names
static const char *const var_names[]
Definition: vf_overlay.c:48
S
#define S(s, c, i)
Definition: flacdsp_template.c:46
VAR_T
@ VAR_T
Definition: aeval.c:53
rgb
Definition: rpzaenc.c:60
VAR_VSUB
@ VAR_VSUB
Definition: boxblur.c:42
OVERLAY_FORMAT_RGB
@ OVERLAY_FORMAT_RGB
Definition: vf_overlay.h:48
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict, int *got_output)
Handle slice ends.
Definition: mpeg12dec.c:1710
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:212
FAST_DIV255
#define FAST_DIV255(x)
Definition: vf_overlay.c:348
OVERLAY_FORMAT_YUV422P10
@ OVERLAY_FORMAT_YUV422P10
Definition: vf_overlay.h:45
av_expr_free
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:358
R
#define R
Definition: vf_overlay.c:65
AVFilterPad
A filter pad used for either input or output.
Definition: filters.h:38
DEFINE_BLEND_SLICE_PACKED_FMT
#define DEFINE_BLEND_SLICE_PACKED_FMT(format_, blend_slice_fn_suffix_, main_has_alpha_, direct_)
Definition: vf_overlay.c:725
ff_overlay_init_x86
void ff_overlay_init_x86(OverlayContext *s, int format, int pix_format, int alpha_format, int main_has_alpha)
Definition: vf_overlay_init.c:35
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:531
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:209
config_input_main
static int config_input_main(AVFilterLink *inlink)
Definition: vf_overlay.c:743
av_cold
#define av_cold
Definition: attributes.h:90
FFFilter
Definition: filters.h:265
EVAL_MODE_FRAME
@ EVAL_MODE_FRAME
Definition: vf_overlay.c:76
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:108
OVERLAY_FORMAT_YUV422
@ OVERLAY_FORMAT_YUV422
Definition: vf_overlay.h:44
format
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
ff_formats_ref
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:678
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
filters.h
VAR_MW
@ VAR_MW
Definition: vf_overlay.h:28
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:230
ctx
AVFormatContext * ctx
Definition: movenc.c:49
av_expr_eval
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:792
UNPREMULTIPLY_ALPHA
#define UNPREMULTIPLY_ALPHA(x, y)
Definition: vf_overlay.c:354
AVExpr
Definition: eval.c:158
AVPixFmtDescriptor::log2_chroma_w
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:80
ff_fmt_is_in
int ff_fmt_is_in(int fmt, const int *fmts)
Tell if an integer is contained in the provided -1-terminated list of integers.
Definition: formats.c:406
vf_overlay.h
B
#define B
Definition: vf_overlay.c:67
VAR_N
@ VAR_N
Definition: noise.c:48
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
eval_expr
static void eval_expr(AVFilterContext *ctx)
Definition: vf_overlay.c:96
EOF_ACTION_PASS
@ EOF_ACTION_PASS
Definition: framesync.h:29
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: filters.h:263
NAN
#define NAN
Definition: mathematics.h:115
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
AV_PIX_FMT_RGBA
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:100
option
option
Definition: libkvazaar.c:314
ThreadData::dst
AVFrame * dst
Definition: vf_blend.c:58
config_input_overlay
static int config_input_overlay(AVFilterLink *inlink)
Definition: vf_overlay.c:284
NULL
#define NULL
Definition: coverity.c:32
EVAL_MODE_NB
@ EVAL_MODE_NB
Definition: vf_overlay.c:77
fs
#define fs(width, name, subs,...)
Definition: cbs_vp9.c:200
ThreadData::src
const uint8_t * src
Definition: vf_bm3d.c:54
isnan
#define isnan(x)
Definition: libm.h:342
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: vf_overlay.c:80
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
OverlayContext
Definition: vf_overlay.h:54
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:529
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:101
AVFilterFormatsConfig
Lists of formats / etc.
Definition: avfilter.h:109
avfilter_vf_overlay_inputs
static const AVFilterPad avfilter_vf_overlay_inputs[]
Definition: vf_overlay.c:969
ff_filter_link
static FilterLink * ff_filter_link(AVFilterLink *link)
Definition: filters.h:197
TFLAGS
#define TFLAGS
Definition: vf_overlay.c:935
eval.h
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
A
#define A
Definition: vf_overlay.c:68
overlay_options
static const AVOption overlay_options[]
Definition: vf_overlay.c:937
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
ff_framesync_init_dualinput
int ff_framesync_init_dualinput(FFFrameSync *fs, AVFilterContext *parent)
Initialize a frame sync structure for dualinput.
Definition: framesync.c:372
OVERLAY_FORMAT_NB
@ OVERLAY_FORMAT_NB
Definition: vf_overlay.h:51
OVERLAY_FORMAT_YUV420P10
@ OVERLAY_FORMAT_YUV420P10
Definition: vf_overlay.h:43
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:83
OVERLAY_FORMAT_YUV420
@ OVERLAY_FORMAT_YUV420
Definition: vf_overlay.h:42
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
VAR_MH
@ VAR_MH
Definition: vf_overlay.h:29
config_output
static int config_output(AVFilterLink *outlink)
Definition: vf_overlay.c:330
ff_vf_overlay
const FFFilter ff_vf_overlay
Definition: vf_overlay.c:990
OVERLAY_FORMAT_YUV444P10
@ OVERLAY_FORMAT_YUV444P10
Definition: vf_overlay.h:47
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:174
AV_PIX_FMT_YUVA444P10
#define AV_PIX_FMT_YUVA444P10
Definition: pixfmt.h:575
OVERLAY_FORMAT_AUTO
@ OVERLAY_FORMAT_AUTO
Definition: vf_overlay.h:50
DEFINE_ALPHA_COMPOSITE
#define DEFINE_ALPHA_COMPOSITE(depth, nbits)
Definition: vf_overlay.c:575
AV_PIX_FMT_ARGB
@ AV_PIX_FMT_ARGB
packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
Definition: pixfmt.h:99
DEFINE_BLEND_SLICE_PLANAR_FMT
#define DEFINE_BLEND_SLICE_PLANAR_FMT(format_, blend_slice_fn_suffix_, hsub_, vsub_, main_has_alpha_, direct_)
Definition: vf_overlay.c:689
FLAGS
#define FLAGS
Definition: vf_overlay.c:934
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
blend_slice_planar_rgb
static av_always_inline void blend_slice_planar_rgb(AVFilterContext *ctx, AVFrame *dst, const AVFrame *src, int hsub, int vsub, int main_has_alpha, int x, int y, int is_straight, int jobnr, int nb_jobs)
Definition: vf_overlay.c:660
FFMIN3
#define FFMIN3(a, b, c)
Definition: macros.h:50
common.h
ff_filter_get_nb_threads
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:840
ThreadData
Used for passing data between threads.
Definition: dsddec.c:71
EvalMode
EvalMode
Definition: af_volume.h:39
FILTER_QUERY_FUNC2
#define FILTER_QUERY_FUNC2(func)
Definition: filters.h:239
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_PIX_FMT_NV21
@ AV_PIX_FMT_NV21
as above, but U and V bytes are swapped
Definition: pixfmt.h:97
AVFilterPad::name
const char * name
Pad name.
Definition: filters.h:44
VAR_OVERLAY_H
@ VAR_OVERLAY_H
Definition: vf_overlay.h:31
VAR_OW
@ VAR_OW
Definition: scale_eval.c:45
normalize_xy
static int normalize_xy(double d, int chroma_sub)
Definition: vf_overlay.c:89
slice_start
static int slice_start(SliceContext *sc, VVCContext *s, VVCFrameContext *fc, const CodedBitstreamUnit *unit, const int is_first_slice)
Definition: dec.c:734
process_command
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args, char *res, int res_len, int flags)
Definition: vf_overlay.c:129
ret
ret
Definition: filter_design.txt:187
AV_PIX_FMT_NV12
@ AV_PIX_FMT_NV12
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
Definition: pixfmt.h:96
DEFINE_BLEND_PLANE
#define DEFINE_BLEND_PLANE(depth, nbits)
Definition: vf_overlay.c:447
MAIN
#define MAIN
Definition: vf_overlay.c:62
EOF_ACTION_REPEAT
@ EOF_ACTION_REPEAT
Definition: framesync.h:27
AVFrame::height
int height
Definition: frame.h:482
ff_set_common_formats_from_list2
int ff_set_common_formats_from_list2(const AVFilterContext *ctx, AVFilterFormatsConfig **cfg_in, AVFilterFormatsConfig **cfg_out, const int *fmts)
Definition: formats.c:1016
framesync.h
ff_filter_execute
int ff_filter_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg, int *ret, int nb_jobs)
Definition: avfilter.c:1658
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Underlying C type is int.
Definition: opt.h:259
avfilter.h
activate
static int activate(AVFilterContext *ctx)
Definition: vf_overlay.c:927
G
#define G
Definition: vf_overlay.c:66
av_image_fill_max_pixsteps
void av_image_fill_max_pixsteps(int max_pixsteps[4], int max_pixstep_comps[4], const AVPixFmtDescriptor *pixdesc)
Compute the max pixel step for each plane of an image with a format described by pixdesc.
Definition: imgutils.c:35
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
AVFilterContext
An instance of a filter.
Definition: avfilter.h:257
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
OVERLAY_FORMAT_GBRP
@ OVERLAY_FORMAT_GBRP
Definition: vf_overlay.h:49
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:150
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
FFFilter::p
AVFilter p
The public AVFilter.
Definition: filters.h:269
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
OVERLAY_FORMAT_YUV444
@ OVERLAY_FORMAT_YUV444
Definition: vf_overlay.h:46
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
FRAMESYNC_DEFINE_CLASS
FRAMESYNC_DEFINE_CLASS(overlay, OverlayContext, fs)
alpha
static const int16_t alpha[]
Definition: ilbcdata.h:55
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
Definition: opt.h:327
EVAL_MODE_INIT
@ EVAL_MODE_INIT
Definition: vf_overlay.c:75
ff_fill_rgba_map
int ff_fill_rgba_map(uint8_t *rgba_map, enum AVPixelFormat pix_fmt)
Definition: drawutils.c:81
DEFINE_BLEND_SLICE_YUV
#define DEFINE_BLEND_SLICE_YUV(depth, nbits)
Definition: vf_overlay.c:628
AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL
#define AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL
Same as AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, except that the filter will have its filter_frame() c...
Definition: avfilter.h:188
imgutils.h
timestamp.h
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
ff_framesync_activate
int ff_framesync_activate(FFFrameSync *fs)
Examine the frames in the filter's input and try to produce output.
Definition: framesync.c:352
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Underlying C type is a uint8_t* that is either NULL or points to a C string allocated with the av_mal...
Definition: opt.h:276
ff_framesync_dualinput_get_writable
int ff_framesync_dualinput_get_writable(FFFrameSync *fs, AVFrame **f0, AVFrame **f1)
Same as ff_framesync_dualinput_get(), but make sure that f0 is writable.
Definition: framesync.c:410
drawutils.h
alpha_pix_fmts
static enum AVPixelFormat alpha_pix_fmts[]
Definition: vf_overlay.c:154
OFFSET
#define OFFSET(x)
Definition: vf_overlay.c:933
VAR_OVERLAY_W
@ VAR_OVERLAY_W
Definition: vf_overlay.h:30
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Special option type for declaring named constants.
Definition: opt.h:299
AVPixFmtDescriptor::log2_chroma_h
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:89
src
#define src
Definition: vp8dsp.c:248
AV_PIX_FMT_YUVA422P
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
Definition: pixfmt.h:173
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:3261
init
static av_cold int init(AVFilterContext *ctx)
Definition: vf_overlay.c:919