FFmpeg
vf_cropdetect.c
/*
 * Copyright (c) 2002 A'rpi
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/**
 * @file
 * border detection filter
 * Ported from MPlayer libmpcodecs/vf_cropdetect.c.
 */
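
/*
 * Example usage (illustrative sketch; the input name and option values are
 * placeholders):
 *
 *   ffmpeg -i input.mkv -vf cropdetect=limit=0.1:round=2:reset=0 -f null -
 *
 * cropdetect only attaches lavfi.cropdetect.* metadata to frames and logs a
 * suggested crop=w:h:x:y string; it does not crop the video itself, so the
 * detected values are normally fed to the crop filter in a second pass.
 */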

#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/motion_vector.h"
#include "libavutil/qsort.h"

#include "avfilter.h"
#include "filters.h"
#include "video.h"
#include "edge_common.h"

typedef struct CropDetectContext {
    const AVClass *class;
    int x1, y1, x2, y2;
    float limit;
    float limit_upscaled;
    int round;
    int skip;
    int reset_count;
    int frame_nb;
    int max_pixsteps[4];
    int max_outliers;
    int mode;
    int window_size;
    int mv_threshold;
    int bitdepth;
    float low, high;
    uint8_t low_u8, high_u8;
    uint8_t *filterbuf;
    uint8_t *tmpbuf;
    uint16_t *gradients;
    char *directions;
    int *bboxes[4];
} CropDetectContext;

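/*
 * In mvedges mode the per-frame crop candidates are kept in bboxes[] as a
 * history of window_size entries (window_size = FFMAX(reset_count, 15), see
 * config_input()). Each frame the four coordinate histories are sorted and
 * their medians are used, which smooths out frames whose motion vectors are
 * sparse or noisy.
 */
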
static const enum AVPixelFormat pix_fmts[] = {
    AV_PIX_FMT_YUV420P,   AV_PIX_FMT_YUVJ420P,
    AV_PIX_FMT_YUV422P,   AV_PIX_FMT_YUVJ422P,
    AV_PIX_FMT_YUV444P,   AV_PIX_FMT_YUVJ444P,
    AV_PIX_FMT_YUV411P,   AV_PIX_FMT_GRAY8,
    AV_PIX_FMT_YUV440P,   AV_PIX_FMT_YUV410P,
    AV_PIX_FMT_NV12,      AV_PIX_FMT_NV21,
    AV_PIX_FMT_RGB24,     AV_PIX_FMT_BGR24,
    AV_PIX_FMT_RGBA,      AV_PIX_FMT_BGRA,
    AV_PIX_FMT_YUV420P9,  AV_PIX_FMT_YUV422P9,  AV_PIX_FMT_YUV444P9,
    AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
    AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
    AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
    AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
    AV_PIX_FMT_NONE
};

enum CropMode {
    MODE_BLACK,
    MODE_MV_EDGES,
    MODE_NB
};

static int comp(const int *a, const int *b)
{
    return FFDIFFSIGN(*a, *b);
}

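/*
 * Return the average component value of a line of pixels: len pixels of bpp
 * bytes each, starting at src and stepping by stride. The caller compares the
 * result against the configured limit to decide whether the line is dark
 * enough to belong to a border.
 */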
static int checkline(void *ctx, const unsigned char *src, int stride, int len, int bpp)
{
    int total = 0;
    int div = len;
    const uint16_t *src16 = (const uint16_t *)src;

    switch (bpp) {
    case 1:
        while (len >= 8) {
            total += src[0]          + src[stride]     + src[2*stride] + src[3*stride]
                   + src[4*stride]   + src[5*stride]   + src[6*stride] + src[7*stride];
            src += 8*stride;
            len -= 8;
        }
        while (--len >= 0) {
            total += src[0];
            src += stride;
        }
        break;
    case 2:
        stride >>= 1;
        while (len >= 8) {
            total += src16[0]        + src16[stride]   + src16[2*stride] + src16[3*stride]
                   + src16[4*stride] + src16[5*stride] + src16[6*stride] + src16[7*stride];
            src16 += 8*stride;
            len -= 8;
        }
        while (--len >= 0) {
            total += src16[0];
            src16 += stride;
        }
        break;
    case 3:
    case 4:
        while (len >= 4) {
            total += src[0]        + src[1]          + src[2]
                   + src[stride]   + src[1+stride]   + src[2+stride]
                   + src[2*stride] + src[1+2*stride] + src[2+2*stride]
                   + src[3*stride] + src[1+3*stride] + src[2+3*stride];
            src += 4*stride;
            len -= 4;
        }
        while (--len >= 0) {
            total += src[0] + src[1] + src[2];
            src += stride;
        }
        div *= 3;
        break;
    }
    total /= div;

    av_log(ctx, AV_LOG_DEBUG, "total:%d\n", total);
    return total;
}

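/*
 * Return 1 if every pixel in the given line of the (already thresholded)
 * edge image is zero, i.e. the line contains no detected edges; 0 otherwise.
 */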
static int checkline_edge(void *ctx, const unsigned char *src, int stride, int len, int bpp)
{
    const uint16_t *src16 = (const uint16_t *)src;

    switch (bpp) {
    case 1:
        while (--len >= 0) {
            if (src[0]) return 0;
            src += stride;
        }
        break;
    case 2:
        stride >>= 1;
        while (--len >= 0) {
            if (src16[0]) return 0;
            src16 += stride;
        }
        break;
    case 3:
    case 4:
        while (--len >= 0) {
            if (src[0] || src[1] || src[2]) return 0;
            src += stride;
        }
        break;
    }

    return 1;
}

static av_cold int init(AVFilterContext *ctx)
{
    CropDetectContext *s = ctx->priv;

    s->frame_nb = -1 * s->skip;
    s->low_u8   = s->low  * 255. + .5;
    s->high_u8  = s->high * 255. + .5;

    av_log(ctx, AV_LOG_VERBOSE, "limit:%f round:%d skip:%d reset_count:%d\n",
           s->limit, s->round, s->skip, s->reset_count);

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    CropDetectContext *s = ctx->priv;

    av_freep(&s->tmpbuf);
    av_freep(&s->filterbuf);
    av_freep(&s->gradients);
    av_freep(&s->directions);
    av_freep(&s->bboxes[0]);
    av_freep(&s->bboxes[1]);
    av_freep(&s->bboxes[2]);
    av_freep(&s->bboxes[3]);
}

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    CropDetectContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    const int bufsize = inlink->w * inlink->h;

    av_image_fill_max_pixsteps(s->max_pixsteps, NULL, desc);

    s->bitdepth = desc->comp[0].depth;

    if (s->limit < 1.0)
        s->limit_upscaled = s->limit * ((1 << s->bitdepth) - 1);
    else
        s->limit_upscaled = s->limit;

    s->x1 = inlink->w - 1;
    s->y1 = inlink->h - 1;
    s->x2 = 0;
    s->y2 = 0;

    s->window_size = FFMAX(s->reset_count, 15);
    s->tmpbuf      = av_malloc(bufsize);
    s->filterbuf   = av_malloc(bufsize * s->max_pixsteps[0]);
    s->gradients   = av_calloc(bufsize, sizeof(*s->gradients));
    s->directions  = av_malloc(bufsize);
    s->bboxes[0]   = av_malloc(s->window_size * sizeof(*s->bboxes[0]));
    s->bboxes[1]   = av_malloc(s->window_size * sizeof(*s->bboxes[1]));
    s->bboxes[2]   = av_malloc(s->window_size * sizeof(*s->bboxes[2]));
    s->bboxes[3]   = av_malloc(s->window_size * sizeof(*s->bboxes[3]));

    if (!s->tmpbuf || !s->filterbuf || !s->gradients || !s->directions ||
        !s->bboxes[0] || !s->bboxes[1] || !s->bboxes[2] || !s->bboxes[3])
        return AVERROR(ENOMEM);

    return 0;
}

#define SET_META(key, value) \
    av_dict_set_int(metadata, key, value, 0)

static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    CropDetectContext *s = ctx->priv;
    int bpp = s->max_pixsteps[0];
    int w, h, x, y, shrink_by, i;
    AVDictionary **metadata;
    int outliers, last_y;
    int limit_upscaled = lrint(s->limit_upscaled);
    char limit_str[22];

    const int inw = inlink->w;
    const int inh = inlink->h;
    uint8_t *tmpbuf     = s->tmpbuf;
    uint8_t *filterbuf  = s->filterbuf;
    uint16_t *gradients = s->gradients;
    int8_t *directions  = s->directions;
    const AVFrameSideData *sd = NULL;
    int scan_w, scan_h, bboff;

    void (*sobel)(int w, int h, uint16_t *dst, int dst_linesize,
                  int8_t *dir, int dir_linesize,
                  const uint8_t *src, int src_linesize, int src_stride) = (bpp == 2) ? &ff_sobel_16 : &ff_sobel_8;
    void (*gaussian_blur)(int w, int h,
                          uint8_t *dst, int dst_linesize,
                          const uint8_t *src, int src_linesize, int src_stride) = (bpp == 2) ? &ff_gaussian_blur_16 : &ff_gaussian_blur_8;

    // ignore the first s->skip frames
    if (++s->frame_nb > 0) {
        metadata = &frame->metadata;

        // Reset the crop area every reset_count frames, if reset_count is > 0
        if (s->reset_count > 0 && s->frame_nb > s->reset_count) {
            s->x1 = frame->width  - 1;
            s->y1 = frame->height - 1;
            s->x2 = 0;
            s->y2 = 0;
            s->frame_nb = 1;
        }

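/*
 * FIND scans whole lines (or columns) from FROM towards the opposite border.
 * A line whose average value, as computed by checkline(), exceeds the limit
 * counts as an outlier; once more than max_outliers such lines are seen the
 * scan stops and DST keeps the position just past the last dark line found.
 */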
#define FIND(DST, FROM, NOEND, INC, STEP0, STEP1, LEN) \
    outliers = 0;\
    for (last_y = y = FROM; NOEND; y = y INC) {\
        if (checkline(ctx, frame->data[0] + STEP0 * y, STEP1, LEN, bpp) > limit_upscaled) {\
            if (++outliers > s->max_outliers) { \
                DST = last_y;\
                break;\
            }\
        } else\
            last_y = y INC;\
    }

        if (s->mode == MODE_BLACK) {
            FIND(s->y1,                  0,               y < s->y1, +1, frame->linesize[0], bpp, frame->width);
            FIND(s->y2, frame->height - 1, y > FFMAX(s->y2, s->y1), -1, frame->linesize[0], bpp, frame->width);
            FIND(s->x1,                  0,               y < s->x1, +1, bpp, frame->linesize[0], frame->height);
            FIND(s->x2,  frame->width - 1, y > FFMAX(s->x2, s->x1), -1, bpp, frame->linesize[0], frame->height);
        } else { // MODE_MV_EDGES
            sd = av_frame_get_side_data(frame, AV_FRAME_DATA_MOTION_VECTORS);
            s->x1 = 0;
            s->y1 = 0;
            s->x2 = inw - 1;
            s->y2 = inh - 1;

            if (!sd) {
                av_log(ctx, AV_LOG_WARNING, "Cannot detect: no motion vectors available");
            } else {
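                /*
                 * The block below is essentially a Canny-style edge detector
                 * run on plane 0: Gaussian blur, Sobel gradients, non-maximum
                 * suppression, then double thresholding. The motion-vector
                 * bounding box computed afterwards is grown outwards until the
                 * scanned lines of the edge image contain no more edges.
                 */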
                // gaussian filter to reduce noise
                gaussian_blur(inw, inh,
                              filterbuf,      inw*bpp,
                              frame->data[0], frame->linesize[0], bpp);

                // compute the 16-bit gradients and directions for the next step
                sobel(inw, inh, gradients, inw, directions, inw, filterbuf, inw*bpp, bpp);

                // non_maximum_suppression() will actually keep & clip what's necessary and
                // ignore the rest, so we need a clean output buffer
                memset(tmpbuf, 0, inw * inh);
                ff_non_maximum_suppression(inw, inh, tmpbuf, inw, directions, inw, gradients, inw);

                // keep high values, or low values surrounded by high values
                ff_double_threshold(s->low_u8, s->high_u8, inw, inh,
                                    tmpbuf, inw, tmpbuf, inw);

                // scan all MVs and store the bounding box
                s->x1 = inw - 1;
                s->y1 = inh - 1;
                s->x2 = 0;
                s->y2 = 0;
                for (i = 0; i < sd->size / sizeof(AVMotionVector); i++) {
                    const AVMotionVector *mv = (const AVMotionVector*)sd->data + i;
                    const int mx = mv->dst_x - mv->src_x;
                    const int my = mv->dst_y - mv->src_y;

                    if (mv->dst_x >= 0 && mv->dst_x < inw &&
                        mv->dst_y >= 0 && mv->dst_y < inh &&
                        mv->src_x >= 0 && mv->src_x < inw &&
                        mv->src_y >= 0 && mv->src_y < inh &&
                        mx * mx + my * my >= s->mv_threshold * s->mv_threshold) {
                        s->x1 = mv->dst_x < s->x1 ? mv->dst_x : s->x1;
                        s->y1 = mv->dst_y < s->y1 ? mv->dst_y : s->y1;
                        s->x2 = mv->dst_x > s->x2 ? mv->dst_x : s->x2;
                        s->y2 = mv->dst_y > s->y2 ? mv->dst_y : s->y2;
                    }
                }

                // make sure x1 <= x2 and y1 <= y2
                if (s->x1 > s->x2) FFSWAP(int, s->x1, s->x2);
                if (s->y1 > s->y2) FFSWAP(int, s->y1, s->y2);

                // scan outward looking for 0-edge-lines in the edge image
                scan_w = s->x2 - s->x1;
                scan_h = s->y2 - s->y1;

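/*
 * FIND_EDGE walks line by line from FROM towards the frame border. DST is
 * only set at a position where two consecutive all-zero lines are seen in
 * the thresholded edge image; if the border is reached without that
 * happening, DST becomes the last position scanned.
 */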
#define FIND_EDGE(DST, FROM, NOEND, INC, STEP0, STEP1, LEN) \
    for (last_y = y = FROM; NOEND; y = y INC) { \
        if (checkline_edge(ctx, tmpbuf + STEP0 * y, STEP1, LEN, bpp)) { \
            if (last_y INC == y) { \
                DST = y; \
                break; \
            } else \
                last_y = y; \
        } \
    } \
    if (!(NOEND)) { \
        DST = y - (INC); \
    }

                FIND_EDGE(s->y1, s->y1, y >= 0,  -1, inw, bpp, scan_w);
                FIND_EDGE(s->y2, s->y2, y < inh, +1, inw, bpp, scan_w);
                FIND_EDGE(s->x1, s->x1, y >= 0,  -1, bpp, inw, scan_h);
                FIND_EDGE(s->x2, s->x2, y < inw, +1, bpp, inw, scan_h);

                // queue bboxes
                bboff = (s->frame_nb - 1) % s->window_size;
                s->bboxes[0][bboff] = s->x1;
                s->bboxes[1][bboff] = s->x2;
                s->bboxes[2][bboff] = s->y1;
                s->bboxes[3][bboff] = s->y2;

                // sort queue
                bboff = FFMIN(s->frame_nb, s->window_size);
                AV_QSORT(s->bboxes[0], bboff, int, comp);
                AV_QSORT(s->bboxes[1], bboff, int, comp);
                AV_QSORT(s->bboxes[2], bboff, int, comp);
                AV_QSORT(s->bboxes[3], bboff, int, comp);

                // return median of window_size elems
                s->x1 = s->bboxes[0][bboff/2];
                s->x2 = s->bboxes[1][bboff/2];
                s->y1 = s->bboxes[2][bboff/2];
                s->y2 = s->bboxes[3][bboff/2];
            }
        }

        // round x and y (up), important for yuv colorspaces
        // make sure they stay rounded!
        x = (s->x1+1) & ~1;
        y = (s->y1+1) & ~1;

        w = s->x2 - x + 1;
        h = s->y2 - y + 1;

        // w and h must be divisible by 2 as well because of yuv
        // colorspace problems.
        if (s->round <= 1)
            s->round = 16;
        if (s->round % 2)
            s->round *= 2;

        shrink_by = w % s->round;
        w -= shrink_by;
        x += (shrink_by/2 + 1) & ~1;

        shrink_by = h % s->round;
        h -= shrink_by;
        y += (shrink_by/2 + 1) & ~1;
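
        /*
         * Worked example (hypothetical values): with x1 = 5, x2 = 713 and
         * round = 16, x becomes (5+1) & ~1 = 6 and w = 713 - 6 + 1 = 708;
         * shrink_by = 708 % 16 = 4, so w is reduced to 704 and x moves by
         * (4/2 + 1) & ~1 = 2 to 8, keeping the window roughly centred while
         * staying even and divisible by round.
         */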

        SET_META("lavfi.cropdetect.x1", s->x1);
        SET_META("lavfi.cropdetect.x2", s->x2);
        SET_META("lavfi.cropdetect.y1", s->y1);
        SET_META("lavfi.cropdetect.y2", s->y2);
        SET_META("lavfi.cropdetect.w",  w);
        SET_META("lavfi.cropdetect.h",  h);
        SET_META("lavfi.cropdetect.x",  x);
        SET_META("lavfi.cropdetect.y",  y);

        snprintf(limit_str, sizeof(limit_str), "%f", s->limit);
        av_dict_set(metadata, "lavfi.cropdetect.limit", limit_str, 0);

        av_log(ctx, AV_LOG_INFO,
               "x1:%d x2:%d y1:%d y2:%d w:%d h:%d x:%d y:%d pts:%"PRId64" t:%f limit:%f crop=%d:%d:%d:%d\n",
               s->x1, s->x2, s->y1, s->y2, w, h, x, y, frame->pts,
               frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base),
               s->limit, w, h, x, y);
    }

    return ff_filter_frame(inlink->dst->outputs[0], frame);
}

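/*
 * Runtime option handling: only "limit" carries the RUNTIME flag (TFLAGS).
 * When it changes, limit_upscaled is recomputed for the configured bit depth
 * and frame_nb is set to reset_count, which forces a crop-area reset on the
 * next frame whenever reset_count is non-zero.
 */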
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    CropDetectContext *s = ctx->priv;
    float old_limit = s->limit;
    int ret;

    if ((ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags)) < 0)
        return ret;

    if (old_limit != s->limit) {
        if (s->limit < 1.0)
            s->limit_upscaled = s->limit * ((1 << s->bitdepth) - 1);
        else
            s->limit_upscaled = s->limit;
        s->frame_nb = s->reset_count;
    }

    return 0;
}

#define OFFSET(x) offsetof(CropDetectContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define TFLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_RUNTIME_PARAM

static const AVOption cropdetect_options[] = {
    { "limit", "Threshold below which the pixel is considered black", OFFSET(limit),       AV_OPT_TYPE_FLOAT, { .dbl = 24.0/255 }, 0, 65535, TFLAGS },
    { "round", "Value by which the width/height should be divisible", OFFSET(round),       AV_OPT_TYPE_INT,   { .i64 = 16 }, 0, INT_MAX, FLAGS },
    { "reset", "Recalculate the crop area after this many frames",    OFFSET(reset_count), AV_OPT_TYPE_INT,   { .i64 = 0 },  0, INT_MAX, FLAGS },
    { "skip",  "Number of initial frames to skip",                    OFFSET(skip),        AV_OPT_TYPE_INT,   { .i64 = 2 },  0, INT_MAX, FLAGS },
    { "reset_count", "Recalculate the crop area after this many frames", OFFSET(reset_count), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
    { "max_outliers", "Threshold count of outliers",                  OFFSET(max_outliers), AV_OPT_TYPE_INT,  { .i64 = 0 },  0, INT_MAX, FLAGS },
    { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_BLACK}, 0, MODE_NB-1, FLAGS, .unit = "mode" },
    { "black",   "detect black pixels surrounding the video",     0, AV_OPT_TYPE_CONST, {.i64=MODE_BLACK},    INT_MIN, INT_MAX, FLAGS, .unit = "mode" },
    { "mvedges", "detect motion and edges surrounding the video", 0, AV_OPT_TYPE_CONST, {.i64=MODE_MV_EDGES}, INT_MIN, INT_MAX, FLAGS, .unit = "mode" },
    { "high", "Set high threshold for edge detection", OFFSET(high), AV_OPT_TYPE_FLOAT, {.dbl=25/255.}, 0, 1, FLAGS },
    { "low",  "Set low threshold for edge detection",  OFFSET(low),  AV_OPT_TYPE_FLOAT, {.dbl=15/255.}, 0, 1, FLAGS },
    { "mv_threshold", "motion vector threshold when estimating video window size", OFFSET(mv_threshold), AV_OPT_TYPE_INT, {.i64=8}, 0, 100, FLAGS },
    { NULL }
};
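
/*
 * "reset" and "reset_count" intentionally point at the same field; the short
 * name appears to be kept as a compatibility alias for the original MPlayer
 * option.
 */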

AVFILTER_DEFINE_CLASS(cropdetect);

static const AVFilterPad avfilter_vf_cropdetect_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
};

const AVFilter ff_vf_cropdetect = {
    .name            = "cropdetect",
    .description     = NULL_IF_CONFIG_SMALL("Auto-detect crop size."),
    .priv_size       = sizeof(CropDetectContext),
    .priv_class      = &cropdetect_class,
    .init            = init,
    .uninit          = uninit,
    FILTER_INPUTS(avfilter_vf_cropdetect_inputs),
    FILTER_OUTPUTS(ff_video_default_filterpad),
    FILTER_PIXFMTS_ARRAY(pix_fmts),
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_METADATA_ONLY,
    .process_command = process_command,
};