FFmpeg
ffmpeg_filter.c
Go to the documentation of this file.
1 /*
2  * ffmpeg filter configuration
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 
23 #include "ffmpeg.h"
24 #include "graph/graphprint.h"
25 
26 #include "libavfilter/avfilter.h"
27 #include "libavfilter/buffersink.h"
28 #include "libavfilter/buffersrc.h"
29 
30 #include "libavutil/avassert.h"
31 #include "libavutil/avstring.h"
32 #include "libavutil/bprint.h"
34 #include "libavutil/downmix_info.h"
35 #include "libavutil/mem.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/pixdesc.h"
38 #include "libavutil/pixfmt.h"
39 #include "libavutil/samplefmt.h"
40 #include "libavutil/time.h"
41 #include "libavutil/timestamp.h"
42 
43 // FIXME private header, used for mid_pred()
44 #include "libavcodec/mathops.h"
45 
46 typedef struct FilterGraphPriv {
48 
49  // name used for logging
50  char log_name[32];
51 
52  int is_simple;
53  // true when the filtergraph contains only meta filters
54  // that do not modify the frame data
55  int is_meta;
56  // source filters are present in the graph
59 
60  unsigned nb_outputs_done;
61 
63 
64  // frame for temporarily holding output from the filtergraph
66  // frame for sending output to the encoder
68 
70  unsigned sch_idx;
72 
74 {
75  return (FilterGraphPriv*)fg;
76 }
77 
// Const-qualified counterpart of fgp_from_fg(): downcast a FilterGraph to its
// containing FilterGraphPriv. NOTE(review): the cast assumes FilterGraph is
// the first member of FilterGraphPriv -- the struct layout is not fully
// visible in this extraction, confirm against the full source.
78 static const FilterGraphPriv *cfgp_from_cfg(const FilterGraph *fg)
79 {
80  return (const FilterGraphPriv*)fg;
81 }
82 
83 // data that is local to the filter thread and not visible outside of it
84 typedef struct FilterGraphThread {
86 
88 
89  // Temporary buffer for output frames, since on filtergraph reset
90  // we cannot send them to encoders immediately.
91  // The output index is stored in frame opaque.
93 
94  // index of the next input to request from the scheduler
95  unsigned next_in;
96  // set to 1 after at least one frame passed through this output
97  int got_frame;
98 
99  // EOF status of each input/output, as received by the thread
100  uint8_t *eof_in;
101  uint8_t *eof_out;
103 
104 typedef struct InputFilterPriv {
106 
108 
109  // used to hold submitted input
111 
112  // For inputs bound to a filtergraph output
114 
115  // source data type: AVMEDIA_TYPE_SUBTITLE for sub2video,
116  // same as type otherwise
118 
119  int eof;
120  int bound;
122  uint64_t nb_dropped;
123 
124  // parameters configured for this input
125  int format;
126 
127  int width, height;
132 
135 
137 
140 
142 
144 
148 
151 
152  struct {
153  AVFrame *frame;
154 
157 
158  /// marks if sub2video_update should force an initialization
159  unsigned int initialize;
160  } sub2video;
162 
164 {
165  return (InputFilterPriv*)ifilter;
166 }
167 
168 typedef struct FPSConvContext {
170  /* number of frames emitted by the video-encoding sync code */
172  /* history of nb_frames_prev, i.e. the number of times the
173  * previous frame was duplicated by vsync code in recent
174  * do_video_out() calls */
176 
177  uint64_t dup_warning;
178 
181 
183 
189 
190 typedef struct OutputFilterPriv {
192 
193  void *log_parent;
194  char log_name[32];
195 
196  int needed;
197 
198  /* desired output stream properties */
199  int format;
200  int width, height;
206 
207  unsigned crop_top;
208  unsigned crop_bottom;
209  unsigned crop_left;
210  unsigned crop_right;
211 
214 
215  // time base in which the output is sent to our downstream
216  // does not need to match the filtersink's timebase
218  // at least one frame with the above timebase was sent
219  // to our downstream, so it cannot change anymore
221 
223 
226 
227  // those are only set if no format is specified and the encoder gives us multiple options
228  // They point directly to the relevant lists of the encoder.
229  const int *formats;
231  const int *sample_rates;
235 
237 
241  // offset for output timestamps, in AV_TIME_BASE_Q
245 
246  unsigned flags;
248 
250 {
251  return (OutputFilterPriv*)ofilter;
252 }
253 
// Deferred filter command payload, released via filter_command_free().
// NOTE(review): target/command/arg presumably mirror the parameters of the
// avfilter graph command API -- confirm at the point where the command is sent.
254 typedef struct FilterCommand {
255  char *target;
256  char *command;
257  char *arg;
258 
259  double time;
261 } FilterCommand;
262 
// Free callback for a FilterCommand carried as opaque buffer data: releases
// the owned strings, then the FilterCommand itself. The (opaque, data)
// signature matches an AVBuffer free callback -- confirm at the creation site.
// NOTE(review): the extraction dropped the line declaring 'fc' here (it should
// cast 'data' to FilterCommand* before the av_freep() calls below).
263 static void filter_command_free(void *opaque, uint8_t *data)
264 {
266 
267  av_freep(&fc->target);
268  av_freep(&fc->command);
269  av_freep(&fc->arg);
270 
271  av_free(data);
272 }
273 
275 {
276  AVFrame *frame = ifp->sub2video.frame;
277  int ret;
278 
280 
281  frame->width = ifp->width;
282  frame->height = ifp->height;
283  frame->format = ifp->format;
284  frame->colorspace = ifp->color_space;
285  frame->color_range = ifp->color_range;
286  frame->alpha_mode = ifp->alpha_mode;
287 
289  if (ret < 0)
290  return ret;
291 
292  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
293 
294  return 0;
295 }
296 
297 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
298  AVSubtitleRect *r)
299 {
300  uint32_t *pal, *dst2;
301  uint8_t *src, *src2;
302  int x, y;
303 
304  if (r->type != SUBTITLE_BITMAP) {
305  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
306  return;
307  }
308  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
309  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
310  r->x, r->y, r->w, r->h, w, h
311  );
312  return;
313  }
314 
315  dst += r->y * dst_linesize + r->x * 4;
316  src = r->data[0];
317  pal = (uint32_t *)r->data[1];
318  for (y = 0; y < r->h; y++) {
319  dst2 = (uint32_t *)dst;
320  src2 = src;
321  for (x = 0; x < r->w; x++)
322  *(dst2++) = pal[*(src2++)];
323  dst += dst_linesize;
324  src += r->linesize[0];
325  }
326 }
327 
329 {
330  AVFrame *frame = ifp->sub2video.frame;
331  int ret;
332 
333  av_assert1(frame->data[0]);
334  ifp->sub2video.last_pts = frame->pts = pts;
338  if (ret != AVERROR_EOF && ret < 0)
340  "Error while add the frame to buffer source(%s).\n",
341  av_err2str(ret));
342 }
343 
344 static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts,
345  const AVSubtitle *sub)
346 {
347  AVFrame *frame = ifp->sub2video.frame;
348  int8_t *dst;
349  int dst_linesize;
350  int num_rects;
351  int64_t pts, end_pts;
352 
353  if (sub) {
354  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
355  AV_TIME_BASE_Q, ifp->time_base);
356  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
357  AV_TIME_BASE_Q, ifp->time_base);
358  num_rects = sub->num_rects;
359  } else {
360  /* If we are initializing the system, utilize current heartbeat
361  PTS as the start time, and show until the following subpicture
362  is received. Otherwise, utilize the previous subpicture's end time
363  as the fall-back value. */
364  pts = ifp->sub2video.initialize ?
365  heartbeat_pts : ifp->sub2video.end_pts;
366  end_pts = INT64_MAX;
367  num_rects = 0;
368  }
369  if (sub2video_get_blank_frame(ifp) < 0) {
371  "Impossible to get a blank canvas.\n");
372  return;
373  }
374  dst = frame->data [0];
375  dst_linesize = frame->linesize[0];
376  for (int i = 0; i < num_rects; i++)
377  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
378  sub2video_push_ref(ifp, pts);
379  ifp->sub2video.end_pts = end_pts;
380  ifp->sub2video.initialize = 0;
381 }
382 
383 /* Define a function for appending a list of allowed formats
384  * to an AVBPrint. If nonempty, the list will have a header. */
385 #define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name) \
386 static void choose_ ## name (OutputFilterPriv *ofp, AVBPrint *bprint) \
387 { \
388  if (ofp->var == none && !ofp->supported_list) \
389  return; \
390  av_bprintf(bprint, #name "="); \
391  if (ofp->var != none) { \
392  av_bprintf(bprint, printf_format, get_name(ofp->var)); \
393  } else { \
394  const type *p; \
395  \
396  for (p = ofp->supported_list; *p != none; p++) { \
397  av_bprintf(bprint, printf_format "|", get_name(*p)); \
398  } \
399  if (bprint->len > 0) \
400  bprint->str[--bprint->len] = '\0'; \
401  } \
402  av_bprint_chars(bprint, ':', 1); \
403 }
404 
407 
410 
412  "%d", )
413 
414 DEF_CHOOSE_FORMAT(color_spaces, enum AVColorSpace, color_space, color_spaces,
416 
417 DEF_CHOOSE_FORMAT(color_ranges, enum AVColorRange, color_range, color_ranges,
419 
420 DEF_CHOOSE_FORMAT(alpha_modes, enum AVAlphaMode, alpha_mode, alpha_modes,
422 
423 static void choose_channel_layouts(OutputFilterPriv *ofp, AVBPrint *bprint)
424 {
425  if (av_channel_layout_check(&ofp->ch_layout)) {
426  av_bprintf(bprint, "channel_layouts=");
427  av_channel_layout_describe_bprint(&ofp->ch_layout, bprint);
428  } else if (ofp->ch_layouts) {
429  const AVChannelLayout *p;
430 
431  av_bprintf(bprint, "channel_layouts=");
432  for (p = ofp->ch_layouts; p->nb_channels; p++) {
434  av_bprintf(bprint, "|");
435  }
436  if (bprint->len > 0)
437  bprint->str[--bprint->len] = '\0';
438  } else
439  return;
440  av_bprint_chars(bprint, ':', 1);
441 }
442 
443 static int read_binary(void *logctx, const char *path,
444  uint8_t **data, int *len)
445 {
446  AVIOContext *io = NULL;
447  int64_t fsize;
448  int ret;
449 
450  *data = NULL;
451  *len = 0;
452 
453  ret = avio_open2(&io, path, AVIO_FLAG_READ, &int_cb, NULL);
454  if (ret < 0) {
455  av_log(logctx, AV_LOG_ERROR, "Cannot open file '%s': %s\n",
456  path, av_err2str(ret));
457  return ret;
458  }
459 
460  fsize = avio_size(io);
461  if (fsize < 0 || fsize > INT_MAX) {
462  av_log(logctx, AV_LOG_ERROR, "Cannot obtain size of file %s\n", path);
463  ret = AVERROR(EIO);
464  goto fail;
465  }
466 
467  *data = av_malloc(fsize);
468  if (!*data) {
469  ret = AVERROR(ENOMEM);
470  goto fail;
471  }
472 
473  ret = avio_read(io, *data, fsize);
474  if (ret != fsize) {
475  av_log(logctx, AV_LOG_ERROR, "Error reading file %s\n", path);
476  ret = ret < 0 ? ret : AVERROR(EIO);
477  goto fail;
478  }
479 
480  *len = fsize;
481 
482  ret = 0;
483 fail:
484  avio_close(io);
485  if (ret < 0) {
486  av_freep(data);
487  *len = 0;
488  }
489  return ret;
490 }
491 
492 static int filter_opt_apply(void *logctx, AVFilterContext *f,
493  const char *key, const char *val)
494 {
495  const AVOption *o = NULL;
496  int ret;
497 
499  if (ret >= 0)
500  return 0;
501 
502  if (ret == AVERROR_OPTION_NOT_FOUND && key[0] == '/')
504  if (!o)
505  goto err_apply;
506 
507  // key is a valid option name prefixed with '/'
508  // interpret value as a path from which to load the actual option value
509  key++;
510 
511  if (o->type == AV_OPT_TYPE_BINARY) {
512  uint8_t *data;
513  int len;
514 
515  ret = read_binary(logctx, val, &data, &len);
516  if (ret < 0)
517  goto err_load;
518 
520  av_freep(&data);
521  } else {
522  char *data = read_file_to_string(val);
523  if (!data) {
524  ret = AVERROR(EIO);
525  goto err_load;
526  }
527 
529  av_freep(&data);
530  }
531  if (ret < 0)
532  goto err_apply;
533 
534  return 0;
535 
536 err_apply:
537  av_log(logctx, AV_LOG_ERROR,
538  "Error applying option '%s' to filter '%s': %s\n",
539  key, f->filter->name, av_err2str(ret));
540  return ret;
541 err_load:
542  av_log(logctx, AV_LOG_ERROR,
543  "Error loading value for option '%s' from file '%s'\n",
544  key, val);
545  return ret;
546 }
547 
548 static int graph_opts_apply(void *logctx, AVFilterGraphSegment *seg)
549 {
550  for (size_t i = 0; i < seg->nb_chains; i++) {
551  AVFilterChain *ch = seg->chains[i];
552 
553  for (size_t j = 0; j < ch->nb_filters; j++) {
554  AVFilterParams *p = ch->filters[j];
555  const AVDictionaryEntry *e = NULL;
556 
557  av_assert0(p->filter);
558 
559  while ((e = av_dict_iterate(p->opts, e))) {
560  int ret = filter_opt_apply(logctx, p->filter, e->key, e->value);
561  if (ret < 0)
562  return ret;
563  }
564 
565  av_dict_free(&p->opts);
566  }
567  }
568 
569  return 0;
570 }
571 
572 static int graph_parse(void *logctx,
573  AVFilterGraph *graph, const char *desc,
575  AVBufferRef *hw_device)
576 {
578  int ret;
579 
580  *inputs = NULL;
581  *outputs = NULL;
582 
583  ret = avfilter_graph_segment_parse(graph, desc, 0, &seg);
584  if (ret < 0)
585  return ret;
586 
588  if (ret < 0)
589  goto fail;
590 
591  if (hw_device) {
592  for (int i = 0; i < graph->nb_filters; i++) {
593  AVFilterContext *f = graph->filters[i];
594 
595  if (!(f->filter->flags & AVFILTER_FLAG_HWDEVICE))
596  continue;
597  f->hw_device_ctx = av_buffer_ref(hw_device);
598  if (!f->hw_device_ctx) {
599  ret = AVERROR(ENOMEM);
600  goto fail;
601  }
602  }
603  }
604 
605  ret = graph_opts_apply(logctx, seg);
606  if (ret < 0)
607  goto fail;
608 
610 
611 fail:
613  return ret;
614 }
615 
616 // Filters can be configured only if the formats of all inputs are known.
618 {
619  for (int i = 0; i < fg->nb_inputs; i++) {
621  if (ifp->format < 0)
622  return 0;
623  }
624  return 1;
625 }
626 
627 static int filter_thread(void *arg);
628 
629 static char *describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
630 {
631  AVFilterContext *ctx = inout->filter_ctx;
632  AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads;
633  int nb_pads = in ? ctx->nb_inputs : ctx->nb_outputs;
634 
635  if (nb_pads > 1)
636  return av_strdup(ctx->filter->name);
637  return av_asprintf("%s:%s", ctx->filter->name,
638  avfilter_pad_get_name(pads, inout->pad_idx));
639 }
640 
641 static const char *ofilter_item_name(void *obj)
642 {
643  OutputFilterPriv *ofp = obj;
644  return ofp->log_name;
645 }
646 
// AVClass attached to each OutputFilter(Priv): gives av_log() the per-output
// log_name and chains log output to the parent context stored in log_parent.
647 static const AVClass ofilter_class = {
648  .class_name = "OutputFilter",
649  .version = LIBAVUTIL_VERSION_INT,
650  .item_name = ofilter_item_name,
651  .parent_log_context_offset = offsetof(OutputFilterPriv, log_parent),
652  .category = AV_CLASS_CATEGORY_FILTER,
653 };
654 
656 {
657  OutputFilterPriv *ofp;
658  OutputFilter *ofilter;
659 
660  ofp = allocate_array_elem(&fg->outputs, sizeof(*ofp), &fg->nb_outputs);
661  if (!ofp)
662  return NULL;
663 
664  ofilter = &ofp->ofilter;
665  ofilter->class = &ofilter_class;
666  ofp->log_parent = fg;
667  ofilter->graph = fg;
668  ofilter->type = type;
669  ofp->format = -1;
673  ofilter->index = fg->nb_outputs - 1;
674 
675  snprintf(ofp->log_name, sizeof(ofp->log_name), "%co%d",
676  av_get_media_type_string(type)[0], ofilter->index);
677 
678  return ofilter;
679 }
680 
681 static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist,
682  const ViewSpecifier *vs)
683 {
684  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
685  FilterGraphPriv *fgp = fgp_from_fg(ifilter->graph);
687  int ret;
688 
689  av_assert0(!ifp->bound);
690  ifp->bound = 1;
691 
692  if (ifilter->type != ist->par->codec_type &&
693  !(ifilter->type == AVMEDIA_TYPE_VIDEO && ist->par->codec_type == AVMEDIA_TYPE_SUBTITLE)) {
694  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s stream to %s filtergraph input\n",
696  return AVERROR(EINVAL);
697  }
698 
699  ifp->type_src = ist->st->codecpar->codec_type;
700 
701  ifp->opts.fallback = av_frame_alloc();
702  if (!ifp->opts.fallback)
703  return AVERROR(ENOMEM);
704 
705  ret = ist_filter_add(ist, ifilter, filtergraph_is_simple(ifilter->graph),
706  vs, &ifp->opts, &src);
707  if (ret < 0)
708  return ret;
709 
710  ifilter->input_name = av_strdup(ifp->opts.name);
711  if (!ifilter->input_name)
712  return AVERROR(EINVAL);
713 
714  ret = sch_connect(fgp->sch,
715  src, SCH_FILTER_IN(fgp->sch_idx, ifilter->index));
716  if (ret < 0)
717  return ret;
718 
719  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
720  ifp->sub2video.frame = av_frame_alloc();
721  if (!ifp->sub2video.frame)
722  return AVERROR(ENOMEM);
723 
724  ifp->width = ifp->opts.sub2video_width;
725  ifp->height = ifp->opts.sub2video_height;
726 
727  /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
728  palettes for all rectangles are identical or compatible */
729  ifp->format = AV_PIX_FMT_RGB32;
730 
731  ifp->time_base = AV_TIME_BASE_Q;
732 
733  av_log(fgp, AV_LOG_VERBOSE, "sub2video: using %dx%d canvas\n",
734  ifp->width, ifp->height);
735  }
736 
737  return 0;
738 }
739 
741  const ViewSpecifier *vs)
742 {
745  int ret;
746 
747  av_assert0(!ifp->bound);
748  ifp->bound = 1;
749 
750  if (ifp->ifilter.type != dec->type) {
751  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s decoder to %s filtergraph input\n",
753  return AVERROR(EINVAL);
754  }
755 
756  ifp->type_src = ifp->ifilter.type;
757 
758  ret = dec_filter_add(dec, &ifp->ifilter, &ifp->opts, vs, &src);
759  if (ret < 0)
760  return ret;
761 
762  ifp->ifilter.input_name = av_strdup(ifp->opts.name);
763  if (!ifp->ifilter.input_name)
764  return AVERROR(EINVAL);
765 
766  ret = sch_connect(fgp->sch, src, SCH_FILTER_IN(fgp->sch_idx, ifp->ifilter.index));
767  if (ret < 0)
768  return ret;
769 
770  return 0;
771 }
772 
773 static int set_channel_layout(OutputFilterPriv *f, const AVChannelLayout *layouts_allowed,
774  const AVChannelLayout *layout_requested)
775 {
776  int i, err;
777 
778  if (layout_requested->order != AV_CHANNEL_ORDER_UNSPEC) {
779  /* Pass the layout through for all orders but UNSPEC */
780  err = av_channel_layout_copy(&f->ch_layout, layout_requested);
781  if (err < 0)
782  return err;
783  return 0;
784  }
785 
786  /* Requested layout is of order UNSPEC */
787  if (!layouts_allowed) {
788  /* Use the default native layout for the requested amount of channels when the
789  encoder doesn't have a list of supported layouts */
790  av_channel_layout_default(&f->ch_layout, layout_requested->nb_channels);
791  return 0;
792  }
793  /* Encoder has a list of supported layouts. Pick the first layout in it with the
794  same amount of channels as the requested layout */
795  for (i = 0; layouts_allowed[i].nb_channels; i++) {
796  if (layouts_allowed[i].nb_channels == layout_requested->nb_channels)
797  break;
798  }
799  if (layouts_allowed[i].nb_channels) {
800  /* Use it if one is found */
801  err = av_channel_layout_copy(&f->ch_layout, &layouts_allowed[i]);
802  if (err < 0)
803  return err;
804  return 0;
805  }
806  /* If no layout for the amount of channels requested was found, use the default
807  native layout for it. */
808  av_channel_layout_default(&f->ch_layout, layout_requested->nb_channels);
809 
810  return 0;
811 }
812 
813 int ofilter_bind_enc(OutputFilter *ofilter, unsigned sched_idx_enc,
814  const OutputFilterOptions *opts)
815 {
816  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
817  FilterGraph *fg = ofilter->graph;
818  FilterGraphPriv *fgp = fgp_from_fg(fg);
819  int ret;
820 
821  av_assert0(!ofilter->bound);
822  av_assert0(!opts->enc ||
823  ofilter->type == opts->enc->type);
824 
825  ofp->needed = ofilter->bound = 1;
826  av_freep(&ofilter->linklabel);
827 
828  ofp->flags |= opts->flags;
829  ofp->ts_offset = opts->ts_offset;
830  ofp->enc_timebase = opts->output_tb;
831 
832  ofp->trim_start_us = opts->trim_start_us;
833  ofp->trim_duration_us = opts->trim_duration_us;
834 
835  ofilter->output_name = av_strdup(opts->name);
836  if (!ofilter->output_name)
837  return AVERROR(EINVAL);
838 
839  ret = av_dict_copy(&ofp->sws_opts, opts->sws_opts, 0);
840  if (ret < 0)
841  return ret;
842 
843  ret = av_dict_copy(&ofp->swr_opts, opts->swr_opts, 0);
844  if (ret < 0)
845  return ret;
846 
847  if (opts->flags & OFILTER_FLAG_AUDIO_24BIT)
848  av_dict_set(&ofp->swr_opts, "output_sample_bits", "24", 0);
849 
850  if (fgp->is_simple) {
851  // for simple filtergraph there is just one output,
852  // so use only graph-level information for logging
853  ofp->log_parent = NULL;
854  av_strlcpy(ofp->log_name, fgp->log_name, sizeof(ofp->log_name));
855  } else
856  av_strlcatf(ofp->log_name, sizeof(ofp->log_name), "->%s", ofilter->output_name);
857 
858  switch (ofilter->type) {
859  case AVMEDIA_TYPE_VIDEO:
860  ofp->width = opts->width;
861  ofp->height = opts->height;
862  if (opts->format != AV_PIX_FMT_NONE) {
863  ofp->format = opts->format;
864  } else
865  ofp->formats = opts->formats;
866 
867  if (opts->color_space != AVCOL_SPC_UNSPECIFIED)
868  ofp->color_space = opts->color_space;
869  else
870  ofp->color_spaces = opts->color_spaces;
871 
872  if (opts->color_range != AVCOL_RANGE_UNSPECIFIED)
873  ofp->color_range = opts->color_range;
874  else
875  ofp->color_ranges = opts->color_ranges;
876 
877  if (opts->alpha_mode != AVALPHA_MODE_UNSPECIFIED)
878  ofp->alpha_mode = opts->alpha_mode;
879  else
880  ofp->alpha_modes = opts->alpha_modes;
881 
883 
884  ofp->fps.last_frame = av_frame_alloc();
885  if (!ofp->fps.last_frame)
886  return AVERROR(ENOMEM);
887 
888  ofp->fps.vsync_method = opts->vsync_method;
889  ofp->fps.framerate = opts->frame_rate;
890  ofp->fps.framerate_max = opts->max_frame_rate;
891  ofp->fps.framerate_supported = opts->frame_rates;
892 
893  // reduce frame rate for mpeg4 to be within the spec limits
894  if (opts->enc && opts->enc->id == AV_CODEC_ID_MPEG4)
895  ofp->fps.framerate_clip = 65535;
896 
897  ofp->fps.dup_warning = 1000;
898 
899  break;
900  case AVMEDIA_TYPE_AUDIO:
901  if (opts->format != AV_SAMPLE_FMT_NONE) {
902  ofp->format = opts->format;
903  } else {
904  ofp->formats = opts->formats;
905  }
906  if (opts->sample_rate) {
907  ofp->sample_rate = opts->sample_rate;
908  } else
909  ofp->sample_rates = opts->sample_rates;
910  if (opts->ch_layout.nb_channels) {
911  int ret = set_channel_layout(ofp, opts->ch_layouts, &opts->ch_layout);
912  if (ret < 0)
913  return ret;
914  } else {
915  ofp->ch_layouts = opts->ch_layouts;
916  }
917  break;
918  }
919 
920  ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fgp->sch_idx, ofilter->index),
921  SCH_ENC(sched_idx_enc));
922  if (ret < 0)
923  return ret;
924 
925  return 0;
926 }
927 
929  const OutputFilterOptions *opts)
930 {
931  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
932 
933  av_assert0(!ofilter->bound);
934  av_assert0(ofilter->type == ifp->ifilter.type);
935 
936  ofp->needed = ofilter->bound = 1;
937  av_freep(&ofilter->linklabel);
938 
939  ofilter->output_name = av_strdup(opts->name);
940  if (!ofilter->output_name)
941  return AVERROR(EINVAL);
942 
943  ifp->ofilter_src = ofilter;
944 
945  av_strlcatf(ofp->log_name, sizeof(ofp->log_name), "->%s", ofilter->output_name);
946 
947  return 0;
948 }
949 
950 static int ifilter_bind_fg(InputFilterPriv *ifp, FilterGraph *fg_src, int out_idx)
951 {
953  OutputFilter *ofilter_src = fg_src->outputs[out_idx];
955  char name[32];
956  int ret;
957 
958  av_assert0(!ifp->bound);
959  ifp->bound = 1;
960 
961  if (ifp->ifilter.type != ofilter_src->type) {
962  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s output to %s input\n",
963  av_get_media_type_string(ofilter_src->type),
965  return AVERROR(EINVAL);
966  }
967 
968  ifp->type_src = ifp->ifilter.type;
969 
970  memset(&opts, 0, sizeof(opts));
971 
972  snprintf(name, sizeof(name), "fg:%d:%d", fgp->fg.index, ifp->ifilter.index);
973  opts.name = name;
974 
975  ret = ofilter_bind_ifilter(ofilter_src, ifp, &opts);
976  if (ret < 0)
977  return ret;
978 
979  ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fg_src->index, out_idx),
980  SCH_FILTER_IN(fgp->sch_idx, ifp->ifilter.index));
981  if (ret < 0)
982  return ret;
983 
984  return 0;
985 }
986 
988 {
989  InputFilterPriv *ifp;
990  InputFilter *ifilter;
991 
992  ifp = allocate_array_elem(&fg->inputs, sizeof(*ifp), &fg->nb_inputs);
993  if (!ifp)
994  return NULL;
995 
996  ifilter = &ifp->ifilter;
997  ifilter->graph = fg;
998 
999  ifp->frame = av_frame_alloc();
1000  if (!ifp->frame)
1001  return NULL;
1002 
1003  ifilter->index = fg->nb_inputs - 1;
1004  ifp->format = -1;
1008 
1010  if (!ifp->frame_queue)
1011  return NULL;
1012 
1013  return ifilter;
1014 }
1015 
1017 {
1018  FilterGraph *fg = *pfg;
1019  FilterGraphPriv *fgp;
1020 
1021  if (!fg)
1022  return;
1023  fgp = fgp_from_fg(fg);
1024 
1025  for (int j = 0; j < fg->nb_inputs; j++) {
1026  InputFilter *ifilter = fg->inputs[j];
1027  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1028 
1029  if (ifp->frame_queue) {
1030  AVFrame *frame;
1031  while (av_fifo_read(ifp->frame_queue, &frame, 1) >= 0)
1032  av_frame_free(&frame);
1033  av_fifo_freep2(&ifp->frame_queue);
1034  }
1035  av_frame_free(&ifp->sub2video.frame);
1036 
1037  av_frame_free(&ifp->frame);
1038  av_frame_free(&ifp->opts.fallback);
1039 
1041  av_freep(&ifilter->linklabel);
1042  av_freep(&ifp->opts.name);
1044  av_freep(&ifilter->name);
1045  av_freep(&ifilter->input_name);
1046  av_freep(&fg->inputs[j]);
1047  }
1048  av_freep(&fg->inputs);
1049  for (int j = 0; j < fg->nb_outputs; j++) {
1050  OutputFilter *ofilter = fg->outputs[j];
1051  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1052 
1053  av_frame_free(&ofp->fps.last_frame);
1054  av_dict_free(&ofp->sws_opts);
1055  av_dict_free(&ofp->swr_opts);
1056 
1057  av_freep(&ofilter->linklabel);
1058  av_freep(&ofilter->name);
1059  av_freep(&ofilter->output_name);
1060  av_freep(&ofilter->apad);
1063  av_freep(&fg->outputs[j]);
1064  }
1065  av_freep(&fg->outputs);
1066  av_freep(&fg->graph_desc);
1067 
1068  av_frame_free(&fgp->frame);
1069  av_frame_free(&fgp->frame_enc);
1070 
1071  av_freep(pfg);
1072 }
1073 
1074 static const char *fg_item_name(void *obj)
1075 {
1076  const FilterGraphPriv *fgp = obj;
1077 
1078  return fgp->log_name;
1079 }
1080 
// AVClass attached to each FilterGraph(Priv): makes av_log() on a
// filtergraph print its per-graph log_name.
1081 static const AVClass fg_class = {
1082  .class_name = "FilterGraph",
1083  .version = LIBAVUTIL_VERSION_INT,
1084  .item_name = fg_item_name,
1085  .category = AV_CLASS_CATEGORY_FILTER,
1086 };
1087 
1088 int fg_create(FilterGraph **pfg, char **graph_desc, Scheduler *sch,
1089  const OutputFilterOptions *opts)
1090 {
1091  FilterGraphPriv *fgp;
1092  FilterGraph *fg;
1093 
1095  AVFilterGraph *graph;
1096  int ret = 0;
1097 
1098  fgp = av_mallocz(sizeof(*fgp));
1099  if (!fgp) {
1100  av_freep(graph_desc);
1101  return AVERROR(ENOMEM);
1102  }
1103  fg = &fgp->fg;
1104 
1105  if (pfg) {
1106  *pfg = fg;
1107  fg->index = -1;
1108  } else {
1110  if (ret < 0) {
1111  av_freep(graph_desc);
1112  av_freep(&fgp);
1113  return ret;
1114  }
1115 
1116  fg->index = nb_filtergraphs - 1;
1117  }
1118 
1119  fg->class = &fg_class;
1120  fg->graph_desc = *graph_desc;
1122  fgp->nb_threads = -1;
1123  fgp->sch = sch;
1124 
1125  *graph_desc = NULL;
1126 
1127  snprintf(fgp->log_name, sizeof(fgp->log_name), "fc#%d", fg->index);
1128 
1129  fgp->frame = av_frame_alloc();
1130  fgp->frame_enc = av_frame_alloc();
1131  if (!fgp->frame || !fgp->frame_enc)
1132  return AVERROR(ENOMEM);
1133 
1134  /* this graph is only used for determining the kinds of inputs
1135  * and outputs we have, and is discarded on exit from this function */
1136  graph = avfilter_graph_alloc();
1137  if (!graph)
1138  return AVERROR(ENOMEM);;
1139  graph->nb_threads = 1;
1140 
1141  ret = graph_parse(fg, graph, fg->graph_desc, &inputs, &outputs,
1143  if (ret < 0)
1144  goto fail;
1145 
1146  for (unsigned i = 0; i < graph->nb_filters; i++) {
1147  const AVFilter *f = graph->filters[i]->filter;
1148  if ((!avfilter_filter_pad_count(f, 0) &&
1149  !(f->flags & AVFILTER_FLAG_DYNAMIC_INPUTS)) ||
1150  !strcmp(f->name, "apad")) {
1151  fgp->have_sources = 1;
1152  break;
1153  }
1154  }
1155 
1156  for (AVFilterInOut *cur = inputs; cur; cur = cur->next) {
1157  InputFilter *const ifilter = ifilter_alloc(fg);
1158 
1159  if (!ifilter) {
1160  ret = AVERROR(ENOMEM);
1161  goto fail;
1162  }
1163 
1164  ifilter->linklabel = cur->name;
1165  cur->name = NULL;
1166 
1167  ifilter->type = avfilter_pad_get_type(cur->filter_ctx->input_pads,
1168  cur->pad_idx);
1169 
1170  if (ifilter->type != AVMEDIA_TYPE_VIDEO && ifilter->type != AVMEDIA_TYPE_AUDIO) {
1171  av_log(fg, AV_LOG_FATAL, "Only video and audio filters supported "
1172  "currently.\n");
1173  ret = AVERROR(ENOSYS);
1174  goto fail;
1175  }
1176 
1177  ifilter->name = describe_filter_link(fg, cur, 1);
1178  if (!ifilter->name) {
1179  ret = AVERROR(ENOMEM);
1180  goto fail;
1181  }
1182  }
1183 
1184  for (AVFilterInOut *cur = outputs; cur; cur = cur->next) {
1185  const enum AVMediaType type = avfilter_pad_get_type(cur->filter_ctx->output_pads,
1186  cur->pad_idx);
1187  OutputFilter *const ofilter = ofilter_alloc(fg, type);
1188  OutputFilterPriv *ofp;
1189 
1190  if (!ofilter) {
1191  ret = AVERROR(ENOMEM);
1192  goto fail;
1193  }
1194  ofp = ofp_from_ofilter(ofilter);
1195 
1196  ofilter->linklabel = cur->name;
1197  cur->name = NULL;
1198 
1199  ofilter->name = describe_filter_link(fg, cur, 0);
1200  if (!ofilter->name) {
1201  ret = AVERROR(ENOMEM);
1202  goto fail;
1203  }
1204 
1205  // opts should only be needed in this function to fill fields from filtergraphs
1206  // whose output is meant to be treated as if it was stream, e.g. merged HEIF
1207  // tile groups.
1208  if (opts) {
1209  ofp->flags = opts->flags;
1210  ofp->side_data = opts->side_data;
1211  ofp->nb_side_data = opts->nb_side_data;
1212 
1213  ofp->crop_top = opts->crop_top;
1214  ofp->crop_bottom = opts->crop_bottom;
1215  ofp->crop_left = opts->crop_left;
1216  ofp->crop_right = opts->crop_right;
1217 
1220  if (sd)
1221  memcpy(ofp->displaymatrix, sd->data, sizeof(ofp->displaymatrix));
1222  }
1223  }
1224 
1225  if (!fg->nb_outputs) {
1226  av_log(fg, AV_LOG_FATAL, "A filtergraph has zero outputs, this is not supported\n");
1227  ret = AVERROR(ENOSYS);
1228  goto fail;
1229  }
1230 
1231  ret = sch_add_filtergraph(sch, fg->nb_inputs, fg->nb_outputs,
1232  filter_thread, fgp);
1233  if (ret < 0)
1234  goto fail;
1235  fgp->sch_idx = ret;
1236 
1237 fail:
1240  avfilter_graph_free(&graph);
1241 
1242  if (ret < 0)
1243  return ret;
1244 
1245  return 0;
1246 }
1247 
1249  InputStream *ist,
1250  char **graph_desc,
1251  Scheduler *sch, unsigned sched_idx_enc,
1252  const OutputFilterOptions *opts)
1253 {
1254  const enum AVMediaType type = ist->par->codec_type;
1255  FilterGraph *fg;
1256  FilterGraphPriv *fgp;
1257  int ret;
1258 
1259  ret = fg_create(pfg, graph_desc, sch, NULL);
1260  if (ret < 0)
1261  return ret;
1262  fg = *pfg;
1263  fgp = fgp_from_fg(fg);
1264 
1265  fgp->is_simple = 1;
1266 
1267  snprintf(fgp->log_name, sizeof(fgp->log_name), "%cf%s",
1268  av_get_media_type_string(type)[0], opts->name);
1269 
1270  if (fg->nb_inputs != 1 || fg->nb_outputs != 1) {
1271  av_log(fg, AV_LOG_ERROR, "Simple filtergraph '%s' was expected "
1272  "to have exactly 1 input and 1 output. "
1273  "However, it had %d input(s) and %d output(s). Please adjust, "
1274  "or use a complex filtergraph (-filter_complex) instead.\n",
1275  *graph_desc, fg->nb_inputs, fg->nb_outputs);
1276  return AVERROR(EINVAL);
1277  }
1278  if (fg->outputs[0]->type != type) {
1279  av_log(fg, AV_LOG_ERROR, "Filtergraph has a %s output, cannot connect "
1280  "it to %s output stream\n",
1283  return AVERROR(EINVAL);
1284  }
1285 
1286  ret = ifilter_bind_ist(fg->inputs[0], ist, opts->vs);
1287  if (ret < 0)
1288  return ret;
1289 
1290  ret = ofilter_bind_enc(fg->outputs[0], sched_idx_enc, opts);
1291  if (ret < 0)
1292  return ret;
1293 
1294  if (opts->nb_threads >= 0)
1295  fgp->nb_threads = opts->nb_threads;
1296 
1297  return 0;
1298 }
1299 
// Bind one input pad of a complex filtergraph to its data source. Three cases,
// chosen by the pad's link label:
//   "dec:N"      -> standalone decoder N
//   other label  -> an unbound output of another filtergraph with the same
//                   label, or an explicitly specified demuxer stream "F:spec"
//   no label     -> first unbound filtergraph output, else an unused input stream
// When `commit` is 0 this is a dry run that only marks needed outputs.
// NOTE(review): declarations at orig. lines 1305 (view specifier `vs`) and
// 1334 (stream specifier `ss`) are missing from this extract.
1300 static int fg_complex_bind_input(FilterGraph *fg, InputFilter *ifilter, int commit)
1301 {
1302  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1303  InputStream *ist = NULL;
1304  enum AVMediaType type = ifilter->type;
1306  const char *spec;
1307  char *p;
1308  int i, ret;
1309 
1310  if (ifilter->linklabel && !strncmp(ifilter->linklabel, "dec:", 4)) {
1311  // bind to a standalone decoder
1312  int dec_idx;
1313 
1314  dec_idx = strtol(ifilter->linklabel + 4, &p, 0);
1315  if (dec_idx < 0 || dec_idx >= nb_decoders) {
1316  av_log(fg, AV_LOG_ERROR, "Invalid decoder index %d in filtergraph description %s\n",
1317  dec_idx, fg->graph_desc);
1318  return AVERROR(EINVAL);
1319  }
1320 
// an optional ":view_spec" may follow the decoder index (video only)
1321  if (type == AVMEDIA_TYPE_VIDEO) {
1322  spec = *p == ':' ? p + 1 : p;
1323  ret = view_specifier_parse(&spec, &vs);
1324  if (ret < 0)
1325  return ret;
1326  }
1327 
1328  ret = ifilter_bind_dec(ifp, decoders[dec_idx], &vs);
1329  if (ret < 0)
1330  av_log(fg, AV_LOG_ERROR, "Error binding a decoder to filtergraph input %s\n",
1331  ifilter->name);
1332  return ret;
1333  } else if (ifilter->linklabel) {
1335  AVFormatContext *s;
1336  AVStream *st = NULL;
1337  int file_idx;
1338 
1339  // try finding an unbound filtergraph output with this label
1340  for (int i = 0; i < nb_filtergraphs; i++) {
1341  FilterGraph *fg_src = filtergraphs[i];
1342 
// a graph cannot feed itself
1343  if (fg == fg_src)
1344  continue;
1345 
1346  for (int j = 0; j < fg_src->nb_outputs; j++) {
1347  OutputFilter *ofilter = fg_src->outputs[j];
1348 
1349  if (!ofilter->bound && ofilter->linklabel &&
1350  !strcmp(ofilter->linklabel, ifilter->linklabel)) {
1351  if (commit) {
1352  av_log(fg, AV_LOG_VERBOSE,
1353  "Binding input with label '%s' to filtergraph output %d:%d\n",
1354  ifilter->linklabel, i, j);
1355 
1356  ret = ifilter_bind_fg(ifp, fg_src, j);
1357  if (ret < 0) {
1358  av_log(fg, AV_LOG_ERROR, "Error binding filtergraph input %s\n",
1359  ifilter->linklabel);
1360  return ret;
1361  }
1362  } else
// dry run: just mark the matching output as needed
1363  ofp_from_ofilter(ofilter)->needed = 1;
1364  return 0;
1365  }
1366  }
1367  }
1368 
1369  // bind to an explicitly specified demuxer stream
1370  file_idx = strtol(ifilter->linklabel, &p, 0);
1371  if (file_idx < 0 || file_idx >= nb_input_files) {
1372  av_log(fg, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
1373  file_idx, fg->graph_desc);
1374  return AVERROR(EINVAL);
1375  }
1376  s = input_files[file_idx]->ctx;
1377 
// the remainder after "F:" is a stream specifier
1378  ret = stream_specifier_parse(&ss, *p == ':' ? p + 1 : p, 1, fg);
1379  if (ret < 0) {
1380  av_log(fg, AV_LOG_ERROR, "Invalid stream specifier: %s\n", p);
1381  return ret;
1382  }
1383 
1384  if (type == AVMEDIA_TYPE_VIDEO) {
1385  spec = ss.remainder ? ss.remainder : "";
1386  ret = view_specifier_parse(&spec, &vs);
1387  if (ret < 0) {
1389  return ret;
1390  }
1391  }
1392 
// pick the first stream in the file that matches both media type and specifier
1393  for (i = 0; i < s->nb_streams; i++) {
1394  enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
1395  if (stream_type != type &&
1396  !(stream_type == AVMEDIA_TYPE_SUBTITLE &&
1397  type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
1398  continue;
1399  if (stream_specifier_match(&ss, s, s->streams[i], fg)) {
1400  st = s->streams[i];
1401  break;
1402  }
1403  }
1405  if (!st) {
1406  av_log(fg, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
1407  "matches no streams.\n", p, fg->graph_desc);
1408  return AVERROR(EINVAL);
1409  }
1410  ist = input_files[file_idx]->streams[st->index];
1411 
1412  if (commit)
1413  av_log(fg, AV_LOG_VERBOSE,
1414  "Binding input with label '%s' to input stream %d:%d\n",
1415  ifilter->linklabel, ist->file->index, ist->index);
1416  } else {
1417  // try finding an unbound filtergraph output
1418  for (int i = 0; i < nb_filtergraphs; i++) {
1419  FilterGraph *fg_src = filtergraphs[i];
1420 
1421  if (fg == fg_src)
1422  continue;
1423 
1424  for (int j = 0; j < fg_src->nb_outputs; j++) {
1425  OutputFilter *ofilter = fg_src->outputs[j];
1426 
1427  if (!ofilter->bound) {
1428  if (commit) {
1429  av_log(fg, AV_LOG_VERBOSE,
1430  "Binding unlabeled filtergraph input to filtergraph output %d:%d\n", i, j);
1431 
1432  ret = ifilter_bind_fg(ifp, fg_src, j);
1433  if (ret < 0) {
1434  av_log(fg, AV_LOG_ERROR, "Error binding filtergraph input %d:%d\n", i, j);
1435  return ret;
1436  }
1437  } else
1438  ofp_from_ofilter(ofilter)->needed = 1;
1439  return 0;
1440  }
1441  }
1442  }
1443 
// fall back to the first input stream of the right type not yet used anywhere
1444  ist = ist_find_unused(type);
1445  if (!ist) {
1446  av_log(fg, AV_LOG_FATAL,
1447  "Cannot find an unused %s input stream to feed the "
1448  "unlabeled input pad %s.\n",
1449  av_get_media_type_string(type), ifilter->name);
1450  return AVERROR(EINVAL);
1451  }
1452 
1453  if (commit)
1454  av_log(fg, AV_LOG_VERBOSE,
1455  "Binding unlabeled input %d to input stream %d:%d\n",
1456  ifilter->index, ist->file->index, ist->index);
1457  }
1458  av_assert0(ist);
1459 
// only the demuxer-stream cases reach here; filtergraph binds returned above
1460  if (commit) {
1461  ret = ifilter_bind_ist(ifilter, ist, &vs);
1462  if (ret < 0) {
1463  av_log(fg, AV_LOG_ERROR,
1464  "Error binding an input stream to complex filtergraph input %s.\n",
1465  ifilter->name);
1466  return ret;
1467  }
1468  }
1469 
1470  return 0;
1471 }
1472 
// Bind every not-yet-bound input of `fg`; with commit=0 this is a dry run
// that only marks which filtergraph outputs are needed.
// NOTE(review): the InputFilterPriv declaration (orig. line 1477) is missing
// from this extract.
1473 static int bind_inputs(FilterGraph *fg, int commit)
1474 {
1475  // bind filtergraph inputs to input streams or other filtergraphs
1476  for (int i = 0; i < fg->nb_inputs; i++) {
1478  int ret;
1479 
// already-bound inputs are left alone
1480  if (ifp->bound)
1481  continue;
1482 
1483  ret = fg_complex_bind_input(fg, &ifp->ifilter, commit);
1484  if (ret < 0)
1485  return ret;
1486  }
1487 
1488  return 0;
1489 }
1490 
// Finalise input/output bindings across all filtergraphs:
//   1. dry-run bind to discover which outputs are needed,
//   2. drop internal graphs whose outputs are all unneeded (error for
//      user-created graphs with unconnected outputs),
//   3. commit the bindings.
// NOTE(review): the signature line (orig. 1491) and an fgp declaration
// (orig. 1504) are missing from this extract.
1492 {
1493  int ret;
1494 
1495  for (int i = 0; i < nb_filtergraphs; i++) {
1496  ret = bind_inputs(filtergraphs[i], 0);
1497  if (ret < 0)
1498  return ret;
1499  }
1500 
1501  // check that all outputs were bound
// iterate backwards because entries may be removed from filtergraphs[]
1502  for (int i = nb_filtergraphs - 1; i >= 0; i--) {
1503  FilterGraph *fg = filtergraphs[i];
1505 
1506  for (int j = 0; j < fg->nb_outputs; j++) {
1507  OutputFilter *output = fg->outputs[j];
1508  if (!ofp_from_ofilter(output)->needed) {
// an unconnected output on a user graph is a hard error
1509  if (!fg->is_internal) {
1510  av_log(fg, AV_LOG_FATAL,
1511  "Filter '%s' has output %d (%s) unconnected\n",
1512  output->name, j,
1513  output->linklabel ? (const char *)output->linklabel : "unlabeled");
1514  return AVERROR(EINVAL);
1515  }
1516 
// internal graphs are silently removed instead
1517  av_log(fg, AV_LOG_DEBUG,
1518  "Internal filter '%s' has output %d (%s) unconnected. Removing graph\n",
1519  output->name, j,
1520  output->linklabel ? (const char *)output->linklabel : "unlabeled");
1521  sch_remove_filtergraph(fgp->sch, fgp->sch_idx);
1522  fg_free(&filtergraphs[i]);
1523  nb_filtergraphs--;
// close the gap left by the removed entry
1524  if (nb_filtergraphs > 0)
1525  memmove(&filtergraphs[i],
1526  &filtergraphs[i + 1],
1527  (nb_filtergraphs - i) * sizeof(*filtergraphs));
1528  break;
1529  }
1530  }
1531  }
1532 
// second pass: actually commit all the bindings
1533  for (int i = 0; i < nb_filtergraphs; i++) {
1534  ret = bind_inputs(filtergraphs[i], 1);
1535  if (ret < 0)
1536  return ret;
1537  }
1538 
1539  return 0;
1540 }
1541 
// Append a (a)trim filter after *last_filter to enforce -ss/-t style limits.
// No-op when neither start_time nor duration is set. On success *last_filter
// and *pad_idx are updated to point at the trim filter's output.
// NOTE(review): the ctx declaration (orig. 1547), the av_opt_set_int tails
// (orig. 1569/1573, presumably AV_OPT_SEARCH_CHILDREN) and the filter init
// call (orig. 1580) are missing from this extract.
1542 static int insert_trim(void *logctx, int64_t start_time, int64_t duration,
1543  AVFilterContext **last_filter, int *pad_idx,
1544  const char *filter_name)
1545 {
1546  AVFilterGraph *graph = (*last_filter)->graph;
1548  const AVFilter *trim;
// pick "trim" or "atrim" depending on the media type of the pad we chain onto
1549  enum AVMediaType type = avfilter_pad_get_type((*last_filter)->output_pads, *pad_idx);
1550  const char *name = (type == AVMEDIA_TYPE_VIDEO) ? "trim" : "atrim";
1551  int ret = 0;
1552 
// nothing to limit -> leave the chain untouched
1553  if (duration == INT64_MAX && start_time == AV_NOPTS_VALUE)
1554  return 0;
1555 
1556  trim = avfilter_get_by_name(name);
1557  if (!trim) {
1558  av_log(logctx, AV_LOG_ERROR, "%s filter not present, cannot limit "
1559  "recording time.\n", name);
1560  return AVERROR_FILTER_NOT_FOUND;
1561  }
1562 
1563  ctx = avfilter_graph_alloc_filter(graph, trim, filter_name);
1564  if (!ctx)
1565  return AVERROR(ENOMEM);
1566 
// "durationi"/"starti" are the integer (microsecond) variants of the options
1567  if (duration != INT64_MAX) {
1568  ret = av_opt_set_int(ctx, "durationi", duration,
1570  }
1571  if (ret >= 0 && start_time != AV_NOPTS_VALUE) {
1572  ret = av_opt_set_int(ctx, "starti", start_time,
1574  }
1575  if (ret < 0) {
1576  av_log(ctx, AV_LOG_ERROR, "Error configuring the %s filter", name);
1577  return ret;
1578  }
1579 
1581  if (ret < 0)
1582  return ret;
1583 
1584  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1585  if (ret < 0)
1586  return ret;
1587 
1588  *last_filter = ctx;
1589  *pad_idx = 0;
1590  return 0;
1591 }
1592 
// Create filter `filter_name` with options `args` and splice it after
// *last_filter/*pad_idx, advancing both to the new filter's output pad 0.
// NOTE(review): the ctx declaration (orig. 1598) and the head of the
// avfilter_graph_create_filter call (orig. 1604) are missing from this extract.
1593 static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
1594  const char *filter_name, const char *args)
1595 {
1596  AVFilterGraph *graph = (*last_filter)->graph;
1597  const AVFilter *filter = avfilter_get_by_name(filter_name);
1599  int ret;
1600 
// callers only request filters known to exist, so a miss is a programming error
1601  if (!filter)
1602  return AVERROR_BUG;
1603 
1605  filter,
1606  filter_name, args, NULL, graph);
1607  if (ret < 0)
1608  return ret;
1609 
1610  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1611  if (ret < 0)
1612  return ret;
1613 
1614  *last_filter = ctx;
1615  *pad_idx = 0;
1616  return 0;
1617 }
1618 
// Configure a video output of the filtergraph: create the buffersink and
// chain optional crop, autorotation, autoscale, format-constraint and trim
// filters in front of it.
// NOTE(review): the first signature line (orig. 1619) and several statement
// heads (orig. 1630, 1681, 1686, 1697, 1707, 1709, 1718, 1720) are missing
// from this extract; code kept byte-identical.
1620  OutputFilter *ofilter, AVFilterInOut *out)
1621 {
1622  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1623  AVFilterContext *last_filter = out->filter_ctx;
1624  AVBPrint bprint;
1625  int pad_idx = out->pad_idx;
1626  int ret;
1627  char name[255];
1628 
1629  snprintf(name, sizeof(name), "out_%s", ofilter->output_name);
1631  avfilter_get_by_name("buffersink"),
1632  name, NULL, NULL, graph);
1633 
1634  if (ret < 0)
1635  return ret;
1636 
// user-requested output cropping
1637  if (ofp->flags & OFILTER_FLAG_CROP) {
1638  char crop_buf[64];
1639  snprintf(crop_buf, sizeof(crop_buf), "w=iw-%u-%u:h=ih-%u-%u:x=%u:y=%u",
1640  ofp->crop_left, ofp->crop_right,
1641  ofp->crop_top, ofp->crop_bottom,
1642  ofp->crop_left, ofp->crop_top);
1643  ret = insert_filter(&last_filter, &pad_idx, "crop", crop_buf);
1644  if (ret < 0)
1645  return ret;
1646  }
1647 
// apply the display-matrix rotation via transpose/hflip/vflip/rotate filters
1648  if (ofp->flags & OFILTER_FLAG_AUTOROTATE) {
1649  int32_t *displaymatrix = ofp->displaymatrix;
1650  double theta;
1651 
1652  theta = get_rotation(displaymatrix);
1653 
1654  if (fabs(theta - 90) < 1.0) {
1655  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1656  displaymatrix[3] > 0 ? "cclock_flip" : "clock");
1657  } else if (fabs(theta - 180) < 1.0) {
1658  if (displaymatrix[0] < 0) {
1659  ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
1660  if (ret < 0)
1661  return ret;
1662  }
1663  if (displaymatrix[4] < 0) {
1664  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1665  }
1666  } else if (fabs(theta - 270) < 1.0) {
1667  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1668  displaymatrix[3] < 0 ? "clock_flip" : "cclock");
1669  } else if (fabs(theta) > 1.0) {
// non-right-angle rotation: use the generic rotate filter (degrees -> radians)
1670  char rotate_buf[64];
1671  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1672  ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
1673  } else if (fabs(theta) < 1.0) {
1674  if (displaymatrix && displaymatrix[4] < 0) {
1675  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1676  }
1677  }
1678  if (ret < 0)
1679  return ret;
1680 
1682  }
1683 
// scale to the requested output size when autoscaling is enabled
1684  if ((ofp->width || ofp->height) && (ofp->flags & OFILTER_FLAG_AUTOSCALE)) {
1685  char args[255];
1687  const AVDictionaryEntry *e = NULL;
1688 
1689  snprintf(args, sizeof(args), "%d:%d",
1690  ofp->width, ofp->height);
1691 
// append user sws options as key=value pairs to the scale args
1692  while ((e = av_dict_iterate(ofp->sws_opts, e))) {
1693  av_strlcatf(args, sizeof(args), ":%s=%s", e->key, e->value);
1694  }
1695 
1696  snprintf(name, sizeof(name), "scaler_out_%s", ofilter->output_name);
1698  name, args, NULL, graph)) < 0)
1699  return ret;
1700  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1701  return ret;
1702 
1703  last_filter = filter;
1704  pad_idx = 0;
1705  }
1706 
// build the "format" filter argument string constraining pixel format,
// color space/range and alpha mode
1708  ofp->format != AV_PIX_FMT_NONE || !ofp->formats);
1710  choose_pix_fmts(ofp, &bprint);
1711  choose_color_spaces(ofp, &bprint);
1712  choose_color_ranges(ofp, &bprint);
1713  choose_alpha_modes(ofp, &bprint);
1714  if (!av_bprint_is_complete(&bprint))
1715  return AVERROR(ENOMEM);
1716 
1717  if (bprint.len) {
1719 
1721  avfilter_get_by_name("format"),
1722  "format", bprint.str, NULL, graph);
1723  av_bprint_finalize(&bprint, NULL);
1724  if (ret < 0)
1725  return ret;
1726  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1727  return ret;
1728 
1729  last_filter = filter;
1730  pad_idx = 0;
1731  }
1732 
// honour output -ss/-t via a trim filter just before the sink
1733  snprintf(name, sizeof(name), "trim_out_%s", ofilter->output_name);
1734  ret = insert_trim(fgp, ofp->trim_start_us, ofp->trim_duration_us,
1735  &last_filter, &pad_idx, name);
1736  if (ret < 0)
1737  return ret;
1738 
1739 
1740  if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
1741  return ret;
1742 
1743  return 0;
1744 }
1745 
// Configure an audio output of the filtergraph: create the abuffersink and
// chain optional aformat, apad and atrim filters in front of it.
// NOTE(review): the first signature line (orig. 1746) and a few statement
// heads (orig. 1757, 1782, 1792, 1795) are missing from this extract.
1747  OutputFilter *ofilter, AVFilterInOut *out)
1748 {
1749  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1750  AVFilterContext *last_filter = out->filter_ctx;
1751  int pad_idx = out->pad_idx;
1752  AVBPrint args;
1753  char name[255];
1754  int ret;
1755 
1756  snprintf(name, sizeof(name), "out_%s", ofilter->output_name);
1758  avfilter_get_by_name("abuffersink"),
1759  name, NULL, NULL, graph);
1760  if (ret < 0)
1761  return ret;
1762 
// helper: create `filter_name=arg` and splice it before the sink, mirroring
// what the equivalent -af option would do
1763 #define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do { \
1764  AVFilterContext *filt_ctx; \
1765  \
1766  av_log(ofilter, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
1767  "similarly to -af " filter_name "=%s.\n", arg); \
1768  \
1769  ret = avfilter_graph_create_filter(&filt_ctx, \
1770  avfilter_get_by_name(filter_name), \
1771  filter_name, arg, NULL, graph); \
1772  if (ret < 0) \
1773  goto fail; \
1774  \
1775  ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0); \
1776  if (ret < 0) \
1777  goto fail; \
1778  \
1779  last_filter = filt_ctx; \
1780  pad_idx = 0; \
1781 } while (0)
1783 
// build the aformat argument string constraining sample format/rate/layout
1784  choose_sample_fmts(ofp, &args);
1785  choose_sample_rates(ofp, &args);
1786  choose_channel_layouts(ofp, &args);
1787  if (!av_bprint_is_complete(&args)) {
1788  ret = AVERROR(ENOMEM);
1789  goto fail;
1790  }
1791  if (args.len) {
1793 
1794  snprintf(name, sizeof(name), "format_out_%s", ofilter->output_name);
1796  avfilter_get_by_name("aformat"),
1797  name, args.str, NULL, graph);
1798  if (ret < 0)
1799  goto fail;
1800 
1801  ret = avfilter_link(last_filter, pad_idx, format, 0);
1802  if (ret < 0)
1803  goto fail;
1804 
1805  last_filter = format;
1806  pad_idx = 0;
1807  }
1808 
// -apad inserts silence, which makes the graph a data source of its own
1809  if (ofilter->apad) {
1810  AUTO_INSERT_FILTER("-apad", "apad", ofilter->apad);
1811  fgp->have_sources = 1;
1812  }
1813 
1814  snprintf(name, sizeof(name), "trim for output %s", ofilter->output_name);
1815  ret = insert_trim(fgp, ofp->trim_start_us, ofp->trim_duration_us,
1816  &last_filter, &pad_idx, name);
1817  if (ret < 0)
1818  goto fail;
1819 
1820  if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
1821  goto fail;
1822 fail:
1823  av_bprint_finalize(&args, NULL);
1824 
1825  return ret;
1826 }
1827 
// Dispatch output-pad configuration by media type.
// NOTE(review): the first signature line (orig. 1828) is missing from this
// extract.
1829  OutputFilter *ofilter, AVFilterInOut *out)
1830 {
1831  switch (ofilter->type) {
1832  case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fgp, graph, ofilter, out);
1833  case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fgp, graph, ofilter, out);
// only audio and video outputs are ever created
1834  default: av_assert0(0); return 0;
1835  }
1836 }
1837 
// Reset the sub2video state for a subtitle-fed video input.
// NOTE(review): the signature line (orig. 1838) is missing from this extract.
1839 {
// INT64_MIN == "no subtitle shown yet / nothing on screen"
1840  ifp->sub2video.last_pts = INT64_MIN;
1841  ifp->sub2video.end_pts = INT64_MIN;
1842 
1843  /* sub2video structure has been (re-)initialized.
1844  Mark it as such so that the system will be
1845  initialized with the first received heartbeat. */
1846  ifp->sub2video.initialize = 1;
1847 }
1848 
// Configure a video input of the filtergraph: create a buffersrc with the
// stream's parameters and chain optional crop, autorotation and trim filters
// after it before linking into the parsed graph at `in`.
// NOTE(review): the first signature line (orig. 1849), the buffersrc
// parameter allocation (orig. 1859) and the pix-fmt descriptor lookup
// (orig. 1900) are missing from this extract.
1850  InputFilter *ifilter, AVFilterInOut *in)
1851 {
1852  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1853 
1854  AVFilterContext *last_filter;
1855  const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
1856  const AVPixFmtDescriptor *desc;
1857  char name[255];
1858  int ret, pad_idx = 0;
1860  if (!par)
1861  return AVERROR(ENOMEM);
1862 
// subtitle streams feeding a video input go through the sub2video path
1863  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE)
1864  sub2video_prepare(ifp);
1865 
1866  snprintf(name, sizeof(name), "graph %d input from stream %s", fg->index,
1867  ifp->opts.name);
1868 
1869  ifilter->filter = avfilter_graph_alloc_filter(graph, buffer_filt, name);
1870  if (!ifilter->filter) {
1871  ret = AVERROR(ENOMEM);
1872  goto fail;
1873  }
1874 
// propagate the decoded stream's properties into the buffersrc
1875  par->format = ifp->format;
1876  par->time_base = ifp->time_base;
1877  par->frame_rate = ifp->opts.framerate;
1878  par->width = ifp->width;
1879  par->height = ifp->height;
1880  par->sample_aspect_ratio = ifp->sample_aspect_ratio.den > 0 ?
1881  ifp->sample_aspect_ratio : (AVRational){ 0, 1 };
1882  par->color_space = ifp->color_space;
1883  par->color_range = ifp->color_range;
1884  par->alpha_mode = ifp->alpha_mode;
1885  par->hw_frames_ctx = ifp->hw_frames_ctx;
1886  par->side_data = ifp->side_data;
1887  par->nb_side_data = ifp->nb_side_data;
1888 
1889  ret = av_buffersrc_parameters_set(ifilter->filter, par);
1890  if (ret < 0)
1891  goto fail;
1892  av_freep(&par);
1893 
1894  ret = avfilter_init_dict(ifilter->filter, NULL);
1895  if (ret < 0)
1896  goto fail;
1897 
1898  last_filter = ifilter->filter;
1899 
1901  av_assert0(desc);
1902 
// user-requested input cropping
1903  if ((ifp->opts.flags & IFILTER_FLAG_CROP)) {
1904  char crop_buf[64];
1905  snprintf(crop_buf, sizeof(crop_buf), "w=iw-%u-%u:h=ih-%u-%u:x=%u:y=%u",
1906  ifp->opts.crop_left, ifp->opts.crop_right,
1907  ifp->opts.crop_top, ifp->opts.crop_bottom,
1908  ifp->opts.crop_left, ifp->opts.crop_top);
1909  ret = insert_filter(&last_filter, &pad_idx, "crop", crop_buf);
1910  if (ret < 0)
1911  return ret;
1912  }
1913 
1914  // TODO: insert hwaccel enabled filters like transpose_vaapi into the graph
// software autorotation is skipped for hwaccel formats
1915  ifp->displaymatrix_applied = 0;
1916  if ((ifp->opts.flags & IFILTER_FLAG_AUTOROTATE) &&
1917  !(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) {
1918  int32_t *displaymatrix = ifp->displaymatrix;
1919  double theta;
1920 
1921  theta = get_rotation(displaymatrix);
1922 
1923  if (fabs(theta - 90) < 1.0) {
1924  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1925  displaymatrix[3] > 0 ? "cclock_flip" : "clock");
1926  } else if (fabs(theta - 180) < 1.0) {
1927  if (displaymatrix[0] < 0) {
1928  ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
1929  if (ret < 0)
1930  return ret;
1931  }
1932  if (displaymatrix[4] < 0) {
1933  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1934  }
1935  } else if (fabs(theta - 270) < 1.0) {
1936  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1937  displaymatrix[3] < 0 ? "clock_flip" : "cclock");
1938  } else if (fabs(theta) > 1.0) {
1939  char rotate_buf[64];
1940  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1941  ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
1942  } else if (fabs(theta) < 1.0) {
1943  if (displaymatrix && displaymatrix[4] < 0) {
1944  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1945  }
1946  }
1947  if (ret < 0)
1948  return ret;
1949 
// remember the rotation was handled so later frames drop their matrix
1950  ifp->displaymatrix_applied = 1;
1951  }
1952 
1953  snprintf(name, sizeof(name), "trim_in_%s", ifp->opts.name);
1954  ret = insert_trim(fg, ifp->opts.trim_start_us, ifp->opts.trim_end_us,
1955  &last_filter, &pad_idx, name);
1956  if (ret < 0)
1957  return ret;
1958 
1959  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
1960  return ret;
1961  return 0;
1962 fail:
1963  av_freep(&par);
1964 
1965  return ret;
1966 }
1967 
// Configure an audio input of the filtergraph: create an abuffer source
// described by a time_base/sample_rate/sample_fmt/layout argument string,
// attach side data, then chain an optional trim filter before linking at `in`.
// NOTE(review): the first signature line (orig. 1968), the AVBPrint init
// (orig. 1979), the sample-fmt name argument (orig. 1983), the layout-check
// tail (orig. 1985), the layout-describe call (orig. 1987) and the par
// allocation (orig. 1996) are missing from this extract.
1969  InputFilter *ifilter, AVFilterInOut *in)
1970 {
1971  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1972  AVFilterContext *last_filter;
1973  AVBufferSrcParameters *par;
1974  const AVFilter *abuffer_filt = avfilter_get_by_name("abuffer");
1975  AVBPrint args;
1976  char name[255];
1977  int ret, pad_idx = 0;
1978 
1980  av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
1981  ifp->time_base.num, ifp->time_base.den,
1982  ifp->sample_rate,
// prefer a named channel layout; fall back to a bare channel count
1984  if (av_channel_layout_check(&ifp->ch_layout) &&
1986  av_bprintf(&args, ":channel_layout=");
1988  } else
1989  av_bprintf(&args, ":channels=%d", ifp->ch_layout.nb_channels);
1990  snprintf(name, sizeof(name), "graph_%d_in_%s", fg->index, ifp->opts.name);
1991 
1992  if ((ret = avfilter_graph_create_filter(&ifilter->filter, abuffer_filt,
1993  name, args.str, NULL,
1994  graph)) < 0)
1995  return ret;
// side data cannot be passed via the args string, use buffersrc parameters
1997  if (!par)
1998  return AVERROR(ENOMEM);
1999  par->side_data = ifp->side_data;
2000  par->nb_side_data = ifp->nb_side_data;
2001  ret = av_buffersrc_parameters_set(ifilter->filter, par);
2002  av_free(par);
2003  if (ret < 0)
2004  return ret;
2005  last_filter = ifilter->filter;
2006 
2007  snprintf(name, sizeof(name), "trim for input stream %s", ifp->opts.name);
2008  ret = insert_trim(fg, ifp->opts.trim_start_us, ifp->opts.trim_end_us,
2009  &last_filter, &pad_idx, name);
2010  if (ret < 0)
2011  return ret;
2012 
2013  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
2014  return ret;
2015 
2016  return 0;
2017 }
2018 
// Dispatch input-pad configuration by media type.
// NOTE(review): the first signature line (orig. 2019) is missing from this
// extract.
2020  InputFilter *ifilter, AVFilterInOut *in)
2021 {
2022  switch (ifilter->type) {
2023  case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, graph, ifilter, in);
2024  case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, graph, ifilter, in);
// only audio and video inputs are ever created
2025  default: av_assert0(0); return 0;
2026  }
2027 }
2028 
// Tear down the configured graph: clear the dangling AVFilterContext pointers
// on all inputs/outputs (they are owned by the graph) and free the graph.
// NOTE(review): the signature line (orig. 2029) is missing from this extract.
2030 {
2031  for (int i = 0; i < fg->nb_outputs; i++)
2032  fg->outputs[i]->filter = NULL;
2033  for (int i = 0; i < fg->nb_inputs; i++)
2034  fg->inputs[i]->filter = NULL;
2035  avfilter_graph_free(&fgt->graph);
2036 }
2037 
// Return 1 if `f` is a (a)buffer source: no inputs and named buffer/abuffer.
// NOTE(review): the signature line (orig. 2038) is missing from this extract.
2039 {
2040  return f->nb_inputs == 0 &&
2041  (!strcmp(f->filter->name, "buffer") ||
2042  !strcmp(f->filter->name, "abuffer"));
2043 }
2044 
// Return 1 if every filter in the graph is metadata-only (or a sink/buffer
// source), i.e. the graph never modifies frame data.
// NOTE(review): one condition line (orig. 2056, presumably the
// filter_is_buffersrc(f) call closing the ||-chain) is missing from this
// extract.
2045 static int graph_is_meta(AVFilterGraph *graph)
2046 {
2047  for (unsigned i = 0; i < graph->nb_filters; i++) {
2048  const AVFilterContext *f = graph->filters[i];
2049 
2050  /* in addition to filters flagged as meta, also
2051  * disregard sinks and buffersources (but not other sources,
2052  * since they introduce data we are not aware of)
2053  */
2054  if (!((f->filter->flags & AVFILTER_FLAG_METADATA_ONLY) ||
2055  f->nb_outputs == 0 ||
2057  return 0;
2058  }
2059  return 1;
2060 }
2061 
2062 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer);
2063 
// (Re)configure the filtergraph: allocate a fresh AVFilterGraph, apply
// thread/sws/swr options, parse the description, configure all input and
// output pads, then replay queued frames and pending EOFs into the new graph.
// NOTE(review): the signature line (orig. 2064) and numerous statement
// fragments are missing from this doxygen extract; code kept byte-identical.
2065 {
2066  FilterGraphPriv *fgp = fgp_from_fg(fg);
2067  AVBufferRef *hw_device;
2068  AVFilterInOut *inputs, *outputs, *cur;
2069  int ret = AVERROR_BUG, i, simple = filtergraph_is_simple(fg);
2070  int have_input_eof = 0;
2071  const char *graph_desc = fg->graph_desc;
2072 
// drop any previously configured graph before building a new one
2073  cleanup_filtergraph(fg, fgt);
2074  fgt->graph = avfilter_graph_alloc();
2075  if (!fgt->graph)
2076  return AVERROR(ENOMEM);
2077 
// simple graphs take per-output thread/sws/swr options; complex graphs do not
2078  if (simple) {
2079  OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[0]);
2080 
2081  if (filter_nbthreads) {
2082  ret = av_opt_set(fgt->graph, "threads", filter_nbthreads, 0);
2083  if (ret < 0)
2084  goto fail;
2085  } else if (fgp->nb_threads >= 0) {
2086  ret = av_opt_set_int(fgt->graph, "threads", fgp->nb_threads, 0);
2087  if (ret < 0)
2088  return ret;
2089  }
2090 
2091  if (av_dict_count(ofp->sws_opts)) {
2093  &fgt->graph->scale_sws_opts,
2094  '=', ':');
2095  if (ret < 0)
2096  goto fail;
2097  }
2098 
2099  if (av_dict_count(ofp->swr_opts)) {
2100  char *args;
2101  ret = av_dict_get_string(ofp->swr_opts, &args, '=', ':');
2102  if (ret < 0)
2103  goto fail;
2104  av_opt_set(fgt->graph, "aresample_swr_opts", args, 0);
2105  av_free(args);
2106  }
2107  } else {
2109  }
2110 
2111  if (filter_buffered_frames) {
2112  ret = av_opt_set_int(fgt->graph, "max_buffered_frames", filter_buffered_frames, 0);
2113  if (ret < 0)
2114  return ret;
2115  }
2116 
2117  hw_device = hw_device_for_filter();
2118 
2119  ret = graph_parse(fg, fgt->graph, graph_desc, &inputs, &outputs, hw_device);
2120  if (ret < 0)
2121  goto fail;
2122 
// wire each parsed (unconnected) input/output pad to our buffersrc/sinks
2123  for (cur = inputs, i = 0; cur; cur = cur->next, i++)
2124  if ((ret = configure_input_filter(fg, fgt->graph, fg->inputs[i], cur)) < 0) {
2127  goto fail;
2128  }
2130 
2131  for (cur = outputs, i = 0; cur; cur = cur->next, i++) {
2132  ret = configure_output_filter(fgp, fgt->graph, fg->outputs[i], cur);
2133  if (ret < 0) {
2135  goto fail;
2136  }
2137  }
2139 
2140  if (fgp->disable_conversions)
2142  if ((ret = avfilter_graph_config(fgt->graph, NULL)) < 0)
2143  goto fail;
2144 
2145  fgp->is_meta = graph_is_meta(fgt->graph);
2146 
2147  /* limit the lists of allowed formats to the ones selected, to
2148  * make sure they stay the same if the filtergraph is reconfigured later */
2149  for (int i = 0; i < fg->nb_outputs; i++) {
2150  const AVFrameSideData *const *sd;
2151  int nb_sd;
2152  OutputFilter *ofilter = fg->outputs[i];
2153  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
2154  AVFilterContext *sink = ofilter->filter;
2155 
2156  ofp->format = av_buffersink_get_format(sink);
2157 
2158  ofp->width = av_buffersink_get_w(sink);
2159  ofp->height = av_buffersink_get_h(sink);
2163 
2164  // If the timing parameters are not locked yet, get the tentative values
2165  // here but don't lock them. They will only be used if no output frames
2166  // are ever produced.
2167  if (!ofp->tb_out_locked) {
2169  if (ofp->fps.framerate.num <= 0 && ofp->fps.framerate.den <= 0 &&
2170  fr.num > 0 && fr.den > 0)
2171  ofp->fps.framerate = fr;
2172  ofp->tb_out = av_buffersink_get_time_base(sink);
2173  }
2175 
2178  ret = av_buffersink_get_ch_layout(sink, &ofp->ch_layout);
2179  if (ret < 0)
2180  goto fail;
2181  sd = av_buffersink_get_side_data(sink, &nb_sd);
2182  if (nb_sd)
2183  for (int j = 0; j < nb_sd; j++) {
2186  if (ret < 0) {
2188  goto fail;
2189  }
2190  }
2191  }
2192 
// feed frames that were queued while the graph was not configured
2193  for (int i = 0; i < fg->nb_inputs; i++) {
2194  InputFilter *ifilter = fg->inputs[i];
2196  AVFrame *tmp;
2197  while (av_fifo_read(ifp->frame_queue, &tmp, 1) >= 0) {
2198  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
2199  sub2video_frame(&ifp->ifilter, tmp, !fgt->graph);
2200  } else {
2201  if (ifp->type_src == AVMEDIA_TYPE_VIDEO) {
2202  if (ifp->displaymatrix_applied)
2204  }
2205  ret = av_buffersrc_add_frame(ifilter->filter, tmp);
2206  }
2207  av_frame_free(&tmp);
2208  if (ret < 0)
2209  goto fail;
2210  }
2211  }
2212 
2213  /* send the EOFs for the finished inputs */
2214  for (int i = 0; i < fg->nb_inputs; i++) {
2215  InputFilter *ifilter = fg->inputs[i];
2216  if (fgt->eof_in[i]) {
// a NULL frame flushes the buffersrc
2217  ret = av_buffersrc_add_frame(ifilter->filter, NULL);
2218  if (ret < 0)
2219  goto fail;
2220  have_input_eof = 1;
2221  }
2222  }
2223 
2224  if (have_input_eof) {
2225  // make sure the EOF propagates to the end of the graph
2227  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
2228  goto fail;
2229  }
2230 
2231  return 0;
2232 fail:
2233  cleanup_filtergraph(fg, fgt);
2234  return ret;
2235 }
2236 
// Capture stream parameters (format, dimensions, timing, layout, relevant
// side data) from a decoded frame into the InputFilterPriv, to be used when
// (re)configuring the filtergraph.
// NOTE(review): the signature line (orig. 2237) and a few statement heads
// (orig. 2265, 2273, 2280, 2288, 2290) are missing from this extract.
2238 {
2239  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2240  AVFrameSideData *sd;
2241  int ret;
2242 
2243  ret = av_buffer_replace(&ifp->hw_frames_ctx, frame->hw_frames_ctx);
2244  if (ret < 0)
2245  return ret;
2246 
// audio uses 1/sample_rate; CFR video uses the inverse framerate;
// otherwise keep the frame's own time base
2247  ifp->time_base = (ifilter->type == AVMEDIA_TYPE_AUDIO) ? (AVRational){ 1, frame->sample_rate } :
2248  (ifp->opts.flags & IFILTER_FLAG_CFR) ? av_inv_q(ifp->opts.framerate) :
2249  frame->time_base;
2250 
2251  ifp->format = frame->format;
2252 
2253  ifp->width = frame->width;
2254  ifp->height = frame->height;
2255  ifp->sample_aspect_ratio = frame->sample_aspect_ratio;
2256  ifp->color_space = frame->colorspace;
2257  ifp->color_range = frame->color_range;
2258  ifp->alpha_mode = frame->alpha_mode;
2259 
2260  ifp->sample_rate = frame->sample_rate;
2261  ret = av_channel_layout_copy(&ifp->ch_layout, &frame->ch_layout);
2262  if (ret < 0)
2263  return ret;
2264 
// keep only "global" side data; the display matrix is handled separately below
2266  for (int i = 0; i < frame->nb_side_data; i++) {
2267  const AVSideDataDescriptor *desc = av_frame_side_data_desc(frame->side_data[i]->type);
2268 
2269  if (!(desc->props & AV_SIDE_DATA_PROP_GLOBAL) ||
2270  frame->side_data[i]->type == AV_FRAME_DATA_DISPLAYMATRIX)
2271  continue;
2272 
2274  &ifp->nb_side_data,
2275  frame->side_data[i], 0);
2276  if (ret < 0)
2277  return ret;
2278  }
2279 
2281  if (sd)
2282  memcpy(ifp->displaymatrix, sd->data, sizeof(ifp->displaymatrix));
2283  ifp->displaymatrix_present = !!sd;
2284 
2285  /* Copy downmix related side data to InputFilterPriv so it may be propagated
2286  * to the filter chain even though it's not "global", as filters like aresample
2287  * require this information during init and not when remixing a frame */
2289  if (sd) {
2291  &ifp->nb_side_data, sd, 0);
2292  if (ret < 0)
2293  return ret;
2294  memcpy(&ifp->downmixinfo, sd->data, sizeof(ifp->downmixinfo));
2295  }
2296  ifp->downmixinfo_present = !!sd;
2297 
2298  return 0;
2299 }
2300 
// Propagate parameters from a source filtergraph output to the input filter
// bound to it: inherit the framerate (marking the input CFR when valid) and
// copy the output's side data.
// NOTE(review): the signature line (orig. 2301) and the side-data copy head
// (orig. 2313-2314, including the `ret` declaration) are missing from this
// extract -- confirm the function name upstream.
2302 {
2303  const OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
2304  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2305 
// only inherit a framerate if the user did not set one explicitly
2306  if (!ifp->opts.framerate.num) {
2307  ifp->opts.framerate = ofp->fps.framerate;
2308  if (ifp->opts.framerate.num > 0 && ifp->opts.framerate.den > 0)
2309  ifp->opts.flags |= IFILTER_FLAG_CFR;
2310  }
2311 
2312  for (int i = 0; i < ofp->nb_side_data; i++) {
2315  if (ret < 0)
2316  return ret;
2317  }
2318 
2319  return 0;
2320 }
2321 
// Return whether the filtergraph is "simple" (1 input, 1 output, created
// implicitly for an output stream) as opposed to a -filter_complex graph.
// NOTE(review): the signature line (orig. 2322) is missing from this extract.
2323 {
2324  const FilterGraphPriv *fgp = cfgp_from_cfg(fg);
2325  return fgp->is_simple;
2326 }
2327 
2328 static void send_command(FilterGraph *fg, AVFilterGraph *graph,
2329  double time, const char *target,
2330  const char *command, const char *arg, int all_filters)
2331 {
2332  int ret;
2333 
2334  if (!graph)
2335  return;
2336 
2337  if (time < 0) {
2338  char response[4096];
2339  ret = avfilter_graph_send_command(graph, target, command, arg,
2340  response, sizeof(response),
2341  all_filters ? 0 : AVFILTER_CMD_FLAG_ONE);
2342  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s",
2343  fg->index, ret, response);
2344  } else if (!all_filters) {
2345  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
2346  } else {
2347  ret = avfilter_graph_queue_command(graph, target, command, arg, 0, time);
2348  if (ret < 0)
2349  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
2350  }
2351 }
2352 
2353 static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
2354 {
2355  int nb_requests, nb_requests_max = -1;
2356  int best_input = -1;
2357 
2358  for (int i = 0; i < fg->nb_inputs; i++) {
2359  InputFilter *ifilter = fg->inputs[i];
2360 
2361  if (fgt->eof_in[i])
2362  continue;
2363 
2364  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
2365  if (nb_requests > nb_requests_max) {
2366  nb_requests_max = nb_requests;
2367  best_input = i;
2368  }
2369  }
2370 
2371  av_assert0(best_input >= 0);
2372 
2373  return best_input;
2374 }
2375 
// Decide the output time base (and lock it) for an output filter, based on
// -enc_time_base, the media type, the sink/framerate information and the
// vsync method; for video also finalises fps.framerate.
// NOTE(review): the signature line (orig. 2376) is missing from this extract.
2377 {
2378  OutputFilter *ofilter = &ofp->ofilter;
2379  FPSConvContext *fps = &ofp->fps;
2380  AVRational tb = (AVRational){ 0, 0 };
2381  AVRational fr;
2382  const FrameData *fd;
2383 
2384  fd = frame_data_c(frame);
2385 
2386  // apply -enc_time_base
// "demux" mode requires a valid demuxer time base attached to the frame
2387  if (ofp->enc_timebase.num == ENC_TIME_BASE_DEMUX &&
2388  (fd->dec.tb.num <= 0 || fd->dec.tb.den <= 0)) {
2389  av_log(ofp, AV_LOG_ERROR,
2390  "Demuxing timebase not available - cannot use it for encoding\n");
2391  return AVERROR(EINVAL);
2392  }
2393 
2394  switch (ofp->enc_timebase.num) {
2395  case 0: break;
2396  case ENC_TIME_BASE_DEMUX: tb = fd->dec.tb; break;
2397  case ENC_TIME_BASE_FILTER: tb = frame->time_base; break;
2398  default: tb = ofp->enc_timebase; break;
2399  }
2400 
// audio: default to 1/sample_rate and skip the framerate logic entirely
2401  if (ofilter->type == AVMEDIA_TYPE_AUDIO) {
2402  tb = tb.num ? tb : (AVRational){ 1, frame->sample_rate };
2403  goto finish;
2404  }
2405 
2406  fr = fps->framerate;
2407  if (!fr.num) {
// no user-supplied rate: ask the buffersink for a detected one
2408  AVRational fr_sink = av_buffersink_get_frame_rate(ofilter->filter);
2409  if (fr_sink.num > 0 && fr_sink.den > 0)
2410  fr = fr_sink;
2411  }
2412 
2413  if (fps->vsync_method == VSYNC_CFR || fps->vsync_method == VSYNC_VSCFR) {
2414  if (!fr.num && !fps->framerate_max.num) {
2415  fr = (AVRational){25, 1};
2416  av_log(ofp, AV_LOG_WARNING,
2417  "No information "
2418  "about the input framerate is available. Falling "
2419  "back to a default value of 25fps. Use the -r option "
2420  "if you want a different framerate.\n");
2421  }
2422 
2423  if (fps->framerate_max.num &&
2424  (av_q2d(fr) > av_q2d(fps->framerate_max) ||
2425  !fr.den))
2426  fr = fps->framerate_max;
2427  }
2428 
2429  if (fr.num > 0) {
// snap to the encoder's supported rates, then clip the rational's terms
2430  if (fps->framerate_supported) {
2431  int idx = av_find_nearest_q_idx(fr, fps->framerate_supported);
2432  fr = fps->framerate_supported[idx];
2433  }
2434  if (fps->framerate_clip) {
2435  av_reduce(&fr.num, &fr.den,
2436  fr.num, fr.den, fps->framerate_clip);
2437  }
2438  }
2439 
// fall back: inverse framerate, then the frame's own time base
2440  if (!(tb.num > 0 && tb.den > 0))
2441  tb = av_inv_q(fr);
2442  if (!(tb.num > 0 && tb.den > 0))
2443  tb = frame->time_base;
2444 
2445  fps->framerate = fr;
2446 finish:
2447  ofp->tb_out = tb;
// once locked, reconfigurations keep using this time base
2448  ofp->tb_out_locked = 1;
2449 
2450  return 0;
2451 }
2452 
/*
 * Rescale frame->pts from the filtergraph timebase into tb_dst and return the
 * same timestamp as a double with extra fractional precision (used by the fps
 * conversion code). Updates frame->pts/time_base in place.
 *
 * NOTE(review): the extraction dropped continuation lines 2467 and 2476;
 * upstream both rescales subtract the rescaled start_time — confirm against
 * the repository before relying on the exact expressions below.
 */
2453 static double adjust_frame_pts_to_encoder_tb(void *logctx, AVFrame *frame,
2454  AVRational tb_dst, int64_t start_time)
2455 {
2456  double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
2457 
2458  AVRational tb = tb_dst;
2459  AVRational filter_tb = frame->time_base;
 // widen the timebase denominator so the double keeps sub-tick precision
2460  const int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
2461 
2462  if (frame->pts == AV_NOPTS_VALUE)
2463  goto early_exit;
2464 
2465  tb.den <<= extra_bits;
2466  float_pts = av_rescale_q(frame->pts, filter_tb, tb) -
2468  float_pts /= 1 << extra_bits;
2469  // when float_pts is not exactly an integer,
2470  // avoid exact midpoints to reduce the chance of rounding differences, this
2471  // can be removed in case the fps code is changed to work with integers
2472  if (float_pts != llrint(float_pts))
2473  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
2474 
2475  frame->pts = av_rescale_q(frame->pts, filter_tb, tb_dst) -
2477  frame->time_base = tb_dst;
2478 
2479 early_exit:
2480 
2481  if (debug_ts) {
2482  av_log(logctx, AV_LOG_INFO,
2483  "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
2484  frame ? av_ts2str(frame->pts) : "NULL",
2485  av_ts2timestr(frame->pts, &tb_dst),
2486  float_pts, tb_dst.num, tb_dst.den);
2487  }
2488 
2489  return float_pts;
2490 }
2491 
2492 /* Convert frame timestamps to the encoder timebase and decide how many times
2493  * should this (and possibly previous) frame be repeated in order to conform to
2494  * desired target framerate (if any).
2495  */
/*
 * NOTE(review): the extraction dropped the signature line (2496) — upstream
 * this is `static void video_sync_process(OutputFilterPriv *ofp, AVFrame *frame,`
 * — and lines 2533-2534 (the `#if FFMPEG_OPT_VSYNC_DROP` guard around the
 * VSYNC_DROP check). Confirm against the repository.
 *
 * On return, *nb_frames is how many times to emit the current frame and
 * *nb_frames_prev how many times to re-emit the previous one. frame == NULL
 * means EOF flush: predict counts from the recent history instead.
 */
2497  int64_t *nb_frames, int64_t *nb_frames_prev)
2498 {
2499  OutputFilter *ofilter = &ofp->ofilter;
2500  FPSConvContext *fps = &ofp->fps;
2501  double delta0, delta, sync_ipts, duration;
2502 
2503  if (!frame) {
 // EOF: use the median of the last three per-frame duplication counts
2504  *nb_frames_prev = *nb_frames = mid_pred(fps->frames_prev_hist[0],
2505  fps->frames_prev_hist[1],
2506  fps->frames_prev_hist[2]);
2507 
2508  if (!*nb_frames && fps->last_dropped) {
2509  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2510  fps->last_dropped++;
2511  }
2512 
2513  goto finish;
2514  }
2515 
2516  duration = frame->duration * av_q2d(frame->time_base) / av_q2d(ofp->tb_out);
2517 
2518  sync_ipts = adjust_frame_pts_to_encoder_tb(ofilter->graph, frame,
2519  ofp->tb_out, ofp->ts_offset);
2520  /* delta0 is the "drift" between the input frame and
2521  * where it would fall in the output. */
2522  delta0 = sync_ipts - ofp->next_pts;
2523  delta = delta0 + duration;
2524 
2525  // tracks the number of times the PREVIOUS frame should be duplicated,
2526  // mostly for variable framerate (VFR)
2527  *nb_frames_prev = 0;
2528  /* by default, we output a single frame */
2529  *nb_frames = 1;
2530 
 // frame is slightly early but overlaps the target slot: clip it forward
2531  if (delta0 < 0 &&
2532  delta > 0 &&
2535  && fps->vsync_method != VSYNC_DROP
2536 #endif
2537  ) {
2538  if (delta0 < -0.6) {
2539  av_log(ofp, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
2540  } else
2541  av_log(ofp, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
2542  sync_ipts = ofp->next_pts;
2543  duration += delta0;
2544  delta0 = 0;
2545  }
2546 
2547  switch (fps->vsync_method) {
2548  case VSYNC_VSCFR:
2549  if (fps->frame_number == 0 && delta0 >= 0.5) {
2550  av_log(ofp, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
2551  delta = duration;
2552  delta0 = 0;
2553  ofp->next_pts = llrint(sync_ipts);
2554  }
 /* fallthrough: VSCFR behaves like CFR after the initial-frame handling */
2555  case VSYNC_CFR:
2556  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
2557  if (frame_drop_threshold && delta < frame_drop_threshold && fps->frame_number) {
2558  *nb_frames = 0;
2559  } else if (delta < -1.1)
2560  *nb_frames = 0;
2561  else if (delta > 1.1) {
2562  *nb_frames = llrintf(delta);
2563  if (delta0 > 1.1)
2564  *nb_frames_prev = llrintf(delta0 - 0.6);
2565  }
2566  frame->duration = 1;
2567  break;
2568  case VSYNC_VFR:
2569  if (delta <= -0.6)
2570  *nb_frames = 0;
2571  else if (delta > 0.6)
2572  ofp->next_pts = llrint(sync_ipts);
2573  frame->duration = llrint(duration);
2574  break;
2575 #if FFMPEG_OPT_VSYNC_DROP
2576  case VSYNC_DROP:
2577 #endif
2578  case VSYNC_PASSTHROUGH:
2579  ofp->next_pts = llrint(sync_ipts);
2580  frame->duration = llrint(duration);
2581  break;
2582  default:
2583  av_assert0(0);
2584  }
2585 
2586 finish:
 // shift the duplication history window and record this frame's count
2587  memmove(fps->frames_prev_hist + 1,
2588  fps->frames_prev_hist,
2589  sizeof(fps->frames_prev_hist[0]) * (FF_ARRAY_ELEMS(fps->frames_prev_hist) - 1));
2590  fps->frames_prev_hist[0] = *nb_frames_prev;
2591 
2592  if (*nb_frames_prev == 0 && fps->last_dropped) {
2593  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2594  av_log(ofp, AV_LOG_VERBOSE,
2595  "*** dropping frame %"PRId64" at ts %"PRId64"\n",
2596  fps->frame_number, fps->last_frame->pts);
2597  }
2598  if (*nb_frames > (*nb_frames_prev && fps->last_dropped) + (*nb_frames > *nb_frames_prev)) {
2599  uint64_t nb_frames_dup;
 // refuse absurd duplication runs (likely broken timestamps)
2600  if (*nb_frames > dts_error_threshold * 30) {
2601  av_log(ofp, AV_LOG_ERROR, "%"PRId64" frame duplication too large, skipping\n", *nb_frames - 1);
2602  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2603  *nb_frames = 0;
2604  return;
2605  }
2606  nb_frames_dup = atomic_fetch_add(&ofilter->nb_frames_dup,
2607  *nb_frames - (*nb_frames_prev && fps->last_dropped) - (*nb_frames > *nb_frames_prev));
2608  av_log(ofp, AV_LOG_VERBOSE, "*** %"PRId64" dup!\n", *nb_frames - 1);
2609  if (nb_frames_dup > fps->dup_warning) {
2610  av_log(ofp, AV_LOG_WARNING, "More than %"PRIu64" frames duplicated\n", fps->dup_warning);
2611  fps->dup_warning *= 10;
2612  }
2613  }
2614 
2615  fps->last_dropped = *nb_frames == *nb_frames_prev && frame;
2616  fps->dropped_keyframe |= fps->last_dropped && (frame->flags & AV_FRAME_FLAG_KEY);
2617 }
2618 
/*
 * NOTE(review): the extraction dropped the signature (2619) and several body
 * lines (2621, 2648-2649, 2664); upstream this is
 * `static int close_output(OutputFilterPriv *ofp, FilterGraphThread *fgt)`
 * — confirm against the repository.
 *
 * Finalize one filtergraph output: if no frame was ever produced, send a
 * dummy parameter-only frame so the encoder can still be initialized, then
 * send EOF (NULL) to the scheduler and mark the output as done.
 */
2620 {
2622  int ret;
2623 
2624  // we are finished and no frames were ever seen at this output,
2625  // at least initialize the encoder with a dummy frame
2626  if (!fgt->got_frame) {
2627  AVFrame *frame = fgt->frame;
2628  FrameData *fd;
2629 
 // populate the frame with the negotiated output parameters only
2630  frame->time_base = ofp->tb_out;
2631  frame->format = ofp->format;
2632 
2633  frame->width = ofp->width;
2634  frame->height = ofp->height;
2635  frame->sample_aspect_ratio = ofp->sample_aspect_ratio;
2636 
2637  frame->sample_rate = ofp->sample_rate;
2638  if (ofp->ch_layout.nb_channels) {
2639  ret = av_channel_layout_copy(&frame->ch_layout, &ofp->ch_layout);
2640  if (ret < 0)
2641  return ret;
2642  }
2643 
2644  fd = frame_data(frame);
2645  if (!fd)
2646  return AVERROR(ENOMEM);
2647 
2650  ofp->side_data, ofp->nb_side_data, 0);
2651  if (ret < 0)
2652  return ret;
2653 
2654  fd->frame_rate_filter = ofp->fps.framerate;
2655 
 // the dummy frame must carry no data buffers
2656  av_assert0(!frame->buf[0]);
2657 
2658  av_log(ofp, AV_LOG_WARNING,
2659  "No filtered frames for output stream, trying to "
2660  "initialize anyway.\n");
2661 
2662  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->ofilter.index, frame);
2663  if (ret < 0) {
2665  return ret;
2666  }
2667  }
2668 
2669  fgt->eof_out[ofp->ofilter.index] = 1;
2670 
 // NULL frame signals EOF to the consumers; EOF back is normal here
2671  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->ofilter.index, NULL);
2672  return (ret == AVERROR_EOF) ? 0 : ret;
2673 }
2674 
/*
 * NOTE(review): the extraction dropped the first signature line (2675) and
 * lines 2678/2711; upstream this is
 * `static int fg_output_frame(OutputFilterPriv *ofp, FilterGraphThread *fgt,`
 * — confirm against the repository.
 *
 * Emit one filtered frame (or EOF when frame == NULL) on this output,
 * applying video framerate conversion: the current and/or previous frame may
 * be sent multiple times as computed by video_sync_process().
 */
2676  AVFrame *frame)
2677 {
2679  AVFrame *frame_prev = ofp->fps.last_frame;
2680  enum AVMediaType type = ofp->ofilter.type;
2681 
2682  int64_t nb_frames = !!frame, nb_frames_prev = 0;
2683 
2684  if (type == AVMEDIA_TYPE_VIDEO && (frame || fgt->got_frame))
2685  video_sync_process(ofp, frame, &nb_frames, &nb_frames_prev);
2686 
2687  for (int64_t i = 0; i < nb_frames; i++) {
2688  AVFrame *frame_out;
2689  int ret;
2690 
2691  if (type == AVMEDIA_TYPE_VIDEO) {
 // duplicate the previous frame first, then the current one
2692  AVFrame *frame_in = (i < nb_frames_prev && frame_prev->buf[0]) ?
2693  frame_prev : frame;
2694  if (!frame_in)
2695  break;
2696 
2697  frame_out = fgp->frame_enc;
2698  ret = av_frame_ref(frame_out, frame_in);
2699  if (ret < 0)
2700  return ret;
2701 
2702  frame_out->pts = ofp->next_pts;
2703 
2704  if (ofp->fps.dropped_keyframe) {
2705  frame_out->flags |= AV_FRAME_FLAG_KEY;
2706  ofp->fps.dropped_keyframe = 0;
2707  }
2708  } else {
 // audio: rescale the timestamp into the output timebase
2709  frame->pts = (frame->pts == AV_NOPTS_VALUE) ? ofp->next_pts :
2710  av_rescale_q(frame->pts, frame->time_base, ofp->tb_out) -
2712 
2713  frame->time_base = ofp->tb_out;
2714  frame->duration = av_rescale_q(frame->nb_samples,
2715  (AVRational){ 1, frame->sample_rate },
2716  ofp->tb_out);
2717 
2718  ofp->next_pts = frame->pts + frame->duration;
2719 
2720  frame_out = frame;
2721  }
2722 
2723  // send the frame to consumers
2724  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->ofilter.index, frame_out);
2725  if (ret < 0) {
2726  av_frame_unref(frame_out);
2727 
2728  if (!fgt->eof_out[ofp->ofilter.index]) {
2729  fgt->eof_out[ofp->ofilter.index] = 1;
2730  fgp->nb_outputs_done++;
2731  }
2732 
2733  return ret == AVERROR_EOF ? 0 : ret;
2734  }
2735 
2736  if (type == AVMEDIA_TYPE_VIDEO) {
2737  ofp->fps.frame_number++;
2738  ofp->next_pts++;
2739 
 // only the first copy of a duplicated frame keeps the keyframe flag
2740  if (i == nb_frames_prev && frame)
2741  frame->flags &= ~AV_FRAME_FLAG_KEY;
2742  }
2743 
2744  fgt->got_frame = 1;
2745  }
2746 
 // remember the current frame for possible future duplication
2747  if (frame && frame_prev) {
2748  av_frame_unref(frame_prev);
2749  av_frame_move_ref(frame_prev, frame);
2750  }
2751 
2752  if (!frame)
2753  return close_output(ofp, fgt);
2754 
2755  return 0;
2756 }
2757 
/*
 * NOTE(review): the extraction dropped the first signature line (2758) and
 * numerous body lines (2761-2762, 2766-2767, 2781, 2785, 2797, 2804, 2808,
 * 2810, 2816, 2825, 2834); upstream this is
 * `static int fg_output_step(OutputFilterPriv *ofp, FilterGraphThread *fgt,`
 * and the missing lines include the buffersink read call — confirm against
 * the repository before editing.
 *
 * Pull one frame from this output's buffersink, choose the output timebase on
 * the first frame, attach frame metadata, and forward via fg_output_frame().
 * Returns 0 on success, 1 when no frame is available (EAGAIN/EOF handled),
 * or a negative AVERROR code.
 */
2759  AVFrame *frame)
2760 {
2763  FrameData *fd;
2764  int ret;
2765 
2768  if (ret == AVERROR_EOF && !fgt->eof_out[ofp->ofilter.index]) {
 // flush this output once: emit EOF downstream
2769  ret = fg_output_frame(ofp, fgt, NULL);
2770  return (ret < 0) ? ret : 1;
2771  } else if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
2772  return 1;
2773  } else if (ret < 0) {
2774  av_log(ofp, AV_LOG_WARNING,
2775  "Error in retrieving a frame from the filtergraph: %s\n",
2776  av_err2str(ret));
2777  return ret;
2778  }
2779 
2780  if (fgt->eof_out[ofp->ofilter.index]) {
2782  return 0;
2783  }
2786 
2787  if (debug_ts)
2788  av_log(ofp, AV_LOG_INFO, "filter_raw -> pts:%s pts_time:%s time_base:%d/%d\n",
2789  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &frame->time_base),
2790  frame->time_base.num, frame->time_base.den);
2791 
2792  // Choose the output timebase the first time we get a frame.
2793  if (!ofp->tb_out_locked) {
2794  ret = choose_out_timebase(ofp, frame);
2795  if (ret < 0) {
2796  av_log(ofp, AV_LOG_ERROR, "Could not choose an output time base\n");
2798  return ret;
2799  }
2800  }
2801 
2802  fd = frame_data(frame);
2803  if (!fd) {
2805  return AVERROR(ENOMEM);
2806  }
2807 
2809  if (!fgt->got_frame) {
2811  ofp->side_data, ofp->nb_side_data, 0);
2812  if (ret < 0)
2813  return ret;
2814  }
2815 
2817 
2818  // only use bits_per_raw_sample passed through from the decoder
2819  // if the filtergraph did not touch the frame data
2820  if (!fgp->is_meta)
2821  fd->bits_per_raw_sample = 0;
2822 
2823  if (ofp->ofilter.type == AVMEDIA_TYPE_VIDEO) {
 // synthesize a duration from the framerate when the sink gave none
2824  if (!frame->duration) {
2826  if (fr.num > 0 && fr.den > 0)
2827  frame->duration = av_rescale_q(1, av_inv_q(fr), frame->time_base);
2828  }
2829 
2830  fd->frame_rate_filter = ofp->fps.framerate;
2831  }
2832 
2833  ret = fg_output_frame(ofp, fgt, frame);
2835  if (ret < 0)
2836  return ret;
2837 
2838  return 0;
2839 }
2840 
2841 /* retrieve all frames available at filtergraph outputs
2842  * and send them to consumers */
/*
 * NOTE(review): the extraction dropped the first signature line (2843) and
 * lines 2852/2871/2885; upstream this is
 * `static int read_frames(FilterGraph *fg, FilterGraphThread *fgt,` and the
 * missing lines are the ifp/ofp lookups and the graph request call — confirm
 * against the repository.
 *
 * Drains every output sink, then either requests more input (EAGAIN) or
 * reports completion. Returns 0, AVERROR_EOF when all outputs finished, or
 * another negative AVERROR code.
 */
2844  AVFrame *frame)
2845 {
2846  FilterGraphPriv *fgp = fgp_from_fg(fg);
2847  int did_step = 0;
2848 
2849  // graph not configured, just select the input to request
2850  if (!fgt->graph) {
2851  for (int i = 0; i < fg->nb_inputs; i++) {
2853  if (ifp->format < 0 && !fgt->eof_in[i]) {
2854  fgt->next_in = i;
2855  return 0;
2856  }
2857  }
2858 
2859  // This state - graph is not configured, but all inputs are either
2860  // initialized or EOF - should be unreachable because sending EOF to a
2861  // filter without even a fallback format should fail
2862  av_assert0(0);
2863  return AVERROR_BUG;
2864  }
2865 
2866  while (fgp->nb_outputs_done < fg->nb_outputs) {
2867  int ret;
2868 
2869  /* Reap all buffers present in the buffer sinks */
2870  for (int i = 0; i < fg->nb_outputs; i++) {
2872 
 // fg_output_step() returns 1 when this sink has no more frames now
2873  ret = 0;
2874  while (!ret) {
2875  ret = fg_output_step(ofp, fgt, frame);
2876  if (ret < 0)
2877  return ret;
2878  }
2879  }
2880 
2881  // return after one iteration, so that scheduler can rate-control us
2882  if (did_step && fgp->have_sources)
2883  return 0;
2884 
2886  if (ret == AVERROR(EAGAIN)) {
 // the graph wants more input: pick the most starved one
2887  fgt->next_in = choose_input(fg, fgt);
2888  return 0;
2889  } else if (ret < 0) {
2890  if (ret == AVERROR_EOF)
2891  av_log(fg, AV_LOG_VERBOSE, "Filtergraph returned EOF, finishing\n");
2892  else
2893  av_log(fg, AV_LOG_ERROR,
2894  "Error requesting a frame from the filtergraph: %s\n",
2895  av_err2str(ret));
2896  return ret;
2897  }
2898  fgt->next_in = fg->nb_inputs;
2899 
2900  did_step = 1;
2901  }
2902 
2903  return AVERROR_EOF;
2904 }
2905 
/*
 * NOTE(review): the extraction dropped the signature line (2906); upstream
 * this is `static void sub2video_heartbeat(InputFilter *ifilter, int64_t pts,
 * AVRational tb)` — confirm against the repository.
 *
 * Keep the sub2video stream alive: re-push or refresh the overlaid subtitle
 * picture at the given timestamp so video filtering does not stall while no
 * new subtitle arrives.
 */
2907 {
2908  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2909  int64_t pts2;
2910 
2911  /* subtitles seem to be usually muxed ahead of other streams;
2912  if not, subtracting a larger time here is necessary */
2913  pts2 = av_rescale_q(pts, tb, ifp->time_base) - 1;
2914 
2915  /* do not send the heartbeat frame if the subtitle is already ahead */
2916  if (pts2 <= ifp->sub2video.last_pts)
2917  return;
2918 
2919  if (pts2 >= ifp->sub2video.end_pts || ifp->sub2video.initialize)
2920  /* if we have hit the end of the current displayed subpicture,
2921  or if we need to initialize the system, update the
2922  overlaid subpicture and its start/end times */
2923  sub2video_update(ifp, pts2 + 1, NULL);
2924  else
 /* current subpicture still valid: just re-send it at the new pts */
2925  sub2video_push_ref(ifp, pts2);
2926 }
2927 
/*
 * Feed one subtitle "frame" into a sub2video input.
 *
 * buffer != 0: the graph is not configured yet, so queue a copy of the frame
 * for later. Otherwise handle three cases: a heartbeat frame (no data buffer),
 * EOF (frame == NULL), or an actual AVSubtitle payload to rasterize.
 *
 * NOTE(review): the extraction dropped line 2943 (presumably the move/copy of
 * `frame` into `tmp` before queuing) — confirm against the repository.
 */
2928 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
2929 {
2930  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2931  int ret;
2932 
2933  if (buffer) {
2934  AVFrame *tmp;
2935 
2936  if (!frame)
2937  return 0;
2938 
2939  tmp = av_frame_alloc();
2940  if (!tmp)
2941  return AVERROR(ENOMEM);
2944 
2945  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
2946  if (ret < 0) {
2947  av_frame_free(&tmp);
2948  return ret;
2949  }
2950 
2951  return 0;
2952  }
2953 
2954  // heartbeat frame
2955  if (frame && !frame->buf[0]) {
2956  sub2video_heartbeat(ifilter, frame->pts, frame->time_base);
2957  return 0;
2958  }
2959 
2960  if (!frame) {
 // EOF: make sure the last subpicture is flushed, then close the src
2961  if (ifp->sub2video.end_pts < INT64_MAX)
2962  sub2video_update(ifp, INT64_MAX, NULL);
2963 
2964  return av_buffersrc_add_frame(ifilter->filter, NULL);
2965  }
2966 
2967  ifp->width = frame->width ? frame->width : ifp->width;
2968  ifp->height = frame->height ? frame->height : ifp->height;
2969 
 // the AVSubtitle payload travels inside the frame's first data buffer
2970  sub2video_update(ifp, INT64_MIN, (const AVSubtitle*)frame->buf[0]->data);
2971 
2972  return 0;
2973 }
2974 
/*
 * Signal EOF on one filtergraph input. If the buffersrc exists, close it at
 * the rescaled pts; otherwise (graph never configured) fill the input's
 * parameters from the fallback set and configure the graph if now possible.
 *
 * NOTE(review): the extraction dropped lines 2988, 2990, 3000, 3005-3006 and
 * 3011 (rounding flag, the av_buffersrc_close() call, the sample aspect ratio
 * fallback and the channel-layout copy call) — confirm against the repository.
 */
2975 static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter,
2976  int64_t pts, AVRational tb)
2977 {
2978  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2979  int ret;
2980 
2981  if (fgt->eof_in[ifilter->index])
2982  return 0;
2983 
2984  fgt->eof_in[ifilter->index] = 1;
2985 
2986  if (ifilter->filter) {
2987  pts = av_rescale_q_rnd(pts, tb, ifp->time_base,
2989 
2991  if (ret < 0)
2992  return ret;
2993  } else {
2994  if (ifp->format < 0) {
2995  // the filtergraph was never configured, use the fallback parameters
2996  ifp->format = ifp->opts.fallback->format;
2997  ifp->sample_rate = ifp->opts.fallback->sample_rate;
2998  ifp->width = ifp->opts.fallback->width;
2999  ifp->height = ifp->opts.fallback->height;
3001  ifp->color_space = ifp->opts.fallback->colorspace;
3002  ifp->color_range = ifp->opts.fallback->color_range;
3003  ifp->alpha_mode = ifp->opts.fallback->alpha_mode;
3004  ifp->time_base = ifp->opts.fallback->time_base;
3005 
3007  &ifp->opts.fallback->ch_layout);
3008  if (ret < 0)
3009  return ret;
3010 
3012  ret = clone_side_data(&ifp->side_data, &ifp->nb_side_data,
3013  ifp->opts.fallback->side_data,
3014  ifp->opts.fallback->nb_side_data, 0);
3015  if (ret < 0)
3016  return ret;
3017 
 // all inputs now have parameters: the graph can be configured
3018  if (ifilter_has_all_input_formats(ifilter->graph)) {
3019  ret = configure_filtergraph(ifilter->graph, fgt);
3020  if (ret < 0) {
3021  av_log(ifilter->graph, AV_LOG_ERROR, "Error initializing filters!\n");
3022  return ret;
3023  }
3024  }
3025  }
3026 
3027  if (ifp->format < 0) {
3028  av_log(ifilter->graph, AV_LOG_ERROR,
3029  "Cannot determine format of input %s after EOF\n",
3030  ifp->opts.name);
3031  return AVERROR_INVALIDDATA;
3032  }
3033  }
3034 
3035  return 0;
3036 }
3037 
/*
 * Bit flags describing which input parameters changed and therefore require
 * a filtergraph reinit (used by send_frame() below).
 * NOTE(review): the opening line (3038, the `enum ... {` header) was dropped
 * by the extraction — confirm the enum tag against the repository.
 */
3039  VIDEO_CHANGED = (1 << 0),
3040  AUDIO_CHANGED = (1 << 1),
3041  MATRIX_CHANGED = (1 << 2),
3042  DOWNMIX_CHANGED = (1 << 3),
3043  HWACCEL_CHANGED = (1 << 4)
3044 };
3045 
/* Return str itself, or the literal "unknown" when str is NULL. */
static const char *unknown_if_null(const char *str)
{
    if (str)
        return str;
    return "unknown";
}
3050 
/*
 * NOTE(review): the extraction dropped the first signature line (3051) and
 * many body lines (3079, 3086, 3096, 3108, 3115, 3129, 3145, 3190, 3195,
 * 3197-3198, 3200); upstream this is
 * `static int send_frame(FilterGraph *fg, FilterGraphThread *fgt,` and the
 * missing lines include the side-data lookups, ifilter_parameters_from_frame()
 * and the av_buffersrc_add_frame_flags() call — confirm against the repo.
 *
 * Feed one decoded frame into a filtergraph input. Detects parameter changes
 * (format/size/layout/display matrix/downmix/hwaccel), reconfigures or drops
 * as requested by the input's flags, then pushes the frame to the buffersrc.
 */
3052  InputFilter *ifilter, AVFrame *frame)
3053 {
3054  FilterGraphPriv *fgp = fgp_from_fg(fg);
3055  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
3056  FrameData *fd;
3057  AVFrameSideData *sd;
3058  int need_reinit = 0, ret;
3059 
3060  /* determine if the parameters for this input changed */
3061  switch (ifilter->type) {
3062  case AVMEDIA_TYPE_AUDIO:
3063  if (ifp->format != frame->format ||
3064  ifp->sample_rate != frame->sample_rate ||
3065  av_channel_layout_compare(&ifp->ch_layout, &frame->ch_layout))
3066  need_reinit |= AUDIO_CHANGED;
3067  break;
3068  case AVMEDIA_TYPE_VIDEO:
3069  if (ifp->format != frame->format ||
3070  ifp->width != frame->width ||
3071  ifp->height != frame->height ||
3072  ifp->color_space != frame->colorspace ||
3073  ifp->color_range != frame->color_range ||
3074  ifp->alpha_mode != frame->alpha_mode)
3075  need_reinit |= VIDEO_CHANGED;
3076  break;
3077  }
3078 
 // display matrix side data added, removed, or changed?
3080  if (!ifp->displaymatrix_present ||
3081  memcmp(sd->data, ifp->displaymatrix, sizeof(ifp->displaymatrix)))
3082  need_reinit |= MATRIX_CHANGED;
3083  } else if (ifp->displaymatrix_present)
3084  need_reinit |= MATRIX_CHANGED;
3085 
 // downmix info side data added, removed, or changed?
3087  if (!ifp->downmixinfo_present ||
3088  memcmp(sd->data, &ifp->downmixinfo, sizeof(ifp->downmixinfo)))
3089  need_reinit |= DOWNMIX_CHANGED;
3090  } else if (ifp->downmixinfo_present)
3091  need_reinit |= DOWNMIX_CHANGED;
3092 
 // -dropchanged mode: discard frames that would force a reinit
3093  if (need_reinit && fgt->graph && (ifp->opts.flags & IFILTER_FLAG_DROPCHANGED)) {
3094  ifp->nb_dropped++;
3095  av_log_once(fg, AV_LOG_WARNING, AV_LOG_DEBUG, &ifp->drop_warned, "Avoiding reinit; dropping frame pts: %s bound for %s\n", av_ts2str(frame->pts), ifilter->name);
3097  return 0;
3098  }
3099 
3100  if (!(ifp->opts.flags & IFILTER_FLAG_REINIT) && fgt->graph)
3101  need_reinit = 0;
3102 
3103  if (!!ifp->hw_frames_ctx != !!frame->hw_frames_ctx ||
3104  (ifp->hw_frames_ctx && ifp->hw_frames_ctx->data != frame->hw_frames_ctx->data))
3105  need_reinit |= HWACCEL_CHANGED;
3106 
3107  if (need_reinit) {
3109  if (ret < 0)
3110  return ret;
3111 
3112  /* Inputs bound to a filtergraph output will have some fields unset.
3113  * Handle them here */
3114  if (ifp->ofilter_src) {
3116  if (ret < 0)
3117  return ret;
3118  }
3119  }
3120 
3121  /* (re)init the graph if possible, otherwise buffer the frame and return */
3122  if (need_reinit || !fgt->graph) {
3123  AVFrame *tmp = av_frame_alloc();
3124 
3125  if (!tmp)
3126  return AVERROR(ENOMEM);
3127 
 // not all inputs have parameters yet: queue the frame for later
3128  if (!ifilter_has_all_input_formats(fg)) {
3130 
3131  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
3132  if (ret < 0)
3133  av_frame_free(&tmp);
3134 
3135  return ret;
3136  }
3137 
 // drain the old graph before tearing it down
3138  ret = fgt->graph ? read_frames(fg, fgt, tmp) : 0;
3139  av_frame_free(&tmp);
3140  if (ret < 0)
3141  return ret;
3142 
3143  if (fgt->graph) {
3144  AVBPrint reason;
3146  if (need_reinit & AUDIO_CHANGED) {
3147  const char *sample_format_name = av_get_sample_fmt_name(frame->format);
3148  av_bprintf(&reason, "audio parameters changed to %d Hz, ", frame->sample_rate);
3149  av_channel_layout_describe_bprint(&frame->ch_layout, &reason);
3150  av_bprintf(&reason, ", %s, ", unknown_if_null(sample_format_name));
3151  }
3152  if (need_reinit & VIDEO_CHANGED) {
3153  const char *pixel_format_name = av_get_pix_fmt_name(frame->format);
3154  const char *color_space_name = av_color_space_name(frame->colorspace);
3155  const char *color_range_name = av_color_range_name(frame->color_range);
3156  const char *alpha_mode = av_alpha_mode_name(frame->alpha_mode);
3157  av_bprintf(&reason, "video parameters changed to %s(%s, %s), %dx%d, %s alpha,",
3158  unknown_if_null(pixel_format_name), unknown_if_null(color_range_name),
3159  unknown_if_null(color_space_name), frame->width, frame->height,
3160  unknown_if_null(alpha_mode));
3161  }
3162  if (need_reinit & MATRIX_CHANGED)
3163  av_bprintf(&reason, "display matrix changed, ");
3164  if (need_reinit & DOWNMIX_CHANGED)
3165  av_bprintf(&reason, "downmix medatata changed, ");
3166  if (need_reinit & HWACCEL_CHANGED)
3167  av_bprintf(&reason, "hwaccel changed, ");
3168  if (reason.len > 1)
3169  reason.str[reason.len - 2] = '\0'; // remove last comma
3170  av_log(fg, AV_LOG_INFO, "Reconfiguring filter graph%s%s\n", reason.len ? " because " : "", reason.str);
3171  } else {
3172  /* Choke all input to avoid buffering excessive frames while the
3173  * initial filter graph is being configured, and before we have a
3174  * preferred input */
3175  sch_filter_choke_inputs(fgp->sch, fgp->sch_idx);
3176  }
3177 
3178  ret = configure_filtergraph(fg, fgt);
3179  if (ret < 0) {
3180  av_log(fg, AV_LOG_ERROR, "Error reinitializing filters!\n");
3181  return ret;
3182  }
3183  }
3184 
 // rebase timestamps onto the buffersrc timebase
3185  frame->pts = av_rescale_q(frame->pts, frame->time_base, ifp->time_base);
3186  frame->duration = av_rescale_q(frame->duration, frame->time_base, ifp->time_base);
3187  frame->time_base = ifp->time_base;
3188 
3189  if (ifp->displaymatrix_applied)
3191 
3192  fd = frame_data(frame);
3193  if (!fd)
3194  return AVERROR(ENOMEM);
3196 
3199  if (ret < 0) {
3201  if (ret != AVERROR_EOF)
3202  av_log(fg, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
3203  return ret;
3204  }
3205 
3206  return 0;
3207 }
3208 
/*
 * Set the name of the current filtering thread for debuggers/top: simple
 * graphs are named after their (single) output, complex ones after the
 * graph's index.
 * NOTE(review): the extraction dropped lines 3215 (the media-type character
 * argument to snprintf) and 3221 (presumably the thread-name setter call) —
 * confirm against the repository.
 */
3209 static void fg_thread_set_name(const FilterGraph *fg)
3210 {
3211  char name[16];
3212  if (filtergraph_is_simple(fg)) {
3213  OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[0]);
3214  snprintf(name, sizeof(name), "%cf%s",
3216  ofp->ofilter.output_name);
3217  } else {
3218  snprintf(name, sizeof(name), "fc%d", fg->index);
3219  }
3220 
3222 }
3223 
/*
 * NOTE(review): the extraction dropped the signature line (3224) and line
 * 3230 (presumably the FIFO free call); upstream this is
 * `static void fg_thread_uninit(FilterGraphThread *fgt)` — confirm.
 *
 * Free all per-thread filtering state: queued output frames, the scratch
 * frame, EOF flag arrays and the filter graph itself, then zero the struct.
 */
3225 {
3226  if (fgt->frame_queue_out) {
3227  AVFrame *frame;
3228  while (av_fifo_read(fgt->frame_queue_out, &frame, 1) >= 0)
3229  av_frame_free(&frame);
3231  }
3232 
3233  av_frame_free(&fgt->frame);
3234  av_freep(&fgt->eof_in);
3235  av_freep(&fgt->eof_out);
3236 
3237  avfilter_graph_free(&fgt->graph);
3238 
3239  memset(fgt, 0, sizeof(*fgt));
3240 }
3241 
/*
 * Initialize per-thread filtering state for graph fg: scratch frame, per-input
 * and per-output EOF flag arrays and the output frame queue. On any failure
 * everything already allocated is released via fg_thread_uninit().
 * Returns 0 or AVERROR(ENOMEM).
 * NOTE(review): the extraction dropped line 3258 (presumably the
 * frame_queue_out allocation call) — confirm against the repository.
 */
3242 static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
3243 {
3244  memset(fgt, 0, sizeof(*fgt));
3245 
3246  fgt->frame = av_frame_alloc();
3247  if (!fgt->frame)
3248  goto fail;
3249 
3250  fgt->eof_in = av_calloc(fg->nb_inputs, sizeof(*fgt->eof_in));
3251  if (!fgt->eof_in)
3252  goto fail;
3253 
3254  fgt->eof_out = av_calloc(fg->nb_outputs, sizeof(*fgt->eof_out));
3255  if (!fgt->eof_out)
3256  goto fail;
3257 
3259  if (!fgt->frame_queue_out)
3260  goto fail;
3261 
3262  return 0;
3263 
3264 fail:
3265  fg_thread_uninit(fgt);
3266  return AVERROR(ENOMEM);
3267 }
3268 
/*
 * Main loop of a filtergraph worker thread. Receives frames, EOF markers and
 * control messages from the scheduler, feeds them into the graph, and drains
 * filtered frames to consumers via read_frames(). Runs until all consumers
 * return EOF or an error occurs.
 *
 * NOTE(review): the extraction dropped lines 3284, 3339, 3368 and 3380
 * (the has-all-input-formats check opening the early-configure branch, the
 * per-output ofp lookup, and the graph-print condition) — confirm against
 * the repository. Also note the `o = ...` read below appears twice; that is
 * an extraction artifact (harmless duplicate assignment), upstream has it
 * once.
 */
3269 static int filter_thread(void *arg)
3270 {
3271  FilterGraphPriv *fgp = arg;
3272  FilterGraph *fg = &fgp->fg;
3273 
3274  FilterGraphThread fgt;
3275  int ret = 0, input_status = 0;
3276 
3277  ret = fg_thread_init(&fgt, fg);
3278  if (ret < 0)
3279  goto finish;
3280 
3281  fg_thread_set_name(fg);
3282 
3283  // if we have all input parameters the graph can now be configured
3285  ret = configure_filtergraph(fg, &fgt);
3286  if (ret < 0) {
3287  av_log(fg, AV_LOG_ERROR, "Error configuring filter graph: %s\n",
3288  av_err2str(ret));
3289  goto finish;
3290  }
3291  }
3292 
3293  while (1) {
3294  InputFilter *ifilter;
3295  InputFilterPriv *ifp = NULL;
3296  enum FrameOpaque o;
3297  unsigned input_idx = fgt.next_in;
3298 
3299  input_status = sch_filter_receive(fgp->sch, fgp->sch_idx,
3300  &input_idx, fgt.frame);
3301  if (input_status == AVERROR_EOF) {
3302  av_log(fg, AV_LOG_VERBOSE, "Filtering thread received EOF\n");
3303  break;
3304  } else if (input_status == AVERROR(EAGAIN)) {
3305  // should only happen when we didn't request any input
3306  av_assert0(input_idx == fg->nb_inputs);
3307  goto read_frames;
3308  }
3309  av_assert0(input_status >= 0);
3310 
3311  o = (intptr_t)fgt.frame->opaque;
3312 
3313  o = (intptr_t)fgt.frame->opaque;
3314 
3315  // message on the control stream
3316  if (input_idx == fg->nb_inputs) {
3317  FilterCommand *fc;
3318 
3319  av_assert0(o == FRAME_OPAQUE_SEND_COMMAND && fgt.frame->buf[0]);
3320 
3321  fc = (FilterCommand*)fgt.frame->buf[0]->data;
3322  send_command(fg, fgt.graph, fc->time, fc->target, fc->command, fc->arg,
3323  fc->all_filters);
3324  av_frame_unref(fgt.frame);
3325  continue;
3326  }
3327 
3328  // we received an input frame or EOF
3329  ifilter = fg->inputs[input_idx];
3330  ifp = ifp_from_ifilter(ifilter);
3331 
3332  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
3333  int hb_frame = input_status >= 0 && o == FRAME_OPAQUE_SUB_HEARTBEAT;
3334  ret = sub2video_frame(ifilter, (fgt.frame->buf[0] || hb_frame) ? fgt.frame : NULL,
3335  !fgt.graph);
3336  } else if (fgt.frame->buf[0]) {
3337  ret = send_frame(fg, &fgt, ifilter, fgt.frame);
3338  } else {
 // a bufferless frame on a data stream means EOF for that input
3340  ret = send_eof(&fgt, ifilter, fgt.frame->pts, fgt.frame->time_base);
3341  }
3342  av_frame_unref(fgt.frame);
3343  if (ret == AVERROR_EOF) {
3344  av_log(fg, AV_LOG_VERBOSE, "Input %u no longer accepts new data\n",
3345  input_idx);
3346  sch_filter_receive_finish(fgp->sch, fgp->sch_idx, input_idx);
3347  continue;
3348  }
3349  if (ret < 0)
3350  goto finish;
3351 
3352 read_frames:
3353  // retrieve all newly available frames
3354  ret = read_frames(fg, &fgt, fgt.frame);
3355  if (ret == AVERROR_EOF) {
3356  av_log(fg, AV_LOG_VERBOSE, "All consumers returned EOF\n");
3357  if (ifp && ifp->opts.flags & IFILTER_FLAG_DROPCHANGED)
3358  av_log(fg, AV_LOG_INFO, "Total changed input frames dropped : %"PRId64"\n", ifp->nb_dropped);
3359  break;
3360  } else if (ret < 0) {
3361  av_log(fg, AV_LOG_ERROR, "Error sending frames to consumers: %s\n",
3362  av_err2str(ret));
3363  goto finish;
3364  }
3365  }
3366 
 // flush any outputs that never saw EOF
3367  for (unsigned i = 0; i < fg->nb_outputs; i++) {
3369 
3370  if (fgt.eof_out[i] || !fgt.graph)
3371  continue;
3372 
3373  ret = fg_output_frame(ofp, &fgt, NULL);
3374  if (ret < 0)
3375  goto finish;
3376  }
3377 
3378 finish:
3379 
3381  print_filtergraph(fg, fgt.graph);
3382 
3383  // EOF is normal termination
3384  if (ret == AVERROR_EOF)
3385  ret = 0;
3386 
3387  fg_thread_uninit(&fgt);
3388 
3389  return ret;
3390 }
3391 
3392 void fg_send_command(FilterGraph *fg, double time, const char *target,
3393  const char *command, const char *arg, int all_filters)
3394 {
3395  FilterGraphPriv *fgp = fgp_from_fg(fg);
3396  AVBufferRef *buf;
3397  FilterCommand *fc;
3398 
3399  fc = av_mallocz(sizeof(*fc));
3400  if (!fc)
3401  return;
3402 
3403  buf = av_buffer_create((uint8_t*)fc, sizeof(*fc), filter_command_free, NULL, 0);
3404  if (!buf) {
3405  av_freep(&fc);
3406  return;
3407  }
3408 
3409  fc->target = av_strdup(target);
3410  fc->command = av_strdup(command);
3411  fc->arg = av_strdup(arg);
3412  if (!fc->target || !fc->command || !fc->arg) {
3413  av_buffer_unref(&buf);
3414  return;
3415  }
3416 
3417  fc->time = time;
3418  fc->all_filters = all_filters;
3419 
3420  fgp->frame->buf[0] = buf;
3421  fgp->frame->opaque = (void*)(intptr_t)FRAME_OPAQUE_SEND_COMMAND;
3422 
3423  sch_filter_command(fgp->sch, fgp->sch_idx, fgp->frame);
3424 }
AV_OPT_SEARCH_CHILDREN
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:605
InputFilterPriv::nb_dropped
uint64_t nb_dropped
Definition: ffmpeg_filter.c:122
SCH_FILTER_OUT
#define SCH_FILTER_OUT(filter, output)
Definition: ffmpeg_sched.h:129
AVSubtitle
Definition: avcodec.h:2082
formats
formats
Definition: signature.h:47
AVBufferSrcParameters::side_data
AVFrameSideData ** side_data
Definition: buffersrc.h:124
AVBufferSrcParameters::color_space
enum AVColorSpace color_space
Video only, the YUV colorspace and range.
Definition: buffersrc.h:121
configure_input_filter
static int configure_input_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:2019
FilterGraphThread::next_in
unsigned next_in
Definition: ffmpeg_filter.c:95
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:203
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
AVFILTER_CMD_FLAG_ONE
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:469
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
AVFrame::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:678
AV_BPRINT_SIZE_UNLIMITED
#define AV_BPRINT_SIZE_UNLIMITED
av_buffersink_get_ch_layout
int av_buffersink_get_ch_layout(const AVFilterContext *ctx, AVChannelLayout *out)
Definition: buffersink.c:367
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
av_buffersink_get_sample_aspect_ratio
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
avfilter_filter_pad_count
unsigned avfilter_filter_pad_count(const AVFilter *filter, int is_output)
Get the number of elements in an AVFilter's inputs or outputs array.
Definition: avfilter.c:631
extra_bits
#define extra_bits(eb)
Definition: intrax8.c:120
OutputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:386
av_clip
#define av_clip
Definition: common.h:100
sch_filter_send
int sch_filter_send(Scheduler *sch, unsigned fg_idx, unsigned out_idx, AVFrame *frame)
Called by filtergraph tasks to send a filtered frame or EOF to consumers.
Definition: ffmpeg_sched.c:2574
OutputFilter::class
const AVClass * class
Definition: ffmpeg.h:384
view_specifier_parse
int view_specifier_parse(const char **pspec, ViewSpecifier *vs)
Definition: ffmpeg_opt.c:309
VSYNC_VFR
@ VSYNC_VFR
Definition: ffmpeg.h:70
av_bprint_is_complete
static int av_bprint_is_complete(const AVBPrint *buf)
Test if the print buffer is complete (not truncated).
Definition: bprint.h:218
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
nb_input_files
int nb_input_files
Definition: ffmpeg.c:106
AVSubtitle::rects
AVSubtitleRect ** rects
Definition: avcodec.h:2087
opt.h
choose_input
static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2353
get_rotation
double get_rotation(const int32_t *displaymatrix)
Definition: cmdutils.c:1554
FrameData::nb_side_data
int nb_side_data
Definition: ffmpeg.h:735
FilterGraphPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:65
read_binary
static int read_binary(void *logctx, const char *path, uint8_t **data, int *len)
Definition: ffmpeg_filter.c:443
FilterGraphPriv::sch
Scheduler * sch
Definition: ffmpeg_filter.c:69
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:51
FilterGraphThread::got_frame
int got_frame
Definition: ffmpeg_filter.c:97
AVFilterGraph::nb_threads
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:615
InputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:134
avfilter_pad_get_name
const char * avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
Get the name of an AVFilterPad.
Definition: avfilter.c:987
FrameData
Definition: ffmpeg.h:713
send_command
static void send_command(FilterGraph *fg, AVFilterGraph *graph, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:2328
InputFilterPriv::last_pts
int64_t last_pts
Definition: ffmpeg_filter.c:155
avfilter_graph_segment_create_filters
int avfilter_graph_segment_create_filters(AVFilterGraphSegment *seg, int flags)
Create filters specified in a graph segment.
Definition: graphparser.c:516
InputFilterOptions::crop_right
unsigned crop_right
Definition: ffmpeg.h:287
OutputFilter::apad
char * apad
Definition: ffmpeg.h:399
out
FILE * out
Definition: movenc.c:55
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:206
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:659
clone_side_data
static int clone_side_data(AVFrameSideData ***dst, int *nb_dst, AVFrameSideData *const *src, int nb_src, unsigned int flags)
Wrapper calling av_frame_side_data_clone() in a loop for all source entries.
Definition: ffmpeg_utils.h:50
FilterGraph::graph_desc
const char * graph_desc
Definition: ffmpeg.h:421
atomic_fetch_add
#define atomic_fetch_add(object, operand)
Definition: stdatomic.h:137
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:948
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3456
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
FilterGraph::inputs
InputFilter ** inputs
Definition: ffmpeg.h:411
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:155
AVBufferSrcParameters::nb_side_data
int nb_side_data
Definition: buffersrc.h:125
InputFilterOptions::crop_bottom
unsigned crop_bottom
Definition: ffmpeg.h:285
av_dict_count
int av_dict_count(const AVDictionary *m)
Get number of entries in dictionary.
Definition: dict.c:37
AVFrame::nb_side_data
int nb_side_data
Definition: frame.h:625
ifilter_parameters_from_frame
static int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
Definition: ffmpeg_filter.c:2237
stream_specifier_parse
int stream_specifier_parse(StreamSpecifier *ss, const char *spec, int allow_remainder, void *logctx)
Parse a stream specifier string into a form suitable for matching.
Definition: cmdutils.c:1012
ofilter_class
static const AVClass ofilter_class
Definition: ffmpeg_filter.c:647
HWACCEL_CHANGED
@ HWACCEL_CHANGED
Definition: ffmpeg_filter.c:3043
frame_drop_threshold
float frame_drop_threshold
Definition: ffmpeg_opt.c:62
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:263
ist_filter_add
int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple, const ViewSpecifier *vs, InputFilterOptions *opts, SchedulerNode *src)
Definition: ffmpeg_demux.c:1042
InputFilterPriv::time_base
AVRational time_base
Definition: ffmpeg_filter.c:136
int64_t
long long int64_t
Definition: coverity.c:34
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:226
configure_output_filter
static int configure_output_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1828
av_alpha_mode_name
const char * av_alpha_mode_name(enum AVAlphaMode mode)
Definition: pixdesc.c:3921
FilterCommand::arg
char * arg
Definition: ffmpeg_filter.c:257
AVSubtitleRect
Definition: avcodec.h:2055
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:115
AVSubtitle::num_rects
unsigned num_rects
Definition: avcodec.h:2086
dec_filter_add
int dec_filter_add(Decoder *dec, InputFilter *ifilter, InputFilterOptions *opts, const ViewSpecifier *vs, SchedulerNode *src)
Definition: ffmpeg_dec.c:1754
OutputFilterPriv::crop_left
unsigned crop_left
Definition: ffmpeg_filter.c:209
fg_free
void fg_free(FilterGraph **pfg)
Definition: ffmpeg_filter.c:1016
FPSConvContext::frames_prev_hist
int64_t frames_prev_hist[3]
Definition: ffmpeg_filter.c:175
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
AVFrame::opaque
void * opaque
Frame owner's private data.
Definition: frame.h:565
AVFrame::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:689
InputFile::index
int index
Definition: ffmpeg.h:525
sample_rates
static const int sample_rates[]
Definition: dcaenc.h:34
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
AVFilterInOut::next
struct AVFilterInOut * next
next input/input in the list, NULL if this is the last
Definition: avfilter.h:757
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:529
AVFrame::width
int width
Definition: frame.h:499
FilterGraphPriv::log_name
char log_name[32]
Definition: ffmpeg_filter.c:50
w
uint8_t w
Definition: llviddspenc.c:38
FilterGraphPriv::have_sources
int have_sources
Definition: ffmpeg_filter.c:57
StreamSpecifier
Definition: cmdutils.h:113
ofilter_bind_enc
int ofilter_bind_enc(OutputFilter *ofilter, unsigned sched_idx_enc, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:813
AVOption
AVOption.
Definition: opt.h:429
InputFilterPriv::ofilter_src
OutputFilter * ofilter_src
Definition: ffmpeg_filter.c:113
fg_output_frame
static int fg_output_frame(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2675
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:190
FilterGraph::index
int index
Definition: ffmpeg.h:409
OutputFilter::index
int index
Definition: ffmpeg.h:388
InputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:133
data
const char data[16]
Definition: mxf.c:149
InputFilter::index
int index
Definition: ffmpeg.h:369
FPSConvContext::last_dropped
int last_dropped
Definition: ffmpeg_filter.c:179
OutputFilterPriv::ts_offset
int64_t ts_offset
Definition: ffmpeg_filter.c:242
cleanup_filtergraph
static void cleanup_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2029
OutputFilterPriv::alpha_mode
enum AVAlphaMode alpha_mode
Definition: ffmpeg_filter.c:205
ffmpeg.h
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
filter
void(* filter)(uint8_t *src, int stride, int qscale)
Definition: h263dsp.c:29
FilterGraph::nb_inputs
int nb_inputs
Definition: ffmpeg.h:412
VIDEO_CHANGED
@ VIDEO_CHANGED
Definition: ffmpeg_filter.c:3039
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
ViewSpecifier
Definition: ffmpeg.h:129
AVDictionary
Definition: dict.c:32
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:671
ofp_from_ofilter
static OutputFilterPriv * ofp_from_ofilter(OutputFilter *ofilter)
Definition: ffmpeg_filter.c:249
AVChannelLayout::order
enum AVChannelOrder order
Channel order used in this layout.
Definition: channel_layout.h:324
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
av_frame_side_data_clone
int av_frame_side_data_clone(AVFrameSideData ***sd, int *nb_sd, const AVFrameSideData *src, unsigned int flags)
Add a new side data entry to an array based on existing side data, taking a reference towards the con...
Definition: side_data.c:248
IFILTER_FLAG_AUTOROTATE
@ IFILTER_FLAG_AUTOROTATE
Definition: ffmpeg.h:264
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:329
configure_output_audio_filter
static int configure_output_audio_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1746
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:604
AVBufferSrcParameters::height
int height
Definition: buffersrc.h:87
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:326
av_strlcatf
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:103
fg_output_step
static int fg_output_step(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2758
FilterGraphPriv
Definition: ffmpeg_filter.c:46
av_channel_layout_describe_bprint
int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout, AVBPrint *bp)
bprint variant of av_channel_layout_describe().
Definition: channel_layout.c:599
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
FilterGraphThread::eof_in
uint8_t * eof_in
Definition: ffmpeg_filter.c:100
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:119
configure_filtergraph
static int configure_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2064
OutputFilterPriv::log_name
char log_name[32]
Definition: ffmpeg_filter.c:194
AUTO_INSERT_FILTER
#define AUTO_INSERT_FILTER(opt_name, filter_name, arg)
stream_specifier_uninit
void stream_specifier_uninit(StreamSpecifier *ss)
Definition: cmdutils.c:1003
InputStream
Definition: ffmpeg.h:476
filter_nbthreads
char * filter_nbthreads
Definition: ffmpeg_opt.c:76
debug_ts
int debug_ts
Definition: ffmpeg_opt.c:70
OutputFilterOptions
Definition: ffmpeg.h:307
InputFilterOptions::trim_start_us
int64_t trim_start_us
Definition: ffmpeg.h:272
InputFilterOptions::flags
unsigned flags
Definition: ffmpeg.h:293
avfilter_graph_create_filter
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
A convenience wrapper that allocates and initializes a filter in a single step.
Definition: avfiltergraph.c:140
avfilter_graph_alloc_filter
AVFilterContext * avfilter_graph_alloc_filter(AVFilterGraph *graph, const AVFilter *filter, const char *name)
Create a new filter instance in a filter graph.
Definition: avfiltergraph.c:167
finish
static void finish(void)
Definition: movenc.c:374
AV_OPT_TYPE_BINARY
@ AV_OPT_TYPE_BINARY
Underlying C type is a uint8_t* that is either NULL or points to an array allocated with the av_mallo...
Definition: opt.h:286
av_color_space_name
const char * av_color_space_name(enum AVColorSpace space)
Definition: pixdesc.c:3856
FRAME_OPAQUE_SUB_HEARTBEAT
@ FRAME_OPAQUE_SUB_HEARTBEAT
Definition: ffmpeg.h:89
OutputFilterPriv
Definition: ffmpeg_filter.c:190
fg_thread_uninit
static void fg_thread_uninit(FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:3224
filter_opt_apply
static int filter_opt_apply(void *logctx, AVFilterContext *f, const char *key, const char *val)
Definition: ffmpeg_filter.c:492
fail
#define fail()
Definition: checkasm.h:207
InputFilter::type
enum AVMediaType type
Definition: ffmpeg.h:372
AVBufferSrcParameters::sample_aspect_ratio
AVRational sample_aspect_ratio
Video only, the sample (pixel) aspect ratio.
Definition: buffersrc.h:92
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
sub2video_push_ref
static void sub2video_push_ref(InputFilterPriv *ifp, int64_t pts)
Definition: ffmpeg_filter.c:328
avfilter_graph_alloc
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:85
AV_PIX_FMT_FLAG_HWACCEL
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:128
FFSIGN
#define FFSIGN(a)
Definition: common.h:75
print_filtergraph
int print_filtergraph(FilterGraph *fg, AVFilterGraph *graph)
Definition: graphprint.c:954
samplefmt.h
OutputFilterPriv::side_data
AVFrameSideData ** side_data
Definition: ffmpeg_filter.c:212
AVERROR_OPTION_NOT_FOUND
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:63
avfilter_graph_segment_free
void avfilter_graph_segment_free(AVFilterGraphSegment **seg)
Free the provided AVFilterGraphSegment and everything associated with it.
Definition: graphparser.c:276
sub2video_get_blank_frame
static int sub2video_get_blank_frame(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:274
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
ifilter_has_all_input_formats
static int ifilter_has_all_input_formats(FilterGraph *fg)
Definition: ffmpeg_filter.c:617
AVFrame::alpha_mode
enum AVAlphaMode alpha_mode
Indicates how the alpha channel of the video is to be handled.
Definition: frame.h:782
val
static double val(void *priv, double ch)
Definition: aeval.c:77
AVFILTER_AUTO_CONVERT_NONE
@ AVFILTER_AUTO_CONVERT_NONE
all automatic conversions disabled
Definition: avfilter.h:719
AVFrame::ch_layout
AVChannelLayout ch_layout
Channel layout of the audio data.
Definition: frame.h:770
SCH_ENC
#define SCH_ENC(encoder)
Definition: ffmpeg_sched.h:123
configure_input_video_filter
static int configure_input_video_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1849
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
avfilter_graph_segment_parse
int avfilter_graph_segment_parse(AVFilterGraph *graph, const char *graph_str, int flags, AVFilterGraphSegment **seg)
Parse a textual filtergraph description into an intermediate form.
Definition: graphparser.c:460
AVDownmixInfo
This structure describes optional metadata relevant to a downmix procedure.
Definition: downmix_info.h:58
pts
static int64_t pts
Definition: transcode_aac.c:644
av_opt_set
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:835
graph_is_meta
static int graph_is_meta(AVFilterGraph *graph)
Definition: ffmpeg_filter.c:2045
FilterGraphThread::frame
AVFrame * frame
Definition: ffmpeg_filter.c:87
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:202
AVFILTER_FLAG_DYNAMIC_INPUTS
#define AVFILTER_FLAG_DYNAMIC_INPUTS
The number of the filter inputs is not determined just by AVFilter.inputs.
Definition: avfilter.h:156
FrameData::tb
AVRational tb
Definition: ffmpeg.h:723
OutputFilterPriv::sws_opts
AVDictionary * sws_opts
Definition: ffmpeg_filter.c:224
fgp_from_fg
static FilterGraphPriv * fgp_from_fg(FilterGraph *fg)
Definition: ffmpeg_filter.c:73
OutputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:201
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
InputFilterPriv::sub2video
struct InputFilterPriv::@8 sub2video
FPSConvContext::dropped_keyframe
int dropped_keyframe
Definition: ffmpeg_filter.c:180
AVRational::num
int num
Numerator.
Definition: rational.h:59
OutputFilter::bound
int bound
Definition: ffmpeg.h:396
LATENCY_PROBE_FILTER_PRE
@ LATENCY_PROBE_FILTER_PRE
Definition: ffmpeg.h:103
InputFilterOptions::trim_end_us
int64_t trim_end_us
Definition: ffmpeg.h:273
AVFilterPad
A filter pad used for either input or output.
Definition: filters.h:39
AV_BUFFERSRC_FLAG_PUSH
@ AV_BUFFERSRC_FLAG_PUSH
Immediately push the frame to the output.
Definition: buffersrc.h:46
sch_add_filtergraph
int sch_add_filtergraph(Scheduler *sch, unsigned nb_inputs, unsigned nb_outputs, SchThreadFunc func, void *ctx)
Add a filtergraph to the scheduler.
Definition: ffmpeg_sched.c:839
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:52
sub2video_heartbeat
static void sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2906
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:76
OutputFilterPriv::nb_side_data
int nb_side_data
Definition: ffmpeg_filter.c:213
avassert.h
OutputFilterPriv::trim_start_us
int64_t trim_start_us
Definition: ffmpeg_filter.c:239
FrameData::frame_rate_filter
AVRational frame_rate_filter
Definition: ffmpeg.h:726
InputFilterPriv::nb_side_data
int nb_side_data
Definition: ffmpeg_filter.c:139
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
send_eof
static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2975
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
InputFilterPriv
Definition: ffmpeg_filter.c:104
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:642
duration
int64_t duration
Definition: movenc.c:65
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
Definition: buffersink.c:347
ifilter_alloc
static InputFilter * ifilter_alloc(FilterGraph *fg)
Definition: ffmpeg_filter.c:987
AVFilterChain::filters
AVFilterParams ** filters
Definition: avfilter.h:933
filter_command_free
static void filter_command_free(void *opaque, uint8_t *data)
Definition: ffmpeg_filter.c:263
VSYNC_VSCFR
@ VSYNC_VSCFR
Definition: ffmpeg.h:71
llrintf
#define llrintf(x)
Definition: libm.h:401
s
#define s(width, name)
Definition: cbs_vp9.c:198
ifilter_bind_ist
static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist, const ViewSpecifier *vs)
Definition: ffmpeg_filter.c:681
FilterGraphPriv::frame_enc
AVFrame * frame_enc
Definition: ffmpeg_filter.c:67
DOWNMIX_CHANGED
@ DOWNMIX_CHANGED
Definition: ffmpeg_filter.c:3042
InputFilterPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:110
FilterGraph::outputs
OutputFilter ** outputs
Definition: ffmpeg.h:413
ofilter_item_name
static const char * ofilter_item_name(void *obj)
Definition: ffmpeg_filter.c:641
AVDictionaryEntry::key
char * key
Definition: dict.h:91
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:201
VIEW_SPECIFIER_TYPE_NONE
@ VIEW_SPECIFIER_TYPE_NONE
Definition: ffmpeg.h:118
AV_CHANNEL_ORDER_UNSPEC
@ AV_CHANNEL_ORDER_UNSPEC
Only the channel count is specified, without any further information about the channel order.
Definition: channel_layout.h:119
ifilter_bind_dec
static int ifilter_bind_dec(InputFilterPriv *ifp, Decoder *dec, const ViewSpecifier *vs)
Definition: ffmpeg_filter.c:740
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
OutputFilter::linklabel
uint8_t * linklabel
Definition: ffmpeg.h:397
InputFilter
Definition: ffmpeg.h:366
FilterGraphPriv::nb_outputs_done
unsigned nb_outputs_done
Definition: ffmpeg_filter.c:60
fc
#define fc(width, name, range_min, range_max)
Definition: cbs_av1.c:494
av_buffersink_get_format
int av_buffersink_get_format(const AVFilterContext *ctx)
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:41
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
FrameData::dec
struct FrameData::@4 dec
ctx
AVFormatContext * ctx
Definition: movenc.c:49
OFILTER_FLAG_AUTOSCALE
@ OFILTER_FLAG_AUTOSCALE
Definition: ffmpeg.h:302
print_graphs_file
char * print_graphs_file
Definition: ffmpeg_opt.c:81
InputFilter::linklabel
uint8_t * linklabel
Definition: ffmpeg.h:380
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
AVSubtitle::pts
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2088
fg_thread_init
static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
Definition: ffmpeg_filter.c:3242
InputFilterOptions::name
uint8_t * name
Definition: ffmpeg.h:275
InputFilterOptions::crop_top
unsigned crop_top
Definition: ffmpeg.h:284
InputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:367
AV_SIDE_DATA_PROP_GLOBAL
@ AV_SIDE_DATA_PROP_GLOBAL
The side data type can be used in stream-global structures.
Definition: frame.h:296
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:51
key
const char * key
Definition: hwcontext_opencl.c:189
color_range
color_range
Definition: vf_selectivecolor.c:43
AV_ROUND_NEAR_INF
@ AV_ROUND_NEAR_INF
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:135
fsize
static int64_t fsize(FILE *f)
Definition: audiomatch.c:29
OutputFilterPriv::fps
FPSConvContext fps
Definition: ffmpeg_filter.c:244
av_buffersink_get_alpha_mode
enum AVAlphaMode av_buffersink_get_alpha_mode(const AVFilterContext *ctx)
fg_item_name
static const char * fg_item_name(void *obj)
Definition: ffmpeg_filter.c:1074
AV_ROUND_PASS_MINMAX
@ AV_ROUND_PASS_MINMAX
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for ...
Definition: mathematics.h:159
command
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:1187
tmp
static uint8_t tmp[40]
Definition: aes_ctr.c:52
arg
const char * arg
Definition: jacosubdec.c:67
OutputFilterPriv::ch_layouts
const AVChannelLayout * ch_layouts
Definition: ffmpeg_filter.c:230
OutputFilterPriv::width
int width
Definition: ffmpeg_filter.c:200
InputFilterOptions::crop_left
unsigned crop_left
Definition: ffmpeg.h:286
av_color_range_name
const char * av_color_range_name(enum AVColorRange range)
Definition: pixdesc.c:3772
AVBufferSrcParameters::alpha_mode
enum AVAlphaMode alpha_mode
Video only, the alpha mode.
Definition: buffersrc.h:130
AVFormatContext
Format I/O context.
Definition: avformat.h:1264
avfilter_get_by_name
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:648
opts
AVDictionary * opts
Definition: movenc.c:51
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:767
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
OutputFilter::name
uint8_t * name
Definition: ffmpeg.h:387
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
avfilter_graph_config
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
Definition: avfiltergraph.c:1433
OutputFilterPriv::crop_right
unsigned crop_right
Definition: ffmpeg_filter.c:210
OutputFilterPriv::enc_timebase
AVRational enc_timebase
Definition: ffmpeg_filter.c:238
avfilter_graph_segment_apply
int avfilter_graph_segment_apply(AVFilterGraphSegment *seg, int flags, AVFilterInOut **inputs, AVFilterInOut **outputs)
Apply all filter/link descriptions from a graph segment to the associated filtergraph.
Definition: graphparser.c:882
InputFilterPriv::color_space
enum AVColorSpace color_space
Definition: ffmpeg_filter.c:129
NULL
#define NULL
Definition: coverity.c:32
av_opt_set_bin
int av_opt_set_bin(void *obj, const char *name, const uint8_t *val, int len, int search_flags)
Definition: opt.c:895
set_channel_layout
static int set_channel_layout(OutputFilterPriv *f, const AVChannelLayout *layouts_allowed, const AVChannelLayout *layout_requested)
Definition: ffmpeg_filter.c:773
OutputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:202
AVFilterParams
Parameters describing a filter to be created in a filtergraph.
Definition: avfilter.h:865
format
New swscale design to change SwsGraph is what coordinates multiple passes These can include cascaded scaling error diffusion and so on Or we could have separate passes for the vertical and horizontal scaling In between each SwsPass lies a fully allocated image buffer Graph passes may have different levels of e g we can have a single threaded error diffusion pass following a multi threaded scaling pass SwsGraph is internally recreated whenever the image format
Definition: swscale-v2.txt:14
FPSConvContext::dup_warning
uint64_t dup_warning
Definition: ffmpeg_filter.c:177
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
InputStream::st
AVStream * st
Definition: ffmpeg.h:484
avfilter_graph_set_auto_convert
void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags)
Enable or disable automatic format conversion inside the graph.
Definition: avfiltergraph.c:162
InputFilterPriv::displaymatrix_present
int displaymatrix_present
Definition: ffmpeg_filter.c:145
Decoder
Definition: ffmpeg.h:462
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
OFILTER_FLAG_AUDIO_24BIT
@ OFILTER_FLAG_AUDIO_24BIT
Definition: ffmpeg.h:301
AVFilterChain::nb_filters
size_t nb_filters
Definition: avfilter.h:934
av_frame_side_data_remove
void av_frame_side_data_remove(AVFrameSideData ***sd, int *nb_sd, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type from an array.
Definition: side_data.c:102
AVFilterGraph::filters
AVFilterContext ** filters
Definition: avfilter.h:591
ofilter_bind_ifilter
static int ofilter_bind_ifilter(OutputFilter *ofilter, InputFilterPriv *ifp, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:928
OutputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:222
ofilter_alloc
static OutputFilter * ofilter_alloc(FilterGraph *fg, enum AVMediaType type)
Definition: ffmpeg_filter.c:655
AV_BUFFERSRC_FLAG_KEEP_REF
@ AV_BUFFERSRC_FLAG_KEEP_REF
Keep a reference to the frame.
Definition: buffersrc.h:53
close_output
static int close_output(OutputFilterPriv *ofp, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2619
FilterGraphThread::frame_queue_out
AVFifo * frame_queue_out
Definition: ffmpeg_filter.c:92
mathops.h
FilterGraphPriv::sch_idx
unsigned sch_idx
Definition: ffmpeg_filter.c:70
FrameData::wallclock
int64_t wallclock[LATENCY_PROBE_NB]
Definition: ffmpeg.h:730
avfilter_graph_request_oldest
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
Definition: avfiltergraph.c:1566
time.h
AVFilterGraphSegment::chains
AVFilterChain ** chains
A list of filter chain contained in this segment.
Definition: avfilter.h:957
stream_specifier_match
unsigned stream_specifier_match(const StreamSpecifier *ss, const AVFormatContext *s, const AVStream *st, void *logctx)
Definition: cmdutils.c:1227
AVFilterGraph
Definition: avfilter.h:589
AV_FRAME_SIDE_DATA_FLAG_REPLACE
#define AV_FRAME_SIDE_DATA_FLAG_REPLACE
Don't add a new entry if another of the same type exists.
Definition: frame.h:1053
InputFilterPriv::downmixinfo_present
int downmixinfo_present
Definition: ffmpeg_filter.c:149
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:244
InputFilterOptions
Definition: ffmpeg.h:271
fg_create_simple
int fg_create_simple(FilterGraph **pfg, InputStream *ist, char **graph_desc, Scheduler *sch, unsigned sched_idx_enc, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:1248
InputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:128
AVCOL_RANGE_UNSPECIFIED
@ AVCOL_RANGE_UNSPECIFIED
Definition: pixfmt.h:743
FilterGraph::nb_outputs
int nb_outputs
Definition: ffmpeg.h:414
OutputFilterPriv::formats
const int * formats
Definition: ffmpeg_filter.c:229
av_opt_set_int
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:880
InputStream::par
AVCodecParameters * par
Codec parameters - to be used by the decoding/streamcopy code.
Definition: ffmpeg.h:492
av_buffer_create
AVBufferRef * av_buffer_create(uint8_t *data, size_t size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:55
input_files
InputFile ** input_files
Definition: ffmpeg.c:105
AV_CLASS_CATEGORY_FILTER
@ AV_CLASS_CATEGORY_FILTER
Definition: log.h:36
Scheduler
Definition: ffmpeg_sched.c:273
FilterGraphPriv::fg
FilterGraph fg
Definition: ffmpeg_filter.c:47
FilterGraphPriv::nb_threads
int nb_threads
Definition: ffmpeg_filter.c:62
OutputFilterPriv::ofilter
OutputFilter ofilter
Definition: ffmpeg_filter.c:191
FilterGraph
Definition: ffmpeg.h:407
AVFilterGraphSegment
A parsed representation of a filtergraph segment.
Definition: avfilter.h:946
OutputFilterPriv::crop_bottom
unsigned crop_bottom
Definition: ffmpeg_filter.c:208
ENC_TIME_BASE_DEMUX
@ ENC_TIME_BASE_DEMUX
Definition: ffmpeg.h:78
InputFilterOptions::sub2video_width
int sub2video_width
Definition: ffmpeg.h:289
InputFilter::filter
AVFilterContext * filter
Definition: ffmpeg.h:374
AVBufferSrcParameters::frame_rate
AVRational frame_rate
Video only, the frame rate of the input video.
Definition: buffersrc.h:100
AVFilterInOut::pad_idx
int pad_idx
index of the filt_ctx pad to use for linking
Definition: avfilter.h:754
AVAlphaMode
AVAlphaMode
Correlation between the alpha channel and color values.
Definition: pixfmt.h:810
av_buffersrc_close
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
Close the buffer source after EOF.
Definition: buffersrc.c:280
AVFilterGraph::scale_sws_opts
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:594
filtergraph_is_simple
int filtergraph_is_simple(const FilterGraph *fg)
Definition: ffmpeg_filter.c:2322
VideoSyncMethod
VideoSyncMethod
Definition: ffmpeg.h:66
av_opt_find
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1991
FrameData::side_data
AVFrameSideData ** side_data
Definition: ffmpeg.h:734
IFILTER_FLAG_REINIT
@ IFILTER_FLAG_REINIT
Definition: ffmpeg.h:265
f
f
Definition: af_crystalizer.c:122
OutputFilter::output_name
char * output_name
Definition: ffmpeg.h:392
AVIOContext
Bytestream IO Context.
Definition: avio.h:160
av_ts2timestr
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:83
filter_thread
static int filter_thread(void *arg)
Definition: ffmpeg_filter.c:3269
AVMediaType
AVMediaType
Definition: avutil.h:198
InputFilterPriv::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: ffmpeg_filter.c:143
AVFifo
Definition: fifo.c:35
FRAME_OPAQUE_SEND_COMMAND
@ FRAME_OPAQUE_SEND_COMMAND
Definition: ffmpeg.h:91
FilterGraphThread
Definition: ffmpeg_filter.c:84
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
InputFilterPriv::displaymatrix
int32_t displaymatrix[9]
Definition: ffmpeg_filter.c:147
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:319
OutputFilterPriv::color_ranges
enum AVColorRange * color_ranges
Definition: ffmpeg_filter.c:233
FilterGraphThread::graph
AVFilterGraph * graph
Definition: ffmpeg_filter.c:85
av_buffersrc_parameters_alloc
AVBufferSrcParameters * av_buffersrc_parameters_alloc(void)
Allocate a new AVBufferSrcParameters instance.
Definition: buffersrc.c:107
AVFilterInOut::filter_ctx
AVFilterContext * filter_ctx
filter context associated to this input/output
Definition: avfilter.h:751
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
AVFrame::sample_rate
int sample_rate
Sample rate of the audio data.
Definition: frame.h:590
OutputFilterPriv::tb_out_locked
int tb_out_locked
Definition: ffmpeg_filter.c:220
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
avfilter_link
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:149
sch_filter_choke_inputs
void sch_filter_choke_inputs(Scheduler *sch, unsigned fg_idx)
Called by filtergraph tasks to choke all filter inputs, preventing them from receiving more frames un...
Definition: ffmpeg_sched.c:2637
AVBufferSrcParameters::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Video with a hwaccel pixel format only.
Definition: buffersrc.h:106
start_time
static int64_t start_time
Definition: ffplay.c:326
AVFILTER_FLAG_HWDEVICE
#define AVFILTER_FLAG_HWDEVICE
The filter can create hardware frames using AVFilterContext.hw_device_ctx.
Definition: avfilter.h:188
InputFilterPriv::color_range
enum AVColorRange color_range
Definition: ffmpeg_filter.c:130
OutputFilterPriv::displaymatrix
int32_t displaymatrix[9]
Definition: ffmpeg_filter.c:236
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
MATRIX_CHANGED
@ MATRIX_CHANGED
Definition: ffmpeg_filter.c:3041
FilterCommand::time
double time
Definition: ffmpeg_filter.c:259
InputFilterPriv::initialize
unsigned int initialize
marks if sub2video_update should force an initialization
Definition: ffmpeg_filter.c:159
InputFilterPriv::displaymatrix_applied
int displaymatrix_applied
Definition: ffmpeg_filter.c:146
avfilter_graph_queue_command
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
Definition: avfiltergraph.c:1483
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:247
AVFrame::time_base
AVRational time_base
Time base for the timestamps in this frame.
Definition: frame.h:544
AVFrameSideData::data
uint8_t * data
Definition: frame.h:284
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:514
FilterGraphPriv::disable_conversions
int disable_conversions
Definition: ffmpeg_filter.c:58
frame_data
FrameData * frame_data(AVFrame *frame)
Get our axiliary frame data attached to the frame, allocating it if needed.
Definition: ffmpeg.c:473
AVSubtitle::end_display_time
uint32_t end_display_time
Definition: avcodec.h:2085
FilterGraphThread::eof_out
uint8_t * eof_out
Definition: ffmpeg_filter.c:101
allocate_array_elem
void * allocate_array_elem(void *ptr, size_t elem_size, int *nb_elems)
Atomically add a new element to an array of pointers, i.e.
Definition: cmdutils.c:1541
FPSConvContext::vsync_method
enum VideoSyncMethod vsync_method
Definition: ffmpeg_filter.c:182
av_frame_remove_side_data
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type.
Definition: frame.c:725
OutputFilter::filter
AVFilterContext * filter
Definition: ffmpeg.h:390
InputFilterPriv::width
int width
Definition: ffmpeg_filter.c:127
AVBufferSrcParameters::time_base
AVRational time_base
The timebase to be used for the timestamps on the input frames.
Definition: buffersrc.h:82
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:511
filter_is_buffersrc
static int filter_is_buffersrc(const AVFilterContext *f)
Definition: ffmpeg_filter.c:2038
fg_finalise_bindings
int fg_finalise_bindings(void)
Definition: ffmpeg_filter.c:1491
AUDIO_CHANGED
@ AUDIO_CHANGED
Definition: ffmpeg_filter.c:3040
sch_filter_receive
int sch_filter_receive(Scheduler *sch, unsigned fg_idx, unsigned *in_idx, AVFrame *frame)
Called by filtergraph tasks to obtain frames for filtering.
Definition: ffmpeg_sched.c:2503
fg_complex_bind_input
static int fg_complex_bind_input(FilterGraph *fg, InputFilter *ifilter, int commit)
Definition: ffmpeg_filter.c:1300
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:233
unknown_if_null
static const char * unknown_if_null(const char *str)
Definition: ffmpeg_filter.c:3046
InputFilterOptions::sub2video_height
int sub2video_height
Definition: ffmpeg.h:290
decoders
Decoder ** decoders
Definition: ffmpeg.c:114
OutputFilterPriv::log_parent
void * log_parent
Definition: ffmpeg_filter.c:193
nb_decoders
int nb_decoders
Definition: ffmpeg.c:115
OutputFilter::type
enum AVMediaType type
Definition: ffmpeg.h:401
read_frames
static int read_frames(FilterGraph *fg, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2843
av_channel_layout_compare
int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
Check whether two channel layouts are semantically the same, i.e.
Definition: channel_layout.c:809
SUBTITLE_BITMAP
@ SUBTITLE_BITMAP
A bitmap, pict will be set.
Definition: avcodec.h:2038
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
send_frame
static int send_frame(FilterGraph *fg, FilterGraphThread *fgt, InputFilter *ifilter, AVFrame *frame)
Definition: ffmpeg_filter.c:3051
avfilter_init_str
int avfilter_init_str(AVFilterContext *filter, const char *args)
Initialize a filter with the supplied parameters.
Definition: avfilter.c:959
buffersink.h
av_buffersink_get_side_data
const AVFrameSideData *const * av_buffersink_get_side_data(const AVFilterContext *ctx, int *nb_side_data)
Definition: buffersink.c:380
av_channel_layout_default
void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels)
Get the default channel layout for a given number of channels.
Definition: channel_layout.c:839
av_find_nearest_q_idx
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the value in a list of rationals nearest a given reference rational.
Definition: rational.c:140
OutputFilterPriv::color_range
enum AVColorRange color_range
Definition: ffmpeg_filter.c:204
av_buffersink_get_w
int av_buffersink_get_w(const AVFilterContext *ctx)
FilterCommand::all_filters
int all_filters
Definition: ffmpeg_filter.c:260
FPSConvContext::framerate_clip
int framerate_clip
Definition: ffmpeg_filter.c:187
bprint.h
FPSConvContext::frame_number
int64_t frame_number
Definition: ffmpeg_filter.c:171
filter_buffered_frames
int filter_buffered_frames
Definition: ffmpeg_opt.c:78
av_buffersrc_parameters_set
int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param)
Initialize the buffersrc or abuffersrc filter with the provided parameters.
Definition: buffersrc.c:121
graph_opts_apply
static int graph_opts_apply(void *logctx, AVFilterGraphSegment *seg)
Definition: ffmpeg_filter.c:548
FPSConvContext
Definition: ffmpeg_filter.c:168
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVBufferSrcParameters::width
int width
Video only, the display dimensions of the input frames.
Definition: buffersrc.h:87
FrameData::bits_per_raw_sample
int bits_per_raw_sample
Definition: ffmpeg.h:728
av_frame_side_data_free
void av_frame_side_data_free(AVFrameSideData ***sd, int *nb_sd)
Free all side data entries and their contents, then zeroes out the values which the pointers are poin...
Definition: side_data.c:133
fg_send_command
void fg_send_command(FilterGraph *fg, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:3392
downmix_info.h
sch_remove_filtergraph
void sch_remove_filtergraph(Scheduler *sch, int idx)
Definition: ffmpeg_sched.c:460
FilterGraphPriv::is_simple
int is_simple
Definition: ffmpeg_filter.c:52
InputFilterOptions::fallback
AVFrame * fallback
Definition: ffmpeg.h:295
av_buffersrc_add_frame_flags
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:209
VSYNC_CFR
@ VSYNC_CFR
Definition: ffmpeg.h:69
src2
const pixel * src2
Definition: h264pred_template.c:421
configure_input_audio_filter
static int configure_input_audio_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1968
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:700
FPSConvContext::framerate_max
AVRational framerate_max
Definition: ffmpeg_filter.c:185
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:57
needed
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is needed
Definition: filter_design.txt:212
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
OutputFilterPriv::crop_top
unsigned crop_top
Definition: ffmpeg_filter.c:207
delta
float delta
Definition: vorbis_enc_data.h:430
print_graphs
int print_graphs
Definition: ffmpeg_opt.c:80
FRAME_OPAQUE_EOF
@ FRAME_OPAQUE_EOF
Definition: ffmpeg.h:90
InputFile::ctx
AVFormatContext * ctx
Definition: ffmpeg.h:527
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:523
cfgp_from_cfg
static const FilterGraphPriv * cfgp_from_cfg(const FilterGraph *fg)
Definition: ffmpeg_filter.c:78
graph_parse
static int graph_parse(void *logctx, AVFilterGraph *graph, const char *desc, AVFilterInOut **inputs, AVFilterInOut **outputs, AVBufferRef *hw_device)
Definition: ffmpeg_filter.c:572
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
InputFilterPriv::eof
int eof
Definition: ffmpeg_filter.c:119
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
ifilter_parameters_from_ofilter
static int ifilter_parameters_from_ofilter(InputFilter *ifilter, OutputFilter *ofilter)
Definition: ffmpeg_filter.c:2301
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
av_buffer_replace
int av_buffer_replace(AVBufferRef **pdst, const AVBufferRef *src)
Ensure dst refers to the same data as src.
Definition: buffer.c:233
IFILTER_FLAG_DROPCHANGED
@ IFILTER_FLAG_DROPCHANGED
Definition: ffmpeg.h:268
AVFrame::side_data
AVFrameSideData ** side_data
Definition: frame.h:624
len
int len
Definition: vorbis_enc_data.h:426
SchedulerNode
Definition: ffmpeg_sched.h:103
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:703
filtergraphs
FilterGraph ** filtergraphs
Definition: ffmpeg.c:111
int_cb
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:308
OutputFilterPriv::color_space
enum AVColorSpace color_space
Definition: ffmpeg_filter.c:203
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
OFILTER_FLAG_CROP
@ OFILTER_FLAG_CROP
Definition: ffmpeg.h:304
outputs
static const AVFilterPad outputs[]
Definition: af_aap.c:310
sch_connect
int sch_connect(Scheduler *sch, SchedulerNode src, SchedulerNode dst)
Definition: ffmpeg_sched.c:937
FFMPEG_OPT_VSYNC_DROP
#define FFMPEG_OPT_VSYNC_DROP
Definition: ffmpeg.h:60
av_buffersink_get_h
int av_buffersink_get_h(const AVFilterContext *ctx)
OutputFilterPriv::needed
int needed
Definition: ffmpeg_filter.c:196
sch_filter_command
int sch_filter_command(Scheduler *sch, unsigned fg_idx, AVFrame *frame)
Definition: ffmpeg_sched.c:2627
AVFilter
Filter definition.
Definition: avfilter.h:216
video_sync_process
static void video_sync_process(OutputFilterPriv *ofp, AVFrame *frame, int64_t *nb_frames, int64_t *nb_frames_prev)
Definition: ffmpeg_filter.c:2496
ifp_from_ifilter
static InputFilterPriv * ifp_from_ifilter(InputFilter *ifilter)
Definition: ffmpeg_filter.c:163
OFILTER_FLAG_AUTOROTATE
@ OFILTER_FLAG_AUTOROTATE
Definition: ffmpeg.h:303
mid_pred
#define mid_pred
Definition: mathops.h:97
AV_BUFFERSINK_FLAG_NO_REQUEST
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:92
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:744
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:204
pixfmt.h
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
AVALPHA_MODE_UNSPECIFIED
@ AVALPHA_MODE_UNSPECIFIED
Unknown alpha handling, or no alpha channel.
Definition: pixfmt.h:811
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
InputFilter::name
uint8_t * name
Definition: ffmpeg.h:368
VSYNC_DROP
@ VSYNC_DROP
Definition: ffmpeg.h:73
av_channel_layout_check
int av_channel_layout_check(const AVChannelLayout *channel_layout)
Check whether a channel layout is valid, i.e.
Definition: channel_layout.c:783
FPSConvContext::last_frame
AVFrame * last_frame
Definition: ffmpeg_filter.c:169
InputFile::streams
InputStream ** streams
Definition: ffmpeg.h:541
insert_filter
static int insert_filter(AVFilterContext **last_filter, int *pad_idx, const char *filter_name, const char *args)
Definition: ffmpeg_filter.c:1593
OutputFilterPriv::next_pts
int64_t next_pts
Definition: ffmpeg_filter.c:243
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:122
ReinitReason
ReinitReason
Definition: ffmpeg_filter.c:3038
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
AVOption::type
enum AVOptionType type
Definition: opt.h:445
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:524
avfilter_pad_get_type
enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
Get the type of an AVFilterPad.
Definition: avfilter.c:992
av_dynarray_add_nofree
int av_dynarray_add_nofree(void *tab_ptr, int *nb_ptr, void *elem)
Add an element to a dynamic array.
Definition: mem.c:315
AVBufferSrcParameters::color_range
enum AVColorRange color_range
Definition: buffersrc.h:122
FrameOpaque
FrameOpaque
Definition: ffmpeg.h:88
OutputFilterPriv::swr_opts
AVDictionary * swr_opts
Definition: ffmpeg_filter.c:225
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:28
AVFrame::height
int height
Definition: frame.h:499
AVStream::index
int index
stream index in AVFormatContext
Definition: avformat.h:750
IFILTER_FLAG_CROP
@ IFILTER_FLAG_CROP
Definition: ffmpeg.h:267
DEF_CHOOSE_FORMAT
#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name)
Definition: ffmpeg_filter.c:385
channel_layout.h
AVBufferSrcParameters
This structure contains the parameters describing the frames that will be passed to this filter.
Definition: buffersrc.h:73
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
AVBufferSrcParameters::format
int format
video: the pixel format, value corresponds to enum AVPixelFormat audio: the sample format,...
Definition: buffersrc.h:78
describe_filter_link
static char * describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
Definition: ffmpeg_filter.c:629
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
InputFilterPriv::bound
int bound
Definition: ffmpeg_filter.c:120
avfilter_init_dict
int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
Initialize a filter with the supplied dictionary of options.
Definition: avfilter.c:918
AVRational::den
int den
Denominator.
Definition: rational.h:60
InputStream::file
struct InputFile * file
Definition: ffmpeg.h:480
AVFilterChain
A filterchain is a list of filter specifications.
Definition: avfilter.h:932
InputFilterPriv::frame_queue
AVFifo * frame_queue
Definition: ffmpeg_filter.c:141
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
avfilter.h
InputFilterPriv::type_src
enum AVMediaType type_src
Definition: ffmpeg_filter.c:117
av_channel_layout_uninit
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
Definition: channel_layout.c:442
FilterGraphPriv::is_meta
int is_meta
Definition: ffmpeg_filter.c:55
insert_trim
static int insert_trim(void *logctx, int64_t start_time, int64_t duration, AVFilterContext **last_filter, int *pad_idx, const char *filter_name)
Definition: ffmpeg_filter.c:1542
IFILTER_FLAG_CFR
@ IFILTER_FLAG_CFR
Definition: ffmpeg.h:266
AVFILTER_FLAG_METADATA_ONLY
#define AVFILTER_FLAG_METADATA_ONLY
The filter is a "metadata" filter - it does not modify the frame data in any way.
Definition: avfilter.h:183
avio_read
int avio_read(AVIOContext *s, unsigned char *buf, int size)
Read size bytes from AVIOContext into buf.
Definition: aviobuf.c:615
ifilter_bind_fg
static int ifilter_bind_fg(InputFilterPriv *ifp, FilterGraph *fg_src, int out_idx)
Definition: ffmpeg_filter.c:950
choose_out_timebase
static int choose_out_timebase(OutputFilterPriv *ofp, AVFrame *frame)
Definition: ffmpeg_filter.c:2376
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
OutputFilterPriv::flags
unsigned flags
Definition: ffmpeg_filter.c:246
OutputFilterPriv::sample_rates
const int * sample_rates
Definition: ffmpeg_filter.c:231
AVSideDataDescriptor
This struct describes the properties of a side data type.
Definition: frame.h:330
AVERROR_FILTER_NOT_FOUND
#define AVERROR_FILTER_NOT_FOUND
Filter not found.
Definition: error.h:60
sub2video_copy_rect
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg_filter.c:297
InputFilterPriv::side_data
AVFrameSideData ** side_data
Definition: ffmpeg_filter.c:138
AVFilterGraphSegment::nb_chains
size_t nb_chains
Definition: avfilter.h:958
OutputFilterPriv::alpha_modes
enum AVAlphaMode * alpha_modes
Definition: ffmpeg_filter.c:234
AVFilterContext
An instance of a filter.
Definition: avfilter.h:274
FilterGraph::class
const AVClass * class
Definition: ffmpeg.h:408
av_channel_layout_copy
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
Make a copy of a channel layout.
Definition: channel_layout.c:449
OutputFilter
Definition: ffmpeg.h:383
InputFilterPriv::drop_warned
int drop_warned
Definition: ffmpeg_filter.c:121
av_log_once
void av_log_once(void *avcl, int initial_level, int subsequent_level, int *state, const char *fmt,...)
Definition: log.c:450
sub2video_frame
static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
Definition: ffmpeg_filter.c:2928
InputFilterPriv::ifilter
InputFilter ifilter
Definition: ffmpeg_filter.c:105
AVIO_FLAG_READ
#define AVIO_FLAG_READ
read-only
Definition: avio.h:617
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:272
desc
const char * desc
Definition: libsvtav1.c:78
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
configure_output_video_filter
static int configure_output_video_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1619
ViewSpecifier::type
enum ViewSpecifierType type
Definition: ffmpeg.h:130
av_buffersrc_get_nb_failed_requests
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:323
OutputFilterPriv::color_spaces
enum AVColorSpace * color_spaces
Definition: ffmpeg_filter.c:232
mem.h
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
avio_open2
int avio_open2(AVIOContext **s, const char *filename, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: avio.c:492
av_buffersink_get_colorspace
enum AVColorSpace av_buffersink_get_colorspace(const AVFilterContext *ctx)
adjust_frame_pts_to_encoder_tb
static double adjust_frame_pts_to_encoder_tb(void *logctx, AVFrame *frame, AVRational tb_dst, int64_t start_time)
Definition: ffmpeg_filter.c:2453
OutputFilter::nb_frames_drop
atomic_uint_least64_t nb_frames_drop
Definition: ffmpeg.h:404
auto_conversion_filters
int auto_conversion_filters
Definition: ffmpeg_opt.c:83
llrint
#define llrint(x)
Definition: libm.h:396
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:282
bind_inputs
static int bind_inputs(FilterGraph *fg, int commit)
Definition: ffmpeg_filter.c:1473
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
InputStream::index
int index
Definition: ffmpeg.h:482
sch_filter_receive_finish
void sch_filter_receive_finish(Scheduler *sch, unsigned fg_idx, unsigned in_idx)
Called by filter tasks to signal that a filter input will no longer accept input.
Definition: ffmpeg_sched.c:2547
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVDictionaryEntry
Definition: dict.h:90
ENC_TIME_BASE_FILTER
@ ENC_TIME_BASE_FILTER
Definition: ffmpeg.h:79
FilterCommand::target
char * target
Definition: ffmpeg_filter.c:255
av_frame_side_data_desc
const AVSideDataDescriptor * av_frame_side_data_desc(enum AVFrameSideDataType type)
Definition: side_data.c:62
fg_class
static const AVClass fg_class
Definition: ffmpeg_filter.c:1081
fg_create
int fg_create(FilterGraph **pfg, char **graph_desc, Scheduler *sch, const OutputFilterOptions *opts)
Create a new filtergraph in the global filtergraph list.
Definition: ffmpeg_filter.c:1088
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:86
av_dict_get_string
int av_dict_get_string(const AVDictionary *m, char **buffer, const char key_val_sep, const char pairs_sep)
Get dictionary entries as a string.
Definition: dict.c:260
OFILTER_FLAG_DISABLE_CONVERT
@ OFILTER_FLAG_DISABLE_CONVERT
Definition: ffmpeg.h:299
av_dict_copy
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:247
Decoder::type
enum AVMediaType type
Definition: ffmpeg.h:465
InputFilterPriv::format
int format
Definition: ffmpeg_filter.c:125
InputFilterPriv::end_pts
int64_t end_pts
Definition: ffmpeg_filter.c:156
nb_filtergraphs
int nb_filtergraphs
Definition: ffmpeg.c:112
av_frame_side_data_get
static const AVFrameSideData * av_frame_side_data_get(AVFrameSideData *const *sd, const int nb_sd, enum AVFrameSideDataType type)
Wrapper around av_frame_side_data_get_c() to workaround the limitation that for any type T the conver...
Definition: frame.h:1151
int32_t
int32_t
Definition: audioconvert.c:56
InputFilterPriv::alpha_mode
enum AVAlphaMode alpha_mode
Definition: ffmpeg_filter.c:131
sub2video_update
static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts, const AVSubtitle *sub)
Definition: ffmpeg_filter.c:344
timestamp.h
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
avio_close
int avio_close(AVIOContext *s)
Close the resource accessed by the AVIOContext s and free it.
Definition: avio.c:617
OutputFilterPriv::format
int format
Definition: ffmpeg_filter.c:199
av_strlcpy
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:85
avfilter_graph_send_command
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
Definition: avfiltergraph.c:1453
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
graphprint.h
InputFilterPriv::opts
InputFilterOptions opts
Definition: ffmpeg_filter.c:107
dts_error_threshold
float dts_error_threshold
Definition: ffmpeg_opt.c:57
OutputFilterPriv::trim_duration_us
int64_t trim_duration_us
Definition: ffmpeg_filter.c:240
read_file_to_string
char * read_file_to_string(const char *filename)
Definition: cmdutils.c:1572
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
Definition: fifo.c:286
InputFilterPriv::downmixinfo
AVDownmixInfo downmixinfo
Definition: ffmpeg_filter.c:150
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_ts2str
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
h
h
Definition: vp9dsp_template.c:2070
av_bprint_chars
void av_bprint_chars(AVBPrint *buf, char c, unsigned n)
Append char c n times to a print buffer.
Definition: bprint.c:130
hw_device_for_filter
AVBufferRef * hw_device_for_filter(void)
Get a hardware device to be used with this filtergraph.
Definition: ffmpeg_hw.c:298
AVDictionaryEntry::value
char * value
Definition: dict.h:92
AVFilterGraph::nb_filters
unsigned nb_filters
Definition: avfilter.h:592
avstring.h
AVFilterContext::filter
const AVFilter * filter
the AVFilter of which this is an instance
Definition: avfilter.h:277
AVColorRange
AVColorRange
Visual content value range.
Definition: pixfmt.h:742
frame_data_c
const FrameData * frame_data_c(AVFrame *frame)
Definition: ffmpeg.c:479
OutputFilterPriv::tb_out
AVRational tb_out
Definition: ffmpeg_filter.c:217
AVFilterInOut
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:746
VSYNC_PASSTHROUGH
@ VSYNC_PASSTHROUGH
Definition: ffmpeg.h:68
OutputFilterPriv::height
int height
Definition: ffmpeg_filter.c:200
AV_FRAME_DATA_DOWNMIX_INFO
@ AV_FRAME_DATA_DOWNMIX_INFO
Metadata relevant to a downmix procedure.
Definition: frame.h:73
snprintf
#define snprintf
Definition: snprintf.h:34
SCH_FILTER_IN
#define SCH_FILTER_IN(filter, input)
Definition: ffmpeg_sched.h:126
FPSConvContext::framerate
AVRational framerate
Definition: ffmpeg_filter.c:184
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
buffersrc.h
fg_thread_set_name
static void fg_thread_set_name(const FilterGraph *fg)
Definition: ffmpeg_filter.c:3209
ist_find_unused
InputStream * ist_find_unused(enum AVMediaType type)
Find an unused input stream of given type.
Definition: ffmpeg_demux.c:173
sub2video_prepare
static void sub2video_prepare(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:1838
FilterGraph::is_internal
int is_internal
Definition: ffmpeg.h:419
av_rescale_q_rnd
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
av_dict_iterate
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
Definition: dict.c:42
AVSubtitle::start_display_time
uint32_t start_display_time
Definition: avcodec.h:2084
FilterCommand::command
char * command
Definition: ffmpeg_filter.c:256
src
#define src
Definition: vp8dsp.c:248
FilterCommand
Definition: ffmpeg_filter.c:254
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:63
InputFilterPriv::height
int height
Definition: ffmpeg_filter.c:127
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:3376
OutputFilter::nb_frames_dup
atomic_uint_least64_t nb_frames_dup
Definition: ffmpeg.h:403
filter_complex_nbthreads
int filter_complex_nbthreads
Definition: ffmpeg_opt.c:77
InputFilterOptions::framerate
AVRational framerate
Definition: ffmpeg.h:282
av_buffersink_get_color_range
enum AVColorRange av_buffersink_get_color_range(const AVFilterContext *ctx)
ff_thread_setname
static int ff_thread_setname(const char *name)
Definition: thread.h:216
InputFilter::input_name
char * input_name
Definition: ffmpeg.h:376
LATENCY_PROBE_FILTER_POST
@ LATENCY_PROBE_FILTER_POST
Definition: ffmpeg.h:104
FPSConvContext::framerate_supported
const AVRational * framerate_supported
Definition: ffmpeg_filter.c:186