vf_bwdif_cuda.c
/*
 * Copyright (C) 2019 Philip Langdale <philipl@overt.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

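/*
 * CUDA implementation of bwdif (the "Bob Weaver Deinterlacing Filter").
 * The per-pixel work is done by PTX kernels compiled from the companion
 * CUDA source; this file selects and launches the right kernel per plane
 * and reuses the shared yadif frame/field bookkeeping.
 *
 * Illustrative invocation (assumes an FFmpeg build with CUDA and NVENC):
 *   ffmpeg -hwaccel cuda -hwaccel_output_format cuda -i in.mp4 \
 *          -vf bwdif_cuda=mode=send_field -c:v h264_nvenc out.mp4
 */
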
#include "libavutil/avassert.h"
#include "libavutil/hwcontext.h"
#include "libavutil/hwcontext_cuda_internal.h"
#include "libavutil/cuda_check.h"
#include "internal.h"
#include "yadif.h"

#include "cuda/load_helper.h"

extern const unsigned char ff_vf_bwdif_cuda_ptx_data[];
extern const unsigned int ff_vf_bwdif_cuda_ptx_len;

typedef struct DeintCUDAContext {
    YADIFContext yadif;

    AVCUDADeviceContext *hwctx;
    AVBufferRef         *device_ref;
    AVBufferRef         *input_frames_ref;
    AVHWFramesContext   *input_frames;

    CUmodule    cu_module;
    CUfunction  cu_func_uchar;
    CUfunction  cu_func_uchar2;
    CUfunction  cu_func_ushort;
    CUfunction  cu_func_ushort2;
} DeintCUDAContext;

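/*
 * Kernel launch geometry: each thread block covers a BLOCKX x BLOCKY tile
 * of output pixels, and DIV_UP rounds the grid size up so the whole plane
 * is covered.
 */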
#define DIV_UP(a, b) ( ((a) + (b) - 1) / (b) )
#define ALIGN_UP(a, b) (((a) + (b) - 1) & ~((b) - 1))
#define BLOCKX 32
#define BLOCKY 16

#define CHECK_CU(x) FF_CUDA_CHECK_DL(ctx, s->hwctx->internal->cuda_dl, x)

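/*
 * Deinterlace one plane: wrap the prev/cur/next source planes in CUDA
 * texture objects and launch the selected bwdif kernel over the
 * destination plane.
 */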
static CUresult call_kernel(AVFilterContext *ctx, CUfunction func,
                            CUdeviceptr prev, CUdeviceptr cur, CUdeviceptr next,
                            CUarray_format format, int channels,
                            int src_width,  // Width is pixels per channel
                            int src_height, // Height is pixels per channel
                            int src_pitch,  // Pitch is bytes
                            CUdeviceptr dst,
                            int dst_width,  // Width is pixels per channel
                            int dst_height, // Height is pixels per channel
                            int dst_pitch,  // Pitch is pixels per channel
                            int parity, int tff, int clip_max)
{
    DeintCUDAContext *s = ctx->priv;
    YADIFContext *y = &s->yadif;
    CudaFunctions *cu = s->hwctx->internal->cuda_dl;
    CUtexObject tex_prev = 0, tex_cur = 0, tex_next = 0;
    int is_field_end = y->current_field == YADIF_FIELD_END;
    int ret;

    void *args[] = { &dst, &tex_prev, &tex_cur, &tex_next,
                     &dst_width, &dst_height, &dst_pitch,
                     &src_width, &src_height, &parity, &tff,
                     &is_field_end, &clip_max };

    CUDA_TEXTURE_DESC tex_desc = {
        .filterMode = CU_TR_FILTER_MODE_POINT,
        .flags = CU_TRSF_READ_AS_INTEGER,
    };

    CUDA_RESOURCE_DESC res_desc = {
        .resType = CU_RESOURCE_TYPE_PITCH2D,
        .res.pitch2D.format = format,
        .res.pitch2D.numChannels = channels,
        .res.pitch2D.width = src_width,
        .res.pitch2D.height = src_height,
        .res.pitch2D.pitchInBytes = src_pitch,
    };

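    /*
     * Bind each source plane as a 2D pitched texture so the kernel gets
     * point-sampled, raw integer reads of the pixel data.
     */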
    res_desc.res.pitch2D.devPtr = (CUdeviceptr)prev;
    ret = CHECK_CU(cu->cuTexObjectCreate(&tex_prev, &res_desc, &tex_desc, NULL));
    if (ret < 0)
        goto exit;

    res_desc.res.pitch2D.devPtr = (CUdeviceptr)cur;
    ret = CHECK_CU(cu->cuTexObjectCreate(&tex_cur, &res_desc, &tex_desc, NULL));
    if (ret < 0)
        goto exit;

    res_desc.res.pitch2D.devPtr = (CUdeviceptr)next;
    ret = CHECK_CU(cu->cuTexObjectCreate(&tex_next, &res_desc, &tex_desc, NULL));
    if (ret < 0)
        goto exit;

    ret = CHECK_CU(cu->cuLaunchKernel(func,
                                      DIV_UP(dst_width, BLOCKX), DIV_UP(dst_height, BLOCKY), 1,
                                      BLOCKX, BLOCKY, 1,
                                      0, s->hwctx->stream, args, NULL));

exit:
    if (tex_prev)
        CHECK_CU(cu->cuTexObjectDestroy(tex_prev));
    if (tex_cur)
        CHECK_CU(cu->cuTexObjectDestroy(tex_cur));
    if (tex_next)
        CHECK_CU(cu->cuTexObjectDestroy(tex_next));

    return ret;
}

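/*
 * YADIFContext filter callback: produce one deinterlaced output frame by
 * running call_kernel() once per plane, with the kernel variant chosen
 * from the plane's component size and channel count.
 */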
static void filter(AVFilterContext *ctx, AVFrame *dst,
                   int parity, int tff)
{
    DeintCUDAContext *s = ctx->priv;
    YADIFContext *y = &s->yadif;
    CudaFunctions *cu = s->hwctx->internal->cuda_dl;
    CUcontext dummy;
    int i, ret;

    ret = CHECK_CU(cu->cuCtxPushCurrent(s->hwctx->cuda_ctx));
    if (ret < 0)
        return;

    for (i = 0; i < y->csp->nb_components; i++) {
        CUfunction func;
        CUarray_format format;
        int pixel_size, channels, clip_max;
        const AVComponentDescriptor *comp = &y->csp->comp[i];

        if (comp->plane < i) {
            // We process planes as a whole, so don't reprocess
            // them for additional components
            continue;
        }

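        // Derive bytes per component and components per plane from the
        // format descriptor; only 8/16-bit planes with one or two
        // channels are supported by the CUDA kernels.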
        pixel_size = (comp->depth + comp->shift) / 8;
        channels = comp->step / pixel_size;
        if (pixel_size > 2 || channels > 2) {
            av_log(ctx, AV_LOG_ERROR, "Unsupported pixel format: %s\n", y->csp->name);
            goto exit;
        }
        switch (pixel_size) {
        case 1:
            func = channels == 1 ? s->cu_func_uchar : s->cu_func_uchar2;
            format = CU_AD_FORMAT_UNSIGNED_INT8;
            break;
        case 2:
            func = channels == 1 ? s->cu_func_ushort : s->cu_func_ushort2;
            format = CU_AD_FORMAT_UNSIGNED_INT16;
            break;
        default:
            av_log(ctx, AV_LOG_ERROR, "Unsupported pixel format: %s\n", y->csp->name);
            goto exit;
        }

        clip_max = (1 << (comp->depth + comp->shift)) - 1;

        av_log(ctx, AV_LOG_TRACE,
               "Deinterlacing plane %d: pixel_size: %d channels: %d\n",
               comp->plane, pixel_size, channels);
        call_kernel(ctx, func,
                    (CUdeviceptr)y->prev->data[i],
                    (CUdeviceptr)y->cur->data[i],
                    (CUdeviceptr)y->next->data[i],
                    format, channels,
                    AV_CEIL_RSHIFT(y->cur->width, i ? y->csp->log2_chroma_w : 0),
                    AV_CEIL_RSHIFT(y->cur->height, i ? y->csp->log2_chroma_h : 0),
                    y->cur->linesize[i],
                    (CUdeviceptr)dst->data[i],
                    AV_CEIL_RSHIFT(dst->width, i ? y->csp->log2_chroma_w : 0),
                    AV_CEIL_RSHIFT(dst->height, i ? y->csp->log2_chroma_h : 0),
                    dst->linesize[i] / comp->step,
                    parity, tff, clip_max);
    }

    if (y->current_field == YADIF_FIELD_END) {
        y->current_field = YADIF_FIELD_NORMAL;
    }

exit:
    CHECK_CU(cu->cuCtxPopCurrent(&dummy));
    return;
}

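/*
 * Release everything the filter owns: the loaded CUDA module, the queued
 * yadif frames, and the device/input-frames references.
 */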
static av_cold void deint_cuda_uninit(AVFilterContext *ctx)
{
    CUcontext dummy;
    DeintCUDAContext *s = ctx->priv;
    YADIFContext *y = &s->yadif;

    if (s->hwctx && s->cu_module) {
        CudaFunctions *cu = s->hwctx->internal->cuda_dl;
        CHECK_CU(cu->cuCtxPushCurrent(s->hwctx->cuda_ctx));
        CHECK_CU(cu->cuModuleUnload(s->cu_module));
        CHECK_CU(cu->cuCtxPopCurrent(&dummy));
    }

    av_frame_free(&y->prev);
    av_frame_free(&y->cur);
    av_frame_free(&y->next);

    av_buffer_unref(&s->device_ref);
    s->hwctx = NULL;
    av_buffer_unref(&s->input_frames_ref);
    s->input_frames = NULL;
}

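/*
 * Input pad config: require a hardware frames context on the input link
 * and keep a reference to it for config_output.
 */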
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    DeintCUDAContext *s = ctx->priv;

    if (!inlink->hw_frames_ctx) {
        av_log(ctx, AV_LOG_ERROR, "A hardware frames reference is "
               "required to associate the processing device.\n");
        return AVERROR(EINVAL);
    }

    s->input_frames_ref = av_buffer_ref(inlink->hw_frames_ctx);
    if (!s->input_frames_ref) {
        av_log(ctx, AV_LOG_ERROR, "Failed to create an input frames "
               "reference.\n");
        return AVERROR(ENOMEM);
    }
    s->input_frames = (AVHWFramesContext*)s->input_frames_ref->data;

    return 0;
}

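/*
 * Output pad config: create the output CUDA frames pool, propagate
 * geometry and timing (doubling the frame rate for field-output modes),
 * then load the PTX module and resolve the four kernel entry points.
 */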
static int config_output(AVFilterLink *link)
{
    AVHWFramesContext *output_frames;
    AVFilterContext *ctx = link->src;
    DeintCUDAContext *s = ctx->priv;
    YADIFContext *y = &s->yadif;
    CudaFunctions *cu;
    int ret = 0;
    CUcontext dummy;

    av_assert0(s->input_frames);
    s->device_ref = av_buffer_ref(s->input_frames->device_ref);
    if (!s->device_ref) {
        av_log(ctx, AV_LOG_ERROR, "Failed to create a device "
               "reference.\n");
        return AVERROR(ENOMEM);
    }
    s->hwctx = ((AVHWDeviceContext*)s->device_ref->data)->hwctx;
    cu = s->hwctx->internal->cuda_dl;

    link->hw_frames_ctx = av_hwframe_ctx_alloc(s->device_ref);
    if (!link->hw_frames_ctx) {
        av_log(ctx, AV_LOG_ERROR, "Failed to create HW frame context "
               "for output.\n");
        ret = AVERROR(ENOMEM);
        goto exit;
    }

    output_frames = (AVHWFramesContext*)link->hw_frames_ctx->data;

    output_frames->format    = AV_PIX_FMT_CUDA;
    output_frames->sw_format = s->input_frames->sw_format;
    output_frames->width     = ctx->inputs[0]->w;
    output_frames->height    = ctx->inputs[0]->h;

    output_frames->initial_pool_size = 4;

    ret = ff_filter_init_hw_frames(ctx, link, 10);
    if (ret < 0)
        goto exit;

    ret = av_hwframe_ctx_init(link->hw_frames_ctx);
    if (ret < 0) {
        av_log(ctx, AV_LOG_ERROR, "Failed to initialise CUDA frame "
               "context for output: %d\n", ret);
        goto exit;
    }

    link->time_base = av_mul_q(ctx->inputs[0]->time_base, (AVRational){1, 2});
    link->w = ctx->inputs[0]->w;
    link->h = ctx->inputs[0]->h;

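    // Field-output modes (mode bit 0 set) emit one frame per field, so the
    // advertised output frame rate doubles.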
    if (y->mode & 1)
        link->frame_rate = av_mul_q(ctx->inputs[0]->frame_rate,
                                    (AVRational){2, 1});

    if (link->w < 3 || link->h < 3) {
        av_log(ctx, AV_LOG_ERROR, "Video of less than 3 columns or lines is not supported\n");
        ret = AVERROR(EINVAL);
        goto exit;
    }

    y->csp = av_pix_fmt_desc_get(output_frames->sw_format);
    y->filter = filter;

    ret = CHECK_CU(cu->cuCtxPushCurrent(s->hwctx->cuda_ctx));
    if (ret < 0)
        goto exit;

    ret = ff_cuda_load_module(ctx, s->hwctx, &s->cu_module, ff_vf_bwdif_cuda_ptx_data, ff_vf_bwdif_cuda_ptx_len);
    if (ret < 0)
        goto exit;

    ret = CHECK_CU(cu->cuModuleGetFunction(&s->cu_func_uchar, s->cu_module, "bwdif_uchar"));
    if (ret < 0)
        goto exit;

    ret = CHECK_CU(cu->cuModuleGetFunction(&s->cu_func_uchar2, s->cu_module, "bwdif_uchar2"));
    if (ret < 0)
        goto exit;

    ret = CHECK_CU(cu->cuModuleGetFunction(&s->cu_func_ushort, s->cu_module, "bwdif_ushort"));
    if (ret < 0)
        goto exit;

    ret = CHECK_CU(cu->cuModuleGetFunction(&s->cu_func_ushort2, s->cu_module, "bwdif_ushort2"));
    if (ret < 0)
        goto exit;

exit:
    CHECK_CU(cu->cuCtxPopCurrent(&dummy));

    return ret;
}

static const AVClass bwdif_cuda_class = {
    .class_name = "bwdif_cuda",
    .item_name  = av_default_item_name,
    .option     = ff_yadif_options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_FILTER,
};

static const AVFilterPad deint_cuda_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = ff_yadif_filter_frame,
        .config_props = config_input,
    },
};

static const AVFilterPad deint_cuda_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = ff_yadif_request_frame,
        .config_props  = config_output,
    },
};

const AVFilter ff_vf_bwdif_cuda = {
    .name           = "bwdif_cuda",
    .description    = NULL_IF_CONFIG_SMALL("Deinterlace CUDA frames"),
    .priv_size      = sizeof(DeintCUDAContext),
    .priv_class     = &bwdif_cuda_class,
    .uninit         = deint_cuda_uninit,
    FILTER_SINGLE_PIXFMT(AV_PIX_FMT_CUDA),
    FILTER_INPUTS(deint_cuda_inputs),
    FILTER_OUTPUTS(deint_cuda_outputs),
    .flags          = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
};