#define OFFSET(x) offsetof(TInterlaceContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

#define FULL_SCALE_YUVJ_FORMATS \
    AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P
static void lowpass_line_c(uint8_t *dstp, ptrdiff_t width, const uint8_t *srcp,
                           ptrdiff_t mref, ptrdiff_t pref, int clip_max)
{
    const uint8_t *srcp_above = srcp + mref;
    const uint8_t *srcp_below = srcp + pref;
    int i;
    for (i = 0; i < width; i++) {
        /* integer form of '0.5 * current + 0.25 * above + 0.25 * below';
         * the '1 +' rounds to nearest */
        dstp[i] = (1 + srcp[i] + srcp[i] + srcp_above[i] + srcp_below[i]) >> 2;
    }
}
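The expression is the integer form of a [1 2 1]/4 vertical average with round-to-nearest. A minimal standalone sketch, with hypothetical sample values that are not part of the filter, shows the arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Sketch: the same (above + 2*current + below + 1) >> 2 average applied to
 * three hypothetical 8-bit samples; 100/200/120 lowpasses to 155. */
int main(void)
{
    uint8_t above = 100, cur = 200, below = 120;
    uint8_t out = (1 + cur + cur + above + below) >> 2;
    printf("lowpassed sample: %d\n", out);
    return 0;
}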
static void lowpass_line_c_16(uint8_t *dst8, ptrdiff_t width, const uint8_t *src8,
                              ptrdiff_t mref, ptrdiff_t pref, int clip_max)
{
    uint16_t *dstp       = (uint16_t *)dst8;
    const uint16_t *srcp = (const uint16_t *)src8;
    /* mref/pref are byte offsets; halve them to step in 16-bit samples */
    const uint16_t *srcp_above = srcp + mref / 2;
    const uint16_t *srcp_below = srcp + pref / 2;
    /* ... per-sample averaging as in lowpass_line_c, on 16-bit samples ... */
}
static void lowpass_line_complex_c(uint8_t *dstp, ptrdiff_t width, const uint8_t *srcp,
                                   ptrdiff_t mref, ptrdiff_t pref, int clip_max)
{
    const uint8_t *srcp_above  = srcp + mref;
    const uint8_t *srcp_below  = srcp + pref;
    const uint8_t *srcp_above2 = srcp + mref * 2;
    const uint8_t *srcp_below2 = srcp + pref * 2;
    int i, src_x, src_ab;
    for (i = 0; i < width; i++) {
        /* integer form of the (-1 2 6 2 -1)/8 vertical kernel;
         * the '4 +' rounds to nearest */
        src_x  = srcp[i] << 1;
        src_ab = srcp_above[i] + srcp_below[i];
        dstp[i] = av_clip_uint8((4 + (srcp[i] << 2) + src_x + (src_ab << 1)
                                 - srcp_above2[i] - srcp_below2[i]) >> 3);
        /* prevent over-sharpening: the result must not overshoot the source
         * sample in the direction opposite to the above/below average */
        if (src_ab > src_x) {
            if (dstp[i] < srcp[i])
                dstp[i] = srcp[i];
        } else if (dstp[i] > srcp[i])
            dstp[i] = srcp[i];
    }
}
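Written out, the weighting is the (-1 2 6 2 -1)/8 vertical kernel followed by a clamp that keeps the negative outer taps from ringing past the centre sample. A standalone sketch of one such tap, with hypothetical values and a plain 0..255 clip standing in for av_clip_uint8:

#include <stdio.h>

/* Sketch: one complex-lowpass tap on hypothetical samples, with the
 * anti-ringing clamp from the loop above. */
static int complex_tap(int above2, int above, int cur, int below, int below2)
{
    int src_x  = cur << 1;
    int src_ab = above + below;
    int v = (4 + (cur << 2) + src_x + (src_ab << 1) - above2 - below2) >> 3;

    if (v < 0)   v = 0;
    if (v > 255) v = 255;
    if (src_ab > src_x) {        /* neighbours brighter on average */
        if (v < cur)
            v = cur;             /* do not undershoot the centre sample */
    } else if (v > cur) {
        v = cur;                 /* do not overshoot the centre sample */
    }
    return v;
}

int main(void)
{
    printf("%d\n", complex_tap(90, 100, 200, 120, 110)); /* prints 180 */
    return 0;
}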
static void lowpass_line_complex_c_16(uint8_t *dst8, ptrdiff_t width, const uint8_t *src8,
                                      ptrdiff_t mref, ptrdiff_t pref, int clip_max)
{
    uint16_t *dstp       = (uint16_t *)dst8;
    const uint16_t *srcp = (const uint16_t *)src8;
    const uint16_t *srcp_above  = srcp + mref / 2;
    const uint16_t *srcp_below  = srcp + pref / 2;
    /* mref/pref are byte offsets, so they already reach two 16-bit samples up/down */
    const uint16_t *srcp_above2 = srcp + mref;
    const uint16_t *srcp_below2 = srcp + pref;
    int i, dst_le, src_le, src_x, src_ab;
    for (i = 0; i < width; i++) {
        src_le = srcp[i];   /* the upstream source also byteswaps on big-endian here */
        src_x  = src_le << 1;
        src_ab = srcp_above[i] + srcp_below[i];
        /* same (-1 2 6 2 -1)/8 kernel, clipped to the sample range */
        dst_le = av_clip((4 + ((src_le + src_x + src_ab) << 1)
                          - srcp_above2[i] - srcp_below2[i]) >> 3, 0, clip_max);
        /* prevent over-sharpening, as in the 8-bit version */
        if (src_ab > src_x) {
            if (dst_le < src_le)
                dst_le = src_le;
        } else if (dst_le > src_le) {
            dst_le = src_le;
        }
        dstp[i] = dst_le;
    }
}
static int config_out_props(AVFilterLink *outlink)
{
    /* ... */
    tinterlace->vsub = desc->log2_chroma_h;
    /* ... */
    /* colour used to fill the padding lines in pad mode */
    uint8_t black[4] = { 0, 0, 0, 16 };
    /* ... */
    ret = av_image_alloc(tinterlace->black_data, tinterlace->black_linesizes,
                         outlink->w, outlink->h, outlink->format, 16);
    /* ... */
}
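In pad mode this buffer is later used to fill the lines belonging to the discarded field. A hedged sketch of the same allocate-and-prefill pattern using only public libavutil API; the YUV420P format, the even height and the limited-range black values are assumptions of the sketch, not taken from the filter:

#include <string.h>
#include <libavutil/imgutils.h>
#include <libavutil/pixfmt.h>

/* Sketch: allocate a frame-sized image once at configuration time and
 * pre-fill it with black, so padding lines can later be copied from it. */
static int alloc_black_image(uint8_t *data[4], int linesize[4], int w, int h)
{
    int ret = av_image_alloc(data, linesize, w, h, AV_PIX_FMT_YUV420P, 16);
    if (ret < 0)
        return ret;
    memset(data[0], 16,  (size_t)linesize[0] * h);        /* luma: black = 16 */
    memset(data[1], 128, (size_t)linesize[1] * (h / 2));  /* chroma: neutral  */
    memset(data[2], 128, (size_t)linesize[2] * (h / 2));
    return 0;
}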
#define FIELD_UPPER           0
#define FIELD_LOWER           1
#define FIELD_UPPER_AND_LOWER 2
static void copy_picture_field(TInterlaceContext *tinterlace,
                               uint8_t *dst[4], int dst_linesize[4],
                               const uint8_t *src[4], int src_linesize[4],
                               enum AVPixelFormat format, int w, int src_h,
                               int src_field, int interleave, int dst_field,
                               int flags)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(format);
    int plane, vsub = desc->log2_chroma_h;
    /* ... */
    for (plane = 0; plane < desc->nb_components; plane++) {
        int lines = plane == 1 || plane == 2 ? AV_CEIL_RSHIFT(src_h, vsub) : src_h;
        /* ... */
        uint8_t *dstp = dst[plane];
        const uint8_t *srcp = src[plane];
        int srcp_linesize = src_linesize[plane] * k;
        int dstp_linesize = dst_linesize[plane] * (interleave ? 2 : 1);
        int clip_max = (1 << tinterlace->csp->comp[plane].depth) - 1;
        /* ... ('k', 'cols', 'x' and the loop counter 'h' come from elided lines) ... */

        /* advance to the requested source/destination field */
        if (src_field == FIELD_LOWER)
            srcp += src_linesize[plane];
        if (interleave && dst_field == FIELD_LOWER)
            dstp += dst_linesize[plane];
        /* ... */
        /* lowpass every line; mref/pref point to the lines above/below and
         * are zeroed at the plane borders so nothing is read out of bounds */
        for (h = lines; h > 0; h--) {
            ptrdiff_t pref = src_linesize[plane];
            ptrdiff_t mref = -pref;
            if (h >= (lines - x))
                mref = 0; /* there is no line above */
            else if (h <= (1 + x))
                pref = 0; /* there is no line below */

            tinterlace->lowpass_line(dstp, cols, srcp, mref, pref, clip_max);
            dstp += dstp_linesize;
            srcp += srcp_linesize;
        }
        /* ... */
    }
}
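The mref/pref arguments handed to lowpass_line() are byte offsets to the line above and below the current one; at the first and last lines of the plane they are forced to 0 so the callback reads the current line instead of memory outside the plane. A simplified, hedged sketch of that per-plane driver (no field stepping, single-line margin):

#include <stddef.h>
#include <stdint.h>

typedef void (*lowpass_line_fn)(uint8_t *dstp, ptrdiff_t width, const uint8_t *srcp,
                                ptrdiff_t mref, ptrdiff_t pref, int clip_max);

/* Sketch: run a lowpass_line callback over one whole plane, clamping the
 * above/below offsets at the borders so no out-of-bounds line is read. */
static void lowpass_plane(lowpass_line_fn fn, uint8_t *dst, int dst_linesize,
                          const uint8_t *src, int src_linesize,
                          int width, int height, int clip_max)
{
    for (int y = 0; y < height; y++) {
        ptrdiff_t pref = src_linesize;       /* offset of the line below */
        ptrdiff_t mref = -pref;              /* offset of the line above */
        if (y == 0)          mref = 0;       /* no line above the first  */
        if (y == height - 1) pref = 0;       /* no line below the last   */
        fn(dst + (ptrdiff_t)y * dst_linesize, width,
           src + (ptrdiff_t)y * src_linesize, mref, pref, clip_max);
    }
}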
static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
{
    /* ... */
    /* shift the incoming frame into the cur/next pair */
    tinterlace->cur  = tinterlace->next;
    tinterlace->next = picref;

    cur  = tinterlace->cur;
    next = tinterlace->next;

    /* wait until at least two frames are available */
    if (!tinterlace->cur)
        return 0;

    switch (tinterlace->mode) {
    /* ... */
        out->height = outlink->h;
        out->interlaced_frame = 1;
        out->top_field_first = 1;
    /* ... */
        out->height = outlink->h;
    /* ... */
        av_log(ctx, AV_LOG_WARNING,
               "video is already interlaced, adjusting framerate only\n");
    /* ... */
        out->interlaced_frame = 1;
        out->top_field_first = tff;
    /* ... copy the second field from the next frame ... */
                           (const uint8_t **)next->data, next->linesize,
    /* ... */
        out->interlaced_frame = 1;
    /* ... */
        tff = next->top_field_first;
    /* ... */
        out->interlaced_frame = 1;
        out->top_field_first = !tff;
        /* with the doubled-rate time base this is the midpoint of cur and next */
        out->pts = cur->pts + next->pts;
    /* ... */
                           (const uint8_t **)next->data, next->linesize,
    /* ... */
    }
    /* ... */
}
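The cur->pts + next->pts line only makes sense because the doubled-rate modes halve the output time base, so the sum expressed in output units is the midpoint between the two input frames. A small sketch with assumed time bases and pts values, not taken from the filter, illustrates the bookkeeping:

#include <inttypes.h>
#include <stdio.h>
#include <libavutil/mathematics.h>
#include <libavutil/rational.h>

int main(void)
{
    AVRational in_tb  = { 1, 25 };                         /* assumed input time base */
    AVRational out_tb = av_mul_q(in_tb, av_make_q(1, 2));  /* 1/50: doubled rate      */
    int64_t cur_pts = 10, next_pts = 11;                   /* in in_tb units          */

    int64_t first  = av_rescale_q(cur_pts, in_tb, out_tb); /* 20                 */
    int64_t second = cur_pts + next_pts;                   /* 21: halfway point  */
    printf("output pts: %"PRId64", %"PRId64" (in 1/50 s units)\n", first, second);
    return 0;
}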
const AVFilter ff_vf_tinterlace = {
    .name       = "tinterlace",
    /* ... */
    .priv_class = &tinterlace_class,
    /* ... */
};

const AVFilter ff_vf_interlace = {
    /* ... */
    .priv_class = &interlace_class,
    /* ... */
};
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
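Typical use inside a filter's filter_frame(): request a frame on the output link and carry the input frame's properties over. A hedged sketch; the helper name is made up for illustration, the calls are the ones listed in this index:

#include "avfilter.h"
#include "video.h"
#include "libavutil/frame.h"

/* Sketch: obtain an output frame on the outlink and copy the input frame's
 * metadata (pts, sample aspect ratio, ...) onto it. */
static AVFrame *get_output_frame(AVFilterLink *outlink, const AVFrame *in)
{
    AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return NULL;
    if (av_frame_copy_props(out, in) < 0)
        av_frame_free(&out);
    return out;
}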
static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
#define AV_LOG_WARNING
Something somehow does not look correct.
AVPixelFormat
Pixel format.
static int init_interlace(AVFilterContext *ctx)
During format negotiation, each input and each output carries the list of formats it supports: for video that means pixel formats, for audio channel layouts, sample formats and sample rates. The lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at the two ends of a link, all references to both lists are replaced with a reference to the intersection, and when a single format is eventually chosen for a link, all references to the list are updated. A filter that requires the same format on its input and output therefore only has to use one shared list for both. query_formats can also leave some formats unset and return AVERROR(EAGAIN) to be called again later, which lets filters with complex requirements use the format negotiated on one link to set the formats supported on another.
#define TINTERLACE_FLAG_BYPASS_IL
static const AVRational standard_tbs[]
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
#define FILTER_PIXFMTS_ARRAY(array)
The exact code depends on how similar the blocks are and how related they are to the context, and it needs to apply these operations to the correct inlink or outlink if there are several; macros are available to factor that out when no extra processing is needed.
void(* lowpass_line)(uint8_t *dstp, ptrdiff_t width, const uint8_t *srcp, ptrdiff_t mref, ptrdiff_t pref, int clip_max)
static void lowpass_line_complex_c(uint8_t *dstp, ptrdiff_t width, const uint8_t *srcp, ptrdiff_t mref, ptrdiff_t pref, int clip_max)
#define FIELD_UPPER_AND_LOWER
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
This structure describes decoded (raw) audio or video data.
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
int depth
Number of bits in the component.
#define AV_LOG_VERBOSE
Detailed information.
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
uint8_t * black_data[4]
buffer used to fill padded lines
const char * name
Filter name.
A link between two filters.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
void av_image_copy_plane(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_linesize, int bytewidth, int height)
Copy image plane from src to dst.
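In this filter the plane copies step two lines at a time when a single field is extracted; the same effect can be had with av_image_copy_plane() by doubling both linesizes. A simplified sketch; the helper and its parameters are assumptions:

#include <stdint.h>
#include <libavutil/imgutils.h>

/* Sketch: copy only the top field of one 8-bit plane by doubling both
 * linesizes, so every other source line lands on every other destination
 * line; 'bytewidth' is the plane width in bytes. */
static void copy_top_field_plane(uint8_t *dst, int dst_linesize,
                                 const uint8_t *src, int src_linesize,
                                 int bytewidth, int height)
{
    av_image_copy_plane(dst, 2 * dst_linesize,
                        src, 2 * src_linesize,
                        bytewidth, (height + 1) / 2);
}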
int vsub
chroma vertical subsampling
AVRational preout_time_base
@ AV_PIX_FMT_YUV420P12LE
planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
static int config_out_props(AVFilterLink *outlink)
static const AVFilterPad tinterlace_inputs[]
A filter pad used for either input or output.
@ AV_PIX_FMT_YUV420P10LE
planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
static av_cold void uninit(AVFilterContext *ctx)
@ AV_PIX_FMT_YUV444P12LE
planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
#define FF_ARRAY_ELEMS(a)
int flags
flags affecting interlacing algorithm
const AVPixFmtDescriptor * csp
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
#define AV_CEIL_RSHIFT(a, b)
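AV_CEIL_RSHIFT is the rounding-up counterpart of a plain right shift, which matters for the chroma plane sizes of subsampled formats with odd luma dimensions. A short sketch:

#include <libavutil/common.h>
#include <libavutil/pixdesc.h>

/* Sketch: chroma plane height for a subsampled pixel format; e.g. 487 luma
 * lines of 4:2:0 video need AV_CEIL_RSHIFT(487, 1) = 244 chroma lines. */
static int chroma_height(enum AVPixelFormat fmt, int luma_height)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
    return AV_CEIL_RSHIFT(luma_height, desc->log2_chroma_h);
}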
AVRational sample_aspect_ratio
agreed upon sample aspect ratio
#define av_assert0(cond)
assert() equivalent, that is always enabled.
AVRational frame_rate
Frame rate of the stream on the link, or 1/0 if unknown or variable; if left to 0/0,...
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
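A worked example with made-up time bases: 40 ticks of a 1/1000 time base become 3600 ticks of a 1/90000 one.

#include <libavutil/mathematics.h>

/* Sketch: av_rescale_q(a, bq, cq) computes a * bq / cq with 64-bit
 * intermediates; 40 ms expressed in 90 kHz ticks is 3600. */
static int64_t ms_to_90khz(int64_t ms)
{
    return av_rescale_q(ms, (AVRational){ 1, 1000 }, (AVRational){ 1, 90000 });
}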
It's the only field you need to keep, assuming you have a context; there is some magic you don't need to care about around this field.
int ff_draw_init(FFDrawContext *draw, enum AVPixelFormat format, unsigned flags)
void ff_tinterlace_init_x86(TInterlaceContext *interlace)
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
#define FILTER_INPUTS(array)
const AVFilter ff_vf_tinterlace
@ AV_PIX_FMT_YUV444P10LE
planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
@ AV_PIX_FMT_YUVA422P10LE
planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
#define TINTERLACE_FLAG_EXACT_TB
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
static const AVOption tinterlace_options[]
union FFDrawColor::@207 comp[MAX_PLANES]
Rational number (pair of numerator and denominator).
const AVFilter ff_vf_interlace
AVFilterLink ** inputs
array of pointers to input links
int mode
TInterlaceMode, interlace mode selected.
@ AV_PIX_FMT_YUV440P10LE
planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
int av_image_alloc(uint8_t *pointers[4], int linesizes[4], int w, int h, enum AVPixelFormat pix_fmt, int align)
Allocate an image with size w and h and pixel format pix_fmt, and fill pointers and linesizes accordi...
static void interleave(uint8_t *dst, uint8_t *src, int w, int h, int dst_linesize, int src_linesize, enum FilterMode mode, int swap)
@ AV_PIX_FMT_YUV440P12LE
planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
static enum AVPixelFormat pix_fmts[]
@ AV_PIX_FMT_YUV422P10LE
planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
int format
agreed upon media format
#define TINTERLACE_FLAG_CVLPF
static AVRational av_make_q(int num, int den)
Create an AVRational.
#define AV_NOPTS_VALUE
Undefined timestamp value.
void ff_fill_rectangle(FFDrawContext *draw, FFDrawColor *color, uint8_t *dst[], int dst_linesize[], int dst_x, int dst_y, int w, int h)
Fill a rectangle with an uniform color.
AVFilterContext * src
source filter
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
#define FULL_SCALE_YUVJ_FORMATS
static void copy_picture_field(TInterlaceContext *tinterlace, uint8_t *dst[4], int dst_linesize[4], const uint8_t *src[4], int src_linesize[4], enum AVPixelFormat format, int w, int src_h, int src_field, int interleave, int dst_field, int flags)
Copy picture field from src to dst.
@ AV_PIX_FMT_YUVA420P10LE
planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
int interlaced_frame
The content of the picture is interlaced.
#define i(width, name, range_min, range_max)
int w
agreed upon image width
const char * name
Pad name.
void ff_draw_color(FFDrawContext *draw, FFDrawColor *color, const uint8_t rgba[4])
Prepare a color.
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
int lowpass
legacy interlace filter lowpass mode
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
int h
agreed upon image height
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
AVRational time_base
Define the time base used by the PTS of the frames/samples which will pass through this link.
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
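The doubled-rate modes of this filter need exactly this kind of exact rational arithmetic on the link frame rate. A hedged one-liner sketch; the helper name is illustrative:

#include <libavutil/rational.h>

/* Sketch: double a frame rate, e.g. 25/1 -> 50/1, with exact rational math. */
static AVRational double_frame_rate(AVRational rate)
{
    return av_mul_q(rate, av_make_q(2, 1));
}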
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
static void lowpass_line_c(uint8_t *dstp, ptrdiff_t width, const uint8_t *srcp, ptrdiff_t mref, ptrdiff_t pref, int clip_max)
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
static enum AVPixelFormat full_scale_yuvj_pix_fmts[]
int64_t frame_count_in
Number of past frames sent through the link.
@ AV_PIX_FMT_YUVA444P10LE
planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
static void lowpass_line_complex_c_16(uint8_t *dst8, ptrdiff_t width, const uint8_t *src8, ptrdiff_t mref, ptrdiff_t pref, int clip_max)
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
#define FILTER_OUTPUTS(array)
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
#define TINTERLACE_FLAG_VLPF
static const AVOption interlace_options[]
#define flags(name, subs,...)
static void lowpass_line_c_16(uint8_t *dst8, ptrdiff_t width, const uint8_t *src8, ptrdiff_t mref, ptrdiff_t pref, int clip_max)
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
@ AV_PIX_FMT_YUV422P12LE
planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
AVFILTER_DEFINE_CLASS(tinterlace)
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
static const AVFilterPad tinterlace_outputs[]