DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_128)[9][8] = {
    {  36,  68,  60,  92,  34,  66,  58,  90, },
    { 100,   4, 124,  28,  98,   2, 122,  26, },
    {  52,  84,  44,  76,  50,  82,  42,  74, },
    { 116,  20, 108,  12, 114,  18, 106,  10, },
    {  32,  64,  56,  88,  38,  70,  62,  94, },
    {  96,   0, 120,  24, 102,   6, 126,  30, },
    {  48,  80,  40,  72,  54,  86,  46,  78, },
    { 112,  16, 104,   8, 118,  22, 110,  14, },
    {  36,  68,  60,  92,  34,  66,  58,  90, },
};

DECLARE_ALIGNED(8, static const uint8_t, sws_pb_64)[8] = {
    64, 64, 64, 64, 64, 64, 64, 64
};
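/*
 * Illustrative example (not part of swscale.c): rows of ff_dither_8x8_128 are
 * handed to the planar output functions as per-line dither offsets, while
 * sws_pb_64 (a constant 64, i.e. half of 1 << 7) serves as the plain-rounding
 * offset when dithering is disabled.  A minimal sketch of how such an offset
 * is applied when a 15-bit internal sample is reduced to 8 bits:
 */
static inline uint8_t dither_15_to_8(int val15, int x, int y,
                                     const uint8_t dither[9][8])
{
    int v = (val15 + dither[y & 7][x & 7]) >> 7; /* add <1 LSB of noise, drop 7 bits */
    return v < 0 ? 0 : v > 255 ? 255 : v;
}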
/* body of fillPlane(): fill a width x height block of one plane, starting at
 * line y, with the byte value val */
uint8_t *ptr = plane + stride * y;
for (i = 0; i < height; i++) {
    memset(ptr, val, width);
    ptr += stride;
}
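/*
 * Illustrative call (not part of this function): swscale() later uses this
 * helper to make a destination alpha plane fully opaque, e.g.
 *
 *     fillPlane(dst[3], dstStride[3], dstW, height, lastDstY, 255);
 */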
static void hScale16To19_c(SwsContext *c, int16_t *_dst, int dstW,
                           const uint8_t *_src, const int16_t *filter,
                           const int32_t *filterPos, int filterSize)
{
    /* ... */
    const uint16_t *src = (const uint16_t *) _src;
    /* ... */
    for (i = 0; i < dstW; i++) {
        int srcPos = filterPos[i];
        int val    = 0;
        for (j = 0; j < filterSize; j++) {
            val += src[srcPos + j] * filter[filterSize * i + j];
        }
        dst[i] = FFMIN(val >> sh, (1 << 19) - 1);
    }
}
static void hScale16To15_c(SwsContext *c, int16_t *dst, int dstW,
                           const uint8_t *_src, const int16_t *filter,
                           const int32_t *filterPos, int filterSize)
{
    /* ... */
    const uint16_t *src = (const uint16_t *) _src;
    /* ... */
    for (i = 0; i < dstW; i++) {
        int srcPos = filterPos[i];
        int val    = 0;
        for (j = 0; j < filterSize; j++) {
            val += src[srcPos + j] * filter[filterSize * i + j];
        }
        dst[i] = FFMIN(val >> sh, (1 << 15) - 1);
    }
}
static void hScale8To15_c(SwsContext *c, int16_t *dst, int dstW,
                          const uint8_t *src, const int16_t *filter,
                          const int32_t *filterPos, int filterSize)
{
    /* ... */
    for (i = 0; i < dstW; i++) {
        int srcPos = filterPos[i];
        int val    = 0;
        for (j = 0; j < filterSize; j++) {
            val += ((int)src[srcPos + j]) * filter[filterSize * i + j];
        }
        dst[i] = FFMIN(val >> 7, (1 << 15) - 1);
    }
}
static void hScale8To19_c(SwsContext *c, int16_t *_dst, int dstW,
                          const uint8_t *src, const int16_t *filter,
                          const int32_t *filterPos, int filterSize)
{
    /* ... */
    for (i = 0; i < dstW; i++) {
        int srcPos = filterPos[i];
        int val    = 0;
        for (j = 0; j < filterSize; j++) {
            val += ((int)src[srcPos + j]) * filter[filterSize * i + j];
        }
        dst[i] = FFMIN(val >> 3, (1 << 19) - 1);
    }
}
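/*
 * Illustrative note (not part of swscale.c): in the hScale*_c() routines
 * above, filterPos[i] is the first source sample contributing to output
 * sample i, and filter[filterSize*i + j] are fixed-point weights for the
 * following filterSize samples.  Assuming the weights for one output sample
 * sum to 1 << 14 (as the filter-initialization code arranges), an 8-bit
 * input accumulates to 8 + 14 = 22 bits, so ">> 7" yields the 15-bit
 * intermediate format and ">> 3" the 19-bit one.  A toy 2-tap example:
 */
static int16_t hscale_one_sample_example(const uint8_t *src, int srcPos)
{
    /* hypothetical bilinear weights: 0.25 and 0.75 in Q14 */
    const int16_t w0 = 4096, w1 = 12288;        /* w0 + w1 == 1 << 14  */
    int val = src[srcPos] * w0 + src[srcPos + 1] * w1;
    return FFMIN(val >> 7, (1 << 15) - 1);      /* 15-bit intermediate */
}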
/* chrRangeToJpeg_c(int16_t *dstU, int16_t *dstV, int width):
 * limited-range -> full-range chroma, 15-bit internal samples */
for (i = 0; i < width; i++) {
    dstU[i] = (FFMIN(dstU[i], 30775) * 4663 - 9289992) >> 12;
    dstV[i] = (FFMIN(dstV[i], 30775) * 4663 - 9289992) >> 12;
}
/* chrRangeFromJpeg_c(): full-range -> limited-range chroma */
for (i = 0; i < width; i++) {
    dstU[i] = (dstU[i] * 1799 + 4081085) >> 11;
    dstV[i] = (dstV[i] * 1799 + 4081085) >> 11;
}
/* lumRangeToJpeg_c(): limited-range -> full-range luma */
for (i = 0; i < width; i++)
    dst[i] = (FFMIN(dst[i], 30189) * 19077 - 39057361) >> 14;
/* lumRangeFromJpeg_c(): full-range -> limited-range luma */
for (i = 0; i < width; i++)
    dst[i] = (dst[i] * 14071 + 33561947) >> 14;
/* chrRangeToJpeg16_c(): same conversion on 32-bit (19-bit range) samples */
for (i = 0; i < width; i++) {
    dstU[i] = (FFMIN(dstU[i], 30775 << 4) * 4663 - (9289992 << 4)) >> 12;
    dstV[i] = (FFMIN(dstV[i], 30775 << 4) * 4663 - (9289992 << 4)) >> 12;
}
/* chrRangeFromJpeg16_c() */
for (i = 0; i < width; i++) {
    dstU[i] = (dstU[i] * 1799 + (4081085 << 4)) >> 11;
    dstV[i] = (dstV[i] * 1799 + (4081085 << 4)) >> 11;
}
/* lumRangeToJpeg16_c() */
for (i = 0; i < width; i++) {
    dst[i] = ((int)(FFMIN(dst[i], 30189 << 4) * 4769U - (39057361 << 2))) >> 12;
}
/* lumRangeFromJpeg16_c() */
for (i = 0; i < width; i++)
    dst[i] = (dst[i] * (14071 / 4) + (33561947 << 4) / 4) >> 12;
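/*
 * Illustrative check (not part of swscale.c) of the fixed-point constants
 * used above.  The internal 15-bit format stores an 8-bit value v as v << 7,
 * so 16 -> 2048, 128 -> 16384, 235 -> 30080, 240 -> 30720.  Limited -> full
 * range luma is y' = (y - 16) * 255 / 219, and indeed
 * 19077 / 16384 ~= 255 / 219 while 39057361 / 16384 ~= 2048 * 255 / 219.
 * The FFMIN() clamps keep the product from overflowing the int16_t output:
 * (30189 * 19077 - 39057361) >> 14 == 32767.  Likewise for chroma,
 * 4663 / 4096 ~= 255 / 224 and 1799 / 2048 ~= 224 / 255.  A quick runtime
 * sanity check could look like this:
 */
static void range_constants_check(void)
{
    int y    = 235 << 7;                                   /* limited-range white */
    int full = (FFMIN(y, 30189) * 19077 - 39057361) >> 14;
    av_assert0(full >= (254 << 7) && full <= (255 << 7));  /* ~ full-range white  */
}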
#define DEBUG_SWSCALE_BUFFERS 0
#define DEBUG_BUFFERS(...)                      \
    if (DEBUG_SWSCALE_BUFFERS)                  \
        av_log(c, AV_LOG_DEBUG, __VA_ARGS__)
static int swscale(SwsContext *c, const uint8_t *src[],
                   int srcStride[], int srcSliceY,
                   int srcSliceH, uint8_t *dst[], int dstStride[])
{
    /* ... */
    const int dstW = c->dstW;
    const int dstH = c->dstH;
    int chrStart = lumEnd;
    /* ... */
    srcStride[3] = srcStride[0];
    DEBUG_BUFFERS("swscale() %p[%d] %p[%d] %p[%d] %p[%d] -> %p[%d] %p[%d] %p[%d] %p[%d]\n",
                  src[0], srcStride[0], src[1], srcStride[1],
                  src[2], srcStride[2], src[3], srcStride[3],
                  dst[0], dstStride[0], dst[1], dstStride[1],
                  dst[2], dstStride[2], dst[3], dstStride[3]);
    DEBUG_BUFFERS("srcSliceY: %d srcSliceH: %d dstY: %d dstH: %d\n",
                  srcSliceY, srcSliceH, dstY, dstH);
    DEBUG_BUFFERS("vLumFilterSize: %d vChrFilterSize: %d\n",
                  vLumFilterSize, vChrFilterSize);
    if (dstStride[0]&15 || dstStride[1]&15 ||
        dstStride[2]&15 || dstStride[3]&15) {
        static int warnedAlready = 0;
        if (flags & SWS_PRINT_INFO && !warnedAlready) {
            av_log(c, AV_LOG_WARNING,
                   "Warning: dstStride is not aligned!\n"
                   "         ->cannot do aligned memory accesses anymore\n");
            warnedAlready = 1;
        }
    }
    if (   (uintptr_t)dst[0]&15 || (uintptr_t)dst[1]&15 || (uintptr_t)dst[2]&15
        || (uintptr_t)src[0]&15 || (uintptr_t)src[1]&15 || (uintptr_t)src[2]&15
        || dstStride[0]&15 || dstStride[1]&15 || dstStride[2]&15 || dstStride[3]&15
        || srcStride[0]&15 || srcStride[1]&15 || srcStride[2]&15 || srcStride[3]&15
    ) {
        static int warnedAlready = 0;
        /* ... */
    }
    if (srcSliceY == 0) {
        /* ... */
    }
    /* ... */
    if (!should_dither) {
        /* ... */
    }
    /* ... */
    ff_init_slice_from_src(src_slice, (uint8_t **)src, srcStride, c->srcW,
                           srcSliceY, srcSliceH, chrSrcSliceY, chrSrcSliceH, 1);
    /* ... */
    if (srcSliceY == 0) {
        /* ... */
    }
    /* ... */
    hout_slice->width = dstW;
    for (; dstY < dstH; dstY++) {
        /* ... */
        const int firstLumSrcY  = FFMAX(1 - vLumFilterSize, vLumFilterPos[dstY]);
        /* ... */
        const int firstChrSrcY  = FFMAX(1 - vChrFilterSize, vChrFilterPos[chrDstY]);
        /* ... */
        int lastLumSrcY  = FFMIN(c->srcH,    firstLumSrcY  + vLumFilterSize) - 1;
        int lastLumSrcY2 = FFMIN(c->srcH,    firstLumSrcY2 + vLumFilterSize) - 1;
        int lastChrSrcY  = FFMIN(c->chrSrcH, firstChrSrcY  + vChrFilterSize) - 1;
        /* ... */
        int posY, cPosY, firstPosY, lastPosY, firstCPosY, lastCPosY;
        if (firstLumSrcY > lastInLumBuf) {
            hasLumHoles = lastInLumBuf != firstLumSrcY - 1;
            /* ... */
            lastInLumBuf = firstLumSrcY - 1;
        }
        if (firstChrSrcY > lastInChrBuf) {
            hasChrHoles = lastInChrBuf != firstChrSrcY - 1;
            /* ... */
            lastInChrBuf = firstChrSrcY - 1;
        }
        DEBUG_BUFFERS("\tfirstLumSrcY: %d lastLumSrcY: %d lastInLumBuf: %d\n",
                      firstLumSrcY, lastLumSrcY, lastInLumBuf);
        DEBUG_BUFFERS("\tfirstChrSrcY: %d lastChrSrcY: %d lastInChrBuf: %d\n",
                      firstChrSrcY, lastChrSrcY, lastInChrBuf);
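/*
 * Illustrative note (not part of swscale.c): vLumFilterPos[dstY] gives the
 * first source line contributing to output line dstY, and the vertical
 * filter spans vLumFilterSize consecutive lines, so the window of needed
 * source lines is
 *
 *     firstLumSrcY = vLumFilterPos[dstY];                 // e.g. 10
 *     lastLumSrcY  = firstLumSrcY + vLumFilterSize - 1;   // e.g. 13 for a 4-tap filter
 *
 * All of these lines must already sit in the ring buffer (lastInLumBuf >=
 * lastLumSrcY) before the vertical scaler can produce line dstY; the
 * bookkeeping below tracks exactly that.
 */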
        enough_lines = lastLumSrcY2 < srcSliceY + srcSliceH &&
                       lastChrSrcY2 < AV_CEIL_RSHIFT(srcSliceY + srcSliceH, c->chrSrcVSubSample);

        if (!enough_lines) {
            lastLumSrcY = srcSliceY + srcSliceH - 1;
            lastChrSrcY = chrSrcSliceY + chrSrcSliceH - 1;
            DEBUG_BUFFERS("buffering slice: lastLumSrcY %d lastChrSrcY %d\n",
                          lastLumSrcY, lastChrSrcY);
        }
        if (posY <= lastLumSrcY && !hasLumHoles) {
            firstPosY = FFMAX(firstLumSrcY, posY);
            /* ... */
        } else {
            firstPosY = lastInLumBuf + 1;
            lastPosY  = lastLumSrcY;
        }

        if (cPosY <= lastChrSrcY && !hasChrHoles) {
            firstCPosY = FFMAX(firstChrSrcY, cPosY);
            /* ... */
        } else {
            firstCPosY = lastInChrBuf + 1;
            lastCPosY  = lastChrSrcY;
        }
        if (posY < lastLumSrcY + 1) {
            for (i = lumStart; i < lumEnd; ++i)
                desc[i].process(c, &desc[i], firstPosY, lastPosY - firstPosY + 1);
        }
        lumBufIndex += lastLumSrcY - lastInLumBuf;
        lastInLumBuf = lastLumSrcY;

        if (cPosY < lastChrSrcY + 1) {
            for (i = chrStart; i < chrEnd; ++i)
                desc[i].process(c, &desc[i], firstCPosY, lastCPosY - firstCPosY + 1);
        }
        chrBufIndex += lastChrSrcY - lastInChrBuf;
        lastInChrBuf = lastChrSrcY;

        /* wrap the ring buffer indices */
        if (lumBufIndex >= vLumFilterSize)
            lumBufIndex -= vLumFilterSize;
        if (chrBufIndex >= vChrFilterSize)
            chrBufIndex -= vChrFilterSize;
        /* ... */
        ff_updateMMXDitherTables(c, dstY, lumBufIndex, chrBufIndex,
                                 lastInLumBuf, lastInChrBuf);
        if (dstY >= dstH - 2) {
            /* ... */
            ff_sws_init_output_funcs(c, &yuv2plane1, &yuv2planeX, &yuv2nv12cX,
                                     &yuv2packed1, &yuv2packed2, &yuv2packedX, &yuv2anyX);
            /* ... */
            ff_init_vscale_pfn(c, yuv2plane1, yuv2planeX, yuv2nv12cX,
                               yuv2packed1, yuv2packed2, yuv2packedX, yuv2anyX, use_mmx_vfilter);
        }

        for (i = vStart; i < vEnd; ++i)
            desc[i].process(c, &desc[i], dstY, 1);
    /* if the destination has an alpha plane but no source alpha was scaled,
     * fill it with solid opaque */
    int height = dstY - lastDstY;
    /* ... */
    fillPlane16(dst[3], dstStride[3], length, height, lastDstY,
                1, desc->comp[3].depth, isBE(dstFormat));
    /* ... */
    fillPlane(dst[3], dstStride[3], length, height, lastDstY, 255);
#if HAVE_MMXEXT_INLINE
    if (av_get_cpu_flags() & AV_CPU_FLAG_MMXEXT)
        __asm__ volatile ("sfence" ::: "memory");
#endif
    /* ... */
    return dstY - lastDstY;
    /* in reset_ptr(): drop plane pointers that the pixel format does not use */
    src[3] = src[2] = NULL;
/* check_image_pointers(): verify that every plane required by pix_fmt has a
 * non-NULL data pointer and a non-zero linesize */
for (i = 0; i < 4; i++) {
    int plane = desc->comp[i].plane;
    if (!data[plane] || !linesizes[plane])
        return 0;
}
/* in xyz12Torgb48(): walk the packed 16-bit XYZ triplets */
for (yp = 0; yp < h; yp++) {
    for (xp = 0; xp + 2 < stride; xp += 3) {
        int x, y, z, r, g, b;
        /* ... */
        /* limit values to 12-bit depth */
        r = av_clip_uintp2(r, 12);
        g = av_clip_uintp2(g, 12);
        b = av_clip_uintp2(b, 12);
/* in rgb48Toxyz12(): the same walk in the opposite direction */
for (yp = 0; yp < h; yp++) {
    for (xp = 0; xp + 2 < stride; xp += 3) {
        int x, y, z, r, g, b;
        /* ... */
        /* limit values to 12-bit depth */
        x = av_clip_uintp2(x, 12);
        y = av_clip_uintp2(y, 12);
        z = av_clip_uintp2(z, 12);
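/*
 * Illustrative sketch (not part of swscale.c) of what the elided middle of
 * the two loops above does, assuming the context provides de-gamma/re-gamma
 * lookup tables and the fixed-point 3x3 matrices c->xyz2rgb_matrix /
 * c->rgb2xyz_matrix: linearize each component through a LUT, multiply by the
 * Q12 matrix, shift back down, then apply the output gamma LUT before the
 * 12-bit clip shown above.  A hypothetical helper for one matrix row:
 */
static inline int q12_mat_row(const int16_t row[4], int x, int y, int z)
{
    /* one row of a 3x3 matrix in Q12 fixed point applied to (x, y, z) */
    return (row[0] * x + row[1] * y + row[2] * z) >> 12;
}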
int attribute_align_arg sws_scale(struct SwsContext *c,
                                  const uint8_t *const srcSlice[],
                                  const int srcStride[], int srcSliceY,
                                  int srcSliceH, uint8_t *const dst[],
                                  const int dstStride[])
{
    if (!srcStride || !dstStride || !dst || !srcSlice) {
        av_log(c, AV_LOG_ERROR,
               "One of the input parameters to sws_scale() is NULL, "
               "please check the calling code\n");
        return 0;
    }
    if ((srcSliceY & (macro_height - 1)) ||
        ((srcSliceH & (macro_height - 1)) && srcSliceY + srcSliceH != c->srcH) ||
        srcSliceY + srcSliceH > c->srcH) {
        av_log(c, AV_LOG_ERROR,
               "Slice parameters %d, %d are invalid\n", srcSliceY, srcSliceH);
        /* ... */
    }
    /* cascaded scaling: hand the incoming slice to the first sub-context */
    ret = sws_scale(c->cascaded_context[0],
                    srcSlice, srcStride, srcSliceY, srcSliceH,
                    c->cascaded_tmp, c->cascaded_tmpStride);
    /* ... (a second cascaded path forwards the slice the same way) */
    memcpy(src2, srcSlice, sizeof(src2));
    memcpy(dst2, dst, sizeof(dst2));
    if (c->sliceDir == 0 && srcSliceY != 0 && srcSliceY + srcSliceH != c->srcH) {
        /* build the palette -> YUV and palette -> RGB lookup tables from the
         * 256 entries of the source palette / pseudo-palette */
        for (i = 0; i < 256; i++) {
            int r, g, b, y, u, v, a = 0xff;
            /* AV_PIX_FMT_PAL8: read the ARGB palette entry */
                uint32_t p = ((const uint32_t *)(srcSlice[1]))[i];
                a = (p >> 24) & 0xFF;
                r = (p >> 16) & 0xFF;
            /* ... RGB8 */
                g = ((i >> 2) & 7) * 36;
            /* ... BGR8 */
                g = ((i >> 3) & 7) * 36;
            /* ... RGB4_BYTE */
                r = ( i >> 3     ) * 255;
                g = ((i >> 1) & 3) * 85;
            /* ... BGR4_BYTE */
                b = ( i >> 3     ) * 255;
                g = ((i >> 1) & 3) * 85;
#define RGB2YUV_SHIFT 15
#define BY ( (int) (0.114 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define BV (-(int) (0.081 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define BU ( (int) (0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GY ( (int) (0.587 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GV (-(int) (0.419 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GU (-(int) (0.331 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RY ( (int) (0.299 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RV ( (int) (0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RU (-(int) (0.169 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
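/*
 * Illustrative example (not part of swscale.c): these are BT.601
 * RGB -> limited-range YUV coefficients in Q15 fixed point.  For white
 * (r = g = b = 255) the luma works out to the expected limited-range
 * maximum, since RY + GY + BY ~= 219/255 * 32768 and therefore
 * (255 * (RY + GY + BY) + (16 << RGB2YUV_SHIFT) + (1 << (RGB2YUV_SHIFT - 1)))
 * >> RGB2YUV_SHIFT == 235.  A hypothetical helper using them:
 */
static inline int rgb_to_y_bt601(int r, int g, int b)
{
    return (RY * r + GY * g + BY * b +
            (16 << RGB2YUV_SHIFT) + (1 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
}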
            c->pal_yuv[i] = y + (u << 8) + (v << 16) + ((unsigned)a << 24);
            /* ... pack pal_rgb[] in the byte order the destination expects */
            c->pal_rgb[i] = r + (g << 8) + (b << 16) + ((unsigned)a << 24);
            /* ... */
            c->pal_rgb[i] = a + (r << 8) + (g << 16) + ((unsigned)b << 24);
            /* ... */
            c->pal_rgb[i] = a + (b << 8) + (g << 16) + ((unsigned)r << 24);
            /* ... */
            c->pal_rgb[i] = b + (g << 8) + (r << 16) + ((unsigned)a << 24);
        /* formats with an unused alpha byte (e.g. RGB0): copy the slice and
         * force the alpha byte of every pixel to 0xFF */
        base = srcStride[0] < 0 ? rgb0_tmp - srcStride[0] * (srcSliceH - 1) : rgb0_tmp;
        for (y = 0; y < srcSliceH; y++) {
            memcpy(base + srcStride[0] * y, src2[0] + srcStride[0] * y, 4 * c->srcW);
            for (x = c->src0Alpha - 1; x < 4 * c->srcW; x += 4) {
                base[srcStride[0] * y + x] = 0xFF;
            }
        }
        base = srcStride[0] < 0 ? rgb0_tmp - srcStride[0] * (srcSliceH - 1) : rgb0_tmp;
        /* ... */
        xyz12Torgb48(c, (uint16_t *)base, (const uint16_t *)src2[0], srcStride[0] / 2, srcSliceH);
    for (i = 0; i < 4; i++)
        /* ... */
        /* top-to-bottom slice order: pass the strides through unchanged */
        int srcStride2[4] = { srcStride[0], srcStride[1], srcStride[2],
                              srcStride[3] };
        int dstStride2[4] = { dstStride[0], dstStride[1], dstStride[2],
                              dstStride[3] };
        /* reset the slice direction at the end of the frame */
        if (srcSliceY + srcSliceH == c->srcH)
            c->sliceDir = 0;
        ret = c->swscale(c, src2, srcStride2, srcSliceY, srcSliceH, dst2,
                         dstStride2);
        /* bottom-to-top slice order: negate the strides and start from the
         * last line of each plane */
        int srcStride2[4] = { -srcStride[0], -srcStride[1], -srcStride[2],
                              -srcStride[3] };
        int dstStride2[4] = { -dstStride[0], -dstStride[1], -dstStride[2],
                              -dstStride[3] };

        src2[0] += (srcSliceH - 1) * srcStride[0];
        /* ... */
        src2[3] += (srcSliceH - 1) * srcStride[3];
        dst2[0] += (c->dstH - 1) * dstStride[0];
        /* ... */
        dst2[3] += (c->dstH - 1) * dstStride[3];
        ret = c->swscale(c, src2, srcStride2, c->srcH - srcSliceY - srcSliceH,
                         srcSliceH, dst2, dstStride2);
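/*
 * Illustrative example (not part of swscale.c): a negative stride combined
 * with a pointer to the last line describes the same pixels upside down, so
 * row y of the flipped view is row (h - 1 - y) of the original buffer.
 * A hypothetical helper that builds such a view:
 */
static uint8_t *flipped_view(uint8_t *data, int stride, int h, int *flipped_stride)
{
    *flipped_stride = -stride;          /* walk the rows backwards */
    return data + (h - 1) * stride;     /* start at the last line  */
}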
    /* for XYZ output, convert the finished RGB48 lines back in place */
    rgb48Toxyz12(c, (uint16_t *)dst2[0], (const uint16_t *)dst2[0], dstStride[0] / 2, ret);
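/*
 * Illustrative usage of the public entry point (not part of swscale.c):
 * a caller typically creates a context with sws_getContext() and then feeds
 * whole frames, i.e. a single slice starting at line 0 and covering all
 * lines.  The sizes, formats and frame buffers here are placeholders.
 */
#include <libswscale/swscale.h>

static int convert_frame(const uint8_t *const src_data[4], const int src_linesize[4],
                         uint8_t *const dst_data[4], const int dst_linesize[4],
                         int w, int h)
{
    struct SwsContext *ctx = sws_getContext(w, h, AV_PIX_FMT_YUV420P,
                                            w, h, AV_PIX_FMT_RGB24,
                                            SWS_BILINEAR, NULL, NULL, NULL);
    int out_lines;

    if (!ctx)
        return -1;
    /* one slice covering the whole frame */
    out_lines = sws_scale(ctx, src_data, src_linesize, 0, h, dst_data, dst_linesize);
    sws_freeContext(ctx);
    return out_lines;   /* number of destination lines written */
}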