44     0x0200020002000200LL,};
 
   48     0x0004000400040004LL,};
 
   61 DECLARE_ALIGNED(8, const uint64_t, ff_bgr2YOffset)  = 0x1010101010101010ULL;
 
   62 DECLARE_ALIGNED(8, const uint64_t, ff_bgr2UVOffset) = 0x8080808080808080ULL;
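The two constants above replicate the per-byte biases used by the MMX RGB-to-YUV code (the luma offset 0x10 = 16 and the chroma bias 0x80 = 128) across all eight bytes of a 64-bit word, so one register applies the bias to eight pixels at once. A minimal sketch of the byte replication; the helper name is hypothetical, not part of the file:

#include <stdint.h>

/* Hypothetical helper, for illustration only: repeating one byte across a
 * 64-bit word yields exactly the patterns declared above. */
static uint64_t repeat_byte(uint8_t b)
{
    return b * 0x0101010101010101ULL;
}
/* repeat_byte(0x10) == 0x1010101010101010ULL   (ff_bgr2YOffset)
 * repeat_byte(0x80) == 0x8080808080808080ULL   (ff_bgr2UVOffset) */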
 
   69 #define COMPILE_TEMPLATE_MMXEXT 0 
   70 #define RENAME(a) a ## _mmx 
   75 #if HAVE_MMXEXT_INLINE 
   77 #undef COMPILE_TEMPLATE_MMXEXT 
   78 #define COMPILE_TEMPLATE_MMXEXT 1 
   79 #define RENAME(a) a ## _mmxext 
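This is FFmpeg's usual template trick: a shared template file is compiled once per instruction-set variant, and RENAME pastes the matching suffix onto every function it defines, producing an _mmx and an _mmxext flavour of each routine (yuv2yuvX_mmxext, called further down, is one such instance). A hedged sketch of the mechanism with a placeholder body, not the real template:

#define RENAME(a) a ## _mmx
static void RENAME(yuv2yuvX)(void) { }   /* expands to: static void yuv2yuvX_mmx(void) { } */

#undef  RENAME
#define RENAME(a) a ## _mmxext
static void RENAME(yuv2yuvX)(void) { }   /* expands to: static void yuv2yuvX_mmxext(void) { } */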
  110     const int firstLumSrcY= vLumFilterPos[dstY];
 
  111     const int firstChrSrcY= vChrFilterPos[chrDstY]; 
 
  119     if (dstY < dstH - 2) {
 
  121         const int16_t **lumSrcPtr  = (const int16_t **)(void*) lumPlane->line + firstLumSrcY - lumPlane->sliceY;
 
  122         const int16_t **chrUSrcPtr = (const int16_t **)(void*) chrUPlane->line + firstChrSrcY - chrUPlane->sliceY;
 
  123         const int16_t **alpSrcPtr  = (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) ? (const int16_t **)(void*) alpPlane->line + firstLumSrcY - alpPlane->sliceY : NULL;
 
  125         const int16_t **lumSrcPtr= (const int16_t **)(void*) lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
 
  126         const int16_t **chrUSrcPtr= (const int16_t **)(void*) chrUPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
 
  127         const int16_t **alpSrcPtr= (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? (const int16_t **)(void*) alpPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize : NULL;
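In the ring-buffer path (lines 125-127), lumBufIndex is the slot holding the most recently scaled source line, lastInLumBuf, so lumBufIndex + firstLumSrcY - lastInLumBuf points at the slot of the first line the vertical filter needs; adding vLumBufSize keeps that offset non-negative. This only works if each slot appears twice in the pointer array, which is consistent with line 134 placing the tmp area past 2 * vLumBufSize. A sketch of that assumption, using a hypothetical helper:

/* Sketch, assuming lumPixBuf stores 2 * vLumBufSize entries where slot i and
 * slot i + vLumBufSize alias the same scaled line. */
static const int16_t *lum_line_for(const int16_t **lumPixBuf, int vLumBufSize,
                                   int lumBufIndex, int lastInLumBuf, int srcLine)
{
    int slot = lumBufIndex + srcLine - lastInLumBuf + vLumBufSize;
    return lumPixBuf[slot];   /* no wrap needed thanks to the duplicated half */
}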
 
  130         if (firstLumSrcY < 0 || firstLumSrcY + vLumFilterSize > c->srcH) {
 
  132             const int16_t **tmpY = (const int16_t **) lumPlane->tmp;
 
  134             const int16_t **tmpY = (const int16_t **) lumPixBuf + 2 * vLumBufSize;
 
  136             int neg = -firstLumSrcY, i, end = FFMIN(c->srcH - firstLumSrcY, vLumFilterSize);
 
  137             for (i = 0; i < neg;            i++)
 
  138                 tmpY[i] = lumSrcPtr[neg];
 
  139             for (     ; i < end;            i++)
 
  140                 tmpY[i] = lumSrcPtr[i];
 
  147                 const int16_t **tmpA = (const int16_t **) alpPlane->tmp;
 
  149                 const int16_t **tmpA = (const int16_t **) alpPixBuf + 2 * vLumBufSize;
 
  151                 for (i = 0; i < neg;            i++)
 
  152                     tmpA[i] = alpSrcPtr[neg];
 
  153                 for (     ; i < end;            i++)
 
  154                     tmpA[i] = alpSrcPtr[i];
 
  156                     tmpA[i] = tmpA[i - 1];
 
  160         if (firstChrSrcY < 0 || firstChrSrcY + vChrFilterSize > c->chrSrcH) {
 
  162             const int16_t **tmpU = (const int16_t **) chrUPlane->tmp;
 
  164             const int16_t **tmpU = (const int16_t **) chrUPixBuf + 2 * vChrBufSize;
 
  166             int neg = -firstChrSrcY, i, end = FFMIN(c->chrSrcH - firstChrSrcY, vChrFilterSize);
 
  167             for (i = 0; i < neg;            i++) {
 
  168                 tmpU[i] = chrUSrcPtr[neg];
 
  170             for (     ; i < end;            i++) {
 
  171                 tmpU[i] = chrUSrcPtr[i];
 
  174                 tmpU[i] = tmpU[i - 1];
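Both edge checks (lines 130 and 160) clamp the filter window by replication: rows requested above the image reuse the first valid row, rows inside are passed through, and rows past the bottom repeat the previous entry (lines 156 and 174). A standalone sketch of that pattern, with hypothetical names:

/* Sketch of the replication clamp used for both luma and chroma above. */
static void clamp_rows(const int16_t **src, const int16_t **tmp,
                       int neg, int end, int filterSize)
{
    int i;
    for (i = 0; i < neg; i++)         /* above the image: repeat first valid row */
        tmp[i] = src[neg];
    for (; i < end; i++)              /* inside the image: pass through */
        tmp[i] = src[i];
    for (; i < filterSize; i++)       /* below the image: repeat previous row */
        tmp[i] = tmp[i - 1];
}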
 
  182                 *(const void**)&lumMmxFilter[s*i              ]= lumSrcPtr[i  ];
 
  183                 *(const void**)&lumMmxFilter[s*i+APCK_PTR2/4  ]= lumSrcPtr[i+(vLumFilterSize>1)];
 
  185                 lumMmxFilter[s*i+APCK_COEF/4+1]= vLumFilter[dstY*vLumFilterSize + i    ]
 
  186                 + (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0);
 
  187                 if (CONFIG_SWSCALE_ALPHA && hasAlpha) {
 
  188                     *(const void**)&alpMmxFilter[s*i              ]= alpSrcPtr[i  ];
 
  189                     *(const void**)&alpMmxFilter[s*i+APCK_PTR2/4  ]= alpSrcPtr[i+(vLumFilterSize>1)];
 
  195                 *(const void**)&chrMmxFilter[s*i              ]= chrUSrcPtr[i  ];
 
  196                 *(const void**)&chrMmxFilter[s*i+APCK_PTR2/4  ]= chrUSrcPtr[i+(vChrFilterSize>1)];
 
  198                 chrMmxFilter[s*i+APCK_COEF/4+1]= vChrFilter[chrDstY*vChrFilterSize + i    ]
 
  199                 + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0);
 
  203                 *(const void**)&lumMmxFilter[4*i+0]= lumSrcPtr[i];
 
  206                 ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001U;
 
  207                 if (CONFIG_SWSCALE_ALPHA && hasAlpha) {
 
  208                     *(const void**)&alpMmxFilter[4*i+0]= alpSrcPtr[i];
 
  210                     alpMmxFilter[4*i+3]= lumMmxFilter[4*i+2];
 
  214                 *(const void**)&chrMmxFilter[4*i+0]= chrUSrcPtr[i];
 
  217                 ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001U;
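The *0x10001U multiplications (lines 206 and 217) duplicate a 16-bit coefficient into both halves of a 32-bit word, so one 32-bit store fills two adjacent 16-bit filter lanes. The arithmetic, spelled out:

#include <stdint.h>

/* (uint16_t)c * 0x10001U == c | (c << 16): the coefficient appears in both
 * 16-bit halves of the 32-bit result. */
static uint32_t dup16(uint16_t c)
{
    return (uint32_t)c * 0x10001U;
}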
 
  224 static void yuv2yuvX_sse3(const int16_t *filter, int filterSize,
 
  228     if(((uintptr_t)dest) & 15){
 
  229         yuv2yuvX_mmxext(filter, filterSize, src, dest, dstW, dither, offset);
 
  233 #define MAIN_FUNCTION \ 
  234         "pxor       %%xmm0, %%xmm0 \n\t" \ 
  235         "punpcklbw  %%xmm0, %%xmm3 \n\t" \ 
  236         "movd           %4, %%xmm1 \n\t" \ 
  237         "punpcklwd  %%xmm1, %%xmm1 \n\t" \ 
  238         "punpckldq  %%xmm1, %%xmm1 \n\t" \ 
  239         "punpcklqdq %%xmm1, %%xmm1 \n\t" \ 
  240         "psllw          $3, %%xmm1 \n\t" \ 
  241         "paddw      %%xmm1, %%xmm3 \n\t" \ 
  242         "psraw          $4, %%xmm3 \n\t" \ 
  243         "movdqa     %%xmm3, %%xmm4 \n\t" \ 
  244         "movdqa     %%xmm3, %%xmm7 \n\t" \ 
  245         "movl           %3, %%ecx  \n\t" \ 
  246         "mov                                 %0, %%"REG_d"  \n\t"\ 
  247         "mov                        (%%"REG_d"), %%"REG_S"  \n\t"\ 
  250         "movddup                  8(%%"REG_d"), %%xmm0      \n\t" \ 
  251         "movdqa              (%%"REG_S", %%"REG_c", 2), %%xmm2      \n\t" \ 
  252         "movdqa            16(%%"REG_S", %%"REG_c", 2), %%xmm5      \n\t" \ 
  253         "add                                $16, %%"REG_d"  \n\t"\ 
  254         "mov                        (%%"REG_d"), %%"REG_S"  \n\t"\ 
  255         "test                         %%"REG_S", %%"REG_S"  \n\t"\ 
  256         "pmulhw                           %%xmm0, %%xmm2      \n\t"\ 
  257         "pmulhw                           %%xmm0, %%xmm5      \n\t"\ 
  258         "paddw                            %%xmm2, %%xmm3      \n\t"\ 
  259         "paddw                            %%xmm5, %%xmm4      \n\t"\ 
  261         "psraw                               $3, %%xmm3      \n\t"\ 
  262         "psraw                               $3, %%xmm4      \n\t"\ 
  263         "packuswb                         %%xmm4, %%xmm3      \n\t"\ 
  264         "movntdq                          %%xmm3, (%1, %%"REG_c")\n\t"\ 
  265         "add                         $16, %%"REG_c"         \n\t"\ 
  266         "cmp                          %2, %%"REG_c"         \n\t"\ 
  267         "movdqa                   %%xmm7, %%xmm3            \n\t" \ 
  268         "movdqa                   %%xmm7, %%xmm4            \n\t" \ 
  269         "mov                                 %0, %%"REG_d"  \n\t"\ 
  270         "mov                        (%%"REG_d"), %%"REG_S"  \n\t"\ 
  275             "movq          %5, %%xmm3  \n\t" 
  276             "movdqa    %%xmm3, %%xmm4  \n\t" 
  277             "psrlq        $24, %%xmm3  \n\t" 
  278             "psllq        $40, %%xmm4  \n\t" 
  279             "por       %%xmm4, %%xmm3  \n\t" 
  282               "r" (dest-offset), "g" ((x86_reg)(dstW+offset)), "m" (offset),
 
  283               "m"(filterSize), "m"(((uint64_t *) dither)[0])
 
  284               : XMM_CLOBBERS("%xmm0" , "%xmm1" , "%xmm2" , "%xmm3" , "%xmm4" , "%xmm5" , "%xmm7" ,)
 
  285                 "%"REG_d, "%"REG_S, "%"REG_c
 
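When offset is nonzero, the psrlq $24 / psllq $40 / por sequence (lines 277-279) rotates the packed 8-byte dither qword before the main loop runs; on the 64 bits actually loaded by movq it is a plain rotate. Scalar equivalent:

#include <stdint.h>

/* (d >> 24) | (d << 40) is a 64-bit rotate right by 24 bits, i.e. the dither
 * bytes are cycled by three positions. */
static uint64_t rotate_dither(uint64_t d)
{
    return (d >> 24) | (d << 40);
}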
  289             "movq          %5, %%xmm3   \n\t" 
  292               "r" (dest-offset), "g" ((x86_reg)(dstW+offset)), "m" (offset),
 
  293               "m"(filterSize), "m"(((uint64_t *) dither)[0])
 
  294               : XMM_CLOBBERS("%xmm0" , "%xmm1" , "%xmm2" , "%xmm3" , "%xmm4" , "%xmm5" , "%xmm7" ,)
 
  295                 "%"REG_d, "%"REG_S, "%"REG_c
 
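For orientation, both inline-asm branches implement the same per-pixel operation as the generic planar vertical scaler: each output byte is the clipped, dither-biased weighted sum of filterSize input lines. A scalar sketch modeled on the generic C path in libswscale; the name and exact shift amounts are assumptions, not taken from this file:

#include <stdint.h>

/* Scalar sketch of one output pixel of the planar vertical scale. */
static uint8_t vscale_pixel(const int16_t **src, const int16_t *filter,
                            int filterSize, const uint8_t *dither,
                            int i, int offset)
{
    int j, val = dither[(i + offset) & 7] << 12;
    for (j = 0; j < filterSize; j++)
        val += src[j][i] * filter[j];
    val >>= 19;
    return val < 0 ? 0 : (val > 255 ? 255 : val);   /* clip to 8 bits */
}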
  303 #define SCALE_FUNC(filter_n, from_bpc, to_bpc, opt) \ 
  304 void ff_hscale ## from_bpc ## to ## to_bpc ## _ ## filter_n ## _ ## opt( \ 
  305                                                 SwsContext *c, int16_t *data, \ 
  306                                                 int dstW, const uint8_t *src, \ 
  307                                                 const int16_t *filter, \ 
  308                                                 const int32_t *filterPos, int filterSize) 
  310 #define SCALE_FUNCS(filter_n, opt) \ 
  311     SCALE_FUNC(filter_n,  8, 15, opt); \ 
  312     SCALE_FUNC(filter_n,  9, 15, opt); \ 
  313     SCALE_FUNC(filter_n, 10, 15, opt); \ 
  314     SCALE_FUNC(filter_n, 12, 15, opt); \ 
  315     SCALE_FUNC(filter_n, 14, 15, opt); \ 
  316     SCALE_FUNC(filter_n, 16, 15, opt); \ 
  317     SCALE_FUNC(filter_n,  8, 19, opt); \ 
  318     SCALE_FUNC(filter_n,  9, 19, opt); \ 
  319     SCALE_FUNC(filter_n, 10, 19, opt); \ 
  320     SCALE_FUNC(filter_n, 12, 19, opt); \ 
  321     SCALE_FUNC(filter_n, 14, 19, opt); \ 
  322     SCALE_FUNC(filter_n, 16, 19, opt) 
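Each SCALE_FUNC line is an extern prototype for one assembly horizontal scaler, parameterized by input depth, output depth, tap count, and instruction set. For example (the sse2 suffix is chosen here purely for illustration), SCALE_FUNC(4, 8, 15, sse2) expands to:

void ff_hscale8to15_4_sse2(SwsContext *c, int16_t *data,
                           int dstW, const uint8_t *src,
                           const int16_t *filter,
                           const int32_t *filterPos, int filterSize);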
  324 #define SCALE_FUNCS_MMX(opt) \ 
  325     SCALE_FUNCS(4, opt); \ 
  326     SCALE_FUNCS(8, opt); \ 
  329 #define SCALE_FUNCS_SSE(opt) \ 
  330     SCALE_FUNCS(4, opt); \ 
  331     SCALE_FUNCS(8, opt); \ 
  332     SCALE_FUNCS(X4, opt); \ 
  342 #define VSCALEX_FUNC(size, opt) \ 
  343 void ff_yuv2planeX_ ## size ## _ ## opt(const int16_t *filter, int filterSize, \ 
  344                                         const int16_t **src, uint8_t *dest, int dstW, \ 
  345                                         const uint8_t *dither, int offset) 
  346 #define VSCALEX_FUNCS(opt) \ 
  347     VSCALEX_FUNC(8,  opt); \ 
  348     VSCALEX_FUNC(9,  opt); \ 
  349     VSCALEX_FUNC(10, opt) 
  359 #define VSCALE_FUNC(size, opt) \ 
  360 void ff_yuv2plane1_ ## size ## _ ## opt(const int16_t *src, uint8_t *dst, int dstW, \ 
  361                                         const uint8_t *dither, int offset) 
  362 #define VSCALE_FUNCS(opt1, opt2) \ 
  363     VSCALE_FUNC(8,  opt1); \ 
  364     VSCALE_FUNC(9,  opt2); \ 
  365     VSCALE_FUNC(10, opt2); \ 
  366     VSCALE_FUNC(16, opt1) 
  375 #define INPUT_Y_FUNC(fmt, opt) \ 
  376 void ff_ ## fmt ## ToY_  ## opt(uint8_t *dst, const uint8_t *src, \ 
  377                                 const uint8_t *unused1, const uint8_t *unused2, \ 
  378                                 int w, uint32_t *unused) 
  379 #define INPUT_UV_FUNC(fmt, opt) \ 
  380 void ff_ ## fmt ## ToUV_ ## opt(uint8_t *dstU, uint8_t *dstV, \ 
  381                                 const uint8_t *unused0, \ 
  382                                 const uint8_t *src1, \ 
  383                                 const uint8_t *src2, \ 
  384                                 int w, uint32_t *unused) 
  385 #define INPUT_FUNC(fmt, opt) \ 
  386     INPUT_Y_FUNC(fmt, opt); \ 
  387     INPUT_UV_FUNC(fmt, opt) 
  388 #define INPUT_FUNCS(opt) \ 
  389     INPUT_FUNC(uyvy, opt); \ 
  390     INPUT_FUNC(yuyv, opt); \ 
  391     INPUT_UV_FUNC(nv12, opt); \ 
  392     INPUT_UV_FUNC(nv21, opt); \ 
  393     INPUT_FUNC(rgba, opt); \ 
  394     INPUT_FUNC(bgra, opt); \ 
  395     INPUT_FUNC(argb, opt); \ 
  396     INPUT_FUNC(abgr, opt); \ 
  397     INPUT_FUNC(rgb24, opt); \ 
  398     INPUT_FUNC(bgr24, opt) 
  413         sws_init_swscale_mmx(c);
 
  415 #if HAVE_MMXEXT_INLINE 
  417         sws_init_swscale_mmxext(c);
 
  424 #define ASSIGN_SCALE_FUNC2(hscalefn, filtersize, opt1, opt2) do { \ 
  425     if (c->srcBpc == 8) { \ 
  426         hscalefn = c->dstBpc <= 14 ? ff_hscale8to15_ ## filtersize ## _ ## opt2 : \ 
  427                                      ff_hscale8to19_ ## filtersize ## _ ## opt1; \ 
  428     } else if (c->srcBpc == 9) { \ 
  429         hscalefn = c->dstBpc <= 14 ? ff_hscale9to15_ ## filtersize ## _ ## opt2 : \ 
  430                                      ff_hscale9to19_ ## filtersize ## _ ## opt1; \ 
  431     } else if (c->srcBpc == 10) { \ 
  432         hscalefn = c->dstBpc <= 14 ? ff_hscale10to15_ ## filtersize ## _ ## opt2 : \ 
  433                                      ff_hscale10to19_ ## filtersize ## _ ## opt1; \ 
  434     } else if (c->srcBpc == 12) { \ 
  435         hscalefn = c->dstBpc <= 14 ? ff_hscale12to15_ ## filtersize ## _ ## opt2 : \ 
  436                                      ff_hscale12to19_ ## filtersize ## _ ## opt1; \ 
  437     } else if (c->srcBpc == 14 || ((c->srcFormat==AV_PIX_FMT_PAL8||isAnyRGB(c->srcFormat)) && av_pix_fmt_desc_get(c->srcFormat)->comp[0].depth<16)) { \ 
  438         hscalefn = c->dstBpc <= 14 ? ff_hscale14to15_ ## filtersize ## _ ## opt2 : \ 
  439                                      ff_hscale14to19_ ## filtersize ## _ ## opt1; \ 
  441         av_assert0(c->srcBpc == 16);\ 
  442         hscalefn = c->dstBpc <= 14 ? ff_hscale16to15_ ## filtersize ## _ ## opt2 : \ 
  443                                      ff_hscale16to19_ ## filtersize ## _ ## opt1; \ 
  446 #define ASSIGN_MMX_SCALE_FUNC(hscalefn, filtersize, opt1, opt2) \ 
  447     switch (filtersize) { \ 
  448     case 4:  ASSIGN_SCALE_FUNC2(hscalefn, 4, opt1, opt2); break; \ 
  449     case 8:  ASSIGN_SCALE_FUNC2(hscalefn, 8, opt1, opt2); break; \ 
  450     default: ASSIGN_SCALE_FUNC2(hscalefn, X, opt1, opt2); break; \ 
  452 #define ASSIGN_VSCALEX_FUNC(vscalefn, opt, do_16_case, condition_8bit) \ 
  454     case 16:                          do_16_case;                          break; \ 
  455     case 10: if (!isBE(c->dstFormat)) vscalefn = ff_yuv2planeX_10_ ## opt; break; \ 
  456     case 9:  if (!isBE(c->dstFormat)) vscalefn = ff_yuv2planeX_9_  ## opt; break; \ 
  457     case 8: if ((condition_8bit) && !c->use_mmx_vfilter) vscalefn = ff_yuv2planeX_8_  ## opt; break; \ 
  459 #define ASSIGN_VSCALE_FUNC(vscalefn, opt1, opt2, opt2chk) \ 
  461     case 16: if (!isBE(c->dstFormat))            vscalefn = ff_yuv2plane1_16_ ## opt1; break; \ 
  462     case 10: if (!isBE(c->dstFormat) && opt2chk) vscalefn = ff_yuv2plane1_10_ ## opt2; break; \ 
  463     case 9:  if (!isBE(c->dstFormat) && opt2chk) vscalefn = ff_yuv2plane1_9_  ## opt2;  break; \ 
  464     case 8:                                      vscalefn = ff_yuv2plane1_8_  ## opt1;  break; \ 
  465     default: av_assert0(c->dstBpc>8); \ 
  467 #define case_rgb(x, X, opt) \ 
  468         case AV_PIX_FMT_ ## X: \ 
  469             c->lumToYV12 = ff_ ## x ## ToY_ ## opt; \ 
  470             if (!c->chrSrcHSubSample) \ 
  471                 c->chrToYV12 = ff_ ## x ## ToUV_ ## opt; \ 
  513 #define ASSIGN_SSE_SCALE_FUNC(hscalefn, filtersize, opt1, opt2) \ 
  514     switch (filtersize) { \ 
  515     case 4:  ASSIGN_SCALE_FUNC2(hscalefn, 4, opt1, opt2); break; \ 
  516     case 8:  ASSIGN_SCALE_FUNC2(hscalefn, 8, opt1, opt2); break; \ 
  517     default: if (filtersize & 4) ASSIGN_SCALE_FUNC2(hscalefn, X4, opt1, opt2); \ 
  518              else                ASSIGN_SCALE_FUNC2(hscalefn, X8, opt1, opt2); \ 
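The SSE assigner differs from the MMX one only in its default case: filter sizes other than 4 and 8 go to a generic X4 or X8 kernel, with an odd multiple of 4 (filtersize & 4 set) using X4 and a multiple of 8 using X8. A sketch of the dispatch with hypothetical stand-in names:

/* Sketch of the filter-size dispatch; example_* names are hypothetical
 * stand-ins for the ff_hscale*_{4,8,X4,X8}_<opt> kernels, and filtersize is
 * assumed to have been padded to a multiple of 4 by the filter setup code. */
typedef void (*hscale_fn)(void);
extern hscale_fn example_4, example_8, example_X4, example_X8;

static hscale_fn pick_hscale(int filtersize)
{
    if (filtersize == 4) return example_4;
    if (filtersize == 8) return example_8;
    return (filtersize & 4) ? example_X4   /* odd multiple of 4 */
                            : example_X8;  /* multiple of 8     */
}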
  525                             HAVE_ALIGNED_STACK || ARCH_X86_64);
 
  574                             HAVE_ALIGNED_STACK || ARCH_X86_64);
 
  581                             HAVE_ALIGNED_STACK || ARCH_X86_64);
 