{ 1, 3, 1, 3, 1, 3, 1, 3, },
{ 2, 0, 2, 0, 2, 0, 2, 0, },
{ 1, 3, 1, 3, 1, 3, 1, 3, },

{ 6, 2, 6, 2, 6, 2, 6, 2, },
{ 0, 4, 0, 4, 0, 4, 0, 4, },
{ 6, 2, 6, 2, 6, 2, 6, 2, },

{ 8, 4, 11, 7, 8, 4, 11, 7, },
{ 2, 14, 1, 13, 2, 14, 1, 13, },
{ 10, 6, 9, 5, 10, 6, 9, 5, },
{ 0, 12, 3, 15, 0, 12, 3, 15, },
{ 8, 4, 11, 7, 8, 4, 11, 7, },

{ 17, 9, 23, 15, 16, 8, 22, 14, },
{ 5, 29, 3, 27, 4, 28, 2, 26, },
{ 21, 13, 19, 11, 20, 12, 18, 10, },
{ 0, 24, 6, 30, 1, 25, 7, 31, },
{ 16, 8, 22, 14, 17, 9, 23, 15, },
{ 4, 28, 2, 26, 5, 29, 3, 27, },
{ 20, 12, 18, 10, 21, 13, 19, 11, },
{ 1, 25, 7, 31, 0, 24, 6, 30, },
{ 17, 9, 23, 15, 16, 8, 22, 14, },

{ 0, 55, 14, 68, 3, 58, 17, 72, },
{ 37, 18, 50, 32, 40, 22, 54, 35, },
{ 9, 64, 5, 59, 13, 67, 8, 63, },
{ 46, 27, 41, 23, 49, 31, 44, 26, },
{ 2, 57, 16, 71, 1, 56, 15, 70, },
{ 39, 21, 52, 34, 38, 19, 51, 33, },
{ 11, 66, 7, 62, 10, 65, 6, 60, },
{ 48, 30, 43, 25, 47, 29, 42, 24, },
{ 0, 55, 14, 68, 3, 58, 17, 72, },

{117, 62, 158, 103, 113, 58, 155, 100, },
{ 34, 199, 21, 186, 31, 196, 17, 182, },
{144, 89, 131, 76, 141, 86, 127, 72, },
{ 0, 165, 41, 206, 10, 175, 52, 217, },
{110, 55, 151, 96, 120, 65, 162, 107, },
{ 28, 193, 14, 179, 38, 203, 24, 189, },
{138, 83, 124, 69, 148, 93, 134, 79, },
{ 7, 172, 48, 213, 3, 168, 45, 210, },
{117, 62, 158, 103, 113, 58, 155, 100, },

{ 0, 143, 18, 200, 2, 156, 25, 215, },
{ 78, 28, 125, 64, 89, 36, 138, 74, },
{ 10, 180, 3, 161, 16, 195, 8, 175, },
{109, 51, 93, 38, 121, 60, 105, 47, },
{ 1, 152, 23, 210, 0, 147, 20, 205, },
{ 85, 33, 134, 71, 81, 30, 130, 67, },
{ 14, 190, 6, 171, 12, 185, 5, 166, },
{117, 57, 101, 44, 113, 54, 97, 41, },
{ 0, 143, 18, 200, 2, 156, 25, 215, },

{ 0, 124, 8, 193, 0, 140, 12, 213, },
{ 55, 14, 104, 42, 66, 19, 119, 52, },
{ 3, 168, 1, 145, 6, 187, 3, 162, },
{ 86, 31, 70, 21, 99, 39, 82, 28, },
{ 0, 134, 11, 206, 0, 129, 9, 200, },
{ 62, 17, 114, 48, 58, 16, 109, 45, },
{ 5, 181, 2, 157, 4, 175, 1, 151, },
{ 95, 36, 78, 26, 90, 34, 74, 24, },
{ 0, 124, 8, 193, 0, 140, 12, 213, },

{ 0, 107, 3, 187, 0, 125, 6, 212, },
{ 39, 7, 86, 28, 49, 11, 102, 36, },
{ 1, 158, 0, 131, 3, 180, 1, 151, },
{ 68, 19, 52, 12, 81, 25, 64, 17, },
{ 0, 119, 5, 203, 0, 113, 4, 195, },
{ 45, 9, 96, 33, 42, 8, 91, 30, },
{ 2, 172, 1, 144, 2, 165, 0, 137, },
{ 77, 23, 60, 15, 72, 21, 56, 14, },
{ 0, 107, 3, 187, 0, 125, 6, 212, },
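/* The rows above are, by all appearances, the 2x2, 4x4 and 8x8 ordered-dither
 * matrices (including the gamma-corrected 8x8 "220" variants) that the low
 * bit-depth packed-output writers further down index with "... & 7". */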
#define output_pixel(pos, val, bias, signedness) \
    if (big_endian) { \
        AV_WB16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
    } else { \
        AV_WL16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
    }
                         int big_endian, int output_bits)

    for (i = 0; i < dstW; i++) {

                         const int32_t **src, uint16_t *dest, int dstW,
                         int big_endian, int output_bits)

    for (i = 0; i < dstW; i++) {

        for (j = 0; j < filterSize; j++)

                                     const int16_t **chrUSrc, const int16_t **chrVSrc,

    uint16_t *dest = (uint16_t*)dest8;

    for (i = 0; i < chrDstW; i++) {

        int v = 1 << (shift - 1);

        for (j = 0; j < chrFilterSize; j++) {
            u += uSrc[j][i] * (unsigned)chrFilter[j];
            v += vSrc[j][i] * (unsigned)chrFilter[j];
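/* Note: the (unsigned) casts on the filter coefficients above keep the
 * multiply-accumulate in unsigned arithmetic, so an intermediate overflow
 * wraps instead of being undefined behaviour. */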
    static const int big_endian = HAVE_BIGENDIAN;
    static const int shift = 3;
    static const float float_mult = 1.0f / 65535.0f;

    for (i = 0; i < dstW; ++i){

        dest[i] = float_mult * (float)val_uint;

    static const int big_endian = HAVE_BIGENDIAN;
    static const int shift = 3;
    static const float float_mult = 1.0f / 65535.0f;

    for (i = 0; i < dstW; ++i){

                            float *dest, int dstW)

    static const int big_endian = HAVE_BIGENDIAN;
    static const int shift = 15;
    static const float float_mult = 1.0f / 65535.0f;

    for (i = 0; i < dstW; ++i){
        val = (1 << (shift - 1)) - 0x40000000;
        for (j = 0; j < filterSize; ++j){

        dest[i] = float_mult * (float)val_uint;

                            uint32_t *dest, int dstW)

    static const int big_endian = HAVE_BIGENDIAN;
    static const int shift = 15;
    static const float float_mult = 1.0f / 65535.0f;

    for (i = 0; i < dstW; ++i){
        val = (1 << (shift - 1)) - 0x40000000;
        for (j = 0; j < filterSize; ++j){
#define yuv2plane1_float(template, dest_type, BE_LE) \
static void yuv2plane1_float ## BE_LE ## _c(const int16_t *src, uint8_t *dest, int dstW, \
                                            const uint8_t *dither, int offset) \
{ \
    template((const int32_t *)src, (dest_type *)dest, dstW); \
}

#define yuv2planeX_float(template, dest_type, BE_LE) \
static void yuv2planeX_float ## BE_LE ## _c(const int16_t *filter, int filterSize, \
                                            const int16_t **src, uint8_t *dest, int dstW, \
                                            const uint8_t *dither, int offset) \
{ \
    template(filter, filterSize, (const int32_t **)src, (dest_type *)dest, dstW); \
}
#define output_pixel(pos, val) \
    if (big_endian) { \
        AV_WB16(pos, av_clip_uintp2(val >> shift, output_bits)); \
    } else { \
        AV_WL16(pos, av_clip_uintp2(val >> shift, output_bits)); \
    }
yuv2plane1_10_c_template(const int16_t *src, uint16_t *dest, int dstW,
                         int big_endian, int output_bits)

    int shift = 15 - output_bits;

    for (i = 0; i < dstW; i++) {

                         const int16_t **src, uint16_t *dest, int dstW,
                         int big_endian, int output_bits)

    int shift = 11 + 16 - output_bits;

    for (i = 0; i < dstW; i++) {

        for (j = 0; j < filterSize; j++)
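/* The single-source path above shifts by 15 - output_bits (only the
 * intermediate headroom has to be removed), while the filtered path shifts by
 * an extra 12 bits because the filter coefficients sum to 4096. */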
#define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t) \
static void yuv2plane1_ ## bits ## BE_LE ## _c(const int16_t *src, \
                              uint8_t *dest, int dstW, \
                              const uint8_t *dither, int offset)\
{ \
    yuv2plane1_ ## template_size ## _c_template((const typeX_t *) src, \
                         (uint16_t *) dest, dstW, is_be, bits); \
} \
static void yuv2planeX_ ## bits ## BE_LE ## _c(const int16_t *filter, int filterSize, \
                         const int16_t **src, uint8_t *dest, int dstW, \
                         const uint8_t *dither, int offset)\
{ \
    yuv2planeX_## template_size ## _c_template(filter, \
                         filterSize, (const typeX_t **) src, \
                         (uint16_t *) dest, dstW, is_be, bits); \
}
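/* This macro is presumably instantiated below for the 9/10/12/14/16-bit
 * big- and little-endian plane writers that ff_sws_init_output() selects. */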
static void yuv2planeX_8_c(const int16_t *filter, int filterSize,

    for (i=0; i<dstW; i++) {

        for (j=0; j<filterSize; j++)

        dest[i]= av_clip_uint8(val>>19);

    for (i=0; i<dstW; i++) {

        dest[i]= av_clip_uint8(val);

                         const int16_t **chrUSrc, const int16_t **chrVSrc,

    const uint8_t *chrDither = c->chrDither8;

        for (i=0; i<chrDstW; i++) {
            int u = chrDither[i & 7] << 12;
            int v = chrDither[(i + 3) & 7] << 12;

            for (j=0; j<chrFilterSize; j++) {
                u += chrUSrc[j][i] * chrFilter[j];
                v += chrVSrc[j][i] * chrFilter[j];

            dest[2*i]= av_clip_uint8(u>>19);
            dest[2*i+1]= av_clip_uint8(v>>19);

        for (i=0; i<chrDstW; i++) {
            int u = chrDither[i & 7] << 12;
            int v = chrDither[(i + 3) & 7] << 12;

            for (j=0; j<chrFilterSize; j++) {
                u += chrUSrc[j][i] * chrFilter[j];
                v += chrVSrc[j][i] * chrFilter[j];

            dest[2*i]= av_clip_uint8(v>>19);
            dest[2*i+1]= av_clip_uint8(u>>19);
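/* The two loops above differ only in the order of the interleaved chroma
 * samples: the first stores U,V pairs (NV12-style), the second stores V,U
 * (NV21-style, i.e. swapped chroma). */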
#define output_pixel(pos, val) \
    if (big_endian) { \
        AV_WB16(pos, av_clip_uintp2(val >> shift, 10) << 6); \
    } else { \
        AV_WL16(pos, av_clip_uintp2(val >> shift, 10) << 6); \
    }
                       uint16_t *dest, int dstW,

    for (i = 0; i < dstW; i++) {

                       const int16_t **src, uint16_t *dest, int dstW,

    for (i = 0; i < dstW; i++) {

        for (j = 0; j < filterSize; j++)

                         const int16_t **chrUSrc, const int16_t **chrVSrc,

    uint16_t *dest = (uint16_t*)dest8;

    for (i = 0; i < chrDstW; i++) {

        int v = 1 << (shift - 1);

        for (j = 0; j < chrFilterSize; j++) {
            u += chrUSrc[j][i] * chrFilter[j];
            v += chrVSrc[j][i] * chrFilter[j];

#define accumulate_bit(acc, val) \

#define output_pixel(pos, acc) \
    if (target == AV_PIX_FMT_MONOBLACK) { \

                                      const int16_t **lumSrc, int lumFilterSize,
                                      const int16_t *chrFilter, const int16_t **chrUSrc,
                                      const int16_t **chrVSrc, int chrFilterSize,
                                      const int16_t **alpSrc, uint8_t *dest, int dstW,

    for (i = 0; i < dstW; i += 2) {

        for (j = 0; j < lumFilterSize; j++) {
            Y1 += lumSrc[j][i] * lumFilter[j];
            Y2 += lumSrc[j][i+1] * lumFilter[j];

        if ((Y1 | Y2) & 0x100) {
            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);

            Y1 += (7*err + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2] + 8 - 256)>>4;
            c->dither_error[0][i] = err;
            acc = 2*acc + (Y1 >= 128);

            err = Y2 + ((7*Y1 + 1*c->dither_error[0][i+1] + 5*c->dither_error[0][i+2] + 3*c->dither_error[0][i+3] + 8 - 256)>>4);
            c->dither_error[0][i+1] = Y1;
            acc = 2*acc + (err >= 128);

    c->dither_error[0][i] = err;
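/* Error-diffusion sketch: the quantization error of the left neighbour ("err")
 * and of three pixels from the previous output line (kept in
 * c->dither_error[0][]) is folded back into each new sample with the classic
 * Floyd-Steinberg weights 7/16, 1/16, 5/16 and 3/16 before thresholding. */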
                                      const int16_t *ubuf[2], const int16_t *vbuf[2],
                                      const int16_t *abuf[2], uint8_t *dest, int dstW,
                                      int yalpha, int uvalpha, int y,

    const int16_t *buf0 = buf[0], *buf1 = buf[1];

    int yalpha1 = 4096 - yalpha;

        for (i = 0; i < dstW; i +=2) {

            Y = (buf0[i + 0] * yalpha1 + buf1[i + 0] * yalpha) >> 19;
            Y += (7*err + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2] + 8 - 256)>>4;
            c->dither_error[0][i] = err;

            err = (buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19;
            err += (7*Y + 1*c->dither_error[0][i+1] + 5*c->dither_error[0][i+2] + 3*c->dither_error[0][i+3] + 8 - 256)>>4;
            c->dither_error[0][i+1] = Y;
            acc = 2*acc + (err >= 128);

        c->dither_error[0][i] = err;

        for (i = 0; i < dstW; i += 8) {

            Y = (buf0[i + 0] * yalpha1 + buf1[i + 0] * yalpha) >> 19;

            Y = (buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19;

            Y = (buf0[i + 2] * yalpha1 + buf1[i + 2] * yalpha) >> 19;

            Y = (buf0[i + 3] * yalpha1 + buf1[i + 3] * yalpha) >> 19;

            Y = (buf0[i + 4] * yalpha1 + buf1[i + 4] * yalpha) >> 19;

            Y = (buf0[i + 5] * yalpha1 + buf1[i + 5] * yalpha) >> 19;

            Y = (buf0[i + 6] * yalpha1 + buf1[i + 6] * yalpha) >> 19;

            Y = (buf0[i + 7] * yalpha1 + buf1[i + 7] * yalpha) >> 19;

                                      const int16_t *ubuf[2], const int16_t *vbuf[2],
                                      const int16_t *abuf0, uint8_t *dest, int dstW,

        for (i = 0; i < dstW; i +=2) {

            Y = ((buf0[i + 0] + 64) >> 7);
            Y += (7*err + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2] + 8 - 256)>>4;
            c->dither_error[0][i] = err;

            err = ((buf0[i + 1] + 64) >> 7);
            err += (7*Y + 1*c->dither_error[0][i+1] + 5*c->dither_error[0][i+2] + 3*c->dither_error[0][i+3] + 8 - 256)>>4;
            c->dither_error[0][i+1] = Y;
            acc = 2*acc + (err >= 128);

        c->dither_error[0][i] = err;

        for (i = 0; i < dstW; i += 8) {
#undef accumulate_bit

#define YUV2PACKEDWRAPPER(name, base, ext, fmt) \
static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
                                const int16_t **lumSrc, int lumFilterSize, \
                                const int16_t *chrFilter, const int16_t **chrUSrc, \
                                const int16_t **chrVSrc, int chrFilterSize, \
                                const int16_t **alpSrc, uint8_t *dest, int dstW, \
                                int y) \
{ \
    name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
                                  chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                                  alpSrc, dest, dstW, y, fmt); \
} \
 \
static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf[2], uint8_t *dest, int dstW, \
                                int yalpha, int uvalpha, int y) \
{ \
    name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
                                  dest, dstW, yalpha, uvalpha, y, fmt); \
} \
 \
static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf0, uint8_t *dest, int dstW, \
                                int uvalpha, int y) \
{ \
    name ## base ## _1_c_template(c, buf0, ubuf, vbuf, \
                                  abuf0, dest, dstW, uvalpha, \
                                  y, fmt); \
}
#define output_pixels(pos, Y1, U, Y2, V) \
    if (target == AV_PIX_FMT_YUYV422) { \
        dest[pos + 0] = Y1; \
        dest[pos + 1] = U; \
        dest[pos + 2] = Y2; \
        dest[pos + 3] = V; \
    } else if (target == AV_PIX_FMT_YVYU422) { \
        dest[pos + 0] = Y1; \
        dest[pos + 1] = V; \
        dest[pos + 2] = Y2; \
        dest[pos + 3] = U; \
    } else { /* UYVY422 */ \
        dest[pos + 0] = U; \
        dest[pos + 1] = Y1; \
        dest[pos + 2] = V; \
        dest[pos + 3] = Y2; \
    }
                          const int16_t **lumSrc, int lumFilterSize,
                          const int16_t *chrFilter, const int16_t **chrUSrc,
                          const int16_t **chrVSrc, int chrFilterSize,
                          const int16_t **alpSrc, uint8_t *dest, int dstW,

    for (i = 0; i < ((dstW + 1) >> 1); i++) {

        for (j = 0; j < lumFilterSize; j++) {
            Y1 += lumSrc[j][i * 2] * lumFilter[j];
            Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];

        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];

        if ((Y1 | Y2 | U | V) & 0x100) {
            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);
            U = av_clip_uint8(U);
            V = av_clip_uint8(V);

                          const int16_t *ubuf[2], const int16_t *vbuf[2],
                          const int16_t *abuf[2], uint8_t *dest, int dstW,
                          int yalpha, int uvalpha, int y,

    const int16_t *buf0 = buf[0], *buf1 = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
    int yalpha1 = 4096 - yalpha;
    int uvalpha1 = 4096 - uvalpha;

    for (i = 0; i < ((dstW + 1) >> 1); i++) {
        int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19;
        int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19;
        int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19;
        int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19;

        if ((Y1 | Y2 | U | V) & 0x100) {
            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);
            U = av_clip_uint8(U);
            V = av_clip_uint8(V);

                          const int16_t *ubuf[2], const int16_t *vbuf[2],
                          const int16_t *abuf0, uint8_t *dest, int dstW,

    const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];

    if (uvalpha < 2048) {
        for (i = 0; i < ((dstW + 1) >> 1); i++) {
            int Y1 = (buf0[i * 2 ]+64) >> 7;
            int Y2 = (buf0[i * 2 + 1]+64) >> 7;
            int U = (ubuf0[i] +64) >> 7;
            int V = (vbuf0[i] +64) >> 7;

            if ((Y1 | Y2 | U | V) & 0x100) {
                Y1 = av_clip_uint8(Y1);
                Y2 = av_clip_uint8(Y2);
                U = av_clip_uint8(U);
                V = av_clip_uint8(V);

        const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
        for (i = 0; i < ((dstW + 1) >> 1); i++) {
            int Y1 = (buf0[i * 2 ] + 64) >> 7;
            int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
            int U = (ubuf0[i] + ubuf1[i]+128) >> 8;
            int V = (vbuf0[i] + vbuf1[i]+128) >> 8;

            if ((Y1 | Y2 | U | V) & 0x100) {
                Y1 = av_clip_uint8(Y1);
                Y2 = av_clip_uint8(Y2);
                U = av_clip_uint8(U);
                V = av_clip_uint8(V);
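/* In the *_1_ (single input line) variants, uvalpha < 2048 means the chroma
 * sample sits close enough to the first chroma line to use it directly;
 * otherwise the two available chroma lines are averaged, hence the
 * (ubuf0[i] + ubuf1[i] + 128) >> 8 form in the second loop. */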
#define R_B ((target == AV_PIX_FMT_RGB48LE || target == AV_PIX_FMT_RGB48BE || target == AV_PIX_FMT_RGBA64LE || target == AV_PIX_FMT_RGBA64BE) ? R : B)
#define B_R ((target == AV_PIX_FMT_RGB48LE || target == AV_PIX_FMT_RGB48BE || target == AV_PIX_FMT_RGBA64LE || target == AV_PIX_FMT_RGBA64BE) ? B : R)
#define output_pixel(pos, val) \
    if (isBE(target)) { \
        AV_WB16(pos, val); \
    } else { \
        AV_WL16(pos, val); \
    }
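/* R_B/B_R swap the red and blue lookups so one template serves both the
 * RGB48/RGBA64 and the BGR48/BGRA64 layouts; output_pixel dispatches on
 * isBE(target) to pick the byte order of each 16-bit component. */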
                        const int32_t **lumSrc, int lumFilterSize,
                        const int16_t *chrFilter, const int32_t **unused_chrUSrc,
                        const int32_t **unused_chrVSrc, int unused_chrFilterSize,
                        const int32_t **alpSrc, uint16_t *dest, int dstW,
                        int y, enum AVPixelFormat target, int unused_hasAlpha, int unused_eightbytes)

    int hasAlpha = !!alpSrc;

    for (i = 0; i < dstW; i++) {

        int64_t A = 0xffff<<14;

        for (j = 0; j < lumFilterSize; j++)
            Y += lumSrc[j][i] * lumFilter[j];

        Y = av_clip_uint16(Y);

            for (j = 0; j < lumFilterSize; j++)
                A += alpSrc[j][i] * lumFilter[j];

            A = av_clip_uint16(A);

                        const int32_t *abuf[2], uint16_t *dest, int dstW,
                        int yalpha, int unused_uvalpha, int y,
                        enum AVPixelFormat target, int unused_hasAlpha, int unused_eightbytes)

    int hasAlpha = abuf && abuf[0] && abuf[1];

                  *abuf0 = hasAlpha ? abuf[0] : NULL,
                  *abuf1 = hasAlpha ? abuf[1] : NULL;
    int yalpha1 = 4096 - yalpha;

    for (i = 0; i < dstW; i++) {
        int Y = (buf0[i] * yalpha1 + buf1[i] * yalpha) >> 15;

        Y = av_clip_uint16(Y);

            A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha) >> 15;
            A = av_clip_uint16(A);

                        const int32_t *abuf0, uint16_t *dest, int dstW,
                        int unused_uvalpha, int y, enum AVPixelFormat target, int unused_hasAlpha, int unused_eightbytes)

    int hasAlpha = !!abuf0;

    for (i = 0; i < dstW; i++) {
        int Y = buf0[i] >> 3;

        Y = av_clip_uint16(Y);

            A = av_clip_uint16(A);

                        const int32_t **lumSrc, int lumFilterSize,
                        const int16_t *chrFilter, const int32_t **chrUSrc,
                        const int32_t **chrVSrc, int chrFilterSize,
                        const int32_t **alpSrc, uint16_t *dest, int dstW,
                        int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)

    int A1 = 0xffff<<14, A2 = 0xffff<<14;

    for (i = 0; i < ((dstW + 1) >> 1); i++) {

        int Y1 = -0x40000000;
        int Y2 = -0x40000000;
        int U = -(128 << 23);
        int V = -(128 << 23);

        for (j = 0; j < lumFilterSize; j++) {
            Y1 += lumSrc[j][i * 2] * (unsigned)lumFilter[j];
            Y2 += lumSrc[j][i * 2 + 1] * (unsigned)lumFilter[j];

        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * (unsigned)chrFilter[j];
            V += chrVSrc[j][i] * (unsigned)chrFilter[j];

            for (j = 0; j < lumFilterSize; j++) {
                A1 += alpSrc[j][i * 2] * (unsigned)lumFilter[j];
                A2 += alpSrc[j][i * 2 + 1] * (unsigned)lumFilter[j];

        Y1 -= c->yuv2rgb_y_offset;
        Y2 -= c->yuv2rgb_y_offset;
        Y1 *= c->yuv2rgb_y_coeff;
        Y2 *= c->yuv2rgb_y_coeff;

        R = V * c->yuv2rgb_v2r_coeff;
        G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
        B = U * c->yuv2rgb_u2b_coeff;
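/* R, G and B above hold only the chroma contributions; the already scaled
 * Y1/Y2 terms are presumably added when the pixel is stored, with the sum
 * clipped to 30 bits as in the other 16-bit-per-component writers. */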
                        const int32_t *abuf[2], uint16_t *dest, int dstW,
                        int yalpha, int uvalpha, int y,

                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
                  *abuf0 = hasAlpha ? abuf[0] : NULL,
                  *abuf1 = hasAlpha ? abuf[1] : NULL;
    int yalpha1 = 4096 - yalpha;
    int uvalpha1 = 4096 - uvalpha;

    int A1 = 0xffff<<14, A2 = 0xffff<<14;

    for (i = 0; i < ((dstW + 1) >> 1); i++) {
        int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 14;
        int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 14;
        int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha - (128 << 23)) >> 14;
        int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha - (128 << 23)) >> 14;

        Y1 -= c->yuv2rgb_y_offset;
        Y2 -= c->yuv2rgb_y_offset;
        Y1 *= c->yuv2rgb_y_coeff;
        Y2 *= c->yuv2rgb_y_coeff;

        R = V * c->yuv2rgb_v2r_coeff;
        G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
        B = U * c->yuv2rgb_u2b_coeff;

            A1 = (abuf0[i * 2 ] * yalpha1 + abuf1[i * 2 ] * yalpha) >> 1;
            A2 = (abuf0[i * 2 + 1] * yalpha1 + abuf1[i * 2 + 1] * yalpha) >> 1;

                        const int32_t *abuf0, uint16_t *dest, int dstW,
                        int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)

    const int32_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];

    int A1 = 0xffff<<14, A2= 0xffff<<14;

    if (uvalpha < 2048) {
        for (i = 0; i < ((dstW + 1) >> 1); i++) {
            int Y1 = (buf0[i * 2] ) >> 2;
            int Y2 = (buf0[i * 2 + 1]) >> 2;
            int U = (ubuf0[i] - (128 << 11)) >> 2;
            int V = (vbuf0[i] - (128 << 11)) >> 2;

            Y1 -= c->yuv2rgb_y_offset;
            Y2 -= c->yuv2rgb_y_offset;
            Y1 *= c->yuv2rgb_y_coeff;
            Y2 *= c->yuv2rgb_y_coeff;

                A1 = abuf0[i * 2 ] << 11;
                A2 = abuf0[i * 2 + 1] << 11;

            R = V * c->yuv2rgb_v2r_coeff;
            G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
            B = U * c->yuv2rgb_u2b_coeff;

        const int32_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
        int A1 = 0xffff<<14, A2 = 0xffff<<14;
        for (i = 0; i < ((dstW + 1) >> 1); i++) {
            int Y1 = (buf0[i * 2] ) >> 2;
            int Y2 = (buf0[i * 2 + 1]) >> 2;
            int U = (ubuf0[i] + ubuf1[i] - (128 << 12)) >> 3;
            int V = (vbuf0[i] + vbuf1[i] - (128 << 12)) >> 3;

            Y1 -= c->yuv2rgb_y_offset;
            Y2 -= c->yuv2rgb_y_offset;
            Y1 *= c->yuv2rgb_y_coeff;
            Y2 *= c->yuv2rgb_y_coeff;

                A1 = abuf0[i * 2 ] << 11;
                A2 = abuf0[i * 2 + 1] << 11;

            R = V * c->yuv2rgb_v2r_coeff;
            G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
            B = U * c->yuv2rgb_u2b_coeff;
                        const int32_t **lumSrc, int lumFilterSize,
                        const int16_t *chrFilter, const int32_t **chrUSrc,
                        const int32_t **chrVSrc, int chrFilterSize,
                        const int32_t **alpSrc, uint16_t *dest, int dstW,
                        int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)

    for (i = 0; i < dstW; i++) {

        int Y = -0x40000000;
        int U = -(128 << 23);
        int V = -(128 << 23);

        for (j = 0; j < lumFilterSize; j++) {
            Y += lumSrc[j][i] * (unsigned)lumFilter[j];

        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * (unsigned)chrFilter[j];
            V += chrVSrc[j][i] * (unsigned)chrFilter[j];

            for (j = 0; j < lumFilterSize; j++) {
                A += alpSrc[j][i] * (unsigned)lumFilter[j];

        Y -= c->yuv2rgb_y_offset;
        Y *= c->yuv2rgb_y_coeff;

        R = V * c->yuv2rgb_v2r_coeff;
        G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
        B = U * c->yuv2rgb_u2b_coeff;

                        const int32_t *abuf[2], uint16_t *dest, int dstW,
                        int yalpha, int uvalpha, int y,

                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
                  *abuf0 = hasAlpha ? abuf[0] : NULL,
                  *abuf1 = hasAlpha ? abuf[1] : NULL;
    int yalpha1 = 4096 - yalpha;
    int uvalpha1 = 4096 - uvalpha;

    for (i = 0; i < dstW; i++) {
        int Y = (buf0[i] * yalpha1 + buf1[i] * yalpha) >> 14;
        int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha - (128 << 23)) >> 14;
        int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha - (128 << 23)) >> 14;

        Y -= c->yuv2rgb_y_offset;
        Y *= c->yuv2rgb_y_coeff;

        R = V * c->yuv2rgb_v2r_coeff;
        G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
        B = U * c->yuv2rgb_u2b_coeff;

            A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha) >> 1;

                        const int32_t *abuf0, uint16_t *dest, int dstW,
                        int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)

    const int32_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];

    if (uvalpha < 2048) {
        for (i = 0; i < dstW; i++) {
            int Y = (buf0[i]) >> 2;
            int U = (ubuf0[i] - (128 << 11)) >> 2;
            int V = (vbuf0[i] - (128 << 11)) >> 2;

            Y -= c->yuv2rgb_y_offset;
            Y *= c->yuv2rgb_y_coeff;

            R = V * c->yuv2rgb_v2r_coeff;
            G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
            B = U * c->yuv2rgb_u2b_coeff;

        const int32_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];

        for (i = 0; i < dstW; i++) {
            int Y = (buf0[i] ) >> 2;
            int U = (ubuf0[i] + ubuf1[i] - (128 << 12)) >> 3;
            int V = (vbuf0[i] + vbuf1[i] - (128 << 12)) >> 3;

            Y -= c->yuv2rgb_y_offset;
            Y *= c->yuv2rgb_y_coeff;

            R = V * c->yuv2rgb_v2r_coeff;
            G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
            B = U * c->yuv2rgb_u2b_coeff;
#define YUV2PACKED16WRAPPER(name, base, ext, fmt, hasAlpha, eightbytes) \
static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
                        const int16_t **_lumSrc, int lumFilterSize, \
                        const int16_t *chrFilter, const int16_t **_chrUSrc, \
                        const int16_t **_chrVSrc, int chrFilterSize, \
                        const int16_t **_alpSrc, uint8_t *_dest, int dstW, \
                        int y) \
{ \
    const int32_t **lumSrc  = (const int32_t **) _lumSrc, \
                  **chrUSrc = (const int32_t **) _chrUSrc, \
                  **chrVSrc = (const int32_t **) _chrVSrc, \
                  **alpSrc  = (const int32_t **) _alpSrc; \
    uint16_t *dest = (uint16_t *) _dest; \
    name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
                          chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                          alpSrc, dest, dstW, y, fmt, hasAlpha, eightbytes); \
} \
 \
static void name ## ext ## _2_c(SwsContext *c, const int16_t *_buf[2], \
                        const int16_t *_ubuf[2], const int16_t *_vbuf[2], \
                        const int16_t *_abuf[2], uint8_t *_dest, int dstW, \
                        int yalpha, int uvalpha, int y) \
{ \
    const int32_t **buf  = (const int32_t **) _buf, \
                  **ubuf = (const int32_t **) _ubuf, \
                  **vbuf = (const int32_t **) _vbuf, \
                  **abuf = (const int32_t **) _abuf; \
    uint16_t *dest = (uint16_t *) _dest; \
    name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
                          dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha, eightbytes); \
} \
 \
static void name ## ext ## _1_c(SwsContext *c, const int16_t *_buf0, \
                        const int16_t *_ubuf[2], const int16_t *_vbuf[2], \
                        const int16_t *_abuf0, uint8_t *_dest, int dstW, \
                        int uvalpha, int y) \
{ \
    const int32_t *buf0  = (const int32_t *)  _buf0, \
                 **ubuf  = (const int32_t **) _ubuf, \
                 **vbuf  = (const int32_t **) _vbuf, \
                  *abuf0 = (const int32_t *)  _abuf0; \
    uint16_t *dest = (uint16_t *) _dest; \
    name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \
                                  dstW, uvalpha, y, fmt, hasAlpha, eightbytes); \
}
                              unsigned A1, unsigned A2,
                              const void *_r, const void *_g, const void *_b, int y,

        uint32_t *dest = (uint32_t *) _dest;
        const uint32_t *r = (const uint32_t *) _r;
        const uint32_t *g = (const uint32_t *) _g;
        const uint32_t *b = (const uint32_t *) _b;

            dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (hasAlpha ? A1 << sh : 0);
            dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (hasAlpha ? A2 << sh : 0);

            dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (A1 << sh);
            dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (A2 << sh);
#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 1

            av_assert2((((r[Y1] + g[Y1] + b[Y1]) >> sh) & 0xFF) == 0xFF);

            dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1];
            dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2];

#define r_b ((target == AV_PIX_FMT_RGB24) ? r : b)
#define b_r ((target == AV_PIX_FMT_RGB24) ? b : r)

        dest[i * 6 + 0] = r_b[Y1];
        dest[i * 6 + 1] = g[Y1];
        dest[i * 6 + 2] = b_r[Y1];
        dest[i * 6 + 3] = r_b[Y2];
        dest[i * 6 + 4] = g[Y2];
        dest[i * 6 + 5] = b_r[Y2];

        uint16_t *dest = (uint16_t *) _dest;
        const uint16_t *r = (const uint16_t *) _r;
        const uint16_t *g = (const uint16_t *) _g;
        const uint16_t *b = (const uint16_t *) _b;
        int dr1, dg1, db1, dr2, dg2, db2;

        dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
        dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];

        int dr1, dg1, db1, dr2, dg2, db2;

            dr1 = dg1 = d32[(i * 2 + 0) & 7];
            db1 = d64[(i * 2 + 0) & 7];
            dr2 = dg2 = d32[(i * 2 + 1) & 7];
            db2 = d64[(i * 2 + 1) & 7];

            dr1 = db1 = d128[(i * 2 + 0) & 7];
            dg1 = d64[(i * 2 + 0) & 7];
            dr2 = db2 = d128[(i * 2 + 1) & 7];
            dg2 = d64[(i * 2 + 1) & 7];

            dest[i] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1] +
                      ((r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2]) << 4);

            dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
            dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];
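/* yuv2rgb_write dispatches on the target format: 32-bit targets sum the
 * precomputed per-component tables r[], g[], b[] (plus an optional alpha
 * byte), 24-bit targets store three bytes per pixel via r_b/g/b_r, and the
 * 16-bit-and-below targets add per-pixel dither offsets (d32/d64/d128) to
 * the table indices before summing. */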
                          const int16_t **lumSrc, int lumFilterSize,
                          const int16_t *chrFilter, const int16_t **chrUSrc,
                          const int16_t **chrVSrc, int chrFilterSize,
                          const int16_t **alpSrc, uint8_t *dest, int dstW,

    for (i = 0; i < ((dstW + 1) >> 1); i++) {

        const void *r, *g, *b;

        for (j = 0; j < lumFilterSize; j++) {
            Y1 += lumSrc[j][i * 2] * lumFilter[j];
            Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];

        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];

            for (j = 0; j < lumFilterSize; j++) {
                A1 += alpSrc[j][i * 2 ] * lumFilter[j];
                A2 += alpSrc[j][i * 2 + 1] * lumFilter[j];

            if ((A1 | A2) & 0x100) {
                A1 = av_clip_uint8(A1);
                A2 = av_clip_uint8(A2);

                       r, g, b, y, target, hasAlpha);

                          const int16_t *ubuf[2], const int16_t *vbuf[2],
                          const int16_t *abuf[2], uint8_t *dest, int dstW,
                          int yalpha, int uvalpha, int y,

    const int16_t *buf0 = buf[0], *buf1 = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
                  *abuf0 = hasAlpha ? abuf[0] : NULL,
                  *abuf1 = hasAlpha ? abuf[1] : NULL;
    int yalpha1 = 4096 - yalpha;
    int uvalpha1 = 4096 - uvalpha;

    for (i = 0; i < ((dstW + 1) >> 1); i++) {
        int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19;
        int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19;
        int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19;
        int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19;

            A1 = (abuf0[i * 2 ] * yalpha1 + abuf1[i * 2 ] * yalpha) >> 19;
            A2 = (abuf0[i * 2 + 1] * yalpha1 + abuf1[i * 2 + 1] * yalpha) >> 19;
            A1 = av_clip_uint8(A1);
            A2 = av_clip_uint8(A2);

                       r, g, b, y, target, hasAlpha);

                          const int16_t *ubuf[2], const int16_t *vbuf[2],
                          const int16_t *abuf0, uint8_t *dest, int dstW,

    const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];

    if (uvalpha < 2048) {
        for (i = 0; i < ((dstW + 1) >> 1); i++) {
            int Y1 = (buf0[i * 2 ] + 64) >> 7;
            int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
            int U = (ubuf0[i] + 64) >> 7;
            int V = (vbuf0[i] + 64) >> 7;

                A1 = abuf0[i * 2 ] * 255 + 16384 >> 15;
                A2 = abuf0[i * 2 + 1] * 255 + 16384 >> 15;
                A1 = av_clip_uint8(A1);
                A2 = av_clip_uint8(A2);

                           r, g, b, y, target, hasAlpha);

        const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
        for (i = 0; i < ((dstW + 1) >> 1); i++) {
            int Y1 = (buf0[i * 2 ] + 64) >> 7;
            int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
            int U = (ubuf0[i] + ubuf1[i] + 128) >> 8;
            int V = (vbuf0[i] + vbuf1[i] + 128) >> 8;

                A1 = (abuf0[i * 2 ] + 64) >> 7;
                A2 = (abuf0[i * 2 + 1] + 64) >> 7;
                A1 = av_clip_uint8(A1);
                A2 = av_clip_uint8(A2);

                           r, g, b, y, target, hasAlpha);
#define YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
                                const int16_t **lumSrc, int lumFilterSize, \
                                const int16_t *chrFilter, const int16_t **chrUSrc, \
                                const int16_t **chrVSrc, int chrFilterSize, \
                                const int16_t **alpSrc, uint8_t *dest, int dstW, \
                                int y) \
{ \
    name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
                                  chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                                  alpSrc, dest, dstW, y, fmt, hasAlpha); \
}

#define YUV2RGBWRAPPERX2(name, base, ext, fmt, hasAlpha) \
YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf[2], uint8_t *dest, int dstW, \
                                int yalpha, int uvalpha, int y) \
{ \
    name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
                                  dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha); \
}

#define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha) \
YUV2RGBWRAPPERX2(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf0, uint8_t *dest, int dstW, \
                                int uvalpha, int y) \
{ \
    name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \
                                  dstW, uvalpha, y, fmt, hasAlpha); \
}
#if CONFIG_SWSCALE_ALPHA

    Y -= c->yuv2rgb_y_offset;
    Y *= c->yuv2rgb_y_coeff;

    R = (unsigned)Y + V*c->yuv2rgb_v2r_coeff;
    G = (unsigned)Y + V*c->yuv2rgb_v2g_coeff + U*c->yuv2rgb_u2g_coeff;
    B = (unsigned)Y + U*c->yuv2rgb_u2b_coeff;
    if ((R | G | B) & 0xC0000000) {
        R = av_clip_uintp2(R, 30);
        G = av_clip_uintp2(G, 30);
        B = av_clip_uintp2(B, 30);

        dest[0] = hasAlpha ? A : 255;

        dest[3] = hasAlpha ? A : 255;

        dest[0] = hasAlpha ? A : 255;

        dest[3] = hasAlpha ? A : 255;

    switch (c->dither) {

        R += (7*err[0] + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2])>>4;
        G += (7*err[1] + 1*c->dither_error[1][i] + 5*c->dither_error[1][i+1] + 3*c->dither_error[1][i+2])>>4;
        B += (7*err[2] + 1*c->dither_error[2][i] + 5*c->dither_error[2][i+1] + 3*c->dither_error[2][i+2])>>4;
        c->dither_error[0][i] = err[0];
        c->dither_error[1][i] = err[1];
        c->dither_error[2][i] = err[2];
        r = R >> (isrgb8 ? 5 : 7);
        g = G >> (isrgb8 ? 5 : 6);
        b = B >> (isrgb8 ? 6 : 7);
        r = av_clip(r, 0, isrgb8 ? 7 : 1);
        g = av_clip(g, 0, isrgb8 ? 7 : 3);
        b = av_clip(b, 0, isrgb8 ? 3 : 1);
        err[0] = R - r*(isrgb8 ? 36 : 255);
        err[1] = G - g*(isrgb8 ? 36 : 85);
        err[2] = B - b*(isrgb8 ? 85 : 255);
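/* SWS_DITHER_ED branch: err[0..2] carry the running per-channel quantization
 * error. The isrgb8 selectors switch between RGB332-style steps (r and g in
 * 0..7, b in 0..3, reconstruction steps 36/36/85) and the 4-bit targets
 * (r and b in 0..1, g in 0..3, steps 255/85/255). */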
#define A_DITHER(u,v) (((((u)+((v)*236))*119)&0xff))

            r = av_clip_uintp2(r, 3);
            g = av_clip_uintp2(g, 3);
            b = av_clip_uintp2(b, 2);

            r = av_clip_uintp2(r, 1);
            g = av_clip_uintp2(g, 2);
            b = av_clip_uintp2(b, 1);

#define X_DITHER(u,v) (((((u)^((v)*237))*181)&0x1ff)/2)

            r = av_clip_uintp2(r, 3);
            g = av_clip_uintp2(g, 3);
            b = av_clip_uintp2(b, 2);

            r = av_clip_uintp2(r, 1);
            g = av_clip_uintp2(g, 2);
            b = av_clip_uintp2(b, 1);

        dest[0] = r + 2*g + 8*b;

        dest[0] = b + 2*g + 8*r;

        dest[0] = r + 8*g + 64*b;

        dest[0] = b + 4*g + 32*r;
                                      const int16_t **lumSrc, int lumFilterSize,
                                      const int16_t *chrFilter, const int16_t **chrUSrc,
                                      const int16_t **chrVSrc, int chrFilterSize,
                                      const int16_t **alpSrc, uint8_t *dest,

    for (i = 0; i < dstW; i++) {

        int U = (1<<9)-(128 << 19);
        int V = (1<<9)-(128 << 19);

        for (j = 0; j < lumFilterSize; j++) {
            Y += lumSrc[j][i] * lumFilter[j];

        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];

            for (j = 0; j < lumFilterSize; j++) {
                A += alpSrc[j][i] * lumFilter[j];

            A = av_clip_uint8(A);

        yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);

    c->dither_error[0][i] = err[0];
    c->dither_error[1][i] = err[1];
    c->dither_error[2][i] = err[2];

                                      const int16_t *ubuf[2], const int16_t *vbuf[2],
                                      const int16_t *abuf[2], uint8_t *dest, int dstW,
                                      int yalpha, int uvalpha, int y,

    const int16_t *buf0 = buf[0], *buf1 = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
                  *abuf0 = hasAlpha ? abuf[0] : NULL,
                  *abuf1 = hasAlpha ? abuf[1] : NULL;
    int yalpha1 = 4096 - yalpha;
    int uvalpha1 = 4096 - uvalpha;

    for (i = 0; i < dstW; i++) {
        int Y = ( buf0[i] * yalpha1 + buf1[i] * yalpha ) >> 10;
        int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha-(128 << 19)) >> 10;
        int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha-(128 << 19)) >> 10;

            A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha + (1<<18)) >> 19;

            A = av_clip_uint8(A);

        yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);

    c->dither_error[0][i] = err[0];
    c->dither_error[1][i] = err[1];
    c->dither_error[2][i] = err[2];

                                      const int16_t *ubuf[2], const int16_t *vbuf[2],
                                      const int16_t *abuf0, uint8_t *dest, int dstW,

    const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];

    if (uvalpha < 2048) {

        for (i = 0; i < dstW; i++) {
            int Y = buf0[i] * 4;
            int U = (ubuf0[i] - (128<<7)) * 4;
            int V = (vbuf0[i] - (128<<7)) * 4;

                A = (abuf0[i] + 64) >> 7;

                A = av_clip_uint8(A);

            yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);

        const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];

        for (i = 0; i < dstW; i++) {
            int Y = buf0[i] * 4;
            int U = (ubuf0[i] + ubuf1[i] - (128<<8)) * 2;
            int V = (vbuf0[i] + vbuf1[i] - (128<<8)) * 2;

                A = (abuf0[i] + 64) >> 7;

                A = av_clip_uint8(A);

            yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);

    c->dither_error[0][i] = err[0];
    c->dither_error[1][i] = err[1];
    c->dither_error[2][i] = err[2];
#if CONFIG_SWSCALE_ALPHA

                               const int16_t **lumSrc, int lumFilterSize,
                               const int16_t *chrFilter, const int16_t **chrUSrc,
                               const int16_t **chrVSrc, int chrFilterSize,
                               const int16_t **alpSrc, uint8_t **dest,

    uint16_t **dest16 = (uint16_t**)dest;
    int SH = 22 + 8 - desc->comp[0].depth;

    for (i = 0; i < dstW; i++) {

        int U = (1 << 9) - (128 << 19);
        int V = (1 << 9) - (128 << 19);

        for (j = 0; j < lumFilterSize; j++)
            Y += lumSrc[j][i] * lumFilter[j];

        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];

            for (j = 0; j < lumFilterSize; j++)
                A += alpSrc[j][i] * lumFilter[j];

            A = av_clip_uintp2(A, 27);

        Y -= c->yuv2rgb_y_offset;
        Y *= c->yuv2rgb_y_coeff;

        R = Y + V * c->yuv2rgb_v2r_coeff;
        G = Y + V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
        B = Y + U * c->yuv2rgb_u2b_coeff;

        if ((R | G | B) & 0xC0000000) {
            R = av_clip_uintp2(R, 30);
            G = av_clip_uintp2(G, 30);
            B = av_clip_uintp2(B, 30);

            dest16[0][i] = G >> SH;
            dest16[1][i] = B >> SH;
            dest16[2][i] = R >> SH;

                dest16[3][i] = A >> (SH - 3);

            dest[0][i] = G >> 22;
            dest[1][i] = B >> 22;
            dest[2][i] = R >> 22;

                dest[3][i] = A >> 19;

    if (SH != 22 && (!isBE(c->dstFormat)) != (!HAVE_BIGENDIAN)) {
        for (i = 0; i < dstW; i++) {

                                 const int16_t **lumSrcx, int lumFilterSize,
                                 const int16_t *chrFilter, const int16_t **chrUSrcx,
                                 const int16_t **chrVSrcx, int chrFilterSize,
                                 const int16_t **alpSrcx, uint8_t **dest,

    uint16_t **dest16 = (uint16_t**)dest;

    for (i = 0; i < dstW; i++) {

        int Y = -0x40000000;
        int U = -(128 << 23);
        int V = -(128 << 23);

        for (j = 0; j < lumFilterSize; j++)
            Y += lumSrc[j][i] * (unsigned)lumFilter[j];

        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * (unsigned)chrFilter[j];
            V += chrVSrc[j][i] * (unsigned)chrFilter[j];

            for (j = 0; j < lumFilterSize; j++)
                A += alpSrc[j][i] * (unsigned)lumFilter[j];

        Y -= c->yuv2rgb_y_offset;
        Y *= c->yuv2rgb_y_coeff;

        R = V * c->yuv2rgb_v2r_coeff;
        G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
        B = U * c->yuv2rgb_u2b_coeff;

        R = av_clip_uintp2(Y + R, 30);
        G = av_clip_uintp2(Y + G, 30);
        B = av_clip_uintp2(Y + B, 30);

        dest16[0][i] = G >> 14;
        dest16[1][i] = B >> 14;
        dest16[2][i] = R >> 14;

            dest16[3][i] = av_clip_uintp2(A, 30) >> 14;

    if ((!isBE(c->dstFormat)) != (!HAVE_BIGENDIAN)) {
        for (i = 0; i < dstW; i++) {
                         const int16_t *ubuf[2], const int16_t *vbuf[2],
                         const int16_t *abuf0, uint8_t *dest, int dstW,

    int hasAlpha = !!abuf0;

    for (i = 0; i < dstW; i++) {
        int Y = (buf0[i] + 64) >> 7;

        Y = av_clip_uint8(Y);

            A = (abuf0[i] + 64) >> 7;

            A = av_clip_uint8(A);

        dest[i * 2 + 1] = hasAlpha ? A : 255;

                         const int16_t *ubuf[2], const int16_t *vbuf[2],
                         const int16_t *abuf[2], uint8_t *dest, int dstW,
                         int yalpha, int uvalpha, int y)

    int hasAlpha = abuf && abuf[0] && abuf[1];
    const int16_t *buf0 = buf[0], *buf1 = buf[1],
                  *abuf0 = hasAlpha ? abuf[0] : NULL,
                  *abuf1 = hasAlpha ? abuf[1] : NULL;
    int yalpha1 = 4096 - yalpha;

    for (i = 0; i < dstW; i++) {
        int Y = (buf0[i] * yalpha1 + buf1[i] * yalpha) >> 19;

        Y = av_clip_uint8(Y);

            A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha) >> 19;
            A = av_clip_uint8(A);

        dest[i * 2 + 1] = hasAlpha ? A : 255;

                         const int16_t **lumSrc, int lumFilterSize,
                         const int16_t *chrFilter, const int16_t **chrUSrc,
                         const int16_t **chrVSrc, int chrFilterSize,
                         const int16_t **alpSrc, uint8_t *dest, int dstW, int y)

    int hasAlpha = !!alpSrc;

    for (i = 0; i < dstW; i++) {

        int Y = 1 << 18, A = 1 << 18;

        for (j = 0; j < lumFilterSize; j++)
            Y += lumSrc[j][i] * lumFilter[j];

        Y = av_clip_uint8(Y);

            for (j = 0; j < lumFilterSize; j++)
                A += alpSrc[j][i] * lumFilter[j];

            A = av_clip_uint8(A);

        dest[2 * i + 1] = hasAlpha ? A : 255;

                               const int16_t **_lumSrc, int lumFilterSize,
                               const int16_t *chrFilter, const int16_t **_chrUSrc,
                               const int16_t **_chrVSrc, int chrFilterSize,
                               const int16_t **_alpSrc, uint8_t *dest, int dstW, int y)

                  **chrUSrc = (const int32_t **) _chrUSrc,
                  **chrVSrc = (const int32_t **) _chrVSrc,
                  **alpSrc = (const int32_t **) _alpSrc;
    int hasAlpha = !!alpSrc;

    for (i = 0; i < dstW; i++) {
        int Y = 1 << 14, U = 1 << 14;
        int V = 1 << 14, A = 1 << 14;

        for (j = 0; j < lumFilterSize; j++)
            Y += lumSrc[j][i] * (unsigned)lumFilter[j];

        for (j = 0; j < chrFilterSize; j++)
            U += chrUSrc[j][i] * (unsigned)chrFilter[j];

        for (j = 0; j < chrFilterSize; j++)
            V += chrVSrc[j][i] * (unsigned)chrFilter[j];

            for (j = 0; j < lumFilterSize; j++)
                A += alpSrc[j][i] * (unsigned)lumFilter[j];

        Y = 0x8000 + av_clip_int16(Y >> 15);
        U = 0x8000 + av_clip_int16(U >> 15);
        V = 0x8000 + av_clip_int16(V >> 15);
        A = 0x8000 + av_clip_int16(A >> 15);

        AV_WL16(dest + 8 * i, hasAlpha ? A : 65535);
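/* AYUV64LE packing: each accumulated component is shifted down to 16 bits,
 * clipped with av_clip_int16 and biased by 0x8000 so it lands in the unsigned
 * 16-bit range, then stored little-endian; a missing alpha plane becomes
 * 65535 (fully opaque). */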
    } else if (is16BPS(dstFormat)) {
        *yuv2planeX = isBE(dstFormat) ? yuv2planeX_16BE_c : yuv2planeX_16LE_c;
        *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_16BE_c : yuv2plane1_16LE_c;

    } else if (isNBPS(dstFormat)) {
        if (desc->comp[0].depth == 9) {
            *yuv2planeX = isBE(dstFormat) ? yuv2planeX_9BE_c : yuv2planeX_9LE_c;
            *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_9BE_c : yuv2plane1_9LE_c;
        } else if (desc->comp[0].depth == 10) {
            *yuv2planeX = isBE(dstFormat) ? yuv2planeX_10BE_c : yuv2planeX_10LE_c;
            *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_10BE_c : yuv2plane1_10LE_c;
        } else if (desc->comp[0].depth == 12) {
            *yuv2planeX = isBE(dstFormat) ? yuv2planeX_12BE_c : yuv2planeX_12LE_c;
            *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_12BE_c : yuv2plane1_12LE_c;
        } else if (desc->comp[0].depth == 14) {
            *yuv2planeX = isBE(dstFormat) ? yuv2planeX_14BE_c : yuv2planeX_14LE_c;
            *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_14BE_c : yuv2plane1_14LE_c;

        *yuv2plane1 = yuv2plane1_floatBE_c;

        *yuv2plane1 = yuv2plane1_floatLE_c;
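/* Plane-writer selection: 16-bit and 9..14-bit formats pick the big- or
 * little-endian template instantiation that matches isBE(dstFormat); the
 * float path likewise chooses between the BE and LE float writers. */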
    switch (dstFormat) {

        *yuv2packedX = yuv2rgba32_full_X_c;
        *yuv2packed2 = yuv2rgba32_full_2_c;
        *yuv2packed1 = yuv2rgba32_full_1_c;

#if CONFIG_SWSCALE_ALPHA

        *yuv2packedX = yuv2rgba32_full_X_c;
        *yuv2packed2 = yuv2rgba32_full_2_c;
        *yuv2packed1 = yuv2rgba32_full_1_c;

        *yuv2packedX = yuv2rgbx32_full_X_c;
        *yuv2packed2 = yuv2rgbx32_full_2_c;
        *yuv2packed1 = yuv2rgbx32_full_1_c;

        *yuv2packedX = yuv2argb32_full_X_c;
        *yuv2packed2 = yuv2argb32_full_2_c;
        *yuv2packed1 = yuv2argb32_full_1_c;

#if CONFIG_SWSCALE_ALPHA

        *yuv2packedX = yuv2argb32_full_X_c;
        *yuv2packed2 = yuv2argb32_full_2_c;
        *yuv2packed1 = yuv2argb32_full_1_c;

        *yuv2packedX = yuv2xrgb32_full_X_c;
        *yuv2packed2 = yuv2xrgb32_full_2_c;
        *yuv2packed1 = yuv2xrgb32_full_1_c;

        *yuv2packedX = yuv2bgra32_full_X_c;
        *yuv2packed2 = yuv2bgra32_full_2_c;
        *yuv2packed1 = yuv2bgra32_full_1_c;

#if CONFIG_SWSCALE_ALPHA

        *yuv2packedX = yuv2bgra32_full_X_c;
        *yuv2packed2 = yuv2bgra32_full_2_c;
        *yuv2packed1 = yuv2bgra32_full_1_c;

        *yuv2packedX = yuv2bgrx32_full_X_c;
        *yuv2packed2 = yuv2bgrx32_full_2_c;
        *yuv2packed1 = yuv2bgrx32_full_1_c;

        *yuv2packedX = yuv2abgr32_full_X_c;
        *yuv2packed2 = yuv2abgr32_full_2_c;
        *yuv2packed1 = yuv2abgr32_full_1_c;

#if CONFIG_SWSCALE_ALPHA

        *yuv2packedX = yuv2abgr32_full_X_c;
        *yuv2packed2 = yuv2abgr32_full_2_c;
        *yuv2packed1 = yuv2abgr32_full_1_c;

        *yuv2packedX = yuv2xbgr32_full_X_c;
        *yuv2packed2 = yuv2xbgr32_full_2_c;
        *yuv2packed1 = yuv2xbgr32_full_1_c;

#if CONFIG_SWSCALE_ALPHA

        *yuv2packedX = yuv2rgba64le_full_X_c;
        *yuv2packed2 = yuv2rgba64le_full_2_c;
        *yuv2packed1 = yuv2rgba64le_full_1_c;

        *yuv2packedX = yuv2rgbx64le_full_X_c;
        *yuv2packed2 = yuv2rgbx64le_full_2_c;
        *yuv2packed1 = yuv2rgbx64le_full_1_c;

#if CONFIG_SWSCALE_ALPHA

        *yuv2packedX = yuv2rgba64be_full_X_c;
        *yuv2packed2 = yuv2rgba64be_full_2_c;
        *yuv2packed1 = yuv2rgba64be_full_1_c;

        *yuv2packedX = yuv2rgbx64be_full_X_c;
        *yuv2packed2 = yuv2rgbx64be_full_2_c;
        *yuv2packed1 = yuv2rgbx64be_full_1_c;

#if CONFIG_SWSCALE_ALPHA

        *yuv2packedX = yuv2bgra64le_full_X_c;
        *yuv2packed2 = yuv2bgra64le_full_2_c;
        *yuv2packed1 = yuv2bgra64le_full_1_c;

        *yuv2packedX = yuv2bgrx64le_full_X_c;
        *yuv2packed2 = yuv2bgrx64le_full_2_c;
        *yuv2packed1 = yuv2bgrx64le_full_1_c;

#if CONFIG_SWSCALE_ALPHA

        *yuv2packedX = yuv2bgra64be_full_X_c;
        *yuv2packed2 = yuv2bgra64be_full_2_c;
        *yuv2packed1 = yuv2bgra64be_full_1_c;

        *yuv2packedX = yuv2bgrx64be_full_X_c;
        *yuv2packed2 = yuv2bgrx64be_full_2_c;
        *yuv2packed1 = yuv2bgrx64be_full_1_c;

        *yuv2packedX = yuv2rgb24_full_X_c;
        *yuv2packed2 = yuv2rgb24_full_2_c;
        *yuv2packed1 = yuv2rgb24_full_1_c;

        *yuv2packedX = yuv2bgr24_full_X_c;
        *yuv2packed2 = yuv2bgr24_full_2_c;
        *yuv2packed1 = yuv2bgr24_full_1_c;

        *yuv2packedX = yuv2rgb48le_full_X_c;
        *yuv2packed2 = yuv2rgb48le_full_2_c;
        *yuv2packed1 = yuv2rgb48le_full_1_c;

        *yuv2packedX = yuv2bgr48le_full_X_c;
        *yuv2packed2 = yuv2bgr48le_full_2_c;
        *yuv2packed1 = yuv2bgr48le_full_1_c;

        *yuv2packedX = yuv2rgb48be_full_X_c;
        *yuv2packed2 = yuv2rgb48be_full_2_c;
        *yuv2packed1 = yuv2rgb48be_full_1_c;

        *yuv2packedX = yuv2bgr48be_full_X_c;
        *yuv2packed2 = yuv2bgr48be_full_2_c;
        *yuv2packed1 = yuv2bgr48be_full_1_c;

        *yuv2packedX = yuv2bgr4_byte_full_X_c;
        *yuv2packed2 = yuv2bgr4_byte_full_2_c;
        *yuv2packed1 = yuv2bgr4_byte_full_1_c;

        *yuv2packedX = yuv2rgb4_byte_full_X_c;
        *yuv2packed2 = yuv2rgb4_byte_full_2_c;
        *yuv2packed1 = yuv2rgb4_byte_full_1_c;

        *yuv2packedX = yuv2bgr8_full_X_c;
        *yuv2packed2 = yuv2bgr8_full_2_c;
        *yuv2packed1 = yuv2bgr8_full_1_c;

        *yuv2packedX = yuv2rgb8_full_X_c;
        *yuv2packed2 = yuv2rgb8_full_2_c;
        *yuv2packed1 = yuv2rgb8_full_1_c;

    if (!*yuv2packedX && !*yuv2anyX)
    switch (dstFormat) {

#if CONFIG_SWSCALE_ALPHA

        *yuv2packed1 = yuv2rgba64le_1_c;
        *yuv2packed2 = yuv2rgba64le_2_c;
        *yuv2packedX = yuv2rgba64le_X_c;

        *yuv2packed1 = yuv2rgbx64le_1_c;
        *yuv2packed2 = yuv2rgbx64le_2_c;
        *yuv2packedX = yuv2rgbx64le_X_c;

#if CONFIG_SWSCALE_ALPHA

        *yuv2packed1 = yuv2rgba64be_1_c;
        *yuv2packed2 = yuv2rgba64be_2_c;
        *yuv2packedX = yuv2rgba64be_X_c;

        *yuv2packed1 = yuv2rgbx64be_1_c;
        *yuv2packed2 = yuv2rgbx64be_2_c;
        *yuv2packedX = yuv2rgbx64be_X_c;

#if CONFIG_SWSCALE_ALPHA

        *yuv2packed1 = yuv2bgra64le_1_c;
        *yuv2packed2 = yuv2bgra64le_2_c;
        *yuv2packedX = yuv2bgra64le_X_c;

        *yuv2packed1 = yuv2bgrx64le_1_c;
        *yuv2packed2 = yuv2bgrx64le_2_c;
        *yuv2packedX = yuv2bgrx64le_X_c;

#if CONFIG_SWSCALE_ALPHA

        *yuv2packed1 = yuv2bgra64be_1_c;
        *yuv2packed2 = yuv2bgra64be_2_c;
        *yuv2packedX = yuv2bgra64be_X_c;

        *yuv2packed1 = yuv2bgrx64be_1_c;
        *yuv2packed2 = yuv2bgrx64be_2_c;
        *yuv2packedX = yuv2bgrx64be_X_c;

        *yuv2packed1 = yuv2rgb48le_1_c;
        *yuv2packed2 = yuv2rgb48le_2_c;
        *yuv2packedX = yuv2rgb48le_X_c;

        *yuv2packed1 = yuv2rgb48be_1_c;
        *yuv2packed2 = yuv2rgb48be_2_c;
        *yuv2packedX = yuv2rgb48be_X_c;

        *yuv2packed1 = yuv2bgr48le_1_c;
        *yuv2packed2 = yuv2bgr48le_2_c;
        *yuv2packedX = yuv2bgr48le_X_c;

        *yuv2packed1 = yuv2bgr48be_1_c;
        *yuv2packed2 = yuv2bgr48be_2_c;
        *yuv2packedX = yuv2bgr48be_X_c;

        *yuv2packed1 = yuv2rgb32_1_c;
        *yuv2packed2 = yuv2rgb32_2_c;
        *yuv2packedX = yuv2rgb32_X_c;

#if CONFIG_SWSCALE_ALPHA

        *yuv2packed1 = yuv2rgba32_1_c;
        *yuv2packed2 = yuv2rgba32_2_c;
        *yuv2packedX = yuv2rgba32_X_c;

        *yuv2packed1 = yuv2rgbx32_1_c;
        *yuv2packed2 = yuv2rgbx32_2_c;
        *yuv2packedX = yuv2rgbx32_X_c;

        *yuv2packed1 = yuv2rgb32_1_1_c;
        *yuv2packed2 = yuv2rgb32_1_2_c;
        *yuv2packedX = yuv2rgb32_1_X_c;

#if CONFIG_SWSCALE_ALPHA

        *yuv2packed1 = yuv2rgba32_1_1_c;
        *yuv2packed2 = yuv2rgba32_1_2_c;
        *yuv2packedX = yuv2rgba32_1_X_c;

        *yuv2packed1 = yuv2rgbx32_1_1_c;
        *yuv2packed2 = yuv2rgbx32_1_2_c;
        *yuv2packedX = yuv2rgbx32_1_X_c;

        *yuv2packed1 = yuv2rgb24_1_c;
        *yuv2packed2 = yuv2rgb24_2_c;
        *yuv2packedX = yuv2rgb24_X_c;

        *yuv2packed1 = yuv2bgr24_1_c;
        *yuv2packed2 = yuv2bgr24_2_c;
        *yuv2packedX = yuv2bgr24_X_c;

        *yuv2packed1 = yuv2rgb16_1_c;
        *yuv2packed2 = yuv2rgb16_2_c;
        *yuv2packedX = yuv2rgb16_X_c;

        *yuv2packed1 = yuv2rgb15_1_c;
        *yuv2packed2 = yuv2rgb15_2_c;
        *yuv2packedX = yuv2rgb15_X_c;

        *yuv2packed1 = yuv2rgb12_1_c;
        *yuv2packed2 = yuv2rgb12_2_c;
        *yuv2packedX = yuv2rgb12_X_c;

        *yuv2packed1 = yuv2rgb8_1_c;
        *yuv2packed2 = yuv2rgb8_2_c;
        *yuv2packedX = yuv2rgb8_X_c;

        *yuv2packed1 = yuv2rgb4_1_c;
        *yuv2packed2 = yuv2rgb4_2_c;
        *yuv2packedX = yuv2rgb4_X_c;

        *yuv2packed1 = yuv2rgb4b_1_c;
        *yuv2packed2 = yuv2rgb4b_2_c;
        *yuv2packedX = yuv2rgb4b_X_c;

    switch (dstFormat) {

        *yuv2packed1 = yuv2monowhite_1_c;
        *yuv2packed2 = yuv2monowhite_2_c;
        *yuv2packedX = yuv2monowhite_X_c;

        *yuv2packed1 = yuv2monoblack_1_c;
        *yuv2packed2 = yuv2monoblack_2_c;
        *yuv2packedX = yuv2monoblack_X_c;

        *yuv2packed1 = yuv2yuyv422_1_c;
        *yuv2packed2 = yuv2yuyv422_2_c;
        *yuv2packedX = yuv2yuyv422_X_c;

        *yuv2packed1 = yuv2yvyu422_1_c;
        *yuv2packed2 = yuv2yvyu422_2_c;
        *yuv2packedX = yuv2yvyu422_X_c;

        *yuv2packed1 = yuv2uyvy422_1_c;
        *yuv2packed2 = yuv2uyvy422_2_c;
        *yuv2packedX = yuv2uyvy422_X_c;

        *yuv2packed1 = yuv2ya16le_1_c;
        *yuv2packed2 = yuv2ya16le_2_c;
        *yuv2packedX = yuv2ya16le_X_c;

        *yuv2packed1 = yuv2ya16be_1_c;
        *yuv2packed2 = yuv2ya16be_2_c;
        *yuv2packedX = yuv2ya16be_X_c;