FFmpeg
output.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2001-2012 Michael Niedermayer <michaelni@gmx.at>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <math.h>
22 #include <stdint.h>
23 #include <stdio.h>
24 #include <string.h>
25 
26 #include "libavutil/attributes.h"
27 #include "libavutil/avutil.h"
28 #include "libavutil/avassert.h"
29 #include "libavutil/bswap.h"
30 #include "libavutil/intreadwrite.h"
31 #include "libavutil/mathematics.h"
32 #include "libavutil/mem_internal.h"
33 #include "libavutil/pixdesc.h"
34 #include "config.h"
35 #include "rgb2rgb.h"
36 #include "swscale.h"
37 #include "swscale_internal.h"
38 
/* 2x2 ordered-dither threshold matrix (values 0..3) tiled across 8 columns;
 * the first row is repeated at the end so rows can be read at an offset
 * without wrapping. */
DECLARE_ALIGNED(8, const uint8_t, ff_dither_2x2_4)[][8] = {
{ 1, 3, 1, 3, 1, 3, 1, 3, },
{ 2, 0, 2, 0, 2, 0, 2, 0, },
{ 1, 3, 1, 3, 1, 3, 1, 3, },
};
44 
/* 2x2 ordered-dither matrix scaled to 0..7 range (coarser quantization);
 * first row duplicated at the end, as in ff_dither_2x2_4. */
DECLARE_ALIGNED(8, const uint8_t, ff_dither_2x2_8)[][8] = {
{ 6, 2, 6, 2, 6, 2, 6, 2, },
{ 0, 4, 0, 4, 0, 4, 0, 4, },
{ 6, 2, 6, 2, 6, 2, 6, 2, },
};
50 
/* 4x4 ordered-dither matrix (values 0..15) tiled twice horizontally;
 * first row duplicated at the end for offset access. */
DECLARE_ALIGNED(8, const uint8_t, ff_dither_4x4_16)[][8] = {
{ 8, 4, 11, 7, 8, 4, 11, 7, },
{ 2, 14, 1, 13, 2, 14, 1, 13, },
{ 10, 6, 9, 5, 10, 6, 9, 5, },
{ 0, 12, 3, 15, 0, 12, 3, 15, },
{ 8, 4, 11, 7, 8, 4, 11, 7, },
};
58 
/* 8x8 ordered-dither matrix with 32 levels (values 0..31);
 * first row duplicated at the end for offset access. */
DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_32)[][8] = {
{ 17, 9, 23, 15, 16, 8, 22, 14, },
{ 5, 29, 3, 27, 4, 28, 2, 26, },
{ 21, 13, 19, 11, 20, 12, 18, 10, },
{ 0, 24, 6, 30, 1, 25, 7, 31, },
{ 16, 8, 22, 14, 17, 9, 23, 15, },
{ 4, 28, 2, 26, 5, 29, 3, 27, },
{ 20, 12, 18, 10, 21, 13, 19, 11, },
{ 1, 25, 7, 31, 0, 24, 6, 30, },
{ 17, 9, 23, 15, 16, 8, 22, 14, },
};
70 
/* 8x8 ordered-dither matrix with 73 levels (values 0..72);
 * first row duplicated at the end for offset access. */
DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_73)[][8] = {
{ 0, 55, 14, 68, 3, 58, 17, 72, },
{ 37, 18, 50, 32, 40, 22, 54, 35, },
{ 9, 64, 5, 59, 13, 67, 8, 63, },
{ 46, 27, 41, 23, 49, 31, 44, 26, },
{ 2, 57, 16, 71, 1, 56, 15, 70, },
{ 39, 21, 52, 34, 38, 19, 51, 33, },
{ 11, 66, 7, 62, 10, 65, 6, 60, },
{ 48, 30, 43, 25, 47, 29, 42, 24, },
{ 0, 55, 14, 68, 3, 58, 17, 72, },
};
82 
/* 8x8 dither matrix with thresholds spanning 0..~220, used by the
 * monochrome output path (see the 220-step quantization there). Only the
 * first variant is compiled (#if 1); the alternatives below are kept as
 * compile-time-selectable tables that attempt gamma correction
 * (1.5 / 2.0 / 2.5 per their comments). */
#if 1
DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[][8] = {
{117, 62, 158, 103, 113, 58, 155, 100, },
{ 34, 199, 21, 186, 31, 196, 17, 182, },
{144, 89, 131, 76, 141, 86, 127, 72, },
{ 0, 165, 41, 206, 10, 175, 52, 217, },
{110, 55, 151, 96, 120, 65, 162, 107, },
{ 28, 193, 14, 179, 38, 203, 24, 189, },
{138, 83, 124, 69, 148, 93, 134, 79, },
{ 7, 172, 48, 213, 3, 168, 45, 210, },
{117, 62, 158, 103, 113, 58, 155, 100, },
};
#elif 1
// tries to correct a gamma of 1.5
DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[][8] = {
{ 0, 143, 18, 200, 2, 156, 25, 215, },
{ 78, 28, 125, 64, 89, 36, 138, 74, },
{ 10, 180, 3, 161, 16, 195, 8, 175, },
{109, 51, 93, 38, 121, 60, 105, 47, },
{ 1, 152, 23, 210, 0, 147, 20, 205, },
{ 85, 33, 134, 71, 81, 30, 130, 67, },
{ 14, 190, 6, 171, 12, 185, 5, 166, },
{117, 57, 101, 44, 113, 54, 97, 41, },
{ 0, 143, 18, 200, 2, 156, 25, 215, },
};
#elif 1
// tries to correct a gamma of 2.0
DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[][8] = {
{ 0, 124, 8, 193, 0, 140, 12, 213, },
{ 55, 14, 104, 42, 66, 19, 119, 52, },
{ 3, 168, 1, 145, 6, 187, 3, 162, },
{ 86, 31, 70, 21, 99, 39, 82, 28, },
{ 0, 134, 11, 206, 0, 129, 9, 200, },
{ 62, 17, 114, 48, 58, 16, 109, 45, },
{ 5, 181, 2, 157, 4, 175, 1, 151, },
{ 95, 36, 78, 26, 90, 34, 74, 24, },
{ 0, 124, 8, 193, 0, 140, 12, 213, },
};
#else
// tries to correct a gamma of 2.5
DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[][8] = {
{ 0, 107, 3, 187, 0, 125, 6, 212, },
{ 39, 7, 86, 28, 49, 11, 102, 36, },
{ 1, 158, 0, 131, 3, 180, 1, 151, },
{ 68, 19, 52, 12, 81, 25, 64, 17, },
{ 0, 119, 5, 203, 0, 113, 4, 195, },
{ 45, 9, 96, 33, 42, 8, 91, 30, },
{ 2, 172, 1, 144, 2, 165, 0, 137, },
{ 77, 23, 60, 15, 72, 21, 56, 14, },
{ 0, 107, 3, 187, 0, 125, 6, 212, },
};
#endif
135 
/* Token-pasting helpers: IS_BE(LE) -> 0, IS_BE(BE) -> 1, so endianness
 * suffixes in generated function names can select a compile-time flag. */
#define IS_BE_LE 0
#define IS_BE_BE 1
/* ENDIAN_IDENTIFIER needs to be "BE" or "LE". */
#define IS_BE(ENDIAN_IDENTIFIER) IS_BE_ ## ENDIAN_IDENTIFIER

/* Store one 16-bit sample with the requested byte order. Relies on the
 * surrounding scope providing `big_endian` and `shift`; `signedness` is
 * pasted to pick av_clip_uint16 or av_clip_int16, and `bias` is added
 * after clipping (used to re-center signed values). */
#define output_pixel(pos, val, bias, signedness) \
    if (big_endian) { \
        AV_WB16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
    } else { \
        AV_WL16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
    }
147 
148 static av_always_inline void
149 yuv2plane1_16_c_template(const int32_t *src, uint16_t *dest, int dstW,
150  int big_endian, int output_bits)
151 {
152  int i;
153  int shift = 3;
154  av_assert0(output_bits == 16);
155 
156  for (i = 0; i < dstW; i++) {
157  int val = src[i] + (1 << (shift - 1));
158  output_pixel(&dest[i], val, 0, uint);
159  }
160 }
161 
/*
 * Vertically filter one 16 bpc plane: for each output pixel, accumulate
 * filterSize 32-bit source taps weighted by 16-bit coefficients, then
 * shift down by 15 and store with the requested endianness.
 */
static av_always_inline void
yuv2planeX_16_c_template(const int16_t *filter, int filterSize,
                         const int32_t **src, uint16_t *dest, int dstW,
                         int big_endian, int output_bits)
{
    int i;
    int shift = 15;
    av_assert0(output_bits == 16);

    for (i = 0; i < dstW; i++) {
        int val = 1 << (shift - 1); /* rounding term */
        int j;

        /* range of val is [0,0x7FFFFFFF], so 31 bits, but with lanczos/spline
         * filters (or anything with negative coeffs, the range can be slightly
         * wider in both directions. To account for this overflow, we subtract
         * a constant so it always fits in the signed range (assuming a
         * reasonable filterSize), and re-add that at the end. */
        val -= 0x40000000;
        for (j = 0; j < filterSize; j++)
            val += src[j][i] * (unsigned)filter[j];

        /* the 0x8000 bias re-adds (0x40000000 >> 15) after the shift inside
         * output_pixel; signed clipping handles the widened range */
        output_pixel(&dest[i], val, 0x8000, int);
    }
}
187 
/*
 * Vertically filter and interleave chroma for 16 bpc semi-planar (NV-style)
 * output: U and V are filtered independently and stored interleaved as
 * dest[2i] = U, dest[2i+1] = V. The int16_t** chroma pointers are casts:
 * in the 16-bit pipeline they actually hold 32-bit samples.
 */
static av_always_inline void
yuv2nv12cX_16_c_template(int big_endian, const uint8_t *chrDither,
                         const int16_t *chrFilter, int chrFilterSize,
                         const int16_t **chrUSrc, const int16_t **chrVSrc,
                         uint8_t *dest8, int chrDstW, int output_bits)
{
    uint16_t *dest = (uint16_t*)dest8;
    const int32_t **uSrc = (const int32_t **)chrUSrc;
    const int32_t **vSrc = (const int32_t **)chrVSrc;
    int shift = 15;
    int i, j;
    av_assert0(output_bits == 16);

    for (i = 0; i < chrDstW; i++) {
        int u = 1 << (shift - 1); /* rounding terms */
        int v = 1 << (shift - 1);

        /* See yuv2planeX_16_c_template for details. */
        u -= 0x40000000;
        v -= 0x40000000;
        for (j = 0; j < chrFilterSize; j++) {
            u += uSrc[j][i] * (unsigned)chrFilter[j];
            v += vSrc[j][i] * (unsigned)chrFilter[j];
        }

        output_pixel(&dest[2*i] , u, 0x8000, int);
        output_pixel(&dest[2*i+1], v, 0x8000, int);
    }
}
217 
218 static av_always_inline void
219 yuv2plane1_float_c_template(const int32_t *src, float *dest, int dstW)
220 {
221  static const int big_endian = HAVE_BIGENDIAN;
222  static const int shift = 3;
223  static const float float_mult = 1.0f / 65535.0f;
224  int i, val;
225  uint16_t val_uint;
226 
227  for (i = 0; i < dstW; ++i){
228  val = src[i] + (1 << (shift - 1));
229  output_pixel(&val_uint, val, 0, uint);
230  dest[i] = float_mult * (float)val_uint;
231  }
232 }
233 
234 static av_always_inline void
235 yuv2plane1_float_bswap_c_template(const int32_t *src, uint32_t *dest, int dstW)
236 {
237  static const int big_endian = HAVE_BIGENDIAN;
238  static const int shift = 3;
239  static const float float_mult = 1.0f / 65535.0f;
240  int i, val;
241  uint16_t val_uint;
242 
243  for (i = 0; i < dstW; ++i){
244  val = src[i] + (1 << (shift - 1));
245  output_pixel(&val_uint, val, 0, uint);
246  dest[i] = av_bswap32(av_float2int(float_mult * (float)val_uint));
247  }
248 }
249 
/*
 * Vertically filter one plane and write native-endian floats: accumulate
 * as in yuv2planeX_16_c_template (including the 0x40000000 overflow
 * compensation), clip to 16 bits, then normalize to [0,1].
 */
static av_always_inline void
yuv2planeX_float_c_template(const int16_t *filter, int filterSize, const int32_t **src,
                            float *dest, int dstW)
{
    static const int big_endian = HAVE_BIGENDIAN;
    static const int shift = 15;
    static const float float_mult = 1.0f / 65535.0f;
    int i, j, val;
    uint16_t val_uint;

    for (i = 0; i < dstW; ++i){
        /* rounding term minus the overflow-compensation constant,
         * see yuv2planeX_16_c_template */
        val = (1 << (shift - 1)) - 0x40000000;
        for (j = 0; j < filterSize; ++j){
            val += src[j][i] * (unsigned)filter[j];
        }
        output_pixel(&val_uint, val, 0x8000, int);
        dest[i] = float_mult * (float)val_uint;
    }
}
269 
/*
 * Same as yuv2planeX_float_c_template, but stores the float byte-swapped
 * (opposite-endian float output); written through uint32_t to carry the
 * swapped bit pattern.
 */
static av_always_inline void
yuv2planeX_float_bswap_c_template(const int16_t *filter, int filterSize, const int32_t **src,
                                  uint32_t *dest, int dstW)
{
    static const int big_endian = HAVE_BIGENDIAN;
    static const int shift = 15;
    static const float float_mult = 1.0f / 65535.0f;
    int i, j, val;
    uint16_t val_uint;

    for (i = 0; i < dstW; ++i){
        /* rounding term minus the overflow-compensation constant,
         * see yuv2planeX_16_c_template */
        val = (1 << (shift - 1)) - 0x40000000;
        for (j = 0; j < filterSize; ++j){
            val += src[j][i] * (unsigned)filter[j];
        }
        output_pixel(&val_uint, val, 0x8000, int);
        dest[i] = av_bswap32(av_float2int(float_mult * (float)val_uint));
    }
}
289 
/* Generate the public-signature float writers (yuv2plane1_floatBE_c etc.)
 * by casting the generic int16_t buffers to the types the float templates
 * expect; `dither`/`offset` are unused by the float path. */
#define yuv2plane1_float(template, dest_type, BE_LE) \
static void yuv2plane1_float ## BE_LE ## _c(const int16_t *src, uint8_t *dest, int dstW, \
                                            const uint8_t *dither, int offset) \
{ \
    template((const int32_t *)src, (dest_type *)dest, dstW); \
}

#define yuv2planeX_float(template, dest_type, BE_LE) \
static void yuv2planeX_float ## BE_LE ## _c(const int16_t *filter, int filterSize, \
                                            const int16_t **src, uint8_t *dest, int dstW, \
                                            const uint8_t *dither, int offset) \
{ \
    template(filter, filterSize, (const int32_t **)src, (dest_type *)dest, dstW); \
}
304 
305 #if HAVE_BIGENDIAN
310 #else
315 #endif
316 
#undef output_pixel

/* Store one sample for the 9..14-bit paths: clip to `output_bits` unsigned
 * bits after shifting, then write 16 bits in the requested byte order.
 * Relies on `big_endian`, `shift` and `output_bits` from the caller scope. */
#define output_pixel(pos, val) \
    if (big_endian) { \
        AV_WB16(pos, av_clip_uintp2(val >> shift, output_bits)); \
    } else { \
        AV_WL16(pos, av_clip_uintp2(val >> shift, output_bits)); \
    }
325 
326 static av_always_inline void
327 yuv2plane1_10_c_template(const int16_t *src, uint16_t *dest, int dstW,
328  int big_endian, int output_bits)
329 {
330  int i;
331  int shift = 15 - output_bits;
332 
333  for (i = 0; i < dstW; i++) {
334  int val = src[i] + (1 << (shift - 1));
335  output_pixel(&dest[i], val);
336  }
337 }
338 
339 static av_always_inline void
340 yuv2planeX_10_c_template(const int16_t *filter, int filterSize,
341  const int16_t **src, uint16_t *dest, int dstW,
342  int big_endian, int output_bits)
343 {
344  int i;
345  int shift = 11 + 16 - output_bits;
346 
347  for (i = 0; i < dstW; i++) {
348  int val = 1 << (shift - 1);
349  int j;
350 
351  for (j = 0; j < filterSize; j++)
352  val += src[j][i] * filter[j];
353 
354  output_pixel(&dest[i], val);
355  }
356 }
357 
#undef output_pixel

/* Generate the per-depth, per-endianness plane writers with the common
 * yuv2planar1_fn/yuv2planarX_fn signatures, forwarding to either the
 * 10-bit (int16_t) or the 16-bit (int32_t) template. */
#define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t) \
static void yuv2plane1_ ## bits ## BE_LE ## _c(const int16_t *src, \
                              uint8_t *dest, int dstW, \
                              const uint8_t *dither, int offset)\
{ \
    yuv2plane1_ ## template_size ## _c_template((const typeX_t *) src, \
                         (uint16_t *) dest, dstW, is_be, bits); \
}\
static void yuv2planeX_ ## bits ## BE_LE ## _c(const int16_t *filter, int filterSize, \
                              const int16_t **src, uint8_t *dest, int dstW, \
                              const uint8_t *dither, int offset)\
{ \
    yuv2planeX_## template_size ## _c_template(filter, \
                         filterSize, (const typeX_t **) src, \
                         (uint16_t *) dest, dstW, is_be, bits); \
}
376 
/* Instantiate 9/10/12/14-bit writers on the 10-bit template (int16_t
 * intermediates) and 16-bit writers on the 16-bit template (int32_t). */
yuv2NBPS( 9, BE, 1, 10, int16_t)
yuv2NBPS( 9, LE, 0, 10, int16_t)
yuv2NBPS(10, BE, 1, 10, int16_t)
yuv2NBPS(10, LE, 0, 10, int16_t)
yuv2NBPS(12, BE, 1, 10, int16_t)
yuv2NBPS(12, LE, 0, 10, int16_t)
yuv2NBPS(14, BE, 1, 10, int16_t)
yuv2NBPS(14, LE, 0, 10, int16_t)
yuv2NBPS(16, BE, 1, 16, int32_t)
yuv2NBPS(16, LE, 0, 16, int32_t)
387 
388 
/* Little-endian 16 bpc semi-planar chroma writer (yuv2interleavedX_fn). */
static void yuv2nv12cX_16LE_c(enum AVPixelFormat dstFormat, const uint8_t *chrDither,
                              const int16_t *chrFilter, int chrFilterSize,
                              const int16_t **chrUSrc, const int16_t **chrVSrc,
                              uint8_t *dest8, int chrDstW)
{
    yuv2nv12cX_16_c_template(0, chrDither, chrFilter, chrFilterSize, chrUSrc, chrVSrc, dest8, chrDstW, 16);
}
396 
/* Big-endian 16 bpc semi-planar chroma writer (yuv2interleavedX_fn). */
static void yuv2nv12cX_16BE_c(enum AVPixelFormat dstFormat, const uint8_t *chrDither,
                              const int16_t *chrFilter, int chrFilterSize,
                              const int16_t **chrUSrc, const int16_t **chrVSrc,
                              uint8_t *dest8, int chrDstW)
{
    yuv2nv12cX_16_c_template(1, chrDither, chrFilter, chrFilterSize, chrUSrc, chrVSrc, dest8, chrDstW, 16);
}
404 
/* Vertically filter one 8 bpc plane: seed the accumulator with the dither
 * value (aligned below the >>19 output shift), add the weighted taps and
 * clip the result to 8 bits. */
static void yuv2planeX_8_c(const int16_t *filter, int filterSize,
                           const int16_t **src, uint8_t *dest, int dstW,
                           const uint8_t *dither, int offset)
{
    int i, j;

    for (i = 0; i < dstW; i++) {
        int sum = dither[(i + offset) & 7] << 12;

        for (j = 0; j < filterSize; j++)
            sum += src[j][i] * filter[j];

        dest[i] = av_clip_uint8(sum >> 19);
    }
}
419 
/* Unfiltered 8 bpc plane output: add the per-pixel dither value, drop the
 * 7 fractional bits of the intermediate format and clip to 8 bits. */
static void yuv2plane1_8_c(const int16_t *src, uint8_t *dest, int dstW,
                           const uint8_t *dither, int offset)
{
    int i;

    for (i = 0; i < dstW; i++) {
        int sum = src[i] + dither[(i + offset) & 7];
        dest[i] = av_clip_uint8(sum >> 7);
    }
}
429 
/*
 * Vertically filter and interleave 8 bpc chroma for semi-planar output.
 * Normal order stores U then V per pair; for swapped-chroma formats
 * (NV21-style, per isSwappedChroma) the stores are exchanged. The two
 * loops are otherwise identical; keeping them separate avoids a
 * per-pixel branch.
 */
static void yuv2nv12cX_c(enum AVPixelFormat dstFormat, const uint8_t *chrDither,
                         const int16_t *chrFilter, int chrFilterSize,
                         const int16_t **chrUSrc, const int16_t **chrVSrc,
                         uint8_t *dest, int chrDstW)
{
    int i;

    if (!isSwappedChroma(dstFormat))
        for (i=0; i<chrDstW; i++) {
            /* U and V use dither values 3 columns apart to decorrelate */
            int u = chrDither[i & 7] << 12;
            int v = chrDither[(i + 3) & 7] << 12;
            int j;
            for (j=0; j<chrFilterSize; j++) {
                u += chrUSrc[j][i] * chrFilter[j];
                v += chrVSrc[j][i] * chrFilter[j];
            }

            dest[2*i]= av_clip_uint8(u>>19);
            dest[2*i+1]= av_clip_uint8(v>>19);
        }
    else
        for (i=0; i<chrDstW; i++) {
            int u = chrDither[i & 7] << 12;
            int v = chrDither[(i + 3) & 7] << 12;
            int j;
            for (j=0; j<chrFilterSize; j++) {
                u += chrUSrc[j][i] * chrFilter[j];
                v += chrVSrc[j][i] * chrFilter[j];
            }

            /* swapped-chroma formats: V first, then U */
            dest[2*i]= av_clip_uint8(v>>19);
            dest[2*i+1]= av_clip_uint8(u>>19);
        }
}
464 
465 
/* Store one P010/P012-style sample: clip to `output_bits` unsigned bits,
 * then left-align the value in the 16-bit container via `output_shift`.
 * Relies on `big_endian`, `shift`, `output_bits` and `output_shift` from
 * the caller scope. */
#define output_pixel(pos, val) \
    if (big_endian) { \
        AV_WB16(pos, av_clip_uintp2(val >> shift, output_bits) << output_shift); \
    } else { \
        AV_WL16(pos, av_clip_uintp2(val >> shift, output_bits) << output_shift); \
    }
472 
/* Unfiltered luma output for MSB-aligned 10/12-bit formats (P010/P012):
 * round, reduce to `output_bits` and store left-aligned via output_pixel
 * (which reads `big_endian`, `shift`, `output_bits`, `output_shift`). */
static void yuv2p01xl1_c(const int16_t *src,
                         uint16_t *dest, int dstW,
                         int big_endian, int output_bits)
{
    const int shift = 15 - output_bits;
    const int output_shift = 16 - output_bits;
    int i;

    for (i = 0; i < dstW; i++) {
        /* add half the shifted-away range for round-to-nearest */
        int rounded = src[i] + (1 << (shift - 1));
        output_pixel(&dest[i], rounded);
    }
}
486 
/* Vertically filtered luma output for MSB-aligned 10/12-bit formats:
 * accumulate the weighted taps with a rounding bias, then reduce and
 * store left-aligned via output_pixel. */
static void yuv2p01xlX_c(const int16_t *filter, int filterSize,
                         const int16_t **src, uint16_t *dest, int dstW,
                         int big_endian, int output_bits)
{
    const int shift = 11 + 16 - output_bits;
    const int output_shift = 16 - output_bits;
    int i, j;

    for (i = 0; i < dstW; i++) {
        int acc = 1 << (shift - 1); /* rounding term */

        for (j = 0; j < filterSize; j++)
            acc += src[j][i] * filter[j];

        output_pixel(&dest[i], acc);
    }
}
504 
/*
 * Vertically filter and interleave chroma for MSB-aligned 10/12-bit
 * semi-planar formats: U and V accumulated independently and stored as
 * dest[2i] = U, dest[2i+1] = V, left-aligned via output_pixel.
 */
static void yuv2p01xcX_c(int big_endian, const uint8_t *chrDither,
                         const int16_t *chrFilter, int chrFilterSize,
                         const int16_t **chrUSrc, const int16_t **chrVSrc,
                         uint8_t *dest8, int chrDstW, int output_bits)
{
    uint16_t *dest = (uint16_t*)dest8;
    int i, j;
    int shift = 11 + 16 - output_bits;
    int output_shift = 16 - output_bits;

    for (i = 0; i < chrDstW; i++) {
        int u = 1 << (shift - 1); /* rounding terms */
        int v = 1 << (shift - 1);

        for (j = 0; j < chrFilterSize; j++) {
            u += chrUSrc[j][i] * chrFilter[j];
            v += chrVSrc[j][i] * chrFilter[j];
        }

        output_pixel(&dest[2*i] , u);
        output_pixel(&dest[2*i+1], v);
    }
}
528 
#undef output_pixel

/* Generate the six public writers for one MSB-aligned format family
 * (e.g. P010): LE/BE variants of the unfiltered luma (l1), filtered luma
 * (lX) and interleaved chroma (cX) entry points, all forwarding to the
 * yuv2p01x* helpers above with the endianness flag and bit depth baked in. */
#define yuv2p01x_wrapper(bits) \
    static void yuv2p0 ## bits ## l1_LE_c(const int16_t *src, \
                                          uint8_t *dest, int dstW, \
                                          const uint8_t *dither, int offset) \
    { \
        yuv2p01xl1_c(src, (uint16_t*)dest, dstW, 0, bits); \
    } \
 \
    static void yuv2p0 ## bits ## l1_BE_c(const int16_t *src, \
                                          uint8_t *dest, int dstW, \
                                          const uint8_t *dither, int offset) \
    { \
        yuv2p01xl1_c(src, (uint16_t*)dest, dstW, 1, bits); \
    } \
 \
    static void yuv2p0 ## bits ## lX_LE_c(const int16_t *filter, \
                                          int filterSize, const int16_t **src, \
                                          uint8_t *dest, int dstW, \
                                          const uint8_t *dither, int offset) \
    { \
        yuv2p01xlX_c(filter, filterSize, src, (uint16_t*)dest, dstW, 0, bits); \
    } \
 \
    static void yuv2p0 ## bits ## lX_BE_c(const int16_t *filter, \
                                          int filterSize, const int16_t **src, \
                                          uint8_t *dest, int dstW, \
                                          const uint8_t *dither, int offset) \
    { \
        yuv2p01xlX_c(filter, filterSize, src, (uint16_t*)dest, dstW, 1, bits); \
    } \
 \
    static void yuv2p0 ## bits ## cX_LE_c(enum AVPixelFormat dstFormat, \
                                          const uint8_t *chrDither, \
                                          const int16_t *chrFilter, \
                                          int chrFilterSize, \
                                          const int16_t **chrUSrc, \
                                          const int16_t **chrVSrc, \
                                          uint8_t *dest8, int chrDstW) \
    { \
        yuv2p01xcX_c(0, chrDither, chrFilter, chrFilterSize, chrUSrc, chrVSrc, \
                     dest8, chrDstW, bits); \
    } \
 \
    static void yuv2p0 ## bits ## cX_BE_c(enum AVPixelFormat dstFormat, \
                                          const uint8_t *chrDither, \
                                          const int16_t *chrFilter, \
                                          int chrFilterSize, \
                                          const int16_t **chrUSrc, \
                                          const int16_t **chrVSrc, \
                                          uint8_t *dest8, int chrDstW) \
    { \
        yuv2p01xcX_c(1, chrDither, chrFilter, chrFilterSize, chrUSrc, chrVSrc, \
                     dest8, chrDstW, bits); \
    }
585 
588 
/* Shift one thresholded bit into the packed-pixel accumulator; 234 is the
 * cutoff after the dither value has been added. */
#define accumulate_bit(acc, val) \
    acc <<= 1; \
    acc |= (val) >= 234
/* Store 8 packed 1-bit pixels; MONOWHITE is the inverse of MONOBLACK,
 * hence the bitwise complement. */
#define output_pixel(pos, acc) \
    if (target == AV_PIX_FMT_MONOBLACK) { \
        pos = acc; \
    } else { \
        pos = ~acc; \
    }
598 
/*
 * Vertically filter luma and pack it to 1 bpp monochrome, two pixels per
 * iteration. Two dither modes: ordered dithering via ff_dither_8x8_220,
 * or error diffusion (SWS_DITHER_ED) with Floyd-Steinberg-style
 * 7/1/5/3 sixteenths weights threaded through c->dither_error[0].
 * Every 8 accumulated bits are flushed as one output byte.
 */
static av_always_inline void
yuv2mono_X_c_template(SwsContext *c, const int16_t *lumFilter,
                      const int16_t **lumSrc, int lumFilterSize,
                      const int16_t *chrFilter, const int16_t **chrUSrc,
                      const int16_t **chrVSrc, int chrFilterSize,
                      const int16_t **alpSrc, uint8_t *dest, int dstW,
                      int y, enum AVPixelFormat target)
{
    const uint8_t * const d128 = ff_dither_8x8_220[y&7];
    int i;
    unsigned acc = 0; /* packed output bits, MSB first */
    int err = 0;      /* diffused error carried to the next pixel */

    for (i = 0; i < dstW; i += 2) {
        int j;
        int Y1 = 1 << 18; /* rounding terms for the >>19 below */
        int Y2 = 1 << 18;

        for (j = 0; j < lumFilterSize; j++) {
            Y1 += lumSrc[j][i] * lumFilter[j];
            Y2 += lumSrc[j][i+1] * lumFilter[j];
        }
        Y1 >>= 19;
        Y2 >>= 19;
        /* clip only when some value left the 8-bit range */
        if ((Y1 | Y2) & 0x100) {
            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);
        }
        if (c->dither == SWS_DITHER_ED) {
            /* error diffusion: mix in the neighbours' stored errors,
             * threshold at 128, and keep this pixel's quantization error
             * (value minus 220 if the bit was set) for the next row/pixel */
            Y1 += (7*err + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2] + 8 - 256)>>4;
            c->dither_error[0][i] = err;
            acc = 2*acc + (Y1 >= 128);
            Y1 -= 220*(acc&1);

            err = Y2 + ((7*Y1 + 1*c->dither_error[0][i+1] + 5*c->dither_error[0][i+2] + 3*c->dither_error[0][i+3] + 8 - 256)>>4);
            c->dither_error[0][i+1] = Y1;
            acc = 2*acc + (err >= 128);
            err -= 220*(acc&1);
        } else {
            /* ordered dithering against the 220-level matrix */
            accumulate_bit(acc, Y1 + d128[(i + 0) & 7]);
            accumulate_bit(acc, Y2 + d128[(i + 1) & 7]);
        }
        /* flush a full byte every 8 pixels (4 iterations) */
        if ((i & 7) == 6) {
            output_pixel(*dest++, acc);
        }
    }
    c->dither_error[0][i] = err;

    /* flush the trailing partial byte, if any */
    if (i & 6) {
        output_pixel(*dest, acc);
    }
}
651 
/*
 * Two-line bilinear luma blend packed to 1 bpp monochrome: each output
 * value interpolates buf0/buf1 with yalpha. Error-diffusion mode mirrors
 * yuv2mono_X_c_template; otherwise 8 pixels are ordered-dithered and
 * packed per output byte.
 */
static av_always_inline void
yuv2mono_2_c_template(SwsContext *c, const int16_t *buf[2],
                      const int16_t *ubuf[2], const int16_t *vbuf[2],
                      const int16_t *abuf[2], uint8_t *dest, int dstW,
                      int yalpha, int uvalpha, int y,
                      enum AVPixelFormat target)
{
    const int16_t *buf0 = buf[0], *buf1 = buf[1];
    const uint8_t * const d128 = ff_dither_8x8_220[y & 7];
    int yalpha1 = 4096 - yalpha; /* complementary blend weight */
    int i;
    av_assert2(yalpha <= 4096U);

    if (c->dither == SWS_DITHER_ED) {
        int err = 0;
        int acc = 0;
        for (i = 0; i < dstW; i +=2) {
            int Y;

            /* see yuv2mono_X_c_template for the error-diffusion scheme */
            Y = (buf0[i + 0] * yalpha1 + buf1[i + 0] * yalpha) >> 19;
            Y += (7*err + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2] + 8 - 256)>>4;
            c->dither_error[0][i] = err;
            acc = 2*acc + (Y >= 128);
            Y -= 220*(acc&1);

            err = (buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19;
            err += (7*Y + 1*c->dither_error[0][i+1] + 5*c->dither_error[0][i+2] + 3*c->dither_error[0][i+3] + 8 - 256)>>4;
            c->dither_error[0][i+1] = Y;
            acc = 2*acc + (err >= 128);
            err -= 220*(acc&1);

            if ((i & 7) == 6)
                output_pixel(*dest++, acc);
        }
        c->dither_error[0][i] = err;
    } else {
        /* ordered dithering, unrolled to pack one full byte per iteration */
        for (i = 0; i < dstW; i += 8) {
            int Y, acc = 0;

            Y = (buf0[i + 0] * yalpha1 + buf1[i + 0] * yalpha) >> 19;
            accumulate_bit(acc, Y + d128[0]);
            Y = (buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19;
            accumulate_bit(acc, Y + d128[1]);
            Y = (buf0[i + 2] * yalpha1 + buf1[i + 2] * yalpha) >> 19;
            accumulate_bit(acc, Y + d128[2]);
            Y = (buf0[i + 3] * yalpha1 + buf1[i + 3] * yalpha) >> 19;
            accumulate_bit(acc, Y + d128[3]);
            Y = (buf0[i + 4] * yalpha1 + buf1[i + 4] * yalpha) >> 19;
            accumulate_bit(acc, Y + d128[4]);
            Y = (buf0[i + 5] * yalpha1 + buf1[i + 5] * yalpha) >> 19;
            accumulate_bit(acc, Y + d128[5]);
            Y = (buf0[i + 6] * yalpha1 + buf1[i + 6] * yalpha) >> 19;
            accumulate_bit(acc, Y + d128[6]);
            Y = (buf0[i + 7] * yalpha1 + buf1[i + 7] * yalpha) >> 19;
            accumulate_bit(acc, Y + d128[7]);

            output_pixel(*dest++, acc);
        }
    }
}
712 
/*
 * Single-line (unscaled vertical) luma packed to 1 bpp monochrome: each
 * sample is just rounded down from the 15-bit intermediate ((x+64)>>7).
 * Dither modes as in yuv2mono_X_c_template.
 */
static av_always_inline void
yuv2mono_1_c_template(SwsContext *c, const int16_t *buf0,
                      const int16_t *ubuf[2], const int16_t *vbuf[2],
                      const int16_t *abuf0, uint8_t *dest, int dstW,
                      int uvalpha, int y, enum AVPixelFormat target)
{
    const uint8_t * const d128 = ff_dither_8x8_220[y & 7];
    int i;

    if (c->dither == SWS_DITHER_ED) {
        int err = 0;
        int acc = 0;
        for (i = 0; i < dstW; i +=2) {
            int Y;

            /* see yuv2mono_X_c_template for the error-diffusion scheme */
            Y = ((buf0[i + 0] + 64) >> 7);
            Y += (7*err + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2] + 8 - 256)>>4;
            c->dither_error[0][i] = err;
            acc = 2*acc + (Y >= 128);
            Y -= 220*(acc&1);

            err = ((buf0[i + 1] + 64) >> 7);
            err += (7*Y + 1*c->dither_error[0][i+1] + 5*c->dither_error[0][i+2] + 3*c->dither_error[0][i+3] + 8 - 256)>>4;
            c->dither_error[0][i+1] = Y;
            acc = 2*acc + (err >= 128);
            err -= 220*(acc&1);

            if ((i & 7) == 6)
                output_pixel(*dest++, acc);
        }
        c->dither_error[0][i] = err;
    } else {
        /* ordered dithering, one packed byte per iteration */
        for (i = 0; i < dstW; i += 8) {
            int acc = 0;
            accumulate_bit(acc, ((buf0[i + 0] + 64) >> 7) + d128[0]);
            accumulate_bit(acc, ((buf0[i + 1] + 64) >> 7) + d128[1]);
            accumulate_bit(acc, ((buf0[i + 2] + 64) >> 7) + d128[2]);
            accumulate_bit(acc, ((buf0[i + 3] + 64) >> 7) + d128[3]);
            accumulate_bit(acc, ((buf0[i + 4] + 64) >> 7) + d128[4]);
            accumulate_bit(acc, ((buf0[i + 5] + 64) >> 7) + d128[5]);
            accumulate_bit(acc, ((buf0[i + 6] + 64) >> 7) + d128[6]);
            accumulate_bit(acc, ((buf0[i + 7] + 64) >> 7) + d128[7]);

            output_pixel(*dest++, acc);
        }
    }
}
760 
#undef output_pixel
#undef accumulate_bit

/* Generate the three public entry points (_X_c = full vertical filter,
 * _2_c = two-line bilinear, _1_c = single line) for one packed output
 * format, each forwarding to the corresponding _c_template with the
 * AVPixelFormat compile-time constant baked in. */
#define YUV2PACKEDWRAPPER(name, base, ext, fmt) \
static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
                                const int16_t **lumSrc, int lumFilterSize, \
                                const int16_t *chrFilter, const int16_t **chrUSrc, \
                                const int16_t **chrVSrc, int chrFilterSize, \
                                const int16_t **alpSrc, uint8_t *dest, int dstW, \
                                int y) \
{ \
    name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
                                  chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                                  alpSrc, dest, dstW, y, fmt); \
} \
 \
static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf[2], uint8_t *dest, int dstW, \
                                int yalpha, int uvalpha, int y) \
{ \
    name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
                                  dest, dstW, yalpha, uvalpha, y, fmt); \
} \
 \
static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf0, uint8_t *dest, int dstW, \
                                int uvalpha, int y) \
{ \
    name ## base ## _1_c_template(c, buf0, ubuf, vbuf, \
                                  abuf0, dest, dstW, uvalpha, \
                                  y, fmt); \
}
795 
/* Monochrome writers; empty `base` argument reuses the yuv2mono templates. */
YUV2PACKEDWRAPPER(yuv2mono,, white, AV_PIX_FMT_MONOWHITE)
YUV2PACKEDWRAPPER(yuv2mono,, black, AV_PIX_FMT_MONOBLACK)
798 
/* Store one 4-byte group of packed 4:2:2 (two luma samples sharing one
 * U/V pair), with component order selected by the compile-time `target`. */
#define output_pixels(pos, Y1, U, Y2, V) \
    if (target == AV_PIX_FMT_YUYV422) { \
        dest[pos + 0] = Y1; \
        dest[pos + 1] = U; \
        dest[pos + 2] = Y2; \
        dest[pos + 3] = V; \
    } else if (target == AV_PIX_FMT_YVYU422) { \
        dest[pos + 0] = Y1; \
        dest[pos + 1] = V; \
        dest[pos + 2] = Y2; \
        dest[pos + 3] = U; \
    } else { /* AV_PIX_FMT_UYVY422 */ \
        dest[pos + 0] = U; \
        dest[pos + 1] = Y1; \
        dest[pos + 2] = V; \
        dest[pos + 3] = Y2; \
    }
816 
/*
 * Vertically filter luma and chroma and emit packed 4:2:2: two luma
 * samples plus one U/V pair per iteration, component order chosen by
 * `target` via output_pixels.
 */
static av_always_inline void
yuv2422_X_c_template(SwsContext *c, const int16_t *lumFilter,
                     const int16_t **lumSrc, int lumFilterSize,
                     const int16_t *chrFilter, const int16_t **chrUSrc,
                     const int16_t **chrVSrc, int chrFilterSize,
                     const int16_t **alpSrc, uint8_t *dest, int dstW,
                     int y, enum AVPixelFormat target)
{
    int i;

    for (i = 0; i < ((dstW + 1) >> 1); i++) {
        int j;
        int Y1 = 1 << 18; /* rounding terms for the >>19 below */
        int Y2 = 1 << 18;
        int U = 1 << 18;
        int V = 1 << 18;

        for (j = 0; j < lumFilterSize; j++) {
            Y1 += lumSrc[j][i * 2] * lumFilter[j];
            Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
        }
        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];
        }
        Y1 >>= 19;
        Y2 >>= 19;
        U >>= 19;
        V >>= 19;
        /* clip only when some value left the 8-bit range */
        if ((Y1 | Y2 | U | V) & 0x100) {
            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);
            U = av_clip_uint8(U);
            V = av_clip_uint8(V);
        }
        output_pixels(4*i, Y1, U, Y2, V);
    }
}
855 
/*
 * Two-line bilinear blend to packed 4:2:2: luma interpolated with
 * yalpha, chroma with uvalpha (4096 = full weight on the second line).
 */
static av_always_inline void
yuv2422_2_c_template(SwsContext *c, const int16_t *buf[2],
                     const int16_t *ubuf[2], const int16_t *vbuf[2],
                     const int16_t *abuf[2], uint8_t *dest, int dstW,
                     int yalpha, int uvalpha, int y,
                     enum AVPixelFormat target)
{
    const int16_t *buf0 = buf[0], *buf1 = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
    int yalpha1 = 4096 - yalpha;   /* complementary blend weights */
    int uvalpha1 = 4096 - uvalpha;
    int i;
    av_assert2(yalpha <= 4096U);
    av_assert2(uvalpha <= 4096U);

    for (i = 0; i < ((dstW + 1) >> 1); i++) {
        int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19;
        int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19;
        int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19;
        int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19;

        /* clip only when some value left the 8-bit range */
        if ((Y1 | Y2 | U | V) & 0x100) {
            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);
            U = av_clip_uint8(U);
            V = av_clip_uint8(V);
        }

        output_pixels(i * 4, Y1, U, Y2, V);
    }
}
888 
/*
 * Single-line output to packed 4:2:2. When uvalpha < 2048, chroma comes
 * from line 0 only; otherwise the two chroma lines are averaged
 * (uvalpha is effectively treated as exactly 0.5 in that branch).
 */
static av_always_inline void
yuv2422_1_c_template(SwsContext *c, const int16_t *buf0,
                     const int16_t *ubuf[2], const int16_t *vbuf[2],
                     const int16_t *abuf0, uint8_t *dest, int dstW,
                     int uvalpha, int y, enum AVPixelFormat target)
{
    const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
    int i;

    if (uvalpha < 2048) {
        for (i = 0; i < ((dstW + 1) >> 1); i++) {
            /* drop the 7 fractional bits with rounding */
            int Y1 = (buf0[i * 2 ]+64) >> 7;
            int Y2 = (buf0[i * 2 + 1]+64) >> 7;
            int U = (ubuf0[i] +64) >> 7;
            int V = (vbuf0[i] +64) >> 7;

            if ((Y1 | Y2 | U | V) & 0x100) {
                Y1 = av_clip_uint8(Y1);
                Y2 = av_clip_uint8(Y2);
                U = av_clip_uint8(U);
                V = av_clip_uint8(V);
            }

            output_pixels(i * 4, Y1, U, Y2, V);
        }
    } else {
        const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
        for (i = 0; i < ((dstW + 1) >> 1); i++) {
            int Y1 = (buf0[i * 2 ] + 64) >> 7;
            int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
            /* average the two chroma lines (>>8 instead of >>7) */
            int U = (ubuf0[i] + ubuf1[i]+128) >> 8;
            int V = (vbuf0[i] + vbuf1[i]+128) >> 8;

            if ((Y1 | Y2 | U | V) & 0x100) {
                Y1 = av_clip_uint8(Y1);
                Y2 = av_clip_uint8(Y2);
                U = av_clip_uint8(U);
                V = av_clip_uint8(V);
            }

            output_pixels(i * 4, Y1, U, Y2, V);
        }
    }
}
933 
#undef output_pixels

/* Packed 4:2:2 writers for the three component orderings. */
YUV2PACKEDWRAPPER(yuv2, 422, yuyv422, AV_PIX_FMT_YUYV422)
YUV2PACKEDWRAPPER(yuv2, 422, yvyu422, AV_PIX_FMT_YVYU422)
YUV2PACKEDWRAPPER(yuv2, 422, uyvy422, AV_PIX_FMT_UYVY422)
939 
/* For the 16-bit RGB templates: R_B/B_R swap the red and blue accumulators
 * depending on whether `target` is an RGB- or BGR-ordered format. */
#define R_B ((target == AV_PIX_FMT_RGB48LE || target == AV_PIX_FMT_RGB48BE || target == AV_PIX_FMT_RGBA64LE || target == AV_PIX_FMT_RGBA64BE) ? R : B)
#define B_R ((target == AV_PIX_FMT_RGB48LE || target == AV_PIX_FMT_RGB48BE || target == AV_PIX_FMT_RGBA64LE || target == AV_PIX_FMT_RGBA64BE) ? B : R)
/* Store one already-clipped 16-bit component in the requested byte order;
 * `is_be` comes from the caller scope. */
#define output_pixel(pos, val) \
    if (is_be) { \
        AV_WB16(pos, val); \
    } else { \
        AV_WL16(pos, val); \
    }
948 
/*
 * Vertically filter to 16-bit gray+alpha (YA16): luma and (optional)
 * alpha are accumulated from 32-bit taps with the same 0x40000000
 * overflow compensation used elsewhere (re-added as +0x8000 after >>15),
 * clipped to 16 bits and stored interleaved as Y, A. With no alpha
 * plane, A is fully opaque (0xffff). Chroma parameters are unused.
 */
static av_always_inline void
yuv2ya16_X_c_template(SwsContext *c, const int16_t *lumFilter,
                      const int32_t **lumSrc, int lumFilterSize,
                      const int16_t *chrFilter, const int32_t **unused_chrUSrc,
                      const int32_t **unused_chrVSrc, int unused_chrFilterSize,
                      const int32_t **alpSrc, uint16_t *dest, int dstW,
                      int y, enum AVPixelFormat target,
                      int unused_hasAlpha, int unused_eightbytes, int is_be)
{
    int hasAlpha = !!alpSrc;
    int i;

    for (i = 0; i < dstW; i++) {
        int j;
        int Y = -0x40000000; /* overflow-compensation offset */
        int A = 0xffff;      /* opaque default when no alpha plane */

        for (j = 0; j < lumFilterSize; j++)
            Y += lumSrc[j][i] * lumFilter[j];

        Y >>= 15;
        /* (1<<3): rounding for the implicit 19->16 bit reduction;
         * 0x8000 re-adds the compensation offset */
        Y += (1<<3) + 0x8000;
        Y = av_clip_uint16(Y);

        if (hasAlpha) {
            /* (1<<14): rounding term for the >>15 */
            A = -0x40000000 + (1<<14);
            for (j = 0; j < lumFilterSize; j++)
                A += alpSrc[j][i] * lumFilter[j];

            A >>= 15;
            A += 0x8000;
            A = av_clip_uint16(A);
        }

        output_pixel(&dest[2 * i ], Y);
        output_pixel(&dest[2 * i + 1], A);
    }
}
987 
988 static av_always_inline void
990  const int32_t *unused_ubuf[2], const int32_t *unused_vbuf[2],
991  const int32_t *abuf[2], uint16_t *dest, int dstW,
992  int yalpha, int unused_uvalpha, int y,
993  enum AVPixelFormat target, int unused_hasAlpha,
994  int unused_eightbytes, int is_be)
995 {
996  int hasAlpha = abuf && abuf[0] && abuf[1];
997  const int32_t *buf0 = buf[0], *buf1 = buf[1],
998  *abuf0 = hasAlpha ? abuf[0] : NULL,
999  *abuf1 = hasAlpha ? abuf[1] : NULL;
1000  int yalpha1 = 4096 - yalpha;
1001  int i;
1002 
1003  av_assert2(yalpha <= 4096U);
1004 
1005  for (i = 0; i < dstW; i++) {
1006  int Y = (buf0[i] * yalpha1 + buf1[i] * yalpha) >> 15;
1007  int A;
1008 
1009  Y = av_clip_uint16(Y);
1010 
1011  if (hasAlpha) {
1012  A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha) >> 15;
1013  A = av_clip_uint16(A);
1014  }
1015 
1016  output_pixel(&dest[2 * i ], Y);
1017  output_pixel(&dest[2 * i + 1], hasAlpha ? A : 65535);
1018  }
1019 }
1020 
1021 static av_always_inline void
1023  const int32_t *unused_ubuf[2], const int32_t *unused_vbuf[2],
1024  const int32_t *abuf0, uint16_t *dest, int dstW,
1025  int unused_uvalpha, int y, enum AVPixelFormat target,
1026  int unused_hasAlpha, int unused_eightbytes, int is_be)
1027 {
1028  int hasAlpha = !!abuf0;
1029  int i;
1030 
1031  for (i = 0; i < dstW; i++) {
1032  int Y = buf0[i] >> 3;/* 19 - 16 */
1033  int A;
1034 
1035  Y = av_clip_uint16(Y);
1036 
1037  if (hasAlpha) {
1038  A = abuf0[i] >> 3;
1039  if (A & 0x100)
1040  A = av_clip_uint16(A);
1041  }
1042 
1043  output_pixel(&dest[2 * i ], Y);
1044  output_pixel(&dest[2 * i + 1], hasAlpha ? A : 65535);
1045  }
1046 }
1047 
1048 static av_always_inline void
1049 yuv2rgba64_X_c_template(SwsContext *c, const int16_t *lumFilter,
1050  const int32_t **lumSrc, int lumFilterSize,
1051  const int16_t *chrFilter, const int32_t **chrUSrc,
1052  const int32_t **chrVSrc, int chrFilterSize,
1053  const int32_t **alpSrc, uint16_t *dest, int dstW,
1054  int y, enum AVPixelFormat target, int hasAlpha, int eightbytes,
1055  int is_be)
1056 {
1057  int i;
1058  int A1 = 0xffff<<14, A2 = 0xffff<<14;
1059 
1060  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1061  int j;
1062  int Y1 = -0x40000000;
1063  int Y2 = -0x40000000;
1064  int U = -(128 << 23); // 19
1065  int V = -(128 << 23);
1066  int R, G, B;
1067 
1068  for (j = 0; j < lumFilterSize; j++) {
1069  Y1 += lumSrc[j][i * 2] * (unsigned)lumFilter[j];
1070  Y2 += lumSrc[j][i * 2 + 1] * (unsigned)lumFilter[j];
1071  }
1072  for (j = 0; j < chrFilterSize; j++) {;
1073  U += chrUSrc[j][i] * (unsigned)chrFilter[j];
1074  V += chrVSrc[j][i] * (unsigned)chrFilter[j];
1075  }
1076 
1077  if (hasAlpha) {
1078  A1 = -0x40000000;
1079  A2 = -0x40000000;
1080  for (j = 0; j < lumFilterSize; j++) {
1081  A1 += alpSrc[j][i * 2] * (unsigned)lumFilter[j];
1082  A2 += alpSrc[j][i * 2 + 1] * (unsigned)lumFilter[j];
1083  }
1084  A1 >>= 1;
1085  A1 += 0x20002000;
1086  A2 >>= 1;
1087  A2 += 0x20002000;
1088  }
1089 
1090  // 8 bits: 12+15=27; 16 bits: 12+19=31
1091  Y1 >>= 14; // 10
1092  Y1 += 0x10000;
1093  Y2 >>= 14;
1094  Y2 += 0x10000;
1095  U >>= 14;
1096  V >>= 14;
1097 
1098  // 8 bits: 27 -> 17 bits, 16 bits: 31 - 14 = 17 bits
1099  Y1 -= c->yuv2rgb_y_offset;
1100  Y2 -= c->yuv2rgb_y_offset;
1101  Y1 *= c->yuv2rgb_y_coeff;
1102  Y2 *= c->yuv2rgb_y_coeff;
1103  Y1 += (1 << 13) - (1 << 29); // 21
1104  Y2 += (1 << 13) - (1 << 29);
1105  // 8 bits: 17 + 13 bits = 30 bits, 16 bits: 17 + 13 bits = 30 bits
1106 
1107  R = V * c->yuv2rgb_v2r_coeff;
1108  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1109  B = U * c->yuv2rgb_u2b_coeff;
1110 
1111  // 8 bits: 30 - 22 = 8 bits, 16 bits: 30 bits - 14 = 16 bits
1112  output_pixel(&dest[0], av_clip_uintp2(((R_B + Y1) >> 14) + (1<<15), 16));
1113  output_pixel(&dest[1], av_clip_uintp2((( G + Y1) >> 14) + (1<<15), 16));
1114  output_pixel(&dest[2], av_clip_uintp2(((B_R + Y1) >> 14) + (1<<15), 16));
1115  if (eightbytes) {
1116  output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14);
1117  output_pixel(&dest[4], av_clip_uintp2(((R_B + Y2) >> 14) + (1<<15), 16));
1118  output_pixel(&dest[5], av_clip_uintp2((( G + Y2) >> 14) + (1<<15), 16));
1119  output_pixel(&dest[6], av_clip_uintp2(((B_R + Y2) >> 14) + (1<<15), 16));
1120  output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14);
1121  dest += 8;
1122  } else {
1123  output_pixel(&dest[3], av_clip_uintp2(((R_B + Y2) >> 14) + (1<<15), 16));
1124  output_pixel(&dest[4], av_clip_uintp2((( G + Y2) >> 14) + (1<<15), 16));
1125  output_pixel(&dest[5], av_clip_uintp2(((B_R + Y2) >> 14) + (1<<15), 16));
1126  dest += 6;
1127  }
1128  }
1129 }
1130 
1131 static av_always_inline void
1133  const int32_t *ubuf[2], const int32_t *vbuf[2],
1134  const int32_t *abuf[2], uint16_t *dest, int dstW,
1135  int yalpha, int uvalpha, int y,
1136  enum AVPixelFormat target, int hasAlpha, int eightbytes,
1137  int is_be)
1138 {
1139  const int32_t *buf0 = buf[0], *buf1 = buf[1],
1140  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
1141  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
1142  *abuf0 = hasAlpha ? abuf[0] : NULL,
1143  *abuf1 = hasAlpha ? abuf[1] : NULL;
1144  int yalpha1 = 4096 - yalpha;
1145  int uvalpha1 = 4096 - uvalpha;
1146  int i;
1147  int A1 = 0xffff<<14, A2 = 0xffff<<14;
1148 
1149  av_assert2(yalpha <= 4096U);
1150  av_assert2(uvalpha <= 4096U);
1151 
1152  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1153  int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 14;
1154  int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 14;
1155  int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha - (128 << 23)) >> 14;
1156  int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha - (128 << 23)) >> 14;
1157  int R, G, B;
1158 
1159  Y1 -= c->yuv2rgb_y_offset;
1160  Y2 -= c->yuv2rgb_y_offset;
1161  Y1 *= c->yuv2rgb_y_coeff;
1162  Y2 *= c->yuv2rgb_y_coeff;
1163  Y1 += (1 << 13) - (1 << 29);
1164  Y2 += (1 << 13) - (1 << 29);
1165 
1166  R = V * c->yuv2rgb_v2r_coeff;
1167  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1168  B = U * c->yuv2rgb_u2b_coeff;
1169 
1170  if (hasAlpha) {
1171  A1 = (abuf0[i * 2 ] * yalpha1 + abuf1[i * 2 ] * yalpha) >> 1;
1172  A2 = (abuf0[i * 2 + 1] * yalpha1 + abuf1[i * 2 + 1] * yalpha) >> 1;
1173 
1174  A1 += 1 << 13;
1175  A2 += 1 << 13;
1176  }
1177 
1178  output_pixel(&dest[0], av_clip_uintp2(((R_B + Y1) >> 14) + (1<<15), 16));
1179  output_pixel(&dest[1], av_clip_uintp2((( G + Y1) >> 14) + (1<<15), 16));
1180  output_pixel(&dest[2], av_clip_uintp2(((B_R + Y1) >> 14) + (1<<15), 16));
1181  if (eightbytes) {
1182  output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14);
1183  output_pixel(&dest[4], av_clip_uintp2(((R_B + Y2) >> 14) + (1<<15), 16));
1184  output_pixel(&dest[5], av_clip_uintp2((( G + Y2) >> 14) + (1<<15), 16));
1185  output_pixel(&dest[6], av_clip_uintp2(((B_R + Y2) >> 14) + (1<<15), 16));
1186  output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14);
1187  dest += 8;
1188  } else {
1189  output_pixel(&dest[3], av_clip_uintp2(((R_B + Y2) >> 14) + (1<<15), 16));
1190  output_pixel(&dest[4], av_clip_uintp2((( G + Y2) >> 14) + (1<<15), 16));
1191  output_pixel(&dest[5], av_clip_uintp2(((B_R + Y2) >> 14) + (1<<15), 16));
1192  dest += 6;
1193  }
1194  }
1195 }
1196 
1197 static av_always_inline void
1199  const int32_t *ubuf[2], const int32_t *vbuf[2],
1200  const int32_t *abuf0, uint16_t *dest, int dstW,
1201  int uvalpha, int y, enum AVPixelFormat target,
1202  int hasAlpha, int eightbytes, int is_be)
1203 {
1204  const int32_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
1205  int i;
1206  int A1 = 0xffff<<14, A2= 0xffff<<14;
1207 
1208  if (uvalpha < 2048) {
1209  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1210  int Y1 = (buf0[i * 2] ) >> 2;
1211  int Y2 = (buf0[i * 2 + 1]) >> 2;
1212  int U = (ubuf0[i] - (128 << 11)) >> 2;
1213  int V = (vbuf0[i] - (128 << 11)) >> 2;
1214  int R, G, B;
1215 
1216  Y1 -= c->yuv2rgb_y_offset;
1217  Y2 -= c->yuv2rgb_y_offset;
1218  Y1 *= c->yuv2rgb_y_coeff;
1219  Y2 *= c->yuv2rgb_y_coeff;
1220  Y1 += (1 << 13) - (1 << 29);
1221  Y2 += (1 << 13) - (1 << 29);
1222 
1223  if (hasAlpha) {
1224  A1 = abuf0[i * 2 ] << 11;
1225  A2 = abuf0[i * 2 + 1] << 11;
1226 
1227  A1 += 1 << 13;
1228  A2 += 1 << 13;
1229  }
1230 
1231  R = V * c->yuv2rgb_v2r_coeff;
1232  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1233  B = U * c->yuv2rgb_u2b_coeff;
1234 
1235  output_pixel(&dest[0], av_clip_uintp2(((R_B + Y1) >> 14) + (1<<15), 16));
1236  output_pixel(&dest[1], av_clip_uintp2((( G + Y1) >> 14) + (1<<15), 16));
1237  output_pixel(&dest[2], av_clip_uintp2(((B_R + Y1) >> 14) + (1<<15), 16));
1238  if (eightbytes) {
1239  output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14);
1240  output_pixel(&dest[4], av_clip_uintp2(((R_B + Y2) >> 14) + (1<<15), 16));
1241  output_pixel(&dest[5], av_clip_uintp2((( G + Y2) >> 14) + (1<<15), 16));
1242  output_pixel(&dest[6], av_clip_uintp2(((B_R + Y2) >> 14) + (1<<15), 16));
1243  output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14);
1244  dest += 8;
1245  } else {
1246  output_pixel(&dest[3], av_clip_uintp2(((R_B + Y2) >> 14) + (1<<15), 16));
1247  output_pixel(&dest[4], av_clip_uintp2((( G + Y2) >> 14) + (1<<15), 16));
1248  output_pixel(&dest[5], av_clip_uintp2(((B_R + Y2) >> 14) + (1<<15), 16));
1249  dest += 6;
1250  }
1251  }
1252  } else {
1253  const int32_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
1254  int A1 = 0xffff<<14, A2 = 0xffff<<14;
1255  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1256  int Y1 = (buf0[i * 2] ) >> 2;
1257  int Y2 = (buf0[i * 2 + 1]) >> 2;
1258  int U = (ubuf0[i] + ubuf1[i] - (128 << 12)) >> 3;
1259  int V = (vbuf0[i] + vbuf1[i] - (128 << 12)) >> 3;
1260  int R, G, B;
1261 
1262  Y1 -= c->yuv2rgb_y_offset;
1263  Y2 -= c->yuv2rgb_y_offset;
1264  Y1 *= c->yuv2rgb_y_coeff;
1265  Y2 *= c->yuv2rgb_y_coeff;
1266  Y1 += (1 << 13) - (1 << 29);
1267  Y2 += (1 << 13) - (1 << 29);
1268 
1269  if (hasAlpha) {
1270  A1 = abuf0[i * 2 ] << 11;
1271  A2 = abuf0[i * 2 + 1] << 11;
1272 
1273  A1 += 1 << 13;
1274  A2 += 1 << 13;
1275  }
1276 
1277  R = V * c->yuv2rgb_v2r_coeff;
1278  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1279  B = U * c->yuv2rgb_u2b_coeff;
1280 
1281  output_pixel(&dest[0], av_clip_uintp2(((R_B + Y1) >> 14) + (1<<15), 16));
1282  output_pixel(&dest[1], av_clip_uintp2((( G + Y1) >> 14) + (1<<15), 16));
1283  output_pixel(&dest[2], av_clip_uintp2(((B_R + Y1) >> 14) + (1<<15), 16));
1284  if (eightbytes) {
1285  output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14);
1286  output_pixel(&dest[4], av_clip_uintp2(((R_B + Y2) >> 14) + (1<<15), 16));
1287  output_pixel(&dest[5], av_clip_uintp2((( G + Y2) >> 14) + (1<<15), 16));
1288  output_pixel(&dest[6], av_clip_uintp2(((B_R + Y2) >> 14) + (1<<15), 16));
1289  output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14);
1290  dest += 8;
1291  } else {
1292  output_pixel(&dest[3], av_clip_uintp2(((R_B + Y2) >> 14) + (1<<15), 16));
1293  output_pixel(&dest[4], av_clip_uintp2((( G + Y2) >> 14) + (1<<15), 16));
1294  output_pixel(&dest[5], av_clip_uintp2(((B_R + Y2) >> 14) + (1<<15), 16));
1295  dest += 6;
1296  }
1297  }
1298  }
1299 }
1300 
1301 static av_always_inline void
1302 yuv2rgba64_full_X_c_template(SwsContext *c, const int16_t *lumFilter,
1303  const int32_t **lumSrc, int lumFilterSize,
1304  const int16_t *chrFilter, const int32_t **chrUSrc,
1305  const int32_t **chrVSrc, int chrFilterSize,
1306  const int32_t **alpSrc, uint16_t *dest, int dstW,
1307  int y, enum AVPixelFormat target, int hasAlpha,
1308  int eightbytes, int is_be)
1309 {
1310  int i;
1311  int A = 0xffff<<14;
1312 
1313  for (i = 0; i < dstW; i++) {
1314  int j;
1315  int Y = -0x40000000;
1316  int U = -(128 << 23); // 19
1317  int V = -(128 << 23);
1318  int R, G, B;
1319 
1320  for (j = 0; j < lumFilterSize; j++) {
1321  Y += lumSrc[j][i] * (unsigned)lumFilter[j];
1322  }
1323  for (j = 0; j < chrFilterSize; j++) {;
1324  U += chrUSrc[j][i] * (unsigned)chrFilter[j];
1325  V += chrVSrc[j][i] * (unsigned)chrFilter[j];
1326  }
1327 
1328  if (hasAlpha) {
1329  A = -0x40000000;
1330  for (j = 0; j < lumFilterSize; j++) {
1331  A += alpSrc[j][i] * (unsigned)lumFilter[j];
1332  }
1333  A >>= 1;
1334  A += 0x20002000;
1335  }
1336 
1337  // 8bit: 12+15=27; 16-bit: 12+19=31
1338  Y >>= 14; // 10
1339  Y += 0x10000;
1340  U >>= 14;
1341  V >>= 14;
1342 
1343  // 8bit: 27 -> 17bit, 16bit: 31 - 14 = 17bit
1344  Y -= c->yuv2rgb_y_offset;
1345  Y *= c->yuv2rgb_y_coeff;
1346  Y += (1 << 13) - (1<<29); // 21
1347  // 8bit: 17 + 13bit = 30bit, 16bit: 17 + 13bit = 30bit
1348 
1349  R = V * c->yuv2rgb_v2r_coeff;
1350  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1351  B = U * c->yuv2rgb_u2b_coeff;
1352 
1353  // 8bit: 30 - 22 = 8bit, 16bit: 30bit - 14 = 16bit
1354  output_pixel(&dest[0], av_clip_uintp2(((R_B + Y)>>14) + (1<<15), 16));
1355  output_pixel(&dest[1], av_clip_uintp2((( G + Y)>>14) + (1<<15), 16));
1356  output_pixel(&dest[2], av_clip_uintp2(((B_R + Y)>>14) + (1<<15), 16));
1357  if (eightbytes) {
1358  output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14);
1359  dest += 4;
1360  } else {
1361  dest += 3;
1362  }
1363  }
1364 }
1365 
1366 static av_always_inline void
1368  const int32_t *ubuf[2], const int32_t *vbuf[2],
1369  const int32_t *abuf[2], uint16_t *dest, int dstW,
1370  int yalpha, int uvalpha, int y,
1371  enum AVPixelFormat target, int hasAlpha, int eightbytes,
1372  int is_be)
1373 {
1374  const int32_t *buf0 = buf[0], *buf1 = buf[1],
1375  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
1376  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
1377  *abuf0 = hasAlpha ? abuf[0] : NULL,
1378  *abuf1 = hasAlpha ? abuf[1] : NULL;
1379  int yalpha1 = 4096 - yalpha;
1380  int uvalpha1 = 4096 - uvalpha;
1381  int i;
1382  int A = 0xffff<<14;
1383 
1384  av_assert2(yalpha <= 4096U);
1385  av_assert2(uvalpha <= 4096U);
1386 
1387  for (i = 0; i < dstW; i++) {
1388  int Y = (buf0[i] * yalpha1 + buf1[i] * yalpha) >> 14;
1389  int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha - (128 << 23)) >> 14;
1390  int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha - (128 << 23)) >> 14;
1391  int R, G, B;
1392 
1393  Y -= c->yuv2rgb_y_offset;
1394  Y *= c->yuv2rgb_y_coeff;
1395  Y += (1 << 13) - (1 << 29);
1396 
1397  R = V * c->yuv2rgb_v2r_coeff;
1398  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1399  B = U * c->yuv2rgb_u2b_coeff;
1400 
1401  if (hasAlpha) {
1402  A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha) >> 1;
1403 
1404  A += 1 << 13;
1405  }
1406 
1407  output_pixel(&dest[0], av_clip_uintp2(((R_B + Y) >> 14) + (1<<15), 16));
1408  output_pixel(&dest[1], av_clip_uintp2((( G + Y) >> 14) + (1<<15), 16));
1409  output_pixel(&dest[2], av_clip_uintp2(((B_R + Y) >> 14) + (1<<15), 16));
1410  if (eightbytes) {
1411  output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14);
1412  dest += 4;
1413  } else {
1414  dest += 3;
1415  }
1416  }
1417 }
1418 
1419 static av_always_inline void
1421  const int32_t *ubuf[2], const int32_t *vbuf[2],
1422  const int32_t *abuf0, uint16_t *dest, int dstW,
1423  int uvalpha, int y, enum AVPixelFormat target,
1424  int hasAlpha, int eightbytes, int is_be)
1425 {
1426  const int32_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
1427  int i;
1428  int A = 0xffff<<14;
1429 
1430  if (uvalpha < 2048) {
1431  for (i = 0; i < dstW; i++) {
1432  int Y = (buf0[i]) >> 2;
1433  int U = (ubuf0[i] - (128 << 11)) >> 2;
1434  int V = (vbuf0[i] - (128 << 11)) >> 2;
1435  int R, G, B;
1436 
1437  Y -= c->yuv2rgb_y_offset;
1438  Y *= c->yuv2rgb_y_coeff;
1439  Y += (1 << 13) - (1 << 29);
1440 
1441  if (hasAlpha) {
1442  A = abuf0[i] << 11;
1443 
1444  A += 1 << 13;
1445  }
1446 
1447  R = V * c->yuv2rgb_v2r_coeff;
1448  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1449  B = U * c->yuv2rgb_u2b_coeff;
1450 
1451  output_pixel(&dest[0], av_clip_uintp2(((R_B + Y) >> 14) + (1<<15), 16));
1452  output_pixel(&dest[1], av_clip_uintp2((( G + Y) >> 14) + (1<<15), 16));
1453  output_pixel(&dest[2], av_clip_uintp2(((B_R + Y) >> 14) + (1<<15), 16));
1454  if (eightbytes) {
1455  output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14);
1456  dest += 4;
1457  } else {
1458  dest += 3;
1459  }
1460  }
1461  } else {
1462  const int32_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
1463  int A = 0xffff<<14;
1464  for (i = 0; i < dstW; i++) {
1465  int Y = (buf0[i] ) >> 2;
1466  int U = (ubuf0[i] + ubuf1[i] - (128 << 12)) >> 3;
1467  int V = (vbuf0[i] + vbuf1[i] - (128 << 12)) >> 3;
1468  int R, G, B;
1469 
1470  Y -= c->yuv2rgb_y_offset;
1471  Y *= c->yuv2rgb_y_coeff;
1472  Y += (1 << 13) - (1 << 29);
1473 
1474  if (hasAlpha) {
1475  A = abuf0[i] << 11;
1476 
1477  A += 1 << 13;
1478  }
1479 
1480  R = V * c->yuv2rgb_v2r_coeff;
1481  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1482  B = U * c->yuv2rgb_u2b_coeff;
1483 
1484  output_pixel(&dest[0], av_clip_uintp2(((R_B + Y) >> 14) + (1<<15), 16));
1485  output_pixel(&dest[1], av_clip_uintp2((( G + Y) >> 14) + (1<<15), 16));
1486  output_pixel(&dest[2], av_clip_uintp2(((B_R + Y) >> 14) + (1<<15), 16));
1487  if (eightbytes) {
1488  output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14);
1489  dest += 4;
1490  } else {
1491  dest += 3;
1492  }
1493  }
1494  }
1495 }
1496 
#undef output_pixel
/* NOTE(review): these lowercase undefs do not match the uppercase R_B/B_R
 * macros defined above, so R_B/B_R remain defined past this point — verify
 * against the rest of the file before changing the case here. */
#undef r_b
#undef b_r
1500 
/*
 * Instantiate the three vertical-scaler entry points (_X_c: arbitrary
 * filter, _2_c: 2-tap blend, _1_c: single line) for one 16-bit packed
 * output format. The public signatures use int16_t pointers; the 16-bit
 * path actually carries int32_t samples, hence the casts before calling
 * the templates above.
 */
#define YUV2PACKED16WRAPPER_EXT(name, base, ext, fmt, is_be, hasAlpha, eightbytes) \
static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
                        const int16_t **_lumSrc, int lumFilterSize, \
                        const int16_t *chrFilter, const int16_t **_chrUSrc, \
                        const int16_t **_chrVSrc, int chrFilterSize, \
                        const int16_t **_alpSrc, uint8_t *_dest, int dstW, \
                        int y) \
{ \
    const int32_t **lumSrc  = (const int32_t **) _lumSrc, \
                  **chrUSrc = (const int32_t **) _chrUSrc, \
                  **chrVSrc = (const int32_t **) _chrVSrc, \
                  **alpSrc  = (const int32_t **) _alpSrc; \
    uint16_t *dest = (uint16_t *) _dest; \
    name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
                          chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                          alpSrc, dest, dstW, y, fmt, hasAlpha, eightbytes, is_be); \
} \
 \
static void name ## ext ## _2_c(SwsContext *c, const int16_t *_buf[2], \
                        const int16_t *_ubuf[2], const int16_t *_vbuf[2], \
                        const int16_t *_abuf[2], uint8_t *_dest, int dstW, \
                        int yalpha, int uvalpha, int y) \
{ \
    const int32_t **buf  = (const int32_t **) _buf, \
                  **ubuf = (const int32_t **) _ubuf, \
                  **vbuf = (const int32_t **) _vbuf, \
                  **abuf = (const int32_t **) _abuf; \
    uint16_t *dest = (uint16_t *) _dest; \
    name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
                          dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha, eightbytes, is_be); \
} \
 \
static void name ## ext ## _1_c(SwsContext *c, const int16_t *_buf0, \
                        const int16_t *_ubuf[2], const int16_t *_vbuf[2], \
                        const int16_t *_abuf0, uint8_t *_dest, int dstW, \
                        int uvalpha, int y) \
{ \
    const int32_t *buf0  = (const int32_t *)  _buf0, \
                 **ubuf  = (const int32_t **) _ubuf, \
                 **vbuf  = (const int32_t **) _vbuf, \
                  *abuf0 = (const int32_t *)  _abuf0; \
    uint16_t *dest = (uint16_t *) _dest; \
    name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \
                                  dstW, uvalpha, y, fmt, hasAlpha, eightbytes, is_be); \
}
/* Convenience wrapper: builds the endian-specific pixfmt name and the
 * compile-time is_be flag from the LE/BE suffix. */
#define YUV2PACKED16WRAPPER(name, base, ext, base_fmt, endianness, hasAlpha, eightbytes) \
    YUV2PACKED16WRAPPER_EXT(name, base, ext, base_fmt ## endianness, IS_BE(endianness), hasAlpha, eightbytes)
1548 
/* Subsampled-chroma variants (chroma at half horizontal resolution). */
YUV2PACKED16WRAPPER(yuv2, rgba64, rgb48be, AV_PIX_FMT_RGB48, BE, 0, 0)
YUV2PACKED16WRAPPER(yuv2, rgba64, rgb48le, AV_PIX_FMT_RGB48, LE, 0, 0)
YUV2PACKED16WRAPPER(yuv2, rgba64, bgr48be, AV_PIX_FMT_BGR48, BE, 0, 0)
YUV2PACKED16WRAPPER(yuv2, rgba64, bgr48le, AV_PIX_FMT_BGR48, LE, 0, 0)
YUV2PACKED16WRAPPER(yuv2, rgba64, rgba64be, AV_PIX_FMT_RGBA64, BE, 1, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64, rgba64le, AV_PIX_FMT_RGBA64, LE, 1, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64, rgbx64be, AV_PIX_FMT_RGBA64, BE, 0, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64, rgbx64le, AV_PIX_FMT_RGBA64, LE, 0, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64, bgra64be, AV_PIX_FMT_BGRA64, BE, 1, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64, bgra64le, AV_PIX_FMT_BGRA64, LE, 1, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64, bgrx64be, AV_PIX_FMT_BGRA64, BE, 0, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64, bgrx64le, AV_PIX_FMT_BGRA64, LE, 0, 1)
YUV2PACKED16WRAPPER(yuv2, ya16, ya16be, AV_PIX_FMT_YA16, BE, 1, 0)
YUV2PACKED16WRAPPER(yuv2, ya16, ya16le, AV_PIX_FMT_YA16, LE, 1, 0)

/* Full-chroma variants (one chroma sample per luma sample). */
YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgb48be_full, AV_PIX_FMT_RGB48, BE, 0, 0)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgb48le_full, AV_PIX_FMT_RGB48, LE, 0, 0)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgr48be_full, AV_PIX_FMT_BGR48, BE, 0, 0)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgr48le_full, AV_PIX_FMT_BGR48, LE, 0, 0)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgba64be_full, AV_PIX_FMT_RGBA64, BE, 1, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgba64le_full, AV_PIX_FMT_RGBA64, LE, 1, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgbx64be_full, AV_PIX_FMT_RGBA64, BE, 0, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgbx64le_full, AV_PIX_FMT_RGBA64, LE, 0, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgra64be_full, AV_PIX_FMT_BGRA64, BE, 1, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgra64le_full, AV_PIX_FMT_BGRA64, LE, 1, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgrx64be_full, AV_PIX_FMT_BGRA64, BE, 0, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgrx64le_full, AV_PIX_FMT_BGRA64, LE, 0, 1)
1576 
1577 /*
1578  * Write out 2 RGB pixels in the target pixel format. This function takes a
1579  * R/G/B LUT as generated by ff_yuv2rgb_c_init_tables(), which takes care of
1580  * things like endianness conversion and shifting. The caller takes care of
1581  * setting the correct offset in these tables from the chroma (U/V) values.
1582  * This function then uses the luminance (Y1/Y2) values to write out the
1583  * correct RGB values into the destination buffer.
1584  */
static av_always_inline void
yuv2rgb_write(uint8_t *_dest, int i, int Y1, int Y2,
              unsigned A1, unsigned A2,
              const void *_r, const void *_g, const void *_b, int y,
              enum AVPixelFormat target, int hasAlpha)
{
    if (target == AV_PIX_FMT_ARGB || target == AV_PIX_FMT_RGBA ||
        target == AV_PIX_FMT_ABGR || target == AV_PIX_FMT_BGRA) {
        /* 32-bit formats: LUT entries are pre-shifted 32-bit words whose
         * per-channel bytes are disjoint, so summing merges the channels. */
        uint32_t *dest = (uint32_t *) _dest;
        const uint32_t *r = (const uint32_t *) _r;
        const uint32_t *g = (const uint32_t *) _g;
        const uint32_t *b = (const uint32_t *) _b;

#if CONFIG_SMALL
        /* RGB32_1/BGR32_1 carry alpha in the low byte, otherwise the top byte */
        int sh = hasAlpha ? ((target == AV_PIX_FMT_RGB32_1 || target == AV_PIX_FMT_BGR32_1) ? 0 : 24) : 0;

        dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (hasAlpha ? A1 << sh : 0);
        dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (hasAlpha ? A2 << sh : 0);
#else
        if (hasAlpha) {
            int sh = (target == AV_PIX_FMT_RGB32_1 || target == AV_PIX_FMT_BGR32_1) ? 0 : 24;

            /* the alpha byte of the LUT sum must be 0, else A would corrupt it */
            av_assert2((((r[Y1] + g[Y1] + b[Y1]) >> sh) & 0xFF) == 0);
            dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (A1 << sh);
            dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (A2 << sh);
        } else {
#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 1
            int sh = (target == AV_PIX_FMT_RGB32_1 || target == AV_PIX_FMT_BGR32_1) ? 0 : 24;

            /* without alpha the LUT already bakes in an opaque alpha byte */
            av_assert2((((r[Y1] + g[Y1] + b[Y1]) >> sh) & 0xFF) == 0xFF);
#endif
            dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1];
            dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2];
        }
#endif
    } else if (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) {
        /* 24-bit formats: three separate byte LUT lookups per pixel */
        uint8_t *dest = (uint8_t *) _dest;
        const uint8_t *r = (const uint8_t *) _r;
        const uint8_t *g = (const uint8_t *) _g;
        const uint8_t *b = (const uint8_t *) _b;

#define r_b ((target == AV_PIX_FMT_RGB24) ? r : b)
#define b_r ((target == AV_PIX_FMT_RGB24) ? b : r)

        dest[i * 6 + 0] = r_b[Y1];
        dest[i * 6 + 1] =   g[Y1];
        dest[i * 6 + 2] = b_r[Y1];
        dest[i * 6 + 3] = r_b[Y2];
        dest[i * 6 + 4] =   g[Y2];
        dest[i * 6 + 5] = b_r[Y2];
#undef r_b
#undef b_r
    } else if (target == AV_PIX_FMT_RGB565 || target == AV_PIX_FMT_BGR565 ||
               target == AV_PIX_FMT_RGB555 || target == AV_PIX_FMT_BGR555 ||
               target == AV_PIX_FMT_RGB444 || target == AV_PIX_FMT_BGR444) {
        /* 16-bit formats: ordered dither added to the Y index before lookup;
         * the dither tables differ per bit depth of each channel */
        uint16_t *dest = (uint16_t *) _dest;
        const uint16_t *r = (const uint16_t *) _r;
        const uint16_t *g = (const uint16_t *) _g;
        const uint16_t *b = (const uint16_t *) _b;
        int dr1, dg1, db1, dr2, dg2, db2;

        if (target == AV_PIX_FMT_RGB565 || target == AV_PIX_FMT_BGR565) {
            dr1 = ff_dither_2x2_8[ y & 1     ][0];
            dg1 = ff_dither_2x2_4[ y & 1     ][0];
            db1 = ff_dither_2x2_8[(y & 1) ^ 1][0];
            dr2 = ff_dither_2x2_8[ y & 1     ][1];
            dg2 = ff_dither_2x2_4[ y & 1     ][1];
            db2 = ff_dither_2x2_8[(y & 1) ^ 1][1];
        } else if (target == AV_PIX_FMT_RGB555 || target == AV_PIX_FMT_BGR555) {
            dr1 = ff_dither_2x2_8[ y & 1     ][0];
            dg1 = ff_dither_2x2_8[ y & 1     ][1];
            db1 = ff_dither_2x2_8[(y & 1) ^ 1][0];
            dr2 = ff_dither_2x2_8[ y & 1     ][1];
            dg2 = ff_dither_2x2_8[ y & 1     ][0];
            db2 = ff_dither_2x2_8[(y & 1) ^ 1][1];
        } else {
            dr1 = ff_dither_4x4_16[ y & 3     ][0];
            dg1 = ff_dither_4x4_16[ y & 3     ][1];
            db1 = ff_dither_4x4_16[(y & 3) ^ 3][0];
            dr2 = ff_dither_4x4_16[ y & 3     ][1];
            dg2 = ff_dither_4x4_16[ y & 3     ][0];
            db2 = ff_dither_4x4_16[(y & 3) ^ 3][1];
        }

        dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
        dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];
    } else if (target == AV_PIX_FMT_X2RGB10 || target == AV_PIX_FMT_X2BGR10) {
        /* 10-bit-per-channel packed formats: plain LUT sum, no dithering */
        uint32_t *dest = (uint32_t *) _dest;
        const uint32_t *r = (const uint32_t *) _r;
        const uint32_t *g = (const uint32_t *) _g;
        const uint32_t *b = (const uint32_t *) _b;
        dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1];
        dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2];
    } else /* 8/4 bits */ {
        uint8_t *dest = (uint8_t *) _dest;
        const uint8_t *r = (const uint8_t *) _r;
        const uint8_t *g = (const uint8_t *) _g;
        const uint8_t *b = (const uint8_t *) _b;
        int dr1, dg1, db1, dr2, dg2, db2;

        if (target == AV_PIX_FMT_RGB8 || target == AV_PIX_FMT_BGR8) {
            const uint8_t * const d64 = ff_dither_8x8_73[y & 7];
            const uint8_t * const d32 = ff_dither_8x8_32[y & 7];
            dr1 = dg1 = d32[(i * 2 + 0) & 7];
            db1 =       d64[(i * 2 + 0) & 7];
            dr2 = dg2 = d32[(i * 2 + 1) & 7];
            db2 =       d64[(i * 2 + 1) & 7];
        } else {
            const uint8_t * const d64  = ff_dither_8x8_73 [y & 7];
            const uint8_t * const d128 = ff_dither_8x8_220[y & 7];
            dr1 = db1 = d128[(i * 2 + 0) & 7];
            dg1 =        d64[(i * 2 + 0) & 7];
            dr2 = db2 = d128[(i * 2 + 1) & 7];
            dg2 =        d64[(i * 2 + 1) & 7];
        }

        if (target == AV_PIX_FMT_RGB4 || target == AV_PIX_FMT_BGR4) {
            /* 4-bit: pack two pixels into one byte, second pixel in the high nibble */
            dest[i] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1] +
                      ((r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2]) << 4);
        } else {
            dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
            dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];
        }
    }
}
1710 
/*
 * Multi-tap vertical filter + packed RGB output via the per-context
 * R/G/B LUTs (see yuv2rgb_write()); 8-bit intermediates, 2 pixels/loop.
 */
static av_always_inline void
yuv2rgb_X_c_template(SwsContext *c, const int16_t *lumFilter,
                     const int16_t **lumSrc, int lumFilterSize,
                     const int16_t *chrFilter, const int16_t **chrUSrc,
                     const int16_t **chrVSrc, int chrFilterSize,
                     const int16_t **alpSrc, uint8_t *dest, int dstW,
                     int y, enum AVPixelFormat target, int hasAlpha)
{
    int i;

    for (i = 0; i < ((dstW + 1) >> 1); i++) {
        int j, A1, A2;
        int Y1 = 1 << 18;   /* rounding bias for the >> 19 below */
        int Y2 = 1 << 18;
        int U  = 1 << 18;
        int V  = 1 << 18;
        const void *r, *g, *b;

        for (j = 0; j < lumFilterSize; j++) {
            Y1 += lumSrc[j][i * 2]     * lumFilter[j];
            Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
        }
        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];
        }
        Y1 >>= 19;
        Y2 >>= 19;
        U  >>= 19;
        V  >>= 19;
        if (hasAlpha) {
            A1 = 1 << 18;
            A2 = 1 << 18;
            for (j = 0; j < lumFilterSize; j++) {
                A1 += alpSrc[j][i * 2    ] * lumFilter[j];
                A2 += alpSrc[j][i * 2 + 1] * lumFilter[j];
            }
            A1 >>= 19;
            A2 >>= 19;
            /* only clip when some value left the 8-bit range (cheap fast path) */
            if ((A1 | A2) & 0x100) {
                A1 = av_clip_uint8(A1);
                A2 = av_clip_uint8(A2);
            }
        }

        /* U/V index the LUTs; HEADROOM absorbs small out-of-range excursions */
        r = c->table_rV[V + YUVRGB_TABLE_HEADROOM];
        g = (c->table_gU[U + YUVRGB_TABLE_HEADROOM] + c->table_gV[V + YUVRGB_TABLE_HEADROOM]);
        b = c->table_bU[U + YUVRGB_TABLE_HEADROOM];

        yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
                      r, g, b, y, target, hasAlpha);
    }
}
1764 
/*
 * 2-tap vertical blend + packed RGB output; yalpha/uvalpha are the
 * 12-bit blend weights between the two luma/chroma input lines.
 */
static av_always_inline void
yuv2rgb_2_c_template(SwsContext *c, const int16_t *buf[2],
                     const int16_t *ubuf[2], const int16_t *vbuf[2],
                     const int16_t *abuf[2], uint8_t *dest, int dstW,
                     int yalpha, int uvalpha, int y,
                     enum AVPixelFormat target, int hasAlpha)
{
    const int16_t *buf0  = buf[0],  *buf1  = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
                  *abuf0 = hasAlpha ? abuf[0] : NULL,
                  *abuf1 = hasAlpha ? abuf[1] : NULL;
    int yalpha1  = 4096 - yalpha;
    int uvalpha1 = 4096 - uvalpha;
    int i;
    av_assert2(yalpha  <= 4096U);
    av_assert2(uvalpha <= 4096U);

    for (i = 0; i < ((dstW + 1) >> 1); i++) {
        int Y1 = (buf0[i * 2]     * yalpha1  + buf1[i * 2]     * yalpha) >> 19;
        int Y2 = (buf0[i * 2 + 1] * yalpha1  + buf1[i * 2 + 1] * yalpha) >> 19;
        int U  = (ubuf0[i]        * uvalpha1 + ubuf1[i]        * uvalpha) >> 19;
        int V  = (vbuf0[i]        * uvalpha1 + vbuf1[i]        * uvalpha) >> 19;
        int A1, A2;
        const void *r = c->table_rV[V + YUVRGB_TABLE_HEADROOM],
                   *g = (c->table_gU[U + YUVRGB_TABLE_HEADROOM] + c->table_gV[V + YUVRGB_TABLE_HEADROOM]),
                   *b = c->table_bU[U + YUVRGB_TABLE_HEADROOM];

        if (hasAlpha) {
            A1 = (abuf0[i * 2    ] * yalpha1 + abuf1[i * 2    ] * yalpha) >> 19;
            A2 = (abuf0[i * 2 + 1] * yalpha1 + abuf1[i * 2 + 1] * yalpha) >> 19;
            A1 = av_clip_uint8(A1);
            A2 = av_clip_uint8(A2);
        }

        yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
                      r, g, b, y, target, hasAlpha);
    }
}
1804 
/*
 * 1-tap packed RGB output; uvalpha < 2048 selects single-line chroma,
 * otherwise the two chroma input lines are averaged.
 */
static av_always_inline void
yuv2rgb_1_c_template(SwsContext *c, const int16_t *buf0,
                     const int16_t *ubuf[2], const int16_t *vbuf[2],
                     const int16_t *abuf0, uint8_t *dest, int dstW,
                     int uvalpha, int y, enum AVPixelFormat target,
                     int hasAlpha)
{
    const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
    int i;

    if (uvalpha < 2048) {
        for (i = 0; i < ((dstW + 1) >> 1); i++) {
            int Y1 = (buf0[i * 2    ] + 64) >> 7;   /* 15-bit -> 8-bit, rounded */
            int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
            int U  = (ubuf0[i]        + 64) >> 7;
            int V  = (vbuf0[i]        + 64) >> 7;
            int A1, A2;
            const void *r = c->table_rV[V + YUVRGB_TABLE_HEADROOM],
                       *g = (c->table_gU[U + YUVRGB_TABLE_HEADROOM] + c->table_gV[V + YUVRGB_TABLE_HEADROOM]),
                       *b = c->table_bU[U + YUVRGB_TABLE_HEADROOM];

            if (hasAlpha) {
                /* precedence: (abuf0[..] * 255 + 16384) >> 15 */
                A1 = abuf0[i * 2    ] * 255 + 16384 >> 15;
                A2 = abuf0[i * 2 + 1] * 255 + 16384 >> 15;
                A1 = av_clip_uint8(A1);
                A2 = av_clip_uint8(A2);
            }

            yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
                          r, g, b, y, target, hasAlpha);
        }
    } else {
        const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
        for (i = 0; i < ((dstW + 1) >> 1); i++) {
            int Y1 = (buf0[i * 2    ] + 64) >> 7;
            int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
            int U  = (ubuf0[i] + ubuf1[i] + 128) >> 8;  /* average two chroma lines */
            int V  = (vbuf0[i] + vbuf1[i] + 128) >> 8;
            int A1, A2;
            const void *r = c->table_rV[V + YUVRGB_TABLE_HEADROOM],
                       *g = (c->table_gU[U + YUVRGB_TABLE_HEADROOM] + c->table_gV[V + YUVRGB_TABLE_HEADROOM]),
                       *b = c->table_bU[U + YUVRGB_TABLE_HEADROOM];

            if (hasAlpha) {
                A1 = (abuf0[i * 2    ] + 64) >> 7;
                A2 = (abuf0[i * 2 + 1] + 64) >> 7;
                A1 = av_clip_uint8(A1);
                A2 = av_clip_uint8(A2);
            }

            yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
                          r, g, b, y, target, hasAlpha);
        }
    }
}
1860 
/*
 * Generates the vertically-filtered (_X_) output function for one pixel
 * format: the emitted wrapper forwards to the shared *_X_c_template()
 * with 'fmt' and 'hasAlpha' as compile-time constants so the template
 * is fully specialized per format.
 */
#define YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
                                const int16_t **lumSrc, int lumFilterSize, \
                                const int16_t *chrFilter, const int16_t **chrUSrc, \
                                const int16_t **chrVSrc, int chrFilterSize, \
                                const int16_t **alpSrc, uint8_t *dest, int dstW, \
                                int y) \
{ \
    name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
                                  chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                                  alpSrc, dest, dstW, y, fmt, hasAlpha); \
}
1873 
/*
 * Same as YUV2RGBWRAPPERX plus the two-line blend (_2_) entry point,
 * which forwards to the matching *_2_c_template().
 */
#define YUV2RGBWRAPPERX2(name, base, ext, fmt, hasAlpha) \
YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf[2], uint8_t *dest, int dstW, \
                                int yalpha, int uvalpha, int y) \
{ \
    name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
                                  dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha); \
}
1884 
/*
 * Full wrapper set for one pixel format: _X_ (multi-tap vertical filter),
 * _2_ (two-line blend) and _1_ (single unscaled line) entry points.
 */
#define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha) \
YUV2RGBWRAPPERX2(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf0, uint8_t *dest, int dstW, \
                                int uvalpha, int y) \
{ \
    name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \
                                  dstW, uvalpha, y, fmt, hasAlpha); \
}
1895 
1896 #if CONFIG_SMALL
1897 YUV2RGBWRAPPER(yuv2rgb,, 32_1, AV_PIX_FMT_RGB32_1, CONFIG_SWSCALE_ALPHA && c->needAlpha)
1898 YUV2RGBWRAPPER(yuv2rgb,, 32, AV_PIX_FMT_RGB32, CONFIG_SWSCALE_ALPHA && c->needAlpha)
1899 #else
1900 #if CONFIG_SWSCALE_ALPHA
1903 #endif
1906 #endif
1907 YUV2RGBWRAPPER(yuv2, rgb, rgb24, AV_PIX_FMT_RGB24, 0)
1908 YUV2RGBWRAPPER(yuv2, rgb, bgr24, AV_PIX_FMT_BGR24, 0)
1915 YUV2RGBWRAPPER(yuv2, rgb, x2rgb10, AV_PIX_FMT_X2RGB10, 0)
1916 YUV2RGBWRAPPER(yuv2, rgb, x2bgr10, AV_PIX_FMT_X2BGR10, 0)
1917 
1919  uint8_t *dest, int i, int Y, int A, int U, int V,
1920  int y, enum AVPixelFormat target, int hasAlpha, int err[4])
1921 {
1922  int R, G, B;
1923  int isrgb8 = target == AV_PIX_FMT_BGR8 || target == AV_PIX_FMT_RGB8;
1924 
1925  Y -= c->yuv2rgb_y_offset;
1926  Y *= c->yuv2rgb_y_coeff;
1927  Y += 1 << 21;
1928  R = (unsigned)Y + V*c->yuv2rgb_v2r_coeff;
1929  G = (unsigned)Y + V*c->yuv2rgb_v2g_coeff + U*c->yuv2rgb_u2g_coeff;
1930  B = (unsigned)Y + U*c->yuv2rgb_u2b_coeff;
1931  if ((R | G | B) & 0xC0000000) {
1932  R = av_clip_uintp2(R, 30);
1933  G = av_clip_uintp2(G, 30);
1934  B = av_clip_uintp2(B, 30);
1935  }
1936 
1937  switch(target) {
1938  case AV_PIX_FMT_ARGB:
1939  dest[0] = hasAlpha ? A : 255;
1940  dest[1] = R >> 22;
1941  dest[2] = G >> 22;
1942  dest[3] = B >> 22;
1943  break;
1944  case AV_PIX_FMT_RGB24:
1945  dest[0] = R >> 22;
1946  dest[1] = G >> 22;
1947  dest[2] = B >> 22;
1948  break;
1949  case AV_PIX_FMT_RGBA:
1950  dest[0] = R >> 22;
1951  dest[1] = G >> 22;
1952  dest[2] = B >> 22;
1953  dest[3] = hasAlpha ? A : 255;
1954  break;
1955  case AV_PIX_FMT_ABGR:
1956  dest[0] = hasAlpha ? A : 255;
1957  dest[1] = B >> 22;
1958  dest[2] = G >> 22;
1959  dest[3] = R >> 22;
1960  break;
1961  case AV_PIX_FMT_BGR24:
1962  dest[0] = B >> 22;
1963  dest[1] = G >> 22;
1964  dest[2] = R >> 22;
1965  break;
1966  case AV_PIX_FMT_BGRA:
1967  dest[0] = B >> 22;
1968  dest[1] = G >> 22;
1969  dest[2] = R >> 22;
1970  dest[3] = hasAlpha ? A : 255;
1971  break;
1972  case AV_PIX_FMT_BGR4_BYTE:
1973  case AV_PIX_FMT_RGB4_BYTE:
1974  case AV_PIX_FMT_BGR8:
1975  case AV_PIX_FMT_RGB8:
1976  {
1977  int r,g,b;
1978 
1979  switch (c->dither) {
1980  case SWS_DITHER_NONE:
1981  if (isrgb8) {
1982  r = av_clip_uintp2(R >> 27, 3);
1983  g = av_clip_uintp2(G >> 27, 3);
1984  b = av_clip_uintp2(B >> 28, 2);
1985  } else {
1986  r = av_clip_uintp2(R >> 29, 1);
1987  g = av_clip_uintp2(G >> 28, 2);
1988  b = av_clip_uintp2(B >> 29, 1);
1989  }
1990  break;
1991  default:
1992  case SWS_DITHER_AUTO:
1993  case SWS_DITHER_ED:
1994  R >>= 22;
1995  G >>= 22;
1996  B >>= 22;
1997  R += (7*err[0] + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2])>>4;
1998  G += (7*err[1] + 1*c->dither_error[1][i] + 5*c->dither_error[1][i+1] + 3*c->dither_error[1][i+2])>>4;
1999  B += (7*err[2] + 1*c->dither_error[2][i] + 5*c->dither_error[2][i+1] + 3*c->dither_error[2][i+2])>>4;
2000  c->dither_error[0][i] = err[0];
2001  c->dither_error[1][i] = err[1];
2002  c->dither_error[2][i] = err[2];
2003  r = R >> (isrgb8 ? 5 : 7);
2004  g = G >> (isrgb8 ? 5 : 6);
2005  b = B >> (isrgb8 ? 6 : 7);
2006  r = av_clip(r, 0, isrgb8 ? 7 : 1);
2007  g = av_clip(g, 0, isrgb8 ? 7 : 3);
2008  b = av_clip(b, 0, isrgb8 ? 3 : 1);
2009  err[0] = R - r*(isrgb8 ? 36 : 255);
2010  err[1] = G - g*(isrgb8 ? 36 : 85);
2011  err[2] = B - b*(isrgb8 ? 85 : 255);
2012  break;
2013  case SWS_DITHER_A_DITHER:
2014  if (isrgb8) {
2015  /* see http://pippin.gimp.org/a_dither/ for details/origin */
2016 #define A_DITHER(u,v) (((((u)+((v)*236))*119)&0xff))
2017  r = (((R >> 19) + A_DITHER(i,y) -96)>>8);
2018  g = (((G >> 19) + A_DITHER(i + 17,y) - 96)>>8);
2019  b = (((B >> 20) + A_DITHER(i + 17*2,y) -96)>>8);
2020  r = av_clip_uintp2(r, 3);
2021  g = av_clip_uintp2(g, 3);
2022  b = av_clip_uintp2(b, 2);
2023  } else {
2024  r = (((R >> 21) + A_DITHER(i,y)-256)>>8);
2025  g = (((G >> 19) + A_DITHER(i + 17,y)-256)>>8);
2026  b = (((B >> 21) + A_DITHER(i + 17*2,y)-256)>>8);
2027  r = av_clip_uintp2(r, 1);
2028  g = av_clip_uintp2(g, 2);
2029  b = av_clip_uintp2(b, 1);
2030  }
2031  break;
2032  case SWS_DITHER_X_DITHER:
2033  if (isrgb8) {
2034  /* see http://pippin.gimp.org/a_dither/ for details/origin */
2035 #define X_DITHER(u,v) (((((u)^((v)*237))*181)&0x1ff)/2)
2036  r = (((R >> 19) + X_DITHER(i,y) - 96)>>8);
2037  g = (((G >> 19) + X_DITHER(i + 17,y) - 96)>>8);
2038  b = (((B >> 20) + X_DITHER(i + 17*2,y) - 96)>>8);
2039  r = av_clip_uintp2(r, 3);
2040  g = av_clip_uintp2(g, 3);
2041  b = av_clip_uintp2(b, 2);
2042  } else {
2043  r = (((R >> 21) + X_DITHER(i,y)-256)>>8);
2044  g = (((G >> 19) + X_DITHER(i + 17,y)-256)>>8);
2045  b = (((B >> 21) + X_DITHER(i + 17*2,y)-256)>>8);
2046  r = av_clip_uintp2(r, 1);
2047  g = av_clip_uintp2(g, 2);
2048  b = av_clip_uintp2(b, 1);
2049  }
2050 
2051  break;
2052  }
2053 
2054  if(target == AV_PIX_FMT_BGR4_BYTE) {
2055  dest[0] = r + 2*g + 8*b;
2056  } else if(target == AV_PIX_FMT_RGB4_BYTE) {
2057  dest[0] = b + 2*g + 8*r;
2058  } else if(target == AV_PIX_FMT_BGR8) {
2059  dest[0] = r + 8*g + 64*b;
2060  } else if(target == AV_PIX_FMT_RGB8) {
2061  dest[0] = b + 4*g + 32*r;
2062  } else
2063  av_assert2(0);
2064  break;}
2065  }
2066 }
2067 
/**
 * Vertically scale (multi-tap filter) to one line of packed RGB at full
 * chroma resolution (one U/V sample per output pixel), then hand each
 * pixel to yuv2rgb_write_full().
 */
static av_always_inline void
yuv2rgb_full_X_c_template(SwsContext *c, const int16_t *lumFilter,
                          const int16_t **lumSrc, int lumFilterSize,
                          const int16_t *chrFilter, const int16_t **chrUSrc,
                          const int16_t **chrVSrc, int chrFilterSize,
                          const int16_t **alpSrc, uint8_t *dest,
                          int dstW, int y, enum AVPixelFormat target, int hasAlpha)
{
    int i;
    int step = (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) ? 3 : 4;
    int err[4] = {0};
    int A = 0; //init to silence warning

    /* the sub-byte packed formats advance one byte per pixel */
    if(   target == AV_PIX_FMT_BGR4_BYTE || target == AV_PIX_FMT_RGB4_BYTE
       || target == AV_PIX_FMT_BGR8      || target == AV_PIX_FMT_RGB8)
        step = 1;

    for (i = 0; i < dstW; i++) {
        int j;
        int Y = 1<<9;                  /* rounding bias for the >> 10 below */
        int U = (1<<9)-(128 << 19);    /* chroma re-centred around zero */
        int V = (1<<9)-(128 << 19);

        for (j = 0; j < lumFilterSize; j++) {
            Y += lumSrc[j][i] * lumFilter[j];
        }
        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];
        }
        Y >>= 10;
        U >>= 10;
        V >>= 10;
        if (hasAlpha) {
            A = 1 << 18;
            for (j = 0; j < lumFilterSize; j++) {
                A += alpSrc[j][i] * lumFilter[j];
            }
            A >>= 19;
            if (A & 0x100)             /* clip only when out of 8-bit range */
                A = av_clip_uint8(A);
        }
        yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);
        dest += step;
    }
    /* stash the last pixel's diffusion error for the next line (i == dstW) */
    c->dither_error[0][i] = err[0];
    c->dither_error[1][i] = err[1];
    c->dither_error[2][i] = err[2];
}
2117 
/**
 * Blend two input lines (yalpha / uvalpha weights out of 4096) into one
 * line of packed RGB at full chroma resolution.
 */
static av_always_inline void
yuv2rgb_full_2_c_template(SwsContext *c, const int16_t *buf[2],
                          const int16_t *ubuf[2], const int16_t *vbuf[2],
                          const int16_t *abuf[2], uint8_t *dest, int dstW,
                          int yalpha, int uvalpha, int y,
                          enum AVPixelFormat target, int hasAlpha)
{
    const int16_t *buf0  = buf[0],  *buf1  = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
                  *abuf0 = hasAlpha ? abuf[0] : NULL,
                  *abuf1 = hasAlpha ? abuf[1] : NULL;
    int  yalpha1 = 4096 - yalpha;     /* complementary weight of line 0 */
    int uvalpha1 = 4096 - uvalpha;
    int i;
    int step = (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) ? 3 : 4;
    int err[4] = {0};
    int A = 0; // init to silence warning

    av_assert2(yalpha  <= 4096U);
    av_assert2(uvalpha <= 4096U);

    /* the sub-byte packed formats advance one byte per pixel */
    if(   target == AV_PIX_FMT_BGR4_BYTE || target == AV_PIX_FMT_RGB4_BYTE
       || target == AV_PIX_FMT_BGR8      || target == AV_PIX_FMT_RGB8)
        step = 1;

    for (i = 0; i < dstW; i++) {
        int Y = ( buf0[i] * yalpha1  +  buf1[i] * yalpha             ) >> 10; //FIXME rounding
        int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha-(128 << 19)) >> 10;
        int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha-(128 << 19)) >> 10;

        if (hasAlpha) {
            A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha + (1<<18)) >> 19;
            if (A & 0x100)
                A = av_clip_uint8(A);
        }

        yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);
        dest += step;
    }
    /* stash the last pixel's diffusion error for the next line (i == dstW) */
    c->dither_error[0][i] = err[0];
    c->dither_error[1][i] = err[1];
    c->dither_error[2][i] = err[2];
}
2162 
2163 static av_always_inline void
2165  const int16_t *ubuf[2], const int16_t *vbuf[2],
2166  const int16_t *abuf0, uint8_t *dest, int dstW,
2167  int uvalpha, int y, enum AVPixelFormat target,
2168  int hasAlpha)
2169 {
2170  const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
2171  int i;
2172  int step = (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) ? 3 : 4;
2173  int err[4] = {0};
2174 
2175  if( target == AV_PIX_FMT_BGR4_BYTE || target == AV_PIX_FMT_RGB4_BYTE
2176  || target == AV_PIX_FMT_BGR8 || target == AV_PIX_FMT_RGB8)
2177  step = 1;
2178 
2179  if (uvalpha < 2048) {
2180  int A = 0; //init to silence warning
2181  for (i = 0; i < dstW; i++) {
2182  int Y = buf0[i] * 4;
2183  int U = (ubuf0[i] - (128<<7)) * 4;
2184  int V = (vbuf0[i] - (128<<7)) * 4;
2185 
2186  if (hasAlpha) {
2187  A = (abuf0[i] + 64) >> 7;
2188  if (A & 0x100)
2189  A = av_clip_uint8(A);
2190  }
2191 
2192  yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);
2193  dest += step;
2194  }
2195  } else {
2196  const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
2197  int A = 0; //init to silence warning
2198  for (i = 0; i < dstW; i++) {
2199  int Y = buf0[i] * 4;
2200  int U = (ubuf0[i] + ubuf1[i] - (128<<8)) * 2;
2201  int V = (vbuf0[i] + vbuf1[i] - (128<<8)) * 2;
2202 
2203  if (hasAlpha) {
2204  A = (abuf0[i] + 64) >> 7;
2205  if (A & 0x100)
2206  A = av_clip_uint8(A);
2207  }
2208 
2209  yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);
2210  dest += step;
2211  }
2212  }
2213 
2214  c->dither_error[0][i] = err[0];
2215  c->dither_error[1][i] = err[1];
2216  c->dither_error[2][i] = err[2];
2217 }
2218 
/* Per-format instantiations of the full-chroma packed RGB writers.
 * With CONFIG_SMALL a single alpha-capable variant per format is built
 * and the alpha path is chosen at run time via c->needAlpha; otherwise
 * separate alpha and no-alpha specializations are compiled. */
#if CONFIG_SMALL
YUV2RGBWRAPPER(yuv2, rgb_full, bgra32_full, AV_PIX_FMT_BGRA, CONFIG_SWSCALE_ALPHA && c->needAlpha)
YUV2RGBWRAPPER(yuv2, rgb_full, abgr32_full, AV_PIX_FMT_ABGR, CONFIG_SWSCALE_ALPHA && c->needAlpha)
YUV2RGBWRAPPER(yuv2, rgb_full, rgba32_full, AV_PIX_FMT_RGBA, CONFIG_SWSCALE_ALPHA && c->needAlpha)
YUV2RGBWRAPPER(yuv2, rgb_full, argb32_full, AV_PIX_FMT_ARGB, CONFIG_SWSCALE_ALPHA && c->needAlpha)
#else
#if CONFIG_SWSCALE_ALPHA
YUV2RGBWRAPPER(yuv2, rgb_full, bgra32_full, AV_PIX_FMT_BGRA, 1)
YUV2RGBWRAPPER(yuv2, rgb_full, abgr32_full, AV_PIX_FMT_ABGR, 1)
YUV2RGBWRAPPER(yuv2, rgb_full, rgba32_full, AV_PIX_FMT_RGBA, 1)
YUV2RGBWRAPPER(yuv2, rgb_full, argb32_full, AV_PIX_FMT_ARGB, 1)
#endif
YUV2RGBWRAPPER(yuv2, rgb_full, bgrx32_full, AV_PIX_FMT_BGRA, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, xbgr32_full, AV_PIX_FMT_ABGR, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, rgbx32_full, AV_PIX_FMT_RGBA, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, xrgb32_full, AV_PIX_FMT_ARGB, 0)
#endif
YUV2RGBWRAPPER(yuv2, rgb_full, bgr24_full,  AV_PIX_FMT_BGR24, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, rgb24_full,  AV_PIX_FMT_RGB24, 0)

/* low-depth packed formats (one byte per pixel, dithered) */
YUV2RGBWRAPPER(yuv2, rgb_full, bgr4_byte_full,  AV_PIX_FMT_BGR4_BYTE, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, rgb4_byte_full,  AV_PIX_FMT_RGB4_BYTE, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, bgr8_full,   AV_PIX_FMT_BGR8,  0)
YUV2RGBWRAPPER(yuv2, rgb_full, rgb8_full,   AV_PIX_FMT_RGB8,  0)
2243 
/**
 * Vertically scale to one line of planar GBR(A).
 *
 * Plane order in dest is G, B, R (plus A when the destination format has
 * an alpha plane and alpha sources were supplied).  SH is the shift that
 * takes the 22-bit fixed-point RGB down to the destination depth; when
 * the depth exceeds 8 bits (SH != 22) the stores go through dest16 as
 * 16-bit words and are byte-swapped afterwards if the destination
 * endianness differs from the host's.
 */
static void
yuv2gbrp_full_X_c(SwsContext *c, const int16_t *lumFilter,
                  const int16_t **lumSrc, int lumFilterSize,
                  const int16_t *chrFilter, const int16_t **chrUSrc,
                  const int16_t **chrVSrc, int chrFilterSize,
                  const int16_t **alpSrc, uint8_t **dest,
                  int dstW, int y)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->dstFormat);
    int i;
    int hasAlpha = (desc->flags & AV_PIX_FMT_FLAG_ALPHA) && alpSrc;
    uint16_t **dest16 = (uint16_t**)dest;
    int SH = 22 + 8 - desc->comp[0].depth;  /* fixed-point -> depth shift */
    int A = 0; // init to silence warning

    for (i = 0; i < dstW; i++) {
        int j;
        int Y = 1 << 9;                /* rounding bias for the >> 10 below */
        int U = (1 << 9) - (128 << 19);
        int V = (1 << 9) - (128 << 19);
        int R, G, B;

        for (j = 0; j < lumFilterSize; j++)
            Y += lumSrc[j][i] * lumFilter[j];

        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];
        }

        Y >>= 10;
        U >>= 10;
        V >>= 10;

        if (hasAlpha) {
            A = 1 << 18;

            for (j = 0; j < lumFilterSize; j++)
                A += alpSrc[j][i] * lumFilter[j];

            if (A & 0xF8000000)        /* clip only when out of 27-bit range */
                A = av_clip_uintp2(A, 27);
        }

        Y -= c->yuv2rgb_y_offset;
        Y *= c->yuv2rgb_y_coeff;
        Y += 1 << (SH-1);              /* rounding for the final >> SH */
        R = Y + V * c->yuv2rgb_v2r_coeff;
        G = Y + V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
        B = Y + U * c->yuv2rgb_u2b_coeff;

        if ((R | G | B) & 0xC0000000) { /* any component outside 30-bit range? */
            R = av_clip_uintp2(R, 30);
            G = av_clip_uintp2(G, 30);
            B = av_clip_uintp2(B, 30);
        }

        if (SH != 22) {
            dest16[0][i] = G >> SH;
            dest16[1][i] = B >> SH;
            dest16[2][i] = R >> SH;
            if (hasAlpha)
                dest16[3][i] = A >> (SH - 3);
        } else {
            dest[0][i] = G >> 22;
            dest[1][i] = B >> 22;
            dest[2][i] = R >> 22;
            if (hasAlpha)
                dest[3][i] = A >> 19;
        }
    }
    /* fix endianness for >8-bit output when it differs from the host */
    if (SH != 22 && (!isBE(c->dstFormat)) != (!HAVE_BIGENDIAN)) {
        for (i = 0; i < dstW; i++) {
            dest16[0][i] = av_bswap16(dest16[0][i]);
            dest16[1][i] = av_bswap16(dest16[1][i]);
            dest16[2][i] = av_bswap16(dest16[2][i]);
            if (hasAlpha)
                dest16[3][i] = av_bswap16(dest16[3][i]);
        }
    }
}
2325 
/**
 * Vertically scale 32-bit intermediates to one line of planar 16-bit
 * GBR(A).  Plane order is G, B, R[, A]; results are re-centred into the
 * unsigned 16-bit range and byte-swapped at the end when the destination
 * endianness differs from the host's.
 */
static void
yuv2gbrp16_full_X_c(SwsContext *c, const int16_t *lumFilter,
                    const int16_t **lumSrcx, int lumFilterSize,
                    const int16_t *chrFilter, const int16_t **chrUSrcx,
                    const int16_t **chrVSrcx, int chrFilterSize,
                    const int16_t **alpSrcx, uint8_t **dest,
                    int dstW, int y)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->dstFormat);
    int i;
    int hasAlpha = (desc->flags & AV_PIX_FMT_FLAG_ALPHA) && alpSrcx;
    uint16_t **dest16 = (uint16_t**)dest;
    /* the 16-bit path actually carries 32-bit intermediates */
    const int32_t **lumSrc  = (const int32_t**)lumSrcx;
    const int32_t **chrUSrc = (const int32_t**)chrUSrcx;
    const int32_t **chrVSrc = (const int32_t**)chrVSrcx;
    const int32_t **alpSrc  = (const int32_t**)alpSrcx;

    for (i = 0; i < dstW; i++) {
        int j;
        int Y = -0x40000000;           /* bias keeps the sum in signed range */
        int U = -(128 << 23);
        int V = -(128 << 23);
        int R, G, B, A;

        /* (unsigned) products: accumulate with well-defined wraparound */
        for (j = 0; j < lumFilterSize; j++)
            Y += lumSrc[j][i] * (unsigned)lumFilter[j];

        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * (unsigned)chrFilter[j];
            V += chrVSrc[j][i] * (unsigned)chrFilter[j];
        }

        Y >>= 14;
        Y += 0x10000;                  /* undo half of the bias above */
        U >>= 14;
        V >>= 14;

        if (hasAlpha) {
            A = -0x40000000;

            for (j = 0; j < lumFilterSize; j++)
                A += alpSrc[j][i] * (unsigned)lumFilter[j];

            A >>= 1;
            A += 0x20002000;
        }

        Y -= c->yuv2rgb_y_offset;
        Y *= c->yuv2rgb_y_coeff;
        Y += (1 << 13) - (1 << 29);    /* rounding plus bias removal */
        R = V * c->yuv2rgb_v2r_coeff;
        G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
        B = U * c->yuv2rgb_u2b_coeff;

        /* re-centre into the unsigned 16-bit range and clip */
        dest16[2][i] = av_clip_uintp2(((Y + R) >> 14) + (1<<15), 16);
        dest16[0][i] = av_clip_uintp2(((Y + G) >> 14) + (1<<15), 16);
        dest16[1][i] = av_clip_uintp2(((Y + B) >> 14) + (1<<15), 16);

        if (hasAlpha)
            dest16[3][i] = av_clip_uintp2(A, 30) >> 14;
    }
    /* fix endianness when the output differs from the host */
    if ((!isBE(c->dstFormat)) != (!HAVE_BIGENDIAN)) {
        for (i = 0; i < dstW; i++) {
            dest16[0][i] = av_bswap16(dest16[0][i]);
            dest16[1][i] = av_bswap16(dest16[1][i]);
            dest16[2][i] = av_bswap16(dest16[2][i]);
            if (hasAlpha)
                dest16[3][i] = av_bswap16(dest16[3][i]);
        }
    }
}
2397 
/**
 * Vertically scale 32-bit intermediates to one line of planar float
 * GBR(A): each component is clipped to 16 bits, mapped towards [0,1] by
 * multiplying with 1/65535 and stored via its raw IEEE-754 bit pattern;
 * planes are byte-swapped afterwards when the destination endianness
 * differs from the host's.
 */
static void
yuv2gbrpf32_full_X_c(SwsContext *c, const int16_t *lumFilter,
                     const int16_t **lumSrcx, int lumFilterSize,
                     const int16_t *chrFilter, const int16_t **chrUSrcx,
                     const int16_t **chrVSrcx, int chrFilterSize,
                     const int16_t **alpSrcx, uint8_t **dest,
                     int dstW, int y)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->dstFormat);
    int i;
    int hasAlpha = (desc->flags & AV_PIX_FMT_FLAG_ALPHA) && alpSrcx;
    uint32_t **dest32 = (uint32_t**)dest;
    /* the float path carries 32-bit integer intermediates */
    const int32_t **lumSrc  = (const int32_t**)lumSrcx;
    const int32_t **chrUSrc = (const int32_t**)chrUSrcx;
    const int32_t **chrVSrc = (const int32_t**)chrVSrcx;
    const int32_t **alpSrc  = (const int32_t**)alpSrcx;
    static const float float_mult = 1.0f / 65535.0f;  /* 16-bit -> [0,1] */

    for (i = 0; i < dstW; i++) {
        int j;
        int Y = -0x40000000;           /* bias keeps the sum in signed range */
        int U = -(128 << 23);
        int V = -(128 << 23);
        int R, G, B, A;

        /* (unsigned) products: accumulate with well-defined wraparound */
        for (j = 0; j < lumFilterSize; j++)
            Y += lumSrc[j][i] * (unsigned)lumFilter[j];

        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * (unsigned)chrFilter[j];
            V += chrVSrc[j][i] * (unsigned)chrFilter[j];
        }

        Y >>= 14;
        Y += 0x10000;                  /* undo half of the bias above */
        U >>= 14;
        V >>= 14;

        if (hasAlpha) {
            A = -0x40000000;

            for (j = 0; j < lumFilterSize; j++)
                A += alpSrc[j][i] * (unsigned)lumFilter[j];

            A >>= 1;
            A += 0x20002000;
        }

        Y -= c->yuv2rgb_y_offset;
        Y *= c->yuv2rgb_y_coeff;
        Y += (1 << 13) - (1 << 29);    /* rounding plus bias removal */
        R = V * c->yuv2rgb_v2r_coeff;
        G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
        B = U * c->yuv2rgb_u2b_coeff;

        /* re-centre into the unsigned 16-bit range and clip */
        R = av_clip_uintp2(((Y + R) >> 14) + (1<<15), 16);
        G = av_clip_uintp2(((Y + G) >> 14) + (1<<15), 16);
        B = av_clip_uintp2(((Y + B) >> 14) + (1<<15), 16);

        /* store the float's bit pattern; plane order is G, B, R[, A] */
        dest32[0][i] = av_float2int(float_mult * (float)G);
        dest32[1][i] = av_float2int(float_mult * (float)B);
        dest32[2][i] = av_float2int(float_mult * (float)R);
        if (hasAlpha)
            dest32[3][i] = av_float2int(float_mult * (float)(av_clip_uintp2(A, 30) >> 14));
    }
    /* fix endianness when the output differs from the host */
    if ((!isBE(c->dstFormat)) != (!HAVE_BIGENDIAN)) {
        for (i = 0; i < dstW; i++) {
            dest32[0][i] = av_bswap32(dest32[0][i]);
            dest32[1][i] = av_bswap32(dest32[1][i]);
            dest32[2][i] = av_bswap32(dest32[2][i]);
            if (hasAlpha)
                dest32[3][i] = av_bswap32(dest32[3][i]);
        }
    }
}
2473 
2474 static void
2475 yuv2ya8_1_c(SwsContext *c, const int16_t *buf0,
2476  const int16_t *ubuf[2], const int16_t *vbuf[2],
2477  const int16_t *abuf0, uint8_t *dest, int dstW,
2478  int uvalpha, int y)
2479 {
2480  int hasAlpha = !!abuf0;
2481  int i;
2482 
2483  for (i = 0; i < dstW; i++) {
2484  int Y = (buf0[i] + 64) >> 7;
2485  int A;
2486 
2487  Y = av_clip_uint8(Y);
2488 
2489  if (hasAlpha) {
2490  A = (abuf0[i] + 64) >> 7;
2491  if (A & 0x100)
2492  A = av_clip_uint8(A);
2493  }
2494 
2495  dest[i * 2 ] = Y;
2496  dest[i * 2 + 1] = hasAlpha ? A : 255;
2497  }
2498 }
2499 
2500 static void
2501 yuv2ya8_2_c(SwsContext *c, const int16_t *buf[2],
2502  const int16_t *ubuf[2], const int16_t *vbuf[2],
2503  const int16_t *abuf[2], uint8_t *dest, int dstW,
2504  int yalpha, int uvalpha, int y)
2505 {
2506  int hasAlpha = abuf && abuf[0] && abuf[1];
2507  const int16_t *buf0 = buf[0], *buf1 = buf[1],
2508  *abuf0 = hasAlpha ? abuf[0] : NULL,
2509  *abuf1 = hasAlpha ? abuf[1] : NULL;
2510  int yalpha1 = 4096 - yalpha;
2511  int i;
2512 
2513  av_assert2(yalpha <= 4096U);
2514 
2515  for (i = 0; i < dstW; i++) {
2516  int Y = (buf0[i] * yalpha1 + buf1[i] * yalpha) >> 19;
2517  int A;
2518 
2519  Y = av_clip_uint8(Y);
2520 
2521  if (hasAlpha) {
2522  A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha) >> 19;
2523  A = av_clip_uint8(A);
2524  }
2525 
2526  dest[i * 2 ] = Y;
2527  dest[i * 2 + 1] = hasAlpha ? A : 255;
2528  }
2529 }
2530 
2531 static void
2532 yuv2ya8_X_c(SwsContext *c, const int16_t *lumFilter,
2533  const int16_t **lumSrc, int lumFilterSize,
2534  const int16_t *chrFilter, const int16_t **chrUSrc,
2535  const int16_t **chrVSrc, int chrFilterSize,
2536  const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
2537 {
2538  int hasAlpha = !!alpSrc;
2539  int i;
2540 
2541  for (i = 0; i < dstW; i++) {
2542  int j;
2543  int Y = 1 << 18, A = 1 << 18;
2544 
2545  for (j = 0; j < lumFilterSize; j++)
2546  Y += lumSrc[j][i] * lumFilter[j];
2547 
2548  Y >>= 19;
2549  if (Y & 0x100)
2550  Y = av_clip_uint8(Y);
2551 
2552  if (hasAlpha) {
2553  for (j = 0; j < lumFilterSize; j++)
2554  A += alpSrc[j][i] * lumFilter[j];
2555 
2556  A >>= 19;
2557 
2558  if (A & 0x100)
2559  A = av_clip_uint8(A);
2560  }
2561 
2562  dest[2 * i ] = Y;
2563  dest[2 * i + 1] = hasAlpha ? A : 255;
2564  }
2565 }
2566 
2567 static void
2568 yuv2ayuv64le_X_c(SwsContext *c, const int16_t *lumFilter,
2569  const int16_t **_lumSrc, int lumFilterSize,
2570  const int16_t *chrFilter, const int16_t **_chrUSrc,
2571  const int16_t **_chrVSrc, int chrFilterSize,
2572  const int16_t **_alpSrc, uint8_t *dest, int dstW, int y)
2573 {
2574  const int32_t **lumSrc = (const int32_t **) _lumSrc,
2575  **chrUSrc = (const int32_t **) _chrUSrc,
2576  **chrVSrc = (const int32_t **) _chrVSrc,
2577  **alpSrc = (const int32_t **) _alpSrc;
2578  int hasAlpha = !!alpSrc;
2579  int i;
2580 
2581  for (i = 0; i < dstW; i++) {
2582  int Y = 1 << 14, U = 1 << 14;
2583  int V = 1 << 14, A = 1 << 14;
2584  int j;
2585 
2586  Y -= 0x40000000;
2587  U -= 0x40000000;
2588  V -= 0x40000000;
2589  A -= 0x40000000;
2590 
2591  for (j = 0; j < lumFilterSize; j++)
2592  Y += lumSrc[j][i] * (unsigned)lumFilter[j];
2593 
2594  for (j = 0; j < chrFilterSize; j++)
2595  U += chrUSrc[j][i] * (unsigned)chrFilter[j];
2596 
2597  for (j = 0; j < chrFilterSize; j++)
2598  V += chrVSrc[j][i] * (unsigned)chrFilter[j];
2599 
2600  if (hasAlpha)
2601  for (j = 0; j < lumFilterSize; j++)
2602  A += alpSrc[j][i] * (unsigned)lumFilter[j];
2603 
2604  Y = 0x8000 + av_clip_int16(Y >> 15);
2605  U = 0x8000 + av_clip_int16(U >> 15);
2606  V = 0x8000 + av_clip_int16(V >> 15);
2607  A = 0x8000 + av_clip_int16(A >> 15);
2608 
2609  AV_WL16(dest + 8 * i, hasAlpha ? A : 65535);
2610  AV_WL16(dest + 8 * i + 2, Y);
2611  AV_WL16(dest + 8 * i + 4, U);
2612  AV_WL16(dest + 8 * i + 6, V);
2613  }
2614 }
2615 
2616 static void
2617 yuv2xv30le_X_c(SwsContext *c, const int16_t *lumFilter,
2618  const int16_t **lumSrc, int lumFilterSize,
2619  const int16_t *chrFilter, const int16_t **chrUSrc,
2620  const int16_t **chrVSrc, int chrFilterSize,
2621  const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
2622 {
2623  int i;
2624  for (i = 0; i < dstW; i++) {
2625  int Y = 1 << 16, U = 1 << 16, V = 1 << 16;
2626  int j;
2627 
2628  for (j = 0; j < lumFilterSize; j++)
2629  Y += lumSrc[j][i] * lumFilter[j];
2630 
2631  for (j = 0; j < chrFilterSize; j++) {
2632  U += chrUSrc[j][i] * chrFilter[j];
2633  V += chrVSrc[j][i] * chrFilter[j];
2634  }
2635 
2636  Y = av_clip_uintp2(Y >> 17, 10);
2637  U = av_clip_uintp2(U >> 17, 10);
2638  V = av_clip_uintp2(V >> 17, 10);
2639 
2640  AV_WL32(dest + 4 * i, U | Y << 10 | V << 20);
2641  }
2642 }
2643 
2644 static void
2645 yuv2xv36le_X_c(SwsContext *c, const int16_t *lumFilter,
2646  const int16_t **lumSrc, int lumFilterSize,
2647  const int16_t *chrFilter, const int16_t **chrUSrc,
2648  const int16_t **chrVSrc, int chrFilterSize,
2649  const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
2650 {
2651  int i;
2652  for (i = 0; i < dstW; i++) {
2653  int Y = 1 << 14, U = 1 << 14, V = 1 << 14;
2654  int j;
2655 
2656  for (j = 0; j < lumFilterSize; j++)
2657  Y += lumSrc[j][i] * lumFilter[j];
2658 
2659  for (j = 0; j < chrFilterSize; j++) {
2660  U += chrUSrc[j][i] * chrFilter[j];
2661  V += chrVSrc[j][i] * chrFilter[j];
2662  }
2663 
2664  AV_WL16(dest + 8 * i + 2, av_clip_uintp2(Y >> 15, 12) << 4);
2665  AV_WL16(dest + 8 * i + 0, av_clip_uintp2(U >> 15, 12) << 4);
2666  AV_WL16(dest + 8 * i + 4, av_clip_uintp2(V >> 15, 12) << 4);
2667  }
2668 }
2669 
2670 static void
2671 yuv2vuyX_X_c(SwsContext *c, const int16_t *lumFilter,
2672  const int16_t **lumSrc, int lumFilterSize,
2673  const int16_t *chrFilter, const int16_t **chrUSrc,
2674  const int16_t **chrVSrc, int chrFilterSize,
2675  const int16_t **alpSrc, uint8_t *dest, int dstW, int y,
2676  int destHasAlpha)
2677 {
2678  int hasAlpha = destHasAlpha && (!!alpSrc);
2679  int i;
2680 
2681  for (i = 0; i < dstW; i++) {
2682  int j;
2683  int Y = 1 << 18, U = 1 << 18;
2684  int V = 1 << 18, A = 255;
2685 
2686  for (j = 0; j < lumFilterSize; j++)
2687  Y += lumSrc[j][i] * lumFilter[j];
2688 
2689  for (j = 0; j < chrFilterSize; j++)
2690  U += chrUSrc[j][i] * chrFilter[j];
2691 
2692  for (j = 0; j < chrFilterSize; j++)
2693  V += chrVSrc[j][i] * chrFilter[j];
2694 
2695  Y >>= 19;
2696  U >>= 19;
2697  V >>= 19;
2698 
2699  if (Y & 0x100)
2700  Y = av_clip_uint8(Y);
2701  if (U & 0x100)
2702  U = av_clip_uint8(U);
2703  if (V & 0x100)
2704  V = av_clip_uint8(V);
2705 
2706  if (hasAlpha) {
2707  A = 1 << 18;
2708 
2709  for (j = 0; j < lumFilterSize; j++)
2710  A += alpSrc[j][i] * lumFilter[j];
2711 
2712  A >>= 19;
2713 
2714  if (A & 0x100)
2715  A = av_clip_uint8(A);
2716  }
2717 
2718  dest[4 * i ] = V;
2719  dest[4 * i + 1] = U;
2720  dest[4 * i + 2] = Y;
2721  if (destHasAlpha)
2722  dest[4 * i + 3] = A;
2723  }
2724 }
2725 
/**
 * VUYA output: the shared VUY worker with the alpha byte present
 * (destHasAlpha = 1).
 */
static void
yuv2vuya_X_c(SwsContext *c, const int16_t *lumFilter,
             const int16_t **lumSrc, int lumFilterSize,
             const int16_t *chrFilter, const int16_t **chrUSrc,
             const int16_t **chrVSrc, int chrFilterSize,
             const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
{
    yuv2vuyX_X_c(c, lumFilter, lumSrc, lumFilterSize, chrFilter,
                 chrUSrc, chrVSrc, chrFilterSize, alpSrc, dest, dstW, y, 1);
}
2736 
/**
 * VUYX output: the shared VUY worker without writing the fourth byte
 * (destHasAlpha = 0).
 */
static void
yuv2vuyx_X_c(SwsContext *c, const int16_t *lumFilter,
             const int16_t **lumSrc, int lumFilterSize,
             const int16_t *chrFilter, const int16_t **chrUSrc,
             const int16_t **chrVSrc, int chrFilterSize,
             const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
{
    yuv2vuyX_X_c(c, lumFilter, lumSrc, lumFilterSize, chrFilter,
                 chrUSrc, chrVSrc, chrFilterSize, alpSrc, dest, dstW, y, 0);
}
2747 
/*
 * Emit one little-endian 16-bit component: shift the accumulator down to
 * 'bits' significant bits, clip, and left-align the result in the 16-bit
 * slot.  'shift' and 'output_shift' are locals of the generated function.
 */
#define output_pixel(pos, val, bits) \
    AV_WL16(pos, av_clip_uintp2(val >> shift, bits) << output_shift);

/*
 * Generates a vertically-filtered writer for the packed 4:2:2 Y2xx
 * family (Y1 U Y2 V words per pixel pair), parameterized on component
 * depth.
 */
#define yuv2y2xx_wrapper(bits) \
    static void \
    yuv2y2 ## bits ## le_X_c(SwsContext *c, const int16_t *lumFilter, \
                             const int16_t **lumSrc, int lumFilterSize, \
                             const int16_t *chrFilter, \
                             const int16_t **chrUSrc, \
                             const int16_t **chrVSrc, int chrFilterSize, \
                             const int16_t **alpSrc, \
                             uint8_t *dest, int dstW, int y) \
    { \
        int i, j; \
        int shift = 11 + 16 - bits; \
        int output_shift = 16 - bits; \
        for (i = 0; i < ((dstW + 1) >> 1); i++) { \
            int Y1 = 1 << (shift - 1), Y2 = 1 << (shift - 1); \
            int U  = 1 << (shift - 1), V  = 1 << (shift - 1); \
            \
            for (j = 0; j < lumFilterSize; j++) { \
                Y1 += lumSrc[j][i * 2]     * lumFilter[j]; \
                Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j]; \
            } \
            \
            for (j = 0; j < chrFilterSize; j++) { \
                U += chrUSrc[j][i] * chrFilter[j]; \
                V += chrVSrc[j][i] * chrFilter[j]; \
            } \
            \
            output_pixel(dest + 8 * i + 0, Y1, bits); \
            output_pixel(dest + 8 * i + 2, U,  bits); \
            output_pixel(dest + 8 * i + 4, Y2, bits); \
            output_pixel(dest + 8 * i + 6, V,  bits); \
        } \
    }
2784 
2786 yuv2y2xx_wrapper(12)
2787 
2788 #undef output_pixel
2789 
2791  yuv2planar1_fn *yuv2plane1,
2793  yuv2interleavedX_fn *yuv2nv12cX,
2794  yuv2packed1_fn *yuv2packed1,
2795  yuv2packed2_fn *yuv2packed2,
2796  yuv2packedX_fn *yuv2packedX,
2797  yuv2anyX_fn *yuv2anyX)
2798 {
2799  enum AVPixelFormat dstFormat = c->dstFormat;
2800  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(dstFormat);
2801 
2802  if (isSemiPlanarYUV(dstFormat) && isDataInHighBits(dstFormat)) {
2803  if (desc->comp[0].depth == 10) {
2804  *yuv2plane1 = isBE(dstFormat) ? yuv2p010l1_BE_c : yuv2p010l1_LE_c;
2805  *yuv2planeX = isBE(dstFormat) ? yuv2p010lX_BE_c : yuv2p010lX_LE_c;
2806  *yuv2nv12cX = isBE(dstFormat) ? yuv2p010cX_BE_c : yuv2p010cX_LE_c;
2807  } else if (desc->comp[0].depth == 12) {
2808  *yuv2plane1 = isBE(dstFormat) ? yuv2p012l1_BE_c : yuv2p012l1_LE_c;
2809  *yuv2planeX = isBE(dstFormat) ? yuv2p012lX_BE_c : yuv2p012lX_LE_c;
2810  *yuv2nv12cX = isBE(dstFormat) ? yuv2p012cX_BE_c : yuv2p012cX_LE_c;
2811  } else
2812  av_assert0(0);
2813  } else if (is16BPS(dstFormat)) {
2814  *yuv2planeX = isBE(dstFormat) ? yuv2planeX_16BE_c : yuv2planeX_16LE_c;
2815  *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_16BE_c : yuv2plane1_16LE_c;
2816  if (isSemiPlanarYUV(dstFormat)) {
2817  *yuv2nv12cX = isBE(dstFormat) ? yuv2nv12cX_16BE_c : yuv2nv12cX_16LE_c;
2818  }
2819  } else if (isNBPS(dstFormat)) {
2820  if (desc->comp[0].depth == 9) {
2821  *yuv2planeX = isBE(dstFormat) ? yuv2planeX_9BE_c : yuv2planeX_9LE_c;
2822  *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_9BE_c : yuv2plane1_9LE_c;
2823  } else if (desc->comp[0].depth == 10) {
2824  *yuv2planeX = isBE(dstFormat) ? yuv2planeX_10BE_c : yuv2planeX_10LE_c;
2825  *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_10BE_c : yuv2plane1_10LE_c;
2826  } else if (desc->comp[0].depth == 12) {
2827  *yuv2planeX = isBE(dstFormat) ? yuv2planeX_12BE_c : yuv2planeX_12LE_c;
2828  *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_12BE_c : yuv2plane1_12LE_c;
2829  } else if (desc->comp[0].depth == 14) {
2830  *yuv2planeX = isBE(dstFormat) ? yuv2planeX_14BE_c : yuv2planeX_14LE_c;
2831  *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_14BE_c : yuv2plane1_14LE_c;
2832  } else
2833  av_assert0(0);
2834  } else if (dstFormat == AV_PIX_FMT_GRAYF32BE) {
2835  *yuv2planeX = yuv2planeX_floatBE_c;
2836  *yuv2plane1 = yuv2plane1_floatBE_c;
2837  } else if (dstFormat == AV_PIX_FMT_GRAYF32LE) {
2838  *yuv2planeX = yuv2planeX_floatLE_c;
2839  *yuv2plane1 = yuv2plane1_floatLE_c;
2840  } else {
2841  *yuv2plane1 = yuv2plane1_8_c;
2843  if (isSemiPlanarYUV(dstFormat))
2844  *yuv2nv12cX = yuv2nv12cX_c;
2845  }
2846 
2847  if(c->flags & SWS_FULL_CHR_H_INT) {
2848  switch (dstFormat) {
2849  case AV_PIX_FMT_RGBA:
2850 #if CONFIG_SMALL
2851  *yuv2packedX = yuv2rgba32_full_X_c;
2852  *yuv2packed2 = yuv2rgba32_full_2_c;
2853  *yuv2packed1 = yuv2rgba32_full_1_c;
2854 #else
2855 #if CONFIG_SWSCALE_ALPHA
2856  if (c->needAlpha) {
2857  *yuv2packedX = yuv2rgba32_full_X_c;
2858  *yuv2packed2 = yuv2rgba32_full_2_c;
2859  *yuv2packed1 = yuv2rgba32_full_1_c;
2860  } else
2861 #endif /* CONFIG_SWSCALE_ALPHA */
2862  {
2863  *yuv2packedX = yuv2rgbx32_full_X_c;
2864  *yuv2packed2 = yuv2rgbx32_full_2_c;
2865  *yuv2packed1 = yuv2rgbx32_full_1_c;
2866  }
2867 #endif /* !CONFIG_SMALL */
2868  break;
2869  case AV_PIX_FMT_ARGB:
2870 #if CONFIG_SMALL
2871  *yuv2packedX = yuv2argb32_full_X_c;
2872  *yuv2packed2 = yuv2argb32_full_2_c;
2873  *yuv2packed1 = yuv2argb32_full_1_c;
2874 #else
2875 #if CONFIG_SWSCALE_ALPHA
2876  if (c->needAlpha) {
2877  *yuv2packedX = yuv2argb32_full_X_c;
2878  *yuv2packed2 = yuv2argb32_full_2_c;
2879  *yuv2packed1 = yuv2argb32_full_1_c;
2880  } else
2881 #endif /* CONFIG_SWSCALE_ALPHA */
2882  {
2883  *yuv2packedX = yuv2xrgb32_full_X_c;
2884  *yuv2packed2 = yuv2xrgb32_full_2_c;
2885  *yuv2packed1 = yuv2xrgb32_full_1_c;
2886  }
2887 #endif /* !CONFIG_SMALL */
2888  break;
2889  case AV_PIX_FMT_BGRA:
2890 #if CONFIG_SMALL
2891  *yuv2packedX = yuv2bgra32_full_X_c;
2892  *yuv2packed2 = yuv2bgra32_full_2_c;
2893  *yuv2packed1 = yuv2bgra32_full_1_c;
2894 #else
2895 #if CONFIG_SWSCALE_ALPHA
2896  if (c->needAlpha) {
2897  *yuv2packedX = yuv2bgra32_full_X_c;
2898  *yuv2packed2 = yuv2bgra32_full_2_c;
2899  *yuv2packed1 = yuv2bgra32_full_1_c;
2900  } else
2901 #endif /* CONFIG_SWSCALE_ALPHA */
2902  {
2903  *yuv2packedX = yuv2bgrx32_full_X_c;
2904  *yuv2packed2 = yuv2bgrx32_full_2_c;
2905  *yuv2packed1 = yuv2bgrx32_full_1_c;
2906  }
2907 #endif /* !CONFIG_SMALL */
2908  break;
2909  case AV_PIX_FMT_ABGR:
2910 #if CONFIG_SMALL
2911  *yuv2packedX = yuv2abgr32_full_X_c;
2912  *yuv2packed2 = yuv2abgr32_full_2_c;
2913  *yuv2packed1 = yuv2abgr32_full_1_c;
2914 #else
2915 #if CONFIG_SWSCALE_ALPHA
2916  if (c->needAlpha) {
2917  *yuv2packedX = yuv2abgr32_full_X_c;
2918  *yuv2packed2 = yuv2abgr32_full_2_c;
2919  *yuv2packed1 = yuv2abgr32_full_1_c;
2920  } else
2921 #endif /* CONFIG_SWSCALE_ALPHA */
2922  {
2923  *yuv2packedX = yuv2xbgr32_full_X_c;
2924  *yuv2packed2 = yuv2xbgr32_full_2_c;
2925  *yuv2packed1 = yuv2xbgr32_full_1_c;
2926  }
2927 #endif /* !CONFIG_SMALL */
2928  break;
2929  case AV_PIX_FMT_RGBA64LE:
2930 #if CONFIG_SWSCALE_ALPHA
2931  if (c->needAlpha) {
2932  *yuv2packedX = yuv2rgba64le_full_X_c;
2933  *yuv2packed2 = yuv2rgba64le_full_2_c;
2934  *yuv2packed1 = yuv2rgba64le_full_1_c;
2935  } else
2936 #endif /* CONFIG_SWSCALE_ALPHA */
2937  {
2938  *yuv2packedX = yuv2rgbx64le_full_X_c;
2939  *yuv2packed2 = yuv2rgbx64le_full_2_c;
2940  *yuv2packed1 = yuv2rgbx64le_full_1_c;
2941  }
2942  break;
2943  case AV_PIX_FMT_RGBA64BE:
2944 #if CONFIG_SWSCALE_ALPHA
2945  if (c->needAlpha) {
2946  *yuv2packedX = yuv2rgba64be_full_X_c;
2947  *yuv2packed2 = yuv2rgba64be_full_2_c;
2948  *yuv2packed1 = yuv2rgba64be_full_1_c;
2949  } else
2950 #endif /* CONFIG_SWSCALE_ALPHA */
2951  {
2952  *yuv2packedX = yuv2rgbx64be_full_X_c;
2953  *yuv2packed2 = yuv2rgbx64be_full_2_c;
2954  *yuv2packed1 = yuv2rgbx64be_full_1_c;
2955  }
2956  break;
2957  case AV_PIX_FMT_BGRA64LE:
2958 #if CONFIG_SWSCALE_ALPHA
2959  if (c->needAlpha) {
2960  *yuv2packedX = yuv2bgra64le_full_X_c;
2961  *yuv2packed2 = yuv2bgra64le_full_2_c;
2962  *yuv2packed1 = yuv2bgra64le_full_1_c;
2963  } else
2964 #endif /* CONFIG_SWSCALE_ALPHA */
2965  {
2966  *yuv2packedX = yuv2bgrx64le_full_X_c;
2967  *yuv2packed2 = yuv2bgrx64le_full_2_c;
2968  *yuv2packed1 = yuv2bgrx64le_full_1_c;
2969  }
2970  break;
2971  case AV_PIX_FMT_BGRA64BE:
2972 #if CONFIG_SWSCALE_ALPHA
2973  if (c->needAlpha) {
2974  *yuv2packedX = yuv2bgra64be_full_X_c;
2975  *yuv2packed2 = yuv2bgra64be_full_2_c;
2976  *yuv2packed1 = yuv2bgra64be_full_1_c;
2977  } else
2978 #endif /* CONFIG_SWSCALE_ALPHA */
2979  {
2980  *yuv2packedX = yuv2bgrx64be_full_X_c;
2981  *yuv2packed2 = yuv2bgrx64be_full_2_c;
2982  *yuv2packed1 = yuv2bgrx64be_full_1_c;
2983  }
2984  break;
2985 
2986  case AV_PIX_FMT_RGB24:
2987  *yuv2packedX = yuv2rgb24_full_X_c;
2988  *yuv2packed2 = yuv2rgb24_full_2_c;
2989  *yuv2packed1 = yuv2rgb24_full_1_c;
2990  break;
2991  case AV_PIX_FMT_BGR24:
2992  *yuv2packedX = yuv2bgr24_full_X_c;
2993  *yuv2packed2 = yuv2bgr24_full_2_c;
2994  *yuv2packed1 = yuv2bgr24_full_1_c;
2995  break;
2996  case AV_PIX_FMT_RGB48LE:
2997  *yuv2packedX = yuv2rgb48le_full_X_c;
2998  *yuv2packed2 = yuv2rgb48le_full_2_c;
2999  *yuv2packed1 = yuv2rgb48le_full_1_c;
3000  break;
3001  case AV_PIX_FMT_BGR48LE:
3002  *yuv2packedX = yuv2bgr48le_full_X_c;
3003  *yuv2packed2 = yuv2bgr48le_full_2_c;
3004  *yuv2packed1 = yuv2bgr48le_full_1_c;
3005  break;
3006  case AV_PIX_FMT_RGB48BE:
3007  *yuv2packedX = yuv2rgb48be_full_X_c;
3008  *yuv2packed2 = yuv2rgb48be_full_2_c;
3009  *yuv2packed1 = yuv2rgb48be_full_1_c;
3010  break;
3011  case AV_PIX_FMT_BGR48BE:
3012  *yuv2packedX = yuv2bgr48be_full_X_c;
3013  *yuv2packed2 = yuv2bgr48be_full_2_c;
3014  *yuv2packed1 = yuv2bgr48be_full_1_c;
3015  break;
3016  case AV_PIX_FMT_BGR4_BYTE:
3017  *yuv2packedX = yuv2bgr4_byte_full_X_c;
3018  *yuv2packed2 = yuv2bgr4_byte_full_2_c;
3019  *yuv2packed1 = yuv2bgr4_byte_full_1_c;
3020  break;
3021  case AV_PIX_FMT_RGB4_BYTE:
3022  *yuv2packedX = yuv2rgb4_byte_full_X_c;
3023  *yuv2packed2 = yuv2rgb4_byte_full_2_c;
3024  *yuv2packed1 = yuv2rgb4_byte_full_1_c;
3025  break;
3026  case AV_PIX_FMT_BGR8:
3027  *yuv2packedX = yuv2bgr8_full_X_c;
3028  *yuv2packed2 = yuv2bgr8_full_2_c;
3029  *yuv2packed1 = yuv2bgr8_full_1_c;
3030  break;
3031  case AV_PIX_FMT_RGB8:
3032  *yuv2packedX = yuv2rgb8_full_X_c;
3033  *yuv2packed2 = yuv2rgb8_full_2_c;
3034  *yuv2packed1 = yuv2rgb8_full_1_c;
3035  break;
3036  case AV_PIX_FMT_GBRP:
3037  case AV_PIX_FMT_GBRP9BE:
3038  case AV_PIX_FMT_GBRP9LE:
3039  case AV_PIX_FMT_GBRP10BE:
3040  case AV_PIX_FMT_GBRP10LE:
3041  case AV_PIX_FMT_GBRP12BE:
3042  case AV_PIX_FMT_GBRP12LE:
3043  case AV_PIX_FMT_GBRP14BE:
3044  case AV_PIX_FMT_GBRP14LE:
3045  case AV_PIX_FMT_GBRAP:
3046  case AV_PIX_FMT_GBRAP10BE:
3047  case AV_PIX_FMT_GBRAP10LE:
3048  case AV_PIX_FMT_GBRAP12BE:
3049  case AV_PIX_FMT_GBRAP12LE:
3050  case AV_PIX_FMT_GBRAP14BE:
3051  case AV_PIX_FMT_GBRAP14LE:
3052  *yuv2anyX = yuv2gbrp_full_X_c;
3053  break;
3054  case AV_PIX_FMT_GBRP16BE:
3055  case AV_PIX_FMT_GBRP16LE:
3056  case AV_PIX_FMT_GBRAP16BE:
3057  case AV_PIX_FMT_GBRAP16LE:
3058  *yuv2anyX = yuv2gbrp16_full_X_c;
3059  break;
3060  case AV_PIX_FMT_GBRPF32BE:
3061  case AV_PIX_FMT_GBRPF32LE:
3062  case AV_PIX_FMT_GBRAPF32BE:
3063  case AV_PIX_FMT_GBRAPF32LE:
3064  *yuv2anyX = yuv2gbrpf32_full_X_c;
3065  break;
3066  }
3067  if (!*yuv2packedX && !*yuv2anyX)
3068  goto YUV_PACKED;
3069  } else {
3070  YUV_PACKED:
3071  switch (dstFormat) {
3072  case AV_PIX_FMT_RGBA64LE:
3073 #if CONFIG_SWSCALE_ALPHA
3074  if (c->needAlpha) {
3075  *yuv2packed1 = yuv2rgba64le_1_c;
3076  *yuv2packed2 = yuv2rgba64le_2_c;
3077  *yuv2packedX = yuv2rgba64le_X_c;
3078  } else
3079 #endif /* CONFIG_SWSCALE_ALPHA */
3080  {
3081  *yuv2packed1 = yuv2rgbx64le_1_c;
3082  *yuv2packed2 = yuv2rgbx64le_2_c;
3083  *yuv2packedX = yuv2rgbx64le_X_c;
3084  }
3085  break;
3086  case AV_PIX_FMT_RGBA64BE:
3087 #if CONFIG_SWSCALE_ALPHA
3088  if (c->needAlpha) {
3089  *yuv2packed1 = yuv2rgba64be_1_c;
3090  *yuv2packed2 = yuv2rgba64be_2_c;
3091  *yuv2packedX = yuv2rgba64be_X_c;
3092  } else
3093 #endif /* CONFIG_SWSCALE_ALPHA */
3094  {
3095  *yuv2packed1 = yuv2rgbx64be_1_c;
3096  *yuv2packed2 = yuv2rgbx64be_2_c;
3097  *yuv2packedX = yuv2rgbx64be_X_c;
3098  }
3099  break;
3100  case AV_PIX_FMT_BGRA64LE:
3101 #if CONFIG_SWSCALE_ALPHA
3102  if (c->needAlpha) {
3103  *yuv2packed1 = yuv2bgra64le_1_c;
3104  *yuv2packed2 = yuv2bgra64le_2_c;
3105  *yuv2packedX = yuv2bgra64le_X_c;
3106  } else
3107 #endif /* CONFIG_SWSCALE_ALPHA */
3108  {
3109  *yuv2packed1 = yuv2bgrx64le_1_c;
3110  *yuv2packed2 = yuv2bgrx64le_2_c;
3111  *yuv2packedX = yuv2bgrx64le_X_c;
3112  }
3113  break;
3114  case AV_PIX_FMT_BGRA64BE:
3115 #if CONFIG_SWSCALE_ALPHA
3116  if (c->needAlpha) {
3117  *yuv2packed1 = yuv2bgra64be_1_c;
3118  *yuv2packed2 = yuv2bgra64be_2_c;
3119  *yuv2packedX = yuv2bgra64be_X_c;
3120  } else
3121 #endif /* CONFIG_SWSCALE_ALPHA */
3122  {
3123  *yuv2packed1 = yuv2bgrx64be_1_c;
3124  *yuv2packed2 = yuv2bgrx64be_2_c;
3125  *yuv2packedX = yuv2bgrx64be_X_c;
3126  }
3127  break;
3128  case AV_PIX_FMT_RGB48LE:
3129  *yuv2packed1 = yuv2rgb48le_1_c;
3130  *yuv2packed2 = yuv2rgb48le_2_c;
3131  *yuv2packedX = yuv2rgb48le_X_c;
3132  break;
3133  case AV_PIX_FMT_RGB48BE:
3134  *yuv2packed1 = yuv2rgb48be_1_c;
3135  *yuv2packed2 = yuv2rgb48be_2_c;
3136  *yuv2packedX = yuv2rgb48be_X_c;
3137  break;
3138  case AV_PIX_FMT_BGR48LE:
3139  *yuv2packed1 = yuv2bgr48le_1_c;
3140  *yuv2packed2 = yuv2bgr48le_2_c;
3141  *yuv2packedX = yuv2bgr48le_X_c;
3142  break;
3143  case AV_PIX_FMT_BGR48BE:
3144  *yuv2packed1 = yuv2bgr48be_1_c;
3145  *yuv2packed2 = yuv2bgr48be_2_c;
3146  *yuv2packedX = yuv2bgr48be_X_c;
3147  break;
3148  case AV_PIX_FMT_RGB32:
3149  case AV_PIX_FMT_BGR32:
3150 #if CONFIG_SMALL
3151  *yuv2packed1 = yuv2rgb32_1_c;
3152  *yuv2packed2 = yuv2rgb32_2_c;
3153  *yuv2packedX = yuv2rgb32_X_c;
3154 #else
3155 #if CONFIG_SWSCALE_ALPHA
3156  if (c->needAlpha) {
3157  *yuv2packed1 = yuv2rgba32_1_c;
3158  *yuv2packed2 = yuv2rgba32_2_c;
3159  *yuv2packedX = yuv2rgba32_X_c;
3160  } else
3161 #endif /* CONFIG_SWSCALE_ALPHA */
3162  {
3163  *yuv2packed1 = yuv2rgbx32_1_c;
3164  *yuv2packed2 = yuv2rgbx32_2_c;
3165  *yuv2packedX = yuv2rgbx32_X_c;
3166  }
3167 #endif /* !CONFIG_SMALL */
3168  break;
3169  case AV_PIX_FMT_RGB32_1:
3170  case AV_PIX_FMT_BGR32_1:
3171 #if CONFIG_SMALL
3172  *yuv2packed1 = yuv2rgb32_1_1_c;
3173  *yuv2packed2 = yuv2rgb32_1_2_c;
3174  *yuv2packedX = yuv2rgb32_1_X_c;
3175 #else
3176 #if CONFIG_SWSCALE_ALPHA
3177  if (c->needAlpha) {
3178  *yuv2packed1 = yuv2rgba32_1_1_c;
3179  *yuv2packed2 = yuv2rgba32_1_2_c;
3180  *yuv2packedX = yuv2rgba32_1_X_c;
3181  } else
3182 #endif /* CONFIG_SWSCALE_ALPHA */
3183  {
3184  *yuv2packed1 = yuv2rgbx32_1_1_c;
3185  *yuv2packed2 = yuv2rgbx32_1_2_c;
3186  *yuv2packedX = yuv2rgbx32_1_X_c;
3187  }
3188 #endif /* !CONFIG_SMALL */
3189  break;
3190  case AV_PIX_FMT_RGB24:
3191  *yuv2packed1 = yuv2rgb24_1_c;
3192  *yuv2packed2 = yuv2rgb24_2_c;
3193  *yuv2packedX = yuv2rgb24_X_c;
3194  break;
3195  case AV_PIX_FMT_BGR24:
3196  *yuv2packed1 = yuv2bgr24_1_c;
3197  *yuv2packed2 = yuv2bgr24_2_c;
3198  *yuv2packedX = yuv2bgr24_X_c;
3199  break;
3200  case AV_PIX_FMT_RGB565LE:
3201  case AV_PIX_FMT_RGB565BE:
3202  case AV_PIX_FMT_BGR565LE:
3203  case AV_PIX_FMT_BGR565BE:
3204  *yuv2packed1 = yuv2rgb16_1_c;
3205  *yuv2packed2 = yuv2rgb16_2_c;
3206  *yuv2packedX = yuv2rgb16_X_c;
3207  break;
3208  case AV_PIX_FMT_RGB555LE:
3209  case AV_PIX_FMT_RGB555BE:
3210  case AV_PIX_FMT_BGR555LE:
3211  case AV_PIX_FMT_BGR555BE:
3212  *yuv2packed1 = yuv2rgb15_1_c;
3213  *yuv2packed2 = yuv2rgb15_2_c;
3214  *yuv2packedX = yuv2rgb15_X_c;
3215  break;
3216  case AV_PIX_FMT_RGB444LE:
3217  case AV_PIX_FMT_RGB444BE:
3218  case AV_PIX_FMT_BGR444LE:
3219  case AV_PIX_FMT_BGR444BE:
3220  *yuv2packed1 = yuv2rgb12_1_c;
3221  *yuv2packed2 = yuv2rgb12_2_c;
3222  *yuv2packedX = yuv2rgb12_X_c;
3223  break;
3224  case AV_PIX_FMT_RGB8:
3225  case AV_PIX_FMT_BGR8:
3226  *yuv2packed1 = yuv2rgb8_1_c;
3227  *yuv2packed2 = yuv2rgb8_2_c;
3228  *yuv2packedX = yuv2rgb8_X_c;
3229  break;
3230  case AV_PIX_FMT_RGB4:
3231  case AV_PIX_FMT_BGR4:
3232  *yuv2packed1 = yuv2rgb4_1_c;
3233  *yuv2packed2 = yuv2rgb4_2_c;
3234  *yuv2packedX = yuv2rgb4_X_c;
3235  break;
3236  case AV_PIX_FMT_RGB4_BYTE:
3237  case AV_PIX_FMT_BGR4_BYTE:
3238  *yuv2packed1 = yuv2rgb4b_1_c;
3239  *yuv2packed2 = yuv2rgb4b_2_c;
3240  *yuv2packedX = yuv2rgb4b_X_c;
3241  break;
3242  case AV_PIX_FMT_X2RGB10LE:
3243  case AV_PIX_FMT_X2RGB10BE:
3244  *yuv2packed1 = yuv2x2rgb10_1_c;
3245  *yuv2packed2 = yuv2x2rgb10_2_c;
3246  *yuv2packedX = yuv2x2rgb10_X_c;
3247  break;
3248  case AV_PIX_FMT_X2BGR10LE:
3249  case AV_PIX_FMT_X2BGR10BE:
3250  *yuv2packed1 = yuv2x2bgr10_1_c;
3251  *yuv2packed2 = yuv2x2bgr10_2_c;
3252  *yuv2packedX = yuv2x2bgr10_X_c;
3253  break;
3254  }
3255  }
3256  switch (dstFormat) {
3257  case AV_PIX_FMT_MONOWHITE:
3258  *yuv2packed1 = yuv2monowhite_1_c;
3259  *yuv2packed2 = yuv2monowhite_2_c;
3260  *yuv2packedX = yuv2monowhite_X_c;
3261  break;
3262  case AV_PIX_FMT_MONOBLACK:
3263  *yuv2packed1 = yuv2monoblack_1_c;
3264  *yuv2packed2 = yuv2monoblack_2_c;
3265  *yuv2packedX = yuv2monoblack_X_c;
3266  break;
3267  case AV_PIX_FMT_YUYV422:
3268  *yuv2packed1 = yuv2yuyv422_1_c;
3269  *yuv2packed2 = yuv2yuyv422_2_c;
3270  *yuv2packedX = yuv2yuyv422_X_c;
3271  break;
3272  case AV_PIX_FMT_YVYU422:
3273  *yuv2packed1 = yuv2yvyu422_1_c;
3274  *yuv2packed2 = yuv2yvyu422_2_c;
3275  *yuv2packedX = yuv2yvyu422_X_c;
3276  break;
3277  case AV_PIX_FMT_UYVY422:
3278  *yuv2packed1 = yuv2uyvy422_1_c;
3279  *yuv2packed2 = yuv2uyvy422_2_c;
3280  *yuv2packedX = yuv2uyvy422_X_c;
3281  break;
3282  case AV_PIX_FMT_YA8:
3283  *yuv2packed1 = yuv2ya8_1_c;
3284  *yuv2packed2 = yuv2ya8_2_c;
3285  *yuv2packedX = yuv2ya8_X_c;
3286  break;
3287  case AV_PIX_FMT_YA16LE:
3288  *yuv2packed1 = yuv2ya16le_1_c;
3289  *yuv2packed2 = yuv2ya16le_2_c;
3290  *yuv2packedX = yuv2ya16le_X_c;
3291  break;
3292  case AV_PIX_FMT_YA16BE:
3293  *yuv2packed1 = yuv2ya16be_1_c;
3294  *yuv2packed2 = yuv2ya16be_2_c;
3295  *yuv2packedX = yuv2ya16be_X_c;
3296  break;
3297  case AV_PIX_FMT_AYUV64LE:
3298  *yuv2packedX = yuv2ayuv64le_X_c;
3299  break;
3300  case AV_PIX_FMT_VUYA:
3301  *yuv2packedX = yuv2vuya_X_c;
3302  break;
3303  case AV_PIX_FMT_VUYX:
3304  *yuv2packedX = yuv2vuyx_X_c;
3305  break;
3306  case AV_PIX_FMT_XV30LE:
3307  *yuv2packedX = yuv2xv30le_X_c;
3308  break;
3309  case AV_PIX_FMT_XV36LE:
3310  *yuv2packedX = yuv2xv36le_X_c;
3311  break;
3312  case AV_PIX_FMT_Y210LE:
3313  *yuv2packedX = yuv2y210le_X_c;
3314  break;
3315  case AV_PIX_FMT_Y212LE:
3316  *yuv2packedX = yuv2y212le_X_c;
3317  break;
3318  }
3319 }
yuv2packed2_fn
void(* yuv2packed2_fn)(struct SwsContext *c, const int16_t *lumSrc[2], const int16_t *chrUSrc[2], const int16_t *chrVSrc[2], const int16_t *alpSrc[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output by doing bilinear scalin...
Definition: swscale_internal.h:221
A
#define A(x)
Definition: vpx_arith.h:28
yuv2xv30le_X_c
static void yuv2xv30le_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
Definition: output.c:2617
AV_PIX_FMT_XV30LE
@ AV_PIX_FMT_XV30LE
packed XVYU 4:4:4, 32bpp, (msb)2X 10V 10Y 10U(lsb), little-endian, variant of Y410 where alpha channe...
Definition: pixfmt.h:412
yuv2planar1_fn
void(* yuv2planar1_fn)(const int16_t *src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Write one line of horizontally scaled data to planar output without any additional vertical scaling (...
Definition: swscale_internal.h:115
yuv2packed1_fn
void(* yuv2packed1_fn)(struct SwsContext *c, const int16_t *lumSrc, const int16_t *chrUSrc[2], const int16_t *chrVSrc[2], const int16_t *alpSrc, uint8_t *dest, int dstW, int uvalpha, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output without any additional v...
Definition: swscale_internal.h:188
YUV2PACKEDWRAPPER
#define YUV2PACKEDWRAPPER(name, base, ext, fmt)
Definition: output.c:764
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
AV_PIX_FMT_BGR48LE
@ AV_PIX_FMT_BGR48LE
packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as lit...
Definition: pixfmt.h:139
av_clip
#define av_clip
Definition: common.h:96
ff_dither_4x4_16
const uint8_t ff_dither_4x4_16[][8]
Definition: output.c:51
X_DITHER
#define X_DITHER(u, v)
r
const char * r
Definition: vf_curves.c:126
acc
int acc
Definition: yuv2rgb.c:554
AV_PIX_FMT_YA8
@ AV_PIX_FMT_YA8
8 bits gray, 8 bits alpha
Definition: pixfmt.h:133
AV_PIX_FMT_BGRA64BE
@ AV_PIX_FMT_BGRA64BE
packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:197
yuv2ya16_2_c_template
static av_always_inline void yuv2ya16_2_c_template(SwsContext *c, const int32_t *buf[2], const int32_t *unused_ubuf[2], const int32_t *unused_vbuf[2], const int32_t *abuf[2], uint16_t *dest, int dstW, int yalpha, int unused_uvalpha, int y, enum AVPixelFormat target, int unused_hasAlpha, int unused_eightbytes, int is_be)
Definition: output.c:989
AV_WL32
#define AV_WL32(p, v)
Definition: intreadwrite.h:424
yuv2rgb_X_c_template
static av_always_inline void yuv2rgb_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:1712
mem_internal.h
AV_PIX_FMT_BGR32
#define AV_PIX_FMT_BGR32
Definition: pixfmt.h:443
AV_PIX_FMT_RGB444LE
@ AV_PIX_FMT_RGB444LE
packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), little-endian, X=unused/undefined
Definition: pixfmt.h:129
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:250
AV_PIX_FMT_GBRP16BE
@ AV_PIX_FMT_GBRP16BE
planar GBR 4:4:4 48bpp, big-endian
Definition: pixfmt.h:164
AV_PIX_FMT_GBRP10BE
@ AV_PIX_FMT_GBRP10BE
planar GBR 4:4:4 30bpp, big-endian
Definition: pixfmt.h:162
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2964
ff_dither_8x8_32
const uint8_t ff_dither_8x8_32[][8]
Definition: output.c:59
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:120
yuv2rgb_full_1_c_template
static av_always_inline void yuv2rgb_full_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:2164
SWS_DITHER_A_DITHER
@ SWS_DITHER_A_DITHER
Definition: swscale_internal.h:74
accumulate_bit
#define accumulate_bit(acc, val)
yuv2ya16_1_c_template
static av_always_inline void yuv2ya16_1_c_template(SwsContext *c, const int32_t *buf0, const int32_t *unused_ubuf[2], const int32_t *unused_vbuf[2], const int32_t *abuf0, uint16_t *dest, int dstW, int unused_uvalpha, int y, enum AVPixelFormat target, int unused_hasAlpha, int unused_eightbytes, int is_be)
Definition: output.c:1022
pixdesc.h
AV_PIX_FMT_RGBA64BE
@ AV_PIX_FMT_RGBA64BE
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:195
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
AV_PIX_FMT_GBRAPF32LE
@ AV_PIX_FMT_GBRAPF32LE
IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, little-endian.
Definition: pixfmt.h:341
AV_PIX_FMT_X2BGR10BE
@ AV_PIX_FMT_X2BGR10BE
packed BGR 10:10:10, 30bpp, (msb)2X 10B 10G 10R(lsb), big-endian, X=unused/undefined
Definition: pixfmt.h:384
AV_PIX_FMT_GBRPF32BE
@ AV_PIX_FMT_GBRPF32BE
IEEE-754 single precision planar GBR 4:4:4, 96bpp, big-endian.
Definition: pixfmt.h:338
yuv2nv12cX_16_c_template
static av_always_inline void yuv2nv12cX_16_c_template(int big_endian, const uint8_t *chrDither, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest8, int chrDstW, int output_bits)
Definition: output.c:189
b
#define b
Definition: input.c:41
yuv2planeX
static void FUNC() yuv2planeX(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Definition: swscale_ppc_template.c:84
R
#define R
Definition: huffyuv.h:44
AV_PIX_FMT_MONOWHITE
@ AV_PIX_FMT_MONOWHITE
Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb.
Definition: pixfmt.h:75
AV_PIX_FMT_RGB32_1
#define AV_PIX_FMT_RGB32_1
Definition: pixfmt.h:442
yuv2vuya_X_c
static void yuv2vuya_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
Definition: output.c:2727
b_r
#define b_r
yuv2p01xl1_c
static void yuv2p01xl1_c(const int16_t *src, uint16_t *dest, int dstW, int big_endian, int output_bits)
Definition: output.c:473
AV_PIX_FMT_GBRP14BE
@ AV_PIX_FMT_GBRP14BE
planar GBR 4:4:4 42bpp, big-endian
Definition: pixfmt.h:274
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
AV_PIX_FMT_BGRA
@ AV_PIX_FMT_BGRA
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:95
mathematics.h
yuv2rgb_full_X_c_template
static av_always_inline void yuv2rgb_full_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:2069
filter
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
Definition: filter_design.txt:228
yuv2422_2_c_template
static av_always_inline void yuv2422_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target)
Definition: output.c:857
av_float2int
static av_always_inline uint32_t av_float2int(float f)
Reinterpret a float as a 32-bit integer.
Definition: intfloat.h:50
yuv2plane1_8_c
static void yuv2plane1_8_c(const int16_t *src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Definition: output.c:420
AV_PIX_FMT_GRAYF32LE
@ AV_PIX_FMT_GRAYF32LE
IEEE-754 single precision Y, 32bpp, little-endian.
Definition: pixfmt.h:361
yuv2planeX_10_c_template
static av_always_inline void yuv2planeX_10_c_template(const int16_t *filter, int filterSize, const int16_t **src, uint16_t *dest, int dstW, int big_endian, int output_bits)
Definition: output.c:340
AV_PIX_FMT_GBRAP14BE
@ AV_PIX_FMT_GBRAP14BE
planar GBR 4:4:4:4 56bpp, big-endian
Definition: pixfmt.h:429
AV_PIX_FMT_RGB555BE
@ AV_PIX_FMT_RGB555BE
packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined
Definition: pixfmt.h:107
AV_PIX_FMT_AYUV64LE
@ AV_PIX_FMT_AYUV64LE
packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
Definition: pixfmt.h:299
SH
#define SH(val, pdst)
Definition: generic_macros_msa.h:154
AV_PIX_FMT_GBRAP12LE
@ AV_PIX_FMT_GBRAP12LE
planar GBR 4:4:4:4 48bpp, little-endian
Definition: pixfmt.h:308
is16BPS
static av_always_inline int is16BPS(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:703
rgb
Definition: rpzaenc.c:60
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:205
yuv2anyX_fn
void(* yuv2anyX_fn)(struct SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t **dest, int dstW, int y)
Write one line of horizontally scaled Y/U/V/A to YUV/RGB output by doing multi-point vertical scaling...
Definition: swscale_internal.h:287
yuv2422_X_c_template
static av_always_inline void yuv2422_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, enum AVPixelFormat target)
Definition: output.c:818
yuv2mono_1_c_template
static av_always_inline void yuv2mono_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target)
Definition: output.c:714
yuv2plane1_16_c_template
static av_always_inline void yuv2plane1_16_c_template(const int32_t *src, uint16_t *dest, int dstW, int big_endian, int output_bits)
Definition: output.c:149
yuv2422_1_c_template
static av_always_inline void yuv2422_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target)
Definition: output.c:890
yuv2gbrp_full_X_c
static void yuv2gbrp_full_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t **dest, int dstW, int y)
Definition: output.c:2245
val
static double val(void *priv, double ch)
Definition: aeval.c:78
isNBPS
static av_always_inline int isNBPS(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:717
R_B
#define R_B
Definition: output.c:940
yuv2plane1_float
#define yuv2plane1_float(template, dest_type, BE_LE)
Definition: output.c:290
AV_PIX_FMT_VUYA
@ AV_PIX_FMT_VUYA
packed VUYA 4:4:4, 32bpp, VUYAVUYA...
Definition: pixfmt.h:398
yuv2planeX_16_c_template
static av_always_inline void yuv2planeX_16_c_template(const int16_t *filter, int filterSize, const int32_t **src, uint16_t *dest, int dstW, int big_endian, int output_bits)
Definition: output.c:163
r_b
#define r_b
AV_PIX_FMT_BGR8
@ AV_PIX_FMT_BGR8
packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
Definition: pixfmt.h:83
avassert.h
yuv2vuyx_X_c
static void yuv2vuyx_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
Definition: output.c:2738
YUV2PACKED16WRAPPER
#define YUV2PACKED16WRAPPER(name, base, ext, base_fmt, endianness, hasAlpha, eightbytes)
Definition: output.c:1546
av_cold
#define av_cold
Definition: attributes.h:90
yuv2mono_2_c_template
static av_always_inline void yuv2mono_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target)
Definition: output.c:653
yuv2planeX_float
#define yuv2planeX_float(template, dest_type, BE_LE)
Definition: output.c:297
YUVRGB_TABLE_HEADROOM
#define YUVRGB_TABLE_HEADROOM
Definition: swscale_internal.h:42
SWS_DITHER_ED
@ SWS_DITHER_ED
Definition: swscale_internal.h:73
float
float
Definition: af_crystalizer.c:121
AV_PIX_FMT_GBRAP16BE
@ AV_PIX_FMT_GBRAP16BE
planar GBRA 4:4:4:4 64bpp, big-endian
Definition: pixfmt.h:206
yuv2rgb_full_2_c_template
static av_always_inline void yuv2rgb_full_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:2119
intreadwrite.h
AV_PIX_FMT_GBRP16LE
@ AV_PIX_FMT_GBRP16LE
planar GBR 4:4:4 48bpp, little-endian
Definition: pixfmt.h:165
output_pixels
#define output_pixels(pos, Y1, U, Y2, V)
Definition: output.c:799
g
const char * g
Definition: vf_curves.c:127
AV_PIX_FMT_GBRP12LE
@ AV_PIX_FMT_GBRP12LE
planar GBR 4:4:4 36bpp, little-endian
Definition: pixfmt.h:273
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
B
#define B
Definition: huffyuv.h:42
yuv2plane1_float_bswap_c_template
static av_always_inline void yuv2plane1_float_bswap_c_template(const int32_t *src, uint32_t *dest, int dstW)
Definition: output.c:235
ff_dither_2x2_4
const uint8_t ff_dither_2x2_4[][8]
Definition: output.c:39
ff_dither_8x8_220
const uint8_t ff_dither_8x8_220[][8]
Definition: output.c:84
AV_PIX_FMT_FLAG_ALPHA
#define AV_PIX_FMT_FLAG_ALPHA
The pixel format has an alpha channel.
Definition: pixdesc.h:147
yuv2ya8_X_c
static void yuv2ya8_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
Definition: output.c:2532
AV_PIX_FMT_RGB4
@ AV_PIX_FMT_RGB4
packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in ...
Definition: pixfmt.h:87
AV_PIX_FMT_GBRP10LE
@ AV_PIX_FMT_GBRP10LE
planar GBR 4:4:4 30bpp, little-endian
Definition: pixfmt.h:163
yuv2p01xlX_c
static void yuv2p01xlX_c(const int16_t *filter, int filterSize, const int16_t **src, uint16_t *dest, int dstW, int big_endian, int output_bits)
Definition: output.c:487
yuv2rgba64_2_c_template
static av_always_inline void yuv2rgba64_2_c_template(SwsContext *c, const int32_t *buf[2], const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf[2], uint16_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes, int is_be)
Definition: output.c:1132
AV_PIX_FMT_BGR32_1
#define AV_PIX_FMT_BGR32_1
Definition: pixfmt.h:444
AV_PIX_FMT_RGBA
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:93
yuv2rgba64_full_1_c_template
static av_always_inline void yuv2rgba64_full_1_c_template(SwsContext *c, const int32_t *buf0, const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf0, uint16_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes, int is_be)
Definition: output.c:1420
if
if(ret)
Definition: filter_design.txt:179
isSemiPlanarYUV
static av_always_inline int isSemiPlanarYUV(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:749
yuv2nv12cX_16BE_c
static void yuv2nv12cX_16BE_c(enum AVPixelFormat dstFormat, const uint8_t *chrDither, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest8, int chrDstW)
Definition: output.c:397
yuv2NBPS
#define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t)
Definition: output.c:360
SWS_DITHER_NONE
@ SWS_DITHER_NONE
Definition: swscale_internal.h:70
AV_PIX_FMT_RGBA64
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:458
AV_PIX_FMT_RGB565LE
@ AV_PIX_FMT_RGB565LE
packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian
Definition: pixfmt.h:106
AV_PIX_FMT_GBRAPF32BE
@ AV_PIX_FMT_GBRAPF32BE
IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, big-endian.
Definition: pixfmt.h:340
AV_PIX_FMT_GBRAP12BE
@ AV_PIX_FMT_GBRAP12BE
planar GBR 4:4:4:4 48bpp, big-endian
Definition: pixfmt.h:307
av_clip_int16
#define av_clip_int16
Definition: common.h:111
AV_PIX_FMT_BGR48
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:459
NULL
#define NULL
Definition: coverity.c:32
AV_PIX_FMT_YUYV422
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:67
AV_PIX_FMT_RGB48LE
@ AV_PIX_FMT_RGB48LE
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as lit...
Definition: pixfmt.h:103
AV_PIX_FMT_YA16LE
@ AV_PIX_FMT_YA16LE
16 bits gray, 16 bits alpha (little-endian)
Definition: pixfmt.h:203
yuv2gbrp16_full_X_c
static void yuv2gbrp16_full_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrcx, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrcx, const int16_t **chrVSrcx, int chrFilterSize, const int16_t **alpSrcx, uint8_t **dest, int dstW, int y)
Definition: output.c:2327
AV_PIX_FMT_MONOBLACK
@ AV_PIX_FMT_MONOBLACK
Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb.
Definition: pixfmt.h:76
V
#define V
Definition: avdct.c:30
AV_PIX_FMT_BGR565LE
@ AV_PIX_FMT_BGR565LE
packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian
Definition: pixfmt.h:111
AV_PIX_FMT_RGBA64LE
@ AV_PIX_FMT_RGBA64LE
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:196
AV_PIX_FMT_Y210LE
@ AV_PIX_FMT_Y210LE
packed YUV 4:2:2 like YUYV422, 20bpp, data in the high bits, little-endian
Definition: pixfmt.h:379
AV_PIX_FMT_RGB8
@ AV_PIX_FMT_RGB8
packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
Definition: pixfmt.h:86
yuv2ya8_2_c
static void yuv2ya8_2_c(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
Definition: output.c:2501
AV_PIX_FMT_BGR4
@ AV_PIX_FMT_BGR4
packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in ...
Definition: pixfmt.h:84
AV_PIX_FMT_BGR555BE
@ AV_PIX_FMT_BGR555BE
packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), big-endian , X=unused/undefined
Definition: pixfmt.h:112
yuv2plane1_float_c_template
static av_always_inline void yuv2plane1_float_c_template(const int32_t *src, float *dest, int dstW)
Definition: output.c:219
A2
#define A2
Definition: binkdsp.c:32
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:94
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
AV_PIX_FMT_BGR4_BYTE
@ AV_PIX_FMT_BGR4_BYTE
packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
Definition: pixfmt.h:85
yuv2ya16_X_c_template
static av_always_inline void yuv2ya16_X_c_template(SwsContext *c, const int16_t *lumFilter, const int32_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int32_t **unused_chrUSrc, const int32_t **unused_chrVSrc, int unused_chrFilterSize, const int32_t **alpSrc, uint16_t *dest, int dstW, int y, enum AVPixelFormat target, int unused_hasAlpha, int unused_eightbytes, int is_be)
Definition: output.c:950
A_DITHER
#define A_DITHER(u, v)
AV_PIX_FMT_X2RGB10LE
@ AV_PIX_FMT_X2RGB10LE
packed RGB 10:10:10, 30bpp, (msb)2X 10R 10G 10B(lsb), little-endian, X=unused/undefined
Definition: pixfmt.h:381
isDataInHighBits
static av_always_inline int isDataInHighBits(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:912
d64
const uint8_t * d64
Definition: yuv2rgb.c:502
yuv2y2xx_wrapper
#define yuv2y2xx_wrapper(bits)
Definition: output.c:2751
AV_PIX_FMT_X2BGR10
#define AV_PIX_FMT_X2BGR10
Definition: pixfmt.h:527
isBE
static av_always_inline int isBE(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:724
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem_internal.h:87
shift
static int shift(int a, int b)
Definition: bonk.c:262
av_bswap32
#define av_bswap32
Definition: bswap.h:28
yuv2rgba64_full_X_c_template
static av_always_inline void yuv2rgba64_full_X_c_template(SwsContext *c, const int16_t *lumFilter, const int32_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int32_t **chrUSrc, const int32_t **chrVSrc, int chrFilterSize, const int32_t **alpSrc, uint16_t *dest, int dstW, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes, int is_be)
Definition: output.c:1302
AV_PIX_FMT_RGB444BE
@ AV_PIX_FMT_RGB444BE
packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), big-endian, X=unused/undefined
Definition: pixfmt.h:130
SWS_FULL_CHR_H_INT
#define SWS_FULL_CHR_H_INT
Definition: swscale.h:86
yuv2planeX_float_bswap_c_template
static av_always_inline void yuv2planeX_float_bswap_c_template(const int16_t *filter, int filterSize, const int32_t **src, uint32_t *dest, int dstW)
Definition: output.c:271
AV_PIX_FMT_YA16BE
@ AV_PIX_FMT_YA16BE
16 bits gray, 16 bits alpha (big-endian)
Definition: pixfmt.h:202
AV_PIX_FMT_RGB48
#define AV_PIX_FMT_RGB48
Definition: pixfmt.h:454
AV_PIX_FMT_BGR555
#define AV_PIX_FMT_BGR555
Definition: pixfmt.h:461
AV_PIX_FMT_GBRP9BE
@ AV_PIX_FMT_GBRP9BE
planar GBR 4:4:4 27bpp, big-endian
Definition: pixfmt.h:160
yuv2rgba64_full_2_c_template
static av_always_inline void yuv2rgba64_full_2_c_template(SwsContext *c, const int32_t *buf[2], const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf[2], uint16_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes, int is_be)
Definition: output.c:1367
AV_PIX_FMT_BGR444BE
@ AV_PIX_FMT_BGR444BE
packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), big-endian, X=unused/undefined
Definition: pixfmt.h:132
output_pixel
#define output_pixel(pos, val, bias, signedness)
Definition: output.c:2748
AV_PIX_FMT_GBRP9LE
@ AV_PIX_FMT_GBRP9LE
planar GBR 4:4:4 27bpp, little-endian
Definition: pixfmt.h:161
AV_WL16
#define AV_WL16(p, v)
Definition: intreadwrite.h:410
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:441
AV_PIX_FMT_GBRAP10LE
@ AV_PIX_FMT_GBRAP10LE
planar GBR 4:4:4:4 40bpp, little-endian
Definition: pixfmt.h:311
isSwappedChroma
static av_always_inline int isSwappedChroma(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:931
AV_PIX_FMT_BGR565BE
@ AV_PIX_FMT_BGR565BE
packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian
Definition: pixfmt.h:110
yuv2nv12cX_c
static void yuv2nv12cX_c(enum AVPixelFormat dstFormat, const uint8_t *chrDither, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest, int chrDstW)
Definition: output.c:430
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
attributes.h
yuv2rgb_write_full
static av_always_inline void yuv2rgb_write_full(SwsContext *c, uint8_t *dest, int i, int Y, int A, int U, int V, int y, enum AVPixelFormat target, int hasAlpha, int err[4])
Definition: output.c:1918
ff_dither_8x8_73
const uint8_t ff_dither_8x8_73[][8]
Definition: output.c:71
Y
#define Y
Definition: boxblur.h:37
yuv2rgb_write
static av_always_inline void yuv2rgb_write(uint8_t *_dest, int i, int Y1, int Y2, unsigned A1, unsigned A2, const void *_r, const void *_g, const void *_b, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:1586
AV_PIX_FMT_ARGB
@ AV_PIX_FMT_ARGB
packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
Definition: pixfmt.h:92
AV_PIX_FMT_BGRA64LE
@ AV_PIX_FMT_BGRA64LE
packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:198
yuv2planeX_8_c
static void yuv2planeX_8_c(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Definition: output.c:405
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:67
AV_PIX_FMT_BGRA64
#define AV_PIX_FMT_BGRA64
Definition: pixfmt.h:463
AV_PIX_FMT_RGB555LE
@ AV_PIX_FMT_RGB555LE
packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined
Definition: pixfmt.h:108
AV_PIX_FMT_RGB48BE
@ AV_PIX_FMT_RGB48BE
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big...
Definition: pixfmt.h:102
AV_PIX_FMT_YA16
#define AV_PIX_FMT_YA16
Definition: pixfmt.h:453
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
AV_PIX_FMT_BGR444
#define AV_PIX_FMT_BGR444
Definition: pixfmt.h:462
AV_PIX_FMT_RGB555
#define AV_PIX_FMT_RGB555
Definition: pixfmt.h:456
yuv2planeX_float_c_template
static av_always_inline void yuv2planeX_float_c_template(const int16_t *filter, int filterSize, const int32_t **src, float *dest, int dstW)
Definition: output.c:251
av_always_inline
#define av_always_inline
Definition: attributes.h:49
swscale_internal.h
yuv2interleavedX_fn
void(* yuv2interleavedX_fn)(enum AVPixelFormat dstFormat, const uint8_t *chrDither, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest, int dstW)
Write one line of horizontally scaled chroma to interleaved output with multi-point vertical scaling ...
Definition: swscale_internal.h:151
yuv2gbrpf32_full_X_c
static void yuv2gbrpf32_full_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrcx, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrcx, const int16_t **chrVSrcx, int chrFilterSize, const int16_t **alpSrcx, uint8_t **dest, int dstW, int y)
Definition: output.c:2399
AV_PIX_FMT_X2RGB10
#define AV_PIX_FMT_X2RGB10
Definition: pixfmt.h:526
AV_PIX_FMT_X2RGB10BE
@ AV_PIX_FMT_X2RGB10BE
packed RGB 10:10:10, 30bpp, (msb)2X 10R 10G 10B(lsb), big-endian, X=unused/undefined
Definition: pixfmt.h:382
AV_PIX_FMT_BGR565
#define AV_PIX_FMT_BGR565
Definition: pixfmt.h:460
yuv2vuyX_X_c
static void yuv2vuyX_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, int destHasAlpha)
Definition: output.c:2671
AV_PIX_FMT_RGB4_BYTE
@ AV_PIX_FMT_RGB4_BYTE
packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
Definition: pixfmt.h:88
AV_PIX_FMT_GBRPF32LE
@ AV_PIX_FMT_GBRPF32LE
IEEE-754 single precision planar GBR 4:4:4, 96bpp, little-endian.
Definition: pixfmt.h:339
AV_PIX_FMT_RGB565
#define AV_PIX_FMT_RGB565
Definition: pixfmt.h:455
AV_PIX_FMT_GBRAP16LE
@ AV_PIX_FMT_GBRAP16LE
planar GBRA 4:4:4:4 64bpp, little-endian
Definition: pixfmt.h:207
AV_PIX_FMT_YVYU422
@ AV_PIX_FMT_YVYU422
packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb
Definition: pixfmt.h:200
bswap.h
AV_PIX_FMT_Y212LE
@ AV_PIX_FMT_Y212LE
packed YUV 4:2:2 like YUYV422, 24bpp, data in the high bits, zeros in the low bits,...
Definition: pixfmt.h:409
AV_PIX_FMT_GRAYF32BE
@ AV_PIX_FMT_GRAYF32BE
IEEE-754 single precision Y, 32bpp, big-endian.
Definition: pixfmt.h:360
yuv2ya8_1_c
static void yuv2ya8_1_c(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y)
Definition: output.c:2475
YUV2RGBWRAPPER
#define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha)
Definition: output.c:1885
d128
const uint8_t * d128
Definition: yuv2rgb.c:553
AV_PIX_FMT_GBRP12BE
@ AV_PIX_FMT_GBRP12BE
planar GBR 4:4:4 36bpp, big-endian
Definition: pixfmt.h:272
AV_PIX_FMT_UYVY422
@ AV_PIX_FMT_UYVY422
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
Definition: pixfmt.h:81
U
#define U(x)
Definition: vpx_arith.h:37
yuv2planarX_fn
void(* yuv2planarX_fn)(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Write one line of horizontally scaled data to planar output with multi-point vertical scaling between...
Definition: swscale_internal.h:131
ff_sws_init_output_funcs
void ff_sws_init_output_funcs(SwsContext *c, yuv2planar1_fn *yuv2plane1, yuv2planarX_fn *yuv2planeX, yuv2interleavedX_fn *yuv2nv12cX, yuv2packed1_fn *yuv2packed1, yuv2packed2_fn *yuv2packed2, yuv2packedX_fn *yuv2packedX, yuv2anyX_fn *yuv2anyX)
yuv2p01x_wrapper
#define yuv2p01x_wrapper(bits)
Definition: output.c:531
yuv2packedX_fn
void(* yuv2packedX_fn)(struct SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output by doing multi-point ver...
Definition: swscale_internal.h:253
yuv2p01xcX_c
static void yuv2p01xcX_c(int big_endian, const uint8_t *chrDither, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest8, int chrDstW, int output_bits)
Definition: output.c:505
yuv2rgba64_1_c_template
static av_always_inline void yuv2rgba64_1_c_template(SwsContext *c, const int32_t *buf0, const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf0, uint16_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes, int is_be)
Definition: output.c:1198
av_clip_uint8
#define av_clip_uint8
Definition: common.h:102
G
#define G
Definition: huffyuv.h:43
AV_PIX_FMT_RGB565BE
@ AV_PIX_FMT_RGB565BE
packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian
Definition: pixfmt.h:105
av_clip_uint16
#define av_clip_uint16
Definition: common.h:108
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
desc
const char * desc
Definition: libsvtav1.c:83
d32
const uint8_t * d32
Definition: yuv2rgb.c:501
avutil.h
AV_PIX_FMT_X2BGR10LE
@ AV_PIX_FMT_X2BGR10LE
packed BGR 10:10:10, 30bpp, (msb)2X 10B 10G 10R(lsb), little-endian, X=unused/undefined
Definition: pixfmt.h:383
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
AV_PIX_FMT_BGR555LE
@ AV_PIX_FMT_BGR555LE
packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), little-endian, X=unused/undefined
Definition: pixfmt.h:113
yuv2rgb_1_c_template
static av_always_inline void yuv2rgb_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:1806
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
SWS_DITHER_AUTO
@ SWS_DITHER_AUTO
Definition: swscale_internal.h:71
AV_PIX_FMT_XV36LE
@ AV_PIX_FMT_XV36LE
packed XVYU 4:4:4, 48bpp, data in the high bits, zeros in the low bits, little-endian,...
Definition: pixfmt.h:415
B_R
#define B_R
Definition: output.c:941
AV_PIX_FMT_GBRP14LE
@ AV_PIX_FMT_GBRP14LE
planar GBR 4:4:4 42bpp, little-endian
Definition: pixfmt.h:275
int32_t
int32_t
Definition: audioconvert.c:56
yuv2rgb_2_c_template
static av_always_inline void yuv2rgb_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:1766
A1
#define A1
Definition: binkdsp.c:31
AV_PIX_FMT_GBRAP10BE
@ AV_PIX_FMT_GBRAP10BE
planar GBR 4:4:4:4 40bpp, big-endian
Definition: pixfmt.h:310
AV_PIX_FMT_VUYX
@ AV_PIX_FMT_VUYX
packed VUYX 4:4:4, 32bpp, Variant of VUYA where alpha channel is left undefined
Definition: pixfmt.h:403
av_bswap16
#define av_bswap16
Definition: bswap.h:27
SWS_DITHER_X_DITHER
@ SWS_DITHER_X_DITHER
Definition: swscale_internal.h:75
SwsContext
Definition: swscale_internal.h:299
AV_PIX_FMT_BGR444LE
@ AV_PIX_FMT_BGR444LE
packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), little-endian, X=unused/undefined
Definition: pixfmt.h:131
yuv2rgb
static void yuv2rgb(uint8_t *out, int ridx, int Y, int U, int V)
Definition: g2meet.c:262
yuv2xv36le_X_c
static void yuv2xv36le_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
Definition: output.c:2645
rgb2rgb.h
AV_PIX_FMT_GBRAP14LE
@ AV_PIX_FMT_GBRAP14LE
planar GBR 4:4:4:4 56bpp, little-endian
Definition: pixfmt.h:430
swscale.h
yuv2ayuv64le_X_c
static void yuv2ayuv64le_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **_lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **_chrUSrc, const int16_t **_chrVSrc, int chrFilterSize, const int16_t **_alpSrc, uint8_t *dest, int dstW, int y)
Definition: output.c:2568
yuv2rgba64_X_c_template
static av_always_inline void yuv2rgba64_X_c_template(SwsContext *c, const int16_t *lumFilter, const int32_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int32_t **chrUSrc, const int32_t **chrVSrc, int chrFilterSize, const int32_t **alpSrc, uint16_t *dest, int dstW, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes, int is_be)
Definition: output.c:1049
ff_dither_2x2_8
const uint8_t ff_dither_2x2_8[][8]
Definition: output.c:45
AV_PIX_FMT_BGR48BE
@ AV_PIX_FMT_BGR48BE
packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big...
Definition: pixfmt.h:138
AV_PIX_FMT_RGB444
#define AV_PIX_FMT_RGB444
Definition: pixfmt.h:457
dither
static const uint8_t dither[8][8]
Definition: vf_fspp.c:60