Go to the documentation of this file.
24 #include "config_components.h"
135 0x01, 0x01, 0x11, 0x11, 0x55, 0x55, 0xff,
140 0xff, 0xff, 0x0f, 0xff, 0x33, 0xff, 0x55,
145 0xff, 0x0f, 0xff, 0x33, 0xff, 0x55, 0xff
152 int bits_per_pixel,
int pass,
153 int color_type,
const uint8_t *
src)
155 int x,
mask, dsp_mask, j, src_x,
b, bpp;
162 switch (bits_per_pixel) {
165 for (x = 0; x <
width; x++) {
167 if ((dsp_mask << j) & 0x80) {
168 b = (
src[src_x >> 3] >> (7 - (src_x & 7))) & 1;
169 dst[x >> 3] &= 0xFF7F>>j;
170 dst[x >> 3] |=
b << (7 - j);
172 if ((
mask << j) & 0x80)
178 for (x = 0; x <
width; x++) {
179 int j2 = 2 * (x & 3);
181 if ((dsp_mask << j) & 0x80) {
182 b = (
src[src_x >> 2] >> (6 - 2*(src_x & 3))) & 3;
183 dst[x >> 2] &= 0xFF3F>>j2;
184 dst[x >> 2] |=
b << (6 - j2);
186 if ((
mask << j) & 0x80)
192 for (x = 0; x <
width; x++) {
195 if ((dsp_mask << j) & 0x80) {
196 b = (
src[src_x >> 1] >> (4 - 4*(src_x & 1))) & 15;
197 dst[x >> 1] &= 0xFF0F>>j2;
198 dst[x >> 1] |=
b << (4 - j2);
200 if ((
mask << j) & 0x80)
205 bpp = bits_per_pixel >> 3;
208 for (x = 0; x <
width; x++) {
210 if ((dsp_mask << j) & 0x80) {
214 if ((
mask << j) & 0x80)
225 for (
i = 0;
i <
w;
i++) {
226 int a,
b,
c,
p, pa, pb, pc;
239 if (pa <= pb && pa <= pc)
249 #define UNROLL1(bpp, op) \
258 for (; i <= size - bpp; i += bpp) { \
259 dst[i + 0] = r = op(r, src[i + 0], last[i + 0]); \
262 dst[i + 1] = g = op(g, src[i + 1], last[i + 1]); \
265 dst[i + 2] = b = op(b, src[i + 2], last[i + 2]); \
268 dst[i + 3] = a = op(a, src[i + 3], last[i + 3]); \
272 #define UNROLL_FILTER(op) \
275 } else if (bpp == 2) { \
277 } else if (bpp == 3) { \
279 } else if (bpp == 4) { \
282 for (; i < size; i++) { \
283 dst[i] = op(dst[i - bpp], src[i], last[i]); \
288 uint8_t *
src, uint8_t *last,
int size,
int bpp)
292 switch (filter_type) {
297 for (
i = 0;
i < bpp;
i++)
301 for (;
i <
size;
i += bpp) {
302 unsigned s = *(
int *)(
src +
i);
303 p = ((
s & 0x7f7f7f7f) + (
p & 0x7f7f7f7f)) ^ ((
s ^
p) & 0x80808080);
304 *(
int *)(
dst +
i) =
p;
307 #define OP_SUB(x, s, l) ((x) + (s))
315 for (
i = 0;
i < bpp;
i++) {
319 #define OP_AVG(x, s, l) (((((x) + (l)) >> 1) + (s)) & 0xff)
323 for (
i = 0;
i < bpp;
i++) {
327 if (bpp > 2 &&
size > 4) {
344 #define YUV2RGB(NAME, TYPE) \
345 static void deloco_ ## NAME(TYPE *dst, int size, int alpha) \
348 for (i = 0; i < size - 2; i += 3 + alpha) { \
349 int g = dst [i + 1]; \
360 if (
s->interlace_type) {
363 return 100 - 100 *
s->y /
s->cur_h;
370 uint8_t *ptr, *last_row;
373 if (!
s->interlace_type) {
374 ptr =
dst + dst_stride * (
s->y +
s->y_offset) +
s->x_offset *
s->bpp;
376 last_row =
s->last_row;
378 last_row = ptr - dst_stride;
381 last_row,
s->row_size,
s->bpp);
384 if (
s->bit_depth == 16) {
385 deloco_rgb16((uint16_t *)(ptr - dst_stride),
s->row_size / 2,
388 deloco_rgb8(ptr - dst_stride,
s->row_size,
393 if (
s->y ==
s->cur_h) {
396 if (
s->bit_depth == 16) {
397 deloco_rgb16((uint16_t *)ptr,
s->row_size / 2,
400 deloco_rgb8(ptr,
s->row_size,
408 ptr =
dst + dst_stride * (
s->y +
s->y_offset) +
s->x_offset *
s->bpp;
415 s->last_row,
s->pass_row_size,
s->bpp);
416 FFSWAP(uint8_t *,
s->last_row,
s->tmp_row);
417 FFSWAP(
unsigned int,
s->last_row_size,
s->tmp_row_size);
422 s->color_type,
s->last_row);
425 if (
s->y ==
s->cur_h) {
426 memset(
s->last_row, 0,
s->row_size);
437 s->crow_size =
s->pass_row_size + 1;
438 if (
s->pass_row_size != 0)
450 uint8_t *
dst, ptrdiff_t dst_stride)
452 z_stream *
const zstream = &
s->zstream.zstream;
455 zstream->next_in = gb->
buffer;
458 while (zstream->avail_in > 0) {
460 if (
ret != Z_OK &&
ret != Z_STREAM_END) {
464 if (zstream->avail_out == 0) {
468 zstream->avail_out =
s->crow_size;
469 zstream->next_out =
s->crow_buf;
471 if (
ret == Z_STREAM_END && zstream->avail_in > 0) {
473 "%d undecompressed bytes left in buffer\n", zstream->avail_in);
481 const uint8_t *data_end,
void *logctx)
484 z_stream *
const zstream = &z.
zstream;
491 zstream->next_in =
data;
492 zstream->avail_in = data_end -
data;
495 while (zstream->avail_in > 0) {
501 zstream->next_out = buf;
502 zstream->avail_out = buf_size - 1;
504 if (
ret != Z_OK &&
ret != Z_STREAM_END) {
508 bp->len += zstream->next_out - buf;
509 if (
ret == Z_STREAM_END)
513 bp->str[bp->len] = 0;
527 for (
i = 0;
i < size_in;
i++)
528 extra += !!(in[
i] & 0x80);
529 if (size_in == SIZE_MAX || extra > SIZE_MAX - size_in - 1)
534 for (
i = 0;
i < size_in;
i++) {
536 *(q++) = 0xC0 | (in[
i] >> 6);
537 *(q++) = 0x80 | (in[
i] & 0x3F);
548 size_t len = strlen(txt_utf8);
549 const char *ptr = txt_utf8;
550 const char *end = txt_utf8 +
len;
553 const uint8_t *exif_end;
556 while (*ptr++ !=
'\n') {
562 if (end - ptr < 4 || strncmp(
"exif", ptr, 4))
574 size_t nlen = exif_len * 10 + (*ptr -
'0');
589 if ((exif_len & ~SIZE_MAX) || end - ptr < 2 * exif_len)
604 ptr += strlen(
"Exif ") * 2 - 1;
606 exif_ptr =
s->exif_data->data;
607 exif_end = exif_ptr +
s->exif_data->size;
609 while (exif_ptr < exif_end) {
610 while (++ptr < end) {
611 if (*ptr >=
'0' && *ptr <=
'9') {
612 *exif_ptr = (*ptr -
'0') << 4;
615 if (*ptr >=
'a' && *ptr <=
'f') {
616 *exif_ptr = (*ptr -
'a' + 10) << 4;
620 while (++ptr < end) {
621 if (*ptr >=
'0' && *ptr <=
'9') {
622 *exif_ptr += *ptr -
'0';
625 if (*ptr >=
'a' && *ptr <=
'f') {
626 *exif_ptr += *ptr -
'a' + 10;
643 const char *keyword =
data;
644 const char *keyword_end = memchr(keyword, 0, data_end -
data);
645 char *kw_utf8 =
NULL, *txt_utf8 =
NULL;
652 data = keyword_end + 1;
655 if (
data == data_end)
666 text_len = data_end -
data;
680 if (!strcmp(kw_utf8,
"Raw profile type exif")) {
712 s->width =
s->cur_w = bytestream2_get_be32(gb);
713 s->height =
s->cur_h = bytestream2_get_be32(gb);
715 s->cur_w =
s->cur_h =
s->width =
s->height = 0;
719 s->bit_depth = bytestream2_get_byte(gb);
720 if (
s->bit_depth != 1 &&
s->bit_depth != 2 &&
s->bit_depth != 4 &&
721 s->bit_depth != 8 &&
s->bit_depth != 16) {
725 s->color_type = bytestream2_get_byte(gb);
726 s->compression_type = bytestream2_get_byte(gb);
727 if (
s->compression_type) {
731 s->filter_type = bytestream2_get_byte(gb);
732 s->interlace_type = bytestream2_get_byte(gb);
736 "compression_type=%d filter_type=%d interlace_type=%d\n",
737 s->width,
s->height,
s->bit_depth,
s->color_type,
738 s->compression_type,
s->filter_type,
s->interlace_type);
742 s->cur_w =
s->cur_h =
s->width =
s->height = 0;
799 if (
s->cicp_range == 0) {
802 }
else if (
s->cicp_range != 1) {
806 }
else if (
s->iccp_data) {
809 s->iccp_data_len, &sd);
813 memcpy(sd->
data,
s->iccp_data,
s->iccp_data_len);
816 }
else if (
s->have_srgb) {
819 }
else if (
s->have_chrm) {
838 if (
s->iccp_data ||
s->have_srgb ||
s->have_cicp) {
840 }
else if (
s->gamma) {
850 if (
s->gamma > 45355 &&
s->gamma < 45555)
852 else if (
s->gamma > 35614 &&
s->gamma < 35814)
854 else if (
s->gamma > 38362 &&
s->gamma < 38562)
856 else if (
s->gamma > 99900 &&
s->gamma < 100100)
862 if (!
s->have_cicp ||
s->cicp_range == 1)
870 if (!
s->has_trns &&
s->significant_bits > 0)
885 clli->
MaxCLL =
s->clli_max / 10000;
886 clli->
MaxFALL =
s->clli_avg / 10000;
899 for (
int i = 0;
i < 3;
i++) {
918 size_t byte_depth =
s->bit_depth > 8 ? 2 : 1;
933 s->bits_per_pixel =
s->bit_depth *
s->channels;
934 s->bpp = (
s->bits_per_pixel + 7) >> 3;
935 s->row_size = (
s->cur_w *
s->bits_per_pixel + 7) >> 3;
937 if ((
s->bit_depth == 2 ||
s->bit_depth == 4 ||
s->bit_depth == 8) &&
940 }
else if ((
s->bit_depth == 2 ||
s->bit_depth == 4 ||
s->bit_depth == 8) &&
943 }
else if ((
s->bit_depth == 2 ||
s->bit_depth == 4 ||
s->bit_depth == 8) &&
946 }
else if (
s->bit_depth == 16 &&
949 }
else if (
s->bit_depth == 16 &&
952 }
else if (
s->bit_depth == 16 &&
955 }
else if ((
s->bits_per_pixel == 1 ||
s->bits_per_pixel == 2 ||
s->bits_per_pixel == 4 ||
s->bits_per_pixel == 8) &&
960 }
else if (
s->bit_depth == 8 &&
963 }
else if (
s->bit_depth == 16 &&
968 "Bit depth %d color type %d",
969 s->bit_depth,
s->color_type);
993 "and color type %d with TRNS",
994 s->bit_depth,
s->color_type);
998 s->bpp += byte_depth;
1042 if (!
s->interlace_type) {
1043 s->crow_size =
s->row_size + 1;
1049 s->crow_size =
s->pass_row_size + 1;
1051 ff_dlog(avctx,
"row_size=%d crow_size =%d\n",
1052 s->row_size,
s->crow_size);
1056 memcpy(
p->data[1],
s->palette, 256 *
sizeof(uint32_t));
1061 if (
s->interlace_type ||
1073 s->crow_buf =
s->buffer + 15;
1074 s->zstream.zstream.avail_out =
s->crow_size;
1075 s->zstream.zstream.next_out =
s->crow_buf;
1082 s->bpp -= byte_depth;
1087 s->bpp += byte_depth;
1101 if ((length % 3) != 0 || length > 256 * 3)
1105 for (
i = 0;
i < n;
i++) {
1106 r = bytestream2_get_byte(gb);
1107 g = bytestream2_get_byte(gb);
1108 b = bytestream2_get_byte(gb);
1109 s->palette[
i] = (0xFF
U << 24) | (
r << 16) | (
g << 8) |
b;
1111 for (;
i < 256;
i++)
1112 s->palette[
i] = (0xFFU << 24);
1135 if (length > 256 || !(
s->hdr_state &
PNG_PLTE))
1138 for (
i = 0;
i < length;
i++) {
1139 unsigned v = bytestream2_get_byte(gb);
1140 s->palette[
i] = (
s->palette[
i] & 0x00ffffff) | (v << 24);
1148 for (
i = 0;
i < length / 2;
i++) {
1152 if (
s->bit_depth > 8)
1153 AV_WB16(&
s->transparent_color_be[2 *
i], v);
1155 s->transparent_color_be[
i] = v;
1171 while ((
s->iccp_name[cnt++] = bytestream2_get_byte(gb)) && cnt < 81);
1178 if (bytestream2_get_byte(gb) != 0) {
1191 s->iccp_data_len = bp.len;
1195 s->iccp_name[0] = 0;
1226 int b = bytestream2_get_byteu(gb);
1234 s->significant_bits =
bits;
1243 uint8_t *pd =
p->data[0];
1244 for (j = 0; j <
s->height; j++) {
1246 for (k = 7; k >= 1; k--)
1247 if ((
s->width&7) >= k)
1248 pd[8*
i + k - 1] = (pd[
i]>>8-k) & 1;
1249 for (
i--;
i >= 0;
i--) {
1250 pd[8*
i + 7]= pd[
i] & 1;
1251 pd[8*
i + 6]= (pd[
i]>>1) & 1;
1252 pd[8*
i + 5]= (pd[
i]>>2) & 1;
1253 pd[8*
i + 4]= (pd[
i]>>3) & 1;
1254 pd[8*
i + 3]= (pd[
i]>>4) & 1;
1255 pd[8*
i + 2]= (pd[
i]>>5) & 1;
1256 pd[8*
i + 1]= (pd[
i]>>6) & 1;
1257 pd[8*
i + 0]= pd[
i]>>7;
1259 pd +=
p->linesize[0];
1261 }
else if (
s->bits_per_pixel == 2) {
1263 uint8_t *pd =
p->data[0];
1264 for (j = 0; j <
s->height; j++) {
1267 if ((
s->width&3) >= 3) pd[4*
i + 2]= (pd[
i] >> 2) & 3;
1268 if ((
s->width&3) >= 2) pd[4*
i + 1]= (pd[
i] >> 4) & 3;
1269 if ((
s->width&3) >= 1) pd[4*
i + 0]= pd[
i] >> 6;
1270 for (
i--;
i >= 0;
i--) {
1271 pd[4*
i + 3]= pd[
i] & 3;
1272 pd[4*
i + 2]= (pd[
i]>>2) & 3;
1273 pd[4*
i + 1]= (pd[
i]>>4) & 3;
1274 pd[4*
i + 0]= pd[
i]>>6;
1277 if ((
s->width&3) >= 3) pd[4*
i + 2]= ((pd[
i]>>2) & 3)*0x55;
1278 if ((
s->width&3) >= 2) pd[4*
i + 1]= ((pd[
i]>>4) & 3)*0x55;
1279 if ((
s->width&3) >= 1) pd[4*
i + 0]= ( pd[
i]>>6 )*0x55;
1280 for (
i--;
i >= 0;
i--) {
1281 pd[4*
i + 3]= ( pd[
i] & 3)*0x55;
1282 pd[4*
i + 2]= ((pd[
i]>>2) & 3)*0x55;
1283 pd[4*
i + 1]= ((pd[
i]>>4) & 3)*0x55;
1284 pd[4*
i + 0]= ( pd[
i]>>6 )*0x55;
1287 pd +=
p->linesize[0];
1289 }
else if (
s->bits_per_pixel == 4) {
1291 uint8_t *pd =
p->data[0];
1292 for (j = 0; j <
s->height; j++) {
1295 if (
s->width&1) pd[2*
i+0]= pd[
i]>>4;
1296 for (
i--;
i >= 0;
i--) {
1297 pd[2*
i + 1] = pd[
i] & 15;
1298 pd[2*
i + 0] = pd[
i] >> 4;
1301 if (
s->width & 1) pd[2*
i + 0]= (pd[
i] >> 4) * 0x11;
1302 for (
i--;
i >= 0;
i--) {
1303 pd[2*
i + 1] = (pd[
i] & 15) * 0x11;
1304 pd[2*
i + 0] = (pd[
i] >> 4) * 0x11;
1307 pd +=
p->linesize[0];
1315 uint32_t sequence_number;
1316 int cur_w, cur_h, x_offset, y_offset, dispose_op, blend_op;
1331 sequence_number = bytestream2_get_be32(gb);
1332 cur_w = bytestream2_get_be32(gb);
1333 cur_h = bytestream2_get_be32(gb);
1334 x_offset = bytestream2_get_be32(gb);
1335 y_offset = bytestream2_get_be32(gb);
1337 dispose_op = bytestream2_get_byte(gb);
1338 blend_op = bytestream2_get_byte(gb);
1340 if (sequence_number == 0 &&
1341 (cur_w !=
s->width ||
1342 cur_h !=
s->height ||
1345 cur_w <= 0 || cur_h <= 0 ||
1346 x_offset < 0 || y_offset < 0 ||
1347 cur_w >
s->width - x_offset|| cur_h >
s->height - y_offset)
1355 if ((sequence_number == 0 || !
s->last_picture.f) &&
1375 s->x_offset = x_offset;
1376 s->y_offset = y_offset;
1377 s->dispose_op = dispose_op;
1378 s->blend_op = blend_op;
1386 uint8_t *pd =
p->data[0];
1387 uint8_t *pd_last =
s->last_picture.f->data[0];
1390 ls =
FFMIN(ls,
s->width *
s->bpp);
1393 for (j = 0; j <
s->height; j++) {
1394 for (
i = 0;
i < ls;
i++)
1395 pd[
i] += pd_last[
i];
1396 pd +=
p->linesize[0];
1397 pd_last +=
s->last_picture.f->linesize[0];
1403 #define FAST_DIV255(x) ((((x) + 128) * 257) >> 16)
1408 uint8_t *
dst =
p->data[0];
1409 ptrdiff_t dst_stride =
p->linesize[0];
1410 const uint8_t *
src =
s->last_picture.f->data[0];
1411 ptrdiff_t src_stride =
s->last_picture.f->linesize[0];
1427 for (y = 0; y <
s->y_offset; y++)
1428 memcpy(
dst + y * dst_stride,
src + y * src_stride,
p->width * bpp);
1429 for (y =
s->y_offset; y < s->y_offset +
s->cur_h; y++) {
1430 memcpy(
dst + y * dst_stride,
src + y * src_stride,
s->x_offset * bpp);
1431 memcpy(
dst + y * dst_stride + (
s->x_offset +
s->cur_w) * bpp,
1432 src + y * src_stride + (
s->x_offset +
s->cur_w) * bpp,
1433 (
p->width -
s->cur_w -
s->x_offset) * bpp);
1435 for (y =
s->y_offset +
s->cur_h; y < p->
height; y++)
1436 memcpy(
dst + y * dst_stride,
src + y * src_stride,
p->width * bpp);
1440 for (y =
s->y_offset; y < s->y_offset +
s->cur_h; ++y) {
1441 uint8_t *foreground =
dst + dst_stride * y + bpp *
s->x_offset;
1442 const uint8_t *background =
src + src_stride * y + bpp *
s->x_offset;
1443 for (x =
s->x_offset; x < s->x_offset +
s->cur_w; ++x, foreground += bpp, background += bpp) {
1445 uint8_t foreground_alpha, background_alpha, output_alpha;
1454 foreground_alpha = foreground[3];
1455 background_alpha = background[3];
1459 foreground_alpha = foreground[1];
1460 background_alpha = background[1];
1464 if (foreground_alpha == 255)
1467 if (foreground_alpha == 0) {
1468 memcpy(foreground, background, bpp);
1472 output_alpha = foreground_alpha +
FAST_DIV255((255 - foreground_alpha) * background_alpha);
1476 for (
b = 0;
b < bpp - 1; ++
b) {
1477 if (output_alpha == 0) {
1479 }
else if (background_alpha == 255) {
1480 output[
b] =
FAST_DIV255(foreground_alpha * foreground[
b] + (255 - foreground_alpha) * background[
b]);
1482 output[
b] = (255 * foreground_alpha * foreground[
b] + (255 - foreground_alpha) * background_alpha * background[
b]) / (255 * output_alpha);
1486 memcpy(foreground,
output, bpp);
1499 const ptrdiff_t dst_stride =
s->picture.f->linesize[0];
1500 uint8_t *
dst =
s->picture.f->data[0] +
s->y_offset * dst_stride + bpp *
s->x_offset;
1504 for (
size_t y = 0; y <
s->cur_h; y++) {
1505 memset(
dst, 0, bpp *
s->cur_w);
1514 uint32_t
tag, length;
1515 int decode_next_dat = 0;
1543 length = bytestream2_get_be32(&
s->gb);
1550 uint32_t crc_sig =
AV_RB32(
s->gb.buffer + length + 4);
1551 uint32_t crc_cal = ~
av_crc(crc_tab, UINT32_MAX,
s->gb.buffer, length + 4);
1552 if (crc_sig ^ crc_cal) {
1564 tag = bytestream2_get_le32(&
s->gb);
1575 case MKTAG(
'I',
'H',
'D',
'R'):
1576 case MKTAG(
'p',
'H',
'Y',
's'):
1577 case MKTAG(
't',
'E',
'X',
't'):
1578 case MKTAG(
'I',
'D',
'A',
'T'):
1579 case MKTAG(
't',
'R',
'N',
'S'):
1580 case MKTAG(
's',
'R',
'G',
'B'):
1581 case MKTAG(
'c',
'I',
'C',
'P'):
1582 case MKTAG(
'c',
'H',
'R',
'M'):
1583 case MKTAG(
'g',
'A',
'M',
'A'):
1591 case MKTAG(
'I',
'H',
'D',
'R'):
1595 case MKTAG(
'p',
'H',
'Y',
's'):
1599 case MKTAG(
'f',
'c',
'T',
'L'):
1604 decode_next_dat = 1;
1606 case MKTAG(
'f',
'd',
'A',
'T'):
1613 bytestream2_get_be32(&gb_chunk);
1615 case MKTAG(
'I',
'D',
'A',
'T'):
1621 case MKTAG(
'P',
'L',
'T',
'E'):
1624 case MKTAG(
't',
'R',
'N',
'S'):
1627 case MKTAG(
't',
'E',
'X',
't'):
1631 case MKTAG(
'z',
'T',
'X',
't'):
1635 case MKTAG(
's',
'T',
'E',
'R'): {
1636 int mode = bytestream2_get_byte(&gb_chunk);
1639 s->stereo_mode =
mode;
1642 "Unknown value in sTER chunk (%d)\n",
mode);
1646 case MKTAG(
'c',
'I',
'C',
'P'):
1647 s->cicp_primaries = bytestream2_get_byte(&gb_chunk);
1648 s->cicp_trc = bytestream2_get_byte(&gb_chunk);
1649 if (bytestream2_get_byte(&gb_chunk) != 0)
1651 s->cicp_range = bytestream2_get_byte(&gb_chunk);
1652 if (
s->cicp_range != 0 &&
s->cicp_range != 1)
1656 case MKTAG(
's',
'R',
'G',
'B'):
1661 case MKTAG(
'i',
'C',
'C',
'P'): {
1666 case MKTAG(
'c',
'H',
'R',
'M'): {
1669 s->white_point[0] = bytestream2_get_be32(&gb_chunk);
1670 s->white_point[1] = bytestream2_get_be32(&gb_chunk);
1673 for (
i = 0;
i < 3;
i++) {
1674 s->display_primaries[
i][0] = bytestream2_get_be32(&gb_chunk);
1675 s->display_primaries[
i][1] = bytestream2_get_be32(&gb_chunk);
1680 case MKTAG(
's',
'B',
'I',
'T'):
1684 case MKTAG(
'g',
'A',
'M',
'A'): {
1687 s->gamma = bytestream2_get_be32(&gb_chunk);
1699 case MKTAG(
'c',
'L',
'L',
'i'):
1700 case MKTAG(
'c',
'L',
'L',
'I'):
1706 s->clli_max = bytestream2_get_be32u(&gb_chunk);
1707 s->clli_avg = bytestream2_get_be32u(&gb_chunk);
1709 case MKTAG(
'm',
'D',
'C',
'v'):
1710 case MKTAG(
'm',
'D',
'C',
'V'):
1716 for (
int i = 0;
i < 3;
i++) {
1717 s->mdcv_primaries[
i][0] = bytestream2_get_be16u(&gb_chunk);
1718 s->mdcv_primaries[
i][1] = bytestream2_get_be16u(&gb_chunk);
1720 s->mdcv_white_point[0] = bytestream2_get_be16u(&gb_chunk);
1721 s->mdcv_white_point[1] = bytestream2_get_be16u(&gb_chunk);
1722 s->mdcv_max_lum = bytestream2_get_be32u(&gb_chunk);
1723 s->mdcv_min_lum = bytestream2_get_be32u(&gb_chunk);
1725 case MKTAG(
'e',
'X',
'I',
'f'):
1730 case MKTAG(
'I',
'E',
'N',
'D'):
1755 if (
s->bits_per_pixel <= 4)
1770 for (
int y = 0; y <
s->height; y++) {
1771 uint8_t *row = &
p->data[0][
p->linesize[0] * y];
1773 for (
int x =
s->width - 1; x >= 0; x--) {
1774 const uint8_t idx = row[x];
1776 row[4*x+2] =
s->palette[idx] & 0xFF;
1777 row[4*x+1] = (
s->palette[idx] >> 8 ) & 0xFF;
1778 row[4*x+0] = (
s->palette[idx] >> 16) & 0xFF;
1779 row[4*x+3] =
s->palette[idx] >> 24;
1786 size_t byte_depth =
s->bit_depth > 8 ? 2 : 1;
1787 size_t raw_bpp =
s->bpp - byte_depth;
1792 for (y = 0; y <
s->height; ++y) {
1793 uint8_t *row = &
p->data[0][
p->linesize[0] * y];
1795 if (
s->bpp == 2 && byte_depth == 1) {
1796 uint8_t *
pixel = &row[2 *
s->width - 1];
1797 uint8_t *rowp = &row[1 *
s->width - 1];
1798 int tcolor =
s->transparent_color_be[0];
1799 for (x =
s->width; x > 0; --x) {
1800 *
pixel-- = *rowp == tcolor ? 0 : 0xff;
1803 }
else if (
s->bpp == 4 && byte_depth == 1) {
1804 uint8_t *
pixel = &row[4 *
s->width - 1];
1805 uint8_t *rowp = &row[3 *
s->width - 1];
1806 int tcolor =
AV_RL24(
s->transparent_color_be);
1807 for (x =
s->width; x > 0; --x) {
1815 for (x =
s->width; x > 0; --x) {
1816 uint8_t *
pixel = &row[
s->bpp * (x - 1)];
1817 memmove(
pixel, &row[raw_bpp * (x - 1)], raw_bpp);
1819 if (!memcmp(
pixel,
s->transparent_color_be, raw_bpp)) {
1820 memset(&
pixel[raw_bpp], 0, byte_depth);
1822 memset(&
pixel[raw_bpp], 0xff, byte_depth);
1830 if (
s->last_picture.f) {
1832 &&
s->last_picture.f->width ==
p->width
1833 &&
s->last_picture.f->height==
p->height
1834 &&
s->last_picture.f->format==
p->format
1838 else if (CONFIG_APNG_DECODER &&
1858 s->iccp_data_len = 0;
1859 s->iccp_name[0] = 0;
1861 s->stereo_mode = -1;
1874 if (
s->stereo_mode >= 0) {
1893 #if CONFIG_PNG_DECODER
1898 const uint8_t *buf = avpkt->
data;
1899 int buf_size = avpkt->
size;
1908 sig = bytestream2_get_be64(&
s->gb);
1915 s->y =
s->has_trns = 0;
1920 ret = inflateReset(&
s->zstream.zstream);
1951 #if CONFIG_APNG_DECODER
1964 if ((
ret = inflateReset(&
s->zstream.zstream)) != Z_OK)
1972 if ((
ret = inflateReset(&
s->zstream.zstream)) != Z_OK)
2056 s->last_row_size = 0;
2058 s->tmp_row_size = 0;
2068 #if CONFIG_APNG_DECODER
2086 #if CONFIG_PNG_DECODER
static void error(const char *err)
void ff_progress_frame_report(ProgressFrame *f, int n)
Notify later decoding threads when part of their reference frame is ready.
#define AV_LOG_WARNING
Something somehow does not look correct.
#define AV_BPRINT_SIZE_UNLIMITED
#define AV_EF_EXPLODE
abort decoding on minor error detection
static void clear_frame_metadata(PNGDecContext *s)
void ff_add_png_paeth_prediction(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)
enum AVAlphaMode alpha_mode
Indicates how the alpha channel of the video is represented.
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
enum AVColorRange cicp_range
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
@ AV_PIX_FMT_YA8
8 bits gray, 8 bits alpha
@ AVALPHA_MODE_STRAIGHT
Alpha channel is independent of color values.
static av_always_inline int bytestream2_get_bytes_left(const GetByteContext *g)
enum AVColorSpace colorspace
YUV colorspace type.
AVColorTransferCharacteristic
Color Transfer Characteristic.
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
static av_always_inline int bytestream2_tell(const GetByteContext *g)
Struct that contains both white point location and primaries location, providing the complete descrip...
unsigned int last_row_size
#define APNG_FCTL_CHUNK_SIZE
static int decode_phys_chunk(AVCodecContext *avctx, PNGDecContext *s, GetByteContext *gb)
@ AVCOL_TRC_LINEAR
"Linear transfer characteristics"
int ff_png_get_nb_channels(int color_type)
void(* add_paeth_prediction)(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
static int decode_text_chunk(PNGDecContext *s, GetByteContext *gb, int compressed)
uint16_t mdcv_white_point[2]
unsigned MaxCLL
Max content light level (cd/m^2).
This structure describes decoded (raw) audio or video data.
@ AVCOL_TRC_NB
Not part of ABI.
@ AV_PIX_FMT_RGBA64BE
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
@ AVCOL_RANGE_JPEG
Full range content.
int ff_progress_frame_get_buffer(AVCodecContext *avctx, ProgressFrame *f, int flags)
Wrapper around ff_progress_frame_alloc() and ff_thread_get_buffer().
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1
AVColorPrimaries
Chromaticity coordinates of the source primaries.
unsigned int tmp_row_size
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
@ APNG_DISPOSE_OP_BACKGROUND
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
#define FF_DEBUG_PICT_INFO
Content light level needed by to transmit HDR over HDMI (CTA-861.3).
@ AV_PIX_FMT_GRAY16BE
Y , 16bpp, big-endian.
static av_cold void close(AVCodecParserContext *s)
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
#define PNG_FILTER_TYPE_LOCO
AVCodec p
The public AVCodec.
@ AVCOL_TRC_IEC61966_2_1
IEC 61966-2-1 (sRGB or sYCC)
int ff_png_pass_row_size(int pass, int bits_per_pixel, int width)
enum AVDiscard skip_frame
Skip decoding for selected frames.
static void inflate(uint8_t *dst, const uint8_t *p1, int width, int threshold, const uint8_t *coordinates[], int coord, int maxc)
static av_cold int png_dec_end(AVCodecContext *avctx)
@ AVCOL_TRC_GAMMA28
also ITU-R BT470BG
enum PNGImageState pic_state
#define AVERROR_BUFFER_TOO_SMALL
Buffer too small.
static int decode_idat_chunk(AVCodecContext *avctx, PNGDecContext *s, GetByteContext *gb, AVFrame *p)
static const uint8_t png_pass_dsp_ymask[NB_PASSES]
#define PNG_COLOR_TYPE_RGB_ALPHA
@ AVCOL_TRC_GAMMA22
also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that's been allocated with av_malloc() or another memory allocation functio...
#define FF_CODEC_CAP_USES_PROGRESSFRAMES
The decoder might make use of the ProgressFrame API.
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
#define YUV2RGB(NAME, TYPE)
#define FF_CODEC_DECODE_CB(func)
@ AVCOL_PRI_NB
Not part of ABI.
const FFCodec ff_apng_decoder
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
#define PNG_COLOR_TYPE_RGB
const FFCodec ff_png_decoder
#define AV_EF_IGNORE_ERR
ignore errors and continue
#define av_assert0(cond)
assert() equivalent, that is always enabled.
enum PNGHeaderState hdr_state
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
void ff_progress_frame_unref(ProgressFrame *f)
Give up a reference to the underlying frame contained in a ProgressFrame and reset the ProgressFrame,...
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_progress_frame_await() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_progress_frame_report() has been called on them. This includes draw_edges(). Porting codecs to frame threading
static int percent_missing(PNGDecContext *s)
enum AVColorPrimaries av_csp_primaries_id_from_desc(const AVColorPrimariesDesc *prm)
Detects which enum AVColorPrimaries constant corresponds to the given complete gamut description.
#define CODEC_LONG_NAME(str)
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
int ff_decode_mastering_display_new(const AVCodecContext *avctx, AVFrame *frame, AVMasteringDisplayMetadata **mdm)
Wrapper around av_mastering_display_metadata_create_side_data(), which rejects side data overridden b...
int flags
Additional information about the frame packing.
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
@ AVDISCARD_ALL
discard all
@ AV_PIX_FMT_GRAY8A
alias for AV_PIX_FMT_YA8
enum AVColorPrimaries cicp_primaries
static int png_decode_idat(PNGDecContext *s, GetByteContext *gb, uint8_t *dst, ptrdiff_t dst_stride)
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
static av_cold int png_dec_init(AVCodecContext *avctx)
enum AVColorRange color_range
MPEG vs JPEG YUV range.
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
@ AV_EXIF_TIFF_HEADER
The TIFF header starts with 0x49492a00, or 0x4d4d002a.
Rational number (pair of numerator and denominator).
@ AV_PIX_FMT_MONOBLACK
Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb.
@ AVCOL_PRI_BT709
also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP 177 Annex B
@ AV_PICTURE_TYPE_I
Intra.
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
@ APNG_DISPOSE_OP_PREVIOUS
#define PNG_COLOR_TYPE_GRAY
static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g, uint8_t *dst, unsigned int size)
#define UPDATE_THREAD_CONTEXT(func)
static int decode_sbit_chunk(AVCodecContext *avctx, PNGDecContext *s, GetByteContext *gb)
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
#define AV_EF_CRCCHECK
Verify checksums embedded in the bitstream (could be of either encoded or decoded data,...
static void apng_reset_background(PNGDecContext *s, const AVFrame *p)
@ AVCOL_RANGE_UNSPECIFIED
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
const uint8_t ff_png_pass_ymask[NB_PASSES]
static int output_frame(PNGDecContext *s, AVFrame *f)
static const uint8_t png_pass_mask[NB_PASSES]
static void handle_small_bpp(PNGDecContext *s, AVFrame *p)
#define PNG_FILTER_VALUE_NONE
int(* init)(AVBSFContext *ctx)
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
int ff_decode_exif_attach_buffer(AVCodecContext *avctx, AVFrame *frame, AVBufferRef **pbuf, enum AVExifHeaderMode header_mode)
Attach the data buffer to the frame.
static void handle_p_frame_png(PNGDecContext *s, AVFrame *p)
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
static void png_put_interlaced_row(uint8_t *dst, int width, int bits_per_pixel, int pass, int color_type, const uint8_t *src)
@ AV_PIX_FMT_YA16BE
16 bits gray, 16 bits alpha (big-endian)
#define PNG_FILTER_VALUE_AVG
static AVRational av_make_q(int num, int den)
Create an AVRational.
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
#define PNG_FILTER_VALUE_PAETH
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_RL24
static av_const int av_isgraph(int c)
Locale-independent conversion of ASCII isgraph.
#define PNG_FILTER_VALUE_UP
#define FF_COMPLIANCE_NORMAL
static av_const int av_isdigit(int c)
Locale-independent conversion of ASCII isdigit.
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
const AVCRC * av_crc_get_table(AVCRCId crc_id)
Get an initialized standard CRC table.
#define AVERROR_EXTERNAL
Generic error in an external library.
int flags
A combination of AV_PKT_FLAG values.
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
AVBufferRef * av_buffer_alloc(size_t size)
Allocate an AVBuffer of the given size using av_malloc().
static int decode_trns_chunk(AVCodecContext *avctx, PNGDecContext *s, GetByteContext *gb)
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
static int populate_avctx_color_fields(AVCodecContext *avctx, AVFrame *frame)
#define FF_THREAD_FRAME
Decode more than one frame at once.
av_cold void ff_pngdsp_init(PNGDSPContext *dsp)
int av_image_get_linesize(enum AVPixelFormat pix_fmt, int width, int plane)
Compute the size of an image line with format pix_fmt and width width for the plane plane.
AVDictionary * frame_metadata
void(* add_bytes_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w)
#define PNG_FILTER_VALUE_SUB
@ AV_PIX_FMT_RGB48BE
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big...
#define i(width, name, range_min, range_max)
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
static int decode_exif_chunk(AVCodecContext *avctx, PNGDecContext *s, GetByteContext *gb)
uint32_t display_primaries[3][2]
uint16_t mdcv_primaries[3][2]
void av_bprint_get_buffer(AVBPrint *buf, unsigned size, unsigned char **mem, unsigned *actual_size)
Allocate bytes in the buffer for external use.
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
static void png_handle_row(PNGDecContext *s, uint8_t *dst, ptrdiff_t dst_stride)
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
#define FF_DEBUG_STARTCODE
static int decode_fctl_chunk(AVCodecContext *avctx, PNGDecContext *s, GetByteContext *gb)
static int decode_plte_chunk(AVCodecContext *avctx, PNGDecContext *s, GetByteContext *gb)
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
const char * name
Name of the codec implementation.
void ff_inflate_end(FFZStream *zstream)
Wrapper around inflateEnd().
const uint8_t * buffer_end
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
#define FF_CODEC_CAP_ICC_PROFILES
Codec supports embedded ICC profiles (AV_FRAME_DATA_ICC_PROFILE).
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
#define FFSWAP(type, a, b)
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
enum AVStereo3DType type
How views are packed within the video.
int ff_decode_content_light_new(const AVCodecContext *avctx, AVFrame *frame, AVContentLightMetadata **clm)
Wrapper around av_content_light_metadata_create_side_data(), which rejects side data overridden by th...
void av_bprintf(AVBPrint *buf, const char *fmt,...)
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards. If some code can 't be moved
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
void ff_progress_frame_replace(ProgressFrame *dst, const ProgressFrame *src)
Do nothing if dst and src already refer to the same AVFrame; otherwise unreference dst and if src is ...
static char * iso88591_to_utf8(const char *in, size_t size_in)
void ff_png_filter_row(PNGDSPContext *dsp, uint8_t *dst, int filter_type, uint8_t *src, uint8_t *last, int size, int bpp)
main external API structure.
int active_thread_type
Which multithreading methods are in use by the codec.
static int decode_frame_common(AVCodecContext *avctx, PNGDecContext *s, AVFrame *p, const AVPacket *avpkt)
uint32_t av_crc(const AVCRC *ctx, uint32_t crc, const uint8_t *buffer, size_t length)
Calculate the CRC of a block.
#define UNROLL_FILTER(op)
ProgressFrame last_picture
uint8_t transparent_color_be[6]
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have update_thread_context() run it in the next thread. Add AV_CODEC_CAP_FRAME_THREADS to the codec capabilities. There will be very little speed gain at this point but it should work. Use ff_thread_get_buffer()(or ff_progress_frame_get_buffer() in case you have inter-frame dependencies and use the ProgressFrame API) to allocate frame buffers. Call ff_progress_frame_report() after some part of the current picture has decoded. A good place to put this is where draw_horiz_band() is called - add this if it isn 't called anywhere
void av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_padded_malloc except that buffer will always be 0-initialized after call.
static const uint8_t png_pass_dsp_mask[NB_PASSES]
int discard_damaged_percentage
The percentage of damaged samples to discard a frame.
#define PNG_COLOR_MASK_PALETTE
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
static int decode_text_to_exif(PNGDecContext *s, const char *txt_utf8)
static int decode_iccp_chunk(PNGDecContext *s, GetByteContext *gb)
A reference to a data buffer.
static int decode_ihdr_chunk(AVCodecContext *avctx, PNGDecContext *s, GetByteContext *gb)
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
#define avpriv_request_sample(...)
Structure to hold side data for an AVFrame.
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
The ProgressFrame structure.
This structure stores compressed data.
unsigned MaxFALL
Max average light level per frame (cd/m^2).
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
int ff_inflate_init(FFZStream *zstream, void *logctx)
Wrapper around inflateInit().
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
#define PNG_COLOR_TYPE_GRAY_ALPHA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
@ AVCOL_TRC_SMPTE428
SMPTE ST 428-1.
enum AVColorTransferCharacteristic cicp_trc
#define MKTAG(a, b, c, d)
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
static int handle_p_frame_apng(AVCodecContext *avctx, PNGDecContext *s, AVFrame *p)
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
AVColorRange
Visual content value range.
static int decode_zbuf(AVBPrint *bp, const uint8_t *data, const uint8_t *data_end, void *logctx)
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
#define PNG_COLOR_TYPE_PALETTE
#define AV_DICT_DONT_STRDUP_KEY
Take ownership of a key that's been allocated with av_malloc() or another memory allocation function.
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
#define av_fourcc2str(fourcc)