FFmpeg
webp.c
Go to the documentation of this file.
1 /*
2  * WebP (.webp) image decoder
3  * Copyright (c) 2013 Aneesh Dogra <aneesh@sugarlabs.org>
4  * Copyright (c) 2013 Justin Ruggles <justin.ruggles@gmail.com>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * WebP image decoder
26  *
27  * @author Aneesh Dogra <aneesh@sugarlabs.org>
28  * Container and Lossy decoding
29  *
30  * @author Justin Ruggles <justin.ruggles@gmail.com>
31  * Lossless decoder
32  * Compressed alpha for lossy
33  *
34  * @author James Almer <jamrial@gmail.com>
35  * Exif metadata
36  * ICC profile
37  *
38  * @author Thilo Borgmann <thilo.borgmann _at_ mail.de>
39  * XMP metadata
40  *
41  * Unimplemented:
42  * - Animation
43  */
44 
45 #include "libavutil/imgutils.h"
46 #include "libavutil/mem.h"
47 
48 #define BITSTREAM_READER_LE
49 #include "avcodec.h"
50 #include "bytestream.h"
51 #include "codec_internal.h"
52 #include "decode.h"
53 #include "exif_internal.h"
54 #include "get_bits.h"
55 #include "thread.h"
56 #include "tiff_common.h"
57 #include "vp8.h"
58 
59 #define VP8X_FLAG_ANIMATION 0x02
60 #define VP8X_FLAG_XMP_METADATA 0x04
61 #define VP8X_FLAG_EXIF_METADATA 0x08
62 #define VP8X_FLAG_ALPHA 0x10
63 #define VP8X_FLAG_ICC 0x20
64 
65 #define MAX_PALETTE_SIZE 256
66 #define MAX_CACHE_BITS 11
67 #define NUM_CODE_LENGTH_CODES 19
68 #define HUFFMAN_CODES_PER_META_CODE 5
69 #define NUM_LITERAL_CODES 256
70 #define NUM_LENGTH_CODES 24
71 #define NUM_DISTANCE_CODES 40
72 #define NUM_SHORT_DISTANCES 120
73 #define MAX_HUFFMAN_CODE_LENGTH 15
74 
75 static const uint16_t alphabet_sizes[HUFFMAN_CODES_PER_META_CODE] = {
79 };
80 
82  17, 18, 0, 1, 2, 3, 4, 5, 16, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
83 };
84 
/* (dx, dy) neighbor offsets for the first 120 LZ77 distance codes.
 * A decoded distance d <= NUM_SHORT_DISTANCES indexes this table (d - 1),
 * and the effective linear distance is max(1, dx + dy * width). */
static const int8_t lz77_distance_offsets[NUM_SHORT_DISTANCES][2] = {
    { 0, 1 }, { 1, 0 }, { 1, 1 }, { -1, 1 }, { 0, 2 }, { 2, 0 }, { 1, 2 }, { -1, 2 },
    { 2, 1 }, { -2, 1 }, { 2, 2 }, { -2, 2 }, { 0, 3 }, { 3, 0 }, { 1, 3 }, { -1, 3 },
    { 3, 1 }, { -3, 1 }, { 2, 3 }, { -2, 3 }, { 3, 2 }, { -3, 2 }, { 0, 4 }, { 4, 0 },
    { 1, 4 }, { -1, 4 }, { 4, 1 }, { -4, 1 }, { 3, 3 }, { -3, 3 }, { 2, 4 }, { -2, 4 },
    { 4, 2 }, { -4, 2 }, { 0, 5 }, { 3, 4 }, { -3, 4 }, { 4, 3 }, { -4, 3 }, { 5, 0 },
    { 1, 5 }, { -1, 5 }, { 5, 1 }, { -5, 1 }, { 2, 5 }, { -2, 5 }, { 5, 2 }, { -5, 2 },
    { 4, 4 }, { -4, 4 }, { 3, 5 }, { -3, 5 }, { 5, 3 }, { -5, 3 }, { 0, 6 }, { 6, 0 },
    { 1, 6 }, { -1, 6 }, { 6, 1 }, { -6, 1 }, { 2, 6 }, { -2, 6 }, { 6, 2 }, { -6, 2 },
    { 4, 5 }, { -4, 5 }, { 5, 4 }, { -5, 4 }, { 3, 6 }, { -3, 6 }, { 6, 3 }, { -6, 3 },
    { 0, 7 }, { 7, 0 }, { 1, 7 }, { -1, 7 }, { 5, 5 }, { -5, 5 }, { 7, 1 }, { -7, 1 },
    { 4, 6 }, { -4, 6 }, { 6, 4 }, { -6, 4 }, { 2, 7 }, { -2, 7 }, { 7, 2 }, { -7, 2 },
    { 3, 7 }, { -3, 7 }, { 7, 3 }, { -7, 3 }, { 5, 6 }, { -5, 6 }, { 6, 5 }, { -6, 5 },
    { 8, 0 }, { 4, 7 }, { -4, 7 }, { 7, 4 }, { -7, 4 }, { 8, 1 }, { 8, 2 }, { 6, 6 },
    { -6, 6 }, { 8, 3 }, { 5, 7 }, { -5, 7 }, { 7, 5 }, { -7, 5 }, { 8, 4 }, { 6, 7 },
    { -6, 7 }, { 7, 6 }, { -7, 6 }, { 8, 5 }, { 7, 7 }, { -7, 7 }, { 8, 6 }, { 8, 7 }
};
102 
106 };
107 
113 };
114 
120 };
121 
137 };
138 
145 };
146 
147 /* The structure of WebP lossless is an optional series of transformation data,
148  * followed by the primary image. The primary image also optionally contains
149  * an entropy group mapping if there are multiple entropy groups. There is a
150  * basic image type called an "entropy coded image" that is used for all of
151  * these. The type of each entropy coded image is referred to by the
152  * specification as its role. */
153 enum ImageRole {
154  /* Primary Image: Stores the actual pixels of the image. */
156 
157  /* Entropy Image: Defines which Huffman group to use for different areas of
158  * the primary image. */
160 
161  /* Predictors: Defines which predictor type to use for different areas of
162  * the primary image. */
164 
165  /* Color Transform Data: Defines the color transformation for different
166  * areas of the primary image. */
168 
169  /* Color Index: Stored as an image of height == 1. */
171 
173 };
174 
/* Reader for one Huffman code of a meta code. Codes with only one or two
 * symbols bypass the VLC table entirely ("simple" mode). */
typedef struct HuffReader {
    VLC vlc;                    /* Huffman decoder context */
    int simple;                 /* whether to use simple (non-VLC) mode */
    int nb_symbols;             /* number of coded symbols */
    uint16_t simple_symbols[2]; /* symbols for simple mode */
} HuffReader;
181 
182 typedef struct ImageContext {
183  enum ImageRole role; /* role of this image */
184  AVFrame *frame; /* AVFrame for data */
185  int color_cache_bits; /* color cache size, log2 */
186  uint32_t *color_cache; /* color cache data */
187  int nb_huffman_groups; /* number of huffman groups */
188  HuffReader *huffman_groups; /* reader for each huffman group */
189  /* relative size compared to primary image, log2.
190  * for IMAGE_ROLE_COLOR_INDEXING with <= 16 colors, this is log2 of the
191  * number of pixels per byte in the primary image (pixel packing) */
194 } ImageContext;
195 
196 typedef struct WebPContext {
197  VP8Context v; /* VP8 Context used for lossy decoding */
198  GetBitContext gb; /* bitstream reader for main image chunk */
199  AVFrame *alpha_frame; /* AVFrame for alpha data decompressed from VP8L */
200  AVPacket *pkt; /* AVPacket to be passed to the underlying VP8 decoder */
201  AVCodecContext *avctx; /* parent AVCodecContext */
202  int initialized; /* set once the VP8 context is initialized */
203  int has_alpha; /* has a separate alpha chunk */
204  enum AlphaCompression alpha_compression; /* compression type for alpha chunk */
205  enum AlphaFilter alpha_filter; /* filtering method for alpha chunk */
206  const uint8_t *alpha_data; /* alpha chunk data */
207  int alpha_data_size; /* alpha chunk data size */
208  int has_exif; /* set after an EXIF chunk has been processed */
209  int has_iccp; /* set after an ICCP chunk has been processed */
210  int has_xmp; /* set after an XMP chunk has been processed */
211  int width; /* image width */
212  int height; /* image height */
213 
214  int nb_transforms; /* number of transforms */
215  enum TransformType transforms[4]; /* transformations used in the image, in order */
216  /* reduced width when using a color indexing transform with <= 16 colors (pixel packing)
217  * before pixels are unpacked, or same as width otherwise. */
219  int nb_huffman_groups; /* number of huffman groups in the primary image */
220  ImageContext image[IMAGE_ROLE_NB]; /* image context for each role */
221 } WebPContext;
222 
/* Address of the 4-byte ARGB pixel at (x, y) in the frame's data[0] plane.
 * All macro arguments are fully parenthesized so expressions such as
 * `x + 1` or a cast/address-of expression for `frame` expand correctly. */
#define GET_PIXEL(frame, x, y) \
    ((frame)->data[0] + (y) * (frame)->linesize[0] + 4 * (x))

/* One component of the ARGB pixel at (x, y): c = 0 alpha, 1 red,
 * 2 green, 3 blue. */
#define GET_PIXEL_COMP(frame, x, y, c) \
    (*((frame)->data[0] + (y) * (frame)->linesize[0] + 4 * (x) + (c)))
228 
230 {
231  int i, j;
232 
233  av_free(img->color_cache);
234  if (img->role != IMAGE_ROLE_ARGB && !img->is_alpha_primary)
235  av_frame_free(&img->frame);
236  if (img->huffman_groups) {
237  for (i = 0; i < img->nb_huffman_groups; i++) {
238  for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; j++)
239  ff_vlc_free(&img->huffman_groups[i * HUFFMAN_CODES_PER_META_CODE + j].vlc);
240  }
241  av_free(img->huffman_groups);
242  }
243  memset(img, 0, sizeof(*img));
244 }
245 
247 {
248  if (r->simple) {
249  if (r->nb_symbols == 1)
250  return r->simple_symbols[0];
251  else
252  return r->simple_symbols[get_bits1(gb)];
253  } else
254  return get_vlc2(gb, r->vlc.table, 8, 2);
255 }
256 
/**
 * Initialize a HuffReader from canonical per-symbol code lengths.
 *
 * @param r             reader to set up; becomes "simple" if exactly one
 *                      symbol is coded, otherwise gets a VLC table
 * @param code_lengths  code length of each symbol (0 = symbol not coded)
 * @param len_counts    on input, number of symbols per code length;
 *                      clobbered by this function (rewritten in place into
 *                      running scatter offsets)
 * @param lens          scratch output: lengths sorted by (length, symbol)
 * @param syms          scratch output: symbols in the same order as lens
 * @param alphabet_size number of entries in code_lengths
 * @param logctx        context for logging
 * @return 0 on success, AVERROR_INVALIDDATA if no symbol has a code, or an
 *         error from ff_vlc_init_from_lengths()
 */
static int huff_reader_build_canonical(HuffReader *r, const uint8_t *code_lengths,
                                       uint16_t len_counts[MAX_HUFFMAN_CODE_LENGTH + 1],
                                       uint8_t lens[], uint16_t syms[],
                                       int alphabet_size, void *logctx)
{
    unsigned nb_codes = 0;
    int ret;

    // Turn len_counts into an array of offsets: after this loop,
    // len_counts[len] is the index where the first symbol of that code
    // length will be stored, and nb_codes is the total number of coded
    // symbols.
    for (int len = 1; len <= MAX_HUFFMAN_CODE_LENGTH; ++len) {
        unsigned cnt = len_counts[len];
        len_counts[len] = nb_codes;
        nb_codes += cnt;
    }

    // Scatter symbols (and their lengths) into (length, symbol) order,
    // which is the canonical order expected by the VLC builder.
    for (int sym = 0; sym < alphabet_size; ++sym) {
        if (code_lengths[sym]) {
            unsigned idx = len_counts[code_lengths[sym]]++;
            syms[idx] = sym;
            lens[idx] = code_lengths[sym];
        }
    }

    if (nb_codes == 0) {
        // No symbols
        return AVERROR_INVALIDDATA;
    }
    if (nb_codes == 1) {
        // Special-case 1 symbol since the VLC reader cannot handle it
        r->nb_symbols = 1;
        r->simple = 1;
        r->simple_symbols[0] = syms[0];
        return 0;
    }

    ret = ff_vlc_init_from_lengths(&r->vlc, 8, nb_codes, lens, 1,
                                   syms, 2, 2, 0, VLC_INIT_OUTPUT_LE, logctx);
    if (ret < 0)
        return ret;
    r->simple = 0;

    return 0;
}
301 
303 {
304  hc->nb_symbols = get_bits1(&s->gb) + 1;
305 
306  if (get_bits1(&s->gb))
307  hc->simple_symbols[0] = get_bits(&s->gb, 8);
308  else
309  hc->simple_symbols[0] = get_bits1(&s->gb);
310 
311  if (hc->nb_symbols == 2)
312  hc->simple_symbols[1] = get_bits(&s->gb, 8);
313 
314  hc->simple = 1;
315 }
316 
318  int alphabet_size)
319 {
320  HuffReader code_len_hc = { { 0 }, 0, 0, { 0 } };
321  uint8_t *code_lengths;
322  uint8_t code_length_code_lengths[NUM_CODE_LENGTH_CODES] = { 0 };
323  uint8_t reordered_code_length_code_lengths[NUM_CODE_LENGTH_CODES];
324  uint16_t reordered_code_length_syms[NUM_CODE_LENGTH_CODES];
325  uint16_t len_counts[MAX_HUFFMAN_CODE_LENGTH + 1] = { 0 };
326  int symbol, max_symbol, prev_code_len, ret;
327  int num_codes = 4 + get_bits(&s->gb, 4);
328 
329  av_assert1(num_codes <= NUM_CODE_LENGTH_CODES);
330 
331  for (int i = 0; i < num_codes; i++) {
332  unsigned len = get_bits(&s->gb, 3);
333  code_length_code_lengths[code_length_code_order[i]] = len;
334  len_counts[len]++;
335  }
336 
337  if (get_bits1(&s->gb)) {
338  int bits = 2 + 2 * get_bits(&s->gb, 3);
339  max_symbol = 2 + get_bits(&s->gb, bits);
340  if (max_symbol > alphabet_size) {
341  av_log(s->avctx, AV_LOG_ERROR, "max symbol %d > alphabet size %d\n",
342  max_symbol, alphabet_size);
343  return AVERROR_INVALIDDATA;
344  }
345  } else {
346  max_symbol = alphabet_size;
347  }
348 
349  ret = huff_reader_build_canonical(&code_len_hc, code_length_code_lengths, len_counts,
350  reordered_code_length_code_lengths,
351  reordered_code_length_syms,
352  NUM_CODE_LENGTH_CODES, s->avctx);
353  if (ret < 0)
354  return ret;
355 
356  code_lengths = av_malloc_array(alphabet_size, 2 * sizeof(uint8_t) + sizeof(uint16_t));
357  if (!code_lengths) {
358  ret = AVERROR(ENOMEM);
359  goto finish;
360  }
361 
362  prev_code_len = 8;
363  symbol = 0;
364  memset(len_counts, 0, sizeof(len_counts));
365  while (symbol < alphabet_size) {
366  int code_len;
367 
368  if (!max_symbol--)
369  break;
370  code_len = huff_reader_get_symbol(&code_len_hc, &s->gb);
371  if (code_len < 16U) {
372  /* Code length code [0..15] indicates literal code lengths. */
373  code_lengths[symbol++] = code_len;
374  len_counts[code_len]++;
375  if (code_len)
376  prev_code_len = code_len;
377  } else {
378  int repeat = 0, length = 0;
379  switch (code_len) {
380  default:
382  goto finish;
383  case 16:
384  /* Code 16 repeats the previous non-zero value [3..6] times,
385  * i.e., 3 + ReadBits(2) times. If code 16 is used before a
386  * non-zero value has been emitted, a value of 8 is repeated. */
387  repeat = 3 + get_bits(&s->gb, 2);
388  length = prev_code_len;
389  len_counts[length] += repeat;
390  break;
391  case 17:
392  /* Code 17 emits a streak of zeros [3..10], i.e.,
393  * 3 + ReadBits(3) times. */
394  repeat = 3 + get_bits(&s->gb, 3);
395  break;
396  case 18:
397  /* Code 18 emits a streak of zeros of length [11..138], i.e.,
398  * 11 + ReadBits(7) times. */
399  repeat = 11 + get_bits(&s->gb, 7);
400  break;
401  }
402  if (symbol + repeat > alphabet_size) {
403  av_log(s->avctx, AV_LOG_ERROR,
404  "invalid symbol %d + repeat %d > alphabet size %d\n",
405  symbol, repeat, alphabet_size);
407  goto finish;
408  }
409  while (repeat-- > 0)
410  code_lengths[symbol++] = length;
411  }
412  }
413 
414  ret = huff_reader_build_canonical(hc, code_lengths, len_counts,
415  code_lengths + symbol,
416  (uint16_t*)(code_lengths + 2 * symbol),
417  symbol, s->avctx);
418 
419 finish:
420  ff_vlc_free(&code_len_hc.vlc);
421  av_free(code_lengths);
422  return ret;
423 }
424 
static int decode_entropy_coded_image(WebPContext *s, enum ImageRole role,
                                      int w, int h);

/* Read a 3-bit size exponent from the bitstream (block side length is
 * 1 << block_bits with block_bits in [2..9]) and compute the sub-image
 * dimensions in blocks, rounding up. Writes block_bits, blocks_w and
 * blocks_h in the calling scope. */
#define PARSE_BLOCK_SIZE(w, h) do { \
    block_bits = get_bits(&s->gb, 3) + 2; \
    blocks_w = FFALIGN((w), 1 << block_bits) >> block_bits; \
    blocks_h = FFALIGN((h), 1 << block_bits) >> block_bits; \
} while (0)
433 
435 {
436  ImageContext *img;
437  int ret, block_bits, blocks_w, blocks_h, x, y, max;
438 
439  PARSE_BLOCK_SIZE(s->reduced_width, s->height);
440 
441  ret = decode_entropy_coded_image(s, IMAGE_ROLE_ENTROPY, blocks_w, blocks_h);
442  if (ret < 0)
443  return ret;
444 
445  img = &s->image[IMAGE_ROLE_ENTROPY];
446  img->size_reduction = block_bits;
447 
448  /* the number of huffman groups is determined by the maximum group number
449  * coded in the entropy image */
450  max = 0;
451  for (y = 0; y < img->frame->height; y++) {
452  for (x = 0; x < img->frame->width; x++) {
453  int p0 = GET_PIXEL_COMP(img->frame, x, y, 1);
454  int p1 = GET_PIXEL_COMP(img->frame, x, y, 2);
455  int p = p0 << 8 | p1;
456  max = FFMAX(max, p);
457  }
458  }
459  s->nb_huffman_groups = max + 1;
460 
461  return 0;
462 }
463 
465 {
466  int block_bits, blocks_w, blocks_h, ret;
467 
468  PARSE_BLOCK_SIZE(s->reduced_width, s->height);
469 
471  blocks_h);
472  if (ret < 0)
473  return ret;
474 
475  s->image[IMAGE_ROLE_PREDICTOR].size_reduction = block_bits;
476 
477  return 0;
478 }
479 
481 {
482  int block_bits, blocks_w, blocks_h, ret;
483 
484  PARSE_BLOCK_SIZE(s->reduced_width, s->height);
485 
487  blocks_h);
488  if (ret < 0)
489  return ret;
490 
491  s->image[IMAGE_ROLE_COLOR_TRANSFORM].size_reduction = block_bits;
492 
493  return 0;
494 }
495 
497 {
498  ImageContext *img;
499  int width_bits, index_size, ret, x;
500  uint8_t *ct;
501 
502  index_size = get_bits(&s->gb, 8) + 1;
503 
504  if (index_size <= 2)
505  width_bits = 3;
506  else if (index_size <= 4)
507  width_bits = 2;
508  else if (index_size <= 16)
509  width_bits = 1;
510  else
511  width_bits = 0;
512 
514  index_size, 1);
515  if (ret < 0)
516  return ret;
517 
518  img = &s->image[IMAGE_ROLE_COLOR_INDEXING];
519  img->size_reduction = width_bits;
520  if (width_bits > 0)
521  s->reduced_width = (s->width + ((1 << width_bits) - 1)) >> width_bits;
522 
523  /* color index values are delta-coded */
524  ct = img->frame->data[0] + 4;
525  for (x = 4; x < img->frame->width * 4; x++, ct++)
526  ct[0] += ct[-4];
527 
528  return 0;
529 }
530 
532  int x, int y)
533 {
534  ImageContext *gimg = &s->image[IMAGE_ROLE_ENTROPY];
535  int group = 0;
536 
537  if (gimg->size_reduction > 0) {
538  int group_x = x >> gimg->size_reduction;
539  int group_y = y >> gimg->size_reduction;
540  int g0 = GET_PIXEL_COMP(gimg->frame, group_x, group_y, 1);
541  int g1 = GET_PIXEL_COMP(gimg->frame, group_x, group_y, 2);
542  group = g0 << 8 | g1;
543  }
544 
545  return &img->huffman_groups[group * HUFFMAN_CODES_PER_META_CODE];
546 }
547 
549 {
550  uint32_t cache_idx = (0x1E35A7BD * c) >> (32 - img->color_cache_bits);
551  img->color_cache[cache_idx] = c;
552 }
553 
555  int w, int h)
556 {
557  ImageContext *img;
558  HuffReader *hg;
559  int i, j, ret, x, y, width;
560 
561  img = &s->image[role];
562  img->role = role;
563 
564  if (!img->frame) {
565  img->frame = av_frame_alloc();
566  if (!img->frame)
567  return AVERROR(ENOMEM);
568  }
569 
570  img->frame->format = AV_PIX_FMT_ARGB;
571  img->frame->width = w;
572  img->frame->height = h;
573 
574  if (role == IMAGE_ROLE_ARGB && !img->is_alpha_primary) {
575  ret = ff_thread_get_buffer(s->avctx, img->frame, 0);
576  } else
577  ret = av_frame_get_buffer(img->frame, 1);
578  if (ret < 0)
579  return ret;
580 
581  if (get_bits1(&s->gb)) {
582  img->color_cache_bits = get_bits(&s->gb, 4);
583  if (img->color_cache_bits < 1 || img->color_cache_bits > 11) {
584  av_log(s->avctx, AV_LOG_ERROR, "invalid color cache bits: %d\n",
585  img->color_cache_bits);
586  return AVERROR_INVALIDDATA;
587  }
588  img->color_cache = av_calloc(1 << img->color_cache_bits,
589  sizeof(*img->color_cache));
590  if (!img->color_cache)
591  return AVERROR(ENOMEM);
592  } else {
593  img->color_cache_bits = 0;
594  }
595 
596  img->nb_huffman_groups = 1;
597  if (role == IMAGE_ROLE_ARGB && get_bits1(&s->gb)) {
599  if (ret < 0)
600  return ret;
601  img->nb_huffman_groups = s->nb_huffman_groups;
602  }
603  img->huffman_groups = av_calloc(img->nb_huffman_groups,
605  sizeof(*img->huffman_groups));
606  if (!img->huffman_groups)
607  return AVERROR(ENOMEM);
608 
609  for (i = 0; i < img->nb_huffman_groups; i++) {
610  hg = &img->huffman_groups[i * HUFFMAN_CODES_PER_META_CODE];
611  for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; j++) {
612  int alphabet_size = alphabet_sizes[j];
613  if (!j && img->color_cache_bits > 0)
614  alphabet_size += 1 << img->color_cache_bits;
615 
616  if (get_bits1(&s->gb)) {
617  read_huffman_code_simple(s, &hg[j]);
618  } else {
619  ret = read_huffman_code_normal(s, &hg[j], alphabet_size);
620  if (ret < 0)
621  return ret;
622  }
623  }
624  }
625 
626  width = img->frame->width;
627  if (role == IMAGE_ROLE_ARGB)
628  width = s->reduced_width;
629 
630  x = 0; y = 0;
631  while (y < img->frame->height) {
632  int v;
633 
634  if (get_bits_left(&s->gb) < 0)
635  return AVERROR_INVALIDDATA;
636 
637  hg = get_huffman_group(s, img, x, y);
638  v = huff_reader_get_symbol(&hg[HUFF_IDX_GREEN], &s->gb);
639  if (v < NUM_LITERAL_CODES) {
640  /* literal pixel values */
641  uint8_t *p = GET_PIXEL(img->frame, x, y);
642  p[2] = v;
643  p[1] = huff_reader_get_symbol(&hg[HUFF_IDX_RED], &s->gb);
644  p[3] = huff_reader_get_symbol(&hg[HUFF_IDX_BLUE], &s->gb);
645  p[0] = huff_reader_get_symbol(&hg[HUFF_IDX_ALPHA], &s->gb);
646  if (img->color_cache_bits)
648  x++;
649  if (x == width) {
650  x = 0;
651  y++;
652  }
653  } else if (v < NUM_LITERAL_CODES + NUM_LENGTH_CODES) {
654  /* LZ77 backwards mapping */
655  int prefix_code, length, distance, ref_x, ref_y;
656 
657  /* parse length and distance */
658  prefix_code = v - NUM_LITERAL_CODES;
659  if (prefix_code < 4) {
660  length = prefix_code + 1;
661  } else {
662  int extra_bits = (prefix_code - 2) >> 1;
663  int offset = 2 + (prefix_code & 1) << extra_bits;
664  length = offset + get_bits(&s->gb, extra_bits) + 1;
665  }
666  prefix_code = huff_reader_get_symbol(&hg[HUFF_IDX_DIST], &s->gb);
667  if (prefix_code > 39U) {
668  av_log(s->avctx, AV_LOG_ERROR,
669  "distance prefix code too large: %d\n", prefix_code);
670  return AVERROR_INVALIDDATA;
671  }
672  if (prefix_code < 4) {
673  distance = prefix_code + 1;
674  } else {
675  int extra_bits = prefix_code - 2 >> 1;
676  int offset = 2 + (prefix_code & 1) << extra_bits;
677  distance = offset + get_bits(&s->gb, extra_bits) + 1;
678  }
679 
680  /* find reference location */
681  if (distance <= NUM_SHORT_DISTANCES) {
682  int xi = lz77_distance_offsets[distance - 1][0];
683  int yi = lz77_distance_offsets[distance - 1][1];
684  distance = FFMAX(1, xi + yi * width);
685  } else {
687  }
688  ref_x = x;
689  ref_y = y;
690  if (distance <= x) {
691  ref_x -= distance;
692  distance = 0;
693  } else {
694  ref_x = 0;
695  distance -= x;
696  }
697  while (distance >= width) {
698  ref_y--;
699  distance -= width;
700  }
701  if (distance > 0) {
702  ref_x = width - distance;
703  ref_y--;
704  }
705  ref_x = FFMAX(0, ref_x);
706  ref_y = FFMAX(0, ref_y);
707 
708  if (ref_y == y && ref_x >= x)
709  return AVERROR_INVALIDDATA;
710 
711  /* copy pixels
712  * source and dest regions can overlap and wrap lines, so just
713  * copy per-pixel */
714  for (i = 0; i < length; i++) {
715  uint8_t *p_ref = GET_PIXEL(img->frame, ref_x, ref_y);
716  uint8_t *p = GET_PIXEL(img->frame, x, y);
717 
718  AV_COPY32(p, p_ref);
719  if (img->color_cache_bits)
721  x++;
722  ref_x++;
723  if (x == width) {
724  x = 0;
725  y++;
726  }
727  if (ref_x == width) {
728  ref_x = 0;
729  ref_y++;
730  }
731  if (y == img->frame->height || ref_y == img->frame->height)
732  break;
733  }
734  } else {
735  /* read from color cache */
736  uint8_t *p = GET_PIXEL(img->frame, x, y);
737  int cache_idx = v - (NUM_LITERAL_CODES + NUM_LENGTH_CODES);
738 
739  if (!img->color_cache_bits) {
740  av_log(s->avctx, AV_LOG_ERROR, "color cache not found\n");
741  return AVERROR_INVALIDDATA;
742  }
743  if (cache_idx >= 1 << img->color_cache_bits) {
744  av_log(s->avctx, AV_LOG_ERROR,
745  "color cache index out-of-bounds\n");
746  return AVERROR_INVALIDDATA;
747  }
748  AV_WB32(p, img->color_cache[cache_idx]);
749  x++;
750  if (x == width) {
751  x = 0;
752  y++;
753  }
754  }
755  }
756 
757  return 0;
758 }
759 
/* PRED_MODE_BLACK: predict opaque black (0xFF000000 in ARGB byte order). */
static void inv_predict_0(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    p[0] = 0xFF; /* alpha */
    p[1] = 0x00;
    p[2] = 0x00;
    p[3] = 0x00;
}
766 
/* PRED_MODE_L: predict from the left neighbor. */
static void inv_predict_1(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    for (int i = 0; i < 4; i++)
        p[i] = p_l[i];
}
773 
/* PRED_MODE_T: predict from the top neighbor. */
static void inv_predict_2(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    for (int i = 0; i < 4; i++)
        p[i] = p_t[i];
}
780 
/* PRED_MODE_TR: predict from the top-right neighbor. */
static void inv_predict_3(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    for (int i = 0; i < 4; i++)
        p[i] = p_tr[i];
}
787 
/* PRED_MODE_TL: predict from the top-left neighbor. */
static void inv_predict_4(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    for (int i = 0; i < 4; i++)
        p[i] = p_tl[i];
}
794 
/* PRED_MODE_AVG_T_AVG_L_TR: average of top and avg(left, top-right). */
static void inv_predict_5(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    for (int i = 0; i < 4; i++)
        p[i] = (p_t[i] + ((p_l[i] + p_tr[i]) >> 1)) >> 1;
}
804 
/* PRED_MODE_AVG_L_TL: average of left and top-left neighbors. */
static void inv_predict_6(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    for (int i = 0; i < 4; i++)
        p[i] = (p_l[i] + p_tl[i]) >> 1;
}
814 
/* PRED_MODE_AVG_L_T: average of left and top neighbors. */
static void inv_predict_7(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    for (int i = 0; i < 4; i++)
        p[i] = (p_l[i] + p_t[i]) >> 1;
}
824 
/* PRED_MODE_AVG_TL_T: average of top-left and top neighbors. */
static void inv_predict_8(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    for (int i = 0; i < 4; i++)
        p[i] = (p_tl[i] + p_t[i]) >> 1;
}
834 
/* PRED_MODE_AVG_T_TR: average of top and top-right neighbors. */
static void inv_predict_9(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    for (int i = 0; i < 4; i++)
        p[i] = (p_t[i] + p_tr[i]) >> 1;
}
844 
/* PRED_MODE_AVG_AVG_L_TL_AVG_T_TR: average of avg(left, top-left) and
 * avg(top, top-right). */
static void inv_predict_10(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                           const uint8_t *p_t, const uint8_t *p_tr)
{
    for (int i = 0; i < 4; i++)
        p[i] = (((p_l[i] + p_tl[i]) >> 1) + ((p_t[i] + p_tr[i]) >> 1)) >> 1;
}
854 
/* PRED_MODE_SELECT: choose the left or top neighbor depending on which is
 * closer to a gradient estimate through the top-left pixel (summed over
 * all four components; ties pick top). */
static void inv_predict_11(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                           const uint8_t *p_t, const uint8_t *p_tr)
{
    const uint8_t *src;
    int score = 0;

    for (int i = 0; i < 4; i++) {
        int dl = p_l[i] - p_tl[i];
        int dt = p_t[i] - p_tl[i];
        score += (dl < 0 ? -dl : dl) - (dt < 0 ? -dt : dt);
    }
    src = score <= 0 ? p_t : p_l;
    for (int i = 0; i < 4; i++)
        p[i] = src[i];
}
868 
/* PRED_MODE_ADD_SUBTRACT_FULL: gradient predictor left + top - top-left,
 * clamped to the [0, 255] byte range. */
static void inv_predict_12(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                           const uint8_t *p_t, const uint8_t *p_tr)
{
    for (int i = 0; i < 4; i++) {
        int v = p_l[i] + p_t[i] - p_tl[i];
        p[i] = v < 0 ? 0 : v > 255 ? 255 : v;
    }
}
878 
879 static av_always_inline uint8_t clamp_add_subtract_half(int a, int b, int c)
880 {
881  int d = a + b >> 1;
882  return av_clip_uint8(d + (d - c) / 2);
883 }
884 
/* PRED_MODE_ADD_SUBTRACT_HALF: per-component clamp_add_subtract_half() of
 * left, top and top-left neighbors. */
static void inv_predict_13(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                           const uint8_t *p_t, const uint8_t *p_tr)
{
    for (int i = 0; i < 4; i++)
        p[i] = clamp_add_subtract_half(p_l[i], p_t[i], p_tl[i]);
}
894 
/* Inverse spatial predictor: writes the 4-byte ARGB prediction for the
 * current pixel into p, given its left, top-left, top and top-right
 * neighbors. */
typedef void (*inv_predict_func)(uint8_t *p, const uint8_t *p_l,
                                 const uint8_t *p_tl, const uint8_t *p_t,
                                 const uint8_t *p_tr);
898 
899 static const inv_predict_func inverse_predict[14] = {
904 };
905 
/* Undo the predictor transform for the pixel at (x, y): compute the
 * prediction from already-reconstructed neighbors and add it, per
 * component with modulo-256 wraparound, to the stored residual.
 * Callers guarantee x > 0 and y > 0 (edge pixels use fixed modes). */
static void inverse_prediction(AVFrame *frame, enum PredictionMode m, int x, int y)
{
    uint8_t *dec, *p_l, *p_tl, *p_t, *p_tr;
    uint8_t p[4];

    dec = GET_PIXEL(frame, x, y);
    p_l = GET_PIXEL(frame, x - 1, y);
    p_tl = GET_PIXEL(frame, x - 1, y - 1);
    p_t = GET_PIXEL(frame, x, y - 1);
    /* in the last column there is no top-right neighbor; use the first
     * pixel of the current row instead (the next pixel in scan order
     * after the top row ends) */
    if (x == frame->width - 1)
        p_tr = GET_PIXEL(frame, 0, y);
    else
        p_tr = GET_PIXEL(frame, x + 1, y - 1);

    inverse_predict[m](p, p_l, p_tl, p_t, p_tr);

    /* additions are on uint8_t, so they wrap mod 256 by design */
    dec[0] += p[0];
    dec[1] += p[1];
    dec[2] += p[2];
    dec[3] += p[3];
}
927 
929 {
930  ImageContext *img = &s->image[IMAGE_ROLE_ARGB];
931  ImageContext *pimg = &s->image[IMAGE_ROLE_PREDICTOR];
932  int x, y;
933 
934  for (y = 0; y < img->frame->height; y++) {
935  for (x = 0; x < s->reduced_width; x++) {
936  int tx = x >> pimg->size_reduction;
937  int ty = y >> pimg->size_reduction;
938  enum PredictionMode m = GET_PIXEL_COMP(pimg->frame, tx, ty, 2);
939 
940  if (x == 0) {
941  if (y == 0)
942  m = PRED_MODE_BLACK;
943  else
944  m = PRED_MODE_T;
945  } else if (y == 0)
946  m = PRED_MODE_L;
947 
948  if (m > 13) {
949  av_log(s->avctx, AV_LOG_ERROR,
950  "invalid predictor mode: %d\n", m);
951  return AVERROR_INVALIDDATA;
952  }
953  inverse_prediction(img->frame, m, x, y);
954  }
955  }
956  return 0;
957 }
958 
/* Cross-component delta of the color transform: multiply the signed
 * 3.5 fixed-point transform coefficient by the signed sample value and
 * scale down by 32 (>> 5); the caller adds the result mod 256. */
static av_always_inline uint8_t color_transform_delta(uint8_t color_pred,
                                                      uint8_t color)
{
    return (int)ff_u8_to_s8(color_pred) * ff_u8_to_s8(color) >> 5;
}
964 
966 {
967  ImageContext *img, *cimg;
968  int x, y, cx, cy;
969  uint8_t *p, *cp;
970 
971  img = &s->image[IMAGE_ROLE_ARGB];
972  cimg = &s->image[IMAGE_ROLE_COLOR_TRANSFORM];
973 
974  for (y = 0; y < img->frame->height; y++) {
975  for (x = 0; x < s->reduced_width; x++) {
976  cx = x >> cimg->size_reduction;
977  cy = y >> cimg->size_reduction;
978  cp = GET_PIXEL(cimg->frame, cx, cy);
979  p = GET_PIXEL(img->frame, x, y);
980 
981  p[1] += color_transform_delta(cp[3], p[2]);
982  p[3] += color_transform_delta(cp[2], p[2]) +
983  color_transform_delta(cp[1], p[1]);
984  }
985  }
986  return 0;
987 }
988 
990 {
991  int x, y;
992  ImageContext *img = &s->image[IMAGE_ROLE_ARGB];
993 
994  for (y = 0; y < img->frame->height; y++) {
995  for (x = 0; x < s->reduced_width; x++) {
996  uint8_t *p = GET_PIXEL(img->frame, x, y);
997  p[1] += p[2];
998  p[3] += p[2];
999  }
1000  }
1001  return 0;
1002 }
1003 
1005 {
1006  ImageContext *img;
1007  ImageContext *pal;
1008  int i, x, y;
1009  uint8_t *p;
1010 
1011  img = &s->image[IMAGE_ROLE_ARGB];
1012  pal = &s->image[IMAGE_ROLE_COLOR_INDEXING];
1013 
1014  if (pal->size_reduction > 0) { // undo pixel packing
1015  GetBitContext gb_g;
1016  uint8_t *line;
1017  int pixel_bits = 8 >> pal->size_reduction;
1018 
1019  line = av_malloc(img->frame->linesize[0] + AV_INPUT_BUFFER_PADDING_SIZE);
1020  if (!line)
1021  return AVERROR(ENOMEM);
1022 
1023  for (y = 0; y < img->frame->height; y++) {
1024  p = GET_PIXEL(img->frame, 0, y);
1025  memcpy(line, p, img->frame->linesize[0]);
1026  init_get_bits(&gb_g, line, img->frame->linesize[0] * 8);
1027  skip_bits(&gb_g, 16);
1028  i = 0;
1029  for (x = 0; x < img->frame->width; x++) {
1030  p = GET_PIXEL(img->frame, x, y);
1031  p[2] = get_bits(&gb_g, pixel_bits);
1032  i++;
1033  if (i == 1 << pal->size_reduction) {
1034  skip_bits(&gb_g, 24);
1035  i = 0;
1036  }
1037  }
1038  }
1039  av_free(line);
1040  s->reduced_width = s->width; // we are back to full size
1041  }
1042 
1043  // switch to local palette if it's worth initializing it
1044  if (img->frame->height * img->frame->width > 300) {
1045  uint8_t palette[256 * 4];
1046  const int size = pal->frame->width * 4;
1047  av_assert0(size <= 1024U);
1048  memcpy(palette, GET_PIXEL(pal->frame, 0, 0), size); // copy palette
1049  // set extra entries to transparent black
1050  memset(palette + size, 0, 256 * 4 - size);
1051  for (y = 0; y < img->frame->height; y++) {
1052  for (x = 0; x < img->frame->width; x++) {
1053  p = GET_PIXEL(img->frame, x, y);
1054  i = p[2];
1055  AV_COPY32(p, &palette[i * 4]);
1056  }
1057  }
1058  } else {
1059  for (y = 0; y < img->frame->height; y++) {
1060  for (x = 0; x < img->frame->width; x++) {
1061  p = GET_PIXEL(img->frame, x, y);
1062  i = p[2];
1063  if (i >= pal->frame->width) {
1064  AV_WB32(p, 0x00000000);
1065  } else {
1066  const uint8_t *pi = GET_PIXEL(pal->frame, i, 0);
1067  AV_COPY32(p, pi);
1068  }
1069  }
1070  }
1071  }
1072 
1073  return 0;
1074 }
1075 
1076 static void update_canvas_size(AVCodecContext *avctx, int w, int h)
1077 {
1078  WebPContext *s = avctx->priv_data;
1079  if (s->width && s->width != w) {
1080  av_log(avctx, AV_LOG_WARNING, "Width mismatch. %d != %d\n",
1081  s->width, w);
1082  }
1083  s->width = w;
1084  if (s->height && s->height != h) {
1085  av_log(avctx, AV_LOG_WARNING, "Height mismatch. %d != %d\n",
1086  s->height, h);
1087  }
1088  s->height = h;
1089 }
1090 
1092  int *got_frame, const uint8_t *data_start,
1093  unsigned int data_size, int is_alpha_chunk)
1094 {
1095  WebPContext *s = avctx->priv_data;
1096  int w, h, ret, i, used;
1097 
1098  if (!is_alpha_chunk)
1099  avctx->pix_fmt = AV_PIX_FMT_ARGB;
1100 
1101  ret = init_get_bits8(&s->gb, data_start, data_size);
1102  if (ret < 0)
1103  return ret;
1104 
1105  if (!is_alpha_chunk) {
1106  if (get_bits(&s->gb, 8) != 0x2F) {
1107  av_log(avctx, AV_LOG_ERROR, "Invalid WebP Lossless signature\n");
1108  return AVERROR_INVALIDDATA;
1109  }
1110 
1111  w = get_bits(&s->gb, 14) + 1;
1112  h = get_bits(&s->gb, 14) + 1;
1113 
1114  update_canvas_size(avctx, w, h);
1115 
1116  ret = ff_set_dimensions(avctx, s->width, s->height);
1117  if (ret < 0)
1118  return ret;
1119 
1120  s->has_alpha = get_bits1(&s->gb);
1121 
1122  if (get_bits(&s->gb, 3) != 0x0) {
1123  av_log(avctx, AV_LOG_ERROR, "Invalid WebP Lossless version\n");
1124  return AVERROR_INVALIDDATA;
1125  }
1126  } else {
1127  if (!s->width || !s->height)
1128  return AVERROR_BUG;
1129  w = s->width;
1130  h = s->height;
1131  }
1132 
1133  /* parse transformations */
1134  s->nb_transforms = 0;
1135  s->reduced_width = s->width;
1136  used = 0;
1137  while (get_bits1(&s->gb)) {
1138  enum TransformType transform = get_bits(&s->gb, 2);
1139  if (used & (1 << transform)) {
1140  av_log(avctx, AV_LOG_ERROR, "Transform %d used more than once\n",
1141  transform);
1143  goto free_and_return;
1144  }
1145  used |= (1 << transform);
1146  s->transforms[s->nb_transforms++] = transform;
1147  switch (transform) {
1148  case PREDICTOR_TRANSFORM:
1150  break;
1151  case COLOR_TRANSFORM:
1153  break;
1156  break;
1157  }
1158  if (ret < 0)
1159  goto free_and_return;
1160  }
1161 
1162  /* decode primary image */
1163  s->image[IMAGE_ROLE_ARGB].frame = p;
1164  if (is_alpha_chunk)
1165  s->image[IMAGE_ROLE_ARGB].is_alpha_primary = 1;
1167  if (ret < 0)
1168  goto free_and_return;
1169 
1170  /* apply transformations */
1171  for (i = s->nb_transforms - 1; i >= 0; i--) {
1172  switch (s->transforms[i]) {
1173  case PREDICTOR_TRANSFORM:
1175  break;
1176  case COLOR_TRANSFORM:
1178  break;
1179  case SUBTRACT_GREEN:
1181  break;
1184  break;
1185  }
1186  if (ret < 0)
1187  goto free_and_return;
1188  }
1189 
1190  *got_frame = 1;
1191  p->pict_type = AV_PICTURE_TYPE_I;
1192  p->flags |= AV_FRAME_FLAG_KEY;
1193  p->flags |= AV_FRAME_FLAG_LOSSLESS;
1194  ret = data_size;
1195 
1196 free_and_return:
1197  for (i = 0; i < IMAGE_ROLE_NB; i++)
1198  image_ctx_free(&s->image[i]);
1199 
1200  return ret;
1201 }
1202 
1204 {
1205  int x, y, ls;
1206  uint8_t *dec;
1207 
1208  ls = frame->linesize[3];
1209 
1210  /* filter first row using horizontal filter */
1211  dec = frame->data[3] + 1;
1212  for (x = 1; x < frame->width; x++, dec++)
1213  *dec += *(dec - 1);
1214 
1215  /* filter first column using vertical filter */
1216  dec = frame->data[3] + ls;
1217  for (y = 1; y < frame->height; y++, dec += ls)
1218  *dec += *(dec - ls);
1219 
1220  /* filter the rest using the specified filter */
1221  switch (m) {
1223  for (y = 1; y < frame->height; y++) {
1224  dec = frame->data[3] + y * ls + 1;
1225  for (x = 1; x < frame->width; x++, dec++)
1226  *dec += *(dec - 1);
1227  }
1228  break;
1229  case ALPHA_FILTER_VERTICAL:
1230  for (y = 1; y < frame->height; y++) {
1231  dec = frame->data[3] + y * ls + 1;
1232  for (x = 1; x < frame->width; x++, dec++)
1233  *dec += *(dec - ls);
1234  }
1235  break;
1236  case ALPHA_FILTER_GRADIENT:
1237  for (y = 1; y < frame->height; y++) {
1238  dec = frame->data[3] + y * ls + 1;
1239  for (x = 1; x < frame->width; x++, dec++)
1240  dec[0] += av_clip_uint8(*(dec - 1) + *(dec - ls) - *(dec - ls - 1));
1241  }
1242  break;
1243  }
1244 }
1245 
1247  const uint8_t *data_start,
1248  unsigned int data_size)
1249 {
1250  WebPContext *s = avctx->priv_data;
1251  int x, y, ret;
1252 
1253  if (s->alpha_compression == ALPHA_COMPRESSION_NONE) {
1254  GetByteContext gb;
1255 
1256  bytestream2_init(&gb, data_start, data_size);
1257  for (y = 0; y < s->height; y++)
1258  bytestream2_get_buffer(&gb, p->data[3] + p->linesize[3] * y,
1259  s->width);
1260  } else if (s->alpha_compression == ALPHA_COMPRESSION_VP8L) {
1261  uint8_t *ap, *pp;
1262  int alpha_got_frame = 0;
1263 
1264  s->alpha_frame = av_frame_alloc();
1265  if (!s->alpha_frame)
1266  return AVERROR(ENOMEM);
1267 
1268  ret = vp8_lossless_decode_frame(avctx, s->alpha_frame, &alpha_got_frame,
1269  data_start, data_size, 1);
1270  if (ret < 0) {
1271  av_frame_free(&s->alpha_frame);
1272  return ret;
1273  }
1274  if (!alpha_got_frame) {
1275  av_frame_free(&s->alpha_frame);
1276  return AVERROR_INVALIDDATA;
1277  }
1278 
1279  /* copy green component of alpha image to alpha plane of primary image */
1280  for (y = 0; y < s->height; y++) {
1281  ap = GET_PIXEL(s->alpha_frame, 0, y) + 2;
1282  pp = p->data[3] + p->linesize[3] * y;
1283  for (x = 0; x < s->width; x++) {
1284  *pp = *ap;
1285  pp++;
1286  ap += 4;
1287  }
1288  }
1289  av_frame_free(&s->alpha_frame);
1290  }
1291 
1292  /* apply alpha filtering */
1293  if (s->alpha_filter)
1294  alpha_inverse_prediction(p, s->alpha_filter);
1295 
1296  return 0;
1297 }
1298 
1300  int *got_frame, uint8_t *data_start,
1301  unsigned int data_size)
1302 {
1303  WebPContext *s = avctx->priv_data;
1304  int ret;
1305 
1306  if (!s->initialized) {
1307  ff_vp8_decode_init(avctx);
1308  s->initialized = 1;
1309  s->v.actually_webp = 1;
1310  }
1311  avctx->pix_fmt = s->has_alpha ? AV_PIX_FMT_YUVA420P : AV_PIX_FMT_YUV420P;
1312 
1313  if (data_size > INT_MAX) {
1314  av_log(avctx, AV_LOG_ERROR, "unsupported chunk size\n");
1315  return AVERROR_PATCHWELCOME;
1316  }
1317 
1318  av_packet_unref(s->pkt);
1319  s->pkt->data = data_start;
1320  s->pkt->size = data_size;
1321 
1322  ret = ff_vp8_decode_frame(avctx, p, got_frame, s->pkt);
1323  if (ret < 0)
1324  return ret;
1325 
1326  if (!*got_frame)
1327  return AVERROR_INVALIDDATA;
1328 
1329  update_canvas_size(avctx, avctx->width, avctx->height);
1330 
1331  if (s->has_alpha) {
1332  ret = vp8_lossy_decode_alpha(avctx, p, s->alpha_data,
1333  s->alpha_data_size);
1334  if (ret < 0)
1335  return ret;
1336  }
1337  return ret;
1338 }
1339 
1341  int *got_frame, AVPacket *avpkt)
1342 {
1343  WebPContext *s = avctx->priv_data;
1344  GetByteContext gb;
1345  int ret;
1346  uint32_t chunk_type, chunk_size;
1347  int vp8x_flags = 0;
1348 
1349  s->avctx = avctx;
1350  s->width = 0;
1351  s->height = 0;
1352  *got_frame = 0;
1353  s->has_alpha = 0;
1354  s->has_exif = 0;
1355  s->has_iccp = 0;
1356  s->has_xmp = 0;
1357  bytestream2_init(&gb, avpkt->data, avpkt->size);
1358 
1359  if (bytestream2_get_bytes_left(&gb) < 12)
1360  return AVERROR_INVALIDDATA;
1361 
1362  if (bytestream2_get_le32(&gb) != MKTAG('R', 'I', 'F', 'F')) {
1363  av_log(avctx, AV_LOG_ERROR, "missing RIFF tag\n");
1364  return AVERROR_INVALIDDATA;
1365  }
1366 
1367  chunk_size = bytestream2_get_le32(&gb);
1368  if (bytestream2_get_bytes_left(&gb) < chunk_size)
1369  return AVERROR_INVALIDDATA;
1370 
1371  if (bytestream2_get_le32(&gb) != MKTAG('W', 'E', 'B', 'P')) {
1372  av_log(avctx, AV_LOG_ERROR, "missing WEBP tag\n");
1373  return AVERROR_INVALIDDATA;
1374  }
1375 
1376  while (bytestream2_get_bytes_left(&gb) > 8) {
1377  chunk_type = bytestream2_get_le32(&gb);
1378  chunk_size = bytestream2_get_le32(&gb);
1379  if (chunk_size == UINT32_MAX)
1380  return AVERROR_INVALIDDATA;
1381  chunk_size += chunk_size & 1;
1382 
1383  if (bytestream2_get_bytes_left(&gb) < chunk_size) {
1384  /* we seem to be running out of data, but it could also be that the
1385  bitstream has trailing junk leading to bogus chunk_size. */
1386  break;
1387  }
1388 
1389  switch (chunk_type) {
1390  case MKTAG('V', 'P', '8', ' '):
1391  if (!*got_frame) {
1392  ret = vp8_lossy_decode_frame(avctx, p, got_frame,
1393  avpkt->data + bytestream2_tell(&gb),
1394  chunk_size);
1395  if (ret < 0)
1396  return ret;
1397  }
1398  bytestream2_skip(&gb, chunk_size);
1399  break;
1400  case MKTAG('V', 'P', '8', 'L'):
1401  if (!*got_frame) {
1402  ret = vp8_lossless_decode_frame(avctx, p, got_frame,
1403  avpkt->data + bytestream2_tell(&gb),
1404  chunk_size, 0);
1405  if (ret < 0)
1406  return ret;
1407 #if FF_API_CODEC_PROPS
1411 #endif
1412  }
1413  bytestream2_skip(&gb, chunk_size);
1414  break;
1415  case MKTAG('V', 'P', '8', 'X'):
1416  if (s->width || s->height || *got_frame) {
1417  av_log(avctx, AV_LOG_ERROR, "Canvas dimensions are already set\n");
1418  return AVERROR_INVALIDDATA;
1419  }
1420  vp8x_flags = bytestream2_get_byte(&gb);
1421  bytestream2_skip(&gb, 3);
1422  s->width = bytestream2_get_le24(&gb) + 1;
1423  s->height = bytestream2_get_le24(&gb) + 1;
1424  ret = av_image_check_size(s->width, s->height, 0, avctx);
1425  if (ret < 0)
1426  return ret;
1427  break;
1428  case MKTAG('A', 'L', 'P', 'H'): {
1429  int alpha_header, filter_m, compression;
1430 
1431  if (!(vp8x_flags & VP8X_FLAG_ALPHA)) {
1432  av_log(avctx, AV_LOG_WARNING,
1433  "ALPHA chunk present, but alpha bit not set in the "
1434  "VP8X header\n");
1435  }
1436  if (chunk_size == 0) {
1437  av_log(avctx, AV_LOG_ERROR, "invalid ALPHA chunk size\n");
1438  return AVERROR_INVALIDDATA;
1439  }
1440  alpha_header = bytestream2_get_byte(&gb);
1441  s->alpha_data = avpkt->data + bytestream2_tell(&gb);
1442  s->alpha_data_size = chunk_size - 1;
1443  bytestream2_skip(&gb, s->alpha_data_size);
1444 
1445  filter_m = (alpha_header >> 2) & 0x03;
1446  compression = alpha_header & 0x03;
1447 
1448  if (compression > ALPHA_COMPRESSION_VP8L) {
1449  av_log(avctx, AV_LOG_VERBOSE,
1450  "skipping unsupported ALPHA chunk\n");
1451  } else {
1452  s->has_alpha = 1;
1453  s->alpha_compression = compression;
1454  s->alpha_filter = filter_m;
1455  }
1456 
1457  break;
1458  }
1459  case MKTAG('E', 'X', 'I', 'F'): {
1460  AVBufferRef *exif_buf = NULL;
1461 
1462  if (s->has_exif) {
1463  av_log(avctx, AV_LOG_VERBOSE, "Ignoring extra EXIF chunk\n");
1464  goto exif_end;
1465  }
1466 
1467  if (!(vp8x_flags & VP8X_FLAG_EXIF_METADATA))
1468  av_log(avctx, AV_LOG_WARNING,
1469  "EXIF chunk present, but Exif bit not set in the "
1470  "VP8X header\n");
1471 
1472  exif_buf = av_buffer_alloc(chunk_size);
1473  if (!exif_buf) {
1474  av_log(avctx, AV_LOG_WARNING, "unable to allocate EXIF buffer\n");
1475  goto exif_end;
1476  }
1477  s->has_exif = 1;
1478  memcpy(exif_buf->data, gb.buffer, chunk_size);
1479 
1480  ret = ff_decode_exif_attach_buffer(avctx, p, &exif_buf, AV_EXIF_TIFF_HEADER);
1481  if (ret < 0)
1482  av_log(avctx, AV_LOG_WARNING, "unable to attach EXIF buffer\n");
1483 
1484 exif_end:
1485  bytestream2_skip(&gb, chunk_size);
1486  break;
1487  }
1488  case MKTAG('I', 'C', 'C', 'P'): {
1489  AVFrameSideData *sd;
1490 
1491  if (s->has_iccp) {
1492  av_log(avctx, AV_LOG_VERBOSE, "Ignoring extra ICCP chunk\n");
1493  bytestream2_skip(&gb, chunk_size);
1494  break;
1495  }
1496  if (!(vp8x_flags & VP8X_FLAG_ICC))
1497  av_log(avctx, AV_LOG_WARNING,
1498  "ICCP chunk present, but ICC Profile bit not set in the "
1499  "VP8X header\n");
1500 
1501  s->has_iccp = 1;
1502 
1503  ret = ff_frame_new_side_data(avctx, p, AV_FRAME_DATA_ICC_PROFILE, chunk_size, &sd);
1504  if (ret < 0)
1505  return ret;
1506 
1507  if (sd) {
1508  bytestream2_get_buffer(&gb, sd->data, chunk_size);
1509  } else {
1510  bytestream2_skip(&gb, chunk_size);
1511  }
1512  break;
1513  }
1514  case MKTAG('A', 'N', 'I', 'M'):
1515  case MKTAG('A', 'N', 'M', 'F'):
1516  av_log(avctx, AV_LOG_WARNING, "skipping unsupported chunk: %s\n",
1517  av_fourcc2str(chunk_type));
1518  bytestream2_skip(&gb, chunk_size);
1519  break;
1520  case MKTAG('X', 'M', 'P', ' '): {
1521  if (s->has_xmp) {
1522  av_log(avctx, AV_LOG_VERBOSE, "Ignoring extra XMP chunk\n");
1523  bytestream2_skip(&gb, chunk_size);
1524  break;
1525  }
1526  if (!(vp8x_flags & VP8X_FLAG_XMP_METADATA))
1527  av_log(avctx, AV_LOG_WARNING,
1528  "XMP chunk present, but XMP bit not set in the "
1529  "VP8X header\n");
1530 
1531  s->has_xmp = 1;
1532 
1533  // there are at least chunk_size bytes left to read
1534  uint8_t *buffer = av_malloc(chunk_size + 1);
1535  if (!buffer)
1536  return AVERROR(ENOMEM);
1537 
1538  bytestream2_get_buffer(&gb, buffer, chunk_size);
1539  buffer[chunk_size] = '\0';
1540 
1541  av_dict_set(&p->metadata, "xmp", buffer, AV_DICT_DONT_STRDUP_VAL);
1542  break;
1543  }
1544  default:
1545  av_log(avctx, AV_LOG_VERBOSE, "skipping unknown chunk: %s\n",
1546  av_fourcc2str(chunk_type));
1547  bytestream2_skip(&gb, chunk_size);
1548  break;
1549  }
1550  }
1551 
1552  if (!*got_frame) {
1553  av_log(avctx, AV_LOG_ERROR, "image data not found\n");
1554  return AVERROR_INVALIDDATA;
1555  }
1556 
1557  return avpkt->size;
1558 }
1559 
1561 {
1562  WebPContext *s = avctx->priv_data;
1563 
1564  s->pkt = av_packet_alloc();
1565  if (!s->pkt)
1566  return AVERROR(ENOMEM);
1567 
1568  return 0;
1569 }
1570 
1572 {
1573  WebPContext *s = avctx->priv_data;
1574 
1575  av_packet_free(&s->pkt);
1576 
1577  if (s->initialized)
1578  return ff_vp8_decode_free(avctx);
1579 
1580  return 0;
1581 }
1582 
1584  .p.name = "webp",
1585  CODEC_LONG_NAME("WebP image"),
1586  .p.type = AVMEDIA_TYPE_VIDEO,
1587  .p.id = AV_CODEC_ID_WEBP,
1588  .priv_data_size = sizeof(WebPContext),
1591  .close = webp_decode_close,
1592  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
1593  .caps_internal = FF_CODEC_CAP_ICC_PROFILES |
1595 };
WebPContext::width
int width
Definition: webp.c:211
WebPContext::alpha_frame
AVFrame * alpha_frame
Definition: webp.c:199
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:433
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
ff_vp8_decode_free
av_cold int ff_vp8_decode_free(AVCodecContext *avctx)
Definition: vp8.c:2814
HuffReader::vlc
VLC vlc
Definition: webp.c:176
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
inv_predict_12
static void inv_predict_12(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:870
ff_vlc_init_from_lengths
int ff_vlc_init_from_lengths(VLC *vlc, int nb_bits, int nb_codes, const int8_t *lens, int lens_wrap, const void *symbols, int symbols_wrap, int symbols_size, int offset, int flags, void *logctx)
Build VLC decoding tables suitable for use with get_vlc2()
Definition: vlc.c:306
extra_bits
#define extra_bits(eb)
Definition: intrax8.c:120
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:688
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
NUM_SHORT_DISTANCES
#define NUM_SHORT_DISTANCES
Definition: webp.c:72
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(const GetByteContext *g)
Definition: bytestream.h:158
vp8_lossy_decode_frame
static int vp8_lossy_decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, uint8_t *data_start, unsigned int data_size)
Definition: webp.c:1299
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:206
color
Definition: vf_paletteuse.c:513
PRED_MODE_AVG_T_AVG_L_TR
@ PRED_MODE_AVG_T_AVG_L_TR
Definition: webp.c:128
ALPHA_FILTER_HORIZONTAL
@ ALPHA_FILTER_HORIZONTAL
Definition: webp.c:110
HuffReader::simple_symbols
uint16_t simple_symbols[2]
Definition: webp.c:179
GetByteContext
Definition: bytestream.h:33
bytestream2_tell
static av_always_inline int bytestream2_tell(const GetByteContext *g)
Definition: bytestream.h:192
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
ff_u8_to_s8
static int8_t ff_u8_to_s8(uint8_t a)
Definition: mathops.h:247
block_bits
static const uint8_t block_bits[]
Definition: imm4.c:104
PRED_MODE_BLACK
@ PRED_MODE_BLACK
Definition: webp.c:123
inv_predict_4
static void inv_predict_4(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:789
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
inv_predict_2
static void inv_predict_2(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:775
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:435
AVFrame::width
int width
Definition: frame.h:507
GET_PIXEL_COMP
#define GET_PIXEL_COMP(frame, x, y, c)
Definition: webp.c:226
AVPacket::data
uint8_t * data
Definition: packet.h:595
PRED_MODE_ADD_SUBTRACT_FULL
@ PRED_MODE_ADD_SUBTRACT_FULL
Definition: webp.c:135
COLOR_INDEXING_TRANSFORM
@ COLOR_INDEXING_TRANSFORM
Definition: webp.c:119
b
#define b
Definition: input.c:43
SUBTRACT_GREEN
@ SUBTRACT_GREEN
Definition: webp.c:118
ImageContext::nb_huffman_groups
int nb_huffman_groups
Definition: webp.c:187
parse_transform_color
static int parse_transform_color(WebPContext *s)
Definition: webp.c:480
FFCodec
Definition: codec_internal.h:127
PRED_MODE_AVG_TL_T
@ PRED_MODE_AVG_TL_T
Definition: webp.c:131
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
max
#define max(a, b)
Definition: cuda_runtime.h:33
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Definition: utils.c:91
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:517
thread.h
WebPContext::transforms
enum TransformType transforms[4]
Definition: webp.c:215
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: packet.c:74
PRED_MODE_TR
@ PRED_MODE_TR
Definition: webp.c:126
PRED_MODE_AVG_L_T
@ PRED_MODE_AVG_L_T
Definition: webp.c:130
vp8_lossless_decode_frame
static int vp8_lossless_decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, const uint8_t *data_start, unsigned int data_size, int is_alpha_chunk)
Definition: webp.c:1091
HuffReader::simple
int simple
Definition: webp.c:177
PRED_MODE_TL
@ PRED_MODE_TL
Definition: webp.c:127
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:383
WebPContext::alpha_compression
enum AlphaCompression alpha_compression
Definition: webp.c:204
inv_predict_10
static void inv_predict_10(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:846
bytestream2_skip
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:337
inv_predict_8
static void inv_predict_8(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:826
WebPContext::avctx
AVCodecContext * avctx
Definition: webp.c:201
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
finish
static void finish(void)
Definition: movenc.c:374
ALPHA_COMPRESSION_NONE
@ ALPHA_COMPRESSION_NONE
Definition: webp.c:104
WebPContext::nb_transforms
int nb_transforms
Definition: webp.c:214
GetBitContext
Definition: get_bits.h:109
update_canvas_size
static void update_canvas_size(AVCodecContext *avctx, int w, int h)
Definition: webp.c:1076
WebPContext::alpha_data_size
int alpha_data_size
Definition: webp.c:207
inv_predict_func
void(* inv_predict_func)(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:895
COLOR_TRANSFORM
@ COLOR_TRANSFORM
Definition: webp.c:117
VP8X_FLAG_EXIF_METADATA
#define VP8X_FLAG_EXIF_METADATA
Definition: webp.c:61
inv_predict_3
static void inv_predict_3(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:782
ff_webp_decoder
const FFCodec ff_webp_decoder
Definition: webp.c:1583
color_transform_delta
static av_always_inline uint8_t color_transform_delta(uint8_t color_pred, uint8_t color)
Definition: webp.c:959
decode_entropy_coded_image
static int decode_entropy_coded_image(WebPContext *s, enum ImageRole role, int w, int h)
Definition: webp.c:554
AV_DICT_DONT_STRDUP_VAL
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that's been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:79
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:52
HUFF_IDX_GREEN
@ HUFF_IDX_GREEN
Definition: webp.c:140
WebPContext::has_exif
int has_exif
Definition: webp.c:208
read_huffman_code_normal
static int read_huffman_code_normal(WebPContext *s, HuffReader *hc, int alphabet_size)
Definition: webp.c:317
WebPContext::has_alpha
int has_alpha
Definition: webp.c:203
PredictionMode
PredictionMode
Definition: webp.c:122
FF_CODEC_CAP_USES_PROGRESSFRAMES
#define FF_CODEC_CAP_USES_PROGRESSFRAMES
The decoder might make use of the ProgressFrame API.
Definition: codec_internal.h:69
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
av_cold
#define av_cold
Definition: attributes.h:119
ImageContext::frame
AVFrame * frame
Definition: webp.c:184
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:544
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1650
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:650
inverse_prediction
static void inverse_prediction(AVFrame *frame, enum PredictionMode m, int x, int y)
Definition: webp.c:906
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:347
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:108
TransformType
TransformType
Definition: webp.c:115
PRED_MODE_AVG_T_TR
@ PRED_MODE_AVG_T_TR
Definition: webp.c:132
transform
static const int8_t transform[32][32]
Definition: dsp.c:27
ff_thread_get_buffer
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:1044
HUFFMAN_CODES_PER_META_CODE
#define HUFFMAN_CODES_PER_META_CODE
Definition: webp.c:68
GetByteContext::buffer
const uint8_t * buffer
Definition: bytestream.h:34
code_length_code_order
static const uint8_t code_length_code_order[NUM_CODE_LENGTH_CODES]
Definition: webp.c:81
color_cache_put
static av_always_inline void color_cache_put(ImageContext *img, uint32_t c)
Definition: webp.c:548
bits
uint8_t bits
Definition: vp3data.h:128
NUM_DISTANCE_CODES
#define NUM_DISTANCE_CODES
Definition: webp.c:71
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:42
inv_predict_11
static void inv_predict_11(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:856
NUM_CODE_LENGTH_CODES
#define NUM_CODE_LENGTH_CODES
Definition: webp.c:67
ImageContext
Definition: webp.c:182
decode.h
get_bits.h
ImageContext::color_cache
uint32_t * color_cache
Definition: webp.c:186
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
GET_PIXEL
#define GET_PIXEL(frame, x, y)
Definition: webp.c:223
ImageContext::is_alpha_primary
int is_alpha_primary
Definition: webp.c:193
PRED_MODE_AVG_L_TL
@ PRED_MODE_AVG_L_TL
Definition: webp.c:129
webp_decode_close
static av_cold int webp_decode_close(AVCodecContext *avctx)
Definition: webp.c:1571
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:332
ImageContext::huffman_groups
HuffReader * huffman_groups
Definition: webp.c:188
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
ff_vp8_decode_init
int ff_vp8_decode_init(AVCodecContext *avctx)
apply_subtract_green_transform
static int apply_subtract_green_transform(WebPContext *s)
Definition: webp.c:989
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:95
HuffReader::nb_symbols
int nb_symbols
Definition: webp.c:178
WebPContext::height
int height
Definition: webp.c:212
ALPHA_FILTER_NONE
@ ALPHA_FILTER_NONE
Definition: webp.c:109
clamp_add_subtract_half
static av_always_inline uint8_t clamp_add_subtract_half(int a, int b, int c)
Definition: webp.c:879
HUFF_IDX_DIST
@ HUFF_IDX_DIST
Definition: webp.c:144
NULL
#define NULL
Definition: coverity.c:32
exif_internal.h
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
inverse_predict
static const inv_predict_func inverse_predict[14]
Definition: webp.c:899
AV_EXIF_TIFF_HEADER
@ AV_EXIF_TIFF_HEADER
The TIFF header starts with 0x49492a00, or 0x4d4d002a.
Definition: exif.h:63
tiff_common.h
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:391
ImageContext::color_cache_bits
int color_cache_bits
Definition: webp.c:185
parse_transform_color_indexing
static int parse_transform_color_indexing(WebPContext *s)
Definition: webp.c:496
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:144
webp_decode_init
static av_cold int webp_decode_init(AVCodecContext *avctx)
Definition: webp.c:1560
WebPContext::v
VP8Context v
Definition: webp.c:197
bytestream2_get_buffer
static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g, uint8_t *dst, unsigned int size)
Definition: bytestream.h:267
alphabet_sizes
static const uint16_t alphabet_sizes[HUFFMAN_CODES_PER_META_CODE]
Definition: webp.c:75
NUM_LITERAL_CODES
#define NUM_LITERAL_CODES
Definition: webp.c:69
IMAGE_ROLE_PREDICTOR
@ IMAGE_ROLE_PREDICTOR
Definition: webp.c:163
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:645
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
vp8.h
alpha_inverse_prediction
static void alpha_inverse_prediction(AVFrame *frame, enum AlphaFilter m)
Definition: webp.c:1203
AV_WB32
#define AV_WB32(p, v)
Definition: intreadwrite.h:415
IMAGE_ROLE_COLOR_INDEXING
@ IMAGE_ROLE_COLOR_INDEXING
Definition: webp.c:170
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:551
inv_predict_0
static void inv_predict_0(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:761
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
IMAGE_ROLE_NB
@ IMAGE_ROLE_NB
Definition: webp.c:172
VP8X_FLAG_ICC
#define VP8X_FLAG_ICC
Definition: webp.c:63
AVPacket::size
int size
Definition: packet.h:596
ff_decode_exif_attach_buffer
int ff_decode_exif_attach_buffer(AVCodecContext *avctx, AVFrame *frame, AVBufferRef **pbuf, enum AVExifHeaderMode header_mode)
Attach the data buffer to the frame.
Definition: decode.c:2484
codec_internal.h
AlphaCompression
AlphaCompression
Definition: webp.c:103
i
#define i(width, name, range_min, range_max)
Definition: cbs_h264.c:63
PREDICTOR_TRANSFORM
@ PREDICTOR_TRANSFORM
Definition: webp.c:116
ImageContext::size_reduction
int size_reduction
Definition: webp.c:192
size
int size
Definition: twinvq_data.h:10344
ff_frame_new_side_data
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
Definition: decode.c:2172
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
AVFrameSideData::data
uint8_t * data
Definition: frame.h:292
ImageContext::role
enum ImageRole role
Definition: webp.c:183
decode_entropy_image
static int decode_entropy_image(WebPContext *s)
Definition: webp.c:434
apply_color_transform
static int apply_color_transform(WebPContext *s)
Definition: webp.c:965
VP8X_FLAG_ALPHA
#define VP8X_FLAG_ALPHA
Definition: webp.c:62
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:166
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
img
#define img
Definition: vf_colormatrix.c:114
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: packet.c:63
HuffReader
Definition: webp.c:175
parse_transform_predictor
static int parse_transform_predictor(WebPContext *s)
Definition: webp.c:464
av_buffer_alloc
AVBufferRef * av_buffer_alloc(size_t size)
Allocate an AVBuffer of the given size using av_malloc().
Definition: buffer.c:77
PRED_MODE_AVG_AVG_L_TL_AVG_T_TR
@ PRED_MODE_AVG_AVG_L_TL_AVG_T_TR
Definition: webp.c:133
AV_PIX_FMT_ARGB
@ AV_PIX_FMT_ARGB
packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
Definition: pixfmt.h:99
ALPHA_FILTER_GRADIENT
@ ALPHA_FILTER_GRADIENT
Definition: webp.c:112
WebPContext::nb_huffman_groups
int nb_huffman_groups
Definition: webp.c:219
inv_predict_5
static void inv_predict_5(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:796
WebPContext::reduced_width
int reduced_width
Definition: webp.c:218
NUM_LENGTH_CODES
#define NUM_LENGTH_CODES
Definition: webp.c:70
av_malloc
#define av_malloc(s)
Definition: ops_asmgen.c:44
WebPContext::pkt
AVPacket * pkt
Definition: webp.c:200
AlphaFilter
AlphaFilter
Definition: webp.c:108
PRED_MODE_SELECT
@ PRED_MODE_SELECT
Definition: webp.c:134
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
lz77_distance_offsets
static const int8_t lz77_distance_offsets[NUM_SHORT_DISTANCES][2]
Definition: webp.c:85
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:58
WebPContext::gb
GetBitContext gb
Definition: webp.c:198
apply_predictor_transform
static int apply_predictor_transform(WebPContext *s)
Definition: webp.c:928
av_always_inline
#define av_always_inline
Definition: attributes.h:76
HuffmanIndex
HuffmanIndex
Definition: webp.c:139
AV_COPY32
#define AV_COPY32(d, s)
Definition: intreadwrite.h:634
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
AV_CODEC_ID_WEBP
@ AV_CODEC_ID_WEBP
Definition: codec_id.h:226
len
int len
Definition: vorbis_enc_data.h:426
AVCodecContext::height
int height
Definition: avcodec.h:600
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:639
inv_predict_7
static void inv_predict_7(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:816
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
huff_reader_get_symbol
static int huff_reader_get_symbol(HuffReader *r, GetBitContext *gb)
Definition: webp.c:246
VP8X_FLAG_XMP_METADATA
#define VP8X_FLAG_XMP_METADATA
Definition: webp.c:60
FF_CODEC_CAP_ICC_PROFILES
#define FF_CODEC_CAP_ICC_PROFILES
Codec supports embedded ICC profiles (AV_FRAME_DATA_ICC_PROFILE).
Definition: codec_internal.h:82
avcodec.h
inv_predict_13
static void inv_predict_13(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:886
ff_vlc_free
void ff_vlc_free(VLC *vlc)
Definition: vlc.c:580
ret
ret
Definition: filter_design.txt:187
WebPContext::image
ImageContext image[IMAGE_ROLE_NB]
Definition: webp.c:220
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
ff_vp8_decode_frame
int ff_vp8_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
inv_predict_6
static void inv_predict_6(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:806
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
U
#define U(x)
Definition: vpx_arith.h:37
vp8_lossy_decode_alpha
static int vp8_lossy_decode_alpha(AVCodecContext *avctx, AVFrame *p, const uint8_t *data_start, unsigned int data_size)
Definition: webp.c:1246
AVCodecContext
main external API structure.
Definition: avcodec.h:439
HUFF_IDX_BLUE
@ HUFF_IDX_BLUE
Definition: webp.c:142
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
IMAGE_ROLE_ENTROPY
@ IMAGE_ROLE_ENTROPY
Definition: webp.c:159
VLC
Definition: vlc.h:50
webp_decode_frame
static int webp_decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, AVPacket *avpkt)
Definition: webp.c:1340
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
image_ctx_free
static void image_ctx_free(ImageContext *img)
Definition: webp.c:229
av_clip_uint8
#define av_clip_uint8
Definition: common.h:106
WebPContext::has_xmp
int has_xmp
Definition: webp.c:210
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
WebPContext::initialized
int initialized
Definition: webp.c:202
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
apply_color_indexing_transform
static int apply_color_indexing_transform(WebPContext *s)
Definition: webp.c:1004
mem.h
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
WebPContext::alpha_data
const uint8_t * alpha_data
Definition: webp.c:206
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:290
VLC_INIT_OUTPUT_LE
#define VLC_INIT_OUTPUT_LE
Definition: vlc.h:196
MAX_HUFFMAN_CODE_LENGTH
#define MAX_HUFFMAN_CODE_LENGTH
Definition: webp.c:73
ALPHA_FILTER_VERTICAL
@ ALPHA_FILTER_VERTICAL
Definition: webp.c:111
w
uint8_t w
Definition: llvidencdsp.c:39
PARSE_BLOCK_SIZE
#define PARSE_BLOCK_SIZE(w, h)
Definition: webp.c:428
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
PRED_MODE_L
@ PRED_MODE_L
Definition: webp.c:124
WebPContext
Definition: webp.c:196
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:466
AVPacket
This structure stores compressed data.
Definition: packet.h:572
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:86
VP8Context
Definition: vp8.h:161
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:600
ImageRole
ImageRole
Definition: webp.c:153
bytestream.h
distance
static float distance(float x, float y, int band)
Definition: nellymoserenc.c:231
imgutils.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
AVCodecContext::properties
attribute_deprecated unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1649
read_huffman_code_simple
static void read_huffman_code_simple(WebPContext *s, HuffReader *hc)
Definition: webp.c:302
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
HUFF_IDX_ALPHA
@ HUFF_IDX_ALPHA
Definition: webp.c:143
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
huff_reader_build_canonical
static int huff_reader_build_canonical(HuffReader *r, const uint8_t *code_lengths, uint16_t len_counts[MAX_HUFFMAN_CODE_LENGTH+1], uint8_t lens[], uint16_t syms[], int alphabet_size, void *logctx)
Definition: webp.c:257
h
h
Definition: vp9dsp_template.c:2070
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
WebPContext::has_iccp
int has_iccp
Definition: webp.c:209
get_huffman_group
static HuffReader * get_huffman_group(WebPContext *s, ImageContext *img, int x, int y)
Definition: webp.c:531
width
#define width
Definition: dsp.h:89
xi
#define xi(width, name, var, range_min, range_max, subs,...)
Definition: cbs_h264.c:190
AV_FRAME_FLAG_LOSSLESS
#define AV_FRAME_FLAG_LOSSLESS
A decoder can use this flag to mark frames which were originally encoded losslessly.
Definition: frame.h:671
inv_predict_9
static void inv_predict_9(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:836
ALPHA_COMPRESSION_VP8L
@ ALPHA_COMPRESSION_VP8L
Definition: webp.c:105
inv_predict_1
static void inv_predict_1(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl, const uint8_t *p_t, const uint8_t *p_tr)
Definition: webp.c:768
PRED_MODE_T
@ PRED_MODE_T
Definition: webp.c:125
line
The official guide to swscale for confused that consecutive non overlapping rectangles of slice_bottom special converter These generally are unscaled converters of common like for each output line the vertical scaler pulls lines from a ring buffer When the ring buffer does not contain the wanted line
Definition: swscale.txt:40
WebPContext::alpha_filter
enum AlphaFilter alpha_filter
Definition: webp.c:205
HUFF_IDX_RED
@ HUFF_IDX_RED
Definition: webp.c:141
av_fourcc2str
#define av_fourcc2str(fourcc)
Definition: avutil.h:347
IMAGE_ROLE_ARGB
@ IMAGE_ROLE_ARGB
Definition: webp.c:155
PRED_MODE_ADD_SUBTRACT_HALF
@ PRED_MODE_ADD_SUBTRACT_HALF
Definition: webp.c:136
IMAGE_ROLE_COLOR_TRANSFORM
@ IMAGE_ROLE_COLOR_TRANSFORM
Definition: webp.c:167