mjpegdec.c
1 /*
2  * MJPEG decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2003 Alex Beregszaszi
5  * Copyright (c) 2003-2004 Michael Niedermayer
6  *
7  * Support for external huffman table, various fixes (AVID workaround),
8  * aspecting, new decode_frame mechanism and apple mjpeg-b support
9  * by Alex Beregszaszi
10  *
11  * This file is part of FFmpeg.
12  *
13  * FFmpeg is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2.1 of the License, or (at your option) any later version.
17  *
18  * FFmpeg is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with FFmpeg; if not, write to the Free Software
25  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26  */
27 
28 /**
29  * @file
30  * MJPEG decoder.
31  */
32 
33 #include "config_components.h"
34 
35 #include "libavutil/display.h"
36 #include "libavutil/emms.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/avassert.h"
39 #include "libavutil/opt.h"
40 #include "avcodec.h"
41 #include "blockdsp.h"
42 #include "codec_internal.h"
43 #include "copy_block.h"
44 #include "decode.h"
45 #include "hwaccel_internal.h"
46 #include "hwconfig.h"
47 #include "idctdsp.h"
48 #include "internal.h"
49 #include "jpegtables.h"
50 #include "mjpeg.h"
51 #include "mjpegdec.h"
52 #include "jpeglsdec.h"
53 #include "profiles.h"
54 #include "put_bits.h"
55 #include "tiff.h"
56 #include "exif.h"
57 #include "bytestream.h"
58 #include "tiff_common.h"
59 
60 
61 static int init_default_huffman_tables(MJpegDecodeContext *s)
62 {
63  static const struct {
64  int class;
65  int index;
66  const uint8_t *bits;
67  const uint8_t *values;
68  int length;
69  } ht[] = {
70  { 0, 0, ff_mjpeg_bits_dc_luminance,
71  ff_mjpeg_val_dc, 12 },
72  { 0, 1, ff_mjpeg_bits_dc_chrominance,
73  ff_mjpeg_val_dc, 12 },
74  { 1, 0, ff_mjpeg_bits_ac_luminance,
75  ff_mjpeg_val_ac_luminance, 162 },
76  { 1, 1, ff_mjpeg_bits_ac_chrominance,
77  ff_mjpeg_val_ac_chrominance, 162 },
78  { 2, 0, ff_mjpeg_bits_ac_luminance,
79  ff_mjpeg_val_ac_luminance, 162 },
80  { 2, 1, ff_mjpeg_bits_ac_chrominance,
81  ff_mjpeg_val_ac_chrominance, 162 },
82  };
83  int i, ret;
84 
85  for (i = 0; i < FF_ARRAY_ELEMS(ht); i++) {
86  ff_vlc_free(&s->vlcs[ht[i].class][ht[i].index]);
87  ret = ff_mjpeg_build_vlc(&s->vlcs[ht[i].class][ht[i].index],
88  ht[i].bits, ht[i].values,
89  ht[i].class == 1, s->avctx);
90  if (ret < 0)
91  return ret;
92 
93  if (ht[i].class < 2) {
94  memcpy(s->raw_huffman_lengths[ht[i].class][ht[i].index],
95  ht[i].bits + 1, 16);
96  memcpy(s->raw_huffman_values[ht[i].class][ht[i].index],
97  ht[i].values, ht[i].length);
98  }
99  }
100 
101  return 0;
102 }
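/* These are the example Huffman tables from Annex K of ITU-T T.81, installed
 * up front so that streams which omit DHT segments (a common MJPEG shortcut)
 * remain decodable.  Classes 0 and 1 are the DC and AC tables; class 2 holds
 * AC copies used by the progressive block decoders further down. */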
103 
104 static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
105 {
106  s->buggy_avid = 1;
107  if (len > 14 && buf[12] == 1) /* 1 - NTSC */
108  s->interlace_polarity = 1;
109  if (len > 14 && buf[12] == 2) /* 2 - PAL */
110  s->interlace_polarity = 0;
111  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
112  av_log(s->avctx, AV_LOG_INFO, "AVID: len:%d %d\n", len, len > 14 ? buf[12] : -1);
113 }
114 
115 static void init_idct(AVCodecContext *avctx)
116 {
117  MJpegDecodeContext *s = avctx->priv_data;
118 
119  ff_idctdsp_init(&s->idsp, avctx);
120  ff_permute_scantable(s->permutated_scantable, ff_zigzag_direct,
121  s->idsp.idct_permutation);
122 }
123 
124 av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
125 {
126  MJpegDecodeContext *s = avctx->priv_data;
127  int ret;
128 
129  if (!s->picture_ptr) {
130  s->picture = av_frame_alloc();
131  if (!s->picture)
132  return AVERROR(ENOMEM);
133  s->picture_ptr = s->picture;
134  }
135 
136  s->avctx = avctx;
137  ff_blockdsp_init(&s->bdsp);
138  ff_hpeldsp_init(&s->hdsp, avctx->flags);
139  init_idct(avctx);
140  s->buffer_size = 0;
141  s->buffer = NULL;
142  s->start_code = -1;
143  s->first_picture = 1;
144  s->got_picture = 0;
145  s->orig_height = avctx->coded_height;
146  avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
147  avctx->colorspace = AVCOL_SPC_BT470BG;
148  s->hwaccel_pix_fmt = s->hwaccel_sw_pix_fmt = AV_PIX_FMT_NONE;
149 
150  if ((ret = init_default_huffman_tables(s)) < 0)
151  return ret;
152 
153  if (s->extern_huff) {
154  av_log(avctx, AV_LOG_INFO, "using external huffman table\n");
155  if ((ret = init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size * 8)) < 0)
156  return ret;
157  if (ff_mjpeg_decode_dht(s)) {
158  av_log(avctx, AV_LOG_ERROR,
159  "error using external huffman table, switching back to internal\n");
160  if ((ret = init_default_huffman_tables(s)) < 0)
161  return ret;
162  }
163  }
164  if (avctx->field_order == AV_FIELD_BB) { /* quicktime icefloe 019 */
165  s->interlace_polarity = 1; /* bottom field first */
166  av_log(avctx, AV_LOG_DEBUG, "bottom field first\n");
167  } else if (avctx->field_order == AV_FIELD_UNKNOWN) {
168  if (avctx->codec_tag == AV_RL32("MJPG"))
169  s->interlace_polarity = 1;
170  }
171 
172  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
173  if (avctx->extradata_size >= 4)
174  s->smv_frames_per_jpeg = AV_RL32(avctx->extradata);
175 
176  if (s->smv_frames_per_jpeg <= 0) {
177  av_log(avctx, AV_LOG_ERROR, "Invalid number of frames per jpeg.\n");
178  return AVERROR_INVALIDDATA;
179  }
180 
181  s->smv_frame = av_frame_alloc();
182  if (!s->smv_frame)
183  return AVERROR(ENOMEM);
184  } else if (avctx->extradata_size > 8
185  && AV_RL32(avctx->extradata) == 0x2C
186  && AV_RL32(avctx->extradata+4) == 0x18) {
187  parse_avid(s, avctx->extradata, avctx->extradata_size);
188  }
189 
190  if (avctx->codec->id == AV_CODEC_ID_AMV)
191  s->flipped = 1;
192 
193  return 0;
194 }
195 
196 
197 /* quantize tables */
198 int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
199 {
200  int len, index, i;
201 
202  len = get_bits(&s->gb, 16) - 2;
203 
204  if (8*len > get_bits_left(&s->gb)) {
205  av_log(s->avctx, AV_LOG_ERROR, "dqt: len %d is too large\n", len);
206  return AVERROR_INVALIDDATA;
207  }
208 
209  while (len >= 65) {
210  int pr = get_bits(&s->gb, 4);
211  if (pr > 1) {
212  av_log(s->avctx, AV_LOG_ERROR, "dqt: invalid precision\n");
213  return AVERROR_INVALIDDATA;
214  }
215  index = get_bits(&s->gb, 4);
216  if (index >= 4)
217  return -1;
218  av_log(s->avctx, AV_LOG_DEBUG, "index=%d\n", index);
219  /* read quant table */
220  for (i = 0; i < 64; i++) {
221  s->quant_matrixes[index][i] = get_bits(&s->gb, pr ? 16 : 8);
222  if (s->quant_matrixes[index][i] == 0) {
223  int log_level = s->avctx->err_recognition & AV_EF_EXPLODE ? AV_LOG_ERROR : AV_LOG_WARNING;
224  av_log(s->avctx, log_level, "dqt: 0 quant value\n");
225  if (s->avctx->err_recognition & AV_EF_EXPLODE)
226  return AVERROR_INVALIDDATA;
227  }
228  }
229 
230  // XXX FIXME fine-tune, and perhaps add dc too
231  s->qscale[index] = FFMAX(s->quant_matrixes[index][1],
232  s->quant_matrixes[index][8]) >> 1;
233  av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n",
234  index, s->qscale[index]);
235  len -= 1 + 64 * (1+pr);
236  }
237  return 0;
238 }
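/* Layout of a DQT segment as parsed above: a 16-bit length (including its own
 * two bytes), then one or more entries of 1 + 64 * (1 + Pq) bytes each -- a
 * precision/destination byte (Pq high nibble, Tq low nibble) followed by 64
 * quantizer values of 8 or 16 bits in zigzag order.  A baseline 8-bit table 0
 * therefore looks like
 *     FF DB 00 43 00 q0 q1 ... q63
 * which is why the loop keeps going while at least 65 payload bytes remain. */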
239 
240 /* decode huffman tables and build VLC decoders */
241 int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
242 {
243  int len, index, i, class, n, v;
244  uint8_t bits_table[17];
245  uint8_t val_table[256];
246  int ret = 0;
247 
248  len = get_bits(&s->gb, 16) - 2;
249 
250  if (8*len > get_bits_left(&s->gb)) {
251  av_log(s->avctx, AV_LOG_ERROR, "dht: len %d is too large\n", len);
252  return AVERROR_INVALIDDATA;
253  }
254 
255  while (len > 0) {
256  if (len < 17)
257  return AVERROR_INVALIDDATA;
258  class = get_bits(&s->gb, 4);
259  if (class >= 2)
260  return AVERROR_INVALIDDATA;
261  index = get_bits(&s->gb, 4);
262  if (index >= 4)
263  return AVERROR_INVALIDDATA;
264  n = 0;
265  for (i = 1; i <= 16; i++) {
266  bits_table[i] = get_bits(&s->gb, 8);
267  n += bits_table[i];
268  }
269  len -= 17;
270  if (len < n || n > 256)
271  return AVERROR_INVALIDDATA;
272 
273  for (i = 0; i < n; i++) {
274  v = get_bits(&s->gb, 8);
275  val_table[i] = v;
276  }
277  len -= n;
278 
279  /* build VLC and flush previous vlc if present */
280  ff_vlc_free(&s->vlcs[class][index]);
281  av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n",
282  class, index, n);
283  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[class][index], bits_table,
284  val_table, class > 0, s->avctx)) < 0)
285  return ret;
286 
287  if (class > 0) {
288  ff_vlc_free(&s->vlcs[2][index]);
289  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[2][index], bits_table,
290  val_table, 0, s->avctx)) < 0)
291  return ret;
292  }
293 
294  for (i = 0; i < 16; i++)
295  s->raw_huffman_lengths[class][index][i] = bits_table[i + 1];
296  for (i = 0; i < 256; i++)
297  s->raw_huffman_values[class][index][i] = val_table[i];
298  }
299  return 0;
300 }
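/* A DHT segment carries, per table: a class/destination byte (class 0 = DC,
 * 1 = AC), sixteen counts giving how many codes of each length from 1 to 16
 * bits exist, and then up to 256 code values.  In addition to the regular
 * table, an extra copy of every AC table is stored as class 2; that copy is
 * the one used by the progressive-scan block decoders. */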
301 
302 int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
303 {
304  int len, nb_components, i, width, height, bits, ret, size_change;
305  unsigned pix_fmt_id;
306  int h_count[MAX_COMPONENTS] = { 0 };
307  int v_count[MAX_COMPONENTS] = { 0 };
308 
309  s->cur_scan = 0;
310  memset(s->upscale_h, 0, sizeof(s->upscale_h));
311  memset(s->upscale_v, 0, sizeof(s->upscale_v));
312 
313  len = get_bits(&s->gb, 16);
314  bits = get_bits(&s->gb, 8);
315 
316  if (bits > 16 || bits < 1) {
317  av_log(s->avctx, AV_LOG_ERROR, "bits %d is invalid\n", bits);
318  return AVERROR_INVALIDDATA;
319  }
320 
321  if (s->avctx->bits_per_raw_sample != bits) {
322  av_log(s->avctx, s->avctx->bits_per_raw_sample > 0 ? AV_LOG_INFO : AV_LOG_DEBUG, "Changing bps from %d to %d\n", s->avctx->bits_per_raw_sample, bits);
323  s->avctx->bits_per_raw_sample = bits;
324  init_idct(s->avctx);
325  }
326  if (s->pegasus_rct)
327  bits = 9;
328  if (bits == 9 && !s->pegasus_rct)
329  s->rct = 1; // FIXME ugly
330 
331  if(s->lossless && s->avctx->lowres){
332  av_log(s->avctx, AV_LOG_ERROR, "lowres is not possible with lossless jpeg\n");
333  return -1;
334  }
335 
336  height = get_bits(&s->gb, 16);
337  width = get_bits(&s->gb, 16);
338 
339  // HACK for odd_height.mov
340  if (s->interlaced && s->width == width && s->height == height + 1)
341  height= s->height;
342 
343  av_log(s->avctx, AV_LOG_DEBUG, "sof0: picture: %dx%d\n", width, height);
344  if (av_image_check_size(width, height, 0, s->avctx) < 0)
345  return AVERROR_INVALIDDATA;
346  if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
347  return AVERROR_INVALIDDATA;
348 
349  nb_components = get_bits(&s->gb, 8);
350  if (nb_components <= 0 ||
351  nb_components > MAX_COMPONENTS)
352  return -1;
353  if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
354  if (nb_components != s->nb_components) {
355  av_log(s->avctx, AV_LOG_ERROR,
356  "nb_components changing in interlaced picture\n");
357  return AVERROR_INVALIDDATA;
358  }
359  }
360  if (s->ls && !(bits <= 8 || nb_components == 1)) {
362  "JPEG-LS that is not <= 8 "
363  "bits/component or 16-bit gray");
364  return AVERROR_PATCHWELCOME;
365  }
366  if (len != 8 + 3 * nb_components) {
367  av_log(s->avctx, AV_LOG_ERROR, "decode_sof0: error, len(%d) mismatch %d components\n", len, nb_components);
368  return AVERROR_INVALIDDATA;
369  }
370 
371  s->nb_components = nb_components;
372  s->h_max = 1;
373  s->v_max = 1;
374  for (i = 0; i < nb_components; i++) {
375  /* component id */
376  s->component_id[i] = get_bits(&s->gb, 8);
377  h_count[i] = get_bits(&s->gb, 4);
378  v_count[i] = get_bits(&s->gb, 4);
379  /* compute hmax and vmax (only used in interleaved case) */
380  if (h_count[i] > s->h_max)
381  s->h_max = h_count[i];
382  if (v_count[i] > s->v_max)
383  s->v_max = v_count[i];
384  s->quant_index[i] = get_bits(&s->gb, 8);
385  if (s->quant_index[i] >= 4) {
386  av_log(s->avctx, AV_LOG_ERROR, "quant_index is invalid\n");
387  return AVERROR_INVALIDDATA;
388  }
389  if (!h_count[i] || !v_count[i]) {
390  av_log(s->avctx, AV_LOG_ERROR,
391  "Invalid sampling factor in component %d %d:%d\n",
392  i, h_count[i], v_count[i]);
393  return AVERROR_INVALIDDATA;
394  }
395 
396  av_log(s->avctx, AV_LOG_DEBUG, "component %d %d:%d id: %d quant:%d\n",
397  i, h_count[i], v_count[i],
398  s->component_id[i], s->quant_index[i]);
399  }
400  if ( nb_components == 4
401  && s->component_id[0] == 'C'
402  && s->component_id[1] == 'M'
403  && s->component_id[2] == 'Y'
404  && s->component_id[3] == 'K')
405  s->adobe_transform = 0;
406 
407  if (s->ls && (s->h_max > 1 || s->v_max > 1)) {
408  avpriv_report_missing_feature(s->avctx, "Subsampling in JPEG-LS");
409  return AVERROR_PATCHWELCOME;
410  }
411 
412  if (s->bayer) {
413  if (nb_components == 2) {
414  /* Bayer images embedded in DNGs can contain 2 interleaved components and the
415  width stored in their SOF3 markers is the width of each one. We only output
416  a single component, therefore we need to adjust the output image width. We
417  handle the deinterleaving (but not the debayering) in this file. */
418  width *= 2;
419  }
420  /* They can also contain 1 component, which is double the width and half the height
421  of the final image (rows are interleaved). We don't handle the decoding in this
422  file, but leave that to the TIFF/DNG decoder. */
423  }
424 
425  /* if different size, realloc/alloc picture */
426  if (width != s->width || height != s->height || bits != s->bits ||
427  memcmp(s->h_count, h_count, sizeof(h_count)) ||
428  memcmp(s->v_count, v_count, sizeof(v_count))) {
429  size_change = 1;
430 
431  s->width = width;
432  s->height = height;
433  s->bits = bits;
434  memcpy(s->h_count, h_count, sizeof(h_count));
435  memcpy(s->v_count, v_count, sizeof(v_count));
436  s->interlaced = 0;
437  s->got_picture = 0;
438 
439  /* test interlaced mode */
440  if (s->first_picture &&
441  (s->multiscope != 2 || s->avctx->pkt_timebase.den >= 25 * s->avctx->pkt_timebase.num) &&
442  s->orig_height != 0 &&
443  s->height < ((s->orig_height * 3) / 4)) {
444  s->interlaced = 1;
445  s->bottom_field = s->interlace_polarity;
446  s->picture_ptr->flags |= AV_FRAME_FLAG_INTERLACED;
447  s->picture_ptr->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST * !s->interlace_polarity;
448  height *= 2;
449  }
450 
451  ret = ff_set_dimensions(s->avctx, width, height);
452  if (ret < 0)
453  return ret;
454 
455  if (s->avctx->codec_id != AV_CODEC_ID_SMVJPEG &&
456  (s->avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
457  s->avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
458  s->orig_height < height)
459  s->avctx->height = AV_CEIL_RSHIFT(s->orig_height, s->avctx->lowres);
460 
461  s->first_picture = 0;
462  } else {
463  size_change = 0;
464  }
465 
466  if (s->avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
467  s->avctx->height = s->avctx->coded_height / s->smv_frames_per_jpeg;
468  if (s->avctx->height <= 0)
469  return AVERROR_INVALIDDATA;
470  }
471 
472  if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
473  if (s->progressive) {
474  avpriv_request_sample(s->avctx, "progressively coded interlaced picture");
475  return AVERROR_INVALIDDATA;
476  }
477  } else {
478  if (s->v_max == 1 && s->h_max == 1 && s->lossless==1 && (nb_components==3 || nb_components==4))
479  s->rgb = 1;
480  else if (!s->lossless)
481  s->rgb = 0;
482  /* XXX: not complete test ! */
483  pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
484  (s->h_count[1] << 20) | (s->v_count[1] << 16) |
485  (s->h_count[2] << 12) | (s->v_count[2] << 8) |
486  (s->h_count[3] << 4) | s->v_count[3];
487  av_log(s->avctx, AV_LOG_DEBUG, "pix fmt id %x\n", pix_fmt_id);
488  /* NOTE we do not allocate pictures large enough for the possible
489  * padding of h/v_count being 4 */
490  if (!(pix_fmt_id & 0xD0D0D0D0))
491  pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
492  if (!(pix_fmt_id & 0x0D0D0D0D))
493  pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
494 
495  for (i = 0; i < 8; i++) {
496  int j = 6 + (i&1) - (i&6);
497  int is = (pix_fmt_id >> (4*i)) & 0xF;
498  int js = (pix_fmt_id >> (4*j)) & 0xF;
499 
500  if (is == 1 && js != 2 && (i < 2 || i > 5))
501  js = (pix_fmt_id >> ( 8 + 4*(i&1))) & 0xF;
502  if (is == 1 && js != 2 && (i < 2 || i > 5))
503  js = (pix_fmt_id >> (16 + 4*(i&1))) & 0xF;
504 
505  if (is == 1 && js == 2) {
506  if (i & 1) s->upscale_h[j/2] = 1;
507  else s->upscale_v[j/2] = 1;
508  }
509  }
510 
511  if (s->bayer) {
512  if (pix_fmt_id != 0x11110000 && pix_fmt_id != 0x11000000)
513  goto unk_pixfmt;
514  }
515 
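/* pix_fmt_id packs each component's horizontal and vertical sampling factor
 * into one nibble, component 0 in the most significant byte.  For example,
 * 4:2:0 sampling (luma 2x2, both chroma planes 1x1) gives 0x22111100 and a
 * single-component grayscale image gives 0x11000000; the switch below maps
 * these layouts to output pixel formats. */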
516  switch (pix_fmt_id) {
517  case 0x11110000: /* for bayer-encoded huffman lossless JPEGs embedded in DNGs */
518  if (!s->bayer)
519  goto unk_pixfmt;
520  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16LE;
521  break;
522  case 0x11111100:
523  if (s->rgb)
524  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_BGR48;
525  else {
526  if ( s->adobe_transform == 0
527  || s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
528  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_GBRP : AV_PIX_FMT_GBRP16;
529  } else {
530  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
531  else s->avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
532  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
533  }
534  }
535  av_assert0(s->nb_components == 3);
536  break;
537  case 0x11111111:
538  if (s->rgb)
539  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_ABGR : AV_PIX_FMT_RGBA64;
540  else {
541  if (s->adobe_transform == 0 && s->bits <= 8) {
542  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
543  } else {
544  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_YUVA444P : AV_PIX_FMT_YUVA444P16;
545  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
546  }
547  }
548  av_assert0(s->nb_components == 4);
549  break;
550  case 0x11412100:
551  if (s->bits > 8)
552  goto unk_pixfmt;
553  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
554  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
555  s->upscale_h[0] = 4;
556  s->upscale_h[1] = 0;
557  s->upscale_h[2] = 1;
558  } else {
559  goto unk_pixfmt;
560  }
561  break;
562  case 0x22111122:
563  case 0x22111111:
564  if (s->adobe_transform == 0 && s->bits <= 8) {
565  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
566  s->upscale_v[1] = s->upscale_v[2] = 1;
567  s->upscale_h[1] = s->upscale_h[2] = 1;
568  } else if (s->adobe_transform == 2 && s->bits <= 8) {
569  s->avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
570  s->upscale_v[1] = s->upscale_v[2] = 1;
571  s->upscale_h[1] = s->upscale_h[2] = 1;
572  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
573  } else {
574  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
575  else s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P16;
576  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
577  }
578  av_assert0(s->nb_components == 4);
579  break;
580  case 0x12121100:
581  case 0x22122100:
582  case 0x21211100:
583  case 0x21112100:
584  case 0x22211200:
585  case 0x22221100:
586  case 0x22112200:
587  case 0x11222200:
588  if (s->bits > 8)
589  goto unk_pixfmt;
590  if (s->adobe_transform == 0 || s->component_id[0] == 'R' &&
591  s->component_id[1] == 'G' && s->component_id[2] == 'B') {
592  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
593  } else {
594  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
595  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
596  }
597  break;
598  case 0x11000000:
599  case 0x13000000:
600  case 0x14000000:
601  case 0x31000000:
602  case 0x33000000:
603  case 0x34000000:
604  case 0x41000000:
605  case 0x43000000:
606  case 0x44000000:
607  if(s->bits <= 8)
608  s->avctx->pix_fmt = s->force_pal8 ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
609  else
610  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
611  break;
612  case 0x12111100:
613  case 0x14121200:
614  case 0x14111100:
615  case 0x22211100:
616  case 0x22112100:
617  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
618  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
619  else
620  goto unk_pixfmt;
621  s->upscale_v[1] = s->upscale_v[2] = 1;
622  } else {
623  if (pix_fmt_id == 0x14111100)
624  s->upscale_v[1] = s->upscale_v[2] = 1;
625  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV440P : AV_PIX_FMT_YUVJ440P;
626  else
627  goto unk_pixfmt;
628  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
629  }
630  break;
631  case 0x21111100:
632  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
633  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
634  else
635  goto unk_pixfmt;
636  s->upscale_h[1] = s->upscale_h[2] = 1;
637  } else {
638  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
639  else s->avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
640  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
641  }
642  break;
643  case 0x11311100:
644  if (s->bits > 8)
645  goto unk_pixfmt;
646  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B')
647  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
648  else
649  goto unk_pixfmt;
650  s->upscale_h[0] = s->upscale_h[2] = 2;
651  break;
652  case 0x31111100:
653  if (s->bits > 8)
654  goto unk_pixfmt;
655  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
656  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
657  s->upscale_h[1] = s->upscale_h[2] = 2;
658  break;
659  case 0x22121100:
660  case 0x22111200:
661  case 0x41211100:
662  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
663  else
664  goto unk_pixfmt;
665  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
666  break;
667  case 0x22111100:
668  case 0x23111100:
669  case 0x42111100:
670  case 0x24111100:
671  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_YUVJ420P;
672  else s->avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
673  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
674  if (pix_fmt_id == 0x42111100) {
675  if (s->bits > 8)
676  goto unk_pixfmt;
677  s->upscale_h[1] = s->upscale_h[2] = 1;
678  } else if (pix_fmt_id == 0x24111100) {
679  if (s->bits > 8)
680  goto unk_pixfmt;
681  s->upscale_v[1] = s->upscale_v[2] = 1;
682  } else if (pix_fmt_id == 0x23111100) {
683  if (s->bits > 8)
684  goto unk_pixfmt;
685  s->upscale_v[1] = s->upscale_v[2] = 2;
686  }
687  break;
688  case 0x41111100:
689  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV411P : AV_PIX_FMT_YUVJ411P;
690  else
691  goto unk_pixfmt;
692  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
693  break;
694  default:
695  unk_pixfmt:
696  avpriv_report_missing_feature(s->avctx, "Pixel format 0x%x bits:%d", pix_fmt_id, s->bits);
697  memset(s->upscale_h, 0, sizeof(s->upscale_h));
698  memset(s->upscale_v, 0, sizeof(s->upscale_v));
699  return AVERROR_PATCHWELCOME;
700  }
701  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->avctx->lowres) {
702  avpriv_report_missing_feature(s->avctx, "Lowres for weird subsampling");
703  return AVERROR_PATCHWELCOME;
704  }
705  if (s->ls) {
706  memset(s->upscale_h, 0, sizeof(s->upscale_h));
707  memset(s->upscale_v, 0, sizeof(s->upscale_v));
708  if (s->nb_components == 3) {
709  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
710  } else if (s->nb_components != 1) {
711  av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
712  return AVERROR_PATCHWELCOME;
713  } else if ((s->palette_index || s->force_pal8) && s->bits <= 8)
714  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
715  else if (s->bits <= 8)
716  s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
717  else
718  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
719  }
720 
721  s->pix_desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
722  if (!s->pix_desc) {
723  av_log(s->avctx, AV_LOG_ERROR, "Could not get a pixel format descriptor.\n");
724  return AVERROR_BUG;
725  }
726 
727  if (s->avctx->pix_fmt == s->hwaccel_sw_pix_fmt && !size_change) {
728  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
729  } else {
730  enum AVPixelFormat pix_fmts[] = {
731 #if CONFIG_MJPEG_NVDEC_HWACCEL
732  AV_PIX_FMT_CUDA,
733 #endif
734 #if CONFIG_MJPEG_VAAPI_HWACCEL
735  AV_PIX_FMT_VAAPI,
736 #endif
737  s->avctx->pix_fmt,
738  AV_PIX_FMT_NONE,
739  };
740  s->hwaccel_pix_fmt = ff_get_format(s->avctx, pix_fmts);
741  if (s->hwaccel_pix_fmt < 0)
742  return AVERROR(EINVAL);
743 
744  s->hwaccel_sw_pix_fmt = s->avctx->pix_fmt;
745  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
746  }
747 
748  if (s->avctx->skip_frame == AVDISCARD_ALL) {
749  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
750  s->picture_ptr->flags |= AV_FRAME_FLAG_KEY;
751  s->got_picture = 1;
752  return 0;
753  }
754 
755  av_frame_unref(s->picture_ptr);
756  if (ff_get_buffer(s->avctx, s->picture_ptr, AV_GET_BUFFER_FLAG_REF) < 0)
757  return -1;
758  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
759  s->picture_ptr->flags |= AV_FRAME_FLAG_KEY;
760  s->got_picture = 1;
761 
762  // Let's clear the palette to avoid leaving uninitialized values in it
763  if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
764  memset(s->picture_ptr->data[1], 0, 1024);
765 
766  for (i = 0; i < 4; i++)
767  s->linesize[i] = s->picture_ptr->linesize[i] << s->interlaced;
768 
769  ff_dlog(s->avctx, "%d %d %d %d %d %d\n",
770  s->width, s->height, s->linesize[0], s->linesize[1],
771  s->interlaced, s->avctx->height);
772 
773  }
774 
775  if ((s->rgb && !s->lossless && !s->ls) ||
776  (!s->rgb && s->ls && s->nb_components > 1) ||
777  (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 && !s->ls)
778  ) {
779  av_log(s->avctx, AV_LOG_ERROR, "Unsupported coding and pixel format combination\n");
780  return AVERROR_PATCHWELCOME;
781  }
782 
783  /* totally blank picture as progressive JPEG will only add details to it */
784  if (s->progressive) {
785  int bw = (width + s->h_max * 8 - 1) / (s->h_max * 8);
786  int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
787  for (i = 0; i < s->nb_components; i++) {
788  int size = bw * bh * s->h_count[i] * s->v_count[i];
789  av_freep(&s->blocks[i]);
790  av_freep(&s->last_nnz[i]);
791  s->blocks[i] = av_calloc(size, sizeof(**s->blocks));
792  s->last_nnz[i] = av_calloc(size, sizeof(**s->last_nnz));
793  if (!s->blocks[i] || !s->last_nnz[i])
794  return AVERROR(ENOMEM);
795  s->block_stride[i] = bw * s->h_count[i];
796  }
797  memset(s->coefs_finished, 0, sizeof(s->coefs_finished));
798  }
799 
800  if (s->avctx->hwaccel) {
801  const FFHWAccel *hwaccel = ffhwaccel(s->avctx->hwaccel);
802  s->hwaccel_picture_private =
804  if (!s->hwaccel_picture_private)
805  return AVERROR(ENOMEM);
806 
807  ret = hwaccel->start_frame(s->avctx, s->raw_image_buffer,
808  s->raw_image_buffer_size);
809  if (ret < 0)
810  return ret;
811  }
812 
813  return 0;
814 }
815 
816 static inline int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
817 {
818  int code;
819  code = get_vlc2(&s->gb, s->vlcs[0][dc_index].table, 9, 2);
820  if (code < 0 || code > 16) {
821  av_log(s->avctx, AV_LOG_WARNING,
822  "mjpeg_decode_dc: bad vlc: %d:%d (%p)\n",
823  0, dc_index, &s->vlcs[0][dc_index]);
824  return 0xfffff;
825  }
826 
827  if (code)
828  return get_xbits(&s->gb, code);
829  else
830  return 0;
831 }
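/* The DC Huffman symbol is a magnitude category: it tells how many extra bits
 * follow, and get_xbits() maps those bits to the signed difference that the
 * callers add to the previous DC value of the same component.  0xfffff serves
 * as an out-of-band error value since valid differences need at most 16 bits. */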
832 
833 /* decode block and dequantize */
834 static int decode_block(MJpegDecodeContext *s, int16_t *block, int component,
835  int dc_index, int ac_index, uint16_t *quant_matrix)
836 {
837  int code, i, j, level, val;
838 
839  /* DC coef */
840  val = mjpeg_decode_dc(s, dc_index);
841  if (val == 0xfffff) {
842  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
843  return AVERROR_INVALIDDATA;
844  }
845  val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
846  val = av_clip_int16(val);
847  s->last_dc[component] = val;
848  block[0] = val;
849  /* AC coefs */
850  i = 0;
851  {OPEN_READER(re, &s->gb);
852  do {
853  UPDATE_CACHE(re, &s->gb);
854  GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2);
855 
856  i += ((unsigned)code) >> 4;
857  code &= 0xf;
858  if (code) {
859  if (code > MIN_CACHE_BITS - 16)
860  UPDATE_CACHE(re, &s->gb);
861 
862  {
863  int cache = GET_CACHE(re, &s->gb);
864  int sign = (~cache) >> 31;
865  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
866  }
867 
868  LAST_SKIP_BITS(re, &s->gb, code);
869 
870  if (i > 63) {
871  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
872  return AVERROR_INVALIDDATA;
873  }
874  j = s->permutated_scantable[i];
875  block[j] = level * quant_matrix[i];
876  }
877  } while (i < 63);
878  CLOSE_READER(re, &s->gb);}
879 
880  return 0;
881 }
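/* Sequential AC decoding: every Huffman symbol combines a zero-run length with
 * a magnitude category.  The AC VLC is built (in ff_mjpeg_build_vlc) with its
 * symbols offset so that the run nibble already includes the step onto the
 * current coefficient and the end-of-block symbol decodes to a jump past
 * index 63, which is why the loop needs no explicit EOB check.  Each level is
 * dequantized and written at its IDCT-permuted zigzag position. */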
882 
883 static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block,
884  int component, int dc_index,
885  uint16_t *quant_matrix, int Al)
886 {
887  unsigned val;
888  s->bdsp.clear_block(block);
889  val = mjpeg_decode_dc(s, dc_index);
890  if (val == 0xfffff) {
891  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
892  return AVERROR_INVALIDDATA;
893  }
894  val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
895  s->last_dc[component] = val;
896  block[0] = val;
897  return 0;
898 }
899 
900 /* decode block and dequantize - progressive JPEG version */
901 static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block,
902  uint8_t *last_nnz, int ac_index,
903  uint16_t *quant_matrix,
904  int ss, int se, int Al, int *EOBRUN)
905 {
906  int code, i, j, val, run;
907  unsigned level;
908 
909  if (*EOBRUN) {
910  (*EOBRUN)--;
911  return 0;
912  }
913 
914  {
915  OPEN_READER(re, &s->gb);
916  for (i = ss; ; i++) {
917  UPDATE_CACHE(re, &s->gb);
918  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
919 
920  run = ((unsigned) code) >> 4;
921  code &= 0xF;
922  if (code) {
923  i += run;
924  if (code > MIN_CACHE_BITS - 16)
925  UPDATE_CACHE(re, &s->gb);
926 
927  {
928  int cache = GET_CACHE(re, &s->gb);
929  int sign = (~cache) >> 31;
930  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
931  }
932 
933  LAST_SKIP_BITS(re, &s->gb, code);
934 
935  if (i >= se) {
936  if (i == se) {
937  j = s->permutated_scantable[se];
938  block[j] = level * (quant_matrix[se] << Al);
939  break;
940  }
941  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
942  return AVERROR_INVALIDDATA;
943  }
944  j = s->permutated_scantable[i];
945  block[j] = level * (quant_matrix[i] << Al);
946  } else {
947  if (run == 0xF) {// ZRL - skip 15 coefficients
948  i += 15;
949  if (i >= se) {
950  av_log(s->avctx, AV_LOG_ERROR, "ZRL overflow: %d\n", i);
951  return AVERROR_INVALIDDATA;
952  }
953  } else {
954  val = (1 << run);
955  if (run) {
956  UPDATE_CACHE(re, &s->gb);
957  val += NEG_USR32(GET_CACHE(re, &s->gb), run);
958  LAST_SKIP_BITS(re, &s->gb, run);
959  }
960  *EOBRUN = val - 1;
961  break;
962  }
963  }
964  }
965  CLOSE_READER(re, &s->gb);
966  }
967 
968  if (i > *last_nnz)
969  *last_nnz = i;
970 
971  return 0;
972 }
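/* Progressive spectral-selection pass: only coefficients ss..se of each block
 * are coded, scaled by 1 << Al.  An end-of-band symbol says that this block
 * and a run of following blocks carry no more nonzero coefficients in the
 * band; that count is kept in *EOBRUN so later calls return immediately
 * without reading the bitstream. */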
973 
974 #define REFINE_BIT(j) { \
975  UPDATE_CACHE(re, &s->gb); \
976  sign = block[j] >> 15; \
977  block[j] += SHOW_UBITS(re, &s->gb, 1) * \
978  ((quant_matrix[i] ^ sign) - sign) << Al; \
979  LAST_SKIP_BITS(re, &s->gb, 1); \
980 }
981 
982 #define ZERO_RUN \
983 for (; ; i++) { \
984  if (i > last) { \
985  i += run; \
986  if (i > se) { \
987  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
988  return -1; \
989  } \
990  break; \
991  } \
992  j = s->permutated_scantable[i]; \
993  if (block[j]) \
994  REFINE_BIT(j) \
995  else if (run-- == 0) \
996  break; \
997 }
998 
999 /* decode block and dequantize - progressive JPEG refinement pass */
1000 static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block,
1001  uint8_t *last_nnz,
1002  int ac_index, uint16_t *quant_matrix,
1003  int ss, int se, int Al, int *EOBRUN)
1004 {
1005  int code, i = ss, j, sign, val, run;
1006  int last = FFMIN(se, *last_nnz);
1007 
1008  OPEN_READER(re, &s->gb);
1009  if (*EOBRUN) {
1010  (*EOBRUN)--;
1011  } else {
1012  for (; ; i++) {
1013  UPDATE_CACHE(re, &s->gb);
1014  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
1015 
1016  if (code & 0xF) {
1017  run = ((unsigned) code) >> 4;
1018  UPDATE_CACHE(re, &s->gb);
1019  val = SHOW_UBITS(re, &s->gb, 1);
1020  LAST_SKIP_BITS(re, &s->gb, 1);
1021  ZERO_RUN;
1022  j = s->permutated_scantable[i];
1023  val--;
1024  block[j] = ((quant_matrix[i] << Al) ^ val) - val;
1025  if (i == se) {
1026  if (i > *last_nnz)
1027  *last_nnz = i;
1028  CLOSE_READER(re, &s->gb);
1029  return 0;
1030  }
1031  } else {
1032  run = ((unsigned) code) >> 4;
1033  if (run == 0xF) {
1034  ZERO_RUN;
1035  } else {
1036  val = run;
1037  run = (1 << run);
1038  if (val) {
1039  UPDATE_CACHE(re, &s->gb);
1040  run += SHOW_UBITS(re, &s->gb, val);
1041  LAST_SKIP_BITS(re, &s->gb, val);
1042  }
1043  *EOBRUN = run - 1;
1044  break;
1045  }
1046  }
1047  }
1048 
1049  if (i > *last_nnz)
1050  *last_nnz = i;
1051  }
1052 
1053  for (; i <= last; i++) {
1054  j = s->permutated_scantable[i];
1055  if (block[j])
1056  REFINE_BIT(j)
1057  }
1058  CLOSE_READER(re, &s->gb);
1059 
1060  return 0;
1061 }
1062 #undef REFINE_BIT
1063 #undef ZERO_RUN
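/* The refinement pass adds one correction bit (weighted by Al) to
 * coefficients that are already nonzero (REFINE_BIT) and inserts newly
 * significant coefficients of magnitude one where a zero run ends, again
 * carrying end-of-band runs across blocks in *EOBRUN. */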
1064 
1065 static int handle_rstn(MJpegDecodeContext *s, int nb_components)
1066 {
1067  int i;
1068  int reset = 0;
1069 
1070  if (s->restart_interval) {
1071  s->restart_count--;
1072  if(s->restart_count == 0 && s->avctx->codec_id == AV_CODEC_ID_THP){
1073  align_get_bits(&s->gb);
1074  for (i = 0; i < nb_components; i++) /* reset dc */
1075  s->last_dc[i] = (4 << s->bits);
1076  }
1077 
1078  i = 8 + ((-get_bits_count(&s->gb)) & 7);
1079  /* skip RSTn */
1080  if (s->restart_count == 0) {
1081  if( show_bits(&s->gb, i) == (1 << i) - 1
1082  || show_bits(&s->gb, i) == 0xFF) {
1083  int pos = get_bits_count(&s->gb);
1084  align_get_bits(&s->gb);
1085  while (get_bits_left(&s->gb) >= 8 && show_bits(&s->gb, 8) == 0xFF)
1086  skip_bits(&s->gb, 8);
1087  if (get_bits_left(&s->gb) >= 8 && (get_bits(&s->gb, 8) & 0xF8) == 0xD0) {
1088  for (i = 0; i < nb_components; i++) /* reset dc */
1089  s->last_dc[i] = (4 << s->bits);
1090  reset = 1;
1091  } else
1092  skip_bits_long(&s->gb, pos - get_bits_count(&s->gb));
1093  }
1094  }
1095  }
1096  return reset;
1097 }
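/* Restart markers RST0-RST7 (0xFFD0-0xFFD7) may follow every restart_interval
 * MCUs.  When the counter reaches zero the reader realigns to a byte
 * boundary, skips 0xFF fill bytes, and resets the DC predictors to the
 * mid-level value 4 << bits before continuing with the next restart
 * segment. */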
1098 
1099 /* Handles 1 to 4 components */
1100 static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
1101 {
1102  int i, mb_x, mb_y;
1103  unsigned width;
1104  uint16_t (*buffer)[4];
1105  int left[4], top[4], topleft[4];
1106  const int linesize = s->linesize[0];
1107  const int mask = ((1 << s->bits) - 1) << point_transform;
1108  int resync_mb_y = 0;
1109  int resync_mb_x = 0;
1110  int vpred[6];
1111 
1112  if (!s->bayer && s->nb_components < 3)
1113  return AVERROR_INVALIDDATA;
1114  if (s->bayer && s->nb_components > 2)
1115  return AVERROR_INVALIDDATA;
1116  if (s->nb_components <= 0 || s->nb_components > 4)
1117  return AVERROR_INVALIDDATA;
1118  if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
1119  return AVERROR_INVALIDDATA;
1120  if (s->bayer) {
1121  if (s->rct || s->pegasus_rct)
1122  return AVERROR_INVALIDDATA;
1123  }
1124 
1125 
1126  s->restart_count = s->restart_interval;
1127 
1128  if (s->restart_interval == 0)
1129  s->restart_interval = INT_MAX;
1130 
1131  if (s->bayer)
1132  width = s->mb_width / nb_components; /* Interleaved, width stored is the total so need to divide */
1133  else
1134  width = s->mb_width;
1135 
1136  av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size, width * 4 * sizeof(s->ljpeg_buffer[0][0]));
1137  if (!s->ljpeg_buffer)
1138  return AVERROR(ENOMEM);
1139 
1140  buffer = s->ljpeg_buffer;
1141 
1142  for (i = 0; i < 4; i++)
1143  buffer[0][i] = 1 << (s->bits - 1);
1144 
1145  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1146  uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);
1147 
1148  if (s->interlaced && s->bottom_field)
1149  ptr += linesize >> 1;
1150 
1151  for (i = 0; i < 4; i++)
1152  top[i] = left[i] = topleft[i] = buffer[0][i];
1153 
1154  if ((mb_y * s->width) % s->restart_interval == 0) {
1155  for (i = 0; i < 6; i++)
1156  vpred[i] = 1 << (s->bits-1);
1157  }
1158 
1159  for (mb_x = 0; mb_x < width; mb_x++) {
1160  int modified_predictor = predictor;
1161 
1162  if (get_bits_left(&s->gb) < 1) {
1163  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in rgb_scan\n");
1164  return AVERROR_INVALIDDATA;
1165  }
1166 
1167  if (s->restart_interval && !s->restart_count){
1168  s->restart_count = s->restart_interval;
1169  resync_mb_x = mb_x;
1170  resync_mb_y = mb_y;
1171  for(i=0; i<4; i++)
1172  top[i] = left[i]= topleft[i]= 1 << (s->bits - 1);
1173  }
1174  if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
1175  modified_predictor = 1;
1176 
1177  for (i=0;i<nb_components;i++) {
1178  int pred, dc;
1179 
1180  topleft[i] = top[i];
1181  top[i] = buffer[mb_x][i];
1182 
1183  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1184  if(dc == 0xFFFFF)
1185  return -1;
1186 
1187  if (!s->bayer || mb_x) {
1188  pred = left[i];
1189  } else { /* This path runs only for the first line in bayer images */
1190  vpred[i] += dc;
1191  pred = vpred[i] - dc;
1192  }
1193 
1194  PREDICT(pred, topleft[i], top[i], pred, modified_predictor);
1195 
1196  left[i] = buffer[mb_x][i] =
1197  mask & (pred + (unsigned)(dc * (1 << point_transform)));
1198  }
1199 
1200  if (s->restart_interval && !--s->restart_count) {
1201  align_get_bits(&s->gb);
1202  skip_bits(&s->gb, 16); /* skip RSTn */
1203  }
1204  }
1205  if (s->rct && s->nb_components == 4) {
1206  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1207  ptr[4*mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1208  ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2];
1209  ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2];
1210  ptr[4*mb_x + 0] = buffer[mb_x][3];
1211  }
1212  } else if (s->nb_components == 4) {
1213  for(i=0; i<nb_components; i++) {
1214  int c= s->comp_index[i];
1215  if (s->bits <= 8) {
1216  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1217  ptr[4*mb_x+3-c] = buffer[mb_x][i];
1218  }
1219  } else if(s->bits == 9) {
1220  return AVERROR_PATCHWELCOME;
1221  } else {
1222  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1223  ((uint16_t*)ptr)[4*mb_x+c] = buffer[mb_x][i];
1224  }
1225  }
1226  }
1227  } else if (s->rct) {
1228  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1229  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1230  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1231  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1232  }
1233  } else if (s->pegasus_rct) {
1234  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1235  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2]) >> 2);
1236  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1237  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1238  }
1239  } else if (s->bayer) {
1240  if (s->bits <= 8)
1241  return AVERROR_PATCHWELCOME;
1242  if (nb_components == 1) {
1243  /* Leave decoding to the TIFF/DNG decoder (see comment in ff_mjpeg_decode_sof) */
1244  for (mb_x = 0; mb_x < width; mb_x++)
1245  ((uint16_t*)ptr)[mb_x] = buffer[mb_x][0];
1246  } else if (nb_components == 2) {
1247  for (mb_x = 0; mb_x < width; mb_x++) {
1248  ((uint16_t*)ptr)[2*mb_x + 0] = buffer[mb_x][0];
1249  ((uint16_t*)ptr)[2*mb_x + 1] = buffer[mb_x][1];
1250  }
1251  }
1252  } else {
1253  for(i=0; i<nb_components; i++) {
1254  int c= s->comp_index[i];
1255  if (s->bits <= 8) {
1256  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1257  ptr[3*mb_x+2-c] = buffer[mb_x][i];
1258  }
1259  } else if(s->bits == 9) {
1260  return AVERROR_PATCHWELCOME;
1261  } else {
1262  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1263  ((uint16_t*)ptr)[3*mb_x+2-c] = buffer[mb_x][i];
1264  }
1265  }
1266  }
1267  }
1268  }
1269  return 0;
1270 }
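/* Lossless (SOF3) scans code one prediction residual per sample.  The
 * predictor index selects one of the seven spatial predictors from the JPEG
 * spec (applied via the PREDICT macro), and it falls back to predictor 1 in
 * the first column and around restart points where not all neighbours exist.
 * A decoded row is only converted to the output pixel layout, optionally
 * undoing the RCT or Pegasus RCT colour transform, once the whole row is
 * available in ljpeg_buffer. */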
1271 
1272 static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
1273  int point_transform, int nb_components)
1274 {
1275  int i, mb_x, mb_y, mask;
1276  int bits= (s->bits+7)&~7;
1277  int resync_mb_y = 0;
1278  int resync_mb_x = 0;
1279 
1280  point_transform += bits - s->bits;
1281  mask = ((1 << s->bits) - 1) << point_transform;
1282 
1283  av_assert0(nb_components>=1 && nb_components<=4);
1284 
1285  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1286  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1287  if (get_bits_left(&s->gb) < 1) {
1288  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in yuv_scan\n");
1289  return AVERROR_INVALIDDATA;
1290  }
1291  if (s->restart_interval && !s->restart_count){
1292  s->restart_count = s->restart_interval;
1293  resync_mb_x = mb_x;
1294  resync_mb_y = mb_y;
1295  }
1296 
1297  if(!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->interlaced){
1298  int toprow = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
1299  int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
1300  for (i = 0; i < nb_components; i++) {
1301  uint8_t *ptr;
1302  uint16_t *ptr16;
1303  int n, h, v, x, y, c, j, linesize;
1304  n = s->nb_blocks[i];
1305  c = s->comp_index[i];
1306  h = s->h_scount[i];
1307  v = s->v_scount[i];
1308  x = 0;
1309  y = 0;
1310  linesize= s->linesize[c];
1311 
1312  if(bits>8) linesize /= 2;
1313 
1314  for(j=0; j<n; j++) {
1315  int pred, dc;
1316 
1317  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1318  if(dc == 0xFFFFF)
1319  return -1;
1320  if ( h * mb_x + x >= s->width
1321  || v * mb_y + y >= s->height) {
1322  // Nothing to do
1323  } else if (bits<=8) {
1324  ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
1325  if(y==0 && toprow){
1326  if(x==0 && leftcol){
1327  pred= 1 << (bits - 1);
1328  }else{
1329  pred= ptr[-1];
1330  }
1331  }else{
1332  if(x==0 && leftcol){
1333  pred= ptr[-linesize];
1334  }else{
1335  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1336  }
1337  }
1338 
1339  if (s->interlaced && s->bottom_field)
1340  ptr += linesize >> 1;
1341  pred &= mask;
1342  *ptr= pred + ((unsigned)dc << point_transform);
1343  }else{
1344  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1345  if(y==0 && toprow){
1346  if(x==0 && leftcol){
1347  pred= 1 << (bits - 1);
1348  }else{
1349  pred= ptr16[-1];
1350  }
1351  }else{
1352  if(x==0 && leftcol){
1353  pred= ptr16[-linesize];
1354  }else{
1355  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1356  }
1357  }
1358 
1359  if (s->interlaced && s->bottom_field)
1360  ptr16 += linesize >> 1;
1361  pred &= mask;
1362  *ptr16= pred + ((unsigned)dc << point_transform);
1363  }
1364  if (++x == h) {
1365  x = 0;
1366  y++;
1367  }
1368  }
1369  }
1370  } else {
1371  for (i = 0; i < nb_components; i++) {
1372  uint8_t *ptr;
1373  uint16_t *ptr16;
1374  int n, h, v, x, y, c, j, linesize, dc;
1375  n = s->nb_blocks[i];
1376  c = s->comp_index[i];
1377  h = s->h_scount[i];
1378  v = s->v_scount[i];
1379  x = 0;
1380  y = 0;
1381  linesize = s->linesize[c];
1382 
1383  if(bits>8) linesize /= 2;
1384 
1385  for (j = 0; j < n; j++) {
1386  int pred;
1387 
1388  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1389  if(dc == 0xFFFFF)
1390  return -1;
1391  if ( h * mb_x + x >= s->width
1392  || v * mb_y + y >= s->height) {
1393  // Nothing to do
1394  } else if (bits<=8) {
1395  ptr = s->picture_ptr->data[c] +
1396  (linesize * (v * mb_y + y)) +
1397  (h * mb_x + x); //FIXME optimize this crap
1398  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1399 
1400  pred &= mask;
1401  *ptr = pred + ((unsigned)dc << point_transform);
1402  }else{
1403  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1404  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1405 
1406  pred &= mask;
1407  *ptr16= pred + ((unsigned)dc << point_transform);
1408  }
1409 
1410  if (++x == h) {
1411  x = 0;
1412  y++;
1413  }
1414  }
1415  }
1416  }
1417  if (s->restart_interval && !--s->restart_count) {
1418  align_get_bits(&s->gb);
1419  skip_bits(&s->gb, 16); /* skip RSTn */
1420  }
1421  }
1422  }
1423  return 0;
1424 }
1425 
1426 static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s,
1427  uint8_t *dst, const uint8_t *src,
1428  int linesize, int lowres)
1429 {
1430  switch (lowres) {
1431  case 0: s->hdsp.put_pixels_tab[1][0](dst, src, linesize, 8);
1432  break;
1433  case 1: copy_block4(dst, src, linesize, linesize, 4);
1434  break;
1435  case 2: copy_block2(dst, src, linesize, linesize, 2);
1436  break;
1437  case 3: *dst = *src;
1438  break;
1439  }
1440 }
1441 
1442 static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
1443 {
1444  int block_x, block_y;
1445  int size = 8 >> s->avctx->lowres;
1446  if (s->bits > 8) {
1447  for (block_y=0; block_y<size; block_y++)
1448  for (block_x=0; block_x<size; block_x++)
1449  *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 - s->bits;
1450  } else {
1451  for (block_y=0; block_y<size; block_y++)
1452  for (block_x=0; block_x<size; block_x++)
1453  *(ptr + block_x + block_y*linesize) <<= 8 - s->bits;
1454  }
1455 }
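/* When the bit depth is not a multiple of 8, samples are left-shifted so they
 * fill the 8- or 16-bit storage range; 12-bit data, for instance, ends up
 * shifted left by 4 within a 16-bit frame. */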
1456 
1457 static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
1458  int Al, const uint8_t *mb_bitmask,
1459  int mb_bitmask_size,
1460  const AVFrame *reference)
1461 {
1462  int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
1463  uint8_t *data[MAX_COMPONENTS];
1464  const uint8_t *reference_data[MAX_COMPONENTS];
1465  int linesize[MAX_COMPONENTS];
1466  GetBitContext mb_bitmask_gb = {0}; // initialize to silence gcc warning
1467  int bytes_per_pixel = 1 + (s->bits > 8);
1468 
1469  if (mb_bitmask) {
1470  if (mb_bitmask_size != (s->mb_width * s->mb_height + 7)>>3) {
1471  av_log(s->avctx, AV_LOG_ERROR, "mb_bitmask_size mismatches\n");
1472  return AVERROR_INVALIDDATA;
1473  }
1474  init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height);
1475  }
1476 
1477  s->restart_count = 0;
1478 
1479  av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &chroma_h_shift,
1480  &chroma_v_shift);
1481  chroma_width = AV_CEIL_RSHIFT(s->width, chroma_h_shift);
1482  chroma_height = AV_CEIL_RSHIFT(s->height, chroma_v_shift);
1483 
1484  for (i = 0; i < nb_components; i++) {
1485  int c = s->comp_index[i];
1486  data[c] = s->picture_ptr->data[c];
1487  reference_data[c] = reference ? reference->data[c] : NULL;
1488  linesize[c] = s->linesize[c];
1489  s->coefs_finished[c] |= 1;
1490  }
1491 
1492  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1493  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1494  const int copy_mb = mb_bitmask && !get_bits1(&mb_bitmask_gb);
1495 
1496  if (s->restart_interval && !s->restart_count)
1497  s->restart_count = s->restart_interval;
1498 
1499  if (get_bits_left(&s->gb) < 0) {
1500  av_log(s->avctx, AV_LOG_ERROR, "overread %d\n",
1501  -get_bits_left(&s->gb));
1502  return AVERROR_INVALIDDATA;
1503  }
1504  for (i = 0; i < nb_components; i++) {
1505  uint8_t *ptr;
1506  int n, h, v, x, y, c, j;
1507  int block_offset;
1508  n = s->nb_blocks[i];
1509  c = s->comp_index[i];
1510  h = s->h_scount[i];
1511  v = s->v_scount[i];
1512  x = 0;
1513  y = 0;
1514  for (j = 0; j < n; j++) {
1515  block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
1516  (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);
1517 
1518  if (s->interlaced && s->bottom_field)
1519  block_offset += linesize[c] >> 1;
1520  if ( 8*(h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width : s->width)
1521  && 8*(v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
1522  ptr = data[c] + block_offset;
1523  } else
1524  ptr = NULL;
1525  if (!s->progressive) {
1526  if (copy_mb) {
1527  if (ptr)
1528  mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
1529  linesize[c], s->avctx->lowres);
1530 
1531  } else {
1532  s->bdsp.clear_block(s->block);
1533  if (decode_block(s, s->block, i,
1534  s->dc_index[i], s->ac_index[i],
1535  s->quant_matrixes[s->quant_sindex[i]]) < 0) {
1536  av_log(s->avctx, AV_LOG_ERROR,
1537  "error y=%d x=%d\n", mb_y, mb_x);
1538  return AVERROR_INVALIDDATA;
1539  }
1540  if (ptr && linesize[c]) {
1541  s->idsp.idct_put(ptr, linesize[c], s->block);
1542  if (s->bits & 7)
1543  shift_output(s, ptr, linesize[c]);
1544  }
1545  }
1546  } else {
1547  int block_idx = s->block_stride[c] * (v * mb_y + y) +
1548  (h * mb_x + x);
1549  int16_t *block = s->blocks[c][block_idx];
1550  if (Ah)
1551  block[0] += get_bits1(&s->gb) *
1552  s->quant_matrixes[s->quant_sindex[i]][0] << Al;
1553  else if (decode_dc_progressive(s, block, i, s->dc_index[i],
1554  s->quant_matrixes[s->quant_sindex[i]],
1555  Al) < 0) {
1556  av_log(s->avctx, AV_LOG_ERROR,
1557  "error y=%d x=%d\n", mb_y, mb_x);
1558  return AVERROR_INVALIDDATA;
1559  }
1560  }
1561  ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
1562  ff_dlog(s->avctx, "%d %d %d %d %d %d %d %d \n",
1563  mb_x, mb_y, x, y, c, s->bottom_field,
1564  (v * mb_y + y) * 8, (h * mb_x + x) * 8);
1565  if (++x == h) {
1566  x = 0;
1567  y++;
1568  }
1569  }
1570  }
1571 
1572  handle_rstn(s, nb_components);
1573  }
1574  }
1575  return 0;
1576 }
1577 
1578 static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
1579  int se, int Ah, int Al)
1580 {
1581  int mb_x, mb_y;
1582  int EOBRUN = 0;
1583  int c = s->comp_index[0];
1584  uint16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];
1585 
1586  av_assert0(ss>=0 && Ah>=0 && Al>=0);
1587  if (se < ss || se > 63) {
1588  av_log(s->avctx, AV_LOG_ERROR, "SS/SE %d/%d is invalid\n", ss, se);
1589  return AVERROR_INVALIDDATA;
1590  }
1591 
1592  // s->coefs_finished is a bitmask for coefficients coded
1593  // ss and se are parameters telling start and end coefficients
1594  s->coefs_finished[c] |= (2ULL << se) - (1ULL << ss);
1595 
1596  s->restart_count = 0;
1597 
1598  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1599  int block_idx = mb_y * s->block_stride[c];
1600  int16_t (*block)[64] = &s->blocks[c][block_idx];
1601  uint8_t *last_nnz = &s->last_nnz[c][block_idx];
1602  if (get_bits_left(&s->gb) <= 0) {
1603  av_log(s->avctx, AV_LOG_ERROR, "bitstream truncated in mjpeg_decode_scan_progressive_ac\n");
1604  return AVERROR_INVALIDDATA;
1605  }
1606  for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
1607  int ret;
1608  if (s->restart_interval && !s->restart_count)
1609  s->restart_count = s->restart_interval;
1610 
1611  if (Ah)
1612  ret = decode_block_refinement(s, *block, last_nnz, s->ac_index[0],
1613  quant_matrix, ss, se, Al, &EOBRUN);
1614  else
1615  ret = decode_block_progressive(s, *block, last_nnz, s->ac_index[0],
1616  quant_matrix, ss, se, Al, &EOBRUN);
1617 
1618  if (ret >= 0 && get_bits_left(&s->gb) < 0)
1619  ret = AVERROR_INVALIDDATA;
1620  if (ret < 0) {
1621  av_log(s->avctx, AV_LOG_ERROR,
1622  "error y=%d x=%d\n", mb_y, mb_x);
1623  return AVERROR_INVALIDDATA;
1624  }
1625 
1626  if (handle_rstn(s, 0))
1627  EOBRUN = 0;
1628  }
1629  }
1630  return 0;
1631 }
1632 
1633 static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
1634 {
1635  int mb_x, mb_y;
1636  int c;
1637  const int bytes_per_pixel = 1 + (s->bits > 8);
1638  const int block_size = s->lossless ? 1 : 8;
1639 
1640  for (c = 0; c < s->nb_components; c++) {
1641  uint8_t *data = s->picture_ptr->data[c];
1642  int linesize = s->linesize[c];
1643  int h = s->h_max / s->h_count[c];
1644  int v = s->v_max / s->v_count[c];
1645  int mb_width = (s->width + h * block_size - 1) / (h * block_size);
1646  int mb_height = (s->height + v * block_size - 1) / (v * block_size);
1647 
1648  if (~s->coefs_finished[c])
1649  av_log(s->avctx, AV_LOG_WARNING, "component %d is incomplete\n", c);
1650 
1651  if (s->interlaced && s->bottom_field)
1652  data += linesize >> 1;
1653 
1654  for (mb_y = 0; mb_y < mb_height; mb_y++) {
1655  uint8_t *ptr = data + (mb_y * linesize * 8 >> s->avctx->lowres);
1656  int block_idx = mb_y * s->block_stride[c];
1657  int16_t (*block)[64] = &s->blocks[c][block_idx];
1658  for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
1659  s->idsp.idct_put(ptr, linesize, *block);
1660  if (s->bits & 7)
1661  shift_output(s, ptr, linesize);
1662  ptr += bytes_per_pixel*8 >> s->avctx->lowres;
1663  }
1664  }
1665  }
1666 }
1667 
1668 int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask,
1669  int mb_bitmask_size, const AVFrame *reference)
1670 {
1671  int len, nb_components, i, h, v, predictor, point_transform;
1672  int index, id, ret;
1673  const int block_size = s->lossless ? 1 : 8;
1674  int ilv, prev_shift;
1675 
1676  if (!s->got_picture) {
1677  av_log(s->avctx, AV_LOG_WARNING,
1678  "Can not process SOS before SOF, skipping\n");
1679  return -1;
1680  }
1681 
1682  if (reference) {
1683  if (reference->width != s->picture_ptr->width ||
1684  reference->height != s->picture_ptr->height ||
1685  reference->format != s->picture_ptr->format) {
1686  av_log(s->avctx, AV_LOG_ERROR, "Reference mismatching\n");
1687  return AVERROR_INVALIDDATA;
1688  }
1689  }
1690 
1691  /* XXX: verify len field validity */
1692  len = get_bits(&s->gb, 16);
1693  nb_components = get_bits(&s->gb, 8);
1694  if (nb_components == 0 || nb_components > MAX_COMPONENTS) {
1696  "decode_sos: nb_components (%d)",
1697  nb_components);
1698  return AVERROR_PATCHWELCOME;
1699  }
1700  if (len != 6 + 2 * nb_components) {
1701  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: invalid len (%d)\n", len);
1702  return AVERROR_INVALIDDATA;
1703  }
1704  for (i = 0; i < nb_components; i++) {
1705  id = get_bits(&s->gb, 8);
1706  av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id);
1707  /* find component index */
1708  for (index = 0; index < s->nb_components; index++)
1709  if (id == s->component_id[index])
1710  break;
1711  if (index == s->nb_components) {
1712  av_log(s->avctx, AV_LOG_ERROR,
1713  "decode_sos: index(%d) out of components\n", index);
1714  return AVERROR_INVALIDDATA;
1715  }
1716  /* Metasoft MJPEG codec has Cb and Cr swapped */
1717  if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J')
1718  && nb_components == 3 && s->nb_components == 3 && i)
1719  index = 3 - i;
1720 
1721  s->quant_sindex[i] = s->quant_index[index];
1722  s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
1723  s->h_scount[i] = s->h_count[index];
1724  s->v_scount[i] = s->v_count[index];
1725 
1726  s->comp_index[i] = index;
1727 
1728  s->dc_index[i] = get_bits(&s->gb, 4);
1729  s->ac_index[i] = get_bits(&s->gb, 4);
1730 
1731  if (s->dc_index[i] < 0 || s->ac_index[i] < 0 ||
1732  s->dc_index[i] >= 4 || s->ac_index[i] >= 4)
1733  goto out_of_range;
1734  if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table))
1735  goto out_of_range;
1736  }
1737 
1738  predictor = get_bits(&s->gb, 8); /* JPEG Ss / lossless JPEG predictor /JPEG-LS NEAR */
1739  ilv = get_bits(&s->gb, 8); /* JPEG Se / JPEG-LS ILV */
1740  if(s->avctx->codec_tag != AV_RL32("CJPG")){
1741  prev_shift = get_bits(&s->gb, 4); /* Ah */
1742  point_transform = get_bits(&s->gb, 4); /* Al */
1743  }else
1744  prev_shift = point_transform = 0;
1745 
1746  if (nb_components > 1) {
1747  /* interleaved stream */
1748  s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size);
1749  s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
1750  } else if (!s->ls) { /* skip this for JPEG-LS */
1751  h = s->h_max / s->h_scount[0];
1752  v = s->v_max / s->v_scount[0];
1753  s->mb_width = (s->width + h * block_size - 1) / (h * block_size);
1754  s->mb_height = (s->height + v * block_size - 1) / (v * block_size);
1755  s->nb_blocks[0] = 1;
1756  s->h_scount[0] = 1;
1757  s->v_scount[0] = 1;
1758  }
1759 
1760  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1761  av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s comp:%d\n",
1762  s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
1763  predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod,
1764  s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), nb_components);
1765 
1766 
1767  /* mjpeg-b can have padding bytes between sos and image data, skip them */
1768  for (i = s->mjpb_skiptosod; i > 0; i--)
1769  skip_bits(&s->gb, 8);
1770 
1771 next_field:
1772  for (i = 0; i < nb_components; i++)
1773  s->last_dc[i] = (4 << s->bits);
1774 
1775  if (s->avctx->hwaccel) {
1776  int bytes_to_start = get_bits_count(&s->gb) / 8;
1777  av_assert0(bytes_to_start >= 0 &&
1778  s->raw_scan_buffer_size >= bytes_to_start);
1779 
1780  ret = FF_HW_CALL(s->avctx, decode_slice,
1781  s->raw_scan_buffer + bytes_to_start,
1782  s->raw_scan_buffer_size - bytes_to_start);
1783  if (ret < 0)
1784  return ret;
1785 
1786  } else if (s->lossless) {
1787  av_assert0(s->picture_ptr == s->picture);
1788  if (CONFIG_JPEGLS_DECODER && s->ls) {
1789 // for () {
1790 // reset_ls_coding_parameters(s, 0);
1791 
1792  if ((ret = ff_jpegls_decode_picture(s, predictor,
1793  point_transform, ilv)) < 0)
1794  return ret;
1795  } else {
1796  if (s->rgb || s->bayer) {
1797  if ((ret = ljpeg_decode_rgb_scan(s, nb_components, predictor, point_transform)) < 0)
1798  return ret;
1799  } else {
1800  if ((ret = ljpeg_decode_yuv_scan(s, predictor,
1801  point_transform,
1802  nb_components)) < 0)
1803  return ret;
1804  }
1805  }
1806  } else {
1807  if (s->progressive && predictor) {
1808  av_assert0(s->picture_ptr == s->picture);
1809  if ((ret = mjpeg_decode_scan_progressive_ac(s, predictor,
1810  ilv, prev_shift,
1811  point_transform)) < 0)
1812  return ret;
1813  } else {
1814  if ((ret = mjpeg_decode_scan(s, nb_components,
1815  prev_shift, point_transform,
1816  mb_bitmask, mb_bitmask_size, reference)) < 0)
1817  return ret;
1818  }
1819  }
1820 
1821  if (s->interlaced &&
1822  get_bits_left(&s->gb) > 32 &&
1823  show_bits(&s->gb, 8) == 0xFF) {
1824  GetBitContext bak = s->gb;
1825  align_get_bits(&bak);
1826  if (show_bits(&bak, 16) == 0xFFD1) {
1827  av_log(s->avctx, AV_LOG_DEBUG, "AVRn interlaced picture marker found\n");
1828  s->gb = bak;
1829  skip_bits(&s->gb, 16);
1830  s->bottom_field ^= 1;
1831 
1832  goto next_field;
1833  }
1834  }
1835 
1836  emms_c();
1837  return 0;
1838  out_of_range:
1839  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n");
1840  return AVERROR_INVALIDDATA;
1841 }
1842 
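The predictor/ilv and prev_shift/point_transform reads above come from the last three bytes of the SOS header (Ss, Se, and the packed Ah/Al byte). A minimal standalone sketch of that layout, with made-up names and no FFmpeg API:

#include <stdint.h>

/* Illustrative only: the three bytes that follow the per-component
 * table selectors in an SOS segment. */
struct scan_tail {
    uint8_t ss;      /* spectral start; lossless predictor; JPEG-LS NEAR */
    uint8_t se;      /* spectral end; JPEG-LS interleave mode */
    uint8_t ah, al;  /* successive-approximation high / low (Al = point transform) */
};

static struct scan_tail parse_scan_tail(const uint8_t p[3])
{
    struct scan_tail t = { p[0], p[1], (uint8_t)(p[2] >> 4), (uint8_t)(p[2] & 0x0f) };
    return t;
}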
1843 static int mjpeg_decode_dri(MJpegDecodeContext *s)
1844 {
1845  if (get_bits(&s->gb, 16) != 4)
1846  return AVERROR_INVALIDDATA;
1847  s->restart_interval = get_bits(&s->gb, 16);
1848  s->restart_count = 0;
1849  av_log(s->avctx, AV_LOG_DEBUG, "restart interval: %d\n",
1850  s->restart_interval);
1851 
1852  return 0;
1853 }
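For reference, the segment parsed by the function above is tiny: after the 0xFFDD marker come a 16-bit length that must be 4 and the 16-bit restart interval. A big-endian sketch with an illustrative name, not the decoder's own helper:

#include <stdint.h>

/* Parse the 4-byte DRI payload; returns the restart interval in MCUs,
 * or -1 if the length field is not 4. An interval of 0 disables restarts. */
static int parse_dri_payload(const uint8_t p[4])
{
    int length = (p[0] << 8) | p[1];
    if (length != 4)
        return -1;
    return (p[2] << 8) | p[3];
}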
1854 
1855 static int mjpeg_decode_app(MJpegDecodeContext *s)
1856 {
1857  int len, id, i;
1858 
1859  len = get_bits(&s->gb, 16);
1860  if (len < 6) {
1861  if (s->bayer) {
1862  // Pentax K-1 (digital camera) JPEG images embedded in DNG images contain unknown APP0 markers
1863  av_log(s->avctx, AV_LOG_WARNING, "skipping APPx (len=%"PRId32") for bayer-encoded image\n", len);
1864  skip_bits(&s->gb, len);
1865  return 0;
1866  } else
1867  return AVERROR_INVALIDDATA;
1868  }
1869  if (8 * len > get_bits_left(&s->gb))
1870  return AVERROR_INVALIDDATA;
1871 
1872  id = get_bits_long(&s->gb, 32);
1873  len -= 6;
1874 
1875  if (s->avctx->debug & FF_DEBUG_STARTCODE)
1876  av_log(s->avctx, AV_LOG_DEBUG, "APPx (%s / %8X) len=%d\n",
1877  av_fourcc2str(av_bswap32(id)), id, len);
1878 
1879  /* Buggy AVID, it puts EOI only at every 10th frame. */
1880  /* Also, this fourcc is used by non-avid files too, it holds some
1881  information, but it's always present in AVID-created files. */
1882  if (id == AV_RB32("AVI1")) {
1883  /* structure:
1884  4bytes AVI1
1885  1bytes polarity
1886  1bytes always zero
1887  4bytes field_size
1888  4bytes field_size_less_padding
1889  */
1890  s->buggy_avid = 1;
1891  i = get_bits(&s->gb, 8); len--;
1892  av_log(s->avctx, AV_LOG_DEBUG, "polarity %d\n", i);
1893  goto out;
1894  }
1895 
1896  if (id == AV_RB32("JFIF")) {
1897  int t_w, t_h, v1, v2;
1898  if (len < 8)
1899  goto out;
1900  skip_bits(&s->gb, 8); /* the trailing zero-byte */
1901  v1 = get_bits(&s->gb, 8);
1902  v2 = get_bits(&s->gb, 8);
1903  skip_bits(&s->gb, 8);
1904 
1905  s->avctx->sample_aspect_ratio.num = get_bits(&s->gb, 16);
1906  s->avctx->sample_aspect_ratio.den = get_bits(&s->gb, 16);
1907  if ( s->avctx->sample_aspect_ratio.num <= 0
1908  || s->avctx->sample_aspect_ratio.den <= 0) {
1909  s->avctx->sample_aspect_ratio.num = 0;
1910  s->avctx->sample_aspect_ratio.den = 1;
1911  }
1912 
1913  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1914  av_log(s->avctx, AV_LOG_INFO,
1915  "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
1916  v1, v2,
1917  s->avctx->sample_aspect_ratio.num,
1918  s->avctx->sample_aspect_ratio.den);
1919 
1920  len -= 8;
1921  if (len >= 2) {
1922  t_w = get_bits(&s->gb, 8);
1923  t_h = get_bits(&s->gb, 8);
1924  if (t_w && t_h) {
1925  /* skip thumbnail */
1926  if (len -10 - (t_w * t_h * 3) > 0)
1927  len -= t_w * t_h * 3;
1928  }
1929  len -= 2;
1930  }
1931  goto out;
1932  }
1933 
1934  if ( id == AV_RB32("Adob")
1935  && len >= 7
1936  && show_bits(&s->gb, 8) == 'e'
1937  && show_bits_long(&s->gb, 32) != AV_RB32("e_CM")) {
1938  skip_bits(&s->gb, 8); /* 'e' */
1939  skip_bits(&s->gb, 16); /* version */
1940  skip_bits(&s->gb, 16); /* flags0 */
1941  skip_bits(&s->gb, 16); /* flags1 */
1942  s->adobe_transform = get_bits(&s->gb, 8);
1943  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1944  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found, transform=%d\n", s->adobe_transform);
1945  len -= 7;
1946  goto out;
1947  }
1948 
1949  if (id == AV_RB32("LJIF")) {
1950  int rgb = s->rgb;
1951  int pegasus_rct = s->pegasus_rct;
1952  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1953  av_log(s->avctx, AV_LOG_INFO,
1954  "Pegasus lossless jpeg header found\n");
1955  skip_bits(&s->gb, 16); /* version ? */
1956  skip_bits(&s->gb, 16); /* unknown always 0? */
1957  skip_bits(&s->gb, 16); /* unknown always 0? */
1958  skip_bits(&s->gb, 16); /* unknown always 0? */
1959  switch (i=get_bits(&s->gb, 8)) {
1960  case 1:
1961  rgb = 1;
1962  pegasus_rct = 0;
1963  break;
1964  case 2:
1965  rgb = 1;
1966  pegasus_rct = 1;
1967  break;
1968  default:
1969  av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i);
1970  }
1971 
1972  len -= 9;
1973  if (s->bayer)
1974  goto out;
1975  if (s->got_picture)
1976  if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
1977  av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
1978  goto out;
1979  }
1980 
1981  s->rgb = rgb;
1982  s->pegasus_rct = pegasus_rct;
1983 
1984  goto out;
1985  }
1986  if (id == AV_RL32("colr") && len > 0) {
1987  s->colr = get_bits(&s->gb, 8);
1988  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1989  av_log(s->avctx, AV_LOG_INFO, "COLR %d\n", s->colr);
1990  len --;
1991  goto out;
1992  }
1993  if (id == AV_RL32("xfrm") && len > 0) {
1994  s->xfrm = get_bits(&s->gb, 8);
1995  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1996  av_log(s->avctx, AV_LOG_INFO, "XFRM %d\n", s->xfrm);
1997  len --;
1998  goto out;
1999  }
2000 
2001  /* JPS extension by VRex */
2002  if (s->start_code == APP3 && id == AV_RB32("_JPS") && len >= 10) {
2003  int flags, layout, type;
2004  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2005  av_log(s->avctx, AV_LOG_INFO, "_JPSJPS_\n");
2006 
2007  skip_bits(&s->gb, 32); len -= 4; /* JPS_ */
2008  skip_bits(&s->gb, 16); len -= 2; /* block length */
2009  skip_bits(&s->gb, 8); /* reserved */
2010  flags = get_bits(&s->gb, 8);
2011  layout = get_bits(&s->gb, 8);
2012  type = get_bits(&s->gb, 8);
2013  len -= 4;
2014 
2015  av_freep(&s->stereo3d);
2016  s->stereo3d = av_stereo3d_alloc();
2017  if (!s->stereo3d) {
2018  goto out;
2019  }
2020  if (type == 0) {
2021  s->stereo3d->type = AV_STEREO3D_2D;
2022  } else if (type == 1) {
2023  switch (layout) {
2024  case 0x01:
2025  s->stereo3d->type = AV_STEREO3D_LINES;
2026  break;
2027  case 0x02:
2028  s->stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
2029  break;
2030  case 0x03:
2031  s->stereo3d->type = AV_STEREO3D_TOPBOTTOM;
2032  break;
2033  }
2034  if (!(flags & 0x04)) {
2035  s->stereo3d->flags = AV_STEREO3D_FLAG_INVERT;
2036  }
2037  }
2038  goto out;
2039  }
2040 
2041  /* EXIF metadata */
2042  if (s->start_code == APP1 && id == AV_RB32("Exif") && len >= 2) {
2043  GetByteContext gbytes;
2044  int ret, le, ifd_offset, bytes_read;
2045  const uint8_t *aligned;
2046 
2047  skip_bits(&s->gb, 16); // skip padding
2048  len -= 2;
2049 
2050  // init byte wise reading
2051  aligned = align_get_bits(&s->gb);
2052  bytestream2_init(&gbytes, aligned, len);
2053 
2054  // read TIFF header
2055  ret = ff_tdecode_header(&gbytes, &le, &ifd_offset);
2056  if (ret) {
2057  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: invalid TIFF header in EXIF data\n");
2058  } else {
2059  bytestream2_seek(&gbytes, ifd_offset, SEEK_SET);
2060 
2061  // read 0th IFD and store the metadata
2062  // (return values > 0 indicate the presence of subimage metadata)
2063  ret = ff_exif_decode_ifd(s->avctx, &gbytes, le, 0, &s->exif_metadata);
2064  if (ret < 0) {
2065  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: error decoding EXIF data\n");
2066  }
2067  }
2068 
2069  bytes_read = bytestream2_tell(&gbytes);
2070  skip_bits(&s->gb, bytes_read << 3);
2071  len -= bytes_read;
2072 
2073  goto out;
2074  }
2075 
2076  /* Apple MJPEG-A */
2077  if ((s->start_code == APP1) && (len > (0x28 - 8))) {
2078  id = get_bits_long(&s->gb, 32);
2079  len -= 4;
2080  /* Apple MJPEG-A */
2081  if (id == AV_RB32("mjpg")) {
2082  /* structure:
2083  4bytes field size
2084  4bytes pad field size
2085  4bytes next off
2086  4bytes quant off
2087  4bytes huff off
2088  4bytes image off
2089  4bytes scan off
2090  4bytes data off
2091  */
2092  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2093  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n");
2094  }
2095  }
2096 
2097  if (s->start_code == APP2 && id == AV_RB32("ICC_") && len >= 10) {
2098  int id2;
2099  unsigned seqno;
2100  unsigned nummarkers;
2101 
2102  id = get_bits_long(&s->gb, 32);
2103  id2 = get_bits(&s->gb, 24);
2104  len -= 7;
2105  if (id != AV_RB32("PROF") || id2 != AV_RB24("ILE")) {
2106  av_log(s->avctx, AV_LOG_WARNING, "Invalid ICC_PROFILE header in APP2\n");
2107  goto out;
2108  }
2109 
2110  skip_bits(&s->gb, 8);
2111  seqno = get_bits(&s->gb, 8);
2112  len -= 2;
2113  if (seqno == 0) {
2114  av_log(s->avctx, AV_LOG_WARNING, "Invalid sequence number in APP2\n");
2115  goto out;
2116  }
2117 
2118  nummarkers = get_bits(&s->gb, 8);
2119  len -= 1;
2120  if (nummarkers == 0) {
2121  av_log(s->avctx, AV_LOG_WARNING, "Invalid number of markers coded in APP2\n");
2122  goto out;
2123  } else if (s->iccnum != 0 && nummarkers != s->iccnum) {
2124  av_log(s->avctx, AV_LOG_WARNING, "Mismatch in coded number of ICC markers between markers\n");
2125  goto out;
2126  } else if (seqno > nummarkers) {
2127  av_log(s->avctx, AV_LOG_WARNING, "Mismatching sequence number and coded number of ICC markers\n");
2128  goto out;
2129  }
2130 
2131  /* Allocate if this is the first APP2 we've seen. */
2132  if (s->iccnum == 0) {
2133  if (!FF_ALLOCZ_TYPED_ARRAY(s->iccentries, nummarkers)) {
2134  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data arrays\n");
2135  return AVERROR(ENOMEM);
2136  }
2137  s->iccnum = nummarkers;
2138  }
2139 
2140  if (s->iccentries[seqno - 1].data) {
2141  av_log(s->avctx, AV_LOG_WARNING, "Duplicate ICC sequence number\n");
2142  goto out;
2143  }
2144 
2145  s->iccentries[seqno - 1].length = len;
2146  s->iccentries[seqno - 1].data = av_malloc(len);
2147  if (!s->iccentries[seqno - 1].data) {
2148  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data buffer\n");
2149  return AVERROR(ENOMEM);
2150  }
2151 
2152  memcpy(s->iccentries[seqno - 1].data, align_get_bits(&s->gb), len);
2153  skip_bits(&s->gb, len << 3);
2154  len = 0;
2155  s->iccread++;
2156 
2157  if (s->iccread > s->iccnum)
2158  av_log(s->avctx, AV_LOG_WARNING, "Read more ICC markers than are supposed to be coded\n");
2159  }
2160 
2161 out:
2162  /* slow but needed for extreme adobe jpegs */
2163  if (len < 0)
2164  av_log(s->avctx, AV_LOG_ERROR,
2165  "mjpeg: error, decode_app parser read over the end\n");
2166  while (--len > 0)
2167  skip_bits(&s->gb, 8);
2168 
2169  return 0;
2170 }
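Large ICC profiles are split across several APP2 "ICC_PROFILE" markers, each carrying a 1-based sequence number and the total marker count; the handler above stores chunk seqno - 1 and the frame code later concatenates them in order. A minimal sketch of that reassembly idea (illustrative names, plain C, not the decoder's own code):

#include <stdlib.h>
#include <string.h>

struct icc_chunk { unsigned char *data; size_t len; };

/* Concatenate n chunks already placed in sequence order; the caller frees
 * the returned buffer. Returns NULL on allocation failure. */
static unsigned char *join_icc_chunks(const struct icc_chunk *c, int n, size_t *out_len)
{
    size_t total = 0, off = 0;
    for (int i = 0; i < n; i++)
        total += c[i].len;
    unsigned char *buf = malloc(total ? total : 1);
    if (!buf)
        return NULL;
    for (int i = 0; i < n; i++) {
        memcpy(buf + off, c[i].data, c[i].len);
        off += c[i].len;
    }
    *out_len = total;
    return buf;
}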
2171 
2172 static int mjpeg_decode_com(MJpegDecodeContext *s)
2173 {
2174  int len = get_bits(&s->gb, 16);
2175  if (len >= 2 && 8 * len - 16 <= get_bits_left(&s->gb)) {
2176  int i;
2177  char *cbuf = av_malloc(len - 1);
2178  if (!cbuf)
2179  return AVERROR(ENOMEM);
2180 
2181  for (i = 0; i < len - 2; i++)
2182  cbuf[i] = get_bits(&s->gb, 8);
2183  if (i > 0 && cbuf[i - 1] == '\n')
2184  cbuf[i - 1] = 0;
2185  else
2186  cbuf[i] = 0;
2187 
2188  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2189  av_log(s->avctx, AV_LOG_INFO, "comment: '%s'\n", cbuf);
2190 
2191  /* buggy avid, it puts EOI only at every 10th frame */
2192  if (!strncmp(cbuf, "AVID", 4)) {
2193  parse_avid(s, cbuf, len);
2194  } else if (!strcmp(cbuf, "CS=ITU601"))
2195  s->cs_itu601 = 1;
2196  else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
2197  (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
2198  s->flipped = 1;
2199  else if (!strcmp(cbuf, "MULTISCOPE II")) {
2200  s->avctx->sample_aspect_ratio = (AVRational) { 1, 2 };
2201  s->multiscope = 2;
2202  }
2203 
2204  av_free(cbuf);
2205  }
2206 
2207  return 0;
2208 }
2209 
2210 /* return the 8 bit start code value and update the search
2211  state. Return -1 if no start code found */
2212 static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
2213 {
2214  const uint8_t *buf_ptr;
2215  unsigned int v, v2;
2216  int val;
2217  int skipped = 0;
2218 
2219  buf_ptr = *pbuf_ptr;
2220  while (buf_end - buf_ptr > 1) {
2221  v = *buf_ptr++;
2222  v2 = *buf_ptr;
2223  if ((v == 0xff) && (v2 >= SOF0) && (v2 <= COM) && buf_ptr < buf_end) {
2224  val = *buf_ptr++;
2225  goto found;
2226  }
2227  skipped++;
2228  }
2229  buf_ptr = buf_end;
2230  val = -1;
2231 found:
2232  ff_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
2233  *pbuf_ptr = buf_ptr;
2234  return val;
2235 }
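A JPEG marker is a 0xFF byte followed by a non-zero code; 0xFF 0x00 is a stuffed data byte and extra 0xFF bytes are fill, which is why the loop above skips them. A tiny classifier along the same lines (illustrative helper; the stricter SOF0..COM range check used in find_marker is optional):

#include <stdint.h>

/* Returns 1 if the two bytes at p start a real marker
 * (0xFF followed by anything other than 0x00 or another 0xFF). */
static int is_jpeg_marker(const uint8_t p[2])
{
    return p[0] == 0xff && p[1] != 0x00 && p[1] != 0xff;
}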
2236 
2237 int ff_mjpeg_find_marker(MJpegDecodeContext *s,
2238  const uint8_t **buf_ptr, const uint8_t *buf_end,
2239  const uint8_t **unescaped_buf_ptr,
2240  int *unescaped_buf_size)
2241 {
2242  int start_code;
2243  start_code = find_marker(buf_ptr, buf_end);
2244 
2245  av_fast_padded_malloc(&s->buffer, &s->buffer_size, buf_end - *buf_ptr);
2246  if (!s->buffer)
2247  return AVERROR(ENOMEM);
2248 
2249  /* unescape buffer of SOS, use special treatment for JPEG-LS */
2250  if (start_code == SOS && !s->ls) {
2251  const uint8_t *src = *buf_ptr;
2252  const uint8_t *ptr = src;
2253  uint8_t *dst = s->buffer;
2254 
2255  #define copy_data_segment(skip) do { \
2256  ptrdiff_t length = (ptr - src) - (skip); \
2257  if (length > 0) { \
2258  memcpy(dst, src, length); \
2259  dst += length; \
2260  src = ptr; \
2261  } \
2262  } while (0)
2263 
2264  if (s->avctx->codec_id == AV_CODEC_ID_THP) {
2265  ptr = buf_end;
2266  copy_data_segment(0);
2267  } else {
2268  while (ptr < buf_end) {
2269  uint8_t x = *(ptr++);
2270 
2271  if (x == 0xff) {
2272  ptrdiff_t skip = 0;
2273  while (ptr < buf_end && x == 0xff) {
2274  x = *(ptr++);
2275  skip++;
2276  }
2277 
2278  /* 0xFF, 0xFF, ... */
2279  if (skip > 1) {
2280  copy_data_segment(skip);
2281 
2282  /* decrement src as it is equal to ptr after the
2283  * copy_data_segment macro and we might want to
2284  * copy the current value of x later on */
2285  src--;
2286  }
2287 
2288  if (x < RST0 || x > RST7) {
2289  copy_data_segment(1);
2290  if (x)
2291  break;
2292  }
2293  }
2294  }
2295  if (src < ptr)
2296  copy_data_segment(0);
2297  }
2298  #undef copy_data_segment
2299 
2300  *unescaped_buf_ptr = s->buffer;
2301  *unescaped_buf_size = dst - s->buffer;
2302  memset(s->buffer + *unescaped_buf_size, 0,
2303  s->buffer_size - *unescaped_buf_size);
2304 
2305  av_log(s->avctx, AV_LOG_DEBUG, "escaping removed %"PTRDIFF_SPECIFIER" bytes\n",
2306  (buf_end - *buf_ptr) - (dst - s->buffer));
2307  } else if (start_code == SOS && s->ls) {
2308  const uint8_t *src = *buf_ptr;
2309  uint8_t *dst = s->buffer;
2310  int bit_count = 0;
2311  int t = 0, b = 0;
2312  PutBitContext pb;
2313 
2314  /* find marker */
2315  while (src + t < buf_end) {
2316  uint8_t x = src[t++];
2317  if (x == 0xff) {
2318  while ((src + t < buf_end) && x == 0xff)
2319  x = src[t++];
2320  if (x & 0x80) {
2321  t -= FFMIN(2, t);
2322  break;
2323  }
2324  }
2325  }
2326  bit_count = t * 8;
2327  init_put_bits(&pb, dst, t);
2328 
2329  /* unescape bitstream */
2330  while (b < t) {
2331  uint8_t x = src[b++];
2332  put_bits(&pb, 8, x);
2333  if (x == 0xFF && b < t) {
2334  x = src[b++];
2335  if (x & 0x80) {
2336  av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
2337  x &= 0x7f;
2338  }
2339  put_bits(&pb, 7, x);
2340  bit_count--;
2341  }
2342  }
2343  flush_put_bits(&pb);
2344 
2345  *unescaped_buf_ptr = dst;
2346  *unescaped_buf_size = (bit_count + 7) >> 3;
2347  memset(s->buffer + *unescaped_buf_size, 0,
2348  s->buffer_size - *unescaped_buf_size);
2349  } else {
2350  *unescaped_buf_ptr = *buf_ptr;
2351  *unescaped_buf_size = buf_end - *buf_ptr;
2352  }
2353 
2354  return start_code;
2355 }
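The SOS unescaping above strips JPEG byte stuffing from the entropy-coded data: a 0xFF inside the scan is followed either by 0x00 (drop the stuffing byte) or by a restart marker 0xD0-0xD7 (kept, since the scan continues); any other marker ends the scan. A simplified standalone version of that rule, ignoring the AVID/THP special cases handled above (illustrative, not the decoder's actual helper):

#include <stddef.h>
#include <stdint.h>

/* Copy entropy-coded bytes from src to dst (dst at least n bytes large),
 * dropping 0x00 stuffing after 0xFF and stopping at the first marker that
 * is not a restart marker. Returns the number of bytes written. */
static size_t jpeg_unstuff(const uint8_t *src, size_t n, uint8_t *dst)
{
    size_t i = 0, o = 0;
    while (i < n) {
        uint8_t x = src[i++];
        if (x != 0xff) {
            dst[o++] = x;
            continue;
        }
        if (i >= n)
            break;
        uint8_t m = src[i++];
        if (m == 0x00) {                     /* stuffed data byte */
            dst[o++] = 0xff;
        } else if (m >= 0xd0 && m <= 0xd7) { /* RST0..RST7: keep, scan goes on */
            dst[o++] = 0xff;
            dst[o++] = m;
        } else {                             /* real marker: scan is over */
            break;
        }
    }
    return o;
}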
2356 
2357 static void reset_icc_profile(MJpegDecodeContext *s)
2358 {
2359  int i;
2360 
2361  if (s->iccentries) {
2362  for (i = 0; i < s->iccnum; i++)
2363  av_freep(&s->iccentries[i].data);
2364  av_freep(&s->iccentries);
2365  }
2366 
2367  s->iccread = 0;
2368  s->iccnum = 0;
2369 }
2370 
2371 int ff_mjpeg_decode_frame_from_buf(AVCodecContext *avctx, AVFrame *frame,
2372  int *got_frame, const AVPacket *avpkt,
2373  const uint8_t *buf, const int buf_size)
2374 {
2375  MJpegDecodeContext *s = avctx->priv_data;
2376  const uint8_t *buf_end, *buf_ptr;
2377  const uint8_t *unescaped_buf_ptr;
2378  int hshift, vshift;
2379  int unescaped_buf_size;
2380  int start_code;
2381  int i, index;
2382  int ret = 0;
2383  int is16bit;
2384  AVDictionaryEntry *e = NULL;
2385 
2386  s->force_pal8 = 0;
2387 
2388  s->buf_size = buf_size;
2389 
2390  av_dict_free(&s->exif_metadata);
2391  av_freep(&s->stereo3d);
2392  s->adobe_transform = -1;
2393 
2394  if (s->iccnum != 0)
2395  reset_icc_profile(s);
2396 
2397 redo_for_pal8:
2398  buf_ptr = buf;
2399  buf_end = buf + buf_size;
2400  while (buf_ptr < buf_end) {
2401  /* find start next marker */
2402  start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end,
2403  &unescaped_buf_ptr,
2404  &unescaped_buf_size);
2405  /* EOF */
2406  if (start_code < 0) {
2407  break;
2408  } else if (unescaped_buf_size > INT_MAX / 8) {
2409  av_log(avctx, AV_LOG_ERROR,
2410  "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
2411  start_code, unescaped_buf_size, buf_size);
2412  return AVERROR_INVALIDDATA;
2413  }
2414  av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%"PTRDIFF_SPECIFIER"\n",
2415  start_code, buf_end - buf_ptr);
2416 
2417  ret = init_get_bits8(&s->gb, unescaped_buf_ptr, unescaped_buf_size);
2418 
2419  if (ret < 0) {
2420  av_log(avctx, AV_LOG_ERROR, "invalid buffer\n");
2421  goto fail;
2422  }
2423 
2424  s->start_code = start_code;
2425  if (s->avctx->debug & FF_DEBUG_STARTCODE)
2426  av_log(avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code);
2427 
2428  /* process markers */
2429  if (start_code >= RST0 && start_code <= RST7) {
2430  av_log(avctx, AV_LOG_DEBUG,
2431  "restart marker: %d\n", start_code & 0x0f);
2432  /* APP fields */
2433  } else if (start_code >= APP0 && start_code <= APP15) {
2434  if ((ret = mjpeg_decode_app(s)) < 0)
2435  av_log(avctx, AV_LOG_ERROR, "unable to decode APP fields: %s\n",
2436  av_err2str(ret));
2437  /* Comment */
2438  } else if (start_code == COM) {
2439  ret = mjpeg_decode_com(s);
2440  if (ret < 0)
2441  return ret;
2442  } else if (start_code == DQT) {
2443  ret = ff_mjpeg_decode_dqt(s);
2444  if (ret < 0)
2445  return ret;
2446  }
2447 
2448  ret = -1;
2449 
2450  if (!CONFIG_JPEGLS_DECODER &&
2451  (start_code == SOF48 || start_code == LSE)) {
2452  av_log(avctx, AV_LOG_ERROR, "JPEG-LS support not enabled.\n");
2453  return AVERROR(ENOSYS);
2454  }
2455 
2456  if (avctx->skip_frame == AVDISCARD_ALL) {
2457  switch(start_code) {
2458  case SOF0:
2459  case SOF1:
2460  case SOF2:
2461  case SOF3:
2462  case SOF48:
2463  case SOI:
2464  case SOS:
2465  case EOI:
2466  break;
2467  default:
2468  goto skip;
2469  }
2470  }
2471 
2472  switch (start_code) {
2473  case SOI:
2474  s->restart_interval = 0;
2475  s->restart_count = 0;
2476  s->raw_image_buffer = buf_ptr;
2477  s->raw_image_buffer_size = buf_end - buf_ptr;
2478  /* nothing to do on SOI */
2479  break;
2480  case DHT:
2481  if ((ret = ff_mjpeg_decode_dht(s)) < 0) {
2482  av_log(avctx, AV_LOG_ERROR, "huffman table decode error\n");
2483  goto fail;
2484  }
2485  break;
2486  case SOF0:
2487  case SOF1:
2488  if (start_code == SOF0)
2489  s->avctx->profile = AV_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT;
2490  else
2491  s->avctx->profile = AV_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT;
2492  s->lossless = 0;
2493  s->ls = 0;
2494  s->progressive = 0;
2495  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2496  goto fail;
2497  break;
2498  case SOF2:
2499  s->avctx->profile = AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT;
2500  s->lossless = 0;
2501  s->ls = 0;
2502  s->progressive = 1;
2503  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2504  goto fail;
2505  break;
2506  case SOF3:
2507  s->avctx->profile = AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS;
2508  s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2509  s->lossless = 1;
2510  s->ls = 0;
2511  s->progressive = 0;
2512  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2513  goto fail;
2514  break;
2515  case SOF48:
2516  s->avctx->profile = AV_PROFILE_MJPEG_JPEG_LS;
2517  s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2518  s->lossless = 1;
2519  s->ls = 1;
2520  s->progressive = 0;
2521  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2522  goto fail;
2523  break;
2524  case LSE:
2525  if (!CONFIG_JPEGLS_DECODER ||
2526  (ret = ff_jpegls_decode_lse(s)) < 0)
2527  goto fail;
2528  if (ret == 1)
2529  goto redo_for_pal8;
2530  break;
2531  case EOI:
2532 eoi_parser:
2533  if (!avctx->hwaccel && avctx->skip_frame != AVDISCARD_ALL &&
2534  s->progressive && s->cur_scan && s->got_picture)
2535  mjpeg_idct_scan_progressive_ac(s);
2536  s->cur_scan = 0;
2537  if (!s->got_picture) {
2538  av_log(avctx, AV_LOG_WARNING,
2539  "Found EOI before any SOF, ignoring\n");
2540  break;
2541  }
2542  if (s->interlaced) {
2543  s->bottom_field ^= 1;
2544  /* if not bottom field, do not output image yet */
2545  if (s->bottom_field == !s->interlace_polarity)
2546  break;
2547  }
2548  if (avctx->skip_frame == AVDISCARD_ALL) {
2549  s->got_picture = 0;
2550  goto the_end_no_picture;
2551  }
2552  if (s->avctx->hwaccel) {
2553  ret = FF_HW_SIMPLE_CALL(s->avctx, end_frame);
2554  if (ret < 0)
2555  return ret;
2556 
2557  av_freep(&s->hwaccel_picture_private);
2558  }
2559  if ((ret = av_frame_ref(frame, s->picture_ptr)) < 0)
2560  return ret;
2561  *got_frame = 1;
2562  s->got_picture = 0;
2563 
2564  if (!s->lossless && avctx->debug & FF_DEBUG_QP) {
2565  int qp = FFMAX3(s->qscale[0],
2566  s->qscale[1],
2567  s->qscale[2]);
2568 
2569  av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", qp);
2570  }
2571 
2572  goto the_end;
2573  case SOS:
2574  s->raw_scan_buffer = buf_ptr;
2575  s->raw_scan_buffer_size = buf_end - buf_ptr;
2576 
2577  s->cur_scan++;
2578  if (avctx->skip_frame == AVDISCARD_ALL) {
2579  skip_bits(&s->gb, get_bits_left(&s->gb));
2580  break;
2581  }
2582 
2583  if ((ret = ff_mjpeg_decode_sos(s, NULL, 0, NULL)) < 0 &&
2584  (avctx->err_recognition & AV_EF_EXPLODE))
2585  goto fail;
2586  break;
2587  case DRI:
2588  if ((ret = mjpeg_decode_dri(s)) < 0)
2589  return ret;
2590  break;
2591  case SOF5:
2592  case SOF6:
2593  case SOF7:
2594  case SOF9:
2595  case SOF10:
2596  case SOF11:
2597  case SOF13:
2598  case SOF14:
2599  case SOF15:
2600  case JPG:
2601  av_log(avctx, AV_LOG_ERROR,
2602  "mjpeg: unsupported coding type (%x)\n", start_code);
2603  break;
2604  }
2605 
2606 skip:
2607  /* eof process start code */
2608  buf_ptr += (get_bits_count(&s->gb) + 7) / 8;
2609  av_log(avctx, AV_LOG_DEBUG,
2610  "marker parser used %d bytes (%d bits)\n",
2611  (get_bits_count(&s->gb) + 7) / 8, get_bits_count(&s->gb));
2612  }
2613  if (s->got_picture && s->cur_scan) {
2614  av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n");
2615  goto eoi_parser;
2616  }
2617  av_log(avctx, AV_LOG_FATAL, "No JPEG data found in image\n");
2618  return AVERROR_INVALIDDATA;
2619 fail:
2620  s->got_picture = 0;
2621  return ret;
2622 the_end:
2623 
2624  is16bit = av_pix_fmt_desc_get(s->avctx->pix_fmt)->comp[0].step > 1;
2625 
2626  if (AV_RB32(s->upscale_h)) {
2627  int p;
2628  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2629  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2630  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2631  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2632  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2633  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2634  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2635  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2636  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2637  avctx->pix_fmt == AV_PIX_FMT_YUV420P16||
2638  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2639  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2640  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2641  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2642  );
2643  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2644  if (ret)
2645  return ret;
2646 
2647  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2648  for (p = 0; p<s->nb_components; p++) {
2649  uint8_t *line = s->picture_ptr->data[p];
2650  int w = s->width;
2651  int h = s->height;
2652  if (!s->upscale_h[p])
2653  continue;
2654  if (p==1 || p==2) {
2655  w = AV_CEIL_RSHIFT(w, hshift);
2656  h = AV_CEIL_RSHIFT(h, vshift);
2657  }
2658  if (s->upscale_v[p] == 1)
2659  h = (h+1)>>1;
2660  av_assert0(w > 0);
2661  for (i = 0; i < h; i++) {
2662  if (s->upscale_h[p] == 1) {
2663  if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
2664  else line[w - 1] = line[(w - 1) / 2];
2665  for (index = w - 2; index > 0; index--) {
2666  if (is16bit)
2667  ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
2668  else
2669  line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
2670  }
2671  } else if (s->upscale_h[p] == 2) {
2672  if (is16bit) {
2673  ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
2674  if (w > 1)
2675  ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
2676  } else {
2677  line[w - 1] = line[(w - 1) / 3];
2678  if (w > 1)
2679  line[w - 2] = line[w - 1];
2680  }
2681  for (index = w - 3; index > 0; index--) {
2682  line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
2683  }
2684  } else if (s->upscale_h[p] == 4){
2685  if (is16bit) {
2686  uint16_t *line16 = (uint16_t *) line;
2687  line16[w - 1] = line16[(w - 1) >> 2];
2688  if (w > 1)
2689  line16[w - 2] = (line16[(w - 1) >> 2] * 3 + line16[(w - 2) >> 2]) >> 2;
2690  if (w > 2)
2691  line16[w - 3] = (line16[(w - 1) >> 2] + line16[(w - 2) >> 2]) >> 1;
2692  } else {
2693  line[w - 1] = line[(w - 1) >> 2];
2694  if (w > 1)
2695  line[w - 2] = (line[(w - 1) >> 2] * 3 + line[(w - 2) >> 2]) >> 2;
2696  if (w > 2)
2697  line[w - 3] = (line[(w - 1) >> 2] + line[(w - 2) >> 2]) >> 1;
2698  }
2699  for (index = w - 4; index > 0; index--)
2700  line[index] = (line[(index + 3) >> 2] + line[(index + 2) >> 2]
2701  + line[(index + 1) >> 2] + line[index >> 2]) >> 2;
2702  }
2703  line += s->linesize[p];
2704  }
2705  }
2706  }
2707  if (AV_RB32(s->upscale_v)) {
2708  int p;
2709  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2710  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2711  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2712  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2713  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2714  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2715  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2716  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2717  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2718  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2719  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2720  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2721  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2722  );
2723  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2724  if (ret)
2725  return ret;
2726 
2727  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2728  for (p = 0; p < s->nb_components; p++) {
2729  uint8_t *dst;
2730  int w = s->width;
2731  int h = s->height;
2732  if (!s->upscale_v[p])
2733  continue;
2734  if (p==1 || p==2) {
2735  w = AV_CEIL_RSHIFT(w, hshift);
2736  h = AV_CEIL_RSHIFT(h, vshift);
2737  }
2738  dst = &((uint8_t *)s->picture_ptr->data[p])[(h - 1) * s->linesize[p]];
2739  for (i = h - 1; i; i--) {
2740  uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[i * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2741  uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2742  if (s->upscale_v[p] != 2 && (src1 == src2 || i == h - 1)) {
2743  memcpy(dst, src1, w);
2744  } else {
2745  for (index = 0; index < w; index++)
2746  dst[index] = (src1[index] + src2[index]) >> 1;
2747  }
2748  dst -= s->linesize[p];
2749  }
2750  }
2751  }
2752  if (s->flipped && !s->rgb) {
2753  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2754  if (ret)
2755  return ret;
2756 
2757  av_assert0(s->nb_components == av_pix_fmt_count_planes(frame->format));
2758  for (index=0; index<s->nb_components; index++) {
2759  int h = frame->height;
2760  if (index && index < 3)
2761  h = AV_CEIL_RSHIFT(h, vshift);
2762  if (frame->data[index]) {
2763  frame->data[index] += (h - 1) * frame->linesize[index];
2764  frame->linesize[index] *= -1;
2765  }
2766  }
2767  }
2768 
2769  if (s->avctx->pix_fmt == AV_PIX_FMT_GBRP) {
2770  av_assert0(s->nb_components == 3);
2771  FFSWAP(uint8_t *, frame->data[0], frame->data[2]);
2772  FFSWAP(uint8_t *, frame->data[0], frame->data[1]);
2773  FFSWAP(int, frame->linesize[0], frame->linesize[2]);
2774  FFSWAP(int, frame->linesize[0], frame->linesize[1]);
2775  }
2776 
2777  if (s->adobe_transform == 0 && s->avctx->pix_fmt == AV_PIX_FMT_GBRAP) {
2778  int w = s->picture_ptr->width;
2779  int h = s->picture_ptr->height;
2780  av_assert0(s->nb_components == 4);
2781  for (i=0; i<h; i++) {
2782  int j;
2783  uint8_t *dst[4];
2784  for (index=0; index<4; index++) {
2785  dst[index] = s->picture_ptr->data[index]
2786  + s->picture_ptr->linesize[index]*i;
2787  }
2788  for (j=0; j<w; j++) {
2789  int k = dst[3][j];
2790  int r = dst[0][j] * k;
2791  int g = dst[1][j] * k;
2792  int b = dst[2][j] * k;
2793  dst[0][j] = g*257 >> 16;
2794  dst[1][j] = b*257 >> 16;
2795  dst[2][j] = r*257 >> 16;
2796  dst[3][j] = 255;
2797  }
2798  }
2799  }
2800  if (s->adobe_transform == 2 && s->avctx->pix_fmt == AV_PIX_FMT_YUVA444P) {
2801  int w = s->picture_ptr->width;
2802  int h = s->picture_ptr->height;
2803  av_assert0(s->nb_components == 4);
2804  for (i=0; i<h; i++) {
2805  int j;
2806  uint8_t *dst[4];
2807  for (index=0; index<4; index++) {
2808  dst[index] = s->picture_ptr->data[index]
2809  + s->picture_ptr->linesize[index]*i;
2810  }
2811  for (j=0; j<w; j++) {
2812  int k = dst[3][j];
2813  int r = (255 - dst[0][j]) * k;
2814  int g = (128 - dst[1][j]) * k;
2815  int b = (128 - dst[2][j]) * k;
2816  dst[0][j] = r*257 >> 16;
2817  dst[1][j] = (g*257 >> 16) + 128;
2818  dst[2][j] = (b*257 >> 16) + 128;
2819  dst[3][j] = 255;
2820  }
2821  }
2822  }
2823 
2824  if (s->stereo3d) {
2825  AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
2826  if (stereo) {
2827  stereo->type = s->stereo3d->type;
2828  stereo->flags = s->stereo3d->flags;
2829  }
2830  av_freep(&s->stereo3d);
2831  }
2832 
2833  if (s->iccnum != 0 && s->iccnum == s->iccread) {
2834  AVFrameSideData *sd;
2835  size_t offset = 0;
2836  int total_size = 0;
2837  int i;
2838 
2839  /* Sum size of all parts. */
2840  for (i = 0; i < s->iccnum; i++)
2841  total_size += s->iccentries[i].length;
2842 
2843  sd = av_frame_new_side_data(frame, AV_FRAME_DATA_ICC_PROFILE, total_size);
2844  if (!sd) {
2845  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2846  return AVERROR(ENOMEM);
2847  }
2848 
2849  /* Reassemble the parts, which are now in-order. */
2850  for (i = 0; i < s->iccnum; i++) {
2851  memcpy(sd->data + offset, s->iccentries[i].data, s->iccentries[i].length);
2852  offset += s->iccentries[i].length;
2853  }
2854  }
2855 
2856  if (e = av_dict_get(s->exif_metadata, "Orientation", e, AV_DICT_IGNORE_SUFFIX)) {
2857  char *value = e->value + strspn(e->value, " \n\t\r"), *endptr;
2858  int orientation = strtol(value, &endptr, 0);
2859 
2860  if (!*endptr) {
2861  AVFrameSideData *sd = NULL;
2862 
2863  if (orientation >= 2 && orientation <= 8) {
2864  int32_t *matrix;
2865 
2866  sd = av_frame_new_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX, sizeof(int32_t) * 9);
2867  if (!sd) {
2868  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2869  return AVERROR(ENOMEM);
2870  }
2871 
2872  matrix = (int32_t *)sd->data;
2873 
2874  switch (orientation) {
2875  case 2:
2878  break;
2879  case 3:
2881  break;
2882  case 4:
2885  break;
2886  case 5:
2889  break;
2890  case 6:
2892  break;
2893  case 7:
2896  break;
2897  case 8:
2899  break;
2900  default:
2901  av_assert0(0);
2902  }
2903  }
2904  }
2905  }
2906 
2907  av_dict_copy(&frame->metadata, s->exif_metadata, 0);
2908  av_dict_free(&s->exif_metadata);
2909 
2910  if (avctx->codec_id != AV_CODEC_ID_SMVJPEG &&
2911  (avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
2912  avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
2913  avctx->coded_height > s->orig_height) {
2914  frame->height = AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres);
2915  frame->crop_top = frame->height - avctx->height;
2916  }
2917 
2918 the_end_no_picture:
2919  av_log(avctx, AV_LOG_DEBUG, "decode frame unused %"PTRDIFF_SPECIFIER" bytes\n",
2920  buf_end - buf_ptr);
2921  return buf_ptr - buf;
2922 }
2923 
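The Orientation handling above maps EXIF tag values 2-8 onto a display-matrix side-data entry. For reference, the tag values are defined in TIFF/EXIF terms of where the stored 0th row and 0th column end up visually; a descriptive lookup sketch (informational only, not the matrix math performed above):

/* EXIF/TIFF Orientation values; 1 is the identity, 2..8 need a flip
 * and/or rotation before display. */
static const char *const exif_orientation_desc[9] = {
    [1] = "row 0 at top, column 0 at left (normal)",
    [2] = "row 0 at top, column 0 at right (horizontal mirror)",
    [3] = "row 0 at bottom, column 0 at right (180 degree rotation)",
    [4] = "row 0 at bottom, column 0 at left (vertical mirror)",
    [5] = "row 0 at left, column 0 at top (transpose)",
    [6] = "row 0 at right, column 0 at top (rotate 90 degrees clockwise to display)",
    [7] = "row 0 at right, column 0 at bottom (transverse)",
    [8] = "row 0 at left, column 0 at bottom (rotate 90 degrees counter-clockwise to display)",
};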
2924 int ff_mjpeg_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame,
2925  AVPacket *avpkt)
2926 {
2927  return ff_mjpeg_decode_frame_from_buf(avctx, frame, got_frame,
2928  avpkt, avpkt->data, avpkt->size);
2929 }
2930 
2931 
2932 /* mxpeg may call the following function (with a blank MJpegDecodeContext)
2933  * even without having called ff_mjpeg_decode_init(). */
2934 av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
2935 {
2936  MJpegDecodeContext *s = avctx->priv_data;
2937  int i, j;
2938 
2939  if (s->interlaced && s->bottom_field == !s->interlace_polarity && s->got_picture && !avctx->frame_num) {
2940  av_log(avctx, AV_LOG_INFO, "Single field\n");
2941  }
2942 
2943  if (s->picture) {
2944  av_frame_free(&s->picture);
2945  s->picture_ptr = NULL;
2946  } else if (s->picture_ptr)
2947  av_frame_unref(s->picture_ptr);
2948 
2949  av_frame_free(&s->smv_frame);
2950 
2951  av_freep(&s->buffer);
2952  av_freep(&s->stereo3d);
2953  av_freep(&s->ljpeg_buffer);
2954  s->ljpeg_buffer_size = 0;
2955 
2956  for (i = 0; i < 3; i++) {
2957  for (j = 0; j < 4; j++)
2958  ff_vlc_free(&s->vlcs[i][j]);
2959  }
2960  for (i = 0; i < MAX_COMPONENTS; i++) {
2961  av_freep(&s->blocks[i]);
2962  av_freep(&s->last_nnz[i]);
2963  }
2964  av_dict_free(&s->exif_metadata);
2965 
2966  reset_icc_profile(s);
2967 
2968  av_freep(&s->hwaccel_picture_private);
2969  av_freep(&s->jls_state);
2970 
2971  return 0;
2972 }
2973 
2974 static void decode_flush(AVCodecContext *avctx)
2975 {
2976  MJpegDecodeContext *s = avctx->priv_data;
2977  s->got_picture = 0;
2978 
2979  s->smv_next_frame = 0;
2980  av_frame_unref(s->smv_frame);
2981 }
2982 
2983 #if CONFIG_MJPEG_DECODER
2984 #define OFFSET(x) offsetof(MJpegDecodeContext, x)
2985 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
2986 static const AVOption options[] = {
2987  { "extern_huff", "Use external huffman table.",
2988  OFFSET(extern_huff), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
2989  { NULL },
2990 };
2991 
2992 static const AVClass mjpegdec_class = {
2993  .class_name = "MJPEG decoder",
2994  .item_name = av_default_item_name,
2995  .option = options,
2996  .version = LIBAVUTIL_VERSION_INT,
2997 };
2998 
2999 const FFCodec ff_mjpeg_decoder = {
3000  .p.name = "mjpeg",
3001  CODEC_LONG_NAME("MJPEG (Motion JPEG)"),
3002  .p.type = AVMEDIA_TYPE_VIDEO,
3003  .p.id = AV_CODEC_ID_MJPEG,
3004  .priv_data_size = sizeof(MJpegDecodeContext),
3005  .init = ff_mjpeg_decode_init,
3006  .close = ff_mjpeg_decode_end,
3007  FF_CODEC_DECODE_CB(ff_mjpeg_decode_frame),
3008  .flush = decode_flush,
3009  .p.capabilities = AV_CODEC_CAP_DR1,
3010  .p.max_lowres = 3,
3011  .p.priv_class = &mjpegdec_class,
3012  .p.profiles = NULL_IF_CONFIG_SMALL(ff_mjpeg_profiles),
3013  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
3016  .hw_configs = (const AVCodecHWConfigInternal *const []) {
3017 #if CONFIG_MJPEG_NVDEC_HWACCEL
3018  HWACCEL_NVDEC(mjpeg),
3019 #endif
3020 #if CONFIG_MJPEG_VAAPI_HWACCEL
3021  HWACCEL_VAAPI(mjpeg),
3022 #endif
3023  NULL
3024  },
3025 };
3026 #endif
3027 #if CONFIG_THP_DECODER
3028 const FFCodec ff_thp_decoder = {
3029  .p.name = "thp",
3030  CODEC_LONG_NAME("Nintendo Gamecube THP video"),
3031  .p.type = AVMEDIA_TYPE_VIDEO,
3032  .p.id = AV_CODEC_ID_THP,
3033  .priv_data_size = sizeof(MJpegDecodeContext),
3034  .init = ff_mjpeg_decode_init,
3035  .close = ff_mjpeg_decode_end,
3036  FF_CODEC_DECODE_CB(ff_mjpeg_decode_frame),
3037  .flush = decode_flush,
3038  .p.capabilities = AV_CODEC_CAP_DR1,
3039  .p.max_lowres = 3,
3040  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
3041 };
3042 #endif
3043 
3044 #if CONFIG_SMVJPEG_DECODER
3045 // SMV JPEG just stacks several output frames into one JPEG picture
3046 // we handle that by setting up the cropping parameters appropriately
3047 static void smv_process_frame(AVCodecContext *avctx, AVFrame *frame)
3048 {
3049  MJpegDecodeContext *s = avctx->priv_data;
3050 
3051  av_assert0((s->smv_next_frame + 1) * avctx->height <= avctx->coded_height);
3052 
3053  frame->width = avctx->coded_width;
3054  frame->height = avctx->coded_height;
3055  frame->crop_top = FFMIN(s->smv_next_frame * avctx->height, frame->height);
3056  frame->crop_bottom = frame->height - (s->smv_next_frame + 1) * avctx->height;
3057 
3058  if (s->smv_frame->pts != AV_NOPTS_VALUE)
3059  s->smv_frame->pts += s->smv_frame->duration;
3060  s->smv_next_frame = (s->smv_next_frame + 1) % s->smv_frames_per_jpeg;
3061 
3062  if (s->smv_next_frame == 0)
3063  av_frame_unref(s->smv_frame);
3064 }
3065 
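Because SMV stacks several output frames vertically inside one coded JPEG, the function above only has to pick the n-th strip of height avctx->height via the crop fields. The same arithmetic in isolation (toy sketch, illustrative names):

/* Crop window exposing frame `idx` out of a coded picture that stacks
 * frames of height frame_h; coded_h is the full coded height. */
struct strip_crop { int top, bottom; };

static struct strip_crop smv_strip_crop(int coded_h, int frame_h, int idx)
{
    struct strip_crop c;
    c.top    = idx * frame_h;                 /* rows hidden above the strip */
    c.bottom = coded_h - (idx + 1) * frame_h; /* rows hidden below the strip */
    return c;
}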
3066 static int smvjpeg_receive_frame(AVCodecContext *avctx, AVFrame *frame)
3067 {
3068  MJpegDecodeContext *s = avctx->priv_data;
3069  AVPacket *const pkt = avctx->internal->in_pkt;
3070  int got_frame = 0;
3071  int ret;
3072 
3073  if (s->smv_next_frame > 0)
3074  goto return_frame;
3075 
3076  ret = ff_decode_get_packet(avctx, pkt);
3077  if (ret < 0)
3078  return ret;
3079 
3080  av_frame_unref(s->smv_frame);
3081 
3082  ret = ff_mjpeg_decode_frame(avctx, s->smv_frame, &got_frame, pkt);
3083  s->smv_frame->pkt_dts = pkt->dts;
3084  av_packet_unref(pkt);
3085  if (ret < 0)
3086  return ret;
3087 
3088  if (!got_frame)
3089  return AVERROR(EAGAIN);
3090 
3091  // packet duration covers all the frames in the packet
3092  s->smv_frame->duration /= s->smv_frames_per_jpeg;
3093 
3094 return_frame:
3095  av_assert0(s->smv_frame->buf[0]);
3096  ret = av_frame_ref(frame, s->smv_frame);
3097  if (ret < 0)
3098  return ret;
3099 
3100  smv_process_frame(avctx, frame);
3101  return 0;
3102 }
3103 
3104 const FFCodec ff_smvjpeg_decoder = {
3105  .p.name = "smvjpeg",
3106  CODEC_LONG_NAME("SMV JPEG"),
3107  .p.type = AVMEDIA_TYPE_VIDEO,
3108  .p.id = AV_CODEC_ID_SMVJPEG,
3109  .priv_data_size = sizeof(MJpegDecodeContext),
3110  .init = ff_mjpeg_decode_init,
3111  .close = ff_mjpeg_decode_end,
3112  FF_CODEC_RECEIVE_FRAME_CB(smvjpeg_receive_frame),
3113  .flush = decode_flush,
3114  .p.capabilities = AV_CODEC_CAP_DR1,
3115  .caps_internal = FF_CODEC_CAP_EXPORTS_CROPPING |
3117 };
3118 #endif
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:88
hwconfig.h
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:423
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1435
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:278
ff_decode_get_packet
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
Definition: decode.c:241
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:253
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
jpegtables.h
mjpeg.h
level
uint8_t level
Definition: svq3.c:204
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
show_bits_long
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
Definition: get_bits.h:495
blockdsp.h
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:694
r
const char * r
Definition: vf_curves.c:126
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
decode_slice
static int decode_slice(AVCodecContext *c, void *arg)
Definition: ffv1dec.c:255
opt.h
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1029
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1264
out
FILE * out
Definition: movenc.c:54
SOS
@ SOS
Definition: mjpeg.h:72
mjpeg_copy_block
static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s, uint8_t *dst, const uint8_t *src, int linesize, int lowres)
Definition: mjpegdec.c:1426
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
SOF48
@ SOF48
JPEG-LS.
Definition: mjpeg.h:103
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:812
GetByteContext
Definition: bytestream.h:33
APP1
@ APP1
Definition: mjpeg.h:80
decode_flush
static void decode_flush(AVCodecContext *avctx)
Definition: mjpegdec.c:2974
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2964
ZERO_RUN
#define ZERO_RUN
Definition: mjpegdec.c:982
SOF0
@ SOF0
Definition: mjpeg.h:39
matrix
Definition: vc1dsp.c:42
src1
const pixel * src1
Definition: h264pred_template.c:421
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1412
GET_VLC
#define GET_VLC(code, name, gb, table, bits, max_depth)
If the vlc code is invalid and max_depth=1, then no bits will be removed.
Definition: get_bits.h:574
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:421
ff_smvjpeg_decoder
const FFCodec ff_smvjpeg_decoder
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
se
#define se(name, range_min, range_max)
Definition: cbs_h2645.c:259
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:266
init_idct
static void init_idct(AVCodecContext *avctx)
Definition: mjpegdec.c:115
RST7
@ RST7
Definition: mjpeg.h:68
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:100
mjpegdec.h
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:218
bytestream2_seek
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:340
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:222
AVFrame::width
int width
Definition: frame.h:412
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:511
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:673
ff_mjpeg_decoder
const FFCodec ff_mjpeg_decoder
av_display_matrix_flip
void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip)
Flip the input matrix horizontally and/or vertically.
Definition: display.c:66
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:491
SOF11
@ SOF11
Definition: mjpeg.h:50
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:1061
AVOption
AVOption.
Definition: opt.h:251
b
#define b
Definition: input.c:41
mjpeg_decode_dc
static int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
Definition: mjpegdec.c:816
jpeglsdec.h
data
const char data[16]
Definition: mxf.c:148
AVComponentDescriptor::step
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:40
AV_DICT_IGNORE_SUFFIX
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key,...
Definition: dict.h:75
ff_mjpeg_val_dc
const uint8_t ff_mjpeg_val_dc[]
Definition: jpegtabs.h:34
FFCodec
Definition: codec_internal.h:127
FF_HW_SIMPLE_CALL
#define FF_HW_SIMPLE_CALL(avctx, function)
Definition: hwaccel_internal.h:174
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
av_display_rotation_set
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure clockwise rotation by the specified angle (in de...
Definition: display.c:51
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:225
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
ff_mjpeg_bits_ac_chrominance
const uint8_t ff_mjpeg_bits_ac_chrominance[]
Definition: jpegtabs.h:66
AV_CODEC_ID_THP
@ AV_CODEC_ID_THP
Definition: codec_id.h:152
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:94
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:514
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:228
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1389
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:361
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:641
APP15
@ APP15
Definition: mjpeg.h:94
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:263
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:381
ff_permute_scantable
av_cold void ff_permute_scantable(uint8_t dst[64], const uint8_t src[64], const uint8_t permutation[64])
Definition: idctdsp.c:30
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:64
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3004
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:605
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:335
rgb
Definition: rpzaenc.c:60
ff_mjpeg_decode_dht
int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
Definition: mjpegdec.c:241
ljpeg_decode_yuv_scan
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point_transform, int nb_components)
Definition: mjpegdec.c:1272
shift_output
static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
Definition: mjpegdec.c:1442
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
FFHWAccel
Definition: hwaccel_internal.h:34
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:205
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:450
ff_mjpeg_decode_init
av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
Definition: mjpegdec.c:124
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1766
fail
#define fail()
Definition: checkasm.h:138
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:52
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:513
SOF3
@ SOF3
Definition: mjpeg.h:42
GetBitContext
Definition: get_bits.h:108
ff_mjpeg_decode_frame_from_buf
int ff_mjpeg_decode_frame_from_buf(AVCodecContext *avctx, AVFrame *frame, int *got_frame, const AVPacket *avpkt, const uint8_t *buf, const int buf_size)
Definition: mjpegdec.c:2371
mjpeg_decode_com
static int mjpeg_decode_com(MJpegDecodeContext *s)
Definition: mjpegdec.c:2172
init_default_huffman_tables
static int init_default_huffman_tables(MJpegDecodeContext *s)
Definition: mjpegdec.c:61
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:521
val
static double val(void *priv, double ch)
Definition: aeval.c:78
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2992
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:636
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:452
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:202
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:88
AV_PIX_FMT_YUVJ411P
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
Definition: pixfmt.h:276
ff_mjpeg_profiles
const AVProfile ff_mjpeg_profiles[]
Definition: profiles.c:182
FFHWAccel::start_frame
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: hwaccel_internal.h:59
ff_exif_decode_ifd
int ff_exif_decode_ifd(void *logctx, GetByteContext *gbytes, int le, int depth, AVDictionary **metadata)
Definition: exif.c:243
aligned
static int aligned(int val)
Definition: dashdec.c:170
avassert.h
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
decode_dc_progressive
static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, uint16_t *quant_matrix, int Al)
Definition: mjpegdec.c:883
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:480
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:545
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1905
AV_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
Definition: defs.h:171
COM
@ COM
Definition: mjpeg.h:111
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:628
AV_FIELD_UNKNOWN
@ AV_FIELD_UNKNOWN
Definition: defs.h:199
mask
static const uint16_t mask[17]
Definition: lzw.c:38
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:62
handle_rstn
static int handle_rstn(MJpegDecodeContext *s, int nb_components)
Definition: mjpegdec.c:1065
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
FFHWAccel::frame_priv_data_size
int frame_priv_data_size
Size of per-frame hardware accelerator private data.
Definition: hwaccel_internal.h:106
emms_c
#define emms_c()
Definition: emms.h:63
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:188
SOF5
@ SOF5
Definition: mjpeg.h:44
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:543
width
#define width
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:306
AV_STEREO3D_LINES
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
Definition: stereo3d.h:126
ff_blockdsp_init
av_cold void ff_blockdsp_init(BlockDSPContext *c)
Definition: blockdsp.c:58
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
parse_avid
static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
Definition: mjpegdec.c:104
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:481
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:51
g
const char * g
Definition: vf_curves.c:127
APP3
@ APP3
Definition: mjpeg.h:82
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:421
ff_jpegls_decode_picture
int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transform, int ilv)
Definition: jpeglsdec.c:354
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts_bsf.c:365
bits
uint8_t bits
Definition: vp3data.h:128
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:479
RST0
@ RST0
Definition: mjpeg.h:61
decode.h
reset_icc_profile
static void reset_icc_profile(MJpegDecodeContext *s)
Definition: mjpegdec.c:2357
ff_mjpeg_decode_end
av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
Definition: mjpegdec.c:2934
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
PutBitContext
Definition: put_bits.h:50
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:272
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
frame
static AVFrame * frame
Definition: demux_decode.c:54
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:451
AVStereo3D::flags
int flags
Additional information about the frame packing.
Definition: stereo3d.h:182
if
if(ret)
Definition: filter_design.txt:179
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:219
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:487
AV_PIX_FMT_RGBA64
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:458
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
av_clip_int16
#define av_clip_int16
Definition: common.h:111
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:140
AV_PIX_FMT_BGR48
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:459
NULL
#define NULL
Definition: coverity.c:32
mjpeg_idct_scan_progressive_ac
static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
Definition: mjpegdec.c:1633
copy_block2
static void copy_block2(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:27
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
run
uint8_t run
Definition: svq3.c:203
hwaccel_internal.h
AV_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
Definition: defs.h:172
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
ff_mjpeg_decode_dqt
int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
Definition: mjpegdec.c:198
SOF13
@ SOF13
Definition: mjpeg.h:52
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:476
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
tiff.h
tiff_common.h
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:237
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:388
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
profiles.h
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:144
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:247
MJpegDecodeContext
Definition: mjpegdec.h:54
mjpeg_decode_scan
static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, int Al, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1457
decode_block_refinement
static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:1000
lowres
static int lowres
Definition: ffplay.c:332
mjpeg_decode_scan_progressive_ac
static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int se, int Ah, int Al)
Definition: mjpegdec.c:1578
ff_mjpeg_val_ac_chrominance
const uint8_t ff_mjpeg_val_ac_chrominance[]
Definition: jpegtabs.h:69
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:652
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:94
DRI
@ DRI
Definition: mjpeg.h:75
index
int index
Definition: gxfenc.c:89
c
(cross-reference tooltip pulled in a long excerpt from doc/undefined.txt on undefined behavior and signed integer overflow; omitted here)
Definition: undefined.txt:32
AVFrame::crop_bottom
size_t crop_bottom
Definition: frame.h:779
bytestream2_tell
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:192
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
copy_data_segment
#define copy_data_segment(skip)
AVCodecContext::lowres
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1524
options
const OptionDef options[]
copy_mb
static void copy_mb(CinepakEncContext *s, uint8_t *a_data[4], int a_linesize[4], uint8_t *b_data[4], int b_linesize[4])
Definition: cinepakenc.c:507
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1617
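As a brief, hedged illustration of the libavcodec-internal buffer request indexed above (not code from mjpegdec.c; alloc_output is an illustrative name), a decoder that has already set width, height and pix_fmt on its context can ask for a reference-counted output frame like this:

    #include "avcodec.h"
    #include "decode.h"

    /* avctx->width, avctx->height and avctx->pix_fmt must already be valid */
    static int alloc_output(AVCodecContext *avctx, AVFrame *frame)
    {
        return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
    }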
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
ljpeg_decode_rgb_scan
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
Definition: mjpegdec.c:1100
ff_mjpeg_val_ac_luminance
const uint8_t ff_mjpeg_val_ac_luminance[]
Definition: jpegtabs.h:42
AVPacket::size
int size
Definition: packet.h:492
dc
(cross-reference tooltip pulled in a long excerpt from doc/snow.txt covering the Snow bitstream structure, range coder and intra DC prediction; omitted here)
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:106
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:361
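As a minimal, hedged sketch of the two frame-reference helpers indexed in this section (not code from mjpegdec.c; output_reference is a made-up name), a decoder can drop whatever a destination frame currently holds and make it share the buffers of an internally kept frame:

    #include <libavutil/frame.h>

    static int output_reference(AVFrame *dst, const AVFrame *kept)
    {
        int ret;

        av_frame_unref(dst);           /* drop whatever dst referenced before */
        ret = av_frame_ref(dst, kept); /* dst now shares kept's data buffers  */
        if (ret < 0)
            return ret;                /* typically AVERROR(ENOMEM)           */
        return 0;
    }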
codec_internal.h
SOF14
@ SOF14
Definition: mjpeg.h:53
ff_jpegls_decode_lse
int ff_jpegls_decode_lse(MJpegDecodeContext *s)
Decode LSE block with initialization parameters.
Definition: jpeglsdec.c:50
ff_mjpeg_decode_frame
int ff_mjpeg_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: mjpegdec.c:2924
av_bswap32
#define av_bswap32
Definition: bswap.h:28
decode_block_progressive
static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:901
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:121
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
ff_mjpeg_decode_sos
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1668
AV_PROFILE_MJPEG_JPEG_LS
#define AV_PROFILE_MJPEG_JPEG_LS
Definition: defs.h:175
ff_mjpeg_bits_ac_luminance
const uint8_t ff_mjpeg_bits_ac_luminance[]
Definition: jpegtabs.h:40
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: codec_internal.h:60
size
int size
Definition: twinvq_data.h:10344
AV_CODEC_ID_SMVJPEG
@ AV_CODEC_ID_SMVJPEG
Definition: codec_id.h:267
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
AV_RB32
big-endian 32-bit bytestream read macro (declaration garbled by macro template expansion)
Definition: bytestream.h:96
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: codec_internal.h:54
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:248
SOF15
@ SOF15
Definition: mjpeg.h:54
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:427
AVCodecHWConfigInternal
Definition: hwconfig.h:25
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:177
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:490
height
#define height
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:167
VD
#define VD
Definition: av1dec.c:1472
offset
(cross-reference tooltip pulled in running text from doc/writing_filters.txt; omitted here)
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
get_xbits
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
Definition: get_bits.h:292
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:225
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:68
predictor
static void predictor(uint8_t *src, ptrdiff_t size)
Definition: exrenc.c:170
find_marker
static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
Definition: mjpegdec.c:2212
AV_STEREO3D_FLAG_INVERT
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:164
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:119
DQT
@ DQT
Definition: mjpeg.h:73
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
ff_thp_decoder
const FFCodec ff_thp_decoder
AVCodec::id
enum AVCodecID id
Definition: codec.h:201
layout
(cross-reference tooltip pulled in running text from doc/filter_design.txt on supported formats and channel layouts; omitted here)
Definition: filter_design.txt:18
SOF10
@ SOF10
Definition: mjpeg.h:49
emms.h
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
NEG_USR32
#define NEG_USR32(a, s)
Definition: mathops.h:178
copy_block4
static void copy_block4(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:37
interlaced
uint8_t interlaced
Definition: mxfenc.c:2152
decode_block
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, int ac_index, uint16_t *quant_matrix)
Definition: mjpegdec.c:834
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
code
(cross-reference tooltip pulled in running text from doc/filter_design.txt on forwarding output status; omitted here)
Definition: filter_design.txt:178
EOI
@ EOI
Definition: mjpeg.h:71
copy_block.h
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:542
AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS
#define AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS
Definition: defs.h:174
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:371
src2
const pixel * src2
Definition: h264pred_template.c:422
display.h
AV_FIELD_BB
@ AV_FIELD_BB
Bottom coded first, bottom displayed first.
Definition: defs.h:202
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:76
mjpeg_decode_dri
static int mjpeg_decode_dri(MJpegDecodeContext *s)
Definition: mjpegdec.c:1843
AVCodecInternal::in_pkt
AVPacket * in_pkt
This packet is used to hold the packet given to decoders implementing the .decode API; it is unused b...
Definition: internal.h:77
MIN_CACHE_BITS
#define MIN_CACHE_BITS
Definition: get_bits.h:169
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour as av_fast_malloc(), but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:52
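A hedged sketch of the reuse pattern this helper enables: one growing scratch buffer that always carries AV_INPUT_BUFFER_PADDING_SIZE zeroed bytes at its end, in the spirit of the decoder's unescaped-bitstream buffer. The ScratchBuf struct and ensure_scratch name are illustrative, not part of the library:

    #include <stdint.h>
    #include <libavutil/error.h>
    #include <libavutil/mem.h>

    typedef struct ScratchBuf {
        uint8_t     *data;
        unsigned int size;   /* updated in place by av_fast_padded_malloc() */
    } ScratchBuf;

    static int ensure_scratch(ScratchBuf *b, size_t needed)
    {
        /* Reuses the old allocation when it is already large enough;
         * on failure the old buffer is freed and data is set to NULL. */
        av_fast_padded_malloc(&b->data, &b->size, needed);
        return b->data ? 0 : AVERROR(ENOMEM);
    }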
SOF9
@ SOF9
Definition: mjpeg.h:48
av_always_inline
#define av_always_inline
Definition: attributes.h:49
value
(cross-reference tooltip pulled in running text from doc/writing_filters.txt; omitted here)
Definition: writing_filters.txt:86
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1396
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_PIX_FMT_YUVJ440P
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
Definition: pixfmt.h:100
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:622
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:254
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1046
len
int len
Definition: vorbis_enc_data.h:426
exif.h
DHT
@ DHT
Definition: mjpeg.h:56
AVCodecContext::height
int height
Definition: avcodec.h:621
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:658
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:636
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:656
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:262
FF_CODEC_CAP_ICC_PROFILES
#define FF_CODEC_CAP_ICC_PROFILES
Codec supports embedded ICC profiles (AV_FRAME_DATA_ICC_PROFILE).
Definition: codec_internal.h:82
idctdsp.h
avcodec.h
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
AVCodecContext::frame_num
int64_t frame_num
Frame counter, set by libavcodec.
Definition: avcodec.h:2118
REFINE_BIT
#define REFINE_BIT(j)
Definition: mjpegdec.c:974
ff_vlc_free
void ff_vlc_free(VLC *vlc)
Definition: vlc.c:509
ret
ret
Definition: filter_design.txt:187
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:174
pred
static const float pred[4]
Definition: siprdata.h:259
av_stereo3d_alloc
AVStereo3D * av_stereo3d_alloc(void)
Allocate an AVStereo3D structure and set its fields to default values.
Definition: stereo3d.c:29
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:177
SOF2
@ SOF2
Definition: mjpeg.h:41
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:561
pos
unsigned int pos
Definition: spdifenc.c:413
LSE
@ LSE
JPEG-LS extension parameters.
Definition: mjpeg.h:104
FF_DEBUG_QP
#define FF_DEBUG_QP
Definition: avcodec.h:1393
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
left
(cross-reference tooltip pulled in a long excerpt from doc/snow.txt covering neighboring blocks and motion vector prediction; omitted here)
Definition: snow.txt:386
AV_RL32
little-endian 32-bit bytestream read macro (declaration garbled by macro template expansion)
Definition: bytestream.h:92
ff_mjpeg_find_marker
int ff_mjpeg_find_marker(MJpegDecodeContext *s, const uint8_t **buf_ptr, const uint8_t *buf_end, const uint8_t **unescaped_buf_ptr, int *unescaped_buf_size)
Definition: mjpegdec.c:2237
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:159
id
enum AVCodecID id
Definition: dts2pts_bsf.c:364
OFFSET
(cross-reference tooltip pulled in running text from doc/writing_filters.txt describing the option-table OFFSET() macro; omitted here)
AVCodecContext
main external API structure.
Definition: avcodec.h:441
AVFrame::height
int height
Definition: frame.h:412
FF_CODEC_RECEIVE_FRAME_CB
#define FF_CODEC_RECEIVE_FRAME_CB(func)
Definition: codec_internal.h:312
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:259
buffer
(cross-reference tooltip pulled in running text from doc/filter_design.txt on the frame reference mechanism; omitted here)
Definition: filter_design.txt:49
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:695
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
APP2
@ APP2
Definition: mjpeg.h:81
FF_HW_CALL
#define FF_HW_CALL(avctx, function,...)
Definition: hwaccel_internal.h:171
AVFrame::metadata
AVDictionary * metadata
metadata.
Definition: frame.h:708
ffhwaccel
static const FFHWAccel * ffhwaccel(const AVHWAccel *codec)
Definition: hwaccel_internal.h:166
values
(cross-reference tooltip pulled in running text from doc/filter_design.txt on request_frame handling; omitted here)
Definition: filter_design.txt:263
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
ff_tdecode_header
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the ...
Definition: tiff_common.c:228
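A hedged sketch of how this header parser is typically driven from a byte reader, similar in spirit to how the decoder handles an EXIF blob found in an APP marker; read_exif_header is an illustrative name:

    #include <stdint.h>
    #include "bytestream.h"
    #include "tiff_common.h"

    /* Parses the TIFF header at the start of an EXIF payload and reports
     * the endianness (*le) and the offset of the first IFD (*ifd_offset). */
    static int read_exif_header(const uint8_t *buf, int size,
                                int *le, int *ifd_offset)
    {
        GetByteContext gb;

        bytestream2_init(&gb, buf, size);
        return ff_tdecode_header(&gb, le, ifd_offset); /* <0 on invalid header */
    }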
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
ff_mjpeg_bits_dc_chrominance
const uint8_t ff_mjpeg_bits_dc_chrominance[]
Definition: jpegtabs.h:37
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1388
ff_mjpeg_decode_sof
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
Definition: mjpegdec.c:302
APP0
@ APP0
Definition: mjpeg.h:79
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:636
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_GRAY16LE
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
Definition: pixfmt.h:98
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
SOI
@ SOI
Definition: mjpeg.h:70
mjpeg_decode_app
static int mjpeg_decode_app(MJpegDecodeContext *s)
Definition: mjpegdec.c:1855
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:34
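A minimal, hedged sketch of attaching stereoscopic metadata to a decoded frame, loosely modelled on what the decoder does for stereo JPEG streams; the particular packing chosen here, and the tag_top_bottom name, are only examples:

    #include <libavutil/error.h>
    #include <libavutil/frame.h>
    #include <libavutil/stereo3d.h>

    static int tag_top_bottom(AVFrame *frame)
    {
        AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
        if (!stereo)
            return AVERROR(ENOMEM);
        stereo->type  = AV_STEREO3D_TOPBOTTOM;   /* views stacked vertically    */
        stereo->flags = AV_STEREO3D_FLAG_INVERT; /* bottom view is the left eye */
        return 0;
    }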
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:36
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:246
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:143
SOF1
@ SOF1
Definition: mjpeg.h:40
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AVDictionaryEntry
Definition: dict.h:89
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:466
ff_mjpeg_bits_dc_luminance
const uint8_t ff_mjpeg_bits_dc_luminance[]
Definition: jpegtabs.h:32
ff_mjpeg_build_vlc
int ff_mjpeg_build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int is_ac, void *logctx)
Definition: mjpegdec_common.c:41
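The following is a hedged sketch, using libavcodec-internal helpers only, of the build/lookup/free cycle around this function: a VLC is built from the standard JPEG AC luminance tables, one code is read with get_vlc2(), and the table is released with ff_vlc_free(). It assumes the internal headers below are available, that gb is an already initialized GetBitContext positioned on entropy-coded data, and that a 9-bit primary table with at most two lookups is acceptable (this mirrors typical MJPEG usage but is illustrative); demo_ac_lookup is a made-up name:

    #include "avcodec.h"
    #include "get_bits.h"
    #include "jpegtables.h"
    #include "mjpegdec.h"
    #include "vlc.h"

    static int demo_ac_lookup(AVCodecContext *avctx, GetBitContext *gb)
    {
        VLC vlc = { 0 };
        int code, ret;

        ret = ff_mjpeg_build_vlc(&vlc, ff_mjpeg_bits_ac_luminance,
                                 ff_mjpeg_val_ac_luminance, 1 /* is_ac */, avctx);
        if (ret < 0)
            return ret;

        code = get_vlc2(gb, vlc.table, 9, 2); /* negative on an invalid code */

        ff_vlc_free(&vlc);
        return code;
    }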
AVPacket
This structure stores compressed data.
Definition: packet.h:468
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:468
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:244
AVFrame::crop_top
size_t crop_top
Definition: frame.h:778
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:555
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
av_dict_copy
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:239
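A hedged sketch combining this helper with av_dict_free() (indexed above): all entries of a source dictionary, for example metadata collected while parsing APP markers, are copied into a destination and the source is then released. The move_metadata name is made up:

    #include <libavutil/dict.h>

    static int move_metadata(AVDictionary **dst, AVDictionary **src)
    {
        int ret = av_dict_copy(dst, *src, 0); /* 0: default flags, existing keys are overwritten */
        av_dict_free(src);                    /* frees all keys/values and sets *src to NULL     */
        return ret;                           /* negative AVERROR on allocation failure          */
    }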
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:70
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
int32_t
int32_t
Definition: audioconvert.c:56
bytestream.h
imgutils.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:474
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
MAX_COMPONENTS
#define MAX_COMPONENTS
Definition: mjpegdec.h:45
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:385
rgb
static const SheerTable rgb[2]
Definition: sheervideodata.h:32
block
(cross-reference tooltip pulled in a sentence fragment from doc/filter_design.txt; omitted here)
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
h
h
Definition: vp9dsp_template.c:2038
SOF7
@ SOF7
Definition: mjpeg.h:46
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:173
AVDictionaryEntry::value
char * value
Definition: dict.h:91
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
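A hedged sketch of the usual validation step this helper performs for a decoder: dimensions read from a bitstream header are checked before any buffer sizes are derived from them. check_dimensions is an illustrative name:

    #include <libavutil/imgutils.h>
    #include <libavutil/log.h>

    static int check_dimensions(void *log_ctx, unsigned int w, unsigned int h)
    {
        int ret = av_image_check_size(w, h, 0 /* log_offset */, log_ctx);
        if (ret < 0)
            av_log(log_ctx, AV_LOG_ERROR, "invalid picture size %ux%u\n", w, h);
        return ret;
    }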
AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
Definition: defs.h:173
AV_RB24
big-endian 24-bit bytestream read macro (declaration garbled by macro template expansion)
Definition: bytestream.h:97
PREDICT
#define PREDICT(ret, topleft, top, left, predictor)
Definition: mjpeg.h:118
put_bits.h
return_frame
static int return_frame(AVFilterContext *ctx, int is_second)
Definition: yadif_common.c:28
SOF6
@ SOF6
Definition: mjpeg.h:45
skip
static void BS_FUNC() skip(BSCTX *bc, unsigned int n)
Skip n bits in the buffer.
Definition: bitstream_template.h:375
JPG
@ JPG
Definition: mjpeg.h:47
av_fourcc2str
#define av_fourcc2str(fourcc)
Definition: avutil.h:358