FFmpeg
mjpegdec.c
1 /*
2  * MJPEG decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2003 Alex Beregszaszi
5  * Copyright (c) 2003-2004 Michael Niedermayer
6  *
7  * Support for external huffman table, various fixes (AVID workaround),
8  * aspecting, new decode_frame mechanism and apple mjpeg-b support
9  * by Alex Beregszaszi
10  *
11  * This file is part of FFmpeg.
12  *
13  * FFmpeg is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2.1 of the License, or (at your option) any later version.
17  *
18  * FFmpeg is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with FFmpeg; if not, write to the Free Software
25  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26  */
27 
28 /**
29  * @file
30  * MJPEG decoder.
31  */
32 
33 #include "config_components.h"
34 
35 #include "libavutil/attributes.h"
36 #include "libavutil/emms.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/avassert.h"
39 #include "libavutil/mem.h"
40 #include "libavutil/opt.h"
41 #include "avcodec.h"
42 #include "blockdsp.h"
43 #include "codec_internal.h"
44 #include "copy_block.h"
45 #include "decode.h"
46 #include "exif.h"
47 #include "hwaccel_internal.h"
48 #include "hwconfig.h"
49 #include "idctdsp.h"
50 #include "internal.h"
51 #include "jpegtables.h"
52 #include "mjpeg.h"
53 #include "mjpegdec.h"
54 #include "jpeglsdec.h"
55 #include "profiles.h"
56 #include "put_bits.h"
57 
58 
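/* Build the default Huffman tables (ITU-T T.81 Annex K) used when the stream does not carry its own DHT segments. */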
59 static int init_default_huffman_tables(MJpegDecodeContext *s)
60 {
61  static const struct {
62  int class;
63  int index;
64  const uint8_t *bits;
65  const uint8_t *values;
66  int length;
67  } ht[] = {
68  { 0, 0, ff_mjpeg_bits_dc_luminance,
69  ff_mjpeg_val_dc, 12 },
70  { 0, 1, ff_mjpeg_bits_dc_chrominance,
71  ff_mjpeg_val_dc, 12 },
72  { 1, 0, ff_mjpeg_bits_ac_luminance,
73  ff_mjpeg_val_ac_luminance, 162 },
74  { 1, 1, ff_mjpeg_bits_ac_chrominance,
75  ff_mjpeg_val_ac_chrominance, 162 },
76  { 2, 0, ff_mjpeg_bits_ac_luminance,
77  ff_mjpeg_val_ac_luminance, 162 },
78  { 2, 1, ff_mjpeg_bits_ac_chrominance,
79  ff_mjpeg_val_ac_chrominance, 162 },
80  };
81  int i, ret;
82 
83  for (i = 0; i < FF_ARRAY_ELEMS(ht); i++) {
84  ff_vlc_free(&s->vlcs[ht[i].class][ht[i].index]);
85  ret = ff_mjpeg_build_vlc(&s->vlcs[ht[i].class][ht[i].index],
86  ht[i].bits, ht[i].values,
87  ht[i].class == 1, s->avctx);
88  if (ret < 0)
89  return ret;
90 
91  if (ht[i].class < 2) {
92  memcpy(s->raw_huffman_lengths[ht[i].class][ht[i].index],
93  ht[i].bits + 1, 16);
94  memcpy(s->raw_huffman_values[ht[i].class][ht[i].index],
95  ht[i].values, ht[i].length);
96  }
97  }
98 
99  return 0;
100 }
101 
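/* Parse AVID-specific extradata: byte 12 selects the interlacing polarity (1 = NTSC, 2 = PAL). */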
102 static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
103 {
104  s->buggy_avid = 1;
105  if (len > 14 && buf[12] == 1) /* 1 - NTSC */
106  s->interlace_polarity = 1;
107  if (len > 14 && buf[12] == 2) /* 2 - PAL */
108  s->interlace_polarity = 0;
109  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
110  av_log(s->avctx, AV_LOG_INFO, "AVID: len:%d %d\n", len, len > 14 ? buf[12] : -1);
111 }
112 
113 static void init_idct(AVCodecContext *avctx)
114 {
115  MJpegDecodeContext *s = avctx->priv_data;
116 
117  ff_idctdsp_init(&s->idsp, avctx);
118  ff_permute_scantable(s->permutated_scantable, ff_zigzag_direct,
119  s->idsp.idct_permutation);
120 }
121 
122 av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
123 {
124  MJpegDecodeContext *s = avctx->priv_data;
125  int ret;
126 
127  if (!s->picture_ptr) {
128  s->picture = av_frame_alloc();
129  if (!s->picture)
130  return AVERROR(ENOMEM);
131  s->picture_ptr = s->picture;
132  }
133 
134  s->avctx = avctx;
135  ff_blockdsp_init(&s->bdsp);
136  init_idct(avctx);
137  s->buffer_size = 0;
138  s->buffer = NULL;
139  s->start_code = -1;
140  s->first_picture = 1;
141  s->got_picture = 0;
142  s->orig_height = avctx->coded_height;
143  avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
144  avctx->colorspace = AVCOL_SPC_BT470BG;
145  s->hwaccel_pix_fmt = s->hwaccel_sw_pix_fmt = AV_PIX_FMT_NONE;
146 
147  if ((ret = init_default_huffman_tables(s)) < 0)
148  return ret;
149 
150  if (s->extern_huff) {
151  av_log(avctx, AV_LOG_INFO, "using external huffman table\n");
152  if ((ret = init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size * 8)) < 0)
153  return ret;
154  if (ff_mjpeg_decode_dht(s)) {
155  av_log(avctx, AV_LOG_ERROR,
156  "error using external huffman table, switching back to internal\n");
157  if ((ret = init_default_huffman_tables(s)) < 0)
158  return ret;
159  }
160  }
161  if (avctx->field_order == AV_FIELD_BB) { /* quicktime icefloe 019 */
162  s->interlace_polarity = 1; /* bottom field first */
163  av_log(avctx, AV_LOG_DEBUG, "bottom field first\n");
164  } else if (avctx->field_order == AV_FIELD_UNKNOWN) {
165  if (avctx->codec_tag == AV_RL32("MJPG"))
166  s->interlace_polarity = 1;
167  }
168 
169  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
170  if (avctx->extradata_size >= 4)
171  s->smv_frames_per_jpeg = AV_RL32(avctx->extradata);
172 
173  if (s->smv_frames_per_jpeg <= 0) {
174  av_log(avctx, AV_LOG_ERROR, "Invalid number of frames per jpeg.\n");
175  return AVERROR_INVALIDDATA;
176  }
177 
178  s->smv_frame = av_frame_alloc();
179  if (!s->smv_frame)
180  return AVERROR(ENOMEM);
181  } else if (avctx->extradata_size > 8
182  && AV_RL32(avctx->extradata) == 0x2C
183  && AV_RL32(avctx->extradata+4) == 0x18) {
184  parse_avid(s, avctx->extradata, avctx->extradata_size);
185  }
186 
187  if (avctx->codec->id == AV_CODEC_ID_AMV)
188  s->flipped = 1;
189 
190  return 0;
191 }
192 
193 
194 /* quantize tables */
195 int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
196 {
197  int len, index, i;
198 
199  len = get_bits(&s->gb, 16) - 2;
200 
201  if (8*len > get_bits_left(&s->gb)) {
202  av_log(s->avctx, AV_LOG_ERROR, "dqt: len %d is too large\n", len);
203  return AVERROR_INVALIDDATA;
204  }
205 
206  while (len >= 65) {
207  int pr = get_bits(&s->gb, 4);
208  if (pr > 1) {
209  av_log(s->avctx, AV_LOG_ERROR, "dqt: invalid precision\n");
210  return AVERROR_INVALIDDATA;
211  }
212  index = get_bits(&s->gb, 4);
213  if (index >= 4)
214  return -1;
215  av_log(s->avctx, AV_LOG_DEBUG, "index=%d\n", index);
216  /* read quant table */
217  for (i = 0; i < 64; i++) {
218  s->quant_matrixes[index][i] = get_bits(&s->gb, pr ? 16 : 8);
219  if (s->quant_matrixes[index][i] == 0) {
220  int log_level = s->avctx->err_recognition & AV_EF_EXPLODE ? AV_LOG_ERROR : AV_LOG_WARNING;
221  av_log(s->avctx, log_level, "dqt: 0 quant value\n");
222  if (s->avctx->err_recognition & AV_EF_EXPLODE)
223  return AVERROR_INVALIDDATA;
224  }
225  }
226 
227  // XXX FIXME fine-tune, and perhaps add dc too
228  s->qscale[index] = FFMAX(s->quant_matrixes[index][1],
229  s->quant_matrixes[index][8]) >> 1;
230  av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n",
231  index, s->qscale[index]);
232  len -= 1 + 64 * (1+pr);
233  }
234  return 0;
235 }
236 
237 /* decode huffman tables and build VLC decoders */
238 int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
239 {
240  int len, index, i, class, n, v;
241  uint8_t bits_table[17];
242  uint8_t val_table[256];
243  int ret = 0;
244 
245  len = get_bits(&s->gb, 16) - 2;
246 
247  if (8*len > get_bits_left(&s->gb)) {
248  av_log(s->avctx, AV_LOG_ERROR, "dht: len %d is too large\n", len);
249  return AVERROR_INVALIDDATA;
250  }
251 
252  while (len > 0) {
253  if (len < 17)
254  return AVERROR_INVALIDDATA;
255  class = get_bits(&s->gb, 4);
256  if (class >= 2)
257  return AVERROR_INVALIDDATA;
258  index = get_bits(&s->gb, 4);
259  if (index >= 4)
260  return AVERROR_INVALIDDATA;
261  n = 0;
262  for (i = 1; i <= 16; i++) {
263  bits_table[i] = get_bits(&s->gb, 8);
264  n += bits_table[i];
265  }
266  len -= 17;
267  if (len < n || n > 256)
268  return AVERROR_INVALIDDATA;
269 
270  for (i = 0; i < n; i++) {
271  v = get_bits(&s->gb, 8);
272  val_table[i] = v;
273  }
274  len -= n;
275 
276  /* build VLC and flush previous vlc if present */
277  ff_vlc_free(&s->vlcs[class][index]);
278  av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n",
279  class, index, n);
280  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[class][index], bits_table,
281  val_table, class > 0, s->avctx)) < 0)
282  return ret;
283 
284  if (class > 0) {
285  ff_vlc_free(&s->vlcs[2][index]);
286  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[2][index], bits_table,
287  val_table, 0, s->avctx)) < 0)
288  return ret;
289  }
290 
291  for (i = 0; i < 16; i++)
292  s->raw_huffman_lengths[class][index][i] = bits_table[i + 1];
293  for (i = 0; i < 256; i++)
294  s->raw_huffman_values[class][index][i] = val_table[i];
295  }
296  return 0;
297 }
298 
299 int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
300 {
301  int len, nb_components, i, width, height, bits, ret, size_change;
302  unsigned pix_fmt_id;
303  int h_count[MAX_COMPONENTS] = { 0 };
304  int v_count[MAX_COMPONENTS] = { 0 };
305 
306  s->cur_scan = 0;
307  memset(s->upscale_h, 0, sizeof(s->upscale_h));
308  memset(s->upscale_v, 0, sizeof(s->upscale_v));
309 
310  len = get_bits(&s->gb, 16);
311  bits = get_bits(&s->gb, 8);
312 
313  if (bits > 16 || bits < 1) {
314  av_log(s->avctx, AV_LOG_ERROR, "bits %d is invalid\n", bits);
315  return AVERROR_INVALIDDATA;
316  }
317 
318  if (s->avctx->bits_per_raw_sample != bits) {
319  av_log(s->avctx, s->avctx->bits_per_raw_sample > 0 ? AV_LOG_INFO : AV_LOG_DEBUG, "Changing bps from %d to %d\n", s->avctx->bits_per_raw_sample, bits);
320  s->avctx->bits_per_raw_sample = bits;
321  init_idct(s->avctx);
322  }
323  if (s->pegasus_rct)
324  bits = 9;
325  if (bits == 9 && !s->pegasus_rct)
326  s->rct = 1; // FIXME ugly
327 
328  if(s->lossless && s->avctx->lowres){
329  av_log(s->avctx, AV_LOG_ERROR, "lowres is not possible with lossless jpeg\n");
330  return -1;
331  }
332 
333  height = get_bits(&s->gb, 16);
334  width = get_bits(&s->gb, 16);
335 
336  // HACK for odd_height.mov
337  if (s->interlaced && s->width == width && s->height == height + 1)
338  height= s->height;
339 
340  av_log(s->avctx, AV_LOG_DEBUG, "sof0: picture: %dx%d\n", width, height);
341  if (av_image_check_size(width, height, 0, s->avctx) < 0)
342  return AVERROR_INVALIDDATA;
343  if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
344  return AVERROR_INVALIDDATA;
345 
346  nb_components = get_bits(&s->gb, 8);
347  if (nb_components <= 0 ||
348  nb_components > MAX_COMPONENTS)
349  return -1;
350  if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
351  if (nb_components != s->nb_components) {
352  av_log(s->avctx, AV_LOG_ERROR,
353  "nb_components changing in interlaced picture\n");
354  return AVERROR_INVALIDDATA;
355  }
356  }
357  if (s->ls && !(bits <= 8 || nb_components == 1)) {
358  avpriv_report_missing_feature(s->avctx,
359  "JPEG-LS that is not <= 8 "
360  "bits/component or 16-bit gray");
361  return AVERROR_PATCHWELCOME;
362  }
363  if (len != 8 + 3 * nb_components) {
364  av_log(s->avctx, AV_LOG_ERROR, "decode_sof0: error, len(%d) mismatch %d components\n", len, nb_components);
365  return AVERROR_INVALIDDATA;
366  }
367 
368  s->nb_components = nb_components;
369  s->h_max = 1;
370  s->v_max = 1;
371  for (i = 0; i < nb_components; i++) {
372  /* component id */
373  s->component_id[i] = get_bits(&s->gb, 8);
374  h_count[i] = get_bits(&s->gb, 4);
375  v_count[i] = get_bits(&s->gb, 4);
376  /* compute hmax and vmax (only used in interleaved case) */
377  if (h_count[i] > s->h_max)
378  s->h_max = h_count[i];
379  if (v_count[i] > s->v_max)
380  s->v_max = v_count[i];
381  s->quant_index[i] = get_bits(&s->gb, 8);
382  if (s->quant_index[i] >= 4) {
383  av_log(s->avctx, AV_LOG_ERROR, "quant_index is invalid\n");
384  return AVERROR_INVALIDDATA;
385  }
386  if (!h_count[i] || !v_count[i]) {
387  av_log(s->avctx, AV_LOG_ERROR,
388  "Invalid sampling factor in component %d %d:%d\n",
389  i, h_count[i], v_count[i]);
390  return AVERROR_INVALIDDATA;
391  }
392 
393  av_log(s->avctx, AV_LOG_DEBUG, "component %d %d:%d id: %d quant:%d\n",
394  i, h_count[i], v_count[i],
395  s->component_id[i], s->quant_index[i]);
396  }
397  if ( nb_components == 4
398  && s->component_id[0] == 'C'
399  && s->component_id[1] == 'M'
400  && s->component_id[2] == 'Y'
401  && s->component_id[3] == 'K')
402  s->adobe_transform = 0;
403 
404  if (s->ls && (s->h_max > 1 || s->v_max > 1)) {
405  avpriv_report_missing_feature(s->avctx, "Subsampling in JPEG-LS");
406  return AVERROR_PATCHWELCOME;
407  }
408 
409  if (s->bayer) {
410  if (nb_components == 2) {
411  /* Bayer images embedded in DNGs can contain 2 interleaved components and the
412  width stored in their SOF3 markers is the width of each one. We only output
413  a single component, therefore we need to adjust the output image width. We
414  handle the deinterleaving (but not the debayering) in this file. */
415  width *= 2;
416  }
417  /* They can also contain 1 component, which is double the width and half the height
418  of the final image (rows are interleaved). We don't handle the decoding in this
419  file, but leave that to the TIFF/DNG decoder. */
420  }
421 
422  /* if different size, realloc/alloc picture */
423  if (width != s->width || height != s->height || bits != s->bits ||
424  memcmp(s->h_count, h_count, sizeof(h_count)) ||
425  memcmp(s->v_count, v_count, sizeof(v_count))) {
426  size_change = 1;
427 
428  s->width = width;
429  s->height = height;
430  s->bits = bits;
431  memcpy(s->h_count, h_count, sizeof(h_count));
432  memcpy(s->v_count, v_count, sizeof(v_count));
433  s->interlaced = 0;
434  s->got_picture = 0;
435 
436  /* test interlaced mode */
437  if (s->first_picture &&
438  (s->multiscope != 2 || s->avctx->pkt_timebase.den >= 25 * s->avctx->pkt_timebase.num) &&
439  s->orig_height != 0 &&
440  s->height < ((s->orig_height * 3) / 4)) {
441  s->interlaced = 1;
442  s->bottom_field = s->interlace_polarity;
443  s->picture_ptr->flags |= AV_FRAME_FLAG_INTERLACED;
444  s->picture_ptr->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST * !s->interlace_polarity;
445  height *= 2;
446  }
447 
448  ret = ff_set_dimensions(s->avctx, width, height);
449  if (ret < 0)
450  return ret;
451 
452  if (s->avctx->codec_id != AV_CODEC_ID_SMVJPEG &&
453  (s->avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
454  s->avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
455  s->orig_height < height)
456  s->avctx->height = AV_CEIL_RSHIFT(s->orig_height, s->avctx->lowres);
457 
458  s->first_picture = 0;
459  } else {
460  size_change = 0;
461  }
462 
463  if (s->avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
464  s->avctx->height = s->avctx->coded_height / s->smv_frames_per_jpeg;
465  if (s->avctx->height <= 0)
466  return AVERROR_INVALIDDATA;
467  }
468  if (s->bayer && s->progressive) {
469  avpriv_request_sample(s->avctx, "progressively coded bayer picture");
470  return AVERROR_INVALIDDATA;
471  }
472 
473  if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
474  if (s->progressive) {
475  avpriv_request_sample(s->avctx, "progressively coded interlaced picture");
476  return AVERROR_INVALIDDATA;
477  }
478  } else {
479  if (s->v_max == 1 && s->h_max == 1 && s->lossless==1 && (nb_components==3 || nb_components==4))
480  s->rgb = 1;
481  else if (!s->lossless)
482  s->rgb = 0;
483  /* XXX: not complete test ! */
484  pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
485  (s->h_count[1] << 20) | (s->v_count[1] << 16) |
486  (s->h_count[2] << 12) | (s->v_count[2] << 8) |
487  (s->h_count[3] << 4) | s->v_count[3];
488  av_log(s->avctx, AV_LOG_DEBUG, "pix fmt id %x\n", pix_fmt_id);
489  /* NOTE we do not allocate pictures large enough for the possible
490  * padding of h/v_count being 4 */
491  if (!(pix_fmt_id & 0xD0D0D0D0))
492  pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
493  if (!(pix_fmt_id & 0x0D0D0D0D))
494  pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
495 
496  for (i = 0; i < 8; i++) {
497  int j = 6 + (i&1) - (i&6);
498  int is = (pix_fmt_id >> (4*i)) & 0xF;
499  int js = (pix_fmt_id >> (4*j)) & 0xF;
500 
501  if (is == 1 && js != 2 && (i < 2 || i > 5))
502  js = (pix_fmt_id >> ( 8 + 4*(i&1))) & 0xF;
503  if (is == 1 && js != 2 && (i < 2 || i > 5))
504  js = (pix_fmt_id >> (16 + 4*(i&1))) & 0xF;
505 
506  if (is == 1 && js == 2) {
507  if (i & 1) s->upscale_h[j/2] = 1;
508  else s->upscale_v[j/2] = 1;
509  }
510  }
511 
512  if (s->bayer) {
513  if (pix_fmt_id != 0x11110000 && pix_fmt_id != 0x11000000)
514  goto unk_pixfmt;
515  }
516 
517  switch (pix_fmt_id) {
518  case 0x11110000: /* for bayer-encoded huffman lossless JPEGs embedded in DNGs */
519  if (!s->bayer)
520  goto unk_pixfmt;
521  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16LE;
522  break;
523  case 0x11111100:
524  if (s->rgb)
525  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_BGR48;
526  else {
527  if ( s->adobe_transform == 0
528  || s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
529  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_GBRP : AV_PIX_FMT_GBRP16;
530  } else {
531  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
532  else s->avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
533  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
534  }
535  }
536  av_assert0(s->nb_components == 3);
537  break;
538  case 0x11111111:
539  if (s->rgb)
540  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_ABGR : AV_PIX_FMT_RGBA64;
541  else {
542  if (s->adobe_transform == 0 && s->bits <= 8) {
543  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
544  } else {
545  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_YUVA444P : AV_PIX_FMT_YUVA444P16;
546  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
547  }
548  }
549  av_assert0(s->nb_components == 4);
550  break;
551  case 0x11412100:
552  if (s->bits > 8)
553  goto unk_pixfmt;
554  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
555  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
556  s->upscale_h[0] = 4;
557  s->upscale_h[1] = 0;
558  s->upscale_h[2] = 1;
559  } else {
560  goto unk_pixfmt;
561  }
562  break;
563  case 0x22111122:
564  case 0x22111111:
565  if (s->adobe_transform == 0 && s->bits <= 8) {
566  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
567  s->upscale_v[1] = s->upscale_v[2] = 1;
568  s->upscale_h[1] = s->upscale_h[2] = 1;
569  } else if (s->adobe_transform == 2 && s->bits <= 8) {
570  s->avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
571  s->upscale_v[1] = s->upscale_v[2] = 1;
572  s->upscale_h[1] = s->upscale_h[2] = 1;
573  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
574  } else {
575  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
576  else s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P16;
577  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
578  }
579  av_assert0(s->nb_components == 4);
580  break;
581  case 0x12121100:
582  case 0x22122100:
583  case 0x21211100:
584  case 0x21112100:
585  case 0x22211200:
586  case 0x22221100:
587  case 0x22112200:
588  case 0x11222200:
589  if (s->bits > 8)
590  goto unk_pixfmt;
591  if (s->adobe_transform == 0 || s->component_id[0] == 'R' &&
592  s->component_id[1] == 'G' && s->component_id[2] == 'B') {
593  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
594  } else {
595  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
596  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
597  }
598  break;
599  case 0x11000000:
600  case 0x13000000:
601  case 0x14000000:
602  case 0x31000000:
603  case 0x33000000:
604  case 0x34000000:
605  case 0x41000000:
606  case 0x43000000:
607  case 0x44000000:
608  if(s->bits <= 8)
609  s->avctx->pix_fmt = s->force_pal8 ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
610  else
611  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
612  break;
613  case 0x12111100:
614  case 0x14121200:
615  case 0x14111100:
616  case 0x22211100:
617  case 0x22112100:
618  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
619  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
620  else
621  goto unk_pixfmt;
622  s->upscale_v[1] = s->upscale_v[2] = 1;
623  } else {
624  if (pix_fmt_id == 0x14111100)
625  s->upscale_v[1] = s->upscale_v[2] = 1;
626  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV440P : AV_PIX_FMT_YUVJ440P;
627  else
628  goto unk_pixfmt;
629  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
630  }
631  break;
632  case 0x21111100:
633  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
634  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
635  else
636  goto unk_pixfmt;
637  s->upscale_h[1] = s->upscale_h[2] = 1;
638  } else {
639  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
640  else s->avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
641  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
642  }
643  break;
644  case 0x11311100:
645  if (s->bits > 8)
646  goto unk_pixfmt;
647  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B')
648  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
649  else
650  goto unk_pixfmt;
651  s->upscale_h[0] = s->upscale_h[2] = 2;
652  break;
653  case 0x31111100:
654  if (s->bits > 8)
655  goto unk_pixfmt;
656  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
657  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
658  s->upscale_h[1] = s->upscale_h[2] = 2;
659  break;
660  case 0x22121100:
661  case 0x22111200:
662  case 0x41211100:
663  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
664  else
665  goto unk_pixfmt;
666  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
667  break;
668  case 0x22111100:
669  case 0x23111100:
670  case 0x42111100:
671  case 0x24111100:
672  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_YUVJ420P;
673  else s->avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
674  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
675  if (pix_fmt_id == 0x42111100) {
676  if (s->bits > 8)
677  goto unk_pixfmt;
678  s->upscale_h[1] = s->upscale_h[2] = 1;
679  } else if (pix_fmt_id == 0x24111100) {
680  if (s->bits > 8)
681  goto unk_pixfmt;
682  s->upscale_v[1] = s->upscale_v[2] = 1;
683  } else if (pix_fmt_id == 0x23111100) {
684  if (s->bits > 8)
685  goto unk_pixfmt;
686  s->upscale_v[1] = s->upscale_v[2] = 2;
687  }
688  break;
689  case 0x41111100:
690  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV411P : AV_PIX_FMT_YUVJ411P;
691  else
692  goto unk_pixfmt;
693  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
694  break;
695  default:
696  unk_pixfmt:
697  avpriv_report_missing_feature(s->avctx, "Pixel format 0x%x bits:%d", pix_fmt_id, s->bits);
698  memset(s->upscale_h, 0, sizeof(s->upscale_h));
699  memset(s->upscale_v, 0, sizeof(s->upscale_v));
700  return AVERROR_PATCHWELCOME;
701  }
702  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->avctx->lowres) {
703  avpriv_report_missing_feature(s->avctx, "Lowres for weird subsampling");
704  return AVERROR_PATCHWELCOME;
705  }
706  if (s->ls) {
707  memset(s->upscale_h, 0, sizeof(s->upscale_h));
708  memset(s->upscale_v, 0, sizeof(s->upscale_v));
709  if (s->nb_components == 3) {
710  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
711  } else if (s->nb_components != 1) {
712  av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
713  return AVERROR_PATCHWELCOME;
714  } else if ((s->palette_index || s->force_pal8) && s->bits <= 8)
715  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
716  else if (s->bits <= 8)
717  s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
718  else
719  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
720  }
721 
722  s->pix_desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
723  if (!s->pix_desc) {
724  av_log(s->avctx, AV_LOG_ERROR, "Could not get a pixel format descriptor.\n");
725  return AVERROR_BUG;
726  }
727 
728  if (s->avctx->pix_fmt == s->hwaccel_sw_pix_fmt && !size_change) {
729  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
730  } else {
731  enum AVPixelFormat pix_fmts[] = {
732 #if CONFIG_MJPEG_NVDEC_HWACCEL
733  AV_PIX_FMT_CUDA,
734 #endif
735 #if CONFIG_MJPEG_VAAPI_HWACCEL
736  AV_PIX_FMT_VAAPI,
737 #endif
738  s->avctx->pix_fmt,
739  AV_PIX_FMT_NONE,
740  };
741  s->hwaccel_pix_fmt = ff_get_format(s->avctx, pix_fmts);
742  if (s->hwaccel_pix_fmt < 0)
743  return AVERROR(EINVAL);
744 
745  s->hwaccel_sw_pix_fmt = s->avctx->pix_fmt;
746  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
747  }
748 
749  if (s->avctx->skip_frame == AVDISCARD_ALL) {
750  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
751  s->picture_ptr->flags |= AV_FRAME_FLAG_KEY;
752  s->got_picture = 1;
753  return 0;
754  }
755 
756  av_frame_unref(s->picture_ptr);
757  if (ff_get_buffer(s->avctx, s->picture_ptr, AV_GET_BUFFER_FLAG_REF) < 0)
758  return -1;
759  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
760  s->picture_ptr->flags |= AV_FRAME_FLAG_KEY;
761  s->got_picture = 1;
762 
763  // Lets clear the palette to avoid leaving uninitialized values in it
764  if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
765  memset(s->picture_ptr->data[1], 0, 1024);
766 
767  for (i = 0; i < 4; i++)
768  s->linesize[i] = s->picture_ptr->linesize[i] << s->interlaced;
769 
770  ff_dlog(s->avctx, "%d %d %d %d %d %d\n",
771  s->width, s->height, s->linesize[0], s->linesize[1],
772  s->interlaced, s->avctx->height);
773 
774  }
775 
776  if ((s->rgb && !s->lossless && !s->ls) ||
777  (!s->rgb && s->ls && s->nb_components > 1) ||
778  (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 && !s->ls)
779  ) {
780  av_log(s->avctx, AV_LOG_ERROR, "Unsupported coding and pixel format combination\n");
781  return AVERROR_PATCHWELCOME;
782  }
783 
784  /* totally blank picture as progressive JPEG will only add details to it */
785  if (s->progressive) {
786  int bw = (width + s->h_max * 8 - 1) / (s->h_max * 8);
787  int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
788  for (i = 0; i < s->nb_components; i++) {
789  int size = bw * bh * s->h_count[i] * s->v_count[i];
790  av_freep(&s->blocks[i]);
791  av_freep(&s->last_nnz[i]);
792  s->blocks[i] = av_calloc(size, sizeof(**s->blocks));
793  s->last_nnz[i] = av_calloc(size, sizeof(**s->last_nnz));
794  if (!s->blocks[i] || !s->last_nnz[i])
795  return AVERROR(ENOMEM);
796  s->block_stride[i] = bw * s->h_count[i];
797  }
798  memset(s->coefs_finished, 0, sizeof(s->coefs_finished));
799  }
800 
801  if (s->avctx->hwaccel) {
802  const FFHWAccel *hwaccel = ffhwaccel(s->avctx->hwaccel);
803  s->hwaccel_picture_private =
804  av_mallocz(hwaccel->frame_priv_data_size);
805  if (!s->hwaccel_picture_private)
806  return AVERROR(ENOMEM);
807 
808  ret = hwaccel->start_frame(s->avctx, NULL, s->raw_image_buffer,
809  s->raw_image_buffer_size);
810  if (ret < 0)
811  return ret;
812  }
813 
814  return 0;
815 }
816 
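/* Decode one Huffman-coded DC difference: a category code (0-16) followed by that many additional bits. */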
817 static inline int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index, int *val)
818 {
819  int code;
820  code = get_vlc2(&s->gb, s->vlcs[0][dc_index].table, 9, 2);
821  if (code < 0 || code > 16) {
822  av_log(s->avctx, AV_LOG_ERROR,
823  "mjpeg_decode_dc: bad vlc: %d\n", dc_index);
824  return AVERROR_INVALIDDATA;
825  }
826 
827  *val = code ? get_xbits(&s->gb, code) : 0;
828  return 0;
829 }
830 
831 /* decode block and dequantize */
832 static int decode_block(MJpegDecodeContext *s, int16_t *block, int component,
833  int dc_index, int ac_index, uint16_t *quant_matrix)
834 {
835  int code, i, j, level, val;
836 
837  /* DC coef */
838  int ret = mjpeg_decode_dc(s, dc_index, &val);
839  if (ret < 0)
840  return ret;
841 
842  val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
843  s->last_dc[component] = val;
844  block[0] = av_clip_int16(val);
845  /* AC coefs */
846  i = 0;
847  {OPEN_READER(re, &s->gb);
848  do {
849  UPDATE_CACHE(re, &s->gb);
850  GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2);
851 
852  i += ((unsigned)code) >> 4;
853  code &= 0xf;
854  if (code) {
855  if (code > MIN_CACHE_BITS - 16)
856  UPDATE_CACHE(re, &s->gb);
857 
858  {
859  int cache = GET_CACHE(re, &s->gb);
860  int sign = (~cache) >> 31;
861  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
862  }
863 
864  LAST_SKIP_BITS(re, &s->gb, code);
865 
866  if (i > 63) {
867  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
868  return AVERROR_INVALIDDATA;
869  }
870  j = s->permutated_scantable[i];
871  block[j] = level * quant_matrix[i];
872  }
873  } while (i < 63);
874  CLOSE_READER(re, &s->gb);}
875 
876  return 0;
877 }
878 
879 static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block,
880  int component, int dc_index,
881  uint16_t *quant_matrix, int Al)
882 {
883  unsigned val;
884  s->bdsp.clear_block(block);
885  int ret = mjpeg_decode_dc(s, dc_index, &val);
886  if (ret < 0)
887  return ret;
888 
889  val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
890  s->last_dc[component] = val;
891  block[0] = val;
892  return 0;
893 }
894 
895 /* decode block and dequantize - progressive JPEG version */
896 static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block,
897  uint8_t *last_nnz, int ac_index,
898  uint16_t *quant_matrix,
899  int ss, int se, int Al, int *EOBRUN)
900 {
901  int code, i, j, val, run;
902  unsigned level;
903 
904  if (*EOBRUN) {
905  (*EOBRUN)--;
906  return 0;
907  }
908 
909  {
910  OPEN_READER(re, &s->gb);
911  for (i = ss; ; i++) {
912  UPDATE_CACHE(re, &s->gb);
913  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
914 
915  run = ((unsigned) code) >> 4;
916  code &= 0xF;
917  if (code) {
918  i += run;
919  if (code > MIN_CACHE_BITS - 16)
920  UPDATE_CACHE(re, &s->gb);
921 
922  {
923  int cache = GET_CACHE(re, &s->gb);
924  int sign = (~cache) >> 31;
925  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
926  }
927 
928  LAST_SKIP_BITS(re, &s->gb, code);
929 
930  if (i >= se) {
931  if (i == se) {
932  j = s->permutated_scantable[se];
933  block[j] = level * (quant_matrix[se] << Al);
934  break;
935  }
936  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
937  return AVERROR_INVALIDDATA;
938  }
939  j = s->permutated_scantable[i];
940  block[j] = level * (quant_matrix[i] << Al);
941  } else {
942  if (run == 0xF) {// ZRL - skip 15 coefficients
943  i += 15;
944  if (i >= se) {
945  av_log(s->avctx, AV_LOG_ERROR, "ZRL overflow: %d\n", i);
946  return AVERROR_INVALIDDATA;
947  }
948  } else {
949  val = (1 << run);
950  if (run) {
951  UPDATE_CACHE(re, &s->gb);
952  val += NEG_USR32(GET_CACHE(re, &s->gb), run);
953  LAST_SKIP_BITS(re, &s->gb, run);
954  }
955  *EOBRUN = val - 1;
956  break;
957  }
958  }
959  }
960  CLOSE_READER(re, &s->gb);
961  }
962 
963  if (i > *last_nnz)
964  *last_nnz = i;
965 
966  return 0;
967 }
968 
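/* Helpers for the successive-approximation refinement pass: REFINE_BIT adds one correction bit to an already
   non-zero coefficient, ZERO_RUN skips a run of zero coefficients while refining the non-zero ones it crosses. */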
969 #define REFINE_BIT(j) { \
970  UPDATE_CACHE(re, &s->gb); \
971  sign = block[j] >> 15; \
972  block[j] += SHOW_UBITS(re, &s->gb, 1) * \
973  ((quant_matrix[i] ^ sign) - sign) << Al; \
974  LAST_SKIP_BITS(re, &s->gb, 1); \
975 }
976 
977 #define ZERO_RUN \
978 for (; ; i++) { \
979  if (i > last) { \
980  i += run; \
981  if (i > se) { \
982  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
983  return -1; \
984  } \
985  break; \
986  } \
987  j = s->permutated_scantable[i]; \
988  if (block[j]) \
989  REFINE_BIT(j) \
990  else if (run-- == 0) \
991  break; \
992 }
993 
994 /* decode block and dequantize - progressive JPEG refinement pass */
995 static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block,
996  uint8_t *last_nnz,
997  int ac_index, uint16_t *quant_matrix,
998  int ss, int se, int Al, int *EOBRUN)
999 {
1000  int code, i = ss, j, sign, val, run;
1001  int last = FFMIN(se, *last_nnz);
1002 
1003  OPEN_READER(re, &s->gb);
1004  if (*EOBRUN) {
1005  (*EOBRUN)--;
1006  } else {
1007  for (; ; i++) {
1008  UPDATE_CACHE(re, &s->gb);
1009  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
1010 
1011  if (code & 0xF) {
1012  run = ((unsigned) code) >> 4;
1013  UPDATE_CACHE(re, &s->gb);
1014  val = SHOW_UBITS(re, &s->gb, 1);
1015  LAST_SKIP_BITS(re, &s->gb, 1);
1016  ZERO_RUN;
1017  j = s->permutated_scantable[i];
1018  val--;
1019  block[j] = ((quant_matrix[i] << Al) ^ val) - val;
1020  if (i == se) {
1021  if (i > *last_nnz)
1022  *last_nnz = i;
1023  CLOSE_READER(re, &s->gb);
1024  return 0;
1025  }
1026  } else {
1027  run = ((unsigned) code) >> 4;
1028  if (run == 0xF) {
1029  ZERO_RUN;
1030  } else {
1031  val = run;
1032  run = (1 << run);
1033  if (val) {
1034  UPDATE_CACHE(re, &s->gb);
1035  run += SHOW_UBITS(re, &s->gb, val);
1036  LAST_SKIP_BITS(re, &s->gb, val);
1037  }
1038  *EOBRUN = run - 1;
1039  break;
1040  }
1041  }
1042  }
1043 
1044  if (i > *last_nnz)
1045  *last_nnz = i;
1046  }
1047 
1048  for (; i <= last; i++) {
1049  j = s->permutated_scantable[i];
1050  if (block[j])
1051  REFINE_BIT(j)
1052  }
1053  CLOSE_READER(re, &s->gb);
1054 
1055  return 0;
1056 }
1057 #undef REFINE_BIT
1058 #undef ZERO_RUN
1059 
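/* Check whether a restart marker is due; if one is found in the bitstream, skip it, reset the DC predictors and return nonzero. */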
1060 static int handle_rstn(MJpegDecodeContext *s, int nb_components)
1061 {
1062  int i;
1063  int reset = 0;
1064 
1065  if (s->restart_interval) {
1066  s->restart_count--;
1067  if(s->restart_count == 0 && s->avctx->codec_id == AV_CODEC_ID_THP){
1068  align_get_bits(&s->gb);
1069  for (i = 0; i < nb_components; i++) /* reset dc */
1070  s->last_dc[i] = (4 << s->bits);
1071  }
1072 
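/* Peek up to the end of the next whole byte: padding bits followed by an 0xFF byte indicate a marker. */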
1073  i = 8 + ((-get_bits_count(&s->gb)) & 7);
1074  /* skip RSTn */
1075  if (s->restart_count == 0) {
1076  if( show_bits(&s->gb, i) == (1 << i) - 1
1077  || show_bits(&s->gb, i) == 0xFF) {
1078  int pos = get_bits_count(&s->gb);
1079  align_get_bits(&s->gb);
1080  while (get_bits_left(&s->gb) >= 8 && show_bits(&s->gb, 8) == 0xFF)
1081  skip_bits(&s->gb, 8);
1082  if (get_bits_left(&s->gb) >= 8 && (get_bits(&s->gb, 8) & 0xF8) == 0xD0) {
1083  for (i = 0; i < nb_components; i++) /* reset dc */
1084  s->last_dc[i] = (4 << s->bits);
1085  reset = 1;
1086  } else
1087  skip_bits_long(&s->gb, pos - get_bits_count(&s->gb));
1088  }
1089  }
1090  }
1091  return reset;
1092 }
1093 
1094 /* Handles 1 to 4 components */
1095 static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
1096 {
1097  int i, mb_x, mb_y;
1098  unsigned width;
1099  uint16_t (*buffer)[4];
1100  int left[4], top[4], topleft[4];
1101  const int linesize = s->linesize[0];
1102  const int mask = ((1 << s->bits) - 1) << point_transform;
1103  int resync_mb_y = 0;
1104  int resync_mb_x = 0;
1105  int vpred[6];
1106  int ret;
1107 
1108  if (!s->bayer && s->nb_components < 3)
1109  return AVERROR_INVALIDDATA;
1110  if (s->bayer && s->nb_components > 2)
1111  return AVERROR_INVALIDDATA;
1112  if (s->nb_components <= 0 || s->nb_components > 4)
1113  return AVERROR_INVALIDDATA;
1114  if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
1115  return AVERROR_INVALIDDATA;
1116  if (s->bayer) {
1117  if (s->rct || s->pegasus_rct)
1118  return AVERROR_INVALIDDATA;
1119  }
1120 
1121 
1122  s->restart_count = s->restart_interval;
1123 
1124  if (s->restart_interval == 0)
1125  s->restart_interval = INT_MAX;
1126 
1127  if (s->bayer)
1128  width = s->mb_width / nb_components; /* Interleaved, width stored is the total so need to divide */
1129  else
1130  width = s->mb_width;
1131 
1132  av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size, width * 4 * sizeof(s->ljpeg_buffer[0][0]));
1133  if (!s->ljpeg_buffer)
1134  return AVERROR(ENOMEM);
1135 
1136  buffer = s->ljpeg_buffer;
1137 
1138  for (i = 0; i < 4; i++)
1139  buffer[0][i] = 1 << (s->bits - 1);
1140 
1141  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1142  uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);
1143 
1144  if (s->interlaced && s->bottom_field)
1145  ptr += linesize >> 1;
1146 
1147  for (i = 0; i < 4; i++)
1148  top[i] = left[i] = topleft[i] = buffer[0][i];
1149 
1150  if ((mb_y * s->width) % s->restart_interval == 0) {
1151  for (i = 0; i < 6; i++)
1152  vpred[i] = 1 << (s->bits-1);
1153  }
1154 
1155  for (mb_x = 0; mb_x < width; mb_x++) {
1156  int modified_predictor = predictor;
1157 
1158  if (get_bits_left(&s->gb) < 1) {
1159  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in rgb_scan\n");
1160  return AVERROR_INVALIDDATA;
1161  }
1162 
1163  if (s->restart_interval && !s->restart_count){
1164  s->restart_count = s->restart_interval;
1165  resync_mb_x = mb_x;
1166  resync_mb_y = mb_y;
1167  for(i=0; i<4; i++)
1168  top[i] = left[i]= topleft[i]= 1 << (s->bits - 1);
1169  }
1170  if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
1171  modified_predictor = 1;
1172 
1173  for (i=0;i<nb_components;i++) {
1174  int pred, dc;
1175 
1176  topleft[i] = top[i];
1177  top[i] = buffer[mb_x][i];
1178 
1179  ret = mjpeg_decode_dc(s, s->dc_index[i], &dc);
1180  if (ret < 0)
1181  return ret;
1182 
1183  if (!s->bayer || mb_x) {
1184  pred = left[i];
1185  } else { /* This path runs only for the first line in bayer images */
1186  vpred[i] += dc;
1187  pred = vpred[i] - dc;
1188  }
1189 
1190  PREDICT(pred, topleft[i], top[i], pred, modified_predictor);
1191 
1192  left[i] = buffer[mb_x][i] =
1193  mask & (pred + (unsigned)(dc * (1 << point_transform)));
1194  }
1195 
1196  if (s->restart_interval && !--s->restart_count) {
1197  align_get_bits(&s->gb);
1198  skip_bits(&s->gb, 16); /* skip RSTn */
1199  }
1200  }
1201  if (s->rct && s->nb_components == 4) {
1202  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1203  ptr[4*mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1204  ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2];
1205  ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2];
1206  ptr[4*mb_x + 0] = buffer[mb_x][3];
1207  }
1208  } else if (s->nb_components == 4) {
1209  for(i=0; i<nb_components; i++) {
1210  int c= s->comp_index[i];
1211  if (s->bits <= 8) {
1212  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1213  ptr[4*mb_x+3-c] = buffer[mb_x][i];
1214  }
1215  } else if(s->bits == 9) {
1216  return AVERROR_PATCHWELCOME;
1217  } else {
1218  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1219  ((uint16_t*)ptr)[4*mb_x+c] = buffer[mb_x][i];
1220  }
1221  }
1222  }
1223  } else if (s->rct) {
1224  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1225  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1226  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1227  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1228  }
1229  } else if (s->pegasus_rct) {
1230  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1231  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2]) >> 2);
1232  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1233  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1234  }
1235  } else if (s->bayer) {
1236  if (s->bits <= 8)
1237  return AVERROR_PATCHWELCOME;
1238  if (nb_components == 1) {
1239  /* Leave decoding to the TIFF/DNG decoder (see comment in ff_mjpeg_decode_sof) */
1240  for (mb_x = 0; mb_x < width; mb_x++)
1241  ((uint16_t*)ptr)[mb_x] = buffer[mb_x][0];
1242  } else if (nb_components == 2) {
1243  for (mb_x = 0; mb_x < width; mb_x++) {
1244  ((uint16_t*)ptr)[2*mb_x + 0] = buffer[mb_x][0];
1245  ((uint16_t*)ptr)[2*mb_x + 1] = buffer[mb_x][1];
1246  }
1247  }
1248  } else {
1249  for(i=0; i<nb_components; i++) {
1250  int c= s->comp_index[i];
1251  if (s->bits <= 8) {
1252  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1253  ptr[3*mb_x+2-c] = buffer[mb_x][i];
1254  }
1255  } else if(s->bits == 9) {
1256  return AVERROR_PATCHWELCOME;
1257  } else {
1258  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1259  ((uint16_t*)ptr)[3*mb_x+2-c] = buffer[mb_x][i];
1260  }
1261  }
1262  }
1263  }
1264  }
1265  return 0;
1266 }
1267 
1268 static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
1269  int point_transform, int nb_components)
1270 {
1271  int i, mb_x, mb_y, mask;
1272  int bits= (s->bits+7)&~7;
1273  int resync_mb_y = 0;
1274  int resync_mb_x = 0;
1275  int ret;
1276 
1277  point_transform += bits - s->bits;
1278  mask = ((1 << s->bits) - 1) << point_transform;
1279 
1280  av_assert0(nb_components>=1 && nb_components<=4);
1281 
1282  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1283  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1284  if (get_bits_left(&s->gb) < 1) {
1285  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in yuv_scan\n");
1286  return AVERROR_INVALIDDATA;
1287  }
1288  if (s->restart_interval && !s->restart_count){
1289  s->restart_count = s->restart_interval;
1290  resync_mb_x = mb_x;
1291  resync_mb_y = mb_y;
1292  }
1293 
1294  if(!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->interlaced){
1295  int toprow = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
1296  int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
1297  for (i = 0; i < nb_components; i++) {
1298  uint8_t *ptr;
1299  uint16_t *ptr16;
1300  int n, h, v, x, y, c, j, linesize;
1301  n = s->nb_blocks[i];
1302  c = s->comp_index[i];
1303  h = s->h_scount[i];
1304  v = s->v_scount[i];
1305  x = 0;
1306  y = 0;
1307  linesize= s->linesize[c];
1308 
1309  if(bits>8) linesize /= 2;
1310 
1311  for(j=0; j<n; j++) {
1312  int pred, dc;
1313 
1314  ret = mjpeg_decode_dc(s, s->dc_index[i], &dc);
1315  if (ret < 0)
1316  return ret;
1317 
1318  if ( h * mb_x + x >= s->width
1319  || v * mb_y + y >= s->height) {
1320  // Nothing to do
1321  } else if (bits<=8) {
1322  ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
1323  if(y==0 && toprow){
1324  if(x==0 && leftcol){
1325  pred= 1 << (bits - 1);
1326  }else{
1327  pred= ptr[-1];
1328  }
1329  }else{
1330  if(x==0 && leftcol){
1331  pred= ptr[-linesize];
1332  }else{
1333  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1334  }
1335  }
1336 
1337  if (s->interlaced && s->bottom_field)
1338  ptr += linesize >> 1;
1339  pred &= mask;
1340  *ptr= pred + ((unsigned)dc << point_transform);
1341  }else{
1342  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1343  if(y==0 && toprow){
1344  if(x==0 && leftcol){
1345  pred= 1 << (bits - 1);
1346  }else{
1347  pred= ptr16[-1];
1348  }
1349  }else{
1350  if(x==0 && leftcol){
1351  pred= ptr16[-linesize];
1352  }else{
1353  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1354  }
1355  }
1356 
1357  if (s->interlaced && s->bottom_field)
1358  ptr16 += linesize >> 1;
1359  pred &= mask;
1360  *ptr16= pred + ((unsigned)dc << point_transform);
1361  }
1362  if (++x == h) {
1363  x = 0;
1364  y++;
1365  }
1366  }
1367  }
1368  } else {
1369  for (i = 0; i < nb_components; i++) {
1370  uint8_t *ptr;
1371  uint16_t *ptr16;
1372  int n, h, v, x, y, c, j, linesize, dc;
1373  n = s->nb_blocks[i];
1374  c = s->comp_index[i];
1375  h = s->h_scount[i];
1376  v = s->v_scount[i];
1377  x = 0;
1378  y = 0;
1379  linesize = s->linesize[c];
1380 
1381  if(bits>8) linesize /= 2;
1382 
1383  for (j = 0; j < n; j++) {
1384  int pred;
1385 
1386  ret = mjpeg_decode_dc(s, s->dc_index[i], &dc);
1387  if (ret < 0)
1388  return ret;
1389 
1390  if ( h * mb_x + x >= s->width
1391  || v * mb_y + y >= s->height) {
1392  // Nothing to do
1393  } else if (bits<=8) {
1394  ptr = s->picture_ptr->data[c] +
1395  (linesize * (v * mb_y + y)) +
1396  (h * mb_x + x); //FIXME optimize this crap
1397  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1398 
1399  pred &= mask;
1400  *ptr = pred + ((unsigned)dc << point_transform);
1401  }else{
1402  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1403  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1404 
1405  pred &= mask;
1406  *ptr16= pred + ((unsigned)dc << point_transform);
1407  }
1408 
1409  if (++x == h) {
1410  x = 0;
1411  y++;
1412  }
1413  }
1414  }
1415  }
1416  if (s->restart_interval && !--s->restart_count) {
1417  align_get_bits(&s->gb);
1418  skip_bits(&s->gb, 16); /* skip RSTn */
1419  }
1420  }
1421  }
1422  return 0;
1423 }
1424 
1425 static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s,
1426  uint8_t *dst, const uint8_t *src,
1427  int linesize, int lowres)
1428 {
1429  switch (lowres) {
1430  case 0: s->copy_block(dst, src, linesize, 8);
1431  break;
1432  case 1: copy_block4(dst, src, linesize, linesize, 4);
1433  break;
1434  case 2: copy_block2(dst, src, linesize, linesize, 2);
1435  break;
1436  case 3: *dst = *src;
1437  break;
1438  }
1439 }
1440 
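/* Scale decoded samples up so bit depths that are not a multiple of 8 fill the full 8- or 16-bit output range. */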
1441 static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
1442 {
1443  int block_x, block_y;
1444  int size = 8 >> s->avctx->lowres;
1445  if (s->bits > 8) {
1446  for (block_y=0; block_y<size; block_y++)
1447  for (block_x=0; block_x<size; block_x++)
1448  *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 - s->bits;
1449  } else {
1450  for (block_y=0; block_y<size; block_y++)
1451  for (block_x=0; block_x<size; block_x++)
1452  *(ptr + block_x + block_y*linesize) <<= 8 - s->bits;
1453  }
1454 }
1455 
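/* Decode one scan MCU by MCU: blocks are either copied from the reference frame (when masked out) or
   Huffman-decoded, dequantized and inverse-transformed; in progressive mode only the DC coefficients are handled here. */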
1456 static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
1457  int Al, const uint8_t *mb_bitmask,
1458  int mb_bitmask_size,
1459  const AVFrame *reference)
1460 {
1461  int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
1462  uint8_t *data[MAX_COMPONENTS];
1463  const uint8_t *reference_data[MAX_COMPONENTS];
1464  int linesize[MAX_COMPONENTS];
1465  GetBitContext mb_bitmask_gb = {0}; // initialize to silence gcc warning
1466  int bytes_per_pixel = 1 + (s->bits > 8);
1467 
1468  if (mb_bitmask) {
1469  if (mb_bitmask_size != (s->mb_width * s->mb_height + 7)>>3) {
1470  av_log(s->avctx, AV_LOG_ERROR, "mb_bitmask_size mismatches\n");
1471  return AVERROR_INVALIDDATA;
1472  }
1473  init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height);
1474  }
1475 
1476  s->restart_count = 0;
1477 
1478  av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &chroma_h_shift,
1479  &chroma_v_shift);
1480  chroma_width = AV_CEIL_RSHIFT(s->width, chroma_h_shift);
1481  chroma_height = AV_CEIL_RSHIFT(s->height, chroma_v_shift);
1482 
1483  for (i = 0; i < nb_components; i++) {
1484  int c = s->comp_index[i];
1485  data[c] = s->picture_ptr->data[c];
1486  reference_data[c] = reference ? reference->data[c] : NULL;
1487  linesize[c] = s->linesize[c];
1488  s->coefs_finished[c] |= 1;
1489  }
1490 
1491  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1492  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1493  const int copy_mb = mb_bitmask && !get_bits1(&mb_bitmask_gb);
1494 
1495  if (s->restart_interval && !s->restart_count)
1496  s->restart_count = s->restart_interval;
1497 
1498  if (get_bits_left(&s->gb) < 0) {
1499  av_log(s->avctx, AV_LOG_ERROR, "overread %d\n",
1500  -get_bits_left(&s->gb));
1501  return AVERROR_INVALIDDATA;
1502  }
1503  for (i = 0; i < nb_components; i++) {
1504  uint8_t *ptr;
1505  int n, h, v, x, y, c, j;
1506  int block_offset;
1507  n = s->nb_blocks[i];
1508  c = s->comp_index[i];
1509  h = s->h_scount[i];
1510  v = s->v_scount[i];
1511  x = 0;
1512  y = 0;
1513  for (j = 0; j < n; j++) {
1514  block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
1515  (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);
1516 
1517  if (s->interlaced && s->bottom_field)
1518  block_offset += linesize[c] >> 1;
1519  if ( 8*(h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width : s->width)
1520  && 8*(v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
1521  ptr = data[c] + block_offset;
1522  } else
1523  ptr = NULL;
1524  if (!s->progressive) {
1525  if (copy_mb) {
1526  if (ptr)
1527  mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
1528  linesize[c], s->avctx->lowres);
1529 
1530  } else {
1531  s->bdsp.clear_block(s->block);
1532  if (decode_block(s, s->block, i,
1533  s->dc_index[i], s->ac_index[i],
1534  s->quant_matrixes[s->quant_sindex[i]]) < 0) {
1535  av_log(s->avctx, AV_LOG_ERROR,
1536  "error y=%d x=%d\n", mb_y, mb_x);
1537  return AVERROR_INVALIDDATA;
1538  }
1539  if (ptr && linesize[c]) {
1540  s->idsp.idct_put(ptr, linesize[c], s->block);
1541  if (s->bits & 7)
1542  shift_output(s, ptr, linesize[c]);
1543  }
1544  }
1545  } else {
1546  int block_idx = s->block_stride[c] * (v * mb_y + y) +
1547  (h * mb_x + x);
1548  int16_t *block = s->blocks[c][block_idx];
1549  if (Ah)
1550  block[0] += get_bits1(&s->gb) *
1551  s->quant_matrixes[s->quant_sindex[i]][0] << Al;
1552  else if (decode_dc_progressive(s, block, i, s->dc_index[i],
1553  s->quant_matrixes[s->quant_sindex[i]],
1554  Al) < 0) {
1555  av_log(s->avctx, AV_LOG_ERROR,
1556  "error y=%d x=%d\n", mb_y, mb_x);
1557  return AVERROR_INVALIDDATA;
1558  }
1559  }
1560  ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
1561  ff_dlog(s->avctx, "%d %d %d %d %d %d %d %d \n",
1562  mb_x, mb_y, x, y, c, s->bottom_field,
1563  (v * mb_y + y) * 8, (h * mb_x + x) * 8);
1564  if (++x == h) {
1565  x = 0;
1566  y++;
1567  }
1568  }
1569  }
1570 
1571  handle_rstn(s, nb_components);
1572  }
1573  }
1574  return 0;
1575 }
1576 
1577 static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
1578  int se, int Ah, int Al)
1579 {
1580  int mb_x, mb_y;
1581  int EOBRUN = 0;
1582  int c = s->comp_index[0];
1583  uint16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];
1584 
1585  av_assert0(ss>=0 && Ah>=0 && Al>=0);
1586  if (se < ss || se > 63) {
1587  av_log(s->avctx, AV_LOG_ERROR, "SS/SE %d/%d is invalid\n", ss, se);
1588  return AVERROR_INVALIDDATA;
1589  }
1590 
1591  // s->coefs_finished is a bitmask for coefficients coded
1592  // ss and se are parameters telling start and end coefficients
1593  s->coefs_finished[c] |= (2ULL << se) - (1ULL << ss);
1594 
1595  s->restart_count = 0;
1596 
1597  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1598  int block_idx = mb_y * s->block_stride[c];
1599  int16_t (*block)[64] = &s->blocks[c][block_idx];
1600  uint8_t *last_nnz = &s->last_nnz[c][block_idx];
1601  if (get_bits_left(&s->gb) <= 0) {
1602  av_log(s->avctx, AV_LOG_ERROR, "bitstream truncated in mjpeg_decode_scan_progressive_ac\n");
1603  return AVERROR_INVALIDDATA;
1604  }
1605  for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
1606  int ret;
1607  if (s->restart_interval && !s->restart_count)
1608  s->restart_count = s->restart_interval;
1609 
1610  if (Ah)
1611  ret = decode_block_refinement(s, *block, last_nnz, s->ac_index[0],
1612  quant_matrix, ss, se, Al, &EOBRUN);
1613  else
1614  ret = decode_block_progressive(s, *block, last_nnz, s->ac_index[0],
1615  quant_matrix, ss, se, Al, &EOBRUN);
1616 
1617  if (ret >= 0 && get_bits_left(&s->gb) < 0)
1618  ret = AVERROR_INVALIDDATA;
1619  if (ret < 0) {
1620  av_log(s->avctx, AV_LOG_ERROR,
1621  "error y=%d x=%d\n", mb_y, mb_x);
1622  return AVERROR_INVALIDDATA;
1623  }
1624 
1625  if (handle_rstn(s, 0))
1626  EOBRUN = 0;
1627  }
1628  }
1629  return 0;
1630 }
1631 
1632 static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
1633 {
1634  int mb_x, mb_y;
1635  int c;
1636  const int bytes_per_pixel = 1 + (s->bits > 8);
1637  const int block_size = s->lossless ? 1 : 8;
1638 
1639  for (c = 0; c < s->nb_components; c++) {
1640  uint8_t *data = s->picture_ptr->data[c];
1641  int linesize = s->linesize[c];
1642  int h = s->h_max / s->h_count[c];
1643  int v = s->v_max / s->v_count[c];
1644  int mb_width = (s->width + h * block_size - 1) / (h * block_size);
1645  int mb_height = (s->height + v * block_size - 1) / (v * block_size);
1646 
1647  if (~s->coefs_finished[c])
1648  av_log(s->avctx, AV_LOG_WARNING, "component %d is incomplete\n", c);
1649 
1650  if (s->interlaced && s->bottom_field)
1651  data += linesize >> 1;
1652 
1653  for (mb_y = 0; mb_y < mb_height; mb_y++) {
1654  uint8_t *ptr = data + (mb_y * linesize * 8 >> s->avctx->lowres);
1655  int block_idx = mb_y * s->block_stride[c];
1656  int16_t (*block)[64] = &s->blocks[c][block_idx];
1657  for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
1658  s->idsp.idct_put(ptr, linesize, *block);
1659  if (s->bits & 7)
1660  shift_output(s, ptr, linesize);
1661  ptr += bytes_per_pixel*8 >> s->avctx->lowres;
1662  }
1663  }
1664  }
1665 }
1666 
1667 int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask,
1668  int mb_bitmask_size, const AVFrame *reference)
1669 {
1670  int len, nb_components, i, h, v, predictor, point_transform;
1671  int index, id, ret;
1672  const int block_size = s->lossless ? 1 : 8;
1673  int ilv, prev_shift;
1674 
1675  if (!s->got_picture) {
1676  av_log(s->avctx, AV_LOG_WARNING,
1677  "Can not process SOS before SOF, skipping\n");
1678  return -1;
1679  }
1680 
1681  /* XXX: verify len field validity */
1682  len = get_bits(&s->gb, 16);
1683  nb_components = get_bits(&s->gb, 8);
1684  if (nb_components == 0 || nb_components > MAX_COMPONENTS) {
1685  avpriv_report_missing_feature(s->avctx,
1686  "decode_sos: nb_components (%d)",
1687  nb_components);
1688  return AVERROR_PATCHWELCOME;
1689  }
1690  if (len != 6 + 2 * nb_components) {
1691  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: invalid len (%d)\n", len);
1692  return AVERROR_INVALIDDATA;
1693  }
1694  for (i = 0; i < nb_components; i++) {
1695  id = get_bits(&s->gb, 8);
1696  av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id);
1697  /* find component index */
1698  for (index = 0; index < s->nb_components; index++)
1699  if (id == s->component_id[index])
1700  break;
1701  if (index == s->nb_components) {
1702  av_log(s->avctx, AV_LOG_ERROR,
1703  "decode_sos: index(%d) out of components\n", index);
1704  return AVERROR_INVALIDDATA;
1705  }
1706  /* Metasoft MJPEG codec has Cb and Cr swapped */
1707  if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J')
1708  && nb_components == 3 && s->nb_components == 3 && i)
1709  index = 3 - i;
1710 
1711  s->quant_sindex[i] = s->quant_index[index];
1712  s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
1713  s->h_scount[i] = s->h_count[index];
1714  s->v_scount[i] = s->v_count[index];
1715 
1716  s->comp_index[i] = index;
1717 
1718  s->dc_index[i] = get_bits(&s->gb, 4);
1719  s->ac_index[i] = get_bits(&s->gb, 4);
1720 
1721  if (s->dc_index[i] < 0 || s->ac_index[i] < 0 ||
1722  s->dc_index[i] >= 4 || s->ac_index[i] >= 4)
1723  goto out_of_range;
1724  if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table))
1725  goto out_of_range;
1726  }
1727 
1728  predictor = get_bits(&s->gb, 8); /* JPEG Ss / lossless JPEG predictor /JPEG-LS NEAR */
1729  ilv = get_bits(&s->gb, 8); /* JPEG Se / JPEG-LS ILV */
1730  if(s->avctx->codec_tag != AV_RL32("CJPG")){
1731  prev_shift = get_bits(&s->gb, 4); /* Ah */
1732  point_transform = get_bits(&s->gb, 4); /* Al */
1733  }else
1734  prev_shift = point_transform = 0;
1735 
1736  if (nb_components > 1) {
1737  /* interleaved stream */
1738  s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size);
1739  s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
1740  } else if (!s->ls) { /* skip this for JPEG-LS */
1741  h = s->h_max / s->h_scount[0];
1742  v = s->v_max / s->v_scount[0];
1743  s->mb_width = (s->width + h * block_size - 1) / (h * block_size);
1744  s->mb_height = (s->height + v * block_size - 1) / (v * block_size);
1745  s->nb_blocks[0] = 1;
1746  s->h_scount[0] = 1;
1747  s->v_scount[0] = 1;
1748  }
1749 
1750  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1751  av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s comp:%d\n",
1752  s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
1753  predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod,
1754  s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), nb_components);
1755 
1756 
1757  /* mjpeg-b can have padding bytes between sos and image data, skip them */
1758  for (i = s->mjpb_skiptosod; i > 0; i--)
1759  skip_bits(&s->gb, 8);
1760 
1761 next_field:
1762  for (i = 0; i < nb_components; i++)
1763  s->last_dc[i] = (4 << s->bits);
1764 
1765  if (s->avctx->hwaccel) {
1766  int bytes_to_start = get_bits_count(&s->gb) / 8;
1767  av_assert0(bytes_to_start >= 0 &&
1768  s->raw_scan_buffer_size >= bytes_to_start);
1769 
1770  ret = FF_HW_CALL(s->avctx, decode_slice,
1771  s->raw_scan_buffer + bytes_to_start,
1772  s->raw_scan_buffer_size - bytes_to_start);
1773  if (ret < 0)
1774  return ret;
1775 
1776  } else if (s->lossless) {
1777  av_assert0(s->picture_ptr == s->picture);
1778  if (CONFIG_JPEGLS_DECODER && s->ls) {
1779 // for () {
1780 // reset_ls_coding_parameters(s, 0);
1781 
1782  if ((ret = ff_jpegls_decode_picture(s, predictor,
1783  point_transform, ilv)) < 0)
1784  return ret;
1785  } else {
1786  if (s->rgb || s->bayer) {
1787  if ((ret = ljpeg_decode_rgb_scan(s, nb_components, predictor, point_transform)) < 0)
1788  return ret;
1789  } else {
1790  if ((ret = ljpeg_decode_yuv_scan(s, predictor,
1791  point_transform,
1792  nb_components)) < 0)
1793  return ret;
1794  }
1795  }
1796  } else {
1797  if (s->progressive && predictor) {
1798  av_assert0(s->picture_ptr == s->picture);
1799  if ((ret = mjpeg_decode_scan_progressive_ac(s, predictor,
1800  ilv, prev_shift,
1801  point_transform)) < 0)
1802  return ret;
1803  } else {
1804  if ((ret = mjpeg_decode_scan(s, nb_components,
1805  prev_shift, point_transform,
1806  mb_bitmask, mb_bitmask_size, reference)) < 0)
1807  return ret;
1808  }
1809  }
1810 
1811  if (s->interlaced &&
1812  get_bits_left(&s->gb) > 32 &&
1813  show_bits(&s->gb, 8) == 0xFF) {
1814  GetBitContext bak = s->gb;
1815  align_get_bits(&bak);
1816  if (show_bits(&bak, 16) == 0xFFD1) {
1817  av_log(s->avctx, AV_LOG_DEBUG, "AVRn interlaced picture marker found\n");
1818  s->gb = bak;
1819  skip_bits(&s->gb, 16);
1820  s->bottom_field ^= 1;
1821 
1822  goto next_field;
1823  }
1824  }
1825 
1826  emms_c();
1827  return 0;
1828  out_of_range:
1829  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n");
1830  return AVERROR_INVALIDDATA;
1831 }
1832 
1833  static int mjpeg_decode_dri(MJpegDecodeContext *s)
1834  {
1835  if (get_bits(&s->gb, 16) != 4)
1836  return AVERROR_INVALIDDATA;
1837  s->restart_interval = get_bits(&s->gb, 16);
1838  s->restart_count = 0;
1839  av_log(s->avctx, AV_LOG_DEBUG, "restart interval: %d\n",
1840  s->restart_interval);
1841 
1842  return 0;
1843 }
1844 
1845  static int mjpeg_decode_app(MJpegDecodeContext *s)
1846  {
1847  int len, id, i;
1848 
1849  len = get_bits(&s->gb, 16);
1850  if (len < 2)
1851  return AVERROR_INVALIDDATA;
1852  len -= 2;
1853 
1854  if (len < 4) {
1855  if (s->avctx->err_recognition & AV_EF_EXPLODE)
1856  return AVERROR_INVALIDDATA;
1857  av_log(s->avctx, AV_LOG_VERBOSE, "skipping APPx stub (len=%" PRId32 ")\n", len);
1858  goto out;
1859  }
1860 
1861  if (8 * len > get_bits_left(&s->gb))
1862  return AVERROR_INVALIDDATA;
1863 
1864  id = get_bits_long(&s->gb, 32);
1865  len -= 4;
1866 
1867  if (s->avctx->debug & FF_DEBUG_STARTCODE)
1868  av_log(s->avctx, AV_LOG_DEBUG, "APPx (%s / %8X) len=%d\n",
1869  av_fourcc2str(av_bswap32(id)), id, len);
1870 
1871  /* Buggy AVID, it puts EOI only at every 10th frame. */
1872  /* Also, this fourcc is used by non-AVID files too; it holds some
1873  information, but it is always present in AVID-created files. */
1874  if (id == AV_RB32("AVI1")) {
1875  /* structure:
1876  4bytes AVI1
1877  1bytes polarity
1878  1bytes always zero
1879  4bytes field_size
1880  4bytes field_size_less_padding
1881  */
1882  s->buggy_avid = 1;
1883  i = get_bits(&s->gb, 8); len--;
1884  av_log(s->avctx, AV_LOG_DEBUG, "polarity %d\n", i);
1885  goto out;
1886  }
1887 
1888  if (id == AV_RB32("JFIF")) {
1889  int t_w, t_h, v1, v2;
1890  if (len < 8)
1891  goto out;
1892  skip_bits(&s->gb, 8); /* the trailing zero-byte */
1893  v1 = get_bits(&s->gb, 8);
1894  v2 = get_bits(&s->gb, 8);
1895  skip_bits(&s->gb, 8);
1896 
1897  s->avctx->sample_aspect_ratio.num = get_bits(&s->gb, 16);
1898  s->avctx->sample_aspect_ratio.den = get_bits(&s->gb, 16);
1899  if ( s->avctx->sample_aspect_ratio.num <= 0
1900  || s->avctx->sample_aspect_ratio.den <= 0) {
1901  s->avctx->sample_aspect_ratio.num = 0;
1902  s->avctx->sample_aspect_ratio.den = 1;
1903  }
1904 
1905  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1906  av_log(s->avctx, AV_LOG_INFO,
1907  "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
1908  v1, v2,
1909  s->avctx->sample_aspect_ratio.num,
1910  s->avctx->sample_aspect_ratio.den);
1911 
1912  len -= 8;
1913  if (len >= 2) {
1914  t_w = get_bits(&s->gb, 8);
1915  t_h = get_bits(&s->gb, 8);
1916  if (t_w && t_h) {
1917  /* skip thumbnail */
1918  if (len - 10 - (t_w * t_h * 3) > 0)
1919  len -= t_w * t_h * 3;
1920  }
1921  len -= 2;
1922  }
1923  goto out;
1924  }
1925 
1926  if ( id == AV_RB32("Adob")
1927  && len >= 8
1928  && show_bits(&s->gb, 8) == 'e'
1929  && show_bits_long(&s->gb, 32) != AV_RB32("e_CM")) {
1930  skip_bits(&s->gb, 8); /* 'e' */
1931  skip_bits(&s->gb, 16); /* version */
1932  skip_bits(&s->gb, 16); /* flags0 */
1933  skip_bits(&s->gb, 16); /* flags1 */
1934  s->adobe_transform = get_bits(&s->gb, 8);
1935  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1936  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found, transform=%d\n", s->adobe_transform);
1937  len -= 8;
1938  goto out;
1939  }
1940 
1941  if (id == AV_RB32("LJIF")) {
1942  int rgb = s->rgb;
1943  int pegasus_rct = s->pegasus_rct;
1944  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1945  av_log(s->avctx, AV_LOG_INFO,
1946  "Pegasus lossless jpeg header found\n");
1947  skip_bits(&s->gb, 16); /* version ? */
1948  skip_bits(&s->gb, 16); /* unknown always 0? */
1949  skip_bits(&s->gb, 16); /* unknown always 0? */
1950  skip_bits(&s->gb, 16); /* unknown always 0? */
1951  switch (i=get_bits(&s->gb, 8)) {
1952  case 1:
1953  rgb = 1;
1954  pegasus_rct = 0;
1955  break;
1956  case 2:
1957  rgb = 1;
1958  pegasus_rct = 1;
1959  break;
1960  default:
1961  av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i);
1962  }
1963 
1964  len -= 9;
1965  if (s->bayer)
1966  goto out;
1967  if (s->got_picture)
1968  if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
1969  av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
1970  goto out;
1971  }
1972 
1973  s->rgb = rgb;
1974  s->pegasus_rct = pegasus_rct;
1975 
1976  goto out;
1977  }
1978  if (id == AV_RL32("colr") && len > 0) {
1979  s->colr = get_bits(&s->gb, 8);
1980  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1981  av_log(s->avctx, AV_LOG_INFO, "COLR %d\n", s->colr);
1982  len --;
1983  goto out;
1984  }
1985  if (id == AV_RL32("xfrm") && len > 0) {
1986  s->xfrm = get_bits(&s->gb, 8);
1987  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1988  av_log(s->avctx, AV_LOG_INFO, "XFRM %d\n", s->xfrm);
1989  len --;
1990  goto out;
1991  }
1992 
1993  /* JPS extension by VRex */
1994  if (s->start_code == APP3 && id == AV_RB32("_JPS") && len >= 10) {
1995  int flags, layout, type;
1996  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1997  av_log(s->avctx, AV_LOG_INFO, "_JPSJPS_\n");
1998 
1999  skip_bits(&s->gb, 32); len -= 4; /* JPS_ */
2000  skip_bits(&s->gb, 16); len -= 2; /* block length */
2001  skip_bits(&s->gb, 8); /* reserved */
2002  flags = get_bits(&s->gb, 8);
2003  layout = get_bits(&s->gb, 8);
2004  type = get_bits(&s->gb, 8);
2005  len -= 4;
2006 
2007  av_freep(&s->stereo3d);
2008  s->stereo3d = av_stereo3d_alloc();
2009  if (!s->stereo3d) {
2010  goto out;
2011  }
2012  if (type == 0) {
2013  s->stereo3d->type = AV_STEREO3D_2D;
2014  } else if (type == 1) {
2015  switch (layout) {
2016  case 0x01:
2017  s->stereo3d->type = AV_STEREO3D_LINES;
2018  break;
2019  case 0x02:
2020  s->stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
2021  break;
2022  case 0x03:
2023  s->stereo3d->type = AV_STEREO3D_TOPBOTTOM;
2024  break;
2025  }
2026  if (!(flags & 0x04)) {
2027  s->stereo3d->flags = AV_STEREO3D_FLAG_INVERT;
2028  }
2029  }
2030  goto out;
2031  }
2032 
2033  /* EXIF metadata */
2034  if (s->start_code == APP1 && id == AV_RB32("Exif") && len >= 2) {
2035  int ret;
2036  const uint8_t *aligned;
2037 
2038  skip_bits(&s->gb, 16); // skip padding
2039  len -= 2;
2040 
2041  // init byte wise reading
2042  aligned = align_get_bits(&s->gb);
2043 
2044  ret = av_exif_parse_buffer(s->avctx, aligned, len, &s->exif_metadata, AV_EXIF_TIFF_HEADER);
2045  if (ret < 0) {
2046  av_log(s->avctx, AV_LOG_WARNING, "unable to parse EXIF buffer\n");
2047  goto out;
2048  }
2049 
2050  skip_bits(&s->gb, ret << 3);
2051  len -= ret;
2052 
2053  goto out;
2054  }
2055 
2056  /* Apple MJPEG-A */
2057  if ((s->start_code == APP1) && (len > (0x28 - 8))) {
2058  id = get_bits_long(&s->gb, 32);
2059  len -= 4;
2060  /* Apple MJPEG-A */
2061  if (id == AV_RB32("mjpg")) {
2062  /* structure:
2063  4bytes field size
2064  4bytes pad field size
2065  4bytes next off
2066  4bytes quant off
2067  4bytes huff off
2068  4bytes image off
2069  4bytes scan off
2070  4bytes data off
2071  */
2072  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2073  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n");
2074  }
2075  }
2076 
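 /* An ICC profile can be split across several APP2 markers; each chunk carries
  * its 1-based sequence number and the total marker count. Chunks are collected
  * here and reassembled into frame side data once all of them have been read. */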
2077  if (s->start_code == APP2 && id == AV_RB32("ICC_") && len >= 10) {
2078  int id2;
2079  unsigned seqno;
2080  unsigned nummarkers;
2081 
2082  id = get_bits_long(&s->gb, 32);
2083  id2 = get_bits(&s->gb, 24);
2084  len -= 7;
2085  if (id != AV_RB32("PROF") || id2 != AV_RB24("ILE")) {
2086  av_log(s->avctx, AV_LOG_WARNING, "Invalid ICC_PROFILE header in APP2\n");
2087  goto out;
2088  }
2089 
2090  skip_bits(&s->gb, 8);
2091  seqno = get_bits(&s->gb, 8);
2092  len -= 2;
2093  if (seqno == 0) {
2094  av_log(s->avctx, AV_LOG_WARNING, "Invalid sequence number in APP2\n");
2095  goto out;
2096  }
2097 
2098  nummarkers = get_bits(&s->gb, 8);
2099  len -= 1;
2100  if (nummarkers == 0) {
2101  av_log(s->avctx, AV_LOG_WARNING, "Invalid number of markers coded in APP2\n");
2102  goto out;
2103  } else if (s->iccnum != 0 && nummarkers != s->iccnum) {
2104  av_log(s->avctx, AV_LOG_WARNING, "Mismatch in coded number of ICC markers between markers\n");
2105  goto out;
2106  } else if (seqno > nummarkers) {
2107  av_log(s->avctx, AV_LOG_WARNING, "Mismatching sequence number and coded number of ICC markers\n");
2108  goto out;
2109  }
2110 
2111  /* Allocate if this is the first APP2 we've seen. */
2112  if (s->iccnum == 0) {
2113  if (!FF_ALLOCZ_TYPED_ARRAY(s->iccentries, nummarkers)) {
2114  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data arrays\n");
2115  return AVERROR(ENOMEM);
2116  }
2117  s->iccnum = nummarkers;
2118  }
2119 
2120  if (s->iccentries[seqno - 1].data) {
2121  av_log(s->avctx, AV_LOG_WARNING, "Duplicate ICC sequence number\n");
2122  goto out;
2123  }
2124 
2125  s->iccentries[seqno - 1].length = len;
2126  s->iccentries[seqno - 1].data = av_malloc(len);
2127  if (!s->iccentries[seqno - 1].data) {
2128  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data buffer\n");
2129  return AVERROR(ENOMEM);
2130  }
2131 
2132  memcpy(s->iccentries[seqno - 1].data, align_get_bits(&s->gb), len);
2133  skip_bits(&s->gb, len << 3);
2134  len = 0;
2135  s->iccread++;
2136 
2137  if (s->iccread > s->iccnum)
2138  av_log(s->avctx, AV_LOG_WARNING, "Read more ICC markers than are supposed to be coded\n");
2139  }
2140 
2141 out:
2142  /* slow but needed for extreme adobe jpegs */
2143  if (len < 0)
2144  av_log(s->avctx, AV_LOG_ERROR,
2145  "mjpeg: error, decode_app parser read over the end\n");
2146  while (len-- > 0)
2147  skip_bits(&s->gb, 8);
2148 
2149  return 0;
2150 }
2151 
2152  static int mjpeg_decode_com(MJpegDecodeContext *s)
2153  {
2154  int len = get_bits(&s->gb, 16);
2155  if (len >= 2 && 8 * len - 16 <= get_bits_left(&s->gb)) {
2156  int i;
2157  char *cbuf = av_malloc(len - 1);
2158  if (!cbuf)
2159  return AVERROR(ENOMEM);
2160 
2161  for (i = 0; i < len - 2; i++)
2162  cbuf[i] = get_bits(&s->gb, 8);
2163  if (i > 0 && cbuf[i - 1] == '\n')
2164  cbuf[i - 1] = 0;
2165  else
2166  cbuf[i] = 0;
2167 
2168  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2169  av_log(s->avctx, AV_LOG_INFO, "comment: '%s'\n", cbuf);
2170 
2171  /* buggy avid, it puts EOI only at every 10th frame */
2172  if (!strncmp(cbuf, "AVID", 4)) {
2173  parse_avid(s, cbuf, len);
2174  } else if (!strcmp(cbuf, "CS=ITU601"))
2175  s->cs_itu601 = 1;
2176  else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
2177  (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
2178  s->flipped = 1;
2179  else if (!strcmp(cbuf, "MULTISCOPE II")) {
2180  s->avctx->sample_aspect_ratio = (AVRational) { 1, 2 };
2181  s->multiscope = 2;
2182  }
2183 
2184  av_free(cbuf);
2185  }
2186 
2187  return 0;
2188 }
2189 
2190 /* return the 8 bit start code value and update the search
2191  state. Return -1 if no start code found */
2192 static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
2193 {
2194  const uint8_t *buf_ptr;
2195  unsigned int v, v2;
2196  int val;
2197  int skipped = 0;
2198 
2199  buf_ptr = *pbuf_ptr;
2200  while (buf_end - buf_ptr > 1) {
2201  v = *buf_ptr++;
2202  v2 = *buf_ptr;
2203  if ((v == 0xff) && (v2 >= SOF0) && (v2 <= COM) && buf_ptr < buf_end) {
2204  val = *buf_ptr++;
2205  goto found;
2206  }
2207  skipped++;
2208  }
2209  buf_ptr = buf_end;
2210  val = -1;
2211 found:
2212  ff_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
2213  *pbuf_ptr = buf_ptr;
2214  return val;
2215 }
2216 
2217  int ff_mjpeg_find_marker(MJpegDecodeContext *s,
2218  const uint8_t **buf_ptr, const uint8_t *buf_end,
2219  const uint8_t **unescaped_buf_ptr,
2220  int *unescaped_buf_size)
2221 {
2222  int start_code;
2223  start_code = find_marker(buf_ptr, buf_end);
2224 
2225  av_fast_padded_malloc(&s->buffer, &s->buffer_size, buf_end - *buf_ptr);
2226  if (!s->buffer)
2227  return AVERROR(ENOMEM);
2228 
2229  /* unescape the SOS buffer; JPEG-LS needs special treatment */
2230  if (start_code == SOS && !s->ls) {
2231  const uint8_t *src = *buf_ptr;
2232  const uint8_t *ptr = src;
2233  uint8_t *dst = s->buffer;
2234 
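 /* copy_data_segment(skip): flush the bytes gathered since the last flush
  * (src..ptr), minus the trailing 'skip' bytes (0xFF stuffing / marker prefix),
  * into the unescaped output buffer. */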
2235  #define copy_data_segment(skip) do { \
2236  ptrdiff_t length = (ptr - src) - (skip); \
2237  if (length > 0) { \
2238  memcpy(dst, src, length); \
2239  dst += length; \
2240  src = ptr; \
2241  } \
2242  } while (0)
2243 
2244  if (s->avctx->codec_id == AV_CODEC_ID_THP) {
2245  ptr = buf_end;
2246  copy_data_segment(0);
2247  } else {
2248  while (ptr < buf_end) {
2249  uint8_t x = *(ptr++);
2250 
2251  if (x == 0xff) {
2252  ptrdiff_t skip = 0;
2253  while (ptr < buf_end && x == 0xff) {
2254  x = *(ptr++);
2255  skip++;
2256  }
2257 
2258  /* 0xFF, 0xFF, ... */
2259  if (skip > 1) {
2260  copy_data_segment(skip);
2261 
2262  /* decrement src as it is equal to ptr after the
2263  * copy_data_segment macro and we might want to
2264  * copy the current value of x later on */
2265  src--;
2266  }
2267 
2268  if (x < RST0 || x > RST7) {
2269  copy_data_segment(1);
2270  if (x)
2271  break;
2272  }
2273  }
2274  }
2275  if (src < ptr)
2276  copy_data_segment(0);
2277  }
2278  #undef copy_data_segment
2279 
2280  *unescaped_buf_ptr = s->buffer;
2281  *unescaped_buf_size = dst - s->buffer;
2282  memset(s->buffer + *unescaped_buf_size, 0,
2283  AV_INPUT_BUFFER_PADDING_SIZE);
2284 
2285  av_log(s->avctx, AV_LOG_DEBUG, "escaping removed %"PTRDIFF_SPECIFIER" bytes\n",
2286  (buf_end - *buf_ptr) - (dst - s->buffer));
2287  } else if (start_code == SOS && s->ls) {
2288  const uint8_t *src = *buf_ptr;
2289  uint8_t *dst = s->buffer;
2290  int bit_count = 0;
2291  int t = 0, b = 0;
2292  PutBitContext pb;
2293 
2294  /* find marker */
2295  while (src + t < buf_end) {
2296  uint8_t x = src[t++];
2297  if (x == 0xff) {
2298  while ((src + t < buf_end) && x == 0xff)
2299  x = src[t++];
2300  if (x & 0x80) {
2301  t -= FFMIN(2, t);
2302  break;
2303  }
2304  }
2305  }
2306  bit_count = t * 8;
2307  init_put_bits(&pb, dst, t);
2308 
2309  /* unescape bitstream */
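 /* in JPEG-LS a 0xFF byte is followed by a byte whose MSB is 0 and only the
  * low 7 bits of that byte carry payload, so each escape shortens the stream
  * by one bit (hence bit_count--) */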
2310  while (b < t) {
2311  uint8_t x = src[b++];
2312  put_bits(&pb, 8, x);
2313  if (x == 0xFF && b < t) {
2314  x = src[b++];
2315  if (x & 0x80) {
2316  av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
2317  x &= 0x7f;
2318  }
2319  put_bits(&pb, 7, x);
2320  bit_count--;
2321  }
2322  }
2323  flush_put_bits(&pb);
2324 
2325  *unescaped_buf_ptr = dst;
2326  *unescaped_buf_size = (bit_count + 7) >> 3;
2327  memset(s->buffer + *unescaped_buf_size, 0,
2328  AV_INPUT_BUFFER_PADDING_SIZE);
2329  } else {
2330  *unescaped_buf_ptr = *buf_ptr;
2331  *unescaped_buf_size = buf_end - *buf_ptr;
2332  }
2333 
2334  return start_code;
2335 }
2336 
2337  static void reset_icc_profile(MJpegDecodeContext *s)
2338  {
2339  int i;
2340 
2341  if (s->iccentries) {
2342  for (i = 0; i < s->iccnum; i++)
2343  av_freep(&s->iccentries[i].data);
2344  av_freep(&s->iccentries);
2345  }
2346 
2347  s->iccread = 0;
2348  s->iccnum = 0;
2349 }
2350 
2351  int ff_mjpeg_decode_frame_from_buf(AVCodecContext *avctx, AVFrame *frame,
2352  int *got_frame, const AVPacket *avpkt,
2353  const uint8_t *buf, const int buf_size)
2354 {
2355  MJpegDecodeContext *s = avctx->priv_data;
2356  const uint8_t *buf_end, *buf_ptr;
2357  const uint8_t *unescaped_buf_ptr;
2358  int hshift, vshift;
2359  int unescaped_buf_size;
2360  int start_code;
2361  int index;
2362  int ret = 0;
2363  int is16bit;
2364 
2365  s->force_pal8 = 0;
2366 
2367  s->buf_size = buf_size;
2368 
2369  av_exif_free(&s->exif_metadata);
2370  av_freep(&s->stereo3d);
2371  s->adobe_transform = -1;
2372 
2373  if (s->iccnum != 0)
2374  reset_icc_profile(s);
2375 
2376 redo_for_pal8:
2377  buf_ptr = buf;
2378  buf_end = buf + buf_size;
2379  while (buf_ptr < buf_end) {
2380  /* find the start of the next marker */
2381  start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end,
2382  &unescaped_buf_ptr,
2383  &unescaped_buf_size);
2384  /* EOF */
2385  if (start_code < 0) {
2386  break;
2387  } else if (unescaped_buf_size > INT_MAX / 8) {
2388  av_log(avctx, AV_LOG_ERROR,
2389  "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
2390  start_code, unescaped_buf_size, buf_size);
2391  return AVERROR_INVALIDDATA;
2392  }
2393  av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%"PTRDIFF_SPECIFIER"\n",
2394  start_code, buf_end - buf_ptr);
2395 
2396  ret = init_get_bits8(&s->gb, unescaped_buf_ptr, unescaped_buf_size);
2397 
2398  if (ret < 0) {
2399  av_log(avctx, AV_LOG_ERROR, "invalid buffer\n");
2400  goto fail;
2401  }
2402 
2403  s->start_code = start_code;
2404  if (avctx->debug & FF_DEBUG_STARTCODE)
2405  av_log(avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code);
2406 
2407  /* process markers */
2408  if (start_code >= RST0 && start_code <= RST7) {
2409  av_log(avctx, AV_LOG_DEBUG,
2410  "restart marker: %d\n", start_code & 0x0f);
2411  /* APP fields */
2412  } else if (start_code >= APP0 && start_code <= APP15) {
2413  if ((ret = mjpeg_decode_app(s)) < 0)
2414  av_log(avctx, AV_LOG_ERROR, "unable to decode APP fields: %s\n",
2415  av_err2str(ret));
2416  /* Comment */
2417  } else if (start_code == COM) {
2418  ret = mjpeg_decode_com(s);
2419  if (ret < 0)
2420  return ret;
2421  } else if (start_code == DQT) {
2422  ret = ff_mjpeg_decode_dqt(s);
2423  if (ret < 0)
2424  return ret;
2425  }
2426 
2427  ret = -1;
2428 
2429  if (!CONFIG_JPEGLS_DECODER &&
2430  (start_code == SOF48 || start_code == LSE)) {
2431  av_log(avctx, AV_LOG_ERROR, "JPEG-LS support not enabled.\n");
2432  return AVERROR(ENOSYS);
2433  }
2434 
2435  if (avctx->skip_frame == AVDISCARD_ALL) {
2436  switch(start_code) {
2437  case SOF0:
2438  case SOF1:
2439  case SOF2:
2440  case SOF3:
2441  case SOF48:
2442  break;
2443  default:
2444  goto skip;
2445  }
2446  }
2447 
2448  switch (start_code) {
2449  case SOI:
2450  s->restart_interval = 0;
2451  s->restart_count = 0;
2452  s->raw_image_buffer = buf_ptr;
2453  s->raw_image_buffer_size = buf_end - buf_ptr;
2454  /* nothing to do on SOI */
2455  break;
2456  case DHT:
2457  if ((ret = ff_mjpeg_decode_dht(s)) < 0) {
2458  av_log(avctx, AV_LOG_ERROR, "huffman table decode error\n");
2459  goto fail;
2460  }
2461  break;
2462  case SOF0:
2463  case SOF1:
2464  if (start_code == SOF0)
2465  s->avctx->profile = AV_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT;
2466  else
2467  s->avctx->profile = AV_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT;
2468  s->lossless = 0;
2469  s->ls = 0;
2470  s->progressive = 0;
2471  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2472  goto fail;
2473  break;
2474  case SOF2:
2475  s->avctx->profile = AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT;
2476  s->lossless = 0;
2477  s->ls = 0;
2478  s->progressive = 1;
2479  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2480  goto fail;
2481  break;
2482  case SOF3:
2483  s->avctx->profile = AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS;
2484 #if FF_API_CODEC_PROPS
2485 FF_DISABLE_DEPRECATION_WARNINGS
2486  avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2487 FF_ENABLE_DEPRECATION_WARNINGS
2488 #endif
2489  s->lossless = 1;
2490  s->ls = 0;
2491  s->progressive = 0;
2492  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2493  goto fail;
2494  break;
2495  case SOF48:
2496  s->avctx->profile = AV_PROFILE_MJPEG_JPEG_LS;
2497 #if FF_API_CODEC_PROPS
2498 FF_DISABLE_DEPRECATION_WARNINGS
2499  avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2500 FF_ENABLE_DEPRECATION_WARNINGS
2501 #endif
2502  s->lossless = 1;
2503  s->ls = 1;
2504  s->progressive = 0;
2505  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2506  goto fail;
2507  break;
2508  case LSE:
2509  if (!CONFIG_JPEGLS_DECODER ||
2510  (ret = ff_jpegls_decode_lse(s)) < 0)
2511  goto fail;
2512  if (ret == 1)
2513  goto redo_for_pal8;
2514  break;
2515  case EOI:
2516 eoi_parser:
2517  if (!avctx->hwaccel &&
2518  s->progressive && s->cur_scan && s->got_picture)
2519  mjpeg_idct_scan_progressive_ac(s);
2520  s->cur_scan = 0;
2521  if (!s->got_picture) {
2522  av_log(avctx, AV_LOG_WARNING,
2523  "Found EOI before any SOF, ignoring\n");
2524  break;
2525  }
2526  if (s->interlaced) {
2527  s->bottom_field ^= 1;
2528  /* if not bottom field, do not output image yet */
2529  if (s->bottom_field == !s->interlace_polarity)
2530  break;
2531  }
2532  if (avctx->hwaccel) {
2533  ret = FF_HW_SIMPLE_CALL(avctx, end_frame);
2534  if (ret < 0)
2535  return ret;
2536 
2537  av_freep(&s->hwaccel_picture_private);
2538  }
2539  if ((ret = av_frame_ref(frame, s->picture_ptr)) < 0)
2540  return ret;
2541  if (s->lossless)
2542  frame->flags |= AV_FRAME_FLAG_LOSSLESS;
2543  *got_frame = 1;
2544  s->got_picture = 0;
2545 
2546  if (!s->lossless && avctx->debug & FF_DEBUG_QP) {
2547  int qp = FFMAX3(s->qscale[0],
2548  s->qscale[1],
2549  s->qscale[2]);
2550 
2551  av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", qp);
2552  }
2553 
2554  goto the_end;
2555  case SOS:
2556  s->raw_scan_buffer = buf_ptr;
2557  s->raw_scan_buffer_size = buf_end - buf_ptr;
2558 
2559  s->cur_scan++;
2560 
2561  if ((ret = ff_mjpeg_decode_sos(s, NULL, 0, NULL)) < 0 &&
2562  (avctx->err_recognition & AV_EF_EXPLODE))
2563  goto fail;
2564  break;
2565  case DRI:
2566  if ((ret = mjpeg_decode_dri(s)) < 0)
2567  return ret;
2568  break;
2569  case SOF5:
2570  case SOF6:
2571  case SOF7:
2572  case SOF9:
2573  case SOF10:
2574  case SOF11:
2575  case SOF13:
2576  case SOF14:
2577  case SOF15:
2578  case JPG:
2579  av_log(avctx, AV_LOG_ERROR,
2580  "mjpeg: unsupported coding type (%x)\n", start_code);
2581  break;
2582  }
2583 
2584  if (avctx->skip_frame == AVDISCARD_ALL) {
2585  switch(start_code) {
2586  case SOF0:
2587  case SOF1:
2588  case SOF2:
2589  case SOF3:
2590  case SOF48:
2591  s->got_picture = 0;
2592  goto the_end_no_picture;
2593  }
2594  }
2595 
2596 skip:
2597  /* done processing this start code; advance past the parsed bits */
2598  buf_ptr += (get_bits_count(&s->gb) + 7) / 8;
2599  av_log(avctx, AV_LOG_DEBUG,
2600  "marker parser used %d bytes (%d bits)\n",
2601  (get_bits_count(&s->gb) + 7) / 8, get_bits_count(&s->gb));
2602  }
2603  if (s->got_picture && s->cur_scan) {
2604  av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n");
2605  goto eoi_parser;
2606  }
2607  av_log(avctx, AV_LOG_FATAL, "No JPEG data found in image\n");
2608  return AVERROR_INVALIDDATA;
2609 fail:
2610  s->got_picture = 0;
2611  return ret;
2612 the_end:
2613 
2614  is16bit = av_pix_fmt_desc_get(avctx->pix_fmt)->comp[0].step > 1;
2615 
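 /* upscale_h / upscale_v mark planes whose JPEG sampling is lower than what
  * the selected output pixel format provides; such planes are stretched in
  * place below with simple nearest/averaging interpolation */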
2616  if (AV_RB32(s->upscale_h)) {
2617  int p;
2618  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2619  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2620  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2621  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2622  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2623  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2624  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2625  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2626  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2627  avctx->pix_fmt == AV_PIX_FMT_YUV420P16||
2628  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2629  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2630  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2631  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2632  );
2633  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
2634  if (ret)
2635  return ret;
2636 
2637  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2638  for (p = 0; p<s->nb_components; p++) {
2639  uint8_t *line = s->picture_ptr->data[p];
2640  int w = s->width;
2641  int h = s->height;
2642  if (!s->upscale_h[p])
2643  continue;
2644  if (p==1 || p==2) {
2645  w = AV_CEIL_RSHIFT(w, hshift);
2646  h = AV_CEIL_RSHIFT(h, vshift);
2647  }
2648  if (s->upscale_v[p] == 1)
2649  h = (h+1)>>1;
2650  av_assert0(w > 0);
2651  for (int i = 0; i < h; i++) {
2652  if (s->upscale_h[p] == 1) {
2653  if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
2654  else line[w - 1] = line[(w - 1) / 2];
2655  for (index = w - 2; index > 0; index--) {
2656  if (is16bit)
2657  ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
2658  else
2659  line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
2660  }
2661  } else if (s->upscale_h[p] == 2) {
2662  if (is16bit) {
2663  ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
2664  if (w > 1)
2665  ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
2666  } else {
2667  line[w - 1] = line[(w - 1) / 3];
2668  if (w > 1)
2669  line[w - 2] = line[w - 1];
2670  }
2671  for (index = w - 3; index > 0; index--) {
2672  line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
2673  }
2674  } else if (s->upscale_h[p] == 4){
2675  if (is16bit) {
2676  uint16_t *line16 = (uint16_t *) line;
2677  line16[w - 1] = line16[(w - 1) >> 2];
2678  if (w > 1)
2679  line16[w - 2] = (line16[(w - 1) >> 2] * 3 + line16[(w - 2) >> 2]) >> 2;
2680  if (w > 2)
2681  line16[w - 3] = (line16[(w - 1) >> 2] + line16[(w - 2) >> 2]) >> 1;
2682  } else {
2683  line[w - 1] = line[(w - 1) >> 2];
2684  if (w > 1)
2685  line[w - 2] = (line[(w - 1) >> 2] * 3 + line[(w - 2) >> 2]) >> 2;
2686  if (w > 2)
2687  line[w - 3] = (line[(w - 1) >> 2] + line[(w - 2) >> 2]) >> 1;
2688  }
2689  for (index = w - 4; index > 0; index--)
2690  line[index] = (line[(index + 3) >> 2] + line[(index + 2) >> 2]
2691  + line[(index + 1) >> 2] + line[index >> 2]) >> 2;
2692  }
2693  line += s->linesize[p];
2694  }
2695  }
2696  }
2697  if (AV_RB32(s->upscale_v)) {
2698  int p;
2699  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2700  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2701  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2702  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2703  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2704  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2705  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2706  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2707  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2708  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2709  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2710  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2711  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2712  );
2713  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
2714  if (ret)
2715  return ret;
2716 
2717  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2718  for (p = 0; p < s->nb_components; p++) {
2719  uint8_t *dst;
2720  int w = s->width;
2721  int h = s->height;
2722  if (!s->upscale_v[p])
2723  continue;
2724  if (p==1 || p==2) {
2725  w = AV_CEIL_RSHIFT(w, hshift);
2726  h = AV_CEIL_RSHIFT(h, vshift);
2727  }
2728  dst = &((uint8_t *)s->picture_ptr->data[p])[(h - 1) * s->linesize[p]];
2729  for (int i = h - 1; i; i--) {
2730  uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[i * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2731  uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2732  if (s->upscale_v[p] != 2 && (src1 == src2 || i == h - 1)) {
2733  memcpy(dst, src1, w);
2734  } else {
2735  for (index = 0; index < w; index++)
2736  dst[index] = (src1[index] + src2[index]) >> 1;
2737  }
2738  dst -= s->linesize[p];
2739  }
2740  }
2741  }
2742  if (s->flipped && !s->rgb) {
2743  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
2744  if (ret)
2745  return ret;
2746 
2747  av_assert0(s->nb_components == av_pix_fmt_count_planes(frame->format));
2748  for (index=0; index<s->nb_components; index++) {
2749  int h = frame->height;
2750  if (index && index < 3)
2751  h = AV_CEIL_RSHIFT(h, vshift);
2752  if (frame->data[index]) {
2753  frame->data[index] += (h - 1) * frame->linesize[index];
2754  frame->linesize[index] *= -1;
2755  }
2756  }
2757  }
2758 
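 /* rotate the decoded component planes (and their line sizes) into the
  * G,B,R plane order that AV_PIX_FMT_GBRP expects */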
2759  if (avctx->pix_fmt == AV_PIX_FMT_GBRP) {
2760  av_assert0(s->nb_components == 3);
2761  FFSWAP(uint8_t *, frame->data[0], frame->data[2]);
2762  FFSWAP(uint8_t *, frame->data[0], frame->data[1]);
2763  FFSWAP(int, frame->linesize[0], frame->linesize[2]);
2764  FFSWAP(int, frame->linesize[0], frame->linesize[1]);
2765  }
2766 
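 /* For 4-component Adobe files the 4th plane is folded into the colour planes
  * by the two loops below and then reset to fully opaque;
  * 'x * 257 >> 16' is a cheap integer approximation of x / 255. */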
2767  if (s->adobe_transform == 0 && avctx->pix_fmt == AV_PIX_FMT_GBRAP) {
2768  int w = s->picture_ptr->width;
2769  int h = s->picture_ptr->height;
2770  av_assert0(s->nb_components == 4);
2771  for (int i = 0; i < h; i++) {
2772  int j;
2773  uint8_t *dst[4];
2774  for (index=0; index<4; index++) {
2775  dst[index] = s->picture_ptr->data[index]
2776  + s->picture_ptr->linesize[index]*i;
2777  }
2778  for (j=0; j<w; j++) {
2779  int k = dst[3][j];
2780  int r = dst[0][j] * k;
2781  int g = dst[1][j] * k;
2782  int b = dst[2][j] * k;
2783  dst[0][j] = g*257 >> 16;
2784  dst[1][j] = b*257 >> 16;
2785  dst[2][j] = r*257 >> 16;
2786  }
2787  memset(dst[3], 255, w);
2788  }
2789  }
2790  if (s->adobe_transform == 2 && avctx->pix_fmt == AV_PIX_FMT_YUVA444P) {
2791  int w = s->picture_ptr->width;
2792  int h = s->picture_ptr->height;
2793  av_assert0(s->nb_components == 4);
2794  for (int i = 0; i < h; i++) {
2795  int j;
2796  uint8_t *dst[4];
2797  for (index=0; index<4; index++) {
2798  dst[index] = s->picture_ptr->data[index]
2799  + s->picture_ptr->linesize[index]*i;
2800  }
2801  for (j=0; j<w; j++) {
2802  int k = dst[3][j];
2803  int r = (255 - dst[0][j]) * k;
2804  int g = (128 - dst[1][j]) * k;
2805  int b = (128 - dst[2][j]) * k;
2806  dst[0][j] = r*257 >> 16;
2807  dst[1][j] = (g*257 >> 16) + 128;
2808  dst[2][j] = (b*257 >> 16) + 128;
2809  }
2810  memset(dst[3], 255, w);
2811  }
2812  }
2813 
2814  if (s->stereo3d) {
2815  AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
2816  if (stereo) {
2817  stereo->type = s->stereo3d->type;
2818  stereo->flags = s->stereo3d->flags;
2819  }
2820  av_freep(&s->stereo3d);
2821  }
2822 
2823  if (s->iccnum != 0 && s->iccnum == s->iccread) {
2824  AVFrameSideData *sd;
2825  size_t offset = 0;
2826  int total_size = 0;
2827 
2828  /* Sum size of all parts. */
2829  for (int i = 0; i < s->iccnum; i++)
2830  total_size += s->iccentries[i].length;
2831 
2832  ret = ff_frame_new_side_data(avctx, frame, AV_FRAME_DATA_ICC_PROFILE, total_size, &sd);
2833  if (ret < 0) {
2834  av_log(avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2835  return ret;
2836  }
2837 
2838  if (sd) {
2839  /* Reassemble the parts, which are now in-order. */
2840  for (int i = 0; i < s->iccnum; i++) {
2841  memcpy(sd->data + offset, s->iccentries[i].data, s->iccentries[i].length);
2842  offset += s->iccentries[i].length;
2843  }
2844  }
2845  }
2846 
2847  if (s->exif_metadata.entries) {
2848  ret = ff_decode_exif_attach_ifd(avctx, frame, &s->exif_metadata);
2849  av_exif_free(&s->exif_metadata);
2850  if (ret < 0)
2851  av_log(avctx, AV_LOG_WARNING, "couldn't attach EXIF metadata\n");
2852  }
2853 
2854  if (avctx->codec_id != AV_CODEC_ID_SMVJPEG &&
2855  (avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
2856  avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
2857  avctx->coded_height > s->orig_height) {
2858  frame->height = AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres);
2859  frame->crop_top = frame->height - avctx->height;
2860  }
2861 
2862 the_end_no_picture:
2863  av_log(avctx, AV_LOG_DEBUG, "decode frame unused %"PTRDIFF_SPECIFIER" bytes\n",
2864  buf_end - buf_ptr);
2865  return buf_ptr - buf;
2866 }
2867 
2868 int ff_mjpeg_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame,
2869  AVPacket *avpkt)
2870 {
2871  return ff_mjpeg_decode_frame_from_buf(avctx, frame, got_frame,
2872  avpkt, avpkt->data, avpkt->size);
2873 }
2874 
2875 
2876 /* mxpeg may call the following function (with a blank MJpegDecodeContext)
2877  * even without having called ff_mjpeg_decode_init(). */
2878  av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
2879  {
2880  MJpegDecodeContext *s = avctx->priv_data;
2881  int i, j;
2882 
2883  if (s->interlaced && s->bottom_field == !s->interlace_polarity && s->got_picture && !avctx->frame_num) {
2884  av_log(avctx, AV_LOG_INFO, "Single field\n");
2885  }
2886 
2887  av_frame_free(&s->picture);
2888  s->picture_ptr = NULL;
2889 
2890  av_frame_free(&s->smv_frame);
2891 
2892  av_freep(&s->buffer);
2893  av_freep(&s->stereo3d);
2894  av_freep(&s->ljpeg_buffer);
2895  s->ljpeg_buffer_size = 0;
2896 
2897  for (i = 0; i < 3; i++) {
2898  for (j = 0; j < 4; j++)
2899  ff_vlc_free(&s->vlcs[i][j]);
2900  }
2901  for (i = 0; i < MAX_COMPONENTS; i++) {
2902  av_freep(&s->blocks[i]);
2903  av_freep(&s->last_nnz[i]);
2904  }
2905  av_exif_free(&s->exif_metadata);
2906 
2907  reset_icc_profile(s);
2908 
2909  av_freep(&s->hwaccel_picture_private);
2910  av_freep(&s->jls_state);
2911 
2912  return 0;
2913 }
2914 
2915  static void decode_flush(AVCodecContext *avctx)
2916  {
2917  MJpegDecodeContext *s = avctx->priv_data;
2918  s->got_picture = 0;
2919 
2920  s->smv_next_frame = 0;
2921  av_frame_unref(s->smv_frame);
2922 }
2923 
2924 #if CONFIG_MJPEG_DECODER
2925 #define OFFSET(x) offsetof(MJpegDecodeContext, x)
2926 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
2927 static const AVOption options[] = {
2928  { "extern_huff", "Use external huffman table.",
2929  OFFSET(extern_huff), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
2930  { NULL },
2931 };
2932 
2933 static const AVClass mjpegdec_class = {
2934  .class_name = "MJPEG decoder",
2935  .item_name = av_default_item_name,
2936  .option = options,
2937  .version = LIBAVUTIL_VERSION_INT,
2938 };
2939 
2940 const FFCodec ff_mjpeg_decoder = {
2941  .p.name = "mjpeg",
2942  CODEC_LONG_NAME("MJPEG (Motion JPEG)"),
2943  .p.type = AVMEDIA_TYPE_VIDEO,
2944  .p.id = AV_CODEC_ID_MJPEG,
2945  .priv_data_size = sizeof(MJpegDecodeContext),
2946  .init = ff_mjpeg_decode_init,
2947  .close = ff_mjpeg_decode_end,
2948  FF_CODEC_DECODE_CB(ff_mjpeg_decode_frame),
2949  .flush = decode_flush,
2950  .p.capabilities = AV_CODEC_CAP_DR1,
2951  .p.max_lowres = 3,
2952  .p.priv_class = &mjpegdec_class,
2953  .p.profiles = NULL_IF_CONFIG_SMALL(ff_mjpeg_profiles),
2954  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
2955  FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM |
2956  FF_CODEC_CAP_ICC_PROFILES,
2957  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2958 #if CONFIG_MJPEG_NVDEC_HWACCEL
2959  HWACCEL_NVDEC(mjpeg),
2960 #endif
2961 #if CONFIG_MJPEG_VAAPI_HWACCEL
2962  HWACCEL_VAAPI(mjpeg),
2963 #endif
2964  NULL
2965  },
2966 };
2967 #endif
2968 #if CONFIG_THP_DECODER
2969 const FFCodec ff_thp_decoder = {
2970  .p.name = "thp",
2971  CODEC_LONG_NAME("Nintendo Gamecube THP video"),
2972  .p.type = AVMEDIA_TYPE_VIDEO,
2973  .p.id = AV_CODEC_ID_THP,
2974  .priv_data_size = sizeof(MJpegDecodeContext),
2975  .init = ff_mjpeg_decode_init,
2976  .close = ff_mjpeg_decode_end,
2977  FF_CODEC_DECODE_CB(ff_mjpeg_decode_frame),
2978  .flush = decode_flush,
2979  .p.capabilities = AV_CODEC_CAP_DR1,
2980  .p.max_lowres = 3,
2981  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
2982 };
2983 #endif
2984 
2985 #if CONFIG_SMVJPEG_DECODER
2986 // SMV JPEG just stacks several output frames into one JPEG picture
2987 // we handle that by setting up the cropping parameters appropriately
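2922 // each decoded JPEG is a vertical stack of avctx->height sized sub-frames;
2923 // smv_next_frame selects the current one via crop_top/crop_bottom below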
2988 static void smv_process_frame(AVCodecContext *avctx, AVFrame *frame)
2989 {
2990  MJpegDecodeContext *s = avctx->priv_data;
2991 
2992  av_assert0((s->smv_next_frame + 1) * avctx->height <= avctx->coded_height);
2993 
2994  frame->width = avctx->coded_width;
2995  frame->height = avctx->coded_height;
2996  frame->crop_top = FFMIN(s->smv_next_frame * avctx->height, frame->height);
2997  frame->crop_bottom = frame->height - (s->smv_next_frame + 1) * avctx->height;
2998 
2999  if (s->smv_frame->pts != AV_NOPTS_VALUE)
3000  s->smv_frame->pts += s->smv_frame->duration;
3001  s->smv_next_frame = (s->smv_next_frame + 1) % s->smv_frames_per_jpeg;
3002 
3003  if (s->smv_next_frame == 0)
3004  av_frame_unref(s->smv_frame);
3005 }
3006 
3007 static int smvjpeg_receive_frame(AVCodecContext *avctx, AVFrame *frame)
3008 {
3009  MJpegDecodeContext *s = avctx->priv_data;
3010  AVPacket *const pkt = avctx->internal->in_pkt;
3011  int got_frame = 0;
3012  int ret;
3013 
3014  if (s->smv_next_frame > 0)
3015  goto return_frame;
3016 
3017  ret = ff_decode_get_packet(avctx, pkt);
3018  if (ret < 0)
3019  return ret;
3020 
3021  av_frame_unref(s->smv_frame);
3022 
3023  ret = ff_mjpeg_decode_frame(avctx, s->smv_frame, &got_frame, pkt);
3024  s->smv_frame->pkt_dts = pkt->dts;
3025  av_packet_unref(pkt);
3026  if (ret < 0)
3027  return ret;
3028 
3029  if (!got_frame)
3030  return AVERROR(EAGAIN);
3031 
3032  // packet duration covers all the frames in the packet
3033  s->smv_frame->duration /= s->smv_frames_per_jpeg;
3034 
3035 return_frame:
3036  av_assert0(s->smv_frame->buf[0]);
3037  ret = av_frame_ref(frame, s->smv_frame);
3038  if (ret < 0)
3039  return ret;
3040 
3041  smv_process_frame(avctx, frame);
3042  return 0;
3043 }
3044 
3045 const FFCodec ff_smvjpeg_decoder = {
3046  .p.name = "smvjpeg",
3047  CODEC_LONG_NAME("SMV JPEG"),
3048  .p.type = AVMEDIA_TYPE_VIDEO,
3049  .p.id = AV_CODEC_ID_SMVJPEG,
3050  .priv_data_size = sizeof(MJpegDecodeContext),
3051  .init = ff_mjpeg_decode_init,
3052  .close = ff_mjpeg_decode_end,
3053  FF_CODEC_RECEIVE_FRAME_CB(smvjpeg_receive_frame),
3054  .flush = decode_flush,
3055  .p.capabilities = AV_CODEC_CAP_DR1,
3056  .caps_internal = FF_CODEC_CAP_EXPORTS_CROPPING |
3058 };
3059 #endif
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:78
flags
const SwsFlags flags[]
Definition: swscale.c:61
hwconfig.h
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:433
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1405
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:276
ff_decode_get_packet
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
Definition: decode.c:245
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:260
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
jpegtables.h
mjpeg.h
level
uint8_t level
Definition: svq3.c:208
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
show_bits_long
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
Definition: get_bits.h:493
blockdsp.h
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:689
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
decode_slice
static int decode_slice(AVCodecContext *c, void *arg)
Definition: ffv1dec.c:360
opt.h
av_exif_parse_buffer
int av_exif_parse_buffer(void *logctx, const uint8_t *buf, size_t size, AVExifMetadata *ifd, enum AVExifHeaderMode header_mode)
Decodes the EXIF data provided in the buffer and writes it into the struct *ifd.
Definition: exif.c:764
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:659
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1203
out
FILE * out
Definition: movenc.c:55
SOS
@ SOS
Definition: mjpeg.h:72
mjpeg_copy_block
static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s, uint8_t *dst, const uint8_t *src, int linesize, int lowres)
Definition: mjpegdec.c:1425
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
SOF48
@ SOF48
JPEG-LS.
Definition: mjpeg.h:103
APP1
@ APP1
Definition: mjpeg.h:80
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3447
ZERO_RUN
#define ZERO_RUN
Definition: mjpegdec.c:977
SOF0
@ SOF0
Definition: mjpeg.h:39
src1
const pixel * src1
Definition: h264pred_template.c:420
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1398
GET_VLC
#define GET_VLC(code, name, gb, table, bits, max_depth)
If the vlc code is invalid and max_depth=1, then no bits will be removed.
Definition: get_bits.h:568
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:419
ff_smvjpeg_decoder
const FFCodec ff_smvjpeg_decoder
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
se
#define se(name, range_min, range_max)
Definition: cbs_h2645.c:260
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:250
init_idct
static void init_idct(AVCodecContext *avctx)
Definition: mjpegdec.c:113
mask
int mask
Definition: mediacodecdec_common.c:154
RST7
@ RST7
Definition: mjpeg.h:68
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
mjpegdec.h
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:230
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:224
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:595
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:767
ff_mjpeg_decoder
const FFCodec ff_mjpeg_decoder
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:558
SOF11
@ SOF11
Definition: mjpeg.h:50
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:682
AVOption
AVOption.
Definition: opt.h:429
b
#define b
Definition: input.c:42
jpeglsdec.h
data
const char data[16]
Definition: mxf.c:149
AVComponentDescriptor::step
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:40
ff_mjpeg_val_dc
const uint8_t ff_mjpeg_val_dc[]
Definition: jpegtabs.h:34
FFCodec
Definition: codec_internal.h:127
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
FF_HW_SIMPLE_CALL
#define FF_HW_SIMPLE_CALL(avctx, function)
Definition: hwaccel_internal.h:176
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:76
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:106
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:209
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
ff_mjpeg_bits_ac_chrominance
const uint8_t ff_mjpeg_bits_ac_chrominance[]
Definition: jpegtabs.h:66
AV_CODEC_ID_THP
@ AV_CODEC_ID_THP
Definition: codec_id.h:152
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:512
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:228
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1375
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:448
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:655
APP15
@ APP15
Definition: mjpeg.h:94
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:247
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:379
ff_permute_scantable
av_cold void ff_permute_scantable(uint8_t dst[64], const uint8_t src[64], const uint8_t permutation[64])
Definition: idctdsp.c:30
close
static av_cold void close(AVCodecParserContext *s)
Definition: apv_parser.c:135
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:64
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3487
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:696
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:333
rgb
Definition: rpzaenc.c:60
ff_mjpeg_decode_dht
int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
Definition: mjpegdec.c:238
ljpeg_decode_yuv_scan
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point_transform, int nb_components)
Definition: mjpegdec.c:1268
shift_output
static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
Definition: mjpegdec.c:1441
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
FFHWAccel
Definition: hwaccel_internal.h:34
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:212
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:440
ff_mjpeg_decode_init
av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
Definition: mjpegdec.c:122
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1662
fail
#define fail()
Definition: checkasm.h:200
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:52
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:597
SOF3
@ SOF3
Definition: mjpeg.h:42
GetBitContext
Definition: get_bits.h:109
ff_mjpeg_decode_frame_from_buf
int ff_mjpeg_decode_frame_from_buf(AVCodecContext *avctx, AVFrame *frame, int *got_frame, const AVPacket *avpkt, const uint8_t *buf, const int buf_size)
Definition: mjpegdec.c:2351
mjpeg_decode_com
static int mjpeg_decode_com(MJpegDecodeContext *s)
Definition: mjpegdec.c:2152
init_default_huffman_tables
static int init_default_huffman_tables(MJpegDecodeContext *s)
Definition: mjpegdec.c:59
av_exif_free
void av_exif_free(AVExifMetadata *ifd)
Frees all resources associated with the given EXIF metadata struct.
Definition: exif.c:609
val
static double val(void *priv, double ch)
Definition: aeval.c:77
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:3475
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:607
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:522
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:202
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:52
AV_PIX_FMT_YUVJ411P
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
Definition: pixfmt.h:283
ff_mjpeg_profiles
const AVProfile ff_mjpeg_profiles[]
Definition: profiles.c:191
aligned
static int aligned(int val)
Definition: dashdec.c:171
avassert.h
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
decode_dc_progressive
static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, uint16_t *quant_matrix, int Al)
Definition: mjpegdec.c:879
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:551
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:539
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1638
AV_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
Definition: defs.h:173
COM
@ COM
Definition: mjpeg.h:111
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:642
AV_FIELD_UNKNOWN
@ AV_FIELD_UNKNOWN
Definition: defs.h:212
handle_rstn
static int handle_rstn(MJpegDecodeContext *s, int nb_components)
Definition: mjpegdec.c:1060
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
emms_c
#define emms_c()
Definition: emms.h:63
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:185
SOF5
@ SOF5
Definition: mjpeg.h:44
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:515
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:346
AV_STEREO3D_LINES
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
Definition: stereo3d.h:126
ff_blockdsp_init
av_cold void ff_blockdsp_init(BlockDSPContext *c)
Definition: blockdsp.c:58
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:108
parse_avid
static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
Definition: mjpegdec.c:102
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:552
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
g
const char * g
Definition: vf_curves.c:128
APP3
@ APP3
Definition: mjpeg.h:82
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:411
ff_jpegls_decode_picture
int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transform, int ilv)
Definition: jpeglsdec.c:355
bits
uint8_t bits
Definition: vp3data.h:128
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:41
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:298
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:550
RST0
@ RST0
Definition: mjpeg.h:61
decode.h
reset_icc_profile
static void reset_icc_profile(MJpegDecodeContext *s)
Definition: mjpegdec.c:2337
ff_mjpeg_decode_end
av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
Definition: mjpegdec.c:2878
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
PutBitContext
Definition: put_bits.h:50
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:331
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:441
AVStereo3D::flags
int flags
Additional information about the frame packing.
Definition: stereo3d.h:212
if
if(ret)
Definition: filter_design.txt:179
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:232
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:561
AV_PIX_FMT_RGBA64
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:529
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
ff_decode_exif_attach_ifd
int ff_decode_exif_attach_ifd(AVCodecContext *avctx, AVFrame *frame, const AVExifMetadata *ifd)
Definition: decode.c:2410
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
av_clip_int16
#define av_clip_int16
Definition: common.h:115
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:128
AV_PIX_FMT_BGR48
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:530
NULL
#define NULL
Definition: coverity.c:32
mjpeg_idct_scan_progressive_ac
static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
Definition: mjpegdec.c:1632
copy_block2
static void copy_block2(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:27
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
run
uint8_t run
Definition: svq3.c:207
AV_EXIF_TIFF_HEADER
@ AV_EXIF_TIFF_HEADER
The TIFF header starts with 0x49492a00, or 0x4d4d002a.
Definition: exif.h:63
hwaccel_internal.h
AV_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
Definition: defs.h:174
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
ff_mjpeg_decode_dqt
int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
Definition: mjpegdec.c:195
SOF13
@ SOF13
Definition: mjpeg.h:52
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:466
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
mjpeg_decode_dc
static int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index, int *val)
Definition: mjpegdec.c:817
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:241
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:386
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
profiles.h
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:144
options
Definition: swscale.c:43
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:231
MJpegDecodeContext
Definition: mjpegdec.h:55
mjpeg_decode_scan
static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, int Al, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1456
decode_block_refinement
static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:995
lowres
static int lowres
Definition: ffplay.c:330
mjpeg_decode_scan_progressive_ac
static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int se, int Ah, int Al)
Definition: mjpegdec.c:1577
ff_mjpeg_val_ac_chrominance
const uint8_t ff_mjpeg_val_ac_chrominance[]
Definition: jpegtabs.h:69
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:646
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:101
DRI
@ DRI
Definition: mjpeg.h:75
index
int index
Definition: gxfenc.c:90
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
copy_data_segment
#define copy_data_segment(skip)
AVCodecContext::lowres
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1697
options
const OptionDef options[]
copy_mb
static void copy_mb(CinepakEncContext *s, uint8_t *a_data[4], int a_linesize[4], uint8_t *b_data[4], int b_linesize[4])
Definition: cinepakenc.c:506
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1720
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:368
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
ljpeg_decode_rgb_scan
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
Definition: mjpegdec.c:1095
ff_mjpeg_val_ac_luminance
const uint8_t ff_mjpeg_val_ac_luminance[]
Definition: jpegtabs.h:42
AVPacket::size
int size
Definition: packet.h:559
dc
Intra DC prediction values (block[y][x] dc[1]) from the Snow bitstream description; see doc/snow.txt.
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
height
#define height
Definition: dsp.h:89
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
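av_frame_ref() creates a new reference to an existing frame's buffers without copying the pixel data, which is how a decoder can hand out its internally kept picture. A minimal usage sketch with the public libavutil API (the function name share_frame is illustrative):

#include <libavutil/error.h>
#include <libavutil/frame.h>

/* Share one decoded picture between two AVFrame handles. */
int share_frame(AVFrame *decoded)
{
    AVFrame *copy = av_frame_alloc();
    int ret;
    if (!copy)
        return AVERROR(ENOMEM);
    ret = av_frame_ref(copy, decoded);  /* refcounted, no pixel copy */
    if (ret < 0) {
        av_frame_free(&copy);
        return ret;
    }
    /* ... use copy ... */
    av_frame_unref(copy);   /* drop the reference, keep the handle */
    av_frame_free(&copy);
    return 0;
}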
codec_internal.h
SOF14
@ SOF14
Definition: mjpeg.h:53
ff_jpegls_decode_lse
int ff_jpegls_decode_lse(MJpegDecodeContext *s)
Decode LSE block with initialization parameters.
Definition: jpeglsdec.c:51
dst
int16_t *dst
Definition: dsp.h:87
ff_mjpeg_decode_frame
int ff_mjpeg_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: mjpegdec.c:2868
av_bswap32
#define av_bswap32
Definition: bswap.h:47
decode_block_progressive
static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:896
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
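av_err2str() formats an AVERROR code into a temporary string that only lives for the duration of the enclosing expression, hence "used only directly in function arguments". A small illustrative use (the function name report is not from mjpegdec.c):

#include <libavutil/error.h>
#include <libavutil/log.h>

void report(void *logctx, int err)
{
    /* The string is stack-allocated by the macro, so it must be consumed
     * inside the same expression, typically an av_log() call like this. */
    av_log(logctx, AV_LOG_ERROR, "decode failed: %s\n", av_err2str(err));
}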
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
ff_mjpeg_decode_sos
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1667
AV_PROFILE_MJPEG_JPEG_LS
#define AV_PROFILE_MJPEG_JPEG_LS
Definition: defs.h:177
ff_mjpeg_bits_ac_luminance
const uint8_t ff_mjpeg_bits_ac_luminance[]
Definition: jpegtabs.h:40
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: codec_internal.h:60
size
int size
Definition: twinvq_data.h:10344
AV_CODEC_ID_SMVJPEG
@ AV_CODEC_ID_SMVJPEG
Definition: codec_id.h:268
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:247
ff_frame_new_side_data
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
Definition: decode.c:2114
AV_RB32
Read a big-endian 32-bit value from a byte buffer.
Definition: bytestream.h:96
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: codec_internal.h:54
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:284
SOF15
@ SOF15
Definition: mjpeg.h:54
AVCodecHWConfigInternal
Definition: hwconfig.h:25
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:174
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:557
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:174
offset
Offset of the option field within the filter's private context, from the filter-writing guide; see doc/writing_filters.txt.
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
attributes.h
get_xbits
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
Definition: get_bits.h:290
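get_xbits() reads the JPEG/MPEG-1 style signed mantissa: n bits whose leading bit carries the sign, so for n = 3 the field "101" decodes to +5 and "010" to -5, matching the JPEG DC-difference convention. A hedged sketch assuming the internal get_bits.h API and an initialized GetBitContext (read_dc_diff is an illustrative name):

#include "get_bits.h"

/* Read a DC difference given its magnitude category (0 means zero diff). */
static int read_dc_diff(GetBitContext *gb, int category)
{
    return category ? get_xbits(gb, category) : 0;
}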
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:68
predictor
static void predictor(uint8_t *src, ptrdiff_t size)
Definition: exrenc.c:170
find_marker
static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
Definition: mjpegdec.c:2192
AV_STEREO3D_FLAG_INVERT
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:194
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:126
DQT
@ DQT
Definition: mjpeg.h:73
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
ff_thp_decoder
const FFCodec ff_thp_decoder
AVCodec::id
enum AVCodecID id
Definition: codec.h:186
layout
For each filter input and output, the list of supported formats: pixel formats for video, channel layouts for audio (from the filter design notes).
Definition: filter_design.txt:18
SOF10
@ SOF10
Definition: mjpeg.h:49
emms.h
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
NEG_USR32
#define NEG_USR32(a, s)
Definition: mathops.h:177
copy_block4
static void copy_block4(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:37
interlaced
uint8_t interlaced
Definition: mxfenc.c:2315
decode_block
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, int ac_index, uint16_t *quant_matrix)
Definition: mjpegdec.c:832
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
code
Example code from the filter design notes on forwarding output status and returning FFERROR_NOT_READY.
Definition: filter_design.txt:178
EOI
@ EOI
Definition: mjpeg.h:71
copy_block.h
AVCodecContext::extradata
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
Definition: avcodec.h:514
AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS
#define AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS
Definition: defs.h:176
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:369
VD
#define VD
Definition: amfdec.c:671
src2
const pixel * src2
Definition: h264pred_template.c:421
AV_FIELD_BB
@ AV_FIELD_BB
Bottom coded first, bottom displayed first.
Definition: defs.h:215
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:76
mjpeg_decode_dri
static int mjpeg_decode_dri(MJpegDecodeContext *s)
Definition: mjpegdec.c:1833
AVCodecInternal::in_pkt
AVPacket * in_pkt
This packet is used to hold the packet given to decoders implementing the .decode API; it is unused b...
Definition: internal.h:83
MIN_CACHE_BITS
#define MIN_CACHE_BITS
Definition: get_bits.h:167
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour as av_fast_malloc(), but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:53
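av_fast_padded_malloc() is the usual way a decoder grows a scratch buffer across frames without reallocating every time; the extra AV_INPUT_BUFFER_PADDING_SIZE bytes let bit readers safely over-read. A hedged sketch with the public API; the ScratchCtx structure and ensure_scratch function are illustrative, not from mjpegdec.c:

#include <libavcodec/avcodec.h>   /* av_fast_padded_malloc() */
#include <libavutil/error.h>

struct ScratchCtx {
    uint8_t     *buf;
    unsigned int buf_size;        /* updated by av_fast_padded_malloc() */
};

int ensure_scratch(struct ScratchCtx *c, size_t needed)
{
    /* Reuses the old allocation if it is already large enough;
     * otherwise reallocates with zeroed trailing padding. */
    av_fast_padded_malloc(&c->buf, &c->buf_size, needed);
    return c->buf ? 0 : AVERROR(ENOMEM);
}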
SOF9
@ SOF9
Definition: mjpeg.h:48
av_always_inline
#define av_always_inline
Definition: attributes.h:49
decode_flush
static av_cold void decode_flush(AVCodecContext *avctx)
Definition: mjpegdec.c:2915
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1382
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_PIX_FMT_YUVJ440P
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
Definition: pixfmt.h:107
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:676
len
int len
Definition: vorbis_enc_data.h:426
exif.h
DHT
@ DHT
Definition: mjpeg.h:56
AVCodecContext::height
int height
Definition: avcodec.h:592
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:631
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:650
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:750
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
FF_CODEC_CAP_ICC_PROFILES
#define FF_CODEC_CAP_ICC_PROFILES
Codec supports embedded ICC profiles (AV_FRAME_DATA_ICC_PROFILE).
Definition: codec_internal.h:81
idctdsp.h
avcodec.h
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:137
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:84
AVCodecContext::frame_num
int64_t frame_num
Frame counter, set by libavcodec.
Definition: avcodec.h:1878
REFINE_BIT
#define REFINE_BIT(j)
Definition: mjpegdec.c:969
ff_vlc_free
void ff_vlc_free(VLC *vlc)
Definition: vlc.c:580
ret
ret
Definition: filter_design.txt:187
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:204
pred
static const float pred[4]
Definition: siprdata.h:259
av_stereo3d_alloc
AVStereo3D * av_stereo3d_alloc(void)
Allocate an AVStereo3D structure and set its fields to default values.
Definition: stereo3d.c:35
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
frame
Producing a frame in a filter's request_frame handling, as described in the filter design notes.
Definition: filter_design.txt:265
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:207
SOF2
@ SOF2
Definition: mjpeg.h:41
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:555
hwaccel
static const char * hwaccel
Definition: ffplay.c:353
pos
unsigned int pos
Definition: spdifenc.c:414
LSE
@ LSE
JPEG-LS extension parameters.
Definition: mjpeg.h:104
FF_DEBUG_QP
#define FF_DEBUG_QP
Definition: avcodec.h:1379
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
id
enum AVCodecID id
Definition: dts2pts.c:367
left
Left neighboring block used for motion vector prediction in the Snow bitstream description (set to the Null block when outside the image); see doc/snow.txt.
Definition: snow.txt:386
AV_RL32
Read a little-endian 32-bit value from a byte buffer.
Definition: bytestream.h:92
ff_mjpeg_find_marker
int ff_mjpeg_find_marker(MJpegDecodeContext *s, const uint8_t **buf_ptr, const uint8_t *buf_end, const uint8_t **unescaped_buf_ptr, int *unescaped_buf_size)
Definition: mjpegdec.c:2217
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:159
OFFSET
Offset of an option field within the private context structure; see the OFFSET() macro description in the filter-writing guide.
AVCodecContext
main external API structure.
Definition: avcodec.h:431
FF_CODEC_RECEIVE_FRAME_CB
#define FF_CODEC_RECEIVE_FRAME_CB(func)
Definition: codec_internal.h:354
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:243
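OPEN_READER/SHOW_UBITS/LAST_SKIP_BITS form the macro-level bit reader used in hot decoding loops: the cache is loaded once, peeked, then consumed. A hedged sketch of the canonical pattern from the internal get_bits.h API (read8_fast is an illustrative name):

#include "get_bits.h"

/* Peek 8 bits, consume them, and return the value using the
 * open/update/show/skip/close sequence. */
static int read8_fast(GetBitContext *gb)
{
    int v;
    OPEN_READER(re, gb);
    UPDATE_CACHE(re, gb);
    v = SHOW_UBITS(re, gb, 8);
    LAST_SKIP_BITS(re, gb, 8);
    CLOSE_READER(re, gb);
    return v;
}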
buffer
Frame data is stored in buffers represented by AVFrame structures; several references can point to the same frame buffer (from the filter design notes).
Definition: filter_design.txt:49
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:789
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
APP2
@ APP2
Definition: mjpeg.h:81
FF_HW_CALL
#define FF_HW_CALL(avctx, function,...)
Definition: hwaccel_internal.h:173
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1618
ffhwaccel
static const FFHWAccel * ffhwaccel(const AVHWAccel *codec)
Definition: hwaccel_internal.h:168
values
Return values expected from a filter's request_frame handling, as described in the filter design notes.
Definition: filter_design.txt:264
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
ff_mjpeg_bits_dc_chrominance
const uint8_t ff_mjpeg_bits_dc_chrominance[]
Definition: jpegtabs.h:37
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1374
ff_mjpeg_decode_sof
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
Definition: mjpegdec.c:299
APP0
@ APP0
Definition: mjpeg.h:79
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:607
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
AV_PIX_FMT_GRAY16LE
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
Definition: pixfmt.h:105
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
SOI
@ SOI
Definition: mjpeg.h:70
mjpeg_decode_app
static int mjpeg_decode_app(MJpegDecodeContext *s)
Definition: mjpegdec.c:1845
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:54
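av_stereo3d_create_side_data() allocates an AVStereo3D structure and attaches it to a frame as side data in one call, which is the convenient public way to tag stereoscopic content such as JPS pictures. A minimal usage sketch (tag_stereo and the chosen packing are illustrative):

#include <libavutil/error.h>
#include <libavutil/frame.h>
#include <libavutil/stereo3d.h>

/* Mark a frame as top-bottom packed stereo with inverted view order. */
int tag_stereo(AVFrame *frame)
{
    AVStereo3D *s3d = av_stereo3d_create_side_data(frame);
    if (!s3d)
        return AVERROR(ENOMEM);
    s3d->type  = AV_STEREO3D_TOPBOTTOM;
    s3d->flags = AV_STEREO3D_FLAG_INVERT;
    return 0;
}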
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:37
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:282
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:153
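flush_put_bits() pads a PutBitContext's output to a byte boundary with zero bits, which is needed before the written buffer can be consumed. A hedged sketch of the basic internal put_bits.h writing pattern; the buffer contents, field widths, and the name write_header_bits are illustrative:

#include "put_bits.h"

/* Write a few bit fields and flush so the buffer ends on a byte boundary. */
static int write_header_bits(uint8_t *buf, int buf_size)
{
    PutBitContext pb;
    init_put_bits(&pb, buf, buf_size);
    put_bits(&pb, 4, 0xA);       /* 4-bit field */
    put_bits(&pb, 12, 0x123);    /* 12-bit field */
    flush_put_bits(&pb);         /* zero-pad to the next byte */
    return put_bits_count(&pb) / 8;   /* bytes written */
}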
SOF1
@ SOF1
Definition: mjpeg.h:40
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:456
ff_mjpeg_bits_dc_luminance
const uint8_t ff_mjpeg_bits_dc_luminance[]
Definition: jpegtabs.h:32
ff_mjpeg_build_vlc
int ff_mjpeg_build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int is_ac, void *logctx)
Definition: mjpegdec_common.c:41
AVPacket
This structure stores compressed data.
Definition: packet.h:535
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
Definition: opt.h:327
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:557
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:80
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:70
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
imgutils.h
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
AVCodecContext::properties
attribute_deprecated unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1637
MAX_COMPONENTS
#define MAX_COMPONENTS
Definition: mjpegdec.h:46
rgb
static const SheerTable rgb[2]
Definition: sheervideodata.h:32
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
h
h
Definition: vp9dsp_template.c:2070
SOF7
@ SOF7
Definition: mjpeg.h:46
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:203
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
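av_image_check_size() rejects width/height combinations whose total image size could overflow or exceed addressable memory, so decoders call it before accepting dimensions parsed from the bitstream. A minimal usage sketch with the public API (check_dims is an illustrative name):

#include <libavutil/imgutils.h>
#include <libavutil/log.h>

/* Validate dimensions parsed from a header before configuring a decoder. */
int check_dims(void *logctx, unsigned w, unsigned h)
{
    int ret = av_image_check_size(w, h, 0, logctx);
    if (ret < 0)
        av_log(logctx, AV_LOG_ERROR, "invalid size %ux%u\n", w, h);
    return ret;
}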
width
#define width
Definition: dsp.h:89
AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
Definition: defs.h:175
AV_RB24
Read a big-endian 24-bit value from a byte buffer.
Definition: bytestream.h:97
PREDICT
#define PREDICT(ret, topleft, top, left, predictor)
Definition: mjpeg.h:118
put_bits.h
return_frame
static int return_frame(AVFilterContext *ctx, int is_second)
Definition: yadif_common.c:28
AV_FRAME_FLAG_LOSSLESS
#define AV_FRAME_FLAG_LOSSLESS
A decoder can use this flag to mark frames which were originally encoded losslessly.
Definition: frame.h:663
SOF6
@ SOF6
Definition: mjpeg.h:45
skip
static void BS_FUNC() skip(BSCTX *bc, unsigned int n)
Skip n bits in the buffer.
Definition: bitstream_template.h:383
src
#define src
Definition: vp8dsp.c:248
JPG
@ JPG
Definition: mjpeg.h:47
av_fourcc2str
#define av_fourcc2str(fourcc)
Definition: avutil.h:347
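MKTAG() packs four characters into the little-endian fourcc stored in AVCodecContext::codec_tag, and av_fourcc2str() turns such a tag back into a printable string; decoders compare codec_tag against tags built this way to trigger container-specific workarounds. An illustrative sketch with the public macros ('AVRn' is shown purely as an example value):

#include <stdio.h>
#include <libavutil/avutil.h>   /* av_fourcc2str() */
#include <libavutil/macros.h>   /* MKTAG() */

int main(void)
{
    unsigned int tag = MKTAG('A', 'V', 'R', 'n');   /* example fourcc */
    /* Prints "AVRn": codec_tag stores the characters LSB-first. */
    printf("tag 0x%08x = %s\n", tag, av_fourcc2str(tag));
    return 0;
}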