FFmpeg
utvideodec.c
Go to the documentation of this file.
1 /*
2  * Ut Video decoder
3  * Copyright (c) 2011 Konstantin Shishkov
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Ut Video decoder
25  */
26 
27 #include <inttypes.h>
28 #include <stdlib.h>
29 
30 #define CACHED_BITSTREAM_READER !ARCH_X86_32
31 #define UNCHECKED_BITSTREAM_READER 1
32 
33 #include "libavutil/intreadwrite.h"
34 #include "libavutil/pixdesc.h"
35 #include "avcodec.h"
36 #include "bswapdsp.h"
37 #include "bytestream.h"
38 #include "codec_internal.h"
39 #include "get_bits.h"
40 #include "lossless_videodsp.h"
41 #include "thread.h"
42 #include "utvideo.h"
43 #include "utvideodsp.h"
44 
45 typedef struct UtvideoContext {
50 
52  int planes;
53  int slices;
57  int pro;
58  int pack;
59 
60  uint8_t *slice_bits;
62  void *buffer;
63 
64  const uint8_t *packed_stream[4][256];
65  size_t packed_stream_size[4][256];
66  const uint8_t *control_stream[4][256];
67  size_t control_stream_size[4][256];
69 
/* One entry of the canonical Huffman table built in build_huff(). */
typedef struct HuffEntry {
    uint8_t  len; /* code length in bits; 0 marks a symbol absent from the stream */
    uint16_t sym; /* symbol value the code decodes to */
} HuffEntry;
74 
75 static int build_huff(UtvideoContext *c, const uint8_t *src, VLC *vlc,
76  VLC_MULTI *multi, int *fsym, unsigned nb_elems)
77 {
78  int i;
79  HuffEntry he[1024];
80  uint8_t bits[1024];
81  uint16_t codes_count[33] = { 0 };
82 
83  *fsym = -1;
84  for (i = 0; i < nb_elems; i++) {
85  if (src[i] == 0) {
86  *fsym = i;
87  return 0;
88  } else if (src[i] == 255) {
89  bits[i] = 0;
90  } else if (src[i] <= 32) {
91  bits[i] = src[i];
92  } else
93  return AVERROR_INVALIDDATA;
94 
95  codes_count[bits[i]]++;
96  }
97  if (codes_count[0] == nb_elems)
98  return AVERROR_INVALIDDATA;
99 
100  /* For Ut Video, longer codes are to the left of the tree and
101  * for codes with the same length the symbol is descending from
102  * left to right. So after the next loop --codes_count[i] will
103  * be the index of the first (lowest) symbol of length i when
104  * indexed by the position in the tree with left nodes being first. */
105  for (int i = 31; i >= 0; i--)
106  codes_count[i] += codes_count[i + 1];
107 
108  for (unsigned i = 0; i < nb_elems; i++)
109  he[--codes_count[bits[i]]] = (HuffEntry) { bits[i], i };
110 
111 #define VLC_BITS 11
112  return ff_vlc_init_multi_from_lengths(vlc, multi, VLC_BITS, nb_elems, codes_count[0],
113  &he[0].len, sizeof(*he),
114  &he[0].sym, sizeof(*he), 2, 0, 0, c->avctx);
115 }
116 
/* Decode one line of 'width' samples of 'b' bytes each into 'dest'.
 * With the cached bitstream reader, the bulk of the line is decoded with
 * the multi-symbol VLC; the last 'end' samples (and any remainder) fall
 * back to single-symbol get_vlc2() reads.  When left prediction is used
 * the VLC output is staged in c->buffer and then left-predicted into
 * dest; 'prev' carries the running predictor across lines.
 * Relies on gb, vlc, multi, c, dest, buf, stride, width, use_pred, prev,
 * i and ret from the enclosing scope, and jumps to 'fail' on a VLC error. */
#define READ_PLANE(b, end) \
{ \
    buf = !use_pred ? dest : c->buffer; \
    i = 0; \
    for (; CACHED_BITSTREAM_READER && i < width-end && get_bits_left(&gb) > 0;) {\
        ret = get_vlc_multi(&gb, (uint8_t *)buf + i * b, multi.table, \
                            vlc.table, VLC_BITS, 3, b); \
        if (ret > 0) \
            i += ret; \
        if (ret <= 0) \
            goto fail; \
    } \
    for (; i < width && get_bits_left(&gb) > 0; i++) \
        buf[i] = get_vlc2(&gb, vlc.table, VLC_BITS, 3); \
    if (use_pred) { \
        if (b == 2) \
            c->llviddsp.add_left_pred_int16((uint16_t *)dest, (const uint16_t *)buf, 0x3ff, width, prev); \
        else \
            c->llviddsp.add_left_pred((uint8_t *)dest, (const uint8_t *)buf, width, prev); \
    } \
    prev = dest[width-1]; \
    dest += stride; \
}
140 
/**
 * Decode one 10-bit plane (Ut Video Pro variants).
 *
 * @param c        decoder context
 * @param plane_no plane index (not used by the 10-bit path; kept for
 *                 symmetry with decode_plane)
 * @param dst      output plane of 16-bit samples
 * @param stride   output stride in samples
 * @param src      per-slice offset table followed by the slice bitstreams
 * @param huff     1024-entry code-length table for this plane
 * @param use_pred non-zero to apply left prediction to decoded samples
 * @return 0 on success, a negative AVERROR code on failure
 */
static int decode_plane10(UtvideoContext *c, int plane_no,
                          uint16_t *dst, ptrdiff_t stride,
                          int width, int height,
                          const uint8_t *src, const uint8_t *huff,
                          int use_pred)
{
    int i, j, slice, pix, ret;
    int sstart, send;
    VLC_MULTI multi;
    VLC vlc;
    GetBitContext gb;
    int prev, fsym;

    if ((ret = build_huff(c, huff, &vlc, &multi, &fsym, 1024)) < 0) {
        av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
        return ret;
    }
    if (fsym >= 0) { // build_huff reported a symbol to fill slices with
        /* No VLC was allocated in this case, so returning directly is safe. */
        send = 0;
        for (slice = 0; slice < c->slices; slice++) {
            uint16_t *dest;

            sstart = send;
            send   = (height * (slice + 1) / c->slices);
            dest   = dst + sstart * stride;

            prev = 0x200; /* mid-level predictor seed for 10-bit samples */
            for (j = sstart; j < send; j++) {
                for (i = 0; i < width; i++) {
                    pix = fsym;
                    if (use_pred) {
                        prev += pix;
                        prev &= 0x3FF; /* wrap to 10 bits */
                        pix   = prev;
                    }
                    dest[i] = pix;
                }
                dest += stride;
            }
        }
        return 0;
    }

    send = 0;
    for (slice = 0; slice < c->slices; slice++) {
        uint16_t *dest, *buf;
        int slice_data_start, slice_data_end, slice_size;

        sstart = send;
        send   = (height * (slice + 1) / c->slices);
        dest   = dst + sstart * stride;

        // slice offset and size validation was done earlier
        slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
        slice_data_end   = AV_RL32(src + slice * 4);
        slice_size       = slice_data_end - slice_data_start;

        if (!slice_size) {
            av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
                   "yet a slice has a length of zero.\n");
            goto fail;
        }

        /* Zero the padding, then byte-swap the slice into the scratch
         * buffer: the bitstream is stored as big-endian 32-bit words. */
        memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
        c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
                          (uint32_t *)(src + slice_data_start + c->slices * 4),
                          (slice_data_end - slice_data_start + 3) >> 2);
        init_get_bits(&gb, c->slice_bits, slice_size * 8);

        prev = 0x200;
        for (j = sstart; j < send; j++)
            READ_PLANE(2, 3)
        if (get_bits_left(&gb) > 32)
            av_log(c->avctx, AV_LOG_WARNING,
                   "%d bits left after decoding slice\n", get_bits_left(&gb));
    }

    ff_vlc_free(&vlc);
    ff_vlc_free_multi(&multi);

    return 0;
fail:
    ff_vlc_free(&vlc);
    ff_vlc_free_multi(&multi);
    return AVERROR_INVALIDDATA;
}
227 
228 static int compute_cmask(int plane_no, int interlaced, enum AVPixelFormat pix_fmt)
229 {
230  const int is_luma = (pix_fmt == AV_PIX_FMT_YUV420P) && !plane_no;
231 
232  if (interlaced)
233  return ~(1 + 2 * is_luma);
234 
235  return ~is_luma;
236 }
237 
/**
 * Decode one 8-bit plane, either Huffman-coded or (for the UM* packed
 * variants) bit-packed.
 *
 * @param c        decoder context
 * @param plane_no plane index, used to pick packed/control streams and
 *                 the slice boundary mask
 * @param dst      output plane
 * @param stride   output stride in bytes
 * @param src      code-length table (256 bytes), then per-slice offsets,
 *                 then the slice bitstreams (Huffman path only)
 * @param use_pred non-zero to apply left prediction to decoded samples
 * @return 0 on success, a negative AVERROR code on failure
 */
static int decode_plane(UtvideoContext *c, int plane_no,
                        uint8_t *dst, ptrdiff_t stride,
                        int width, int height,
                        const uint8_t *src, int use_pred)
{
    int i, j, slice, pix;
    int sstart, send;
    VLC_MULTI multi;
    VLC vlc;
    GetBitContext gb;
    int ret, prev, fsym;
    const int cmask = compute_cmask(plane_no, c->interlaced, c->avctx->pix_fmt);

    if (c->pack) {
        /* Packed mode: a 3-bit control code per group of 8 samples selects
         * the per-sample bit width read from the packed stream. */
        send = 0;
        for (slice = 0; slice < c->slices; slice++) {
            GetBitContext cbit, pbit;
            uint8_t *dest, *p;

            ret = init_get_bits8_le(&cbit, c->control_stream[plane_no][slice], c->control_stream_size[plane_no][slice]);
            if (ret < 0)
                return ret;

            ret = init_get_bits8_le(&pbit, c->packed_stream[plane_no][slice], c->packed_stream_size[plane_no][slice]);
            if (ret < 0)
                return ret;

            sstart = send;
            send   = (height * (slice + 1) / c->slices) & cmask;
            dest   = dst + sstart * stride;

            /* the control stream must hold 3 bits for every 8-sample group */
            if (3 * ((dst + send * stride - dest + 7)/8) > get_bits_left(&cbit))
                return AVERROR_INVALIDDATA;

            for (p = dest; p < dst + send * stride; p += 8) {
                int bits = get_bits_le(&cbit, 3);

                if (bits == 0) {
                    *(uint64_t *) p = 0; /* whole group is zero */
                } else {
                    /* bits+1 bits per sample; sub/add re-center the raw
                     * value into a signed-style range around zero */
                    uint32_t sub = 0x80 >> (8 - (bits + 1)), add;
                    int k;

                    if ((bits + 1) * 8 > get_bits_left(&pbit))
                        return AVERROR_INVALIDDATA;

                    for (k = 0; k < 8; k++) {

                        p[k]  = get_bits_le(&pbit, bits + 1);
                        add   = (~p[k] & sub) << (8 - bits);
                        p[k] -= sub;
                        p[k] += add;
                    }
                }
            }
        }

        return 0;
    }

    if (build_huff(c, src, &vlc, &multi, &fsym, 256)) {
        av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
        return AVERROR_INVALIDDATA;
    }
    if (fsym >= 0) { // build_huff reported a symbol to fill slices with
        /* No VLC was allocated in this case, so returning directly is safe. */
        send = 0;
        for (slice = 0; slice < c->slices; slice++) {
            uint8_t *dest;

            sstart = send;
            send   = (height * (slice + 1) / c->slices) & cmask;
            dest   = dst + sstart * stride;

            prev = 0x80; /* mid-level predictor seed for 8-bit samples */
            for (j = sstart; j < send; j++) {
                for (i = 0; i < width; i++) {
                    pix = fsym;
                    if (use_pred) {
                        prev += (unsigned)pix;
                        pix   = prev;
                    }
                    dest[i] = pix;
                }
                dest += stride;
            }
        }
        return 0;
    }

    src += 256; /* skip the code-length table to reach the slice offsets */

    send = 0;
    for (slice = 0; slice < c->slices; slice++) {
        uint8_t *dest, *buf;
        int slice_data_start, slice_data_end, slice_size;

        sstart = send;
        send   = (height * (slice + 1) / c->slices) & cmask;
        dest   = dst + sstart * stride;

        // slice offset and size validation was done earlier
        slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
        slice_data_end   = AV_RL32(src + slice * 4);
        slice_size       = slice_data_end - slice_data_start;

        if (!slice_size) {
            av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
                   "yet a slice has a length of zero.\n");
            goto fail;
        }

        /* Zero the padding, then byte-swap the slice into the scratch
         * buffer: the bitstream is stored as big-endian 32-bit words. */
        memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
        c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
                          (uint32_t *)(src + slice_data_start + c->slices * 4),
                          (slice_data_end - slice_data_start + 3) >> 2);
        init_get_bits(&gb, c->slice_bits, slice_size * 8);

        prev = 0x80;
        for (j = sstart; j < send; j++)
            READ_PLANE(1, 5)
        if (get_bits_left(&gb) > 32)
            av_log(c->avctx, AV_LOG_WARNING,
                   "%d bits left after decoding slice\n", get_bits_left(&gb));
    }

    ff_vlc_free(&vlc);
    ff_vlc_free_multi(&multi);

    return 0;
fail:
    ff_vlc_free(&vlc);
    ff_vlc_free_multi(&multi);
    return AVERROR_INVALIDDATA;
}
372 
373 #undef A
374 #undef B
375 #undef C
376 
377 static void restore_median_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
378  int width, int height, int slices, int rmode)
379 {
380  int i, j, slice;
381  int A, B, C;
382  uint8_t *bsrc;
383  int slice_start, slice_height;
384  const int cmask = ~rmode;
385 
386  for (slice = 0; slice < slices; slice++) {
387  slice_start = ((slice * height) / slices) & cmask;
388  slice_height = ((((slice + 1) * height) / slices) & cmask) -
389  slice_start;
390 
391  if (!slice_height)
392  continue;
393  bsrc = src + slice_start * stride;
394 
395  // first line - left neighbour prediction
396  bsrc[0] += 0x80;
397  c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
398  bsrc += stride;
399  if (slice_height <= 1)
400  continue;
401  // second line - first element has top prediction, the rest uses median
402  C = bsrc[-stride];
403  bsrc[0] += C;
404  A = bsrc[0];
405  for (i = 1; i < FFMIN(width, 16); i++) { /* scalar loop (DSP need align 16) */
406  B = bsrc[i - stride];
407  bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
408  C = B;
409  A = bsrc[i];
410  }
411  if (width > 16)
412  c->llviddsp.add_median_pred(bsrc + 16, bsrc - stride + 16,
413  bsrc + 16, width - 16, &A, &B);
414 
415  bsrc += stride;
416  // the rest of lines use continuous median prediction
417  for (j = 2; j < slice_height; j++) {
418  c->llviddsp.add_median_pred(bsrc, bsrc - stride,
419  bsrc, width, &A, &B);
420  bsrc += stride;
421  }
422  }
423 }
424 
425 /* UtVideo interlaced mode treats every two lines as a single one,
426  * so restoring function should take care of possible padding between
427  * two parts of the same "line".
428  */
/* UtVideo interlaced mode treats every two lines as a single one,
 * so restoring function should take care of possible padding between
 * two parts of the same "line".
 */
static void restore_median_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
                                     int width, int height, int slices, int rmode)
{
    int i, j, slice;
    int A, B, C;                         /* left, top and top-left neighbours */
    uint8_t *bsrc;
    int slice_start, slice_height;
    const int cmask   = ~(rmode ? 3 : 1); /* slice bounds aligned to line pairs */
    const ptrdiff_t stride2 = stride << 1; /* step over a full interlaced "line" */

    for (slice = 0; slice < slices; slice++) {
        slice_start  = ((slice * height) / slices) & cmask;
        slice_height = ((((slice + 1) * height) / slices) & cmask) -
                       slice_start;
        slice_height >>= 1;              /* count line pairs, not rows */
        if (!slice_height)
            continue;

        bsrc = src + slice_start * stride;

        // first line - left neighbour prediction
        bsrc[0] += 0x80;
        A = c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
        /* second field row continues the same "line", seeded with A */
        c->llviddsp.add_left_pred(bsrc + stride, bsrc + stride, width, A);
        bsrc += stride2;
        if (slice_height <= 1)
            continue;
        // second line - first element has top prediction, the rest uses median
        C = bsrc[-stride2];
        bsrc[0] += C;
        A = bsrc[0];
        for (i = 1; i < FFMIN(width, 16); i++) { /* scalar loop (DSP need align 16) */
            B = bsrc[i - stride2];
            bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
            C = B;
            A = bsrc[i];
        }
        if (width > 16)
            c->llviddsp.add_median_pred(bsrc + 16, bsrc - stride2 + 16,
                                        bsrc + 16, width - 16, &A, &B);

        /* second field row of this pair: top neighbour is one row up */
        c->llviddsp.add_median_pred(bsrc + stride, bsrc - stride,
                                    bsrc + stride, width, &A, &B);
        bsrc += stride2;
        // the rest of lines use continuous median prediction
        for (j = 2; j < slice_height; j++) {
            c->llviddsp.add_median_pred(bsrc, bsrc - stride2,
                                        bsrc, width, &A, &B);
            c->llviddsp.add_median_pred(bsrc + stride, bsrc - stride,
                                        bsrc + stride, width, &A, &B);
            bsrc += stride2;
        }
    }
}
483 
484 static void restore_gradient_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
485  int width, int height, int slices, int rmode)
486 {
487  int i, j, slice;
488  int A, B, C;
489  uint8_t *bsrc;
490  int slice_start, slice_height;
491  const int cmask = ~rmode;
492  int min_width = FFMIN(width, 32);
493 
494  for (slice = 0; slice < slices; slice++) {
495  slice_start = ((slice * height) / slices) & cmask;
496  slice_height = ((((slice + 1) * height) / slices) & cmask) -
497  slice_start;
498 
499  if (!slice_height)
500  continue;
501  bsrc = src + slice_start * stride;
502 
503  // first line - left neighbour prediction
504  bsrc[0] += 0x80;
505  c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
506  bsrc += stride;
507  if (slice_height <= 1)
508  continue;
509  for (j = 1; j < slice_height; j++) {
510  // second line - first element has top prediction, the rest uses gradient
511  bsrc[0] = (bsrc[0] + bsrc[-stride]) & 0xFF;
512  for (i = 1; i < min_width; i++) { /* dsp need align 32 */
513  A = bsrc[i - stride];
514  B = bsrc[i - (stride + 1)];
515  C = bsrc[i - 1];
516  bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
517  }
518  if (width > 32)
519  c->llviddsp.add_gradient_pred(bsrc + 32, stride, width - 32);
520  bsrc += stride;
521  }
522  }
523 }
524 
/* Undo gradient prediction on an interlaced plane.  As with the median
 * variant, each pair of rows forms one logical "line", so the neighbour
 * offsets use stride2 and the second field row has special handling. */
static void restore_gradient_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
                                       int width, int height, int slices, int rmode)
{
    int i, j, slice;
    int A, B, C;                          /* top, top-left and left neighbours */
    uint8_t *bsrc;
    int slice_start, slice_height;
    const int cmask   = ~(rmode ? 3 : 1);  /* slice bounds aligned to line pairs */
    const ptrdiff_t stride2 = stride << 1; /* step over a full interlaced "line" */
    int min_width = FFMIN(width, 32);

    for (slice = 0; slice < slices; slice++) {
        slice_start  = ((slice * height) / slices) & cmask;
        slice_height = ((((slice + 1) * height) / slices) & cmask) -
                       slice_start;
        slice_height >>= 1;               /* count line pairs, not rows */
        if (!slice_height)
            continue;

        bsrc = src + slice_start * stride;

        // first line - left neighbour prediction
        bsrc[0] += 0x80;
        A = c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
        /* second field row continues the same "line", seeded with A */
        c->llviddsp.add_left_pred(bsrc + stride, bsrc + stride, width, A);
        bsrc += stride2;
        if (slice_height <= 1)
            continue;
        for (j = 1; j < slice_height; j++) {
            // second line - first element has top prediction, the rest uses gradient
            bsrc[0] = (bsrc[0] + bsrc[-stride2]) & 0xFF;
            for (i = 1; i < min_width; i++) { /* dsp need align 32 */
                A = bsrc[i - stride2];
                B = bsrc[i - (stride2 + 1)];
                C = bsrc[i - 1];
                bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
            }
            if (width > 32)
                c->llviddsp.add_gradient_pred(bsrc + 32, stride2, width - 32);

            /* First pixel of the second field row: its "left" neighbour in
             * the concatenated line is the last pixel of the first field row,
             * and its top-left is the last pixel two rows up — hence the
             * -(1 + 2*stride - width) offset below. */
            A = bsrc[-stride];
            B = bsrc[-(1 + stride + stride - width)];
            C = bsrc[width - 1];
            bsrc[stride] = (A - B + C + bsrc[stride]) & 0xFF;
            for (i = 1; i < width; i++) {
                A = bsrc[i - stride];
                B = bsrc[i - (1 + stride)];
                C = bsrc[i - 1 + stride];
                bsrc[i + stride] = (A - B + C + bsrc[i + stride]) & 0xFF;
            }
            bsrc += stride2;
        }
    }
}
579 
581  int *got_frame, AVPacket *avpkt)
582 {
583  const uint8_t *buf = avpkt->data;
584  int buf_size = avpkt->size;
585  UtvideoContext *c = avctx->priv_data;
586  int i, j;
587  const uint8_t *plane_start[5];
588  int plane_size, max_slice_size = 0, slice_start, slice_end, slice_size;
589  int ret;
590  GetByteContext gb;
591 
592  if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
593  return ret;
594 
595  /* parse plane structure to get frame flags and validate slice offsets */
596  bytestream2_init(&gb, buf, buf_size);
597 
598  if (c->pack) {
599  const uint8_t *packed_stream;
600  const uint8_t *control_stream;
601  GetByteContext pb;
602  uint32_t nb_cbs;
603  int left;
604 
605  c->frame_info = PRED_GRADIENT << 8;
606 
607  if (bytestream2_get_byte(&gb) != 1)
608  return AVERROR_INVALIDDATA;
609  bytestream2_skip(&gb, 3);
610  c->offset = bytestream2_get_le32(&gb);
611 
612  if (buf_size <= c->offset + 8LL)
613  return AVERROR_INVALIDDATA;
614 
615  bytestream2_init(&pb, buf + 8 + c->offset, buf_size - 8 - c->offset);
616 
617  nb_cbs = bytestream2_get_le32(&pb);
618  if (nb_cbs > c->offset)
619  return AVERROR_INVALIDDATA;
620 
621  packed_stream = buf + 8;
622  control_stream = packed_stream + (c->offset - nb_cbs);
623  left = control_stream - packed_stream;
624 
625  for (i = 0; i < c->planes; i++) {
626  for (j = 0; j < c->slices; j++) {
627  c->packed_stream[i][j] = packed_stream;
628  c->packed_stream_size[i][j] = bytestream2_get_le32(&pb);
629  if (c->packed_stream_size[i][j] > left)
630  return AVERROR_INVALIDDATA;
631  left -= c->packed_stream_size[i][j];
632  packed_stream += c->packed_stream_size[i][j];
633  }
634  }
635 
636  left = buf + buf_size - control_stream;
637 
638  for (i = 0; i < c->planes; i++) {
639  for (j = 0; j < c->slices; j++) {
640  c->control_stream[i][j] = control_stream;
641  c->control_stream_size[i][j] = bytestream2_get_le32(&pb);
642  if (c->control_stream_size[i][j] > left)
643  return AVERROR_INVALIDDATA;
644  left -= c->control_stream_size[i][j];
645  control_stream += c->control_stream_size[i][j];
646  }
647  }
648  } else if (c->pro) {
649  if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) {
650  av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
651  return AVERROR_INVALIDDATA;
652  }
653  c->frame_info = bytestream2_get_le32u(&gb);
654  c->slices = ((c->frame_info >> 16) & 0xff) + 1;
655  for (i = 0; i < c->planes; i++) {
656  plane_start[i] = gb.buffer;
657  if (bytestream2_get_bytes_left(&gb) < 1024 + 4 * c->slices) {
658  av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
659  return AVERROR_INVALIDDATA;
660  }
661  slice_start = 0;
662  slice_end = 0;
663  for (j = 0; j < c->slices; j++) {
664  slice_end = bytestream2_get_le32u(&gb);
665  if (slice_end < 0 || slice_end < slice_start ||
666  bytestream2_get_bytes_left(&gb) < slice_end + 1024LL) {
667  av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
668  return AVERROR_INVALIDDATA;
669  }
670  slice_size = slice_end - slice_start;
672  max_slice_size = FFMAX(max_slice_size, slice_size);
673  }
674  plane_size = slice_end;
675  bytestream2_skipu(&gb, plane_size);
676  bytestream2_skipu(&gb, 1024);
677  }
678  plane_start[c->planes] = gb.buffer;
679  } else {
680  for (i = 0; i < c->planes; i++) {
681  plane_start[i] = gb.buffer;
682  if (bytestream2_get_bytes_left(&gb) < 256 + 4 * c->slices) {
683  av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
684  return AVERROR_INVALIDDATA;
685  }
686  bytestream2_skipu(&gb, 256);
687  slice_start = 0;
688  slice_end = 0;
689  for (j = 0; j < c->slices; j++) {
690  slice_end = bytestream2_get_le32u(&gb);
691  if (slice_end < 0 || slice_end < slice_start ||
693  av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
694  return AVERROR_INVALIDDATA;
695  }
696  slice_size = slice_end - slice_start;
698  max_slice_size = FFMAX(max_slice_size, slice_size);
699  }
700  plane_size = slice_end;
701  bytestream2_skipu(&gb, plane_size);
702  }
703  plane_start[c->planes] = gb.buffer;
704  if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) {
705  av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
706  return AVERROR_INVALIDDATA;
707  }
708  c->frame_info = bytestream2_get_le32u(&gb);
709  }
710  av_log(avctx, AV_LOG_DEBUG, "frame information flags %"PRIX32"\n",
711  c->frame_info);
712 
713  c->frame_pred = (c->frame_info >> 8) & 3;
714 
715  max_slice_size += 4*avctx->width;
716 
717  if (!c->pack) {
718  av_fast_malloc(&c->slice_bits, &c->slice_bits_size,
719  max_slice_size + AV_INPUT_BUFFER_PADDING_SIZE);
720 
721  if (!c->slice_bits) {
722  av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer\n");
723  return AVERROR(ENOMEM);
724  }
725  }
726 
727  switch (c->avctx->pix_fmt) {
728  case AV_PIX_FMT_GBRP:
729  case AV_PIX_FMT_GBRAP:
730  for (i = 0; i < c->planes; i++) {
731  ret = decode_plane(c, i, frame->data[i],
732  frame->linesize[i], avctx->width,
733  avctx->height, plane_start[i],
734  c->frame_pred == PRED_LEFT);
735  if (ret)
736  return ret;
737  if (c->frame_pred == PRED_MEDIAN) {
738  if (!c->interlaced) {
740  frame->linesize[i], avctx->width,
741  avctx->height, c->slices, 0);
742  } else {
744  frame->linesize[i],
745  avctx->width, avctx->height, c->slices,
746  0);
747  }
748  } else if (c->frame_pred == PRED_GRADIENT) {
749  if (!c->interlaced) {
751  frame->linesize[i], avctx->width,
752  avctx->height, c->slices, 0);
753  } else {
755  frame->linesize[i],
756  avctx->width, avctx->height, c->slices,
757  0);
758  }
759  }
760  }
761  c->utdsp.restore_rgb_planes(frame->data[2], frame->data[0], frame->data[1],
762  frame->linesize[2], frame->linesize[0], frame->linesize[1],
763  avctx->width, avctx->height);
764  break;
765  case AV_PIX_FMT_GBRAP10:
766  case AV_PIX_FMT_GBRP10:
767  for (i = 0; i < c->planes; i++) {
768  ret = decode_plane10(c, i, (uint16_t *)frame->data[i],
769  frame->linesize[i] / 2, avctx->width,
770  avctx->height, plane_start[i],
771  plane_start[i + 1] - 1024,
772  c->frame_pred == PRED_LEFT);
773  if (ret)
774  return ret;
775  }
776  c->utdsp.restore_rgb_planes10((uint16_t *)frame->data[2], (uint16_t *)frame->data[0], (uint16_t *)frame->data[1],
777  frame->linesize[2] / 2, frame->linesize[0] / 2, frame->linesize[1] / 2,
778  avctx->width, avctx->height);
779  break;
780  case AV_PIX_FMT_YUV420P:
781  for (i = 0; i < 3; i++) {
783  avctx->width >> !!i, avctx->height >> !!i,
784  plane_start[i], c->frame_pred == PRED_LEFT);
785  if (ret)
786  return ret;
787  if (c->frame_pred == PRED_MEDIAN) {
788  if (!c->interlaced) {
790  avctx->width >> !!i, avctx->height >> !!i,
791  c->slices, !i);
792  } else {
794  avctx->width >> !!i,
795  avctx->height >> !!i,
796  c->slices, !i);
797  }
798  } else if (c->frame_pred == PRED_GRADIENT) {
799  if (!c->interlaced) {
801  avctx->width >> !!i, avctx->height >> !!i,
802  c->slices, !i);
803  } else {
805  avctx->width >> !!i,
806  avctx->height >> !!i,
807  c->slices, !i);
808  }
809  }
810  }
811  break;
812  case AV_PIX_FMT_YUV422P:
813  for (i = 0; i < 3; i++) {
815  avctx->width >> !!i, avctx->height,
816  plane_start[i], c->frame_pred == PRED_LEFT);
817  if (ret)
818  return ret;
819  if (c->frame_pred == PRED_MEDIAN) {
820  if (!c->interlaced) {
822  avctx->width >> !!i, avctx->height,
823  c->slices, 0);
824  } else {
826  avctx->width >> !!i, avctx->height,
827  c->slices, 0);
828  }
829  } else if (c->frame_pred == PRED_GRADIENT) {
830  if (!c->interlaced) {
832  avctx->width >> !!i, avctx->height,
833  c->slices, 0);
834  } else {
836  avctx->width >> !!i, avctx->height,
837  c->slices, 0);
838  }
839  }
840  }
841  break;
842  case AV_PIX_FMT_YUV444P:
843  for (i = 0; i < 3; i++) {
845  avctx->width, avctx->height,
846  plane_start[i], c->frame_pred == PRED_LEFT);
847  if (ret)
848  return ret;
849  if (c->frame_pred == PRED_MEDIAN) {
850  if (!c->interlaced) {
852  avctx->width, avctx->height,
853  c->slices, 0);
854  } else {
856  avctx->width, avctx->height,
857  c->slices, 0);
858  }
859  } else if (c->frame_pred == PRED_GRADIENT) {
860  if (!c->interlaced) {
862  avctx->width, avctx->height,
863  c->slices, 0);
864  } else {
866  avctx->width, avctx->height,
867  c->slices, 0);
868  }
869  }
870  }
871  break;
873  for (i = 0; i < 3; i++) {
874  ret = decode_plane10(c, i, (uint16_t *)frame->data[i], frame->linesize[i] / 2,
875  avctx->width >> !!i, avctx->height >> !!i,
876  plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT);
877  if (ret)
878  return ret;
879  }
880  break;
882  for (i = 0; i < 3; i++) {
883  ret = decode_plane10(c, i, (uint16_t *)frame->data[i], frame->linesize[i] / 2,
884  avctx->width >> !!i, avctx->height,
885  plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT);
886  if (ret)
887  return ret;
888  }
889  break;
890  }
891 
894  if (c->interlaced)
896 
897  *got_frame = 1;
898 
899  /* always report that the buffer was completely consumed */
900  return buf_size;
901 }
902 
904 {
905  UtvideoContext * const c = avctx->priv_data;
906  int h_shift, v_shift;
907 
908  c->avctx = avctx;
909 
910  ff_utvideodsp_init(&c->utdsp);
911  ff_bswapdsp_init(&c->bdsp);
912  ff_llviddsp_init(&c->llviddsp);
913 
914  c->slice_bits_size = 0;
915 
916  switch (avctx->codec_tag) {
917  case MKTAG('U', 'L', 'R', 'G'):
918  c->planes = 3;
919  avctx->pix_fmt = AV_PIX_FMT_GBRP;
920  break;
921  case MKTAG('U', 'L', 'R', 'A'):
922  c->planes = 4;
923  avctx->pix_fmt = AV_PIX_FMT_GBRAP;
924  break;
925  case MKTAG('U', 'L', 'Y', '0'):
926  c->planes = 3;
927  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
928  avctx->colorspace = AVCOL_SPC_BT470BG;
929  break;
930  case MKTAG('U', 'L', 'Y', '2'):
931  c->planes = 3;
932  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
933  avctx->colorspace = AVCOL_SPC_BT470BG;
934  break;
935  case MKTAG('U', 'L', 'Y', '4'):
936  c->planes = 3;
937  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
938  avctx->colorspace = AVCOL_SPC_BT470BG;
939  break;
940  case MKTAG('U', 'Q', 'Y', '0'):
941  c->planes = 3;
942  c->pro = 1;
943  avctx->pix_fmt = AV_PIX_FMT_YUV420P10;
944  break;
945  case MKTAG('U', 'Q', 'Y', '2'):
946  c->planes = 3;
947  c->pro = 1;
948  avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
949  break;
950  case MKTAG('U', 'Q', 'R', 'G'):
951  c->planes = 3;
952  c->pro = 1;
953  avctx->pix_fmt = AV_PIX_FMT_GBRP10;
954  break;
955  case MKTAG('U', 'Q', 'R', 'A'):
956  c->planes = 4;
957  c->pro = 1;
958  avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
959  break;
960  case MKTAG('U', 'L', 'H', '0'):
961  c->planes = 3;
962  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
963  avctx->colorspace = AVCOL_SPC_BT709;
964  break;
965  case MKTAG('U', 'L', 'H', '2'):
966  c->planes = 3;
967  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
968  avctx->colorspace = AVCOL_SPC_BT709;
969  break;
970  case MKTAG('U', 'L', 'H', '4'):
971  c->planes = 3;
972  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
973  avctx->colorspace = AVCOL_SPC_BT709;
974  break;
975  case MKTAG('U', 'M', 'Y', '2'):
976  c->planes = 3;
977  c->pack = 1;
978  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
979  avctx->colorspace = AVCOL_SPC_BT470BG;
980  break;
981  case MKTAG('U', 'M', 'H', '2'):
982  c->planes = 3;
983  c->pack = 1;
984  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
985  avctx->colorspace = AVCOL_SPC_BT709;
986  break;
987  case MKTAG('U', 'M', 'Y', '4'):
988  c->planes = 3;
989  c->pack = 1;
990  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
991  avctx->colorspace = AVCOL_SPC_BT470BG;
992  break;
993  case MKTAG('U', 'M', 'H', '4'):
994  c->planes = 3;
995  c->pack = 1;
996  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
997  avctx->colorspace = AVCOL_SPC_BT709;
998  break;
999  case MKTAG('U', 'M', 'R', 'G'):
1000  c->planes = 3;
1001  c->pack = 1;
1002  avctx->pix_fmt = AV_PIX_FMT_GBRP;
1003  break;
1004  case MKTAG('U', 'M', 'R', 'A'):
1005  c->planes = 4;
1006  c->pack = 1;
1007  avctx->pix_fmt = AV_PIX_FMT_GBRAP;
1008  break;
1009  default:
1010  av_log(avctx, AV_LOG_ERROR, "Unknown Ut Video FOURCC provided (%08X)\n",
1011  avctx->codec_tag);
1012  return AVERROR_INVALIDDATA;
1013  }
1014 
1015  av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &h_shift, &v_shift);
1016  if ((avctx->width & ((1<<h_shift)-1)) ||
1017  (avctx->height & ((1<<v_shift)-1))) {
1018  avpriv_request_sample(avctx, "Odd dimensions");
1019  return AVERROR_PATCHWELCOME;
1020  }
1021 
1022  if (c->pack && avctx->extradata_size >= 16) {
1023  av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
1024  avctx->extradata[3], avctx->extradata[2],
1025  avctx->extradata[1], avctx->extradata[0]);
1026  av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
1027  AV_RB32(avctx->extradata + 4));
1028  c->compression = avctx->extradata[8];
1029  if (c->compression != 2)
1030  avpriv_request_sample(avctx, "Unknown compression type");
1031  c->slices = avctx->extradata[9] + 1;
1032  } else if (!c->pro && avctx->extradata_size >= 16) {
1033  av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
1034  avctx->extradata[3], avctx->extradata[2],
1035  avctx->extradata[1], avctx->extradata[0]);
1036  av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
1037  AV_RB32(avctx->extradata + 4));
1038  c->frame_info_size = AV_RL32(avctx->extradata + 8);
1039  c->flags = AV_RL32(avctx->extradata + 12);
1040 
1041  if (c->frame_info_size != 4)
1042  avpriv_request_sample(avctx, "Frame info not 4 bytes");
1043  av_log(avctx, AV_LOG_DEBUG, "Encoding parameters %08"PRIX32"\n", c->flags);
1044  c->slices = (c->flags >> 24) + 1;
1045  c->compression = c->flags & 1;
1046  c->interlaced = c->flags & 0x800;
1047  } else if (c->pro && avctx->extradata_size == 8) {
1048  av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
1049  avctx->extradata[3], avctx->extradata[2],
1050  avctx->extradata[1], avctx->extradata[0]);
1051  av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
1052  AV_RB32(avctx->extradata + 4));
1053  c->interlaced = 0;
1054  c->frame_info_size = 4;
1055  } else {
1056  av_log(avctx, AV_LOG_ERROR,
1057  "Insufficient extradata size %d, should be at least 16\n",
1058  avctx->extradata_size);
1059  return AVERROR_INVALIDDATA;
1060  }
1061 
1062  c->buffer = av_calloc(avctx->width + 8, c->pro?2:1);
1063  if (!c->buffer)
1064  return AVERROR(ENOMEM);
1065 
1066  return 0;
1067 }
1068 
1070 {
1071  UtvideoContext * const c = avctx->priv_data;
1072 
1073  av_freep(&c->slice_bits);
1074  av_freep(&c->buffer);
1075 
1076  return 0;
1077 }
1078 
1080  .p.name = "utvideo",
1081  CODEC_LONG_NAME("Ut Video"),
1082  .p.type = AVMEDIA_TYPE_VIDEO,
1083  .p.id = AV_CODEC_ID_UTVIDEO,
1084  .priv_data_size = sizeof(UtvideoContext),
1085  .init = decode_init,
1086  .close = decode_end,
1088  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
1089 };
UTVideoDSPContext
Definition: utvideodsp.h:27
A
#define A(x)
Definition: vpx_arith.h:28
utvideo.h
bswapdsp.h
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:695
AVERROR
#define AVERROR(e) — convert a POSIX error code into a negative FFmpeg error return value. Definition: error.h
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:685
restore_gradient_planar_il
static void restore_gradient_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride, int width, int height, int slices, int rmode)
Definition: utvideodec.c:525
decode_init
static av_cold int decode_init(AVCodecContext *avctx)
Definition: utvideodec.c:903
PRED_LEFT
@ PRED_LEFT
Definition: utvideo.h:34
UtvideoContext::buffer
void * buffer
Definition: utvideodec.c:62
GetByteContext
Definition: bytestream.h:33
HuffEntry::len
uint8_t len
Definition: exr.c:95
bytestream2_skipu
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
Definition: bytestream.h:174
UtvideoContext::llviddsp
LLVidDSPContext llviddsp
Definition: utvideodec.c:49
decode_end
static av_cold int decode_end(AVCodecContext *avctx)
Definition: utvideodec.c:1069
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:344
pixdesc.h
AVPacket::data
uint8_t * data
Definition: packet.h:522
compute_cmask
static int compute_cmask(int plane_no, int interlaced, enum AVPixelFormat pix_fmt)
Definition: utvideodec.c:228
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:478
UtvideoContext::offset
uint32_t offset
Definition: utvideodec.c:51
FFCodec
Definition: codec_internal.h:127
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:616
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
restore_gradient_planar
static void restore_gradient_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride, int width, int height, int slices, int rmode)
Definition: utvideodec.c:484
UtvideoContext::slices
int slices
Definition: utvideodec.c:53
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:514
thread.h
decode_plane10
static int decode_plane10(UtvideoContext *c, int plane_no, uint16_t *dst, ptrdiff_t stride, int width, int height, const uint8_t *src, const uint8_t *huff, int use_pred)
Definition: utvideodec.c:141
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:365
ff_thread_get_buffer
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in FFCodec caps_internal and use ff_thread_get_buffer() to allocate frames. Otherwise decode directly into the user-supplied frames. Call ff_thread_report_progress() after some part of the current picture has decoded. A good place to put this is where draw_horiz_band() is called - add this if it isn 't called anywhere
bytestream2_skip
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:615
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:212
fail
#define fail()
Definition: checkasm.h:179
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:494
GetBitContext
Definition: get_bits.h:108
READ_PLANE
#define READ_PLANE(b, end)
Definition: utvideodec.c:117
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2990
LLVidDSPContext
Definition: lossless_videodsp.h:28
C
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s your new playground is ready Some little details about what s going which in turn will define variables for the build system and the C
Definition: writing_filters.txt:58
slice_start
static int slice_start(SliceContext *sc, VVCContext *s, VVCFrameContext *fc, const CodedBitstreamUnit *unit, const int is_first_slice)
Definition: vvcdec.c:694
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:595
AV_PIX_FMT_GBRAP10
#define AV_PIX_FMT_GBRAP10
Definition: pixfmt.h:498
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:524
width
#define width
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:287
intreadwrite.h
VLC_MULTI
Definition: vlc.h:48
pix_fmt
static enum AVPixelFormat pix_fmt
Definition: demux_decode.c:41
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:1725
GetByteContext::buffer
const uint8_t * buffer
Definition: bytestream.h:34
ff_vlc_free_multi
void ff_vlc_free_multi(VLC_MULTI *vlc)
Definition: vlc.c:575
HuffEntry::sym
uint16_t sym
Definition: exr.c:96
bits
uint8_t bits
Definition: vp3data.h:128
B
#define B
Definition: huffyuv.h:42
get_bits_le
static unsigned int get_bits_le(GetBitContext *s, int n)
Definition: get_bits.h:356
AV_CODEC_ID_UTVIDEO
@ AV_CODEC_ID_UTVIDEO
Definition: codec_id.h:205
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
get_bits.h
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:272
UtvideoContext::interlaced
int interlaced
Definition: utvideodec.c:55
frame
static AVFrame * frame
Definition: demux_decode.c:54
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:110
ff_bswapdsp_init
av_cold void ff_bswapdsp_init(BswapDSPContext *c)
Definition: bswapdsp.c:49
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
UtvideoContext::control_stream
const uint8_t * control_stream[4][256]
Definition: utvideodec.c:66
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
UtvideoContext::slice_bits
uint8_t * slice_bits
Definition: utvideodec.c:60
UtvideoContext::frame_pred
int frame_pred
Definition: utvideodec.c:56
ff_vlc_init_multi_from_lengths
int ff_vlc_init_multi_from_lengths(VLC *vlc, VLC_MULTI *multi, int nb_bits, int nb_elems, int nb_codes, const int8_t *lens, int lens_wrap, const void *symbols, int symbols_wrap, int symbols_size, int offset, int flags, void *logctx)
Build VLC decoding tables suitable for use with get_vlc_multi()
Definition: vlc.c:517
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:479
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:158
UtvideoContext::frame_info
uint32_t frame_info
Definition: utvideodec.c:51
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:446
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:365
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:523
codec_internal.h
decode_frame
static int decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: utvideodec.c:580
UtvideoContext::packed_stream
const uint8_t * packed_stream[4][256]
Definition: utvideodec.c:64
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
VLC_BITS
#define VLC_BITS
height
#define height
PRED_MEDIAN
@ PRED_MEDIAN
Definition: utvideo.h:36
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
restore_median_planar_il
static void restore_median_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride, int width, int height, int slices, int rmode)
Definition: utvideodec.c:429
interlaced
uint8_t interlaced
Definition: mxfenc.c:2263
restore_median_planar
static void restore_median_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride, int width, int height, int slices, int rmode)
Definition: utvideodec.c:377
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:523
ff_utvideo_decoder
const FFCodec ff_utvideo_decoder
Definition: utvideodec.c:1079
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
UtvideoContext
Definition: utvideodec.c:45
len
int len
Definition: vorbis_enc_data.h:426
AVCodecContext::height
int height
Definition: avcodec.h:618
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:657
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:603
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:262
UtvideoContext::pack
int pack
Definition: utvideodec.c:58
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
build_huff
static int build_huff(UtvideoContext *c, const uint8_t *src, VLC *vlc, VLC_MULTI *multi, int *fsym, unsigned nb_elems)
Definition: utvideodec.c:75
mid_pred
#define mid_pred
Definition: mathops.h:98
ff_vlc_free
void ff_vlc_free(VLC *vlc)
Definition: vlc.c:580
ret
ret
Definition: filter_design.txt:187
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
UtvideoContext::frame_info_size
uint32_t frame_info_size
Definition: utvideodec.c:51
ff_llviddsp_init
void ff_llviddsp_init(LLVidDSPContext *c)
Definition: lossless_videodsp.c:113
AVCodecContext
main external API structure.
Definition: avcodec.h:445
VLC
Definition: vlc.h:36
HuffEntry
Definition: exr.c:94
UtvideoContext::compression
int compression
Definition: utvideodec.c:54
UtvideoContext::flags
uint32_t flags
Definition: utvideodec.c:51
decode_plane
static int decode_plane(UtvideoContext *c, int plane_no, uint8_t *dst, ptrdiff_t stride, int width, int height, const uint8_t *src, int use_pred)
Definition: utvideodec.c:238
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
lossless_videodsp.h
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:36
init_get_bits8_le
static int init_get_bits8_le(GetBitContext *s, const uint8_t *buffer, int byte_size)
Definition: get_bits.h:553
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:470
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:472
AVPacket
This structure stores compressed data.
Definition: packet.h:499
UtvideoContext::slice_bits_size
int slice_bits_size
Definition: utvideodec.c:61
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:555
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
utvideodsp.h
ff_utvideodsp_init
av_cold void ff_utvideodsp_init(UTVideoDSPContext *c)
Definition: utvideodsp.c:75
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:618
UtvideoContext::bdsp
BswapDSPContext bdsp
Definition: utvideodec.c:48
bytestream.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:389
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
UtvideoContext::pro
int pro
Definition: utvideodec.c:57
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
BswapDSPContext
Definition: bswapdsp.h:24
UtvideoContext::avctx
AVCodecContext * avctx
Definition: utvideodec.c:46
UtvideoContext::packed_stream_size
size_t packed_stream_size[4][256]
Definition: utvideodec.c:65
UtvideoContext::control_stream_size
size_t control_stream_size[4][256]
Definition: utvideodec.c:67
UtvideoContext::utdsp
UTVideoDSPContext utdsp
Definition: utvideodec.c:47
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
Definition: pixfmt.h:611
UtvideoContext::planes
int planes
Definition: utvideodec.c:52
PRED_GRADIENT
@ PRED_GRADIENT
Definition: utvideo.h:35