1 /*
2  * Copyright (C) 2007 Marco Gerards <marco@gnu.org>
3  * Copyright (C) 2009 David Conrad
4  * Copyright (C) 2011 Jordi Ortiz
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * Dirac Decoder
26  * @author Marco Gerards <marco@gnu.org>, David Conrad, Jordi Ortiz <nenjordi@gmail.com>
27  */
28 
29 #include "libavutil/mem.h"
30 #include "libavutil/mem_internal.h"
31 #include "libavutil/pixdesc.h"
32 #include "libavutil/thread.h"
33 #include "avcodec.h"
34 #include "get_bits.h"
35 #include "codec_internal.h"
36 #include "decode.h"
37 #include "golomb.h"
38 #include "dirac_arith.h"
39 #include "dirac_vlc.h"
40 #include "mpegvideoencdsp.h"
41 #include "dirac_dwt.h"
42 #include "dirac.h"
43 #include "diractab.h"
44 #include "diracdsp.h"
45 #include "videodsp.h"
46 
47 #define EDGE_WIDTH 16
48 
49 /**
50  * The spec limits this to 3 for frame coding, but in practice it can be as high as 6
51  */
52 #define MAX_REFERENCE_FRAMES 8
53 #define MAX_DELAY 5 /* limit for main profile for frame coding (TODO: field coding) */
54 #define MAX_FRAMES (MAX_REFERENCE_FRAMES + MAX_DELAY + 1)
55 #define MAX_QUANT 255 /* max quant for VC-2 */
56 #define MAX_BLOCKSIZE 32 /* maximum xblen/yblen we support */
57 
58 /**
59  * DiracBlock->ref flags, if set then the block does MC from the given ref
60  */
61 #define DIRAC_REF_MASK_REF1 1
62 #define DIRAC_REF_MASK_REF2 2
63 #define DIRAC_REF_MASK_GLOBAL 4
64 
65 /**
66  * Value of Picture.reference when Picture is not a reference picture, but
67  * is held for delayed output.
68  */
69 #define DELAYED_PIC_REF 4
70 
71 #define CALC_PADDING(size, depth) \
72  (((size + (1 << depth) - 1) >> depth) << depth)
73 
74 #define DIVRNDUP(a, b) (((a) + (b) - 1) / (b))
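/* Both helpers round up: CALC_PADDING() pads a dimension to a whole number of
 * 2^depth samples so every DWT level divides evenly (e.g. CALC_PADDING(100, 3)
 * == 104), while DIVRNDUP() is a plain ceiling division
 * (DIVRNDUP(100, 16) == 7). */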
75 
76 typedef struct {
77  AVFrame *avframe;
78  int interpolated[3]; /* 1 if hpel[] is valid */
79  uint8_t *hpel[3][4];
80  uint8_t *hpel_base[3][4];
81  int reference;
82  unsigned picture_number;
83 } DiracFrame;
84 
85 typedef struct {
86  union {
87  int16_t mv[2][2];
88  int16_t dc[3];
89  } u; /* anonymous unions aren't in C99 :( */
90  uint8_t ref;
91 } DiracBlock;
92 
93 typedef struct SubBand {
94  int level;
95  int orientation;
96  int stride; /* in bytes */
97  int width;
98  int height;
99  int pshift;
100  int quant;
101  uint8_t *ibuf;
102  struct SubBand *parent;
103 
104  /* for low delay */
105  unsigned length;
106  const uint8_t *coeff_data;
107 } SubBand;
108 
109 typedef struct Plane {
110  DWTPlane idwt;
111 
112  int width;
113  int height;
114  ptrdiff_t stride;
115 
116  /* block length */
117  uint8_t xblen;
118  uint8_t yblen;
119  /* block separation (block n+1 starts after this many pixels in block n) */
120  uint8_t xbsep;
121  uint8_t ybsep;
122  /* amount of overspill on each edge (half of the overlap between blocks) */
123  uint8_t xoffset;
124  uint8_t yoffset;
125 
126  SubBand band[MAX_DWT_LEVELS][4];
127 } Plane;
128 
129 /* Used by Low Delay and High Quality profiles */
130 typedef struct DiracSlice {
131  GetBitContext gb;
132  int slice_x;
133  int slice_y;
134  int bytes;
135 } DiracSlice;
136 
137 typedef struct DiracContext {
138  AVCodecContext *avctx;
139  MpegvideoEncDSPContext mpvencdsp;
140  VideoDSPContext vdsp;
141  DiracDSPContext diracdsp;
142  DiracVersionInfo version;
143  GetBitContext gb;
144  AVDiracSeqHeader seq;
145  int seen_sequence_header;
146  int64_t frame_number; /* number of the next frame to display */
147  Plane plane[3];
148  int chroma_x_shift;
149  int chroma_y_shift;
150 
151  int bit_depth; /* bit depth */
152  int pshift; /* pixel shift = bit_depth > 8 */
153 
154  int zero_res; /* zero residue flag */
155  int is_arith; /* whether coeffs use arith or golomb coding */
156  int core_syntax; /* use core syntax only */
157  int low_delay; /* use the low delay syntax */
158  int hq_picture; /* high quality picture, enables low_delay */
159  int ld_picture; /* use low delay picture, turns on low_delay */
160  int dc_prediction; /* has dc prediction */
161  int globalmc_flag; /* use global motion compensation */
162  int num_refs; /* number of reference pictures */
163 
164  /* wavelet decoding */
165  unsigned wavelet_depth; /* depth of the IDWT */
166  unsigned wavelet_idx;
167 
168  /**
169  * schroedinger older than 1.0.8 doesn't store
170  * quant delta if only one codebook exists in a band
171  */
172  unsigned old_delta_quant;
173  unsigned codeblock_mode;
174 
175  unsigned num_x; /* number of horizontal slices */
176  unsigned num_y; /* number of vertical slices */
177 
178  uint8_t *thread_buf; /* Per-thread buffer for coefficient storage */
179  int threads_num_buf; /* Current # of buffers allocated */
180  int thread_buf_size; /* Each thread has a buffer this size */
181 
182  DiracSlice *slice_params_buf;
183  int slice_params_num_buf;
184 
185  struct {
186  unsigned width;
187  unsigned height;
188  } codeblock[MAX_DWT_LEVELS+1];
189 
190  struct {
191  AVRational bytes; /* average bytes per slice */
192  uint8_t quant[MAX_DWT_LEVELS][4]; /* [DIRAC_STD] E.1 */
193  } lowdelay;
194 
195  struct {
196  unsigned prefix_bytes;
197  uint64_t size_scaler;
198  } highquality;
199 
200  struct {
201  int pan_tilt[2]; /* pan/tilt vector */
202  int zrs[2][2]; /* zoom/rotate/shear matrix */
203  int perspective[2]; /* perspective vector */
204  unsigned zrs_exp;
205  unsigned perspective_exp;
206  } globalmc[2];
207 
208  /* motion compensation */
209  uint8_t mv_precision; /* [DIRAC_STD] REFS_WT_PRECISION */
210  int16_t weight[2]; /* [DIRAC_STD] REF1_WT and REF2_WT */
211  unsigned weight_log2denom; /* [DIRAC_STD] REFS_WT_PRECISION */
212 
213  int blwidth; /* number of blocks (horizontally) */
214  int blheight; /* number of blocks (vertically) */
215  int sbwidth; /* number of superblocks (horizontally) */
216  int sbheight; /* number of superblocks (vertically) */
217 
218  uint8_t *sbsplit;
219  DiracBlock *blmotion;
220 
221  uint8_t *edge_emu_buffer[4];
222  uint8_t *edge_emu_buffer_base;
223 
224  uint16_t *mctmp; /* buffer holding the MC data multiplied by OBMC weights */
225  uint8_t *mcscratch;
226  int buffer_stride;
227 
228  DECLARE_ALIGNED(16, uint8_t, obmc_weight)[3][MAX_BLOCKSIZE*MAX_BLOCKSIZE];
229 
230  void (*put_pixels_tab[4])(uint8_t *dst, const uint8_t *src[5], int stride, int h);
231  void (*avg_pixels_tab[4])(uint8_t *dst, const uint8_t *src[5], int stride, int h);
232  void (*add_obmc)(uint16_t *dst, const uint8_t *src, int stride, const uint8_t *obmc_weight, int yblen);
233  dirac_weight_func weight_func;
234  dirac_biweight_func biweight_func;
235 
236  DiracFrame *current_picture;
237  DiracFrame *ref_pics[2];
238 
239  DiracFrame *ref_frames[MAX_REFERENCE_FRAMES+1];
240  DiracFrame *delay_frames[MAX_DELAY+1];
241  DiracFrame all_frames[MAX_FRAMES];
242 } DiracContext;
243 
244 enum dirac_subband {
245  subband_ll = 0,
246  subband_hl = 1,
247  subband_lh = 2,
248  subband_hh = 3,
249  subband_nb,
250 };
251 
252 /* magic number division by 3 from schroedinger */
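/* 21845 == floor(2^16 / 3) and 10922 == floor(2^16 / 6), so
 * ((x+1)*21845 + 10922) >> 16 gives x/3 rounded to nearest for the small
 * predictor sums used here, e.g. divide3(5) == 2 and divide3(-5) == -2. */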
253 static inline int divide3(int x)
254 {
255  return (int)((x+1U)*21845 + 10922) >> 16;
256 }
257 
258 static DiracFrame *remove_frame(DiracFrame *framelist[], unsigned picnum)
259 {
260  DiracFrame *remove_pic = NULL;
261  int i, remove_idx = -1;
262 
263  for (i = 0; framelist[i]; i++)
264  if (framelist[i]->picture_number == picnum) {
265  remove_pic = framelist[i];
266  remove_idx = i;
267  }
268 
269  if (remove_pic)
270  for (i = remove_idx; framelist[i]; i++)
271  framelist[i] = framelist[i+1];
272 
273  return remove_pic;
274 }
275 
276 static int add_frame(DiracFrame *framelist[], int maxframes, DiracFrame *frame)
277 {
278  int i;
279  for (i = 0; i < maxframes; i++)
280  if (!framelist[i]) {
281  framelist[i] = frame;
282  return 0;
283  }
284  return -1;
285 }
286 
287 static int alloc_sequence_buffers(DiracContext *s)
288 {
289  int sbwidth = DIVRNDUP(s->seq.width, 4);
290  int sbheight = DIVRNDUP(s->seq.height, 4);
291  int i, w, h, top_padding;
292 
293  /* todo: think more about this / use or set Plane here */
294  for (i = 0; i < 3; i++) {
295  int max_xblen = MAX_BLOCKSIZE >> (i ? s->chroma_x_shift : 0);
296  int max_yblen = MAX_BLOCKSIZE >> (i ? s->chroma_y_shift : 0);
297  w = s->seq.width >> (i ? s->chroma_x_shift : 0);
298  h = s->seq.height >> (i ? s->chroma_y_shift : 0);
299 
300  /* we allocate the max we support here since num decompositions can
301  * change from frame to frame. Stride is aligned to 16 for SIMD, and
302  * 1<<MAX_DWT_LEVELS top padding to avoid if(y>0) checks in arith decoding,
303  * and MAX_BLOCKSIZE padding for MC: blocks can spill up to half of that
304  * on each side */
305  top_padding = FFMAX(1<<MAX_DWT_LEVELS, max_yblen/2);
306  w = FFALIGN(CALC_PADDING(w, MAX_DWT_LEVELS), 8); /* FIXME: Should this be 16 for SSE??? */
307  h = top_padding + CALC_PADDING(h, MAX_DWT_LEVELS) + max_yblen/2;
308 
309  s->plane[i].idwt.buf_base = av_calloc(w + max_xblen, h * (2 << s->pshift));
310  s->plane[i].idwt.tmp = av_malloc_array((w+16), 2 << s->pshift);
311  s->plane[i].idwt.buf = s->plane[i].idwt.buf_base + (top_padding*w)*(2 << s->pshift);
312  if (!s->plane[i].idwt.buf_base || !s->plane[i].idwt.tmp)
313  return AVERROR(ENOMEM);
314  }
315 
316  /* fixme: allocate using real stride here */
317  s->sbsplit = av_malloc_array(sbwidth, sbheight);
318  s->blmotion = av_malloc_array(sbwidth, sbheight * 16 * sizeof(*s->blmotion));
319 
320  if (!s->sbsplit || !s->blmotion)
321  return AVERROR(ENOMEM);
322  return 0;
323 }
324 
325 static int alloc_buffers(DiracContext *s, int stride)
326 {
327  int w = s->seq.width;
328  int h = s->seq.height;
329 
330  av_assert0(stride >= w);
331  stride += 64;
332 
333  if (s->buffer_stride >= stride)
334  return 0;
335  s->buffer_stride = 0;
336 
337  av_freep(&s->edge_emu_buffer_base);
338  memset(s->edge_emu_buffer, 0, sizeof(s->edge_emu_buffer));
339  av_freep(&s->mctmp);
340  av_freep(&s->mcscratch);
341 
342  s->edge_emu_buffer_base = av_malloc_array(stride, MAX_BLOCKSIZE);
343 
344  s->mctmp = av_malloc_array((stride+MAX_BLOCKSIZE), (h+MAX_BLOCKSIZE) * sizeof(*s->mctmp));
345  s->mcscratch = av_malloc_array(stride, MAX_BLOCKSIZE);
346 
347  if (!s->edge_emu_buffer_base || !s->mctmp || !s->mcscratch)
348  return AVERROR(ENOMEM);
349 
350  s->buffer_stride = stride;
351  return 0;
352 }
353 
354 static void free_sequence_buffers(DiracContext *s)
355 {
356  int i, j, k;
357 
358  for (i = 0; i < MAX_FRAMES; i++) {
359  if (s->all_frames[i].avframe->data[0]) {
360  av_frame_unref(s->all_frames[i].avframe);
361  memset(s->all_frames[i].interpolated, 0, sizeof(s->all_frames[i].interpolated));
362  }
363 
364  for (j = 0; j < 3; j++)
365  for (k = 1; k < 4; k++)
366  av_freep(&s->all_frames[i].hpel_base[j][k]);
367  }
368 
369  memset(s->ref_frames, 0, sizeof(s->ref_frames));
370  memset(s->delay_frames, 0, sizeof(s->delay_frames));
371 
372  for (i = 0; i < 3; i++) {
373  av_freep(&s->plane[i].idwt.buf_base);
374  av_freep(&s->plane[i].idwt.tmp);
375  }
376 
377  s->buffer_stride = 0;
378  av_freep(&s->sbsplit);
379  av_freep(&s->blmotion);
380  av_freep(&s->edge_emu_buffer_base);
381 
382  av_freep(&s->mctmp);
383  av_freep(&s->mcscratch);
384 }
385 
387 
388 static av_cold int dirac_decode_init(AVCodecContext *avctx)
389 {
390  DiracContext *s = avctx->priv_data;
391  int i, ret;
392 
393  s->avctx = avctx;
394  s->frame_number = -1;
395 
396  s->thread_buf = NULL;
397  s->threads_num_buf = -1;
398  s->thread_buf_size = -1;
399 
400  ff_diracdsp_init(&s->diracdsp);
401  ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
402  ff_videodsp_init(&s->vdsp, 8);
403 
404  for (i = 0; i < MAX_FRAMES; i++) {
405  s->all_frames[i].avframe = av_frame_alloc();
406  if (!s->all_frames[i].avframe)
407  return AVERROR(ENOMEM);
408  }
410  if (ret != 0)
411  return AVERROR_UNKNOWN;
412 
413  return 0;
414 }
415 
416 static void dirac_decode_flush(AVCodecContext *avctx)
417 {
418  DiracContext *s = avctx->priv_data;
419  free_sequence_buffers(s);
420  s->seen_sequence_header = 0;
421  s->frame_number = -1;
422 }
423 
424 static av_cold int dirac_decode_end(AVCodecContext *avctx)
425 {
426  DiracContext *s = avctx->priv_data;
427  int i;
428 
429  // Necessary in case dirac_decode_init() failed
430  if (s->all_frames[MAX_FRAMES - 1].avframe)
432  for (i = 0; i < MAX_FRAMES; i++)
433  av_frame_free(&s->all_frames[i].avframe);
434 
435  av_freep(&s->thread_buf);
436  av_freep(&s->slice_params_buf);
437 
438  return 0;
439 }
440 
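/* Inverse quantisation for Golomb-coded coefficients: the magnitude is scaled
 * by the quantisation factor, the rounding offset is added and the result is
 * shifted down by 2 (the ff_dirac_qscale_tab/qoffset tables carry two
 * fractional bits), then the sign is reapplied. */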
441 static inline int coeff_unpack_golomb(GetBitContext *gb, int qfactor, int qoffset)
442 {
443  int coeff = dirac_get_se_golomb(gb);
444  const unsigned sign = FFSIGN(coeff);
445  if (coeff)
446  coeff = sign*((sign * coeff * qfactor + qoffset) >> 2);
447  return coeff;
448 }
449 
450 #define SIGN_CTX(x) (CTX_SIGN_ZERO + ((x) > 0) - ((x) < 0))
451 
452 #define UNPACK_ARITH(n, type) \
453  static inline void coeff_unpack_arith_##n(DiracArith *c, int qfactor, int qoffset, \
454  SubBand *b, type *buf, int x, int y) \
455  { \
456  int sign, sign_pred = 0, pred_ctx = CTX_ZPZN_F1; \
457  unsigned coeff; \
458  const int mstride = -(b->stride >> (1+b->pshift)); \
459  if (b->parent) { \
460  const type *pbuf = (type *)b->parent->ibuf; \
461  const int stride = b->parent->stride >> (1+b->parent->pshift); \
462  pred_ctx += !!pbuf[stride * (y>>1) + (x>>1)] << 1; \
463  } \
464  if (b->orientation == subband_hl) \
465  sign_pred = buf[mstride]; \
466  if (x) { \
467  pred_ctx += !(buf[-1] | buf[mstride] | buf[-1 + mstride]); \
468  if (b->orientation == subband_lh) \
469  sign_pred = buf[-1]; \
470  } else { \
471  pred_ctx += !buf[mstride]; \
472  } \
473  coeff = dirac_get_arith_uint(c, pred_ctx, CTX_COEFF_DATA); \
474  if (coeff) { \
475  coeff = (coeff * qfactor + qoffset) >> 2; \
476  sign = dirac_get_arith_bit(c, SIGN_CTX(sign_pred)); \
477  coeff = (coeff ^ -sign) + sign; \
478  } \
479  *buf = coeff; \
480  } \
481 
482 UNPACK_ARITH(8, int16_t)
483 UNPACK_ARITH(10, int32_t)
484 
485 /**
486  * Decode the coeffs in the rectangle defined by left, right, top, bottom
487  * [DIRAC_STD] 13.4.3.2 Codeblock unpacking loop. codeblock()
488  */
489 static inline int codeblock(const DiracContext *s, SubBand *b,
490  GetBitContext *gb, DiracArith *c,
491  int left, int right, int top, int bottom,
492  int blockcnt_one, int is_arith)
493 {
494  int x, y, zero_block;
495  int qoffset, qfactor;
496  uint8_t *buf;
497 
498  /* check for any coded coefficients in this codeblock */
499  if (!blockcnt_one) {
500  if (is_arith)
501  zero_block = dirac_get_arith_bit(c, CTX_ZERO_BLOCK);
502  else
503  zero_block = get_bits1(gb);
504 
505  if (zero_block)
506  return 0;
507  }
508 
509  if (s->codeblock_mode && !(s->old_delta_quant && blockcnt_one)) {
510  int quant;
511  if (is_arith)
512  quant = dirac_get_arith_int(c, CTX_DELTA_Q_F, CTX_DELTA_Q_DATA);
513  else
514  quant = dirac_get_se_golomb(gb);
515  if (quant > INT_MAX - b->quant || b->quant + quant < 0) {
516  av_log(s->avctx, AV_LOG_ERROR, "Invalid quant\n");
517  return AVERROR_INVALIDDATA;
518  }
519  b->quant += quant;
520  }
521 
522  if (b->quant > (DIRAC_MAX_QUANT_INDEX - 1)) {
523  av_log(s->avctx, AV_LOG_ERROR, "Unsupported quant %d\n", b->quant);
524  b->quant = 0;
525  return AVERROR_INVALIDDATA;
526  }
527 
528  qfactor = ff_dirac_qscale_tab[b->quant];
529  /* TODO: context pointer? */
530  if (!s->num_refs)
531  qoffset = ff_dirac_qoffset_intra_tab[b->quant] + 2;
532  else
533  qoffset = ff_dirac_qoffset_inter_tab[b->quant] + 2;
534 
535  buf = b->ibuf + top * b->stride;
536  if (is_arith) {
537  for (y = top; y < bottom; y++) {
538  if (c->error)
539  return c->error;
540  for (x = left; x < right; x++) {
541  if (b->pshift) {
542  coeff_unpack_arith_10(c, qfactor, qoffset, b, (int32_t*)(buf)+x, x, y);
543  } else {
544  coeff_unpack_arith_8(c, qfactor, qoffset, b, (int16_t*)(buf)+x, x, y);
545  }
546  }
547  buf += b->stride;
548  }
549  } else {
550  for (y = top; y < bottom; y++) {
551  if (get_bits_left(gb) < 1)
552  return AVERROR_INVALIDDATA;
553  for (x = left; x < right; x++) {
554  int val = coeff_unpack_golomb(gb, qfactor, qoffset);
555  if (b->pshift) {
556  AV_WN32(&buf[4*x], val);
557  } else {
558  AV_WN16(&buf[2*x], val);
559  }
560  }
561  buf += b->stride;
562  }
563  }
564  return 0;
565 }
566 
567 /**
568  * Dirac Specification ->
569  * 13.3 intra_dc_prediction(band)
570  */
571 #define INTRA_DC_PRED(n, type) \
572  static inline void intra_dc_prediction_##n(SubBand *b) \
573  { \
574  type *buf = (type*)b->ibuf; \
575  int x, y; \
576  \
577  for (x = 1; x < b->width; x++) \
578  buf[x] += buf[x-1]; \
579  buf += (b->stride >> (1+b->pshift)); \
580  \
581  for (y = 1; y < b->height; y++) { \
582  buf[0] += buf[-(b->stride >> (1+b->pshift))]; \
583  \
584  for (x = 1; x < b->width; x++) { \
585  int pred = buf[x - 1] + buf[x - (b->stride >> (1+b->pshift))] + buf[x - (b->stride >> (1+b->pshift))-1]; \
586  buf[x] += divide3(pred); \
587  } \
588  buf += (b->stride >> (1+b->pshift)); \
589  } \
590  } \
591 
592 INTRA_DC_PRED(8, int16_t)
593 INTRA_DC_PRED(10, uint32_t)
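/* The LL-band DC prediction above is strictly causal: the first row is
 * predicted from its left neighbour only, the first column from the
 * coefficient above, and every other coefficient from the rounded mean
 * (divide3) of its left, top and top-left neighbours. */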
594 
595 /**
596  * Dirac Specification ->
597  * 13.4.2 Non-skipped subbands. subband_coeffs()
598  */
599 static av_always_inline int decode_subband_internal(const DiracContext *s,
600  SubBand *b, int is_arith)
601 {
602  int cb_x, cb_y, left, right, top, bottom;
603  DiracArith c;
604  GetBitContext gb;
605  int cb_width = s->codeblock[b->level + (b->orientation != subband_ll)].width;
606  int cb_height = s->codeblock[b->level + (b->orientation != subband_ll)].height;
607  int blockcnt_one = (cb_width + cb_height) == 2;
608  int ret;
609 
610  if (!b->length)
611  return 0;
612 
613  init_get_bits8(&gb, b->coeff_data, b->length);
614 
615  if (is_arith)
616  ff_dirac_init_arith_decoder(&c, &gb, b->length);
617 
618  top = 0;
619  for (cb_y = 0; cb_y < cb_height; cb_y++) {
620  bottom = (b->height * (cb_y+1LL)) / cb_height;
621  left = 0;
622  for (cb_x = 0; cb_x < cb_width; cb_x++) {
623  right = (b->width * (cb_x+1LL)) / cb_width;
624  ret = codeblock(s, b, &gb, &c, left, right, top, bottom, blockcnt_one, is_arith);
625  if (ret < 0)
626  return ret;
627  left = right;
628  }
629  top = bottom;
630  }
631 
632  if (b->orientation == subband_ll && s->num_refs == 0) {
633  if (s->pshift) {
634  intra_dc_prediction_10(b);
635  } else {
636  intra_dc_prediction_8(b);
637  }
638  }
639  return 0;
640 }
641 
642 static int decode_subband_arith(AVCodecContext *avctx, void *b)
643 {
644  const DiracContext *s = avctx->priv_data;
645  return decode_subband_internal(s, b, 1);
646 }
647 
648 static int decode_subband_golomb(AVCodecContext *avctx, void *arg)
649 {
650  const DiracContext *s = avctx->priv_data;
651  SubBand **b = arg;
652  return decode_subband_internal(s, *b, 0);
653 }
654 
655 /**
656  * Dirac Specification ->
657  * [DIRAC_STD] 13.4.1 core_transform_data()
658  */
659 static int decode_component(const DiracContext *s, int comp)
660 {
661  AVCodecContext *avctx = s->avctx;
662  SubBand *bands[3*MAX_DWT_LEVELS+1];
663  enum dirac_subband orientation;
664  int level, num_bands = 0;
665  int ret[3*MAX_DWT_LEVELS+1];
666  int i;
667  int damaged_count = 0;
668 
669  /* Unpack all subbands at all levels. */
670  for (level = 0; level < s->wavelet_depth; level++) {
671  for (orientation = !!level; orientation < 4; orientation++) {
672  SubBand *b = &s->plane[comp].band[level][orientation];
673  bands[num_bands++] = b;
674 
675  align_get_bits(&s->gb);
676  /* [DIRAC_STD] 13.4.2 subband() */
677  b->length = get_interleaved_ue_golomb(&s->gb);
678  if (b->length) {
679  b->quant = get_interleaved_ue_golomb(&s->gb);
680  if (b->quant > (DIRAC_MAX_QUANT_INDEX - 1)) {
681  av_log(s->avctx, AV_LOG_ERROR, "Unsupported quant %d\n", b->quant);
682  b->quant = 0;
683  return AVERROR_INVALIDDATA;
684  }
685  align_get_bits(&s->gb);
686  b->coeff_data = s->gb.buffer + get_bits_count(&s->gb)/8;
687  if (b->length > FFMAX(get_bits_left(&s->gb)/8, 0)) {
688  b->length = FFMAX(get_bits_left(&s->gb)/8, 0);
689  damaged_count ++;
690  }
691  skip_bits_long(&s->gb, b->length*8);
692  }
693  }
694  /* arithmetic coding has inter-level dependencies, so we can only execute one level at a time */
695  if (s->is_arith)
696  avctx->execute(avctx, decode_subband_arith, &s->plane[comp].band[level][!!level],
697  ret + 3*level + !!level, 4-!!level, sizeof(SubBand));
698  }
699  /* golomb coding has no inter-level dependencies, so we can execute all subbands in parallel */
700  if (!s->is_arith)
701  avctx->execute(avctx, decode_subband_golomb, bands, ret, num_bands, sizeof(SubBand*));
702 
703  for (i = 0; i < s->wavelet_depth * 3 + 1; i++) {
704  if (ret[i] < 0)
705  damaged_count++;
706  }
707  if (damaged_count > (s->wavelet_depth * 3 + 1) /2)
708  return AVERROR_INVALIDDATA;
709 
710  return 0;
711 }
712 
713 #define PARSE_VALUES(type, x, gb, ebits, buf1, buf2) \
714  type *buf = (type *)buf1; \
715  buf[x] = coeff_unpack_golomb(gb, qfactor, qoffset); \
716  if (get_bits_count(gb) >= ebits) \
717  return; \
718  if (buf2) { \
719  buf = (type *)buf2; \
720  buf[x] = coeff_unpack_golomb(gb, qfactor, qoffset); \
721  if (get_bits_count(gb) >= ebits) \
722  return; \
723  } \
724 
725 static void decode_subband(const DiracContext *s, GetBitContext *gb, int quant,
726  int slice_x, int slice_y, int bits_end,
727  const SubBand *b1, const SubBand *b2)
728 {
729  int left = b1->width * slice_x / s->num_x;
730  int right = b1->width *(slice_x+1) / s->num_x;
731  int top = b1->height * slice_y / s->num_y;
732  int bottom = b1->height *(slice_y+1) / s->num_y;
733 
734  int qfactor, qoffset;
735 
736  uint8_t *buf1 = b1->ibuf + top * b1->stride;
737  uint8_t *buf2 = b2 ? b2->ibuf + top * b2->stride: NULL;
738  int x, y;
739 
740  if (quant > (DIRAC_MAX_QUANT_INDEX - 1)) {
741  av_log(s->avctx, AV_LOG_ERROR, "Unsupported quant %d\n", quant);
742  return;
743  }
744  qfactor = ff_dirac_qscale_tab[quant];
745  qoffset = ff_dirac_qoffset_intra_tab[quant] + 2;
746  /* we have to constantly check for overread since the spec explicitly
747  requires this, meaning that all remaining coeffs are set to 0 */
748  if (get_bits_count(gb) >= bits_end)
749  return;
750 
751  if (s->pshift) {
752  for (y = top; y < bottom; y++) {
753  for (x = left; x < right; x++) {
754  PARSE_VALUES(int32_t, x, gb, bits_end, buf1, buf2);
755  }
756  buf1 += b1->stride;
757  if (buf2)
758  buf2 += b2->stride;
759  }
760  }
761  else {
762  for (y = top; y < bottom; y++) {
763  for (x = left; x < right; x++) {
764  PARSE_VALUES(int16_t, x, gb, bits_end, buf1, buf2);
765  }
766  buf1 += b1->stride;
767  if (buf2)
768  buf2 += b2->stride;
769  }
770  }
771 }
772 
773 /**
774  * Dirac Specification ->
775  * 13.5.2 Slices. slice(sx,sy)
776  */
777 static int decode_lowdelay_slice(AVCodecContext *avctx, void *arg)
778 {
779  const DiracContext *s = avctx->priv_data;
780  DiracSlice *slice = arg;
781  GetBitContext *gb = &slice->gb;
782  enum dirac_subband orientation;
783  int level, quant, chroma_bits, chroma_end;
784 
785  int quant_base = get_bits(gb, 7); /*[DIRAC_STD] qindex */
786  int length_bits = av_log2(8 * slice->bytes)+1;
787  int luma_bits = get_bits_long(gb, length_bits);
788  int luma_end = get_bits_count(gb) + FFMIN(luma_bits, get_bits_left(gb));
789 
790  /* [DIRAC_STD] 13.5.5.2 luma_slice_band */
791  for (level = 0; level < s->wavelet_depth; level++)
792  for (orientation = !!level; orientation < 4; orientation++) {
793  quant = FFMAX(quant_base - s->lowdelay.quant[level][orientation], 0);
794  decode_subband(s, gb, quant, slice->slice_x, slice->slice_y, luma_end,
795  &s->plane[0].band[level][orientation], NULL);
796  }
797 
798  /* consume any unused bits from luma */
799  skip_bits_long(gb, get_bits_count(gb) - luma_end);
800 
801  chroma_bits = 8*slice->bytes - 7 - length_bits - luma_bits;
802  chroma_end = get_bits_count(gb) + FFMIN(chroma_bits, get_bits_left(gb));
803  /* [DIRAC_STD] 13.5.5.3 chroma_slice_band */
804  for (level = 0; level < s->wavelet_depth; level++)
805  for (orientation = !!level; orientation < 4; orientation++) {
806  quant = FFMAX(quant_base - s->lowdelay.quant[level][orientation], 0);
807  decode_subband(s, gb, quant, slice->slice_x, slice->slice_y, chroma_end,
808  &s->plane[1].band[level][orientation],
809  &s->plane[2].band[level][orientation]);
810  }
811 
812  return 0;
813 }
814 
815 typedef struct SliceCoeffs {
816  int left;
817  int top;
818  int tot_h;
819  int tot_v;
820  int tot;
821 } SliceCoeffs;
822 
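/* Per-slice coefficient bookkeeping for low-delay/HQ slices: each wavelet
 * level contributes tot_h x tot_v coefficients per subband, and level 0 has
 * 4 subbands (LL, HL, LH, HH) while higher levels have only 3, which is where
 * the (4 - !!level) factor below comes from. */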
823 static int subband_coeffs(const DiracContext *s, int x, int y, int p,
824  SliceCoeffs c[MAX_DWT_LEVELS])
825 {
826  int level, coef = 0;
827  for (level = 0; level < s->wavelet_depth; level++) {
828  SliceCoeffs *o = &c[level];
829  const SubBand *b = &s->plane[p].band[level][3]; /* orientation doesn't matter */
830  o->top = b->height * y / s->num_y;
831  o->left = b->width * x / s->num_x;
832  o->tot_h = ((b->width * (x + 1)) / s->num_x) - o->left;
833  o->tot_v = ((b->height * (y + 1)) / s->num_y) - o->top;
834  o->tot = o->tot_h*o->tot_v;
835  coef += o->tot * (4 - !!level);
836  }
837  return coef;
838 }
839 
840 /**
841  * VC-2 Specification ->
842  * 13.5.3 hq_slice(sx,sy)
843  */
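/* An HQ slice starts with prefix_bytes of padding (skipped here) and an 8-bit
 * quantisation index, followed by one chunk per plane (Y, Cb, Cr); each chunk
 * is preceded by an 8-bit length that is multiplied by size_scaler to get its
 * byte count, as parsed below. */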
844 static int decode_hq_slice(const DiracContext *s, DiracSlice *slice, uint8_t *tmp_buf)
845 {
846  int i, level, orientation, quant_idx;
847  int qfactor[MAX_DWT_LEVELS][4], qoffset[MAX_DWT_LEVELS][4];
848  GetBitContext *gb = &slice->gb;
849  SliceCoeffs coeffs_num[MAX_DWT_LEVELS];
850 
851  skip_bits_long(gb, 8*s->highquality.prefix_bytes);
852  quant_idx = get_bits(gb, 8);
853 
854  if (quant_idx > DIRAC_MAX_QUANT_INDEX - 1) {
855  av_log(s->avctx, AV_LOG_ERROR, "Invalid quantization index - %i\n", quant_idx);
856  return AVERROR_INVALIDDATA;
857  }
858 
859  /* Slice quantization (slice_quantizers() in the specs) */
860  for (level = 0; level < s->wavelet_depth; level++) {
861  for (orientation = !!level; orientation < 4; orientation++) {
862  const int quant = FFMAX(quant_idx - s->lowdelay.quant[level][orientation], 0);
863  qfactor[level][orientation] = ff_dirac_qscale_tab[quant];
864  qoffset[level][orientation] = ff_dirac_qoffset_intra_tab[quant] + 2;
865  }
866  }
867 
868  /* Luma + 2 Chroma planes */
869  for (i = 0; i < 3; i++) {
870  int coef_num, coef_par, off = 0;
871  int64_t length = s->highquality.size_scaler*get_bits(gb, 8);
872  int64_t bits_end = get_bits_count(gb) + 8*length;
873  const uint8_t *addr = align_get_bits(gb);
874 
875  if (length*8 > get_bits_left(gb)) {
876  av_log(s->avctx, AV_LOG_ERROR, "end too far away\n");
877  return AVERROR_INVALIDDATA;
878  }
879 
880  coef_num = subband_coeffs(s, slice->slice_x, slice->slice_y, i, coeffs_num);
881 
882  if (s->pshift)
883  coef_par = ff_dirac_golomb_read_32bit(addr, length,
884  tmp_buf, coef_num);
885  else
886  coef_par = ff_dirac_golomb_read_16bit(addr, length,
887  tmp_buf, coef_num);
888 
889  if (coef_num > coef_par) {
890  const int start_b = coef_par * (1 << (s->pshift + 1));
891  const int end_b = coef_num * (1 << (s->pshift + 1));
892  memset(&tmp_buf[start_b], 0, end_b - start_b);
893  }
894 
895  for (level = 0; level < s->wavelet_depth; level++) {
896  const SliceCoeffs *c = &coeffs_num[level];
897  for (orientation = !!level; orientation < 4; orientation++) {
898  const SubBand *b1 = &s->plane[i].band[level][orientation];
899  uint8_t *buf = b1->ibuf + c->top * b1->stride + (c->left << (s->pshift + 1));
900 
901  /* Change to c->tot_h <= 4 for AVX2 dequantization */
902  const int qfunc = s->pshift + 2*(c->tot_h <= 2);
903  s->diracdsp.dequant_subband[qfunc](&tmp_buf[off], buf, b1->stride,
904  qfactor[level][orientation],
905  qoffset[level][orientation],
906  c->tot_v, c->tot_h);
907 
908  off += c->tot << (s->pshift + 1);
909  }
910  }
911 
912  skip_bits_long(gb, bits_end - get_bits_count(gb));
913  }
914 
915  return 0;
916 }
917 
918 static int decode_hq_slice_row(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
919 {
920  int i;
921  const DiracContext *s = avctx->priv_data;
922  DiracSlice *slices = ((DiracSlice *)arg) + s->num_x*jobnr;
923  uint8_t *thread_buf = &s->thread_buf[s->thread_buf_size*threadnr];
924  for (i = 0; i < s->num_x; i++)
925  decode_hq_slice(s, &slices[i], thread_buf);
926  return 0;
927 }
928 
929 /**
930  * Dirac Specification ->
931  * 13.5.1 low_delay_transform_data()
932  */
933 static int decode_lowdelay(DiracContext *s)
934 {
935  AVCodecContext *avctx = s->avctx;
936  int slice_x, slice_y, bufsize;
937  int64_t coef_buf_size, bytes = 0;
938  const uint8_t *buf;
939  DiracSlice *slices;
940  SliceCoeffs tmp[MAX_DWT_LEVELS];
941  int slice_num = 0;
942 
943  if (s->slice_params_num_buf != (s->num_x * s->num_y)) {
944  s->slice_params_buf = av_realloc_f(s->slice_params_buf, s->num_x * s->num_y, sizeof(DiracSlice));
945  if (!s->slice_params_buf) {
946  av_log(s->avctx, AV_LOG_ERROR, "slice params buffer allocation failure\n");
947  s->slice_params_num_buf = 0;
948  return AVERROR(ENOMEM);
949  }
950  s->slice_params_num_buf = s->num_x * s->num_y;
951  }
952  slices = s->slice_params_buf;
953 
954  /* 8 because that's how much the golomb reader could overread junk data
955  * from another plane/slice at most, and 512 because SIMD */
956  coef_buf_size = subband_coeffs(s, s->num_x - 1, s->num_y - 1, 0, tmp) + 8;
957  coef_buf_size = (coef_buf_size << (1 + s->pshift)) + 512;
958 
959  if (s->threads_num_buf != avctx->thread_count ||
960  s->thread_buf_size != coef_buf_size) {
961  s->threads_num_buf = avctx->thread_count;
962  s->thread_buf_size = coef_buf_size;
963  s->thread_buf = av_realloc_f(s->thread_buf, avctx->thread_count, s->thread_buf_size);
964  if (!s->thread_buf) {
965  av_log(s->avctx, AV_LOG_ERROR, "thread buffer allocation failure\n");
966  return AVERROR(ENOMEM);
967  }
968  }
969 
970  align_get_bits(&s->gb);
971  /*[DIRAC_STD] 13.5.2 Slices. slice(sx,sy) */
972  buf = s->gb.buffer + get_bits_count(&s->gb)/8;
973  bufsize = get_bits_left(&s->gb);
974 
975  if (s->hq_picture) {
976  int i;
977 
978  for (slice_y = 0; bufsize > 0 && slice_y < s->num_y; slice_y++) {
979  for (slice_x = 0; bufsize > 0 && slice_x < s->num_x; slice_x++) {
980  bytes = s->highquality.prefix_bytes + 1;
981  for (i = 0; i < 3; i++) {
982  if (bytes <= bufsize/8)
983  bytes += buf[bytes] * s->highquality.size_scaler + 1;
984  }
985  if (bytes >= INT_MAX || bytes*8 > bufsize) {
986  av_log(s->avctx, AV_LOG_ERROR, "too many bytes\n");
987  return AVERROR_INVALIDDATA;
988  }
989 
990  slices[slice_num].bytes = bytes;
991  slices[slice_num].slice_x = slice_x;
992  slices[slice_num].slice_y = slice_y;
993  init_get_bits(&slices[slice_num].gb, buf, bufsize);
994  slice_num++;
995 
996  buf += bytes;
997  if (bufsize/8 >= bytes)
998  bufsize -= bytes*8;
999  else
1000  bufsize = 0;
1001  }
1002  }
1003 
1004  if (s->num_x*s->num_y != slice_num) {
1005  av_log(s->avctx, AV_LOG_ERROR, "too few slices\n");
1006  return AVERROR_INVALIDDATA;
1007  }
1008 
1009  avctx->execute2(avctx, decode_hq_slice_row, slices, NULL, s->num_y);
1010  } else {
1011  for (slice_y = 0; bufsize > 0 && slice_y < s->num_y; slice_y++) {
1012  for (slice_x = 0; bufsize > 0 && slice_x < s->num_x; slice_x++) {
1013  bytes = (slice_num+1) * (int64_t)s->lowdelay.bytes.num / s->lowdelay.bytes.den
1014  - slice_num * (int64_t)s->lowdelay.bytes.num / s->lowdelay.bytes.den;
1015  if (bytes >= INT_MAX || bytes*8 > bufsize) {
1016  av_log(s->avctx, AV_LOG_ERROR, "too many bytes\n");
1017  return AVERROR_INVALIDDATA;
1018  }
1019  slices[slice_num].bytes = bytes;
1020  slices[slice_num].slice_x = slice_x;
1021  slices[slice_num].slice_y = slice_y;
1022  init_get_bits(&slices[slice_num].gb, buf, bufsize);
1023  slice_num++;
1024 
1025  buf += bytes;
1026  if (bufsize/8 >= bytes)
1027  bufsize -= bytes*8;
1028  else
1029  bufsize = 0;
1030  }
1031  }
1032  avctx->execute(avctx, decode_lowdelay_slice, slices, NULL, slice_num,
1033  sizeof(DiracSlice)); /* [DIRAC_STD] 13.5.2 Slices */
1034  }
1035 
1036  if (s->dc_prediction) {
1037  if (s->pshift) {
1038  intra_dc_prediction_10(&s->plane[0].band[0][0]); /* [DIRAC_STD] 13.3 intra_dc_prediction() */
1039  intra_dc_prediction_10(&s->plane[1].band[0][0]); /* [DIRAC_STD] 13.3 intra_dc_prediction() */
1040  intra_dc_prediction_10(&s->plane[2].band[0][0]); /* [DIRAC_STD] 13.3 intra_dc_prediction() */
1041  } else {
1042  intra_dc_prediction_8(&s->plane[0].band[0][0]);
1043  intra_dc_prediction_8(&s->plane[1].band[0][0]);
1044  intra_dc_prediction_8(&s->plane[2].band[0][0]);
1045  }
1046  }
1047 
1048  return 0;
1049 }
1050 
1051 static void init_planes(DiracContext *s)
1052 {
1053  int i, w, h, level, orientation;
1054 
1055  for (i = 0; i < 3; i++) {
1056  Plane *p = &s->plane[i];
1057 
1058  p->width = s->seq.width >> (i ? s->chroma_x_shift : 0);
1059  p->height = s->seq.height >> (i ? s->chroma_y_shift : 0);
1060  p->idwt.width = w = CALC_PADDING(p->width , s->wavelet_depth);
1061  p->idwt.height = h = CALC_PADDING(p->height, s->wavelet_depth);
1062  p->idwt.stride = FFALIGN(p->idwt.width, 8) << (1 + s->pshift);
1063 
1064  for (level = s->wavelet_depth-1; level >= 0; level--) {
1065  w = w>>1;
1066  h = h>>1;
1067  for (orientation = !!level; orientation < 4; orientation++) {
1068  SubBand *b = &p->band[level][orientation];
1069 
1070  b->pshift = s->pshift;
1071  b->ibuf = p->idwt.buf;
1072  b->level = level;
1073  b->stride = p->idwt.stride << (s->wavelet_depth - level);
1074  b->width = w;
1075  b->height = h;
1076  b->orientation = orientation;
1077 
1078  if (orientation & 1)
1079  b->ibuf += w << (1+b->pshift);
1080  if (orientation > 1)
1081  b->ibuf += (b->stride>>1);
1082 
1083  if (level)
1084  b->parent = &p->band[level-1][orientation];
1085  }
1086  }
1087 
1088  if (i > 0) {
1089  p->xblen = s->plane[0].xblen >> s->chroma_x_shift;
1090  p->yblen = s->plane[0].yblen >> s->chroma_y_shift;
1091  p->xbsep = s->plane[0].xbsep >> s->chroma_x_shift;
1092  p->ybsep = s->plane[0].ybsep >> s->chroma_y_shift;
1093  }
1094 
1095  p->xoffset = (p->xblen - p->xbsep)/2;
1096  p->yoffset = (p->yblen - p->ybsep)/2;
1097  }
1098 }
1099 
1100 /**
1101  * Unpack the motion compensation parameters
1102  * Dirac Specification ->
1103  * 11.2 Picture prediction data. picture_prediction()
1104  */
1105 static int dirac_unpack_prediction_parameters(DiracContext *s)
1106 {
1107  static const uint8_t default_blen[] = { 4, 12, 16, 24 };
1108 
1109  GetBitContext *gb = &s->gb;
1110  unsigned idx, ref;
1111 
1112  align_get_bits(gb);
1113  /* [DIRAC_STD] 11.2.2 Block parameters. block_parameters() */
1114  /* Luma and Chroma are equal. 11.2.3 */
1115  idx = get_interleaved_ue_golomb(gb); /* [DIRAC_STD] index */
1116 
1117  if (idx > 4) {
1118  av_log(s->avctx, AV_LOG_ERROR, "Block prediction index too high\n");
1119  return AVERROR_INVALIDDATA;
1120  }
1121 
1122  if (idx == 0) {
1123  s->plane[0].xblen = get_interleaved_ue_golomb(gb);
1124  s->plane[0].yblen = get_interleaved_ue_golomb(gb);
1125  s->plane[0].xbsep = get_interleaved_ue_golomb(gb);
1126  s->plane[0].ybsep = get_interleaved_ue_golomb(gb);
1127  } else {
1128  /*[DIRAC_STD] preset_block_params(index). Table 11.1 */
1129  s->plane[0].xblen = default_blen[idx-1];
1130  s->plane[0].yblen = default_blen[idx-1];
1131  s->plane[0].xbsep = 4 * idx;
1132  s->plane[0].ybsep = 4 * idx;
1133  }
1134  /*[DIRAC_STD] 11.2.4 motion_data_dimensions()
1135  Calculated in function dirac_unpack_block_motion_data */
1136 
1137  if (s->plane[0].xblen % (1 << s->chroma_x_shift) != 0 ||
1138  s->plane[0].yblen % (1 << s->chroma_y_shift) != 0 ||
1139  !s->plane[0].xblen || !s->plane[0].yblen) {
1140  av_log(s->avctx, AV_LOG_ERROR,
1141  "invalid x/y block length (%d/%d) for x/y chroma shift (%d/%d)\n",
1142  s->plane[0].xblen, s->plane[0].yblen, s->chroma_x_shift, s->chroma_y_shift);
1143  return AVERROR_INVALIDDATA;
1144  }
1145  if (!s->plane[0].xbsep || !s->plane[0].ybsep || s->plane[0].xbsep < s->plane[0].xblen/2 || s->plane[0].ybsep < s->plane[0].yblen/2) {
1146  av_log(s->avctx, AV_LOG_ERROR, "Block separation too small\n");
1147  return AVERROR_INVALIDDATA;
1148  }
1149  if (s->plane[0].xbsep > s->plane[0].xblen || s->plane[0].ybsep > s->plane[0].yblen) {
1150  av_log(s->avctx, AV_LOG_ERROR, "Block separation greater than size\n");
1151  return AVERROR_INVALIDDATA;
1152  }
1153  if (FFMAX(s->plane[0].xblen, s->plane[0].yblen) > MAX_BLOCKSIZE) {
1154  av_log(s->avctx, AV_LOG_ERROR, "Unsupported large block size\n");
1155  return AVERROR_PATCHWELCOME;
1156  }
1157 
1158  /*[DIRAC_STD] 11.2.5 Motion vector precision. motion_vector_precision()
1159  Read motion vector precision */
1160  s->mv_precision = get_interleaved_ue_golomb(gb);
1161  if (s->mv_precision > 3) {
1162  av_log(s->avctx, AV_LOG_ERROR, "MV precision finer than eighth-pel\n");
1163  return AVERROR_INVALIDDATA;
1164  }
1165 
1166  /*[DIRAC_STD] 11.2.6 Global motion. global_motion()
1167  Read the global motion compensation parameters */
1168  s->globalmc_flag = get_bits1(gb);
1169  if (s->globalmc_flag) {
1170  memset(s->globalmc, 0, sizeof(s->globalmc));
1171  /* [DIRAC_STD] pan_tilt(gparams) */
1172  for (ref = 0; ref < s->num_refs; ref++) {
1173  if (get_bits1(gb)) {
1174  s->globalmc[ref].pan_tilt[0] = dirac_get_se_golomb(gb);
1175  s->globalmc[ref].pan_tilt[1] = dirac_get_se_golomb(gb);
1176  }
1177  /* [DIRAC_STD] zoom_rotate_shear(gparams)
1178  zoom/rotation/shear parameters */
1179  if (get_bits1(gb)) {
1180  s->globalmc[ref].zrs_exp = get_interleaved_ue_golomb(gb);
1181  s->globalmc[ref].zrs[0][0] = dirac_get_se_golomb(gb);
1182  s->globalmc[ref].zrs[0][1] = dirac_get_se_golomb(gb);
1183  s->globalmc[ref].zrs[1][0] = dirac_get_se_golomb(gb);
1184  s->globalmc[ref].zrs[1][1] = dirac_get_se_golomb(gb);
1185  } else {
1186  s->globalmc[ref].zrs[0][0] = 1;
1187  s->globalmc[ref].zrs[1][1] = 1;
1188  }
1189  /* [DIRAC_STD] perspective(gparams) */
1190  if (get_bits1(gb)) {
1191  s->globalmc[ref].perspective_exp = get_interleaved_ue_golomb(gb);
1192  s->globalmc[ref].perspective[0] = dirac_get_se_golomb(gb);
1193  s->globalmc[ref].perspective[1] = dirac_get_se_golomb(gb);
1194  }
1195  if (s->globalmc[ref].perspective_exp + (uint64_t)s->globalmc[ref].zrs_exp > 30) {
1196  return AVERROR_INVALIDDATA;
1197  }
1198 
1199  }
1200  }
1201 
1202  /*[DIRAC_STD] 11.2.7 Picture prediction mode. prediction_mode()
1203  Picture prediction mode, not currently used. */
1204  if (get_interleaved_ue_golomb(gb)) {
1205  av_log(s->avctx, AV_LOG_ERROR, "Unknown picture prediction mode\n");
1206  return AVERROR_INVALIDDATA;
1207  }
1208 
1209  /* [DIRAC_STD] 11.2.8 Reference picture weight. reference_picture_weights()
1210  just data read, weight calculation will be done later on. */
1211  s->weight_log2denom = 1;
1212  s->weight[0] = 1;
1213  s->weight[1] = 1;
1214 
1215  if (get_bits1(gb)) {
1216  s->weight_log2denom = get_interleaved_ue_golomb(gb);
1217  if (s->weight_log2denom < 1 || s->weight_log2denom > 8) {
1218  av_log(s->avctx, AV_LOG_ERROR, "weight_log2denom unsupported or invalid\n");
1219  s->weight_log2denom = 1;
1220  return AVERROR_INVALIDDATA;
1221  }
1222  s->weight[0] = dirac_get_se_golomb(gb);
1223  if (s->num_refs == 2)
1224  s->weight[1] = dirac_get_se_golomb(gb);
1225  }
1226  return 0;
1227 }
1228 
1229 /**
1230  * Dirac Specification ->
1231  * 11.3 Wavelet transform data. wavelet_transform()
1232  */
1233 static int dirac_unpack_idwt_params(DiracContext *s)
1234 {
1235  GetBitContext *gb = &s->gb;
1236  int i, level;
1237  unsigned tmp;
1238 
1239 #define CHECKEDREAD(dst, cond, errmsg) \
1240  tmp = get_interleaved_ue_golomb(gb); \
1241  if (cond) { \
1242  av_log(s->avctx, AV_LOG_ERROR, errmsg); \
1243  return AVERROR_INVALIDDATA; \
1244  }\
1245  dst = tmp;
1246 
1247  align_get_bits(gb);
1248 
1249  s->zero_res = s->num_refs ? get_bits1(gb) : 0;
1250  if (s->zero_res)
1251  return 0;
1252 
1253  /*[DIRAC_STD] 11.3.1 Transform parameters. transform_parameters() */
1254  CHECKEDREAD(s->wavelet_idx, tmp > 6, "wavelet_idx is too big\n")
1255 
1256  CHECKEDREAD(s->wavelet_depth, tmp > MAX_DWT_LEVELS || tmp < 1, "invalid number of DWT decompositions\n")
1257 
1258  if (!s->low_delay) {
1259  /* Codeblock parameters (core syntax only) */
1260  if (get_bits1(gb)) {
1261  for (i = 0; i <= s->wavelet_depth; i++) {
1262  CHECKEDREAD(s->codeblock[i].width , tmp < 1 || tmp > (s->avctx->width >>s->wavelet_depth-i), "codeblock width invalid\n")
1263  CHECKEDREAD(s->codeblock[i].height, tmp < 1 || tmp > (s->avctx->height>>s->wavelet_depth-i), "codeblock height invalid\n")
1264  }
1265 
1266  CHECKEDREAD(s->codeblock_mode, tmp > 1, "unknown codeblock mode\n")
1267  }
1268  else {
1269  for (i = 0; i <= s->wavelet_depth; i++)
1270  s->codeblock[i].width = s->codeblock[i].height = 1;
1271  }
1272  }
1273  else {
1274  s->num_x = get_interleaved_ue_golomb(gb);
1275  s->num_y = get_interleaved_ue_golomb(gb);
1276  if (s->num_x * s->num_y == 0 || s->num_x * (uint64_t)s->num_y > INT_MAX ||
1277  s->num_x * (uint64_t)s->avctx->width > INT_MAX ||
1278  s->num_y * (uint64_t)s->avctx->height > INT_MAX ||
1279  s->num_x > s->avctx->width ||
1280  s->num_y > s->avctx->height
1281  ) {
1282  av_log(s->avctx,AV_LOG_ERROR,"Invalid numx/y\n");
1283  s->num_x = s->num_y = 0;
1284  return AVERROR_INVALIDDATA;
1285  }
1286  if (s->ld_picture) {
1287  s->lowdelay.bytes.num = get_interleaved_ue_golomb(gb);
1288  s->lowdelay.bytes.den = get_interleaved_ue_golomb(gb);
1289  if (s->lowdelay.bytes.den <= 0) {
1290  av_log(s->avctx,AV_LOG_ERROR,"Invalid lowdelay.bytes.den\n");
1291  return AVERROR_INVALIDDATA;
1292  }
1293  } else if (s->hq_picture) {
1294  s->highquality.prefix_bytes = get_interleaved_ue_golomb(gb);
1295  s->highquality.size_scaler = get_interleaved_ue_golomb(gb);
1296  if (s->highquality.prefix_bytes >= INT_MAX / 8) {
1297  av_log(s->avctx,AV_LOG_ERROR,"too many prefix bytes\n");
1298  return AVERROR_INVALIDDATA;
1299  }
1300  }
1301 
1302  /* [DIRAC_STD] 11.3.5 Quantisation matrices (low-delay syntax). quant_matrix() */
1303  if (get_bits1(gb)) {
1304  av_log(s->avctx,AV_LOG_DEBUG,"Low Delay: Has Custom Quantization Matrix!\n");
1305  /* custom quantization matrix */
1306  for (level = 0; level < s->wavelet_depth; level++) {
1307  for (i = !!level; i < 4; i++) {
1308  s->lowdelay.quant[level][i] = get_interleaved_ue_golomb(gb);
1309  }
1310  }
1311  } else {
1312  if (s->wavelet_depth > 4) {
1313  av_log(s->avctx,AV_LOG_ERROR,"Mandatory custom low delay matrix missing for depth %d\n", s->wavelet_depth);
1314  return AVERROR_INVALIDDATA;
1315  }
1316  /* default quantization matrix */
1317  for (level = 0; level < s->wavelet_depth; level++)
1318  for (i = 0; i < 4; i++) {
1319  s->lowdelay.quant[level][i] = ff_dirac_default_qmat[s->wavelet_idx][level][i];
1320  /* haar with no shift differs for different depths */
1321  if (s->wavelet_idx == 3)
1322  s->lowdelay.quant[level][i] += 4*(s->wavelet_depth-1 - level);
1323  }
1324  }
1325  }
1326  return 0;
1327 }
1328 
1329 static inline int pred_sbsplit(uint8_t *sbsplit, int stride, int x, int y)
1330 {
1331  static const uint8_t avgsplit[7] = { 0, 0, 1, 1, 1, 2, 2 };
1332 
1333  if (!(x|y))
1334  return 0;
1335  else if (!y)
1336  return sbsplit[-1];
1337  else if (!x)
1338  return sbsplit[-stride];
1339 
1340  return avgsplit[sbsplit[-1] + sbsplit[-stride] + sbsplit[-stride-1]];
1341 }
1342 
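/* Block-mode prediction is a majority vote: the reference flag of the left,
 * top and top-left neighbours is summed, and (pred >> 1) keeps the flag bit
 * exactly when at least two of the three neighbours have it set. */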
1343 static inline int pred_block_mode(DiracBlock *block, int stride, int x, int y, int refmask)
1344 {
1345  int pred;
1346 
1347  if (!(x|y))
1348  return 0;
1349  else if (!y)
1350  return block[-1].ref & refmask;
1351  else if (!x)
1352  return block[-stride].ref & refmask;
1353 
1354  /* return the majority */
1355  pred = (block[-1].ref & refmask) + (block[-stride].ref & refmask) + (block[-stride-1].ref & refmask);
1356  return (pred >> 1) & refmask;
1357 }
1358 
1359 static inline void pred_block_dc(DiracBlock *block, int stride, int x, int y)
1360 {
1361  int i, n = 0;
1362 
1363  memset(block->u.dc, 0, sizeof(block->u.dc));
1364 
1365  if (x && !(block[-1].ref & 3)) {
1366  for (i = 0; i < 3; i++)
1367  block->u.dc[i] += block[-1].u.dc[i];
1368  n++;
1369  }
1370 
1371  if (y && !(block[-stride].ref & 3)) {
1372  for (i = 0; i < 3; i++)
1373  block->u.dc[i] += block[-stride].u.dc[i];
1374  n++;
1375  }
1376 
1377  if (x && y && !(block[-1-stride].ref & 3)) {
1378  for (i = 0; i < 3; i++)
1379  block->u.dc[i] += block[-1-stride].u.dc[i];
1380  n++;
1381  }
1382 
1383  if (n == 2) {
1384  for (i = 0; i < 3; i++)
1385  block->u.dc[i] = (block->u.dc[i]+1)>>1;
1386  } else if (n == 3) {
1387  for (i = 0; i < 3; i++)
1388  block->u.dc[i] = divide3(block->u.dc[i]);
1389  }
1390 }
1391 
1392 static inline void pred_mv(DiracBlock *block, int stride, int x, int y, int ref)
1393 {
1394  int16_t *pred[3];
1395  int refmask = ref+1;
1396  int mask = refmask | DIRAC_REF_MASK_GLOBAL; /* exclude gmc blocks */
1397  int n = 0;
1398 
1399  if (x && (block[-1].ref & mask) == refmask)
1400  pred[n++] = block[-1].u.mv[ref];
1401 
1402  if (y && (block[-stride].ref & mask) == refmask)
1403  pred[n++] = block[-stride].u.mv[ref];
1404 
1405  if (x && y && (block[-stride-1].ref & mask) == refmask)
1406  pred[n++] = block[-stride-1].u.mv[ref];
1407 
1408  switch (n) {
1409  case 0:
1410  block->u.mv[ref][0] = 0;
1411  block->u.mv[ref][1] = 0;
1412  break;
1413  case 1:
1414  block->u.mv[ref][0] = pred[0][0];
1415  block->u.mv[ref][1] = pred[0][1];
1416  break;
1417  case 2:
1418  block->u.mv[ref][0] = (pred[0][0] + pred[1][0] + 1) >> 1;
1419  block->u.mv[ref][1] = (pred[0][1] + pred[1][1] + 1) >> 1;
1420  break;
1421  case 3:
1422  block->u.mv[ref][0] = mid_pred(pred[0][0], pred[1][0], pred[2][0]);
1423  block->u.mv[ref][1] = mid_pred(pred[0][1], pred[1][1], pred[2][1]);
1424  break;
1425  }
1426 }
1427 
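/* Global motion: the zoom/rotate/shear matrix A and perspective vector c are
 * stored with zrs_exp and perspective_exp fractional bits respectively, so the
 * code below evaluates mv = (2^ep - c.(x,y)) * (A.(x,y) + 2^ez * b) in 64 bits
 * and then shifts the result back down by ez + ep. */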
1428 static void global_mv(DiracContext *s, DiracBlock *block, int x, int y, int ref)
1429 {
1430  int ez = s->globalmc[ref].zrs_exp;
1431  int ep = s->globalmc[ref].perspective_exp;
1432  int (*A)[2] = s->globalmc[ref].zrs;
1433  int *b = s->globalmc[ref].pan_tilt;
1434  int *c = s->globalmc[ref].perspective;
1435 
1436  int64_t m = (1<<ep) - (c[0]*(int64_t)x + c[1]*(int64_t)y);
1437  int64_t mx = m * (uint64_t)((A[0][0] * (int64_t)x + A[0][1]*(int64_t)y) + (1LL<<ez) * b[0]);
1438  int64_t my = m * (uint64_t)((A[1][0] * (int64_t)x + A[1][1]*(int64_t)y) + (1LL<<ez) * b[1]);
1439 
1440  block->u.mv[ref][0] = (mx + (1<<(ez+ep))) >> (ez+ep);
1441  block->u.mv[ref][1] = (my + (1<<(ez+ep))) >> (ez+ep);
1442 }
1443 
1444 static void decode_block_params(DiracContext *s, DiracArith arith[8], DiracBlock *block,
1445  int stride, int x, int y)
1446 {
1447  int i;
1448 
1449  block->ref = pred_block_mode(block, stride, x, y, DIRAC_REF_MASK_REF1);
1450  block->ref ^= dirac_get_arith_bit(arith, CTX_PMODE_REF1);
1451 
1452  if (s->num_refs == 2) {
1453  block->ref |= pred_block_mode(block, stride, x, y, DIRAC_REF_MASK_REF2);
1454  block->ref ^= dirac_get_arith_bit(arith, CTX_PMODE_REF2) << 1;
1455  }
1456 
1457  if (!block->ref) {
1458  pred_block_dc(block, stride, x, y);
1459  for (i = 0; i < 3; i++)
1460  block->u.dc[i] += (unsigned)dirac_get_arith_int(arith+1+i, CTX_DC_F1, CTX_DC_DATA);
1461  return;
1462  }
1463 
1464  if (s->globalmc_flag) {
1465  block->ref |= pred_block_mode(block, stride, x, y, DIRAC_REF_MASK_GLOBAL);
1466  block->ref ^= dirac_get_arith_bit(arith, CTX_GLOBAL_BLOCK) << 2;
1467  }
1468 
1469  for (i = 0; i < s->num_refs; i++)
1470  if (block->ref & (i+1)) {
1471  if (block->ref & DIRAC_REF_MASK_GLOBAL) {
1472  global_mv(s, block, x, y, i);
1473  } else {
1474  pred_mv(block, stride, x, y, i);
1475  block->u.mv[i][0] += (unsigned)dirac_get_arith_int(arith + 4 + 2 * i, CTX_MV_F1, CTX_MV_DATA);
1476  block->u.mv[i][1] += (unsigned)dirac_get_arith_int(arith + 5 + 2 * i, CTX_MV_F1, CTX_MV_DATA);
1477  }
1478  }
1479 }
1480 
1481 /**
1482  * Copies the current block to the other blocks covered by the current superblock split mode
1483  */
1484 static void propagate_block_data(DiracBlock *block, int stride, int size)
1485 {
1486  int x, y;
1487  DiracBlock *dst = block;
1488 
1489  for (x = 1; x < size; x++)
1490  dst[x] = *block;
1491 
1492  for (y = 1; y < size; y++) {
1493  dst += stride;
1494  for (x = 0; x < size; x++)
1495  dst[x] = *block;
1496  }
1497 }
1498 
1499 /**
1500  * Dirac Specification ->
1501  * 12. Block motion data syntax
1502  */
1503 static int dirac_unpack_block_motion_data(DiracContext *s)
1504 {
1505  GetBitContext *gb = &s->gb;
1506  uint8_t *sbsplit = s->sbsplit;
1507  int i, x, y, q, p;
1508  DiracArith arith[8];
1509 
1510  align_get_bits(gb);
1511 
1512  /* [DIRAC_STD] 11.2.4 and 12.2.1 Number of blocks and superblocks */
1513  s->sbwidth = DIVRNDUP(s->seq.width, 4*s->plane[0].xbsep);
1514  s->sbheight = DIVRNDUP(s->seq.height, 4*s->plane[0].ybsep);
1515  s->blwidth = 4 * s->sbwidth;
1516  s->blheight = 4 * s->sbheight;
1517 
1518  /* [DIRAC_STD] 12.3.1 Superblock splitting modes. superblock_split_modes()
1519  decode superblock split modes */
1520  ff_dirac_init_arith_decoder(arith, gb, get_interleaved_ue_golomb(gb)); /* get_interleaved_ue_golomb(gb) is the length */
1521  for (y = 0; y < s->sbheight; y++) {
1522  for (x = 0; x < s->sbwidth; x++) {
1523  unsigned int split = dirac_get_arith_uint(arith, CTX_SB_F1, CTX_SB_DATA);
1524  if (split > 2)
1525  return AVERROR_INVALIDDATA;
1526  sbsplit[x] = (split + pred_sbsplit(sbsplit+x, s->sbwidth, x, y)) % 3;
1527  }
1528  sbsplit += s->sbwidth;
1529  }
1530 
1531  /* setup arith decoding */
1532  ff_dirac_init_arith_decoder(arith, gb, get_interleaved_ue_golomb(gb));
1533  for (i = 0; i < s->num_refs; i++) {
1534  ff_dirac_init_arith_decoder(arith + 4 + 2 * i, gb, get_interleaved_ue_golomb(gb));
1535  ff_dirac_init_arith_decoder(arith + 5 + 2 * i, gb, get_interleaved_ue_golomb(gb));
1536  }
1537  for (i = 0; i < 3; i++)
1538  ff_dirac_init_arith_decoder(arith + 1 + i, gb, get_interleaved_ue_golomb(gb));
1539 
1540  for (y = 0; y < s->sbheight; y++)
1541  for (x = 0; x < s->sbwidth; x++) {
1542  int blkcnt = 1 << s->sbsplit[y * s->sbwidth + x];
1543  int step = 4 >> s->sbsplit[y * s->sbwidth + x];
1544 
1545  for (q = 0; q < blkcnt; q++)
1546  for (p = 0; p < blkcnt; p++) {
1547  int bx = 4 * x + p*step;
1548  int by = 4 * y + q*step;
1549  DiracBlock *block = &s->blmotion[by*s->blwidth + bx];
1550  decode_block_params(s, arith, block, s->blwidth, bx, by);
1551  propagate_block_data(block, s->blwidth, step);
1552  }
1553  }
1554 
1555  for (i = 0; i < 4 + 2*s->num_refs; i++) {
1556  if (arith[i].error)
1557  return arith[i].error;
1558  }
1559 
1560  return 0;
1561 }
1562 
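/* OBMC spatial weights: a block keeps the full weight of 8 across its centre
 * and ramps down over the 2*offset overlapped samples at each edge, so the
 * 1-D ramps of adjacent blocks always sum to 8 (and the separable 2-D weights
 * of all blocks covering a sample sum to 64). */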
1563 static int weight(int i, int blen, int offset)
1564 {
1565 #define ROLLOFF(i) offset == 1 ? ((i) ? 5 : 3) : \
1566  (1 + (6*(i) + offset - 1) / (2*offset - 1))
1567 
1568  if (i < 2*offset)
1569  return ROLLOFF(i);
1570  else if (i > blen-1 - 2*offset)
1571  return ROLLOFF(blen-1 - i);
1572  return 8;
1573 }
1574 
1575 static void init_obmc_weight_row(Plane *p, uint8_t *obmc_weight, int stride,
1576  int left, int right, int wy)
1577 {
1578  int x;
1579  for (x = 0; left && x < p->xblen >> 1; x++)
1580  obmc_weight[x] = wy*8;
1581  for (; x < p->xblen >> right; x++)
1582  obmc_weight[x] = wy*weight(x, p->xblen, p->xoffset);
1583  for (; x < p->xblen; x++)
1584  obmc_weight[x] = wy*8;
1585  for (; x < stride; x++)
1586  obmc_weight[x] = 0;
1587 }
1588 
1589 static void init_obmc_weight(Plane *p, uint8_t *obmc_weight, int stride,
1590  int left, int right, int top, int bottom)
1591 {
1592  int y;
1593  for (y = 0; top && y < p->yblen >> 1; y++) {
1594  init_obmc_weight_row(p, obmc_weight, stride, left, right, 8);
1595  obmc_weight += stride;
1596  }
1597  for (; y < p->yblen >> bottom; y++) {
1598  int wy = weight(y, p->yblen, p->yoffset);
1599  init_obmc_weight_row(p, obmc_weight, stride, left, right, wy);
1600  obmc_weight += stride;
1601  }
1602  for (; y < p->yblen; y++) {
1603  init_obmc_weight_row(p, obmc_weight, stride, left, right, 8);
1604  obmc_weight += stride;
1605  }
1606 }
1607 
1608 static void init_obmc_weights(DiracContext *s, Plane *p, int by)
1609 {
1610  int top = !by;
1611  int bottom = by == s->blheight-1;
1612 
1613  /* don't bother re-initing for rows 2 to blheight-2, the weights don't change */
1614  if (top || bottom || by == 1) {
1615  init_obmc_weight(p, s->obmc_weight[0], MAX_BLOCKSIZE, 1, 0, top, bottom);
1616  init_obmc_weight(p, s->obmc_weight[1], MAX_BLOCKSIZE, 0, 0, top, bottom);
1617  init_obmc_weight(p, s->obmc_weight[2], MAX_BLOCKSIZE, 0, 1, top, bottom);
1618  }
1619 }
1620 
1621 static const uint8_t epel_weights[4][4][4] = {
1622  {{ 16, 0, 0, 0 },
1623  { 12, 4, 0, 0 },
1624  { 8, 8, 0, 0 },
1625  { 4, 12, 0, 0 }},
1626  {{ 12, 0, 4, 0 },
1627  { 9, 3, 3, 1 },
1628  { 6, 6, 2, 2 },
1629  { 3, 9, 1, 3 }},
1630  {{ 8, 0, 8, 0 },
1631  { 6, 2, 6, 2 },
1632  { 4, 4, 4, 4 },
1633  { 2, 6, 2, 6 }},
1634  {{ 4, 0, 12, 0 },
1635  { 3, 1, 9, 3 },
1636  { 2, 2, 6, 6 },
1637  { 1, 3, 3, 9 }}
1638 };
1639 
1640 /**
1641  * For block x,y, determine which of the hpel planes to do bilinear
1642  * interpolation from and set src[] to the location in each hpel plane
1643  * to MC from.
1644  *
1645  * @return the index of the put_dirac_pixels_tab function to use
1646  * 0 for 1 plane (fpel,hpel), 1 for 2 planes (qpel), 2 for 4 planes (qpel), and 3 for epel
1647  */
1648 static int mc_subpel(DiracContext *s, DiracBlock *block, const uint8_t *src[5],
1649  int x, int y, int ref, int plane)
1650 {
1651  Plane *p = &s->plane[plane];
1652  uint8_t **ref_hpel = s->ref_pics[ref]->hpel[plane];
1653  int motion_x = block->u.mv[ref][0];
1654  int motion_y = block->u.mv[ref][1];
1655  int mx, my, i, epel, nplanes = 0;
1656 
1657  if (plane) {
1658  motion_x >>= s->chroma_x_shift;
1659  motion_y >>= s->chroma_y_shift;
1660  }
1661 
1662  mx = motion_x & ~(-1U << s->mv_precision);
1663  my = motion_y & ~(-1U << s->mv_precision);
1664  motion_x >>= s->mv_precision;
1665  motion_y >>= s->mv_precision;
1666  /* normalize subpel coordinates to epel */
1667  /* TODO: template this function? */
1668  mx <<= 3 - s->mv_precision;
1669  my <<= 3 - s->mv_precision;
1670 
1671  x += motion_x;
1672  y += motion_y;
1673  epel = (mx|my)&1;
1674 
1675  /* hpel position */
1676  if (!((mx|my)&3)) {
1677  nplanes = 1;
1678  src[0] = ref_hpel[(my>>1)+(mx>>2)] + y*p->stride + x;
1679  } else {
1680  /* qpel or epel */
1681  nplanes = 4;
1682  for (i = 0; i < 4; i++)
1683  src[i] = ref_hpel[i] + y*p->stride + x;
1684 
1685  /* if we're interpolating in the right/bottom halves, adjust the planes as needed;
1686  we increment x/y because the edge changes for half of the pixels */
1687  if (mx > 4) {
1688  src[0] += 1;
1689  src[2] += 1;
1690  x++;
1691  }
1692  if (my > 4) {
1693  src[0] += p->stride;
1694  src[1] += p->stride;
1695  y++;
1696  }
1697 
1698  /* hpel planes are:
1699  [0]: F [1]: H
1700  [2]: V [3]: C */
1701  if (!epel) {
1702  /* check if we really only need 2 planes since either mx or my is
1703  a hpel position. (epel weights of 0 handle this there) */
1704  if (!(mx&3)) {
1705  /* mx == 0: average [0] and [2]
1706  mx == 4: average [1] and [3] */
1707  src[!mx] = src[2 + !!mx];
1708  nplanes = 2;
1709  } else if (!(my&3)) {
1710  src[0] = src[(my>>1) ];
1711  src[1] = src[(my>>1)+1];
1712  nplanes = 2;
1713  }
1714  } else {
1715  /* adjust the ordering if needed so the weights work */
1716  if (mx > 4) {
1717  FFSWAP(const uint8_t *, src[0], src[1]);
1718  FFSWAP(const uint8_t *, src[2], src[3]);
1719  }
1720  if (my > 4) {
1721  FFSWAP(const uint8_t *, src[0], src[2]);
1722  FFSWAP(const uint8_t *, src[1], src[3]);
1723  }
1724  src[4] = epel_weights[my&3][mx&3];
1725  }
1726  }
1727 
1728  /* fixme: v/h _edge_pos */
1729  if (x + p->xblen > p->width +EDGE_WIDTH/2 ||
1730  y + p->yblen > p->height+EDGE_WIDTH/2 ||
1731  x < 0 || y < 0) {
1732  for (i = 0; i < nplanes; i++) {
1733  s->vdsp.emulated_edge_mc(s->edge_emu_buffer[i], src[i],
1734  p->stride, p->stride,
1735  p->xblen, p->yblen, x, y,
1736  p->width+EDGE_WIDTH/2, p->height+EDGE_WIDTH/2);
1737  src[i] = s->edge_emu_buffer[i];
1738  }
1739  }
1740  return (nplanes>>1) + epel;
1741 }
1742 
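/* DC-only blocks: Dirac codes pixels with the mid-grey offset removed, so the
 * signed DC value is re-centred with +128 (the MC code here operates on 8-bit
 * planes) before being spread over the block with the OBMC weights. */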
1743 static void add_dc(uint16_t *dst, int dc, int stride,
1744  uint8_t *obmc_weight, int xblen, int yblen)
1745 {
1746  int x, y;
1747  dc += 128;
1748 
1749  for (y = 0; y < yblen; y++) {
1750  for (x = 0; x < xblen; x += 2) {
1751  dst[x ] += dc * obmc_weight[x ];
1752  dst[x+1] += dc * obmc_weight[x+1];
1753  }
1754  dst += stride;
1755  obmc_weight += MAX_BLOCKSIZE;
1756  }
1757 }
1758 
1759 static void block_mc(DiracContext *s, DiracBlock *block,
1760  uint16_t *mctmp, uint8_t *obmc_weight,
1761  int plane, int dstx, int dsty)
1762 {
1763  Plane *p = &s->plane[plane];
1764  const uint8_t *src[5];
1765  int idx;
1766 
1767  switch (block->ref&3) {
1768  case 0: /* DC */
1769  add_dc(mctmp, block->u.dc[plane], p->stride, obmc_weight, p->xblen, p->yblen);
1770  return;
1771  case 1:
1772  case 2:
1773  idx = mc_subpel(s, block, src, dstx, dsty, (block->ref&3)-1, plane);
1774  s->put_pixels_tab[idx](s->mcscratch, src, p->stride, p->yblen);
1775  if (s->weight_func)
1776  s->weight_func(s->mcscratch, p->stride, s->weight_log2denom,
1777  s->weight[0] + s->weight[1], p->yblen);
1778  break;
1779  case 3:
1780  idx = mc_subpel(s, block, src, dstx, dsty, 0, plane);
1781  s->put_pixels_tab[idx](s->mcscratch, src, p->stride, p->yblen);
1782  idx = mc_subpel(s, block, src, dstx, dsty, 1, plane);
1783  if (s->biweight_func) {
1784  /* fixme: +32 is a quick hack */
1785  s->put_pixels_tab[idx](s->mcscratch + 32, src, p->stride, p->yblen);
1786  s->biweight_func(s->mcscratch, s->mcscratch+32, p->stride, s->weight_log2denom,
1787  s->weight[0], s->weight[1], p->yblen);
1788  } else
1789  s->avg_pixels_tab[idx](s->mcscratch, src, p->stride, p->yblen);
1790  break;
1791  }
1792  s->add_obmc(mctmp, s->mcscratch, p->stride, obmc_weight, p->yblen);
1793 }
1794 
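/* One row of block motion compensation: the leftmost and rightmost blocks use
 * the edge variants of the OBMC weight tables (obmc_weight[0] and [2]), all
 * interior blocks share obmc_weight[1], and each block advances the
 * accumulator by xbsep samples. */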
1795 static void mc_row(DiracContext *s, DiracBlock *block, uint16_t *mctmp, int plane, int dsty)
1796 {
1797  Plane *p = &s->plane[plane];
1798  int x, dstx = p->xbsep - p->xoffset;
1799 
1800  block_mc(s, block, mctmp, s->obmc_weight[0], plane, -p->xoffset, dsty);
1801  mctmp += p->xbsep;
1802 
1803  for (x = 1; x < s->blwidth-1; x++) {
1804  block_mc(s, block+x, mctmp, s->obmc_weight[1], plane, dstx, dsty);
1805  dstx += p->xbsep;
1806  mctmp += p->xbsep;
1807  }
1808  block_mc(s, block+x, mctmp, s->obmc_weight[2], plane, dstx, dsty);
1809 }
1810 
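/* Pick the DSP routines for the current block size: the tables are indexed by
 * width class (<=8, <=16, else 32), and the explicit weighting functions are
 * only installed when the stream uses non-default reference weights; otherwise
 * plain put/avg is enough. */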
1811 static void select_dsp_funcs(DiracContext *s, int width, int height, int xblen, int yblen)
1812 {
1813  int idx = 0;
1814  if (xblen > 8)
1815  idx = 1;
1816  if (xblen > 16)
1817  idx = 2;
1818 
1819  memcpy(s->put_pixels_tab, s->diracdsp.put_dirac_pixels_tab[idx], sizeof(s->put_pixels_tab));
1820  memcpy(s->avg_pixels_tab, s->diracdsp.avg_dirac_pixels_tab[idx], sizeof(s->avg_pixels_tab));
1821  s->add_obmc = s->diracdsp.add_dirac_obmc[idx];
1822  if (s->weight_log2denom > 1 || s->weight[0] != 1 || s->weight[1] != 1) {
1823  s->weight_func = s->diracdsp.weight_dirac_pixels_tab[idx];
1824  s->biweight_func = s->diracdsp.biweight_dirac_pixels_tab[idx];
1825  } else {
1826  s->weight_func = NULL;
1827  s->biweight_func = NULL;
1828  }
1829 }
1830 
1831 static int interpolate_refplane(DiracContext *s, DiracFrame *ref, int plane, int width, int height)
1832 {
1833  /* chroma allocates an edge of 8 when subsampled
1834  which for 4:2:2 means an h edge of 16 and v edge of 8
1835  just use 8 for everything for the moment */
1836  int i, edge = EDGE_WIDTH/2;
1837 
1838  ref->hpel[plane][0] = ref->avframe->data[plane];
1839  s->mpvencdsp.draw_edges(ref->hpel[plane][0], ref->avframe->linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM); /* EDGE_TOP | EDGE_BOTTOM values just copied to make it build, this needs to be ensured */
1840 
1841  /* no need for hpel if we only have fpel vectors */
1842  if (!s->mv_precision)
1843  return 0;
1844 
1845  for (i = 1; i < 4; i++) {
1846  if (!ref->hpel_base[plane][i])
1847  ref->hpel_base[plane][i] = av_malloc((height+2*edge) * ref->avframe->linesize[plane] + 32);
1848  if (!ref->hpel_base[plane][i]) {
1849  return AVERROR(ENOMEM);
1850  }
1851  /* we need to be 16-byte aligned even for chroma */
1852  ref->hpel[plane][i] = ref->hpel_base[plane][i] + edge*ref->avframe->linesize[plane] + 16;
1853  }
1854 
1855  if (!ref->interpolated[plane]) {
1856  s->diracdsp.dirac_hpel_filter(ref->hpel[plane][1], ref->hpel[plane][2],
1857  ref->hpel[plane][3], ref->hpel[plane][0],
1858  ref->avframe->linesize[plane], width, height);
1859  s->mpvencdsp.draw_edges(ref->hpel[plane][1], ref->avframe->linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM);
1860  s->mpvencdsp.draw_edges(ref->hpel[plane][2], ref->avframe->linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM);
1861  s->mpvencdsp.draw_edges(ref->hpel[plane][3], ref->avframe->linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM);
1862  }
1863  ref->interpolated[plane] = 1;
1864 
1865  return 0;
1866 }
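
/*
 * Illustrative sketch, not part of diracdec.c: the buffer math used for the
 * half-pel planes above.  hpel[plane][0] aliases the frame data; planes 1..3
 * are separate allocations of (height + 2*edge) lines plus 32 spare bytes,
 * and the working pointer skips `edge` lines plus 16 bytes so that it stays
 * 16-byte aligned (assuming the base allocation itself is suitably aligned,
 * as the comment in the code requires).
 */
#include <stddef.h>
#include <stdint.h>

static size_t demo_hpel_alloc_size(int height, ptrdiff_t linesize, int edge)
{
    return (size_t)(height + 2 * edge) * linesize + 32;
}

static uint8_t *demo_hpel_pointer(uint8_t *base, ptrdiff_t linesize, int edge)
{
    return base + edge * linesize + 16;
}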
1867 
1868 /**
1869  * Dirac Specification ->
1870  * 13.0 Transform data syntax. transform_data()
1871  */
1872 static int dirac_decode_frame_internal(DiracContext *s)
1873 {
1874  DWTContext d;
1875  int y, i, comp, dsty;
1876  int ret;
1877 
1878  if (s->low_delay) {
1879  /* [DIRAC_STD] 13.5.1 low_delay_transform_data() */
1880  if (!s->hq_picture) {
1881  for (comp = 0; comp < 3; comp++) {
1882  Plane *p = &s->plane[comp];
1883  memset(p->idwt.buf, 0, p->idwt.stride * p->idwt.height);
1884  }
1885  }
1886  if (!s->zero_res) {
1887  if ((ret = decode_lowdelay(s)) < 0)
1888  return ret;
1889  }
1890  }
1891 
1892  for (comp = 0; comp < 3; comp++) {
1893  Plane *p = &s->plane[comp];
1894  uint8_t *frame = s->current_picture->avframe->data[comp];
1895 
1896  /* FIXME: small resolutions */
1897  for (i = 0; i < 4; i++)
1898  s->edge_emu_buffer[i] = s->edge_emu_buffer_base + i*FFALIGN(p->width, 16);
1899 
1900  if (!s->zero_res && !s->low_delay)
1901  {
1902  memset(p->idwt.buf, 0, p->idwt.stride * p->idwt.height);
1903  ret = decode_component(s, comp); /* [DIRAC_STD] 13.4.1 core_transform_data() */
1904  if (ret < 0)
1905  return ret;
1906  }
1907  ret = ff_spatial_idwt_init(&d, &p->idwt, s->wavelet_idx+2,
1908  s->wavelet_depth, s->bit_depth);
1909  if (ret < 0)
1910  return ret;
1911 
1912  if (!s->num_refs) { /* intra */
1913  for (y = 0; y < p->height; y += 16) {
1914  int idx = (s->bit_depth - 8) >> 1;
1915  ff_spatial_idwt_slice2(&d, y+16); /* decode */
1916  s->diracdsp.put_signed_rect_clamped[idx](frame + y*p->stride,
1917  p->stride,
1918  p->idwt.buf + y*p->idwt.stride,
1919  p->idwt.stride, p->width, 16);
1920  }
1921  } else { /* inter */
1922  int rowheight = p->ybsep*p->stride;
1923 
1924  select_dsp_funcs(s, p->width, p->height, p->xblen, p->yblen);
1925 
1926  for (i = 0; i < s->num_refs; i++) {
1927  int ret = interpolate_refplane(s, s->ref_pics[i], comp, p->width, p->height);
1928  if (ret < 0)
1929  return ret;
1930  }
1931 
1932  memset(s->mctmp, 0, 4*p->yoffset*p->stride);
1933 
1934  dsty = -p->yoffset;
1935  for (y = 0; y < s->blheight; y++) {
1936  int h = 0,
1937  start = FFMAX(dsty, 0);
1938  uint16_t *mctmp = s->mctmp + y*rowheight;
1939  DiracBlock *blocks = s->blmotion + y*s->blwidth;
1940 
1941  init_obmc_weights(s, p, y);
1942 
1943  if (y == s->blheight-1 || start+p->ybsep > p->height)
1944  h = p->height - start;
1945  else
1946  h = p->ybsep - (start - dsty);
1947  if (h < 0)
1948  break;
1949 
1950  memset(mctmp+2*p->yoffset*p->stride, 0, 2*rowheight);
1951  mc_row(s, blocks, mctmp, comp, dsty);
1952 
1953  mctmp += (start - dsty)*p->stride + p->xoffset;
1954  ff_spatial_idwt_slice2(&d, start + h); /* decode */
1955  /* NOTE: add_rect_clamped hasn't been templated hence the shifts.
1956  * idwt.stride is passed as pixels, not in bytes as in the rest of the decoder */
1957  s->diracdsp.add_rect_clamped(frame + start*p->stride, mctmp, p->stride,
1958  (int16_t*)(p->idwt.buf) + start*(p->idwt.stride >> 1), (p->idwt.stride >> 1), p->width, h);
1959 
1960  dsty += p->ybsep;
1961  }
1962  }
1963  }
1964 
1965 
1966  return 0;
1967 }
1968 
1969 static int get_buffer_with_edge(AVCodecContext *avctx, AVFrame *f, int flags)
1970 {
1971  int ret, i;
1972  int chroma_x_shift, chroma_y_shift;
1973  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &chroma_x_shift,
1974  &chroma_y_shift);
1975  if (ret < 0)
1976  return ret;
1977 
1978  f->width = avctx->width + 2 * EDGE_WIDTH;
1979  f->height = avctx->height + 2 * EDGE_WIDTH + 2;
1980  ret = ff_get_buffer(avctx, f, flags);
1981  if (ret < 0)
1982  return ret;
1983 
1984  for (i = 0; f->data[i]; i++) {
1985  int offset = (EDGE_WIDTH >> (i && i<3 ? chroma_y_shift : 0)) *
1986  f->linesize[i] + 32;
1987  f->data[i] += offset;
1988  }
1989  f->width = avctx->width;
1990  f->height = avctx->height;
1991 
1992  return 0;
1993 }
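
/*
 * Illustrative sketch, not part of diracdec.c: the per-plane offset applied
 * above.  The frame is over-allocated by 2*EDGE_WIDTH in each dimension and
 * each data pointer is then advanced EDGE_WIDTH rows (EDGE_WIDTH >>
 * chroma_y_shift rows for the chroma planes) plus 32 bytes, so motion
 * compensation can run into the borders.  For a hypothetical 4:2:0 frame
 * with linesizes of 2048 (luma) and 1024 (chroma) - values made up for
 * illustration - this gives 16*2048 + 32 = 32800 and 8*1024 + 32 = 8224.
 */
static int demo_edge_offset(int plane, int linesize, int chroma_y_shift)
{
    int edge_rows = 16 >> (plane && plane < 3 ? chroma_y_shift : 0);
    return edge_rows * linesize + 32;
}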
1994 
1995 /**
1996  * Dirac Specification ->
1997  * 11.1.1 Picture Header. picture_header()
1998  */
1999 static int dirac_decode_picture_header(DiracContext *s)
2000 {
2001  unsigned retire, picnum;
2002  int i, j, ret;
2003  int64_t refdist, refnum;
2004  GetBitContext *gb = &s->gb;
2005 
2006  /* [DIRAC_STD] 11.1.1 Picture Header. picture_header() PICTURE_NUM */
2007  picnum = s->current_picture->picture_number = get_bits_long(gb, 32);
2008 
2009 
2010  av_log(s->avctx,AV_LOG_DEBUG,"PICTURE_NUM: %d\n",picnum);
2011 
2012  /* if this is the first keyframe after a sequence header, start our
2013  reordering from here */
2014  if (s->frame_number < 0)
2015  s->frame_number = picnum;
2016 
2017  s->ref_pics[0] = s->ref_pics[1] = NULL;
2018  for (i = 0; i < s->num_refs; i++) {
2019  refnum = (picnum + dirac_get_se_golomb(gb)) & 0xFFFFFFFF;
2020  refdist = INT64_MAX;
2021 
2022  /* find the closest reference to the one we want */
2023  /* Jordi: this is needed if the referenced picture hasn't yet arrived */
2024  for (j = 0; j < MAX_REFERENCE_FRAMES && refdist; j++)
2025  if (s->ref_frames[j]
2026  && FFABS(s->ref_frames[j]->picture_number - refnum) < refdist) {
2027  s->ref_pics[i] = s->ref_frames[j];
2028  refdist = FFABS(s->ref_frames[j]->picture_number - refnum);
2029  }
2030 
2031  if (!s->ref_pics[i] || refdist)
2032  av_log(s->avctx, AV_LOG_DEBUG, "Reference not found\n");
2033 
2034  /* if there were no references at all, allocate one */
2035  if (!s->ref_pics[i])
2036  for (j = 0; j < MAX_FRAMES; j++)
2037  if (!s->all_frames[j].avframe->data[0]) {
2038  s->ref_pics[i] = &s->all_frames[j];
2039  ret = get_buffer_with_edge(s->avctx, s->ref_pics[i]->avframe, AV_GET_BUFFER_FLAG_REF);
2040  if (ret < 0)
2041  return ret;
2042  break;
2043  }
2044 
2045  if (!s->ref_pics[i]) {
2046  av_log(s->avctx, AV_LOG_ERROR, "Reference could not be allocated\n");
2047  return AVERROR_INVALIDDATA;
2048  }
2049 
2050  }
2051 
2052  /* retire the reference frames that are not used anymore */
2053  if (s->current_picture->reference) {
2054  retire = (picnum + dirac_get_se_golomb(gb)) & 0xFFFFFFFF;
2055  if (retire != picnum) {
2056  DiracFrame *retire_pic = remove_frame(s->ref_frames, retire);
2057 
2058  if (retire_pic)
2059  retire_pic->reference &= DELAYED_PIC_REF;
2060  else
2061  av_log(s->avctx, AV_LOG_DEBUG, "Frame to retire not found\n");
2062  }
2063 
2064  /* if reference array is full, remove the oldest as per the spec */
2065  while (add_frame(s->ref_frames, MAX_REFERENCE_FRAMES, s->current_picture)) {
2066  av_log(s->avctx, AV_LOG_ERROR, "Reference frame overflow\n");
2067  remove_frame(s->ref_frames, s->ref_frames[0]->picture_number)->reference &= DELAYED_PIC_REF;
2068  }
2069  }
2070 
2071  if (s->num_refs) {
2072  ret = dirac_unpack_prediction_parameters(s); /* [DIRAC_STD] 11.2 Picture Prediction Data. picture_prediction() */
2073  if (ret < 0)
2074  return ret;
2075  ret = dirac_unpack_block_motion_data(s); /* [DIRAC_STD] 12. Block motion data syntax */
2076  if (ret < 0)
2077  return ret;
2078  }
2079  ret = dirac_unpack_idwt_params(s); /* [DIRAC_STD] 11.3 Wavelet transform data */
2080  if (ret < 0)
2081  return ret;
2082 
2083  init_planes(s);
2084  return 0;
2085 }
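
/*
 * Illustrative sketch, not part of diracdec.c: reference picture numbers in
 * the picture header are signed Golomb deltas added to PICTURE_NUM and
 * wrapped to 32 bits, which is what the `& 0xFFFFFFFF` above does.  For
 * example picnum = 100 with delta = -2 gives refnum = 98, while picnum = 1
 * with delta = -3 wraps to 0xFFFFFFFE.  The decoder then picks whichever
 * frame in ref_frames[] is closest to refnum, so a reference that never
 * arrived still resolves to the nearest available picture.
 */
#include <stdint.h>

static uint32_t demo_refnum(uint32_t picnum, int32_t delta)
{
    return (uint32_t)(picnum + delta);  /* modulo-2^32 wrap */
}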
2086 
2087 static int get_delayed_pic(DiracContext *s, AVFrame *picture, int *got_frame)
2088 {
2089  DiracFrame *out = s->delay_frames[0];
2090  int i, out_idx = 0;
2091  int ret;
2092 
2093  /* find frame with lowest picture number */
2094  for (i = 1; s->delay_frames[i]; i++)
2095  if (s->delay_frames[i]->picture_number < out->picture_number) {
2096  out = s->delay_frames[i];
2097  out_idx = i;
2098  }
2099 
2100  for (i = out_idx; s->delay_frames[i]; i++)
2101  s->delay_frames[i] = s->delay_frames[i+1];
2102 
2103  if (out) {
2104  out->reference ^= DELAYED_PIC_REF;
2105  if((ret = av_frame_ref(picture, out->avframe)) < 0)
2106  return ret;
2107  *got_frame = 1;
2108  }
2109 
2110  return 0;
2111 }
2112 
2113 /**
2114  * Dirac Specification ->
2115  * 9.6 Parse Info Header Syntax. parse_info()
2116  * 4 byte start code + byte parse code + 4 byte size + 4 byte previous size
2117  */
2118 #define DATA_UNIT_HEADER_SIZE 13
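
/*
 * Illustrative sketch, not part of diracdec.c: the 13-byte parse-info header
 * described above, read with plain big-endian loads.  Struct and field names
 * are made up for illustration; the offsets match the accesses in
 * dirac_decode_data_unit() and dirac_decode_frame() below (parse code at
 * byte 4, data unit size at byte 5).
 */
#include <stdint.h>
#include <string.h>

struct demo_parse_info {
    uint8_t  parse_code;
    uint32_t data_unit_size;       /* 4-byte size field */
    uint32_t prev_data_unit_size;  /* 4-byte previous size field */
};

static int demo_read_parse_info(const uint8_t *buf, int size,
                                struct demo_parse_info *pi)
{
    if (size < 13 || memcmp(buf, "BBCD", 4))
        return -1;
    pi->parse_code          = buf[4];
    pi->data_unit_size      = (uint32_t)buf[5] << 24 | buf[6]  << 16 |
                                        buf[7] <<  8 | buf[8];
    pi->prev_data_unit_size = (uint32_t)buf[9] << 24 | buf[10] << 16 |
                                        buf[11] << 8 | buf[12];
    return 0;
}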
2119 
2120 /* [DIRAC_STD] dirac_decode_data_unit makes reference to the while loop defined in 9.3
2121  inside the function parse_sequence() */
2122 static int dirac_decode_data_unit(AVCodecContext *avctx, const uint8_t *buf, int size)
2123 {
2124  DiracContext *s = avctx->priv_data;
2125  DiracFrame *pic = NULL;
2126  AVDiracSeqHeader *dsh;
2127  int ret, i;
2128  uint8_t parse_code;
2129  unsigned tmp;
2130 
2131  if (size < DATA_UNIT_HEADER_SIZE)
2132  return AVERROR_INVALIDDATA;
2133 
2134  parse_code = buf[4];
2135 
2136  init_get_bits(&s->gb, &buf[13], 8*(size - DATA_UNIT_HEADER_SIZE));
2137 
2138  if (parse_code == DIRAC_PCODE_SEQ_HEADER) {
2139  if (s->seen_sequence_header)
2140  return 0;
2141 
2142  /* [DIRAC_STD] 10. Sequence header */
2143  ret = av_dirac_parse_sequence_header(&dsh, buf + DATA_UNIT_HEADER_SIZE, size - DATA_UNIT_HEADER_SIZE, avctx);
2144  if (ret < 0) {
2145  av_log(avctx, AV_LOG_ERROR, "error parsing sequence header");
2146  return ret;
2147  }
2148 
2149  if (CALC_PADDING((int64_t)dsh->width, MAX_DWT_LEVELS) * CALC_PADDING((int64_t)dsh->height, MAX_DWT_LEVELS) > avctx->max_pixels)
2150  ret = AVERROR(ERANGE);
2151  if (ret >= 0)
2152  ret = ff_set_dimensions(avctx, dsh->width, dsh->height);
2153  if (ret < 0) {
2154  av_freep(&dsh);
2155  return ret;
2156  }
2157 
2158  ff_set_sar(avctx, dsh->sample_aspect_ratio);
2159  avctx->pix_fmt = dsh->pix_fmt;
2160  avctx->color_range = dsh->color_range;
2161  avctx->color_trc = dsh->color_trc;
2162  avctx->color_primaries = dsh->color_primaries;
2163  avctx->colorspace = dsh->colorspace;
2164  avctx->profile = dsh->profile;
2165  avctx->level = dsh->level;
2166  avctx->framerate = dsh->framerate;
2167  s->bit_depth = dsh->bit_depth;
2168  s->version.major = dsh->version.major;
2169  s->version.minor = dsh->version.minor;
2170  s->seq = *dsh;
2171  av_freep(&dsh);
2172 
2173  s->pshift = s->bit_depth > 8;
2174 
2175  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt,
2176  &s->chroma_x_shift,
2177  &s->chroma_y_shift);
2178  if (ret < 0)
2179  return ret;
2180 
2181  ret = alloc_sequence_buffers(s);
2182  if (ret < 0)
2183  return ret;
2184 
2185  s->seen_sequence_header = 1;
2186  } else if (parse_code == DIRAC_PCODE_END_SEQ) { /* [DIRAC_STD] End of Sequence */
2188  s->seen_sequence_header = 0;
2189  } else if (parse_code == DIRAC_PCODE_AUX) {
2190  if (buf[13] == 1) { /* encoder implementation/version */
2191  int ver[3];
2192  /* versions older than 1.0.8 don't store quant delta for
2193  subbands with only one codeblock */
2194  if (sscanf(buf+14, "Schroedinger %d.%d.%d", ver, ver+1, ver+2) == 3)
2195  if (ver[0] == 1 && ver[1] == 0 && ver[2] <= 7)
2196  s->old_delta_quant = 1;
2197  }
2198  } else if (parse_code & 0x8) { /* picture data unit */
2199  if (!s->seen_sequence_header) {
2200  av_log(avctx, AV_LOG_DEBUG, "Dropping frame without sequence header\n");
2201  return AVERROR_INVALIDDATA;
2202  }
2203 
2204  /* find an unused frame */
2205  for (i = 0; i < MAX_FRAMES; i++)
2206  if (s->all_frames[i].avframe->data[0] == NULL)
2207  pic = &s->all_frames[i];
2208  if (!pic) {
2209  av_log(avctx, AV_LOG_ERROR, "framelist full\n");
2210  return AVERROR_INVALIDDATA;
2211  }
2212 
2213  av_frame_unref(pic->avframe);
2214 
2215  /* [DIRAC_STD] Defined in 9.6.1 ... */
2216  tmp = parse_code & 0x03; /* [DIRAC_STD] num_refs() */
2217  if (tmp > 2) {
2218  av_log(avctx, AV_LOG_ERROR, "num_refs of 3\n");
2219  return AVERROR_INVALIDDATA;
2220  }
2221  s->num_refs = tmp;
2222  s->is_arith = (parse_code & 0x48) == 0x08; /* [DIRAC_STD] using_ac() */
2223  s->low_delay = (parse_code & 0x88) == 0x88; /* [DIRAC_STD] is_low_delay() */
2224  s->core_syntax = (parse_code & 0x88) == 0x08; /* [DIRAC_STD] is_core_syntax() */
2225  s->ld_picture = (parse_code & 0xF8) == 0xC8; /* [DIRAC_STD] is_ld_picture() */
2226  s->hq_picture = (parse_code & 0xF8) == 0xE8; /* [DIRAC_STD] is_hq_picture() */
2227  s->dc_prediction = (parse_code & 0x28) == 0x08; /* [DIRAC_STD] using_dc_prediction() */
2228  pic->reference = (parse_code & 0x0C) == 0x0C; /* [DIRAC_STD] is_reference() */
2229  if (s->num_refs == 0) /* [DIRAC_STD] is_intra() */
2230  pic->avframe->flags |= AV_FRAME_FLAG_KEY;
2231  else
2232  pic->avframe->flags &= ~AV_FRAME_FLAG_KEY;
2233  pic->avframe->pict_type = s->num_refs + 1; /* Definition of AVPictureType in avutil.h */
2234 
2235  /* VC-2 Low Delay has a different parse code than the Dirac Low Delay */
2236  if (s->version.minor == 2 && parse_code == 0x88)
2237  s->ld_picture = 1;
2238 
2239  if (s->low_delay && !(s->ld_picture || s->hq_picture) ) {
2240  av_log(avctx, AV_LOG_ERROR, "Invalid low delay flag\n");
2241  return AVERROR_INVALIDDATA;
2242  }
2243 
2244  if ((ret = get_buffer_with_edge(avctx, pic->avframe, (parse_code & 0x0C) == 0x0C ? AV_GET_BUFFER_FLAG_REF : 0)) < 0)
2245  return ret;
2246  s->current_picture = pic;
2247  s->plane[0].stride = pic->avframe->linesize[0];
2248  s->plane[1].stride = pic->avframe->linesize[1];
2249  s->plane[2].stride = pic->avframe->linesize[2];
2250 
2251  if (alloc_buffers(s, FFMAX3(FFABS(s->plane[0].stride), FFABS(s->plane[1].stride), FFABS(s->plane[2].stride))) < 0)
2252  return AVERROR(ENOMEM);
2253 
2254  /* [DIRAC_STD] 11.1 Picture parse. picture_parse() */
2255  ret = dirac_decode_picture_header(s);
2256  if (ret < 0)
2257  return ret;
2258 
2259  /* [DIRAC_STD] 13.0 Transform data syntax. transform_data() */
2260  ret = dirac_decode_frame_internal(s);
2261  if (ret < 0)
2262  return ret;
2263  }
2264  return 0;
2265 }
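
/*
 * Illustrative sketch, not part of diracdec.c: the parse-code bit tests used
 * above for picture data units, collected into one helper.  Struct and
 * function names are made up for illustration; the masks are the same ones
 * the decoder applies.
 */
struct demo_pic_flags {
    int num_refs;      /* low two bits */
    int is_arith;      /* (pc & 0x48) == 0x08 */
    int low_delay;     /* (pc & 0x88) == 0x88 */
    int core_syntax;   /* (pc & 0x88) == 0x08 */
    int ld_picture;    /* (pc & 0xF8) == 0xC8 */
    int hq_picture;    /* (pc & 0xF8) == 0xE8 */
    int dc_prediction; /* (pc & 0x28) == 0x08 */
    int is_reference;  /* (pc & 0x0C) == 0x0C */
};

static struct demo_pic_flags demo_parse_code_flags(unsigned pc)
{
    struct demo_pic_flags f;
    f.num_refs      =  pc & 0x03;
    f.is_arith      = (pc & 0x48) == 0x08;
    f.low_delay     = (pc & 0x88) == 0x88;
    f.core_syntax   = (pc & 0x88) == 0x08;
    f.ld_picture    = (pc & 0xF8) == 0xC8;
    f.hq_picture    = (pc & 0xF8) == 0xE8;
    f.dc_prediction = (pc & 0x28) == 0x08;
    f.is_reference  = (pc & 0x0C) == 0x0C;
    return f;
}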
2266 
2267 static int dirac_decode_frame(AVCodecContext *avctx, AVFrame *picture,
2268  int *got_frame, AVPacket *pkt)
2269 {
2270  DiracContext *s = avctx->priv_data;
2271  const uint8_t *buf = pkt->data;
2272  int buf_size = pkt->size;
2273  int i, buf_idx = 0;
2274  int ret;
2275  unsigned data_unit_size;
2276 
2277  /* release unused frames */
2278  for (i = 0; i < MAX_FRAMES; i++)
2279  if (s->all_frames[i].avframe->data[0] && !s->all_frames[i].reference) {
2280  av_frame_unref(s->all_frames[i].avframe);
2281  memset(s->all_frames[i].interpolated, 0, sizeof(s->all_frames[i].interpolated));
2282  }
2283 
2284  s->current_picture = NULL;
2285  *got_frame = 0;
2286 
2287  /* end of stream, so flush delayed pics */
2288  if (buf_size == 0)
2289  return get_delayed_pic(s, picture, got_frame);
2290 
2291  for (;;) {
2292  /*[DIRAC_STD] Here starts the code from parse_info() defined in 9.6
2293  [DIRAC_STD] PARSE_INFO_PREFIX = "BBCD" as defined in ISO/IEC 646
2294  BBCD start code search */
2295  for (; buf_idx + DATA_UNIT_HEADER_SIZE < buf_size; buf_idx++) {
2296  if (buf[buf_idx ] == 'B' && buf[buf_idx+1] == 'B' &&
2297  buf[buf_idx+2] == 'C' && buf[buf_idx+3] == 'D')
2298  break;
2299  }
2300  /* BBCD found or end of data */
2301  if (buf_idx + DATA_UNIT_HEADER_SIZE >= buf_size)
2302  break;
2303 
2304  data_unit_size = AV_RB32(buf+buf_idx+5);
2305  if (data_unit_size > buf_size - buf_idx || !data_unit_size) {
2306  if(data_unit_size > buf_size - buf_idx)
2307  av_log(s->avctx, AV_LOG_ERROR,
2308  "Data unit with size %d is larger than input buffer, discarding\n",
2309  data_unit_size);
2310  buf_idx += 4;
2311  continue;
2312  }
2313  /* [DIRAC_STD] dirac_decode_data_unit makes reference to the while loop defined in 9.3 inside the function parse_sequence() */
2314  ret = dirac_decode_data_unit(avctx, buf+buf_idx, data_unit_size);
2315  if (ret < 0)
2316  {
2317  av_log(s->avctx, AV_LOG_ERROR,"Error in dirac_decode_data_unit\n");
2318  return ret;
2319  }
2320  buf_idx += data_unit_size;
2321  }
2322 
2323  if (!s->current_picture)
2324  return buf_size;
2325 
2326  if (s->current_picture->picture_number > s->frame_number) {
2327  DiracFrame *delayed_frame = remove_frame(s->delay_frames, s->frame_number);
2328 
2329  s->current_picture->reference |= DELAYED_PIC_REF;
2330 
2331  if (add_frame(s->delay_frames, MAX_DELAY, s->current_picture)) {
2332  unsigned min_num = s->delay_frames[0]->picture_number;
2333  /* Too many delayed frames, so we display the frame with the lowest pts */
2334  av_log(avctx, AV_LOG_ERROR, "Delay frame overflow\n");
2335 
2336  for (i = 1; s->delay_frames[i]; i++)
2337  if (s->delay_frames[i]->picture_number < min_num)
2338  min_num = s->delay_frames[i]->picture_number;
2339 
2340  delayed_frame = remove_frame(s->delay_frames, min_num);
2341  add_frame(s->delay_frames, MAX_DELAY, s->current_picture);
2342  }
2343 
2344  if (delayed_frame) {
2345  delayed_frame->reference ^= DELAYED_PIC_REF;
2346  if((ret = av_frame_ref(picture, delayed_frame->avframe)) < 0)
2347  return ret;
2348  s->frame_number = delayed_frame->picture_number + 1LL;
2349  *got_frame = 1;
2350  }
2351  } else if (s->current_picture->picture_number == s->frame_number) {
2352  /* The right frame at the right time :-) */
2353  if((ret = av_frame_ref(picture, s->current_picture->avframe)) < 0)
2354  return ret;
2355  s->frame_number = s->current_picture->picture_number + 1LL;
2356  *got_frame = 1;
2357  }
2358 
2359  return buf_idx;
2360 }
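
/*
 * Illustrative sketch, not part of diracdec.c: the output decision made at
 * the end of dirac_decode_frame() above.  Names are made up for
 * illustration.  A picture numbered above the next expected frame_number is
 * held back as a delayed picture (DELAYED_PIC_REF); one that matches
 * frame_number is returned immediately and frame_number advances past it;
 * anything lower produces no output on this call.
 */
#include <stdint.h>

enum demo_reorder_action { DEMO_OUTPUT_NOW, DEMO_DELAY, DEMO_NO_OUTPUT };

static enum demo_reorder_action demo_reorder(int64_t picture_number,
                                             int64_t frame_number)
{
    if (picture_number > frame_number)
        return DEMO_DELAY;          /* buffer in delay_frames[] */
    if (picture_number == frame_number)
        return DEMO_OUTPUT_NOW;     /* "the right frame at the right time" */
    return DEMO_NO_OUTPUT;
}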
2361 
2362 const FFCodec ff_dirac_decoder = {
2363  .p.name = "dirac",
2364  CODEC_LONG_NAME("BBC Dirac VC-2"),
2365  .p.type = AVMEDIA_TYPE_VIDEO,
2366  .p.id = AV_CODEC_ID_DIRAC,
2367  .priv_data_size = sizeof(DiracContext),
2368  .init = dirac_decode_init,
2369  .close = dirac_decode_end,
2370  FF_CODEC_DECODE_CB(dirac_decode_frame),
2371  .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS,
2372  .flush = dirac_decode_flush,
2373  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
2374 };
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:32
DWTPlane::buf
uint8_t * buf
Definition: dirac_dwt.h:41
DATA_UNIT_HEADER_SIZE
#define DATA_UNIT_HEADER_SIZE
Dirac Specification -> 9.6 Parse Info Header Syntax.
Definition: diracdec.c:2118
A
#define A(x)
Definition: vpx_arith.h:28
DiracContext::put_pixels_tab
void(* put_pixels_tab[4])(uint8_t *dst, const uint8_t *src[5], int stride, int h)
Definition: diracdec.c:230
DiracContext::blmotion
DiracBlock * blmotion
Definition: diracdec.c:219
av_dirac_parse_sequence_header
int av_dirac_parse_sequence_header(AVDiracSeqHeader **pdsh, const uint8_t *buf, size_t buf_size, void *log_ctx)
Parse a Dirac sequence header.
Definition: dirac.c:404
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:278
DiracContext::num_y
unsigned num_y
Definition: diracdec.c:176
level
uint8_t level
Definition: svq3.c:205
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:43
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:695
DiracContext::blwidth
int blwidth
Definition: diracdec.c:213
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
DiracVersionInfo
Definition: dirac.h:80
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:691
SliceCoeffs::left
int left
Definition: diracdec.c:816
mem_internal.h
out
FILE * out
Definition: movenc.c:55
EDGE_BOTTOM
#define EDGE_BOTTOM
Definition: mpegvideoencdsp.h:30
comp
static void comp(unsigned char *dst, ptrdiff_t dst_stride, unsigned char *src, ptrdiff_t src_stride, int add)
Definition: eamad.c:81
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:251
thread.h
DiracBlock::ref
uint8_t ref
Definition: diracdec.c:90
DiracFrame::picture_number
unsigned picture_number
Definition: diracdec.c:82
subband_hh
@ subband_hh
Definition: diracdec.c:248
CTX_MV_DATA
#define CTX_MV_DATA
Definition: dirac_arith.h:75
MAX_DWT_LEVELS
#define MAX_DWT_LEVELS
The spec limits the number of wavelet decompositions to 4 for both level 1 (VC-2) and 128 (long-gop d...
Definition: dirac.h:49
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:421
epel_weights
static const uint8_t epel_weights[4][4][4]
Definition: diracdec.c:1621
AV_CODEC_ID_DIRAC
@ AV_CODEC_ID_DIRAC
Definition: codec_id.h:168
dirac_decode_picture_header
static int dirac_decode_picture_header(DiracContext *s)
Dirac Specification -> 11.1.1 Picture Header.
Definition: diracdec.c:1999
SliceCoeffs::tot
int tot
Definition: diracdec.c:820
int64_t
long long int64_t
Definition: coverity.c:34
mv
static const int8_t mv[256][2]
Definition: 4xm.c:81
DiracContext::wavelet_idx
unsigned wavelet_idx
Definition: diracdec.c:166
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:266
dirac_unpack_prediction_parameters
static int dirac_unpack_prediction_parameters(DiracContext *s)
Unpack the motion compensation parameters Dirac Specification -> 11.2 Picture prediction data.
Definition: diracdec.c:1105
DIRAC_REF_MASK_REF1
#define DIRAC_REF_MASK_REF1
DiracBlock->ref flags, if set then the block does MC from the given ref.
Definition: diracdec.c:61
DiracContext::avctx
AVCodecContext * avctx
Definition: diracdec.c:138
get_interleaved_ue_golomb
static unsigned get_interleaved_ue_golomb(GetBitContext *gb)
Definition: golomb.h:143
mask
int mask
Definition: mediacodecdec_common.c:154
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:162
AVDiracSeqHeader::color_trc
enum AVColorTransferCharacteristic color_trc
Definition: dirac.h:113
DiracVersionInfo::major
int major
Definition: dirac.h:81
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:389
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
pixdesc.h
SubBand::stride
int stride
Definition: diracdec.c:96
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
DWTPlane
Definition: dirac_dwt.h:37
AVCodecContext::color_trc
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:684
w
uint8_t w
Definition: llviddspenc.c:38
AVPacket::data
uint8_t * data
Definition: packet.h:539
SubBand::width
int width
Definition: cfhd.h:111
DiracContext::lowdelay
struct DiracContext::@103 lowdelay
SubBand::level
int level
Definition: diracdec.c:94
DiracContext::biweight_func
dirac_biweight_func biweight_func
Definition: diracdec.c:234
CTX_SB_F1
#define CTX_SB_F1
Definition: dirac_arith.h:69
CTX_ZERO_BLOCK
@ CTX_ZERO_BLOCK
Definition: dirac_arith.h:58
b
#define b
Definition: input.c:41
DiracContext::perspective_exp
unsigned perspective_exp
Definition: diracdec.c:205
DiracContext::bit_depth
int bit_depth
Definition: diracdec.c:151
decode_lowdelay_slice
static int decode_lowdelay_slice(AVCodecContext *avctx, void *arg)
Dirac Specification -> 13.5.2 Slices.
Definition: diracdec.c:777
FFCodec
Definition: codec_internal.h:127
DiracContext::mpvencdsp
MpegvideoEncDSPContext mpvencdsp
Definition: diracdec.c:139
dirac_biweight_func
void(* dirac_biweight_func)(uint8_t *dst, const uint8_t *src, int stride, int log2_denom, int weightd, int weights, int h)
Definition: diracdsp.h:28
init_planes
static void init_planes(DiracContext *s)
Definition: diracdec.c:1051
dirac_dwt.h
DIRAC_REF_MASK_GLOBAL
#define DIRAC_REF_MASK_GLOBAL
Definition: diracdec.c:63
dirac_arith_init
static AVOnce dirac_arith_init
Definition: diracdec.c:386
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:661
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
DiracContext::delay_frames
DiracFrame * delay_frames[MAX_DELAY+1]
Definition: diracdec.c:240
AVERROR_UNKNOWN
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:73
ff_dirac_qscale_tab
const int32_t ff_dirac_qscale_tab[116]
Definition: diractab.c:34
AVDiracSeqHeader::color_range
enum AVColorRange color_range
Definition: dirac.h:111
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:94
MAX_DELAY
#define MAX_DELAY
Definition: diracdec.c:53
DiracArith
Definition: dirac_arith.h:79
dirac_get_arith_int
static int dirac_get_arith_int(DiracArith *c, int follow_ctx, int data_ctx)
Definition: dirac_arith.h:194
CHECKEDREAD
#define CHECKEDREAD(dst, cond, errmsg)
decode_subband
static void decode_subband(const DiracContext *s, GetBitContext *gb, int quant, int slice_x, int slice_y, int bits_end, const SubBand *b1, const SubBand *b2)
Definition: diracdec.c:725
DiracContext::mcscratch
uint8_t * mcscratch
Definition: diracdec.c:225
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:514
AVDiracSeqHeader::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: dirac.h:108
DiracContext::current_picture
DiracFrame * current_picture
Definition: diracdec.c:236
alloc_buffers
static int alloc_buffers(DiracContext *s, int stride)
Definition: diracdec.c:325
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
DiracContext::zrs
int zrs[2][2]
Definition: diracdec.c:202
diractab.h
ff_dirac_default_qmat
const uint8_t ff_dirac_default_qmat[7][4][4]
Definition: diractab.c:24
DiracFrame
Definition: diracdec.c:76
CTX_DC_F1
#define CTX_DC_F1
Definition: dirac_arith.h:76
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:566
golomb.h
exp golomb vlc stuff
decode_subband_arith
static int decode_subband_arith(AVCodecContext *avctx, void *b)
Definition: diracdec.c:642
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:335
AVDiracSeqHeader::level
int level
Definition: dirac.h:105
SubBand::parent
struct SubBand * parent
Definition: diracdec.c:102
subband_lh
@ subband_lh
Definition: diracdec.c:247
codeblock
static int codeblock(const DiracContext *s, SubBand *b, GetBitContext *gb, DiracArith *c, int left, int right, int top, int bottom, int blockcnt_one, int is_arith)
Decode the coeffs in the rectangle defined by left, right, top, bottom [DIRAC_STD] 13....
Definition: diracdec.c:489
DiracContext::ld_picture
int ld_picture
Definition: diracdec.c:159
mx
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t mx
Definition: dsp.h:53
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
b1
static double b1(void *priv, double x, double y)
Definition: vf_xfade.c:2034
DiracContext::edge_emu_buffer
uint8_t * edge_emu_buffer[4]
Definition: diracdec.c:221
AVDiracSeqHeader::version
DiracVersionInfo version
Definition: dirac.h:116
DiracContext::num_refs
int num_refs
Definition: diracdec.c:162
ff_spatial_idwt_init
int ff_spatial_idwt_init(DWTContext *d, DWTPlane *p, enum dwt_type type, int decomposition_count, int bit_depth)
Definition: dirac_dwt.c:36
DiracContext::sbheight
int sbheight
Definition: diracdec.c:216
AVCodecContext::thread_count
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
Definition: avcodec.h:1593
FFSIGN
#define FFSIGN(a)
Definition: common.h:75
GetBitContext
Definition: get_bits.h:108
DiracContext::blheight
int blheight
Definition: diracdec.c:214
val
static double val(void *priv, double ch)
Definition: aeval.c:77
dirac_unpack_block_motion_data
static int dirac_unpack_block_motion_data(DiracContext *s)
Dirac Specification ->
Definition: diracdec.c:1503
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:3198
decode_component
static int decode_component(DiracContext *s, int comp)
Dirac Specification -> [DIRAC_STD] 13.4.1 core_transform_data()
Definition: diracdec.c:659
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:39
DiracSlice::gb
GetBitContext gb
Definition: diracdec.c:131
pred_sbsplit
static int pred_sbsplit(uint8_t *sbsplit, int stride, int x, int y)
Definition: diracdec.c:1329
pred_block_dc
static void pred_block_dc(DiracBlock *block, int stride, int x, int y)
Definition: diracdec.c:1359
dirac.h
decode_subband_internal
static av_always_inline int decode_subband_internal(const DiracContext *s, SubBand *b, int is_arith)
Dirac Specification -> 13.4.2 Non-skipped subbands.
Definition: diracdec.c:599
DiracContext::codeblock
struct DiracContext::@102 codeblock[MAX_DWT_LEVELS+1]
select_dsp_funcs
static void select_dsp_funcs(DiracContext *s, int width, int height, int xblen, int yblen)
Definition: diracdec.c:1811
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:150
diracdsp.h
DiracContext::hq_picture
int hq_picture
Definition: diracdec.c:158
DiracSlice::bytes
int bytes
Definition: diracdec.c:134
quant
static const uint8_t quant[64]
Definition: vmixdec.c:71
dirac_weight_func
void(* dirac_weight_func)(uint8_t *block, int stride, int log2_denom, int weight, int h)
Definition: diracdsp.h:27
DiracContext::globalmc
struct DiracContext::@105 globalmc[2]
AVCodecContext::color_primaries
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:677
DiracContext::perspective
int perspective[2]
Definition: diracdec.c:203
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:209
AVDiracSeqHeader::bit_depth
int bit_depth
Definition: dirac.h:117
av_cold
#define av_cold
Definition: attributes.h:90
DiracContext::sbwidth
int sbwidth
Definition: diracdec.c:215
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:545
coeff_unpack_golomb
static int coeff_unpack_golomb(GetBitContext *gb, int qfactor, int qoffset)
Definition: diracdec.c:441
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:640
DiracContext::chroma_y_shift
int chroma_y_shift
Definition: diracdec.c:149
ff_dirac_decoder
const FFCodec ff_dirac_decoder
Definition: diracdec.c:2362
ROLLOFF
#define ROLLOFF(i)
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:311
s
#define s(width, name)
Definition: cbs_vp9.c:198
DiracSlice::slice_x
int slice_x
Definition: diracdec.c:132
DiracContext::zero_res
int zero_res
Definition: diracdec.c:154
DiracContext::mctmp
uint16_t * mctmp
Definition: diracdec.c:224
Plane::xbsep
uint8_t xbsep
Definition: diracdec.c:120
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:431
MAX_REFERENCE_FRAMES
#define MAX_REFERENCE_FRAMES
The spec limits this to 3 for frame coding, but in practice can be as high as 6.
Definition: diracdec.c:52
ff_dirac_init_arith_decoder
void ff_dirac_init_arith_decoder(DiracArith *c, GetBitContext *gb, int length)
Definition: dirac_arith.c:96
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
AVDiracSeqHeader::profile
int profile
Definition: dirac.h:104
CTX_DELTA_Q_F
@ CTX_DELTA_Q_F
Definition: dirac_arith.h:59
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:230
DiracContext::version
DiracVersionInfo version
Definition: diracdec.c:142
decode.h
get_bits.h
DiracContext::size_scaler
uint64_t size_scaler
Definition: diracdec.c:197
DWTPlane::stride
int stride
Definition: dirac_dwt.h:40
AVCodecContext::max_pixels
int64_t max_pixels
The number of pixels per image to maximally accept.
Definition: avcodec.h:1945
Plane::idwt
DWTPlane idwt
Definition: diracdec.c:110
bands
static const float bands[]
Definition: af_superequalizer.c:56
DiracContext::seen_sequence_header
int seen_sequence_header
Definition: diracdec.c:145
DiracContext::diracdsp
DiracDSPContext diracdsp
Definition: diracdec.c:141
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:296
my
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t my
Definition: dsp.h:53
arg
const char * arg
Definition: jacosubdec.c:67
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
if
if(ret)
Definition: filter_design.txt:179
DiracContext::weight
int16_t weight[2]
Definition: diracdec.c:210
AVDiracSeqHeader::framerate
AVRational framerate
Definition: dirac.h:107
av_realloc_f
#define av_realloc_f(p, o, n)
Definition: tableprint_vlc.h:32
DiracContext::slice_params_buf
DiracSlice * slice_params_buf
Definition: diracdec.c:182
DiracContext::edge_emu_buffer_base
uint8_t * edge_emu_buffer_base
Definition: diracdec.c:222
dirac_get_arith_bit
static int dirac_get_arith_bit(DiracArith *c, int ctx)
Definition: dirac_arith.h:137
mc_row
static void mc_row(DiracContext *s, DiracBlock *block, uint16_t *mctmp, int plane, int dsty)
Definition: diracdec.c:1795
ff_dirac_qoffset_inter_tab
const int ff_dirac_qoffset_inter_tab[122]
Definition: diractab.c:72
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
DIVRNDUP
#define DIVRNDUP(a, b)
Definition: diracdec.c:74
decode_hq_slice_row
static int decode_hq_slice_row(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
Definition: diracdec.c:918
DiracContext::weight_func
dirac_weight_func weight_func
Definition: diracdec.c:233
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
dirac_decode_frame_internal
static int dirac_decode_frame_internal(DiracContext *s)
Dirac Specification -> 13.0 Transform data syntax.
Definition: diracdec.c:1872
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:701
SliceCoeffs::tot_v
int tot_v
Definition: diracdec.c:819
decode_lowdelay
static int decode_lowdelay(DiracContext *s)
Dirac Specification -> 13.5.1 low_delay_transform_data()
Definition: diracdec.c:933
subband_coeffs
static int subband_coeffs(const DiracContext *s, int x, int y, int p, SliceCoeffs c[MAX_DWT_LEVELS])
Definition: diracdec.c:823
DiracContext::dc_prediction
int dc_prediction
Definition: diracdec.c:160
DiracContext::wavelet_depth
unsigned wavelet_depth
Definition: diracdec.c:165
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
EDGE_WIDTH
#define EDGE_WIDTH
Definition: diracdec.c:47
AVDiracSeqHeader::colorspace
enum AVColorSpace colorspace
Definition: dirac.h:114
DiracSlice::slice_y
int slice_y
Definition: diracdec.c:133
DiracContext::ref_frames
DiracFrame * ref_frames[MAX_REFERENCE_FRAMES+1]
Definition: diracdec.c:239
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:388
DiracContext::old_delta_quant
unsigned old_delta_quant
schroedinger older than 1.0.8 doesn't store quant delta if only one codebook exists in a band
Definition: diracdec.c:172
DiracContext::highquality
struct DiracContext::@104 highquality
ff_set_sar
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
Definition: utils.c:109
dirac_get_arith_uint
static int dirac_get_arith_uint(DiracArith *c, int follow_ctx, int data_ctx)
Definition: dirac_arith.h:178
CTX_MV_F1
#define CTX_MV_F1
Definition: dirac_arith.h:74
DIRAC_MAX_QUANT_INDEX
#define DIRAC_MAX_QUANT_INDEX
Definition: diractab.h:41
DIRAC_PCODE_AUX
@ DIRAC_PCODE_AUX
Definition: dirac.h:64
AVCodecContext::level
int level
Encoding level descriptor.
Definition: avcodec.h:1794
DiracContext::thread_buf_size
int thread_buf_size
Definition: diracdec.c:180
subband_ll
@ subband_ll
Definition: diracdec.c:245
AVOnce
#define AVOnce
Definition: thread.h:202
DiracContext::is_arith
int is_arith
Definition: diracdec.c:155
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
DiracContext::width
unsigned width
Definition: diracdec.c:186
weight
static int weight(int i, int blen, int offset)
Definition: diracdec.c:1563
ff_spatial_idwt_slice2
void ff_spatial_idwt_slice2(DWTContext *d, int y)
Definition: dirac_dwt.c:69
dirac_subband
dirac_subband
Definition: diracdec.c:244
DELAYED_PIC_REF
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output.
Definition: diracdec.c:69
add_frame
static int add_frame(DiracFrame *framelist[], int maxframes, DiracFrame *frame)
Definition: diracdec.c:276
INTRA_DC_PRED
#define INTRA_DC_PRED(n, type)
Dirac Specification -> 13.3 intra_dc_prediction(band)
Definition: diracdec.c:571
f
f
Definition: af_crystalizer.c:122
dirac_decode_end
static av_cold int dirac_decode_end(AVCodecContext *avctx)
Definition: diracdec.c:424
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:491
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1697
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:368
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
Plane::yoffset
uint8_t yoffset
Definition: diracdec.c:124
Plane::yblen
uint8_t yblen
Definition: diracdec.c:118
AVPacket::size
int size
Definition: packet.h:540
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... ..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
height
#define height
Definition: dsp.h:85
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:388
codec_internal.h
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem_internal.h:109
Plane::height
int height
Definition: cfhd.h:119
AV_WN32
#define AV_WN32(p, v)
Definition: intreadwrite.h:372
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:83
DiracContext::gb
GetBitContext gb
Definition: diracdec.c:143
init_obmc_weight_row
static void init_obmc_weight_row(Plane *p, uint8_t *obmc_weight, int stride, int left, int right, int wy)
Definition: diracdec.c:1575
DiracContext::codeblock_mode
unsigned codeblock_mode
Definition: diracdec.c:173
size
int size
Definition: twinvq_data.h:10344
DiracContext::chroma_x_shift
int chroma_x_shift
Definition: diracdec.c:148
SubBand::length
unsigned length
Definition: diracdec.c:105
DiracContext::seq
AVDiracSeqHeader seq
Definition: diracdec.c:144
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
DiracContext::bytes
AVRational bytes
Definition: diracdec.c:191
dirac_vlc.h
DiracContext::weight_log2denom
unsigned weight_log2denom
Definition: diracdec.c:211
SubBand
Definition: cfhd.h:108
DiracContext::thread_buf
uint8_t * thread_buf
Definition: diracdec.c:178
split
static char * split(char *message, char delim)
Definition: af_channelmap.c:89
b2
static double b2(void *priv, double x, double y)
Definition: vf_xfade.c:2035
Plane::width
int width
Definition: cfhd.h:118
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:114
dirac_get_se_golomb
static int dirac_get_se_golomb(GetBitContext *gb)
Definition: golomb.h:359
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
add_dc
static void add_dc(uint16_t *dst, int dc, int stride, uint8_t *obmc_weight, int xblen, int yblen)
Definition: diracdec.c:1743
dirac_decode_flush
static av_cold void dirac_decode_flush(AVCodecContext *avctx)
Definition: diracdec.c:416
DIRAC_PCODE_SEQ_HEADER
@ DIRAC_PCODE_SEQ_HEADER
Definition: dirac.h:62
get_buffer_with_edge
static int get_buffer_with_edge(AVCodecContext *avctx, AVFrame *f, int flags)
Definition: diracdec.c:1969
pred_block_mode
static int pred_block_mode(DiracBlock *block, int stride, int x, int y, int refmask)
Definition: diracdec.c:1343
subband_nb
@ subband_nb
Definition: diracdec.c:249
init_obmc_weights
static void init_obmc_weights(DiracContext *s, Plane *p, int by)
Definition: diracdec.c:1608
Plane::stride
ptrdiff_t stride
Definition: cfhd.h:120
DiracContext::num_x
unsigned num_x
Definition: diracdec.c:175
DiracContext::prefix_bytes
unsigned prefix_bytes
Definition: diracdec.c:196
MpegvideoEncDSPContext
Definition: mpegvideoencdsp.h:32
DiracBlock
Definition: diracdec.c:85
dirac_decode_frame
static int dirac_decode_frame(AVCodecContext *avctx, AVFrame *picture, int *got_frame, AVPacket *pkt)
Definition: diracdec.c:2267
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
DiracContext::low_delay
int low_delay
Definition: diracdec.c:157
pred_mv
static void pred_mv(DiracBlock *block, int stride, int x, int y, int ref)
Definition: diracdec.c:1392
UNPACK_ARITH
#define UNPACK_ARITH(n, type)
Definition: diracdec.c:452
alloc_sequence_buffers
static int alloc_sequence_buffers(DiracContext *s)
Definition: diracdec.c:287
DiracDSPContext
Definition: diracdsp.h:30
DIRAC_REF_MASK_REF2
#define DIRAC_REF_MASK_REF2
Definition: diracdec.c:62
PARSE_VALUES
#define PARSE_VALUES(type, x, gb, ebits, buf1, buf2)
Definition: diracdec.c:713
decode_block_params
static void decode_block_params(DiracContext *s, DiracArith arith[8], DiracBlock *block, int stride, int x, int y)
Definition: diracdec.c:1444
decode_hq_slice
static int decode_hq_slice(const DiracContext *s, DiracSlice *slice, uint8_t *tmp_buf)
VC-2 Specification -> 13.5.3 hq_slice(sx,sy)
Definition: diracdec.c:844
ff_dirac_golomb_read_16bit
int ff_dirac_golomb_read_16bit(const uint8_t *buf, int bytes, uint8_t *_dst, int coeffs)
Definition: dirac_vlc.c:1095
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
SubBand::ibuf
uint8_t * ibuf
Definition: diracdec.c:101
DiracContext::globalmc_flag
int globalmc_flag
Definition: diracdec.c:161
CTX_PMODE_REF2
#define CTX_PMODE_REF2
Definition: dirac_arith.h:72
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
DiracFrame::avframe
AVFrame * avframe
Definition: diracdec.c:77
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:610
dirac_arith.h
ff_dirac_init_arith_tables
av_cold void ff_dirac_init_arith_tables(void)
Definition: dirac_arith.c:86
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
MAX_BLOCKSIZE
#define MAX_BLOCKSIZE
Definition: diracdec.c:56
mc_subpel
static int mc_subpel(DiracContext *s, DiracBlock *block, const uint8_t *src[5], int x, int y, int ref, int plane)
For block x,y, determine which of the hpel planes to do bilinear interpolation from and set src[] to ...
Definition: diracdec.c:1648
AVCodecContext::height
int height
Definition: avcodec.h:624
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:663
DiracSlice
Definition: diracdec.c:130
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
interpolate_refplane
static int interpolate_refplane(DiracContext *s, DiracFrame *ref, int plane, int width, int height)
Definition: diracdec.c:1831
DWTContext
Definition: dirac_dwt.h:54
SliceCoeffs::tot_h
int tot_h
Definition: diracdec.c:818
DiracContext::core_syntax
int core_syntax
Definition: diracdec.c:156
DiracVersionInfo::minor
int minor
Definition: dirac.h:82
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
DiracContext::mv_precision
uint8_t mv_precision
Definition: diracdec.c:209
DiracContext::height
unsigned height
Definition: diracdec.c:187
SubBand::coeff_data
const uint8_t * coeff_data
Definition: diracdec.c:106
AVDiracSeqHeader
Definition: dirac.h:85
mid_pred
#define mid_pred
Definition: mathops.h:96
SliceCoeffs::top
int top
Definition: diracdec.c:817
ret
ret
Definition: filter_design.txt:187
pred
static const float pred[4]
Definition: siprdata.h:259
Plane::xoffset
uint8_t xoffset
Definition: diracdec.c:123
SliceCoeffs
Definition: diracdec.c:815
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
SubBand::pshift
int pshift
Definition: diracdec.c:99
DiracContext::pan_tilt
int pan_tilt[2]
Definition: diracdec.c:201
SubBand::quant
int quant
Definition: diracdec.c:100
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:561
left
Half-pel interpolation filter coefficients, range coder state transitions, neighbouring-block rules and motion vector prediction for the Snow codec; see snow.txt for the full description.
Definition: snow.txt:386
remove_frame
static DiracFrame * remove_frame(DiracFrame *framelist[], unsigned picnum)
Definition: diracdec.c:258
U
#define U(x)
Definition: vpx_arith.h:37
ff_dirac_golomb_read_32bit
int ff_dirac_golomb_read_32bit(const uint8_t *buf, int bytes, uint8_t *_dst, int coeffs)
Definition: dirac_vlc.c:1115
DIRAC_PCODE_END_SEQ
@ DIRAC_PCODE_END_SEQ
Definition: dirac.h:63
AVDiracSeqHeader::pix_fmt
enum AVPixelFormat pix_fmt
Definition: dirac.h:110
decode_subband_golomb
static int decode_subband_golomb(AVCodecContext *avctx, void *arg)
Definition: diracdec.c:648
DiracContext
Definition: diracdec.c:137
ff_dirac_qoffset_intra_tab
const int32_t ff_dirac_qoffset_intra_tab[120]
Definition: diractab.c:53
CTX_SB_DATA
#define CTX_SB_DATA
Definition: dirac_arith.h:70
DiracContext::add_obmc
void(* add_obmc)(uint16_t *dst, const uint8_t *src, int stride, const uint8_t *obmc_weight, int yblen)
Definition: diracdec.c:232
AVCodecContext
main external API structure.
Definition: avcodec.h:451
EDGE_TOP
#define EDGE_TOP
Definition: mpegvideoencdsp.h:29
AVCodecContext::execute
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:1623
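As an illustration only (the helper names decode_one_band and decode_all_bands are hypothetical, not the decoder's actual call sites), a per-band worker matching the func signature above could be dispatched like this:

/* Hypothetical worker: one independent job, handed one SubBand at a time. */
static int decode_one_band(AVCodecContext *avctx, void *arg)
{
    SubBand *band = arg;
    /* ... entropy-decode this band's coefficients ... */
    (void)avctx; (void)band;
    return 0;                 /* per-job status, written into *ret when ret != NULL */
}

/* Dispatch count jobs; execute() hands each call the next size-byte slot of bands[]. */
static int decode_all_bands(AVCodecContext *avctx, SubBand *bands, int count)
{
    return avctx->execute(avctx, decode_one_band, bands, NULL, count, sizeof(*bands));
}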
subband_hl
@ subband_hl
Definition: diracdec.c:246
DiracContext::all_frames
DiracFrame all_frames[MAX_FRAMES]
Definition: diracdec.c:241
free_sequence_buffers
static av_cold void free_sequence_buffers(DiracContext *s)
Definition: diracdec.c:354
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1650
mpegvideoencdsp.h
CALC_PADDING
#define CALC_PADDING(size, depth)
Definition: diracdec.c:71
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:117
DiracContext::threads_num_buf
int threads_num_buf
Definition: diracdec.c:179
DiracContext::vdsp
VideoDSPContext vdsp
Definition: diracdec.c:140
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
Definition: codec.h:76
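A caller-side sketch of what this capability implies (standard libavcodec draining, assuming avctx and frame were allocated by the caller): the decoder is flushed with a NULL packet and then drained until it reports EOF:

/* Enter draining mode, then pull out the frames the decoder was holding back. */
int ret = avcodec_send_packet(avctx, NULL);
while (ret >= 0) {
    ret = avcodec_receive_frame(avctx, frame);   /* AVERROR_EOF once fully drained */
    if (ret >= 0) {
        /* ... use the delayed frame ... */
        av_frame_unref(frame);
    }
}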
CTX_GLOBAL_BLOCK
#define CTX_GLOBAL_BLOCK
Definition: dirac_arith.h:73
Plane
Definition: cfhd.h:117
DiracFrame::reference
int reference
Definition: diracdec.c:81
divide3
static int divide3(int x)
Definition: diracdec.c:253
VideoDSPContext
Definition: videodsp.h:40
Plane::xblen
uint8_t xblen
Definition: diracdec.c:117
DiracContext::quant
uint8_t quant[MAX_DWT_LEVELS][4]
Definition: diracdec.c:192
DiracContext::plane
Plane plane[3]
Definition: diracdec.c:147
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
mem.h
Plane::band
SubBand band[DWT_LEVELS_3D][4]
Definition: cfhd.h:130
propagate_block_data
static void propagate_block_data(DiracBlock *block, int stride, int size)
Copies the current block to the other blocks covered by the current superblock split mode.
Definition: diracdec.c:1484
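A minimal sketch of the idea only (not the actual implementation): when a superblock is not split further, the single decoded block is replicated over the whole size x size group, with stride giving the number of DiracBlock entries per row:

/* Sketch: copy block[0] into every other block of the size x size group. */
static void propagate_sketch(DiracBlock *block, int stride, int size)
{
    for (int y = 0; y < size; y++)
        for (int x = 0; x < size; x++)
            if (x || y)                      /* block[0] already holds the data */
                block[y * stride + x] = block[0];
}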
DiracContext::buffer_stride
int buffer_stride
Definition: diracdec.c:226
DiracContext::slice_params_num_buf
int slice_params_num_buf
Definition: diracdec.c:183
Plane::ybsep
uint8_t ybsep
Definition: diracdec.c:121
AVDiracSeqHeader::width
unsigned width
Definition: dirac.h:86
CTX_PMODE_REF1
#define CTX_PMODE_REF1
Definition: dirac_arith.h:71
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
AVPacket
This structure stores compressed data.
Definition: packet.h:516
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:478
global_mv
static void global_mv(DiracContext *s, DiracBlock *block, int x, int y, int ref)
Definition: diracdec.c:1428
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
DiracContext::avg_pixels_tab
void(* avg_pixels_tab[4])(uint8_t *dst, const uint8_t *src[5], int stride, int h)
Definition: diracdec.c:231
videodsp.h
ff_mpegvideoencdsp_init
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
Definition: mpegvideoencdsp.c:253
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
DiracContext::ref_pics
DiracFrame * ref_pics[2]
Definition: diracdec.c:237
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:624
int32_t
int32_t
Definition: audioconvert.c:56
DiracContext::sbsplit
uint8_t * sbsplit
Definition: diracdec.c:218
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:482
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each picture line.
Definition: frame.h:434
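For example (a generic caller-side sketch, assuming an 8-bit pixel format, not Dirac-specific): each row of the luma plane is addressed through linesize[0], which may be larger than the visible width and may be negative for bottom-up layouts:

/* Sketch: visit every visible luma sample of a decoded 8-bit frame. */
for (int y = 0; y < frame->height; y++) {
    const uint8_t *row = frame->data[0] + (ptrdiff_t)y * frame->linesize[0];
    for (int x = 0; x < frame->width; x++) {
        uint8_t sample = row[x];
        (void)sample;                        /* ... process the sample ... */
    }
}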
AVDiracSeqHeader::height
unsigned height
Definition: dirac.h:87
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
CTX_DELTA_Q_DATA
@ CTX_DELTA_Q_DATA
Definition: dirac_arith.h:60
dirac_decode_data_unit
static int dirac_decode_data_unit(AVCodecContext *avctx, const uint8_t *buf, int size)
Definition: diracdec.c:2122
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
dirac_decode_init
static av_cold int dirac_decode_init(AVCodecContext *avctx)
Definition: diracdec.c:388
h
h
Definition: vp9dsp_template.c:2070
ff_diracdsp_init
av_cold void ff_diracdsp_init(DiracDSPContext *c)
Definition: diracdsp.c:221
dirac_unpack_idwt_params
static int dirac_unpack_idwt_params(DiracContext *s)
Dirac Specification -> 11.3 Wavelet transform data.
Definition: diracdec.c:1233
DiracContext::zrs_exp
unsigned zrs_exp
Definition: diracdec.c:204
width
#define width
Definition: dsp.h:85
DiracContext::obmc_weight
uint8_t obmc_weight[3][MAX_BLOCKSIZE *MAX_BLOCKSIZE]
Definition: diracdec.c:228
DWTPlane::width
int width
Definition: dirac_dwt.h:38
DiracContext::frame_number
int64_t frame_number
Definition: diracdec.c:146
DiracArith::error
int error
Definition: dirac_arith.h:88
AVDiracSeqHeader::color_primaries
enum AVColorPrimaries color_primaries
Definition: dirac.h:112
AVCodecContext::execute2
int(* execute2)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count)
The codec may call this to execute several independent things.
Definition: avcodec.h:1642
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
src
#define src
Definition: vp8dsp.c:248
CTX_DC_DATA
#define CTX_DC_DATA
Definition: dirac_arith.h:77
get_delayed_pic
static int get_delayed_pic(DiracContext *s, AVFrame *picture, int *got_frame)
Definition: diracdec.c:2087
SubBand::height
int height
Definition: cfhd.h:113
DiracContext::pshift
int pshift
Definition: diracdec.c:152
MAX_FRAMES
#define MAX_FRAMES
Definition: diracdec.c:54
SubBand::orientation
int orientation
Definition: diracdec.c:95
init_obmc_weight
static void init_obmc_weight(Plane *p, uint8_t *obmc_weight, int stride, int left, int right, int top, int bottom)
Definition: diracdec.c:1589
block_mc
static void block_mc(DiracContext *s, DiracBlock *block, uint16_t *mctmp, uint8_t *obmc_weight, int plane, int dstx, int dsty)
Definition: diracdec.c:1759
DWTPlane::height
int height
Definition: dirac_dwt.h:39
AV_WN16
#define AV_WN16(p, v)
Definition: intreadwrite.h:368