FFmpeg
vp9.c
Go to the documentation of this file.
1 /*
2  * VP9 compatible video decoder
3  *
4  * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
5  * Copyright (C) 2013 Clément Bœsch <u pkh me>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include "config_components.h"
25 
26 #include "avcodec.h"
27 #include "codec_internal.h"
28 #include "decode.h"
29 #include "get_bits.h"
30 #include "hwaccel_internal.h"
31 #include "hwconfig.h"
32 #include "profiles.h"
33 #include "progressframe.h"
34 #include "libavutil/refstruct.h"
35 #include "thread.h"
36 #include "pthread_internal.h"
37 
38 #include "videodsp.h"
39 #include "vp89_rac.h"
40 #include "vp9.h"
41 #include "vp9data.h"
42 #include "vp9dec.h"
43 #include "vpx_rac.h"
44 #include "libavutil/attributes.h"
45 #include "libavutil/avassert.h"
46 #include "libavutil/mem.h"
47 #include "libavutil/pixdesc.h"
49 
50 #define VP9_SYNCCODE 0x498342
51 
52 #if HAVE_THREADS
/* Offsets of the pthread primitives embedded in VP9Context (progress mutex
 * and condition variable), for the generic init/free helpers declared in
 * pthread_internal.h. */
DEFINE_OFFSET_ARRAY(VP9Context, vp9_context, pthread_init_cnt,
                    (offsetof(VP9Context, progress_mutex)),
                    (offsetof(VP9Context, progress_cond)));
56 
57 static int vp9_alloc_entries(AVCodecContext *avctx, int n) {
58  VP9Context *s = avctx->priv_data;
59 
60  if (avctx->active_thread_type & FF_THREAD_SLICE) {
61  if (s->entries)
62  av_freep(&s->entries);
63 
64  s->entries = av_malloc_array(n, sizeof(atomic_int));
65  if (!s->entries)
66  return AVERROR(ENOMEM);
67  }
68  return 0;
69 }
70 
/* Advance the progress counter s->entries[field] by n and wake any thread
 * blocked in vp9_await_tile_progress(). The add uses release ordering to
 * pair with the acquire load on the waiter's fast path; the mutex is held
 * across the update and signal so a waiter already holding the lock cannot
 * miss the wakeup. */
static void vp9_report_tile_progress(VP9Context *s, int field, int n) {
    pthread_mutex_lock(&s->progress_mutex);
    atomic_fetch_add_explicit(&s->entries[field], n, memory_order_release);
    pthread_cond_signal(&s->progress_cond);
    pthread_mutex_unlock(&s->progress_mutex);
}
77 
78 static void vp9_await_tile_progress(VP9Context *s, int field, int n) {
79  if (atomic_load_explicit(&s->entries[field], memory_order_acquire) >= n)
80  return;
81 
82  pthread_mutex_lock(&s->progress_mutex);
83  while (atomic_load_explicit(&s->entries[field], memory_order_relaxed) != n)
84  pthread_cond_wait(&s->progress_cond, &s->progress_mutex);
85  pthread_mutex_unlock(&s->progress_mutex);
86 }
87 #else
88 static int vp9_alloc_entries(AVCodecContext *avctx, int n) { return 0; }
89 #endif
90 
92 {
93  av_freep(&td->b_base);
94  av_freep(&td->block_base);
96 }
97 
98 static void vp9_frame_unref(VP9Frame *f)
99 {
101  av_refstruct_unref(&f->header_ref);
102  av_refstruct_unref(&f->extradata);
103  av_refstruct_unref(&f->hwaccel_picture_private);
104  f->segmentation_map = NULL;
105 }
106 
108 {
109  VP9Context *s = avctx->priv_data;
110  int ret, sz;
111 
113  if (ret < 0)
114  return ret;
115 
116  sz = 64 * s->sb_cols * s->sb_rows;
117  if (sz != s->frame_extradata_pool_size) {
118  av_refstruct_pool_uninit(&s->frame_extradata_pool);
119  s->frame_extradata_pool = av_refstruct_pool_alloc(sz * (1 + sizeof(VP9mvrefPair)),
121  if (!s->frame_extradata_pool) {
122  s->frame_extradata_pool_size = 0;
123  ret = AVERROR(ENOMEM);
124  goto fail;
125  }
126  s->frame_extradata_pool_size = sz;
127  }
128  f->extradata = av_refstruct_pool_get(s->frame_extradata_pool);
129  if (!f->extradata) {
130  ret = AVERROR(ENOMEM);
131  goto fail;
132  }
133 
134  f->segmentation_map = f->extradata;
135  f->mv = (VP9mvrefPair *) ((char*)f->extradata + sz);
136 
137  ret = ff_hwaccel_frame_priv_alloc(avctx, &f->hwaccel_picture_private);
138  if (ret < 0)
139  goto fail;
140 
141  return 0;
142 
143 fail:
145  return ret;
146 }
147 
149 {
150  av_refstruct_replace(&dst->header_ref, src->header_ref);
151  dst->frame_header = src->frame_header;
152 
153  ff_progress_frame_replace(&dst->tf, &src->tf);
154 
155  av_refstruct_replace(&dst->extradata, src->extradata);
156 
157  dst->segmentation_map = src->segmentation_map;
158  dst->mv = src->mv;
159  dst->uses_2pass = src->uses_2pass;
160 
161  av_refstruct_replace(&dst->hwaccel_picture_private,
162  src->hwaccel_picture_private);
163 }
164 
165 static int update_size(AVCodecContext *avctx, int w, int h)
166 {
167 #define HWACCEL_MAX (CONFIG_VP9_DXVA2_HWACCEL + \
168  CONFIG_VP9_D3D11VA_HWACCEL * 2 + \
169  CONFIG_VP9_D3D12VA_HWACCEL + \
170  CONFIG_VP9_NVDEC_HWACCEL + \
171  CONFIG_VP9_VAAPI_HWACCEL + \
172  CONFIG_VP9_VDPAU_HWACCEL + \
173  CONFIG_VP9_VIDEOTOOLBOX_HWACCEL + \
174  CONFIG_VP9_VULKAN_HWACCEL)
175  enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
176  VP9Context *s = avctx->priv_data;
177  uint8_t *p;
178  int bytesperpixel = s->bytesperpixel, ret, cols, rows;
179  int lflvl_len, i;
180  int changed = 0;
181 
182  av_assert0(w > 0 && h > 0);
183 
184  if (!(s->pix_fmt == s->gf_fmt && w == s->w && h == s->h)) {
185  changed = 1;
186  if ((ret = ff_set_dimensions(avctx, w, h)) < 0)
187  return ret;
188 
189  switch (s->pix_fmt) {
190  case AV_PIX_FMT_YUV420P:
192 #if CONFIG_VP9_DXVA2_HWACCEL
193  *fmtp++ = AV_PIX_FMT_DXVA2_VLD;
194 #endif
195 #if CONFIG_VP9_D3D11VA_HWACCEL
196  *fmtp++ = AV_PIX_FMT_D3D11VA_VLD;
197  *fmtp++ = AV_PIX_FMT_D3D11;
198 #endif
199 #if CONFIG_VP9_D3D12VA_HWACCEL
200  *fmtp++ = AV_PIX_FMT_D3D12;
201 #endif
202 #if CONFIG_VP9_NVDEC_HWACCEL
203  *fmtp++ = AV_PIX_FMT_CUDA;
204 #endif
205 #if CONFIG_VP9_VAAPI_HWACCEL
206  *fmtp++ = AV_PIX_FMT_VAAPI;
207 #endif
208 #if CONFIG_VP9_VDPAU_HWACCEL
209  *fmtp++ = AV_PIX_FMT_VDPAU;
210 #endif
211 #if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
212  *fmtp++ = AV_PIX_FMT_VIDEOTOOLBOX;
213 #endif
214 #if CONFIG_VP9_VULKAN_HWACCEL
215  *fmtp++ = AV_PIX_FMT_VULKAN;
216 #endif
217  break;
219 #if CONFIG_VP9_NVDEC_HWACCEL
220  *fmtp++ = AV_PIX_FMT_CUDA;
221 #endif
222 #if CONFIG_VP9_VAAPI_HWACCEL
223  *fmtp++ = AV_PIX_FMT_VAAPI;
224 #endif
225 #if CONFIG_VP9_VDPAU_HWACCEL
226  *fmtp++ = AV_PIX_FMT_VDPAU;
227 #endif
228 #if CONFIG_VP9_VULKAN_HWACCEL
229  *fmtp++ = AV_PIX_FMT_VULKAN;
230 #endif
231  break;
232  case AV_PIX_FMT_YUV444P:
235 #if CONFIG_VP9_VAAPI_HWACCEL
236  *fmtp++ = AV_PIX_FMT_VAAPI;
237 #endif
238 #if CONFIG_VP9_VULKAN_HWACCEL
239  *fmtp++ = AV_PIX_FMT_VULKAN;
240 #endif
241  break;
242  case AV_PIX_FMT_GBRP:
243  case AV_PIX_FMT_GBRP10:
244  case AV_PIX_FMT_GBRP12:
245 #if CONFIG_VP9_VAAPI_HWACCEL
246  *fmtp++ = AV_PIX_FMT_VAAPI;
247 #endif
248 #if CONFIG_VP9_VULKAN_HWACCEL
249  *fmtp++ = AV_PIX_FMT_VULKAN;
250 #endif
251  break;
252  }
253 
254  *fmtp++ = s->pix_fmt;
255  *fmtp = AV_PIX_FMT_NONE;
256 
257  ret = ff_get_format(avctx, pix_fmts);
258  if (ret < 0)
259  return ret;
260 
261  avctx->pix_fmt = ret;
262  s->gf_fmt = s->pix_fmt;
263  s->w = w;
264  s->h = h;
265  }
266 
267  cols = (w + 7) >> 3;
268  rows = (h + 7) >> 3;
269 
270  if (s->intra_pred_data[0] && cols == s->cols && rows == s->rows && s->pix_fmt == s->last_fmt)
271  return changed;
272 
273  s->last_fmt = s->pix_fmt;
274  s->sb_cols = (w + 63) >> 6;
275  s->sb_rows = (h + 63) >> 6;
276  s->cols = (w + 7) >> 3;
277  s->rows = (h + 7) >> 3;
278  lflvl_len = avctx->active_thread_type == FF_THREAD_SLICE ? s->sb_rows : 1;
279 
280 #define assign(var, type, n) var = (type) p; p += s->sb_cols * (n) * sizeof(*var)
281  av_freep(&s->intra_pred_data[0]);
282  // FIXME we slightly over-allocate here for subsampled chroma, but a little
283  // bit of padding shouldn't affect performance...
284  p = av_malloc(s->sb_cols * (128 + 192 * bytesperpixel +
285  lflvl_len * sizeof(*s->lflvl) + 16 * sizeof(*s->above_mv_ctx)));
286  if (!p)
287  return AVERROR(ENOMEM);
288  assign(s->intra_pred_data[0], uint8_t *, 64 * bytesperpixel);
289  assign(s->intra_pred_data[1], uint8_t *, 64 * bytesperpixel);
290  assign(s->intra_pred_data[2], uint8_t *, 64 * bytesperpixel);
291  assign(s->above_y_nnz_ctx, uint8_t *, 16);
292  assign(s->above_mode_ctx, uint8_t *, 16);
293  assign(s->above_mv_ctx, VP9mv(*)[2], 16);
294  assign(s->above_uv_nnz_ctx[0], uint8_t *, 16);
295  assign(s->above_uv_nnz_ctx[1], uint8_t *, 16);
296  assign(s->above_partition_ctx, uint8_t *, 8);
297  assign(s->above_skip_ctx, uint8_t *, 8);
298  assign(s->above_txfm_ctx, uint8_t *, 8);
299  assign(s->above_segpred_ctx, uint8_t *, 8);
300  assign(s->above_intra_ctx, uint8_t *, 8);
301  assign(s->above_comp_ctx, uint8_t *, 8);
302  assign(s->above_ref_ctx, uint8_t *, 8);
303  assign(s->above_filter_ctx, uint8_t *, 8);
304  assign(s->lflvl, VP9Filter *, lflvl_len);
305 #undef assign
306 
307  if (s->td) {
308  for (i = 0; i < s->active_tile_cols; i++)
309  vp9_tile_data_free(&s->td[i]);
310  }
311 
312  if (s->s.h.bpp != s->last_bpp) {
313  ff_vp9dsp_init(&s->dsp, s->s.h.bpp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
314  ff_videodsp_init(&s->vdsp, s->s.h.bpp);
315  s->last_bpp = s->s.h.bpp;
316  changed = 1;
317  }
318 
319  return changed;
320 }
321 
323 {
324  int i;
325  VP9Context *s = avctx->priv_data;
326  int chroma_blocks, chroma_eobs, bytesperpixel = s->bytesperpixel;
327  VP9TileData *td = &s->td[0];
328 
329  if (td->b_base && td->block_base && s->block_alloc_using_2pass == s->s.frames[CUR_FRAME].uses_2pass)
330  return 0;
331 
332  vp9_tile_data_free(td);
333  chroma_blocks = 64 * 64 >> (s->ss_h + s->ss_v);
334  chroma_eobs = 16 * 16 >> (s->ss_h + s->ss_v);
335  if (s->s.frames[CUR_FRAME].uses_2pass) {
336  int sbs = s->sb_cols * s->sb_rows;
337 
338  td->b_base = av_malloc_array(s->cols * s->rows, sizeof(VP9Block));
339  td->block_base = av_mallocz(((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
340  16 * 16 + 2 * chroma_eobs) * sbs);
341  if (!td->b_base || !td->block_base)
342  return AVERROR(ENOMEM);
343  td->uvblock_base[0] = td->block_base + sbs * 64 * 64 * bytesperpixel;
344  td->uvblock_base[1] = td->uvblock_base[0] + sbs * chroma_blocks * bytesperpixel;
345  td->eob_base = (uint8_t *) (td->uvblock_base[1] + sbs * chroma_blocks * bytesperpixel);
346  td->uveob_base[0] = td->eob_base + 16 * 16 * sbs;
347  td->uveob_base[1] = td->uveob_base[0] + chroma_eobs * sbs;
348 
350  td->block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
351  if (!td->block_structure)
352  return AVERROR(ENOMEM);
353  }
354  } else {
355  for (i = 1; i < s->active_tile_cols; i++)
356  vp9_tile_data_free(&s->td[i]);
357 
358  for (i = 0; i < s->active_tile_cols; i++) {
359  s->td[i].b_base = av_malloc(sizeof(VP9Block));
360  s->td[i].block_base = av_mallocz((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
361  16 * 16 + 2 * chroma_eobs);
362  if (!s->td[i].b_base || !s->td[i].block_base)
363  return AVERROR(ENOMEM);
364  s->td[i].uvblock_base[0] = s->td[i].block_base + 64 * 64 * bytesperpixel;
365  s->td[i].uvblock_base[1] = s->td[i].uvblock_base[0] + chroma_blocks * bytesperpixel;
366  s->td[i].eob_base = (uint8_t *) (s->td[i].uvblock_base[1] + chroma_blocks * bytesperpixel);
367  s->td[i].uveob_base[0] = s->td[i].eob_base + 16 * 16;
368  s->td[i].uveob_base[1] = s->td[i].uveob_base[0] + chroma_eobs;
369 
371  s->td[i].block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
372  if (!s->td[i].block_structure)
373  return AVERROR(ENOMEM);
374  }
375  }
376  }
377  s->block_alloc_using_2pass = s->s.frames[CUR_FRAME].uses_2pass;
378 
379  return 0;
380 }
381 
382 // The sign bit is at the end, not the start, of a bit sequence
384 {
385  int v = get_bits(gb, n);
386  return get_bits1(gb) ? -v : v;
387 }
388 
389 static av_always_inline int inv_recenter_nonneg(int v, int m)
390 {
391  if (v > 2 * m)
392  return v;
393  if (v & 1)
394  return m - ((v + 1) >> 1);
395  return m + (v >> 1);
396 }
397 
// differential forward probability updates
/* Read one differentially-coded probability update from the range coder and
 * apply it to the current probability p (valid range [1, 255]); returns the
 * new probability. The coded value d is an index into inv_map_table[],
 * which reorders deltas so that cheap short codes map to coarse steps. */
static int update_prob(VPXRangeCoder *c, int p)
{
    static const uint8_t inv_map_table[255] = {
          7,  20,  33,  46,  59,  72,  85,  98, 111, 124, 137, 150, 163, 176,
        189, 202, 215, 228, 241, 254,   1,   2,   3,   4,   5,   6,   8,   9,
         10,  11,  12,  13,  14,  15,  16,  17,  18,  19,  21,  22,  23,  24,
         25,  26,  27,  28,  29,  30,  31,  32,  34,  35,  36,  37,  38,  39,
         40,  41,  42,  43,  44,  45,  47,  48,  49,  50,  51,  52,  53,  54,
         55,  56,  57,  58,  60,  61,  62,  63,  64,  65,  66,  67,  68,  69,
         70,  71,  73,  74,  75,  76,  77,  78,  79,  80,  81,  82,  83,  84,
         86,  87,  88,  89,  90,  91,  92,  93,  94,  95,  96,  97,  99, 100,
        101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115,
        116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 127, 128, 129, 130,
        131, 132, 133, 134, 135, 136, 138, 139, 140, 141, 142, 143, 144, 145,
        146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
        161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
        177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 191,
        192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 206,
        207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221,
        222, 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236,
        237, 238, 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
        252, 253, 253,
    };
    int d;

    /* This code is trying to do a differential probability update. For a
     * current probability A in the range [1, 255], the difference to a new
     * probability of any value can be expressed differentially as 1-A, 255-A
     * where some part of this (absolute range) exists both in positive as
     * well as the negative part, whereas another part only exists in one
     * half. We're trying to code this shared part differentially, i.e.
     * times two where the value of the lowest bit specifies the sign, and
     * the single part is then coded on top of this. This absolute difference
     * then again has a value of [0, 254], but a bigger value in this range
     * indicates that we're further away from the original value A, so we
     * can code this as a VLC code, since higher values are increasingly
     * unlikely. The first 20 values in inv_map_table[] allow 'cheap, rough'
     * updates vs. the 'fine, exact' updates further down the range, which
     * adds one extra dimension to this differential update model. */

    /* VLC: each extra set bit selects a larger bucket of delta values */
    if (!vp89_rac_get(c)) {
        d = vp89_rac_get_uint(c, 4) + 0;
    } else if (!vp89_rac_get(c)) {
        d = vp89_rac_get_uint(c, 4) + 16;
    } else if (!vp89_rac_get(c)) {
        d = vp89_rac_get_uint(c, 5) + 32;
    } else {
        d = vp89_rac_get_uint(c, 7);
        /* top bucket: values >= 65 get one extra resolution bit */
        if (d >= 65)
            d = (d << 1) - 65 + vp89_rac_get(c);
        d += 64;
        av_assert2(d < FF_ARRAY_ELEMS(inv_map_table));
    }

    /* re-center around p, mirroring when p is in the upper half so the
     * result stays inside [1, 255] */
    return p <= 128 ? 1 + inv_recenter_nonneg(inv_map_table[d], p - 1) :
                    255 - inv_recenter_nonneg(inv_map_table[d], 255 - p);
}
456 
458 {
459  static const enum AVColorSpace colorspaces[8] = {
462  };
463  VP9Context *s = avctx->priv_data;
464  int bits = avctx->profile <= 1 ? 0 : 1 + get_bits1(&s->gb); // 0:8, 1:10, 2:12
465 
466  s->bpp_index = bits;
467  s->s.h.bpp = 8 + bits * 2;
468  s->bytesperpixel = (7 + s->s.h.bpp) >> 3;
469  avctx->colorspace = colorspaces[get_bits(&s->gb, 3)];
470  if (avctx->colorspace == AVCOL_SPC_RGB) { // RGB = profile 1
471  static const enum AVPixelFormat pix_fmt_rgb[3] = {
473  };
474  s->ss_h = s->ss_v = 0;
475  avctx->color_range = AVCOL_RANGE_JPEG;
476  s->pix_fmt = pix_fmt_rgb[bits];
477  if (avctx->profile & 1) {
478  if (get_bits1(&s->gb)) {
479  av_log(avctx, AV_LOG_ERROR, "Reserved bit set in RGB\n");
480  return AVERROR_INVALIDDATA;
481  }
482  } else {
483  av_log(avctx, AV_LOG_ERROR, "RGB not supported in profile %d\n",
484  avctx->profile);
485  return AVERROR_INVALIDDATA;
486  }
487  } else {
488  static const enum AVPixelFormat pix_fmt_for_ss[3][2 /* v */][2 /* h */] = {
495  };
497  if (avctx->profile & 1) {
498  s->ss_h = get_bits1(&s->gb);
499  s->ss_v = get_bits1(&s->gb);
500  s->pix_fmt = pix_fmt_for_ss[bits][s->ss_v][s->ss_h];
501  if (s->pix_fmt == AV_PIX_FMT_YUV420P) {
502  av_log(avctx, AV_LOG_ERROR, "YUV 4:2:0 not supported in profile %d\n",
503  avctx->profile);
504  return AVERROR_INVALIDDATA;
505  } else if (get_bits1(&s->gb)) {
506  av_log(avctx, AV_LOG_ERROR, "Profile %d color details reserved bit set\n",
507  avctx->profile);
508  return AVERROR_INVALIDDATA;
509  }
510  } else {
511  s->ss_h = s->ss_v = 1;
512  s->pix_fmt = pix_fmt_for_ss[bits][1][1];
513  }
514  }
515 
516  return 0;
517 }
518 
520  const uint8_t *data, int size, int *ref)
521 {
522  VP9Context *s = avctx->priv_data;
523  int c, i, j, k, l, m, n, w, h, max, size2, ret, sharp;
524  int last_invisible;
525  const uint8_t *data2;
526  int changed;
527 
528  /* general header */
529  if ((ret = init_get_bits8(&s->gb, data, size)) < 0) {
530  av_log(avctx, AV_LOG_ERROR, "Failed to initialize bitstream reader\n");
531  return ret;
532  }
533  if (get_bits(&s->gb, 2) != 0x2) { // frame marker
534  av_log(avctx, AV_LOG_ERROR, "Invalid frame marker\n");
535  return AVERROR_INVALIDDATA;
536  }
537  avctx->profile = get_bits1(&s->gb);
538  avctx->profile |= get_bits1(&s->gb) << 1;
539  if (avctx->profile == 3) avctx->profile += get_bits1(&s->gb);
540  if (avctx->profile > 3) {
541  av_log(avctx, AV_LOG_ERROR, "Profile %d is not yet supported\n", avctx->profile);
542  return AVERROR_INVALIDDATA;
543  }
544  s->s.h.profile = avctx->profile;
545  if (get_bits1(&s->gb)) {
546  *ref = get_bits(&s->gb, 3);
547  return 0;
548  }
549 
550  s->last_keyframe = s->s.h.keyframe;
551  s->s.h.keyframe = !get_bits1(&s->gb);
552 
553  last_invisible = s->s.h.invisible;
554  s->s.h.invisible = !get_bits1(&s->gb);
555  s->s.h.errorres = get_bits1(&s->gb);
556  s->s.h.use_last_frame_mvs = !s->s.h.errorres && !last_invisible;
557 
558  if (s->s.h.keyframe) {
559  if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
560  av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
561  return AVERROR_INVALIDDATA;
562  }
563  if ((ret = read_colorspace_details(avctx)) < 0)
564  return ret;
565  // for profile 1, here follows the subsampling bits
566  s->s.h.refreshrefmask = 0xff;
567  w = get_bits(&s->gb, 16) + 1;
568  h = get_bits(&s->gb, 16) + 1;
569  if (get_bits1(&s->gb)) // display size
570  skip_bits(&s->gb, 32);
571  } else {
572  s->s.h.intraonly = s->s.h.invisible ? get_bits1(&s->gb) : 0;
573  s->s.h.resetctx = s->s.h.errorres ? 0 : get_bits(&s->gb, 2);
574  if (s->s.h.intraonly) {
575  if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
576  av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
577  return AVERROR_INVALIDDATA;
578  }
579  if (avctx->profile >= 1) {
580  if ((ret = read_colorspace_details(avctx)) < 0)
581  return ret;
582  } else {
583  s->ss_h = s->ss_v = 1;
584  s->s.h.bpp = 8;
585  s->bpp_index = 0;
586  s->bytesperpixel = 1;
587  s->pix_fmt = AV_PIX_FMT_YUV420P;
588  avctx->colorspace = AVCOL_SPC_BT470BG;
589  avctx->color_range = AVCOL_RANGE_MPEG;
590  }
591  s->s.h.refreshrefmask = get_bits(&s->gb, 8);
592  w = get_bits(&s->gb, 16) + 1;
593  h = get_bits(&s->gb, 16) + 1;
594  if (get_bits1(&s->gb)) // display size
595  skip_bits(&s->gb, 32);
596  } else {
597  s->s.h.refreshrefmask = get_bits(&s->gb, 8);
598  s->s.h.refidx[0] = get_bits(&s->gb, 3);
599  s->s.h.signbias[0] = get_bits1(&s->gb) && !s->s.h.errorres;
600  s->s.h.refidx[1] = get_bits(&s->gb, 3);
601  s->s.h.signbias[1] = get_bits1(&s->gb) && !s->s.h.errorres;
602  s->s.h.refidx[2] = get_bits(&s->gb, 3);
603  s->s.h.signbias[2] = get_bits1(&s->gb) && !s->s.h.errorres;
604  if (!s->s.refs[s->s.h.refidx[0]].f ||
605  !s->s.refs[s->s.h.refidx[1]].f ||
606  !s->s.refs[s->s.h.refidx[2]].f) {
607  av_log(avctx, AV_LOG_ERROR, "Not all references are available\n");
608  return AVERROR_INVALIDDATA;
609  }
610  if (get_bits1(&s->gb)) {
611  w = s->s.refs[s->s.h.refidx[0]].f->width;
612  h = s->s.refs[s->s.h.refidx[0]].f->height;
613  } else if (get_bits1(&s->gb)) {
614  w = s->s.refs[s->s.h.refidx[1]].f->width;
615  h = s->s.refs[s->s.h.refidx[1]].f->height;
616  } else if (get_bits1(&s->gb)) {
617  w = s->s.refs[s->s.h.refidx[2]].f->width;
618  h = s->s.refs[s->s.h.refidx[2]].f->height;
619  } else {
620  w = get_bits(&s->gb, 16) + 1;
621  h = get_bits(&s->gb, 16) + 1;
622  }
623  // Note that in this code, "CUR_FRAME" is actually before we
624  // have formally allocated a frame, and thus actually represents
625  // the _last_ frame
626  s->s.h.use_last_frame_mvs &= s->s.frames[CUR_FRAME].tf.f &&
627  s->s.frames[CUR_FRAME].tf.f->width == w &&
628  s->s.frames[CUR_FRAME].tf.f->height == h;
629  if (get_bits1(&s->gb)) // display size
630  skip_bits(&s->gb, 32);
631  s->s.h.highprecisionmvs = get_bits1(&s->gb);
632  s->s.h.filtermode = get_bits1(&s->gb) ? FILTER_SWITCHABLE :
633  get_bits(&s->gb, 2);
634  s->s.h.allowcompinter = s->s.h.signbias[0] != s->s.h.signbias[1] ||
635  s->s.h.signbias[0] != s->s.h.signbias[2];
636  if (s->s.h.allowcompinter) {
637  if (s->s.h.signbias[0] == s->s.h.signbias[1]) {
638  s->s.h.fixcompref = 2;
639  s->s.h.varcompref[0] = 0;
640  s->s.h.varcompref[1] = 1;
641  } else if (s->s.h.signbias[0] == s->s.h.signbias[2]) {
642  s->s.h.fixcompref = 1;
643  s->s.h.varcompref[0] = 0;
644  s->s.h.varcompref[1] = 2;
645  } else {
646  s->s.h.fixcompref = 0;
647  s->s.h.varcompref[0] = 1;
648  s->s.h.varcompref[1] = 2;
649  }
650  }
651  }
652  }
653  s->s.h.refreshctx = s->s.h.errorres ? 0 : get_bits1(&s->gb);
654  s->s.h.parallelmode = s->s.h.errorres ? 1 : get_bits1(&s->gb);
655  s->s.h.framectxid = c = get_bits(&s->gb, 2);
656  if (s->s.h.keyframe || s->s.h.intraonly)
657  s->s.h.framectxid = 0; // BUG: libvpx ignores this field in keyframes
658 
659  /* loopfilter header data */
660  if (s->s.h.keyframe || s->s.h.errorres || s->s.h.intraonly) {
661  // reset loopfilter defaults
662  s->s.h.lf_delta.ref[0] = 1;
663  s->s.h.lf_delta.ref[1] = 0;
664  s->s.h.lf_delta.ref[2] = -1;
665  s->s.h.lf_delta.ref[3] = -1;
666  s->s.h.lf_delta.mode[0] = 0;
667  s->s.h.lf_delta.mode[1] = 0;
668  memset(s->s.h.segmentation.feat, 0, sizeof(s->s.h.segmentation.feat));
669  }
670  s->s.h.filter.level = get_bits(&s->gb, 6);
671  sharp = get_bits(&s->gb, 3);
672  // if sharpness changed, reinit lim/mblim LUTs. if it didn't change, keep
673  // the old cache values since they are still valid
674  if (s->s.h.filter.sharpness != sharp) {
675  for (i = 1; i <= 63; i++) {
676  int limit = i;
677 
678  if (sharp > 0) {
679  limit >>= (sharp + 3) >> 2;
680  limit = FFMIN(limit, 9 - sharp);
681  }
682  limit = FFMAX(limit, 1);
683 
684  s->filter_lut.lim_lut[i] = limit;
685  s->filter_lut.mblim_lut[i] = 2 * (i + 2) + limit;
686  }
687  }
688  s->s.h.filter.sharpness = sharp;
689  if ((s->s.h.lf_delta.enabled = get_bits1(&s->gb))) {
690  if ((s->s.h.lf_delta.updated = get_bits1(&s->gb))) {
691  for (i = 0; i < 4; i++)
692  if (get_bits1(&s->gb))
693  s->s.h.lf_delta.ref[i] = get_sbits_inv(&s->gb, 6);
694  for (i = 0; i < 2; i++)
695  if (get_bits1(&s->gb))
696  s->s.h.lf_delta.mode[i] = get_sbits_inv(&s->gb, 6);
697  }
698  }
699 
700  /* quantization header data */
701  s->s.h.yac_qi = get_bits(&s->gb, 8);
702  s->s.h.ydc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
703  s->s.h.uvdc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
704  s->s.h.uvac_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
705  s->s.h.lossless = s->s.h.yac_qi == 0 && s->s.h.ydc_qdelta == 0 &&
706  s->s.h.uvdc_qdelta == 0 && s->s.h.uvac_qdelta == 0;
707 #if FF_API_CODEC_PROPS
709  if (s->s.h.lossless)
712 #endif
713 
714  /* segmentation header info */
715  if ((s->s.h.segmentation.enabled = get_bits1(&s->gb))) {
716  if ((s->s.h.segmentation.update_map = get_bits1(&s->gb))) {
717  for (i = 0; i < 7; i++)
718  s->s.h.segmentation.prob[i] = get_bits1(&s->gb) ?
719  get_bits(&s->gb, 8) : 255;
720  if ((s->s.h.segmentation.temporal = get_bits1(&s->gb)))
721  for (i = 0; i < 3; i++)
722  s->s.h.segmentation.pred_prob[i] = get_bits1(&s->gb) ?
723  get_bits(&s->gb, 8) : 255;
724  }
725 
726  if (get_bits1(&s->gb)) {
727  s->s.h.segmentation.absolute_vals = get_bits1(&s->gb);
728  for (i = 0; i < 8; i++) {
729  if ((s->s.h.segmentation.feat[i].q_enabled = get_bits1(&s->gb)))
730  s->s.h.segmentation.feat[i].q_val = get_sbits_inv(&s->gb, 8);
731  if ((s->s.h.segmentation.feat[i].lf_enabled = get_bits1(&s->gb)))
732  s->s.h.segmentation.feat[i].lf_val = get_sbits_inv(&s->gb, 6);
733  if ((s->s.h.segmentation.feat[i].ref_enabled = get_bits1(&s->gb)))
734  s->s.h.segmentation.feat[i].ref_val = get_bits(&s->gb, 2);
735  s->s.h.segmentation.feat[i].skip_enabled = get_bits1(&s->gb);
736  }
737  }
738  } else {
739  // Reset fields under segmentation switch if segmentation is disabled.
740  // This is necessary because some hwaccels don't ignore these fields
741  // if segmentation is disabled.
742  s->s.h.segmentation.temporal = 0;
743  s->s.h.segmentation.update_map = 0;
744  }
745 
746  // set qmul[] based on Y/UV, AC/DC and segmentation Q idx deltas
747  for (i = 0; i < (s->s.h.segmentation.enabled ? 8 : 1); i++) {
748  int qyac, qydc, quvac, quvdc, lflvl, sh;
749 
750  if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].q_enabled) {
751  if (s->s.h.segmentation.absolute_vals)
752  qyac = av_clip_uintp2(s->s.h.segmentation.feat[i].q_val, 8);
753  else
754  qyac = av_clip_uintp2(s->s.h.yac_qi + s->s.h.segmentation.feat[i].q_val, 8);
755  } else {
756  qyac = s->s.h.yac_qi;
757  }
758  qydc = av_clip_uintp2(qyac + s->s.h.ydc_qdelta, 8);
759  quvdc = av_clip_uintp2(qyac + s->s.h.uvdc_qdelta, 8);
760  quvac = av_clip_uintp2(qyac + s->s.h.uvac_qdelta, 8);
761  qyac = av_clip_uintp2(qyac, 8);
762 
763  s->s.h.segmentation.feat[i].qmul[0][0] = ff_vp9_dc_qlookup[s->bpp_index][qydc];
764  s->s.h.segmentation.feat[i].qmul[0][1] = ff_vp9_ac_qlookup[s->bpp_index][qyac];
765  s->s.h.segmentation.feat[i].qmul[1][0] = ff_vp9_dc_qlookup[s->bpp_index][quvdc];
766  s->s.h.segmentation.feat[i].qmul[1][1] = ff_vp9_ac_qlookup[s->bpp_index][quvac];
767 
768  sh = s->s.h.filter.level >= 32;
769  if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].lf_enabled) {
770  if (s->s.h.segmentation.absolute_vals)
771  lflvl = av_clip_uintp2(s->s.h.segmentation.feat[i].lf_val, 6);
772  else
773  lflvl = av_clip_uintp2(s->s.h.filter.level + s->s.h.segmentation.feat[i].lf_val, 6);
774  } else {
775  lflvl = s->s.h.filter.level;
776  }
777  if (s->s.h.lf_delta.enabled) {
778  s->s.h.segmentation.feat[i].lflvl[0][0] =
779  s->s.h.segmentation.feat[i].lflvl[0][1] =
780  av_clip_uintp2(lflvl + (s->s.h.lf_delta.ref[0] * (1 << sh)), 6);
781  for (j = 1; j < 4; j++) {
782  s->s.h.segmentation.feat[i].lflvl[j][0] =
783  av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
784  s->s.h.lf_delta.mode[0]) * (1 << sh)), 6);
785  s->s.h.segmentation.feat[i].lflvl[j][1] =
786  av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
787  s->s.h.lf_delta.mode[1]) * (1 << sh)), 6);
788  }
789  } else {
790  memset(s->s.h.segmentation.feat[i].lflvl, lflvl,
791  sizeof(s->s.h.segmentation.feat[i].lflvl));
792  }
793  }
794 
795  /* tiling info */
796  if ((changed = update_size(avctx, w, h)) < 0) {
797  av_log(avctx, AV_LOG_ERROR, "Failed to initialize decoder for %dx%d @ %d\n",
798  w, h, s->pix_fmt);
799  return changed;
800  }
801  for (s->s.h.tiling.log2_tile_cols = 0;
802  s->sb_cols > (64 << s->s.h.tiling.log2_tile_cols);
803  s->s.h.tiling.log2_tile_cols++) ;
804  for (max = 0; (s->sb_cols >> max) >= 4; max++) ;
805  max = FFMAX(0, max - 1);
806  while (max > s->s.h.tiling.log2_tile_cols) {
807  if (get_bits1(&s->gb))
808  s->s.h.tiling.log2_tile_cols++;
809  else
810  break;
811  }
812  s->s.h.tiling.log2_tile_rows = decode012(&s->gb);
813  s->s.h.tiling.tile_rows = 1 << s->s.h.tiling.log2_tile_rows;
814  if (s->s.h.tiling.tile_cols != (1 << s->s.h.tiling.log2_tile_cols) || changed) {
815  int n_range_coders;
816  VPXRangeCoder *rc;
817 
818  if (s->td) {
819  for (i = 0; i < s->active_tile_cols; i++)
820  vp9_tile_data_free(&s->td[i]);
821  av_freep(&s->td);
822  }
823 
824  s->s.h.tiling.tile_cols = 1 << s->s.h.tiling.log2_tile_cols;
825  s->active_tile_cols = avctx->active_thread_type == FF_THREAD_SLICE ?
826  s->s.h.tiling.tile_cols : 1;
827  vp9_alloc_entries(avctx, s->sb_rows);
828  if (avctx->active_thread_type == FF_THREAD_SLICE) {
829  n_range_coders = 4; // max_tile_rows
830  } else {
831  n_range_coders = s->s.h.tiling.tile_cols;
832  }
833  s->td = av_calloc(s->active_tile_cols, sizeof(VP9TileData) +
834  n_range_coders * sizeof(VPXRangeCoder));
835  if (!s->td)
836  return AVERROR(ENOMEM);
837  rc = (VPXRangeCoder *) &s->td[s->active_tile_cols];
838  for (i = 0; i < s->active_tile_cols; i++) {
839  s->td[i].s = s;
840  s->td[i].c_b = rc;
841  rc += n_range_coders;
842  }
843  }
844 
845  /* check reference frames */
846  if (!s->s.h.keyframe && !s->s.h.intraonly) {
847  int valid_ref_frame = 0;
848  for (i = 0; i < 3; i++) {
849  AVFrame *ref = s->s.refs[s->s.h.refidx[i]].f;
850  int refw = ref->width, refh = ref->height;
851 
852  if (ref->format != avctx->pix_fmt) {
853  av_log(avctx, AV_LOG_ERROR,
854  "Ref pixfmt (%s) did not match current frame (%s)",
855  av_get_pix_fmt_name(ref->format),
856  av_get_pix_fmt_name(avctx->pix_fmt));
857  return AVERROR_INVALIDDATA;
858  } else if (refw == w && refh == h) {
859  s->mvscale[i][0] = s->mvscale[i][1] = 0;
860  } else {
861  /* Check to make sure at least one of frames that */
862  /* this frame references has valid dimensions */
863  if (w * 2 < refw || h * 2 < refh || w > 16 * refw || h > 16 * refh) {
864  av_log(avctx, AV_LOG_WARNING,
865  "Invalid ref frame dimensions %dx%d for frame size %dx%d\n",
866  refw, refh, w, h);
867  s->mvscale[i][0] = s->mvscale[i][1] = REF_INVALID_SCALE;
868  continue;
869  }
870  s->mvscale[i][0] = (refw << 14) / w;
871  s->mvscale[i][1] = (refh << 14) / h;
872  s->mvstep[i][0] = 16 * s->mvscale[i][0] >> 14;
873  s->mvstep[i][1] = 16 * s->mvscale[i][1] >> 14;
874  }
875  valid_ref_frame++;
876  }
877  if (!valid_ref_frame) {
878  av_log(avctx, AV_LOG_ERROR, "No valid reference frame is found, bitstream not supported\n");
879  return AVERROR_INVALIDDATA;
880  }
881  }
882 
883  if (s->s.h.keyframe || s->s.h.errorres || (s->s.h.intraonly && s->s.h.resetctx == 3)) {
884  s->prob_ctx[0].p = s->prob_ctx[1].p = s->prob_ctx[2].p =
885  s->prob_ctx[3].p = ff_vp9_default_probs;
886  memcpy(s->prob_ctx[0].coef, ff_vp9_default_coef_probs,
887  sizeof(ff_vp9_default_coef_probs));
888  memcpy(s->prob_ctx[1].coef, ff_vp9_default_coef_probs,
889  sizeof(ff_vp9_default_coef_probs));
890  memcpy(s->prob_ctx[2].coef, ff_vp9_default_coef_probs,
891  sizeof(ff_vp9_default_coef_probs));
892  memcpy(s->prob_ctx[3].coef, ff_vp9_default_coef_probs,
893  sizeof(ff_vp9_default_coef_probs));
894  } else if (s->s.h.intraonly && s->s.h.resetctx == 2) {
895  s->prob_ctx[c].p = ff_vp9_default_probs;
896  memcpy(s->prob_ctx[c].coef, ff_vp9_default_coef_probs,
897  sizeof(ff_vp9_default_coef_probs));
898  }
899 
900  // next 16 bits is size of the rest of the header (arith-coded)
901  s->s.h.compressed_header_size = size2 = get_bits(&s->gb, 16);
902  s->s.h.uncompressed_header_size = (get_bits_count(&s->gb) + 7) / 8;
903 
904  data2 = align_get_bits(&s->gb);
905  if (size2 > size - (data2 - data)) {
906  av_log(avctx, AV_LOG_ERROR, "Invalid compressed header size\n");
907  return AVERROR_INVALIDDATA;
908  }
909  ret = ff_vpx_init_range_decoder(&s->c, data2, size2);
910  if (ret < 0)
911  return ret;
912 
913  if (vpx_rac_get_prob_branchy(&s->c, 128)) { // marker bit
914  av_log(avctx, AV_LOG_ERROR, "Marker bit was set\n");
915  return AVERROR_INVALIDDATA;
916  }
917 
918  for (i = 0; i < s->active_tile_cols; i++) {
919  if (s->s.h.keyframe || s->s.h.intraonly) {
920  memset(s->td[i].counts.coef, 0, sizeof(s->td[0].counts.coef));
921  memset(s->td[i].counts.eob, 0, sizeof(s->td[0].counts.eob));
922  } else {
923  memset(&s->td[i].counts, 0, sizeof(s->td[0].counts));
924  }
925  s->td[i].nb_block_structure = 0;
926  }
927 
928  /* FIXME is it faster to not copy here, but do it down in the fw updates
929  * as explicit copies if the fw update is missing (and skip the copy upon
930  * fw update)? */
931  s->prob.p = s->prob_ctx[c].p;
932 
933  // txfm updates
934  if (s->s.h.lossless) {
935  s->s.h.txfmmode = TX_4X4;
936  } else {
937  s->s.h.txfmmode = vp89_rac_get_uint(&s->c, 2);
938  if (s->s.h.txfmmode == 3)
939  s->s.h.txfmmode += vp89_rac_get(&s->c);
940 
941  if (s->s.h.txfmmode == TX_SWITCHABLE) {
942  for (i = 0; i < 2; i++)
943  if (vpx_rac_get_prob_branchy(&s->c, 252))
944  s->prob.p.tx8p[i] = update_prob(&s->c, s->prob.p.tx8p[i]);
945  for (i = 0; i < 2; i++)
946  for (j = 0; j < 2; j++)
947  if (vpx_rac_get_prob_branchy(&s->c, 252))
948  s->prob.p.tx16p[i][j] =
949  update_prob(&s->c, s->prob.p.tx16p[i][j]);
950  for (i = 0; i < 2; i++)
951  for (j = 0; j < 3; j++)
952  if (vpx_rac_get_prob_branchy(&s->c, 252))
953  s->prob.p.tx32p[i][j] =
954  update_prob(&s->c, s->prob.p.tx32p[i][j]);
955  }
956  }
957 
958  // coef updates
959  for (i = 0; i < 4; i++) {
960  uint8_t (*ref)[2][6][6][3] = s->prob_ctx[c].coef[i];
961  if (vp89_rac_get(&s->c)) {
962  for (j = 0; j < 2; j++)
963  for (k = 0; k < 2; k++)
964  for (l = 0; l < 6; l++)
965  for (m = 0; m < 6; m++) {
966  uint8_t *p = s->prob.coef[i][j][k][l][m];
967  uint8_t *r = ref[j][k][l][m];
968  if (m >= 3 && l == 0) // dc only has 3 pt
969  break;
970  for (n = 0; n < 3; n++) {
971  if (vpx_rac_get_prob_branchy(&s->c, 252))
972  p[n] = update_prob(&s->c, r[n]);
973  else
974  p[n] = r[n];
975  }
976  memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
977  }
978  } else {
979  for (j = 0; j < 2; j++)
980  for (k = 0; k < 2; k++)
981  for (l = 0; l < 6; l++)
982  for (m = 0; m < 6; m++) {
983  uint8_t *p = s->prob.coef[i][j][k][l][m];
984  uint8_t *r = ref[j][k][l][m];
985  if (m > 3 && l == 0) // dc only has 3 pt
986  break;
987  memcpy(p, r, 3);
988  memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
989  }
990  }
991  if (s->s.h.txfmmode == i)
992  break;
993  }
994 
995  // mode updates
996  for (i = 0; i < 3; i++)
997  if (vpx_rac_get_prob_branchy(&s->c, 252))
998  s->prob.p.skip[i] = update_prob(&s->c, s->prob.p.skip[i]);
999  if (!s->s.h.keyframe && !s->s.h.intraonly) {
1000  for (i = 0; i < 7; i++)
1001  for (j = 0; j < 3; j++)
1002  if (vpx_rac_get_prob_branchy(&s->c, 252))
1003  s->prob.p.mv_mode[i][j] =
1004  update_prob(&s->c, s->prob.p.mv_mode[i][j]);
1005 
1006  if (s->s.h.filtermode == FILTER_SWITCHABLE)
1007  for (i = 0; i < 4; i++)
1008  for (j = 0; j < 2; j++)
1009  if (vpx_rac_get_prob_branchy(&s->c, 252))
1010  s->prob.p.filter[i][j] =
1011  update_prob(&s->c, s->prob.p.filter[i][j]);
1012 
1013  for (i = 0; i < 4; i++)
1014  if (vpx_rac_get_prob_branchy(&s->c, 252))
1015  s->prob.p.intra[i] = update_prob(&s->c, s->prob.p.intra[i]);
1016 
1017  if (s->s.h.allowcompinter) {
1018  s->s.h.comppredmode = vp89_rac_get(&s->c);
1019  if (s->s.h.comppredmode)
1020  s->s.h.comppredmode += vp89_rac_get(&s->c);
1021  if (s->s.h.comppredmode == PRED_SWITCHABLE)
1022  for (i = 0; i < 5; i++)
1023  if (vpx_rac_get_prob_branchy(&s->c, 252))
1024  s->prob.p.comp[i] =
1025  update_prob(&s->c, s->prob.p.comp[i]);
1026  } else {
1027  s->s.h.comppredmode = PRED_SINGLEREF;
1028  }
1029 
1030  if (s->s.h.comppredmode != PRED_COMPREF) {
1031  for (i = 0; i < 5; i++) {
1032  if (vpx_rac_get_prob_branchy(&s->c, 252))
1033  s->prob.p.single_ref[i][0] =
1034  update_prob(&s->c, s->prob.p.single_ref[i][0]);
1035  if (vpx_rac_get_prob_branchy(&s->c, 252))
1036  s->prob.p.single_ref[i][1] =
1037  update_prob(&s->c, s->prob.p.single_ref[i][1]);
1038  }
1039  }
1040 
1041  if (s->s.h.comppredmode != PRED_SINGLEREF) {
1042  for (i = 0; i < 5; i++)
1043  if (vpx_rac_get_prob_branchy(&s->c, 252))
1044  s->prob.p.comp_ref[i] =
1045  update_prob(&s->c, s->prob.p.comp_ref[i]);
1046  }
1047 
1048  for (i = 0; i < 4; i++)
1049  for (j = 0; j < 9; j++)
1050  if (vpx_rac_get_prob_branchy(&s->c, 252))
1051  s->prob.p.y_mode[i][j] =
1052  update_prob(&s->c, s->prob.p.y_mode[i][j]);
1053 
1054  for (i = 0; i < 4; i++)
1055  for (j = 0; j < 4; j++)
1056  for (k = 0; k < 3; k++)
1057  if (vpx_rac_get_prob_branchy(&s->c, 252))
1058  s->prob.p.partition[3 - i][j][k] =
1059  update_prob(&s->c,
1060  s->prob.p.partition[3 - i][j][k]);
1061 
1062  // mv fields don't use the update_prob subexp model for some reason
1063  for (i = 0; i < 3; i++)
1064  if (vpx_rac_get_prob_branchy(&s->c, 252))
1065  s->prob.p.mv_joint[i] = (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1066 
1067  for (i = 0; i < 2; i++) {
1068  if (vpx_rac_get_prob_branchy(&s->c, 252))
1069  s->prob.p.mv_comp[i].sign =
1070  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1071 
1072  for (j = 0; j < 10; j++)
1073  if (vpx_rac_get_prob_branchy(&s->c, 252))
1074  s->prob.p.mv_comp[i].classes[j] =
1075  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1076 
1077  if (vpx_rac_get_prob_branchy(&s->c, 252))
1078  s->prob.p.mv_comp[i].class0 =
1079  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1080 
1081  for (j = 0; j < 10; j++)
1082  if (vpx_rac_get_prob_branchy(&s->c, 252))
1083  s->prob.p.mv_comp[i].bits[j] =
1084  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1085  }
1086 
1087  for (i = 0; i < 2; i++) {
1088  for (j = 0; j < 2; j++)
1089  for (k = 0; k < 3; k++)
1090  if (vpx_rac_get_prob_branchy(&s->c, 252))
1091  s->prob.p.mv_comp[i].class0_fp[j][k] =
1092  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1093 
1094  for (j = 0; j < 3; j++)
1095  if (vpx_rac_get_prob_branchy(&s->c, 252))
1096  s->prob.p.mv_comp[i].fp[j] =
1097  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1098  }
1099 
1100  if (s->s.h.highprecisionmvs) {
1101  for (i = 0; i < 2; i++) {
1102  if (vpx_rac_get_prob_branchy(&s->c, 252))
1103  s->prob.p.mv_comp[i].class0_hp =
1104  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1105 
1106  if (vpx_rac_get_prob_branchy(&s->c, 252))
1107  s->prob.p.mv_comp[i].hp =
1108  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1109  }
1110  }
1111  }
1112 
1113  return (data2 - data) + size2;
1114 }
1115 
1116 static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl,
1117  ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
1118 {
1119  const VP9Context *s = td->s;
1120  int c = ((s->above_partition_ctx[col] >> (3 - bl)) & 1) |
1121  (((td->left_partition_ctx[row & 0x7] >> (3 - bl)) & 1) << 1);
1122  const uint8_t *p = s->s.h.keyframe || s->s.h.intraonly ? ff_vp9_default_kf_partition_probs[bl][c] :
1123  s->prob.p.partition[bl][c];
1124  enum BlockPartition bp;
1125  ptrdiff_t hbs = 4 >> bl;
1126  AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
1127  ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
1128  int bytesperpixel = s->bytesperpixel;
1129 
1130  if (bl == BL_8X8) {
1132  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1133  } else if (col + hbs < s->cols) { // FIXME why not <=?
1134  if (row + hbs < s->rows) { // FIXME why not <=?
1136  switch (bp) {
1137  case PARTITION_NONE:
1138  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1139  break;
1140  case PARTITION_H:
1141  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1142  yoff += hbs * 8 * y_stride;
1143  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1144  ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, bl, bp);
1145  break;
1146  case PARTITION_V:
1147  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1148  yoff += hbs * 8 * bytesperpixel;
1149  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1150  ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, bl, bp);
1151  break;
1152  case PARTITION_SPLIT:
1153  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1154  decode_sb(td, row, col + hbs, lflvl,
1155  yoff + 8 * hbs * bytesperpixel,
1156  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1157  yoff += hbs * 8 * y_stride;
1158  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1159  decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1160  decode_sb(td, row + hbs, col + hbs, lflvl,
1161  yoff + 8 * hbs * bytesperpixel,
1162  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1163  break;
1164  default:
1165  av_unreachable("ff_vp9_partition_tree only has "
1166  "the four PARTITION_* terminal codes");
1167  }
1168  } else if (vpx_rac_get_prob_branchy(td->c, p[1])) {
1169  bp = PARTITION_SPLIT;
1170  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1171  decode_sb(td, row, col + hbs, lflvl,
1172  yoff + 8 * hbs * bytesperpixel,
1173  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1174  } else {
1175  bp = PARTITION_H;
1176  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1177  }
1178  } else if (row + hbs < s->rows) { // FIXME why not <=?
1179  if (vpx_rac_get_prob_branchy(td->c, p[2])) {
1180  bp = PARTITION_SPLIT;
1181  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1182  yoff += hbs * 8 * y_stride;
1183  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1184  decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1185  } else {
1186  bp = PARTITION_V;
1187  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1188  }
1189  } else {
1190  bp = PARTITION_SPLIT;
1191  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1192  }
1193  td->counts.partition[bl][c][bp]++;
1194 }
1195 
/**
 * Replay one superblock partition subtree in pass 2 of two-pass decoding.
 *
 * Unlike decode_sb(), nothing is read from the bitstream here: the block
 * structure (td->b->bl / td->b->bp) was recorded in pass 1 and is walked
 * again to perform reconstruction.  Recursion descends until the stored
 * block level matches @p bl.
 */
static void decode_sb_mem(VP9TileData *td, int row, int col, VP9Filter *lflvl,
                          ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
{
    const VP9Context *s = td->s;
    VP9Block *b = td->b;
    ptrdiff_t hbs = 4 >> bl; // half block size in units of 8x8 blocks
    AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
    ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
    int bytesperpixel = s->bytesperpixel;

    if (bl == BL_8X8) {
        av_assert2(b->bl == BL_8X8);
        ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
    } else if (td->b->bl == bl) {
        // stored level matches: decode this block, plus its H/V sibling
        // if that sibling lies inside the frame
        ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
        if (b->bp == PARTITION_H && row + hbs < s->rows) {
            yoff += hbs * 8 * y_stride;
            uvoff += hbs * 8 * uv_stride >> s->ss_v;
            ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, b->bl, b->bp);
        } else if (b->bp == PARTITION_V && col + hbs < s->cols) {
            yoff += hbs * 8 * bytesperpixel;
            uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
            ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, b->bl, b->bp);
        }
    } else {
        // stored level is deeper: recurse into the quadrants that exist
        // within the frame bounds
        decode_sb_mem(td, row, col, lflvl, yoff, uvoff, bl + 1);
        if (col + hbs < s->cols) { // FIXME why not <=?
            if (row + hbs < s->rows) {
                decode_sb_mem(td, row, col + hbs, lflvl, yoff + 8 * hbs * bytesperpixel,
                              uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
                yoff += hbs * 8 * y_stride;
                uvoff += hbs * 8 * uv_stride >> s->ss_v;
                decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
                decode_sb_mem(td, row + hbs, col + hbs, lflvl,
                              yoff + 8 * hbs * bytesperpixel,
                              uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
            } else {
                yoff += hbs * 8 * bytesperpixel;
                uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
                decode_sb_mem(td, row, col + hbs, lflvl, yoff, uvoff, bl + 1);
            }
        } else if (row + hbs < s->rows) {
            yoff += hbs * 8 * y_stride;
            uvoff += hbs * 8 * uv_stride >> s->ss_v;
            decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
        }
    }
}
1244 
/**
 * Compute the pixel range [*start, *end) covered by tile @p idx.
 *
 * The tile boundary in superblock units is (idx * n) >> log2_n, clamped
 * to @p n, then converted to 8-pixel block units.
 */
static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
{
    int first_sb = (idx * n) >> log2_n;
    int last_sb  = ((idx + 1) * n) >> log2_n;

    if (first_sb > n)
        first_sb = n;
    if (last_sb > n)
        last_sb = n;

    *start = first_sb * 8;
    *end   = last_sb  * 8;
}
1252 
1254 {
1255  int i;
1256 
1257  av_freep(&s->intra_pred_data[0]);
1258  for (i = 0; i < s->active_tile_cols; i++)
1259  vp9_tile_data_free(&s->td[i]);
1260 }
1261 
1263 {
1264  VP9Context *s = avctx->priv_data;
1265  int i;
1266 
1267  for (int i = 0; i < 3; i++)
1268  vp9_frame_unref(&s->s.frames[i]);
1269  av_refstruct_pool_uninit(&s->frame_extradata_pool);
1270  for (i = 0; i < 8; i++) {
1271  ff_progress_frame_unref(&s->s.refs[i]);
1272  ff_progress_frame_unref(&s->next_refs[i]);
1273  vp9_frame_unref(&s->s.ref_frames[i]);
1274  }
1275 
1276  free_buffers(s);
1277 #if HAVE_THREADS
1278  av_freep(&s->entries);
1279  ff_pthread_free(s, vp9_context_offsets);
1280 #endif
1281 
1282  av_refstruct_unref(&s->header_ref);
1283  ff_cbs_fragment_free(&s->current_frag);
1284  ff_cbs_close(&s->cbc);
1285 
1286  av_freep(&s->td);
1287  return 0;
1288 }
1289 
/**
 * Decode all tiles of the current frame on a single thread (also used for
 * both passes of two-pass frame threading).
 *
 * For each tile row: parse the tile size headers and set up one range
 * decoder per tile column, then decode superblock rows across all tile
 * columns, back up the last pixel row for intra prediction, loopfilter,
 * and report row progress to consumers of this frame.
 *
 * @return 0 on success, AVERROR_INVALIDDATA on truncated/invalid tile data.
 */
static int decode_tiles(AVCodecContext *avctx,
                        const uint8_t *data, int size)
{
    VP9Context *s = avctx->priv_data;
    VP9TileData *td = &s->td[0];
    int row, col, tile_row, tile_col, ret;
    int bytesperpixel;
    int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
    AVFrame *f;
    ptrdiff_t yoff, uvoff, ls_y, ls_uv;

    f = s->s.frames[CUR_FRAME].tf.f;
    ls_y = f->linesize[0];
    ls_uv =f->linesize[1];
    bytesperpixel = s->bytesperpixel;

    yoff = uvoff = 0;
    for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
        set_tile_offset(&tile_row_start, &tile_row_end,
                        tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);

        // set up one range decoder per tile column; every tile except the
        // last is preceded by a 32-bit big-endian size field
        for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
            int64_t tile_size;

            if (tile_col == s->s.h.tiling.tile_cols - 1 &&
                tile_row == s->s.h.tiling.tile_rows - 1) {
                tile_size = size;
            } else {
                tile_size = AV_RB32(data);
                data += 4;
                size -= 4;
            }
            if (tile_size > size)
                return AVERROR_INVALIDDATA;
            ret = ff_vpx_init_range_decoder(&td->c_b[tile_col], data, tile_size);
            if (ret < 0)
                return ret;
            if (vpx_rac_get_prob_branchy(&td->c_b[tile_col], 128)) // marker bit
                return AVERROR_INVALIDDATA;
            data += tile_size;
            size -= tile_size;
        }

        // decode one superblock row at a time across all tile columns
        for (row = tile_row_start; row < tile_row_end;
             row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
            VP9Filter *lflvl_ptr = s->lflvl;
            ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;

            for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
                set_tile_offset(&tile_col_start, &tile_col_end,
                                tile_col, s->s.h.tiling.log2_tile_cols, s->sb_cols);
                td->tile_col_start = tile_col_start;
                if (s->pass != 2) {
                    // reset the left-edge contexts at each tile boundary
                    memset(td->left_partition_ctx, 0, 8);
                    memset(td->left_skip_ctx, 0, 8);
                    if (s->s.h.keyframe || s->s.h.intraonly) {
                        memset(td->left_mode_ctx, DC_PRED, 16);
                    } else {
                        memset(td->left_mode_ctx, NEARESTMV, 8);
                    }
                    memset(td->left_y_nnz_ctx, 0, 16);
                    memset(td->left_uv_nnz_ctx, 0, 32);
                    memset(td->left_segpred_ctx, 0, 8);

                    td->c = &td->c_b[tile_col];
                }

                for (col = tile_col_start;
                     col < tile_col_end;
                     col += 8, yoff2 += 64 * bytesperpixel,
                     uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                    // FIXME integrate with lf code (i.e. zero after each
                    // use, similar to invtxfm coefficients, or similar)
                    if (s->pass != 1) {
                        memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
                    }

                    if (s->pass == 2) {
                        // pass 2 replays the recorded block structure
                        decode_sb_mem(td, row, col, lflvl_ptr,
                                      yoff2, uvoff2, BL_64X64);
                    } else {
                        if (vpx_rac_is_end(td->c)) {
                            return AVERROR_INVALIDDATA;
                        }
                        decode_sb(td, row, col, lflvl_ptr,
                                  yoff2, uvoff2, BL_64X64);
                    }
                }
            }

            if (s->pass == 1)
                continue;

            // backup pre-loopfilter reconstruction data for intra
            // prediction of next row of sb64s
            if (row + 8 < s->rows) {
                memcpy(s->intra_pred_data[0],
                       f->data[0] + yoff + 63 * ls_y,
                       8 * s->cols * bytesperpixel);
                memcpy(s->intra_pred_data[1],
                       f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * s->cols * bytesperpixel >> s->ss_h);
                memcpy(s->intra_pred_data[2],
                       f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * s->cols * bytesperpixel >> s->ss_h);
            }

            // loopfilter one row
            if (s->s.h.filter.level) {
                yoff2 = yoff;
                uvoff2 = uvoff;
                lflvl_ptr = s->lflvl;
                for (col = 0; col < s->cols;
                     col += 8, yoff2 += 64 * bytesperpixel,
                     uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                    ff_vp9_loopfilter_sb(avctx, lflvl_ptr, row, col,
                                         yoff2, uvoff2);
                }
            }

            // FIXME maybe we can make this more finegrained by running the
            // loopfilter per-block instead of after each sbrow
            // In fact that would also make intra pred left preparation easier?
            ff_progress_frame_report(&s->s.frames[CUR_FRAME].tf, row >> 3);
        }
    }
    return 0;
}
1418 
1419 #if HAVE_THREADS
/**
 * Slice-thread worker: decode one tile column (selected by @p jobnr)
 * across all tile rows.  Loopfiltering is done separately by
 * loopfilter_proc() on the main thread; per-sb-row completion is signalled
 * through vp9_report_tile_progress().
 */
static av_always_inline
int decode_tiles_mt(AVCodecContext *avctx, void *tdata, int jobnr,
                    int threadnr)
{
    VP9Context *s = avctx->priv_data;
    VP9TileData *td = &s->td[jobnr];
    ptrdiff_t uvoff, yoff, ls_y, ls_uv;
    int bytesperpixel = s->bytesperpixel, row, col, tile_row;
    unsigned tile_cols_len;
    int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
    VP9Filter *lflvl_ptr_base;
    AVFrame *f;

    f = s->s.frames[CUR_FRAME].tf.f;
    ls_y = f->linesize[0];
    ls_uv =f->linesize[1];

    // this worker owns the horizontal span of tile column 'jobnr'
    set_tile_offset(&tile_col_start, &tile_col_end,
                    jobnr, s->s.h.tiling.log2_tile_cols, s->sb_cols);
    td->tile_col_start = tile_col_start;
    uvoff = (64 * bytesperpixel >> s->ss_h)*(tile_col_start >> 3);
    yoff = (64 * bytesperpixel)*(tile_col_start >> 3);
    lflvl_ptr_base = s->lflvl+(tile_col_start >> 3);

    for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
        set_tile_offset(&tile_row_start, &tile_row_end,
                        tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);

        // range decoders were initialized per (tile_col, tile_row) by the caller
        td->c = &td->c_b[tile_row];
        for (row = tile_row_start; row < tile_row_end;
             row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
            ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
            VP9Filter *lflvl_ptr = lflvl_ptr_base+s->sb_cols*(row >> 3);

            // reset left-edge contexts at the tile's left boundary
            memset(td->left_partition_ctx, 0, 8);
            memset(td->left_skip_ctx, 0, 8);
            if (s->s.h.keyframe || s->s.h.intraonly) {
                memset(td->left_mode_ctx, DC_PRED, 16);
            } else {
                memset(td->left_mode_ctx, NEARESTMV, 8);
            }
            memset(td->left_y_nnz_ctx, 0, 16);
            memset(td->left_uv_nnz_ctx, 0, 32);
            memset(td->left_segpred_ctx, 0, 8);

            for (col = tile_col_start;
                 col < tile_col_end;
                 col += 8, yoff2 += 64 * bytesperpixel,
                 uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                // FIXME integrate with lf code (i.e. zero after each
                // use, similar to invtxfm coefficients, or similar)
                memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
                decode_sb(td, row, col, lflvl_ptr,
                          yoff2, uvoff2, BL_64X64);
            }

            // backup pre-loopfilter reconstruction data for intra
            // prediction of next row of sb64s
            tile_cols_len = tile_col_end - tile_col_start;
            if (row + 8 < s->rows) {
                memcpy(s->intra_pred_data[0] + (tile_col_start * 8 * bytesperpixel),
                       f->data[0] + yoff + 63 * ls_y,
                       8 * tile_cols_len * bytesperpixel);
                memcpy(s->intra_pred_data[1] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
                       f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * tile_cols_len * bytesperpixel >> s->ss_h);
                memcpy(s->intra_pred_data[2] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
                       f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * tile_cols_len * bytesperpixel >> s->ss_h);
            }

            vp9_report_tile_progress(s, row >> 3, 1);
        }
    }
    return 0;
}
1496 
1497 static av_always_inline
1498 int loopfilter_proc(AVCodecContext *avctx)
1499 {
1500  VP9Context *s = avctx->priv_data;
1501  ptrdiff_t uvoff, yoff, ls_y, ls_uv;
1502  VP9Filter *lflvl_ptr;
1503  int bytesperpixel = s->bytesperpixel, col, i;
1504  AVFrame *f;
1505 
1506  f = s->s.frames[CUR_FRAME].tf.f;
1507  ls_y = f->linesize[0];
1508  ls_uv =f->linesize[1];
1509 
1510  for (i = 0; i < s->sb_rows; i++) {
1511  vp9_await_tile_progress(s, i, s->s.h.tiling.tile_cols);
1512 
1513  if (s->s.h.filter.level) {
1514  yoff = (ls_y * 64)*i;
1515  uvoff = (ls_uv * 64 >> s->ss_v)*i;
1516  lflvl_ptr = s->lflvl+s->sb_cols*i;
1517  for (col = 0; col < s->cols;
1518  col += 8, yoff += 64 * bytesperpixel,
1519  uvoff += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1520  ff_vp9_loopfilter_sb(avctx, lflvl_ptr, i << 3, col,
1521  yoff, uvoff);
1522  }
1523  }
1524  }
1525  return 0;
1526 }
1527 #endif
1528 
1530 {
1531  AVVideoEncParams *par;
1532  unsigned int tile, nb_blocks = 0;
1533 
1534  if (s->s.h.segmentation.enabled) {
1535  for (tile = 0; tile < s->active_tile_cols; tile++)
1536  nb_blocks += s->td[tile].nb_block_structure;
1537  }
1538 
1540  AV_VIDEO_ENC_PARAMS_VP9, nb_blocks);
1541  if (!par)
1542  return AVERROR(ENOMEM);
1543 
1544  par->qp = s->s.h.yac_qi;
1545  par->delta_qp[0][0] = s->s.h.ydc_qdelta;
1546  par->delta_qp[1][0] = s->s.h.uvdc_qdelta;
1547  par->delta_qp[2][0] = s->s.h.uvdc_qdelta;
1548  par->delta_qp[1][1] = s->s.h.uvac_qdelta;
1549  par->delta_qp[2][1] = s->s.h.uvac_qdelta;
1550 
1551  if (nb_blocks) {
1552  unsigned int block = 0;
1553  unsigned int tile, block_tile;
1554 
1555  for (tile = 0; tile < s->active_tile_cols; tile++) {
1556  VP9TileData *td = &s->td[tile];
1557 
1558  for (block_tile = 0; block_tile < td->nb_block_structure; block_tile++) {
1560  unsigned int row = td->block_structure[block_tile].row;
1561  unsigned int col = td->block_structure[block_tile].col;
1562  uint8_t seg_id = frame->segmentation_map[row * 8 * s->sb_cols + col];
1563 
1564  b->src_x = col * 8;
1565  b->src_y = row * 8;
1566  b->w = 1 << (3 + td->block_structure[block_tile].block_size_idx_x);
1567  b->h = 1 << (3 + td->block_structure[block_tile].block_size_idx_y);
1568 
1569  if (s->s.h.segmentation.feat[seg_id].q_enabled) {
1570  b->delta_qp = s->s.h.segmentation.feat[seg_id].q_val;
1571  if (s->s.h.segmentation.absolute_vals)
1572  b->delta_qp -= par->qp;
1573  }
1574  }
1575  }
1576  }
1577 
1578  return 0;
1579 }
1580 
1582  int *got_frame, AVPacket *pkt)
1583 {
1584  const uint8_t *data = pkt->data;
1585  int size = pkt->size;
1586  VP9Context *s = avctx->priv_data;
1587  int ret, i, j, ref;
1588  CodedBitstreamUnit *unit;
1589  VP9RawFrame *rf;
1590 
1591  int retain_segmap_ref = s->s.frames[REF_FRAME_SEGMAP].segmentation_map &&
1592  (!s->s.h.segmentation.enabled || !s->s.h.segmentation.update_map);
1593  const VP9Frame *src;
1594  AVFrame *f;
1595 
1596  ret = ff_cbs_read_packet(s->cbc, &s->current_frag, pkt);
1597  if (ret < 0) {
1598  ff_cbs_fragment_reset(&s->current_frag);
1599  av_log(avctx, AV_LOG_ERROR, "Failed to read frame header.\n");
1600  return ret;
1601  }
1602 
1603  unit = &s->current_frag.units[0];
1604  rf = unit->content;
1605 
1606  av_refstruct_replace(&s->header_ref, unit->content_ref);
1607  s->frame_header = &rf->header;
1608 
1609  if ((ret = decode_frame_header(avctx, data, size, &ref)) < 0) {
1610  return ret;
1611  } else if (ret == 0) {
1612  if (!s->s.refs[ref].f) {
1613  av_log(avctx, AV_LOG_ERROR, "Requested reference %d not available\n", ref);
1614  return AVERROR_INVALIDDATA;
1615  }
1616  for (int i = 0; i < 8; i++)
1617  ff_progress_frame_replace(&s->next_refs[i], &s->s.refs[i]);
1618  ff_thread_finish_setup(avctx);
1619  ff_progress_frame_await(&s->s.refs[ref], INT_MAX);
1620  ff_cbs_fragment_reset(&s->current_frag);
1621 
1622  if ((ret = av_frame_ref(frame, s->s.refs[ref].f)) < 0)
1623  return ret;
1624  frame->pts = pkt->pts;
1625  frame->pkt_dts = pkt->dts;
1626  *got_frame = 1;
1627  return pkt->size;
1628  }
1629  data += ret;
1630  size -= ret;
1631 
1632  src = !s->s.h.keyframe && !s->s.h.intraonly && !s->s.h.errorres ?
1633  &s->s.frames[CUR_FRAME] : &s->s.frames[BLANK_FRAME];
1634  if (!retain_segmap_ref || s->s.h.keyframe || s->s.h.intraonly)
1635  vp9_frame_replace(&s->s.frames[REF_FRAME_SEGMAP], src);
1636  vp9_frame_replace(&s->s.frames[REF_FRAME_MVPAIR], src);
1637  vp9_frame_unref(&s->s.frames[CUR_FRAME]);
1638  if ((ret = vp9_frame_alloc(avctx, &s->s.frames[CUR_FRAME])) < 0)
1639  return ret;
1640 
1641  s->s.frames[CUR_FRAME].header_ref = av_refstruct_ref(s->header_ref);
1642  s->s.frames[CUR_FRAME].frame_header = s->frame_header;
1643 
1644  f = s->s.frames[CUR_FRAME].tf.f;
1645  if (s->s.h.keyframe)
1646  f->flags |= AV_FRAME_FLAG_KEY;
1647  else
1648  f->flags &= ~AV_FRAME_FLAG_KEY;
1649  if (s->s.h.lossless)
1650  f->flags |= AV_FRAME_FLAG_LOSSLESS;
1651  else
1652  f->flags &= ~AV_FRAME_FLAG_LOSSLESS;
1653  f->pict_type = (s->s.h.keyframe || s->s.h.intraonly) ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
1654 
1655  // Non-existent frames have the implicit dimension 0x0 != CUR_FRAME
1656  if (!s->s.frames[REF_FRAME_MVPAIR].tf.f ||
1657  (s->s.frames[REF_FRAME_MVPAIR].tf.f->width != s->s.frames[CUR_FRAME].tf.f->width ||
1658  s->s.frames[REF_FRAME_MVPAIR].tf.f->height != s->s.frames[CUR_FRAME].tf.f->height)) {
1659  vp9_frame_unref(&s->s.frames[REF_FRAME_SEGMAP]);
1660  }
1661 
1662  // ref frame setup
1663  for (i = 0; i < 8; i++) {
1664  ff_progress_frame_replace(&s->next_refs[i],
1665  s->s.h.refreshrefmask & (1 << i) ?
1666  &s->s.frames[CUR_FRAME].tf : &s->s.refs[i]);
1667  }
1668 
1669  if (avctx->hwaccel) {
1670  const FFHWAccel *hwaccel = ffhwaccel(avctx->hwaccel);
1671  ret = hwaccel->start_frame(avctx, pkt->buf, pkt->data, pkt->size);
1672  if (ret < 0)
1673  return ret;
1674  ret = hwaccel->decode_slice(avctx, pkt->data, pkt->size);
1675  if (ret < 0)
1676  return ret;
1677  ret = hwaccel->end_frame(avctx);
1678  if (ret < 0)
1679  return ret;
1680 
1681  for (i = 0; i < 8; i++) {
1682  vp9_frame_replace(&s->s.ref_frames[i],
1683  s->s.h.refreshrefmask & (1 << i) ?
1684  &s->s.frames[CUR_FRAME] : &s->s.ref_frames[i]);
1685  }
1686 
1687  goto finish;
1688  }
1689 
1690  // main tile decode loop
1691  memset(s->above_partition_ctx, 0, s->cols);
1692  memset(s->above_skip_ctx, 0, s->cols);
1693  if (s->s.h.keyframe || s->s.h.intraonly) {
1694  memset(s->above_mode_ctx, DC_PRED, s->cols * 2);
1695  } else {
1696  memset(s->above_mode_ctx, NEARESTMV, s->cols);
1697  }
1698  memset(s->above_y_nnz_ctx, 0, s->sb_cols * 16);
1699  memset(s->above_uv_nnz_ctx[0], 0, s->sb_cols * 16 >> s->ss_h);
1700  memset(s->above_uv_nnz_ctx[1], 0, s->sb_cols * 16 >> s->ss_h);
1701  memset(s->above_segpred_ctx, 0, s->cols);
1702  s->pass = s->s.frames[CUR_FRAME].uses_2pass =
1703  avctx->active_thread_type == FF_THREAD_FRAME && s->s.h.refreshctx && !s->s.h.parallelmode;
1704  if ((ret = update_block_buffers(avctx)) < 0) {
1705  av_log(avctx, AV_LOG_ERROR,
1706  "Failed to allocate block buffers\n");
1707  return ret;
1708  }
1709  if (s->s.h.refreshctx && s->s.h.parallelmode) {
1710  int j, k, l, m;
1711 
1712  for (i = 0; i < 4; i++) {
1713  for (j = 0; j < 2; j++)
1714  for (k = 0; k < 2; k++)
1715  for (l = 0; l < 6; l++)
1716  for (m = 0; m < 6; m++)
1717  memcpy(s->prob_ctx[s->s.h.framectxid].coef[i][j][k][l][m],
1718  s->prob.coef[i][j][k][l][m], 3);
1719  if (s->s.h.txfmmode == i)
1720  break;
1721  }
1722  s->prob_ctx[s->s.h.framectxid].p = s->prob.p;
1723  ff_thread_finish_setup(avctx);
1724  } else if (!s->s.h.refreshctx) {
1725  ff_thread_finish_setup(avctx);
1726  }
1727 
1728 #if HAVE_THREADS
1729  if (avctx->active_thread_type & FF_THREAD_SLICE) {
1730  for (i = 0; i < s->sb_rows; i++)
1731  atomic_init(&s->entries[i], 0);
1732  }
1733 #endif
1734 
1735  do {
1736  for (i = 0; i < s->active_tile_cols; i++) {
1737  s->td[i].b = s->td[i].b_base;
1738  s->td[i].block = s->td[i].block_base;
1739  s->td[i].uvblock[0] = s->td[i].uvblock_base[0];
1740  s->td[i].uvblock[1] = s->td[i].uvblock_base[1];
1741  s->td[i].eob = s->td[i].eob_base;
1742  s->td[i].uveob[0] = s->td[i].uveob_base[0];
1743  s->td[i].uveob[1] = s->td[i].uveob_base[1];
1744  s->td[i].error_info = 0;
1745  }
1746 
1747 #if HAVE_THREADS
1748  if (avctx->active_thread_type == FF_THREAD_SLICE) {
1749  int tile_row, tile_col;
1750 
1751  av_assert1(!s->pass);
1752 
1753  for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
1754  for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
1755  int64_t tile_size;
1756 
1757  if (tile_col == s->s.h.tiling.tile_cols - 1 &&
1758  tile_row == s->s.h.tiling.tile_rows - 1) {
1759  tile_size = size;
1760  } else {
1761  tile_size = AV_RB32(data);
1762  data += 4;
1763  size -= 4;
1764  }
1765  if (tile_size > size)
1766  return AVERROR_INVALIDDATA;
1767  ret = ff_vpx_init_range_decoder(&s->td[tile_col].c_b[tile_row], data, tile_size);
1768  if (ret < 0)
1769  return ret;
1770  if (vpx_rac_get_prob_branchy(&s->td[tile_col].c_b[tile_row], 128)) // marker bit
1771  return AVERROR_INVALIDDATA;
1772  data += tile_size;
1773  size -= tile_size;
1774  }
1775  }
1776 
1777  ff_slice_thread_execute_with_mainfunc(avctx, decode_tiles_mt, loopfilter_proc, s->td, NULL, s->s.h.tiling.tile_cols);
1778  } else
1779 #endif
1780  {
1781  ret = decode_tiles(avctx, data, size);
1782  if (ret < 0)
1783  goto fail;
1784  }
1785 
1786  // Sum all counts fields into td[0].counts for tile threading
1787  if (avctx->active_thread_type == FF_THREAD_SLICE)
1788  for (i = 1; i < s->s.h.tiling.tile_cols; i++)
1789  for (j = 0; j < sizeof(s->td[i].counts) / sizeof(unsigned); j++)
1790  ((unsigned *)&s->td[0].counts)[j] += ((unsigned *)&s->td[i].counts)[j];
1791 
1792  if (s->pass < 2 && s->s.h.refreshctx && !s->s.h.parallelmode) {
1794  ff_thread_finish_setup(avctx);
1795  }
1796  } while (s->pass++ == 1);
1797 
1798  if (s->td->error_info < 0) {
1799  av_log(avctx, AV_LOG_ERROR, "Failed to decode tile data\n");
1800  s->td->error_info = 0;
1802  goto fail;
1803  }
1805  ret = vp9_export_enc_params(s, &s->s.frames[CUR_FRAME]);
1806  if (ret < 0)
1807  goto fail;
1808  }
1809 
1810 finish:
1811  ff_cbs_fragment_reset(&s->current_frag);
1812 
1813  ff_progress_frame_report(&s->s.frames[CUR_FRAME].tf, INT_MAX);
1814  // ref frame setup
1815  for (int i = 0; i < 8; i++)
1816  ff_progress_frame_replace(&s->s.refs[i], &s->next_refs[i]);
1817 
1818  if (!s->s.h.invisible) {
1819  if ((ret = av_frame_ref(frame, s->s.frames[CUR_FRAME].tf.f)) < 0)
1820  return ret;
1821  *got_frame = 1;
1822  }
1823 
1824  return pkt->size;
1825 fail:
1826  ff_progress_frame_report(&s->s.frames[CUR_FRAME].tf, INT_MAX);
1827  return ret;
1828 }
1829 
1831 {
1832  VP9Context *s = avctx->priv_data;
1833  int i;
1834 
1835  for (i = 0; i < 3; i++)
1836  vp9_frame_unref(&s->s.frames[i]);
1837 
1838  for (i = 0; i < 8; i++) {
1839  ff_progress_frame_unref(&s->s.refs[i]);
1840  vp9_frame_unref(&s->s.ref_frames[i]);
1841  }
1842 
1843  ff_cbs_fragment_reset(&s->current_frag);
1844  ff_cbs_flush(s->cbc);
1845 
1846  if (FF_HW_HAS_CB(avctx, flush))
1847  FF_HW_SIMPLE_CALL(avctx, flush);
1848 }
1849 
1851 {
1852  VP9Context *s = avctx->priv_data;
1853  int ret;
1854 
1855  s->last_bpp = 0;
1856  s->s.h.filter.sharpness = -1;
1857 
1858  ret = ff_cbs_init(&s->cbc, AV_CODEC_ID_VP9, avctx);
1859  if (ret < 0)
1860  return ret;
1861 
1862 #if HAVE_THREADS
1863  if (avctx->active_thread_type & FF_THREAD_SLICE) {
1864  ret = ff_pthread_init(s, vp9_context_offsets);
1865  if (ret < 0)
1866  return ret;
1867  }
1868 #endif
1869 
1870  return 0;
1871 }
1872 
1873 #if HAVE_THREADS
/**
 * Frame-threading context-update callback: copy the decoder state that a
 * subsequent frame thread needs from the source thread's context into the
 * destination context (frames, references, pools, header state, probability
 * contexts and the plain-value header fields used across frames).
 */
static int vp9_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
    VP9Context *s = dst->priv_data, *ssrc = src->priv_data;

    for (int i = 0; i < 3; i++)
        vp9_frame_replace(&s->s.frames[i], &ssrc->s.frames[i]);
    for (int i = 0; i < 8; i++)
        ff_progress_frame_replace(&s->s.refs[i], &ssrc->next_refs[i]);
    av_refstruct_replace(&s->frame_extradata_pool, ssrc->frame_extradata_pool);
    s->frame_extradata_pool_size = ssrc->frame_extradata_pool_size;

    av_refstruct_replace(&s->header_ref, ssrc->header_ref);
    for (int i = 0; i < 8; i++)
        vp9_frame_replace(&s->s.ref_frames[i], &ssrc->s.ref_frames[i]);

    s->frame_header = ssrc->frame_header;
    // mirror the CBS parser's private state so header parsing continues
    // from the same point in the destination context
    memcpy(s->cbc->priv_data, ssrc->cbc->priv_data, sizeof(CodedBitstreamVP9Context));

    s->s.h.invisible = ssrc->s.h.invisible;
    s->s.h.keyframe = ssrc->s.h.keyframe;
    s->s.h.intraonly = ssrc->s.h.intraonly;
    s->ss_v = ssrc->ss_v;
    s->ss_h = ssrc->ss_h;
    s->s.h.segmentation.enabled = ssrc->s.h.segmentation.enabled;
    s->s.h.segmentation.update_map = ssrc->s.h.segmentation.update_map;
    s->s.h.segmentation.absolute_vals = ssrc->s.h.segmentation.absolute_vals;
    s->bytesperpixel = ssrc->bytesperpixel;
    s->gf_fmt = ssrc->gf_fmt;
    s->w = ssrc->w;
    s->h = ssrc->h;
    s->s.h.bpp = ssrc->s.h.bpp;
    s->bpp_index = ssrc->bpp_index;
    s->pix_fmt = ssrc->pix_fmt;
    memcpy(&s->prob_ctx, &ssrc->prob_ctx, sizeof(s->prob_ctx));
    memcpy(&s->s.h.lf_delta, &ssrc->s.h.lf_delta, sizeof(s->s.h.lf_delta));
    memcpy(&s->s.h.segmentation.feat, &ssrc->s.h.segmentation.feat,
           sizeof(s->s.h.segmentation.feat));

    return 0;
}
1914 #endif
1915 
1917  .p.name = "vp9",
1918  CODEC_LONG_NAME("Google VP9"),
1919  .p.type = AVMEDIA_TYPE_VIDEO,
1920  .p.id = AV_CODEC_ID_VP9,
1921  .priv_data_size = sizeof(VP9Context),
1922  .init = vp9_decode_init,
1926  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
1929  .flush = vp9_decode_flush,
1930  UPDATE_THREAD_CONTEXT(vp9_decode_update_thread_context),
1931  .p.profiles = NULL_IF_CONFIG_SMALL(ff_vp9_profiles),
1932  .bsfs = "vp9_superframe_split",
1933  .hw_configs = (const AVCodecHWConfigInternal *const []) {
1934 #if CONFIG_VP9_DXVA2_HWACCEL
1935  HWACCEL_DXVA2(vp9),
1936 #endif
1937 #if CONFIG_VP9_D3D11VA_HWACCEL
1938  HWACCEL_D3D11VA(vp9),
1939 #endif
1940 #if CONFIG_VP9_D3D11VA2_HWACCEL
1941  HWACCEL_D3D11VA2(vp9),
1942 #endif
1943 #if CONFIG_VP9_D3D12VA_HWACCEL
1944  HWACCEL_D3D12VA(vp9),
1945 #endif
1946 #if CONFIG_VP9_NVDEC_HWACCEL
1947  HWACCEL_NVDEC(vp9),
1948 #endif
1949 #if CONFIG_VP9_VAAPI_HWACCEL
1950  HWACCEL_VAAPI(vp9),
1951 #endif
1952 #if CONFIG_VP9_VDPAU_HWACCEL
1953  HWACCEL_VDPAU(vp9),
1954 #endif
1955 #if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
1956  HWACCEL_VIDEOTOOLBOX(vp9),
1957 #endif
1958 #if CONFIG_VP9_VULKAN_HWACCEL
1959  HWACCEL_VULKAN(vp9),
1960 #endif
1961  NULL
1962  },
1963 };
VP9TileData::left_y_nnz_ctx
uint8_t left_y_nnz_ctx[16]
Definition: vp9dec.h:216
HWACCEL_D3D12VA
#define HWACCEL_D3D12VA(codec)
Definition: hwconfig.h:80
AVVideoEncParams::qp
int32_t qp
Base quantisation parameter for the frame.
Definition: video_enc_params.h:103
hwconfig.h
ff_progress_frame_report
void ff_progress_frame_report(ProgressFrame *f, int n)
Notify later decoding threads when part of their reference frame is ready.
Definition: decode.c:1922
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1413
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:260
FF_CODEC_CAP_SLICE_THREAD_HAS_MF
#define FF_CODEC_CAP_SLICE_THREAD_HAS_MF
Codec initializes slice-based threading with a main function.
Definition: codec_internal.h:64
decode_tiles
static int decode_tiles(AVCodecContext *avctx, const uint8_t *data, int size)
Definition: vp9.c:1290
CodedBitstreamUnit::content_ref
void * content_ref
If content is reference counted, a RefStruct reference backing content.
Definition: cbs.h:119
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
vp9_frame_alloc
static int vp9_frame_alloc(AVCodecContext *avctx, VP9Frame *f)
Definition: vp9.c:107
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
PRED_SWITCHABLE
@ PRED_SWITCHABLE
Definition: vp9shared.h:53
PRED_SINGLEREF
@ PRED_SINGLEREF
Definition: vp9shared.h:51
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:667
VP9TileData::uvblock_base
int16_t * uvblock_base[2]
Definition: vp9dec.h:232
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1208
VP9TileData::partition
unsigned partition[4][4][4]
Definition: vp9dec.h:207
VP9Frame
Definition: vp9shared.h:66
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:124
ff_vp9_decoder
const FFCodec ff_vp9_decoder
Definition: vp9.c:1916
decode_sb
static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
Definition: vp9.c:1116
ff_vp9_adapt_probs
void ff_vp9_adapt_probs(VP9Context *s)
Definition: vp9prob.c:44
CodedBitstreamUnit::content
void * content
Pointer to the decomposed form of this unit.
Definition: cbs.h:114
int64_t
long long int64_t
Definition: coverity.c:34
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:254
VP9TileData::left_skip_ctx
uint8_t left_skip_ctx[8]
Definition: vp9dec.h:221
VP9TileData::row
int row
Definition: vp9dec.h:177
PRED_COMPREF
@ PRED_COMPREF
Definition: vp9shared.h:52
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
pixdesc.h
HWACCEL_DXVA2
#define HWACCEL_DXVA2(codec)
Definition: hwconfig.h:64
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:777
BlockPartition
BlockPartition
Definition: vp9shared.h:36
AVPacket::data
uint8_t * data
Definition: packet.h:588
DC_PRED
@ DC_PRED
Definition: vp9.h:48
pthread_mutex_lock
static av_always_inline int pthread_mutex_lock(pthread_mutex_t *mutex)
Definition: os2threads.h:119
HWACCEL_D3D11VA2
#define HWACCEL_D3D11VA2(codec)
Definition: hwconfig.h:66
b
#define b
Definition: input.c:42
ff_progress_frame_get_buffer
int ff_progress_frame_get_buffer(AVCodecContext *avctx, ProgressFrame *f, int flags)
Wrapper around ff_progress_frame_alloc() and ff_thread_get_buffer().
Definition: decode.c:1882
data
const char data[16]
Definition: mxf.c:149
update_size
static int update_size(AVCodecContext *avctx, int w, int h)
Definition: vp9.c:165
decode_sb_mem
static void decode_sb_mem(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
Definition: vp9.c:1196
REF_FRAME_SEGMAP
#define REF_FRAME_SEGMAP
Definition: vp9shared.h:174
decode_frame_header
static int decode_frame_header(AVCodecContext *avctx, const uint8_t *data, int size, int *ref)
Definition: vp9.c:519
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:539
atomic_int
intptr_t atomic_int
Definition: stdatomic.h:55
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:254
FFCodec
Definition: codec_internal.h:127
VP9TileData::c_b
VPXRangeCoder * c_b
Definition: vp9dec.h:175
AVCOL_SPC_RGB
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1
Definition: pixfmt.h:701
VP9TileData::left_segpred_ctx
uint8_t left_segpred_ctx[8]
Definition: vp9dec.h:223
FF_HW_SIMPLE_CALL
#define FF_HW_SIMPLE_CALL(avctx, function)
Definition: hwaccel_internal.h:176
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:106
max
#define max(a, b)
Definition: cuda_runtime.h:33
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
VP9_SYNCCODE
#define VP9_SYNCCODE
Definition: vp9.c:50
VP9Block::bl
enum BlockLevel bl
Definition: vp9dec.h:91
vp89_rac.h
VP9Filter
Definition: vp9dec.h:79
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
VP9TileData::b
VP9Block * b
Definition: vp9dec.h:180
VPXRangeCoder
Definition: vpx_rac.h:35
thread.h
ff_pthread_free
av_cold void ff_pthread_free(void *obj, const unsigned offsets[])
Definition: pthread.c:92
AV_PIX_FMT_VULKAN
@ AV_PIX_FMT_VULKAN
Vulkan hardware images.
Definition: pixfmt.h:379
FILTER_SWITCHABLE
@ FILTER_SWITCHABLE
Definition: vp9.h:70
CodedBitstreamUnit
Coded bitstream unit structure.
Definition: cbs.h:77
VP9Block
Definition: vp9dec.h:85
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:383
close
static av_cold void close(AVCodecParserContext *s)
Definition: apv_parser.c:197
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:706
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:337
AVCOL_SPC_RESERVED
@ AVCOL_SPC_RESERVED
reserved for future use by ITU-T and ISO/IEC just like 15-255 are
Definition: pixfmt.h:704
TX_SWITCHABLE
@ TX_SWITCHABLE
Definition: vp9.h:33
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
finish
static void finish(void)
Definition: movenc.c:374
FFHWAccel
Definition: hwaccel_internal.h:34
ff_vp9_ac_qlookup
const int16_t ff_vp9_ac_qlookup[3][256]
Definition: vp9data.c:334
AVVideoEncParams::delta_qp
int32_t delta_qp[4][2]
Quantisation parameter offset from the base (per-frame) qp for a given plane (first index) and AC/DC ...
Definition: video_enc_params.h:109
fail
#define fail()
Definition: checkasm.h:218
AV_REFSTRUCT_POOL_FLAG_ZERO_EVERY_TIME
#define AV_REFSTRUCT_POOL_FLAG_ZERO_EVERY_TIME
If this flag is set, the entries will be zeroed before being returned to the user (after the init or ...
Definition: refstruct.h:221
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:558
GetBitContext
Definition: get_bits.h:109
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:496
HWACCEL_VDPAU
#define HWACCEL_VDPAU(codec)
Definition: hwconfig.h:72
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:39
PARTITION_NONE
@ PARTITION_NONE
Definition: vp9shared.h:37
vp9_frame_unref
static void vp9_frame_unref(VP9Frame *f)
Definition: vp9.c:98
progressframe.h
refstruct.h
AVVideoEncParams
Video encoding parameters for a given frame.
Definition: video_enc_params.h:73
VP9TileData::col
int col
Definition: vp9dec.h:177
vp9_decode_free
static av_cold int vp9_decode_free(AVCodecContext *avctx)
Definition: vp9.c:1262
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:542
avassert.h
FF_CODEC_CAP_USES_PROGRESSFRAMES
#define FF_CODEC_CAP_USES_PROGRESSFRAMES
The decoder might make use of the ProgressFrame API.
Definition: codec_internal.h:68
ff_vp9_model_pareto8
const uint8_t ff_vp9_model_pareto8[256][8]
Definition: vp9data.c:1176
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:106
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:544
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1646
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:642
BL_8X8
@ BL_8X8
Definition: vp9shared.h:83
PARTITION_V
@ PARTITION_V
Definition: vp9shared.h:39
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:347
ff_hwaccel_frame_priv_alloc
int ff_hwaccel_frame_priv_alloc(AVCodecContext *avctx, void **hwaccel_picture_private)
Allocate a hwaccel frame private data if the provided avctx uses a hwaccel method that needs it.
Definition: decode.c:2279
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:134
s
#define s(width, name)
Definition: cbs_vp9.c:198
pthread_mutex_unlock
static av_always_inline int pthread_mutex_unlock(pthread_mutex_t *mutex)
Definition: os2threads.h:126
AVCOL_SPC_SMPTE170M
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
Definition: pixfmt.h:707
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:411
AV_CODEC_ID_VP9
@ AV_CODEC_ID_VP9
Definition: codec_id.h:222
vp9data.h
bits
uint8_t bits
Definition: vp3data.h:128
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:42
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
ff_progress_frame_unref
void ff_progress_frame_unref(ProgressFrame *f)
Give up a reference to the underlying frame contained in a ProgressFrame and reset the ProgressFrame,...
Definition: decode.c:1905
ff_progress_frame_await
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_progress_frame_await() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_progress_frame_report() has been called on them. This includes draw_edges(). Porting codecs to frame threading
decode.h
get_bits.h
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
VP9TileData::block_size_idx_x
unsigned int block_size_idx_x
Definition: vp9dec.h:240
ff_vp9dsp_init
av_cold void ff_vp9dsp_init(VP9DSPContext *dsp, int bpp, int bitexact)
Definition: vp9dsp.c:88
ff_vp9_partition_tree
const int8_t ff_vp9_partition_tree[3][2]
Definition: vp9data.c:35
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
av_mallocz
#define av_mallocz(s)
Definition: tableprint_vlc.h:31
vp9_decode_frame
static int vp9_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
Definition: vp9.c:1581
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:332
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:95
AVPacket::buf
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
Definition: packet.h:571
NULL
#define NULL
Definition: coverity.c:32
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:677
hwaccel_internal.h
VP9Context
Definition: vp9dec.h:97
av_unreachable
#define av_unreachable(msg)
Asserts that are used as compiler optimization hints depending upon ASSERT_LEVEL and NBDEBUG.
Definition: avassert.h:116
REF_FRAME_MVPAIR
#define REF_FRAME_MVPAIR
Definition: vp9shared.h:173
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:391
vp89_rac_get_uint
static av_unused int vp89_rac_get_uint(VPXRangeCoder *c, int bits)
Definition: vp89_rac.h:41
profiles.h
AV_PIX_FMT_YUV440P10
#define AV_PIX_FMT_YUV440P10
Definition: pixfmt.h:541
flush
void(* flush)(AVBSFContext *ctx)
Definition: dts2pts.c:552
av_refstruct_pool_get
void * av_refstruct_pool_get(AVRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.
Definition: refstruct.c:297
pthread_internal.h
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:341
AV_PIX_FMT_D3D12
@ AV_PIX_FMT_D3D12
Hardware surfaces for Direct3D 12.
Definition: pixfmt.h:440
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:540
VP9mv
Definition: vp9shared.h:56
PARTITION_SPLIT
@ PARTITION_SPLIT
Definition: vp9shared.h:40
FF_HW_HAS_CB
#define FF_HW_HAS_CB(avctx, function)
Definition: hwaccel_internal.h:179
VP9RawFrame
Definition: cbs_vp9.h:164
atomic_load_explicit
#define atomic_load_explicit(object, order)
Definition: stdatomic.h:96
VP9TileData::counts
struct VP9TileData::@322 counts
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
vp9_frame_replace
static void vp9_frame_replace(VP9Frame *dst, const VP9Frame *src)
Definition: vp9.c:148
VP9RawFrame::header
VP9RawFrameHeader header
Definition: cbs_vp9.h:165
av_video_enc_params_create_side_data
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVEncodeInfoFrame plus an array of.
Definition: video_enc_params.c:58
vp9.h
f
f
Definition: af_crystalizer.c:122
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:550
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:589
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
codec_internal.h
VP9TileData::eob_base
uint8_t * eob_base
Definition: vp9dec.h:233
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
pix_fmt_rgb
static enum AVPixelFormat pix_fmt_rgb[3]
Definition: libdav1d.c:66
REF_INVALID_SCALE
#define REF_INVALID_SCALE
Definition: vp9dec.h:43
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
read_colorspace_details
static int read_colorspace_details(AVCodecContext *avctx)
Definition: vp9.c:457
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:544
VP9TileData::block_structure
struct VP9TileData::@324 * block_structure
size
int size
Definition: twinvq_data.h:10344
vp9_alloc_entries
static int vp9_alloc_entries(AVCodecContext *avctx, int n)
Definition: vp9.c:88
atomic_fetch_add_explicit
#define atomic_fetch_add_explicit(object, operand, order)
Definition: stdatomic.h:149
VP9TileData::b_base
VP9Block * b_base
Definition: vp9dec.h:180
free_buffers
static void free_buffers(VP9Context *s)
Definition: vp9.c:1253
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:546
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1581
AVCodecHWConfigInternal
Definition: hwconfig.h:25
TX_4X4
@ TX_4X4
Definition: vp9.h:28
update_block_buffers
static int update_block_buffers(AVCodecContext *avctx)
Definition: vp9.c:322
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:587
av_refstruct_ref
void * av_refstruct_ref(void *obj)
Create a new reference to an object managed via this API, i.e.
Definition: refstruct.c:140
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:99
CodedBitstreamVP9Context
Definition: cbs_vp9.h:192
HWACCEL_D3D11VA
#define HWACCEL_D3D11VA(codec)
Definition: hwconfig.h:78
attributes.h
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:336
VP9TileData::block_base
int16_t * block_base
Definition: vp9dec.h:232
inv_recenter_nonneg
static av_always_inline int inv_recenter_nonneg(int v, int m)
Definition: vp9.c:389
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:68
vpx_rac_is_end
static av_always_inline int vpx_rac_is_end(VPXRangeCoder *c)
returns 1 if the end of the stream has been reached, 0 otherwise.
Definition: vpx_rac.h:51
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:126
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1580
VP9TileData::left_uv_nnz_ctx
uint8_t left_uv_nnz_ctx[2][16]
Definition: vp9dec.h:219
av_refstruct_unref
void av_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are...
Definition: refstruct.c:120
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:194
ff_slice_thread_execute_with_mainfunc
int ff_slice_thread_execute_with_mainfunc(AVCodecContext *avctx, action_func2 *func2, main_func *mainfunc, void *arg, int *ret, int job_count)
Definition: pthread_slice.c:104
AVCOL_SPC_SMPTE240M
@ AVCOL_SPC_SMPTE240M
derived from 170M primaries and D65 white point, 170M is derived from BT470 System M's primaries
Definition: pixfmt.h:708
assign
#define assign(var, type, n)
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:305
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:68
update_prob
static int update_prob(VPXRangeCoder *c, int p)
Definition: vp9.c:399
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:581
DEFINE_OFFSET_ARRAY
#define DEFINE_OFFSET_ARRAY(type, name, cnt_variable, mutexes, conds)
Definition: pthread_internal.h:61
AVCOL_SPC_BT2020_NCL
@ AVCOL_SPC_BT2020_NCL
ITU-R BT2020 non-constant luminance system.
Definition: pixfmt.h:711
vpx_rac.h
decode012
static int BS_FUNC() decode012(BSCTX *bc)
Return decoded truncated unary code for the values 0, 1, 2.
Definition: bitstream_template.h:444
VP9TileData::block_size_idx_y
unsigned int block_size_idx_y
Definition: vp9dec.h:241
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:559
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:700
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:58
av_always_inline
#define av_always_inline
Definition: attributes.h:63
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
vpx_rac_get_prob_branchy
static av_always_inline int vpx_rac_get_prob_branchy(VPXRangeCoder *c, int prob)
Definition: vpx_rac.h:99
AVVideoBlockParams
Data structure for storing block-level encoding information.
Definition: video_enc_params.h:120
get_sbits_inv
static av_always_inline int get_sbits_inv(GetBitContext *gb, int n)
Definition: vp9.c:383
VP9TileData::left_mode_ctx
uint8_t left_mode_ctx[16]
Definition: vp9dec.h:217
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:703
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:639
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:760
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
VP9TileData::c
VPXRangeCoder * c
Definition: vp9dec.h:176
HWACCEL_VIDEOTOOLBOX
#define HWACCEL_VIDEOTOOLBOX(codec)
Definition: hwconfig.h:74
avcodec.h
limit
static double limit(double x)
Definition: vf_pseudocolor.c:142
vp89_rac_get_tree
static av_always_inline int vp89_rac_get_tree(VPXRangeCoder *c, const int8_t(*tree)[2], const uint8_t *probs)
Definition: vp89_rac.h:54
VP9TileData::s
const VP9Context * s
Definition: vp9dec.h:174
BL_64X64
@ BL_64X64
Definition: vp9shared.h:80
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
vp9_decode_init
static av_cold int vp9_decode_init(AVCodecContext *avctx)
Definition: vp9.c:1850
tile
static int FUNC() tile(CodedBitstreamContext *ctx, RWContext *rw, APVRawTile *current, int tile_idx, uint32_t tile_size)
Definition: cbs_apv_syntax_template.c:224
av_malloc
void * av_malloc(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:98
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:560
hwaccel
static const char * hwaccel
Definition: ffplay.c:356
ff_vpx_init_range_decoder
int ff_vpx_init_range_decoder(VPXRangeCoder *c, const uint8_t *buf, int buf_size)
Definition: vpx_rac.c:42
av_refstruct_pool_alloc
AVRefStructPool * av_refstruct_pool_alloc(size_t size, unsigned flags)
Equivalent to av_refstruct_pool_alloc(size, flags, NULL, NULL, NULL, NULL, NULL)
Definition: refstruct.c:335
vp9_tile_data_free
static void vp9_tile_data_free(VP9TileData *td)
Definition: vp9.c:91
ff_thread_finish_setup
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards. If some code can 't be moved
VP9mvrefPair
Definition: vp9shared.h:61
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:543
pthread_cond_signal
static av_always_inline int pthread_cond_signal(pthread_cond_t *cond)
Definition: os2threads.h:152
AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only.
Definition: avcodec.h:395
ff_progress_frame_replace
void ff_progress_frame_replace(ProgressFrame *dst, const ProgressFrame *src)
Do nothing if dst and src already refer to the same AVFrame; otherwise unreference dst and if src is ...
Definition: decode.c:1912
VP9TileData
Definition: vp9dec.h:173
VP9TileData::uveob_base
uint8_t * uveob_base[2]
Definition: vp9dec.h:233
HWACCEL_VULKAN
#define HWACCEL_VULKAN(codec)
Definition: hwconfig.h:76
vp89_rac_get
static av_always_inline int vp89_rac_get(VPXRangeCoder *c)
Definition: vp89_rac.h:36
AVCodecContext
main external API structure.
Definition: avcodec.h:439
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1588
VP9Filter::mask
uint8_t mask[2][2][8][4]
Definition: vp9dec.h:82
av_refstruct_replace
void av_refstruct_replace(void *dstp, const void *src)
Ensure *dstp refers to the same object as src.
Definition: refstruct.c:160
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1626
ffhwaccel
static const FFHWAccel * ffhwaccel(const AVHWAccel *codec)
Definition: hwaccel_internal.h:168
ff_vp9_decode_block
void ff_vp9_decode_block(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl, enum BlockPartition bp)
Definition: vp9block.c:1264
NEARESTMV
@ NEARESTMV
Definition: vp9shared.h:44
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:117
BlockLevel
BlockLevel
Definition: vp9shared.h:79
AVCodecContext::export_side_data
int export_side_data
Bit set of AV_CODEC_EXPORT_DATA_* flags, which affects the kind of metadata exported in frame,...
Definition: avcodec.h:1782
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
ff_pthread_init
av_cold int ff_pthread_init(void *obj, const unsigned offsets[])
Initialize/destroy a list of mutexes/conditions contained in a structure.
Definition: pthread.c:105
pthread_cond_wait
static av_always_inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
Definition: os2threads.h:192
vp9dec.h
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:279
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
ff_vp9_default_kf_partition_probs
const uint8_t ff_vp9_default_kf_partition_probs[4][4][3]
Definition: vp9data.c:41
AV_VIDEO_ENC_PARAMS_VP9
@ AV_VIDEO_ENC_PARAMS_VP9
VP9 stores:
Definition: video_enc_params.h:44
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:322
ff_vp9_default_probs
const ProbContext ff_vp9_default_probs
Definition: vp9data.c:1435
CUR_FRAME
#define CUR_FRAME
Definition: vp9shared.h:172
ff_vp9_loopfilter_sb
void ff_vp9_loopfilter_sb(struct AVCodecContext *avctx, VP9Filter *lflvl, int row, int col, ptrdiff_t yoff, ptrdiff_t uvoff)
Definition: vp9lpf.c:179
w
uint8_t w
Definition: llvidencdsp.c:39
av_refstruct_pool_uninit
static void av_refstruct_pool_uninit(AVRefStructPool **poolp)
Mark the pool as being available for freeing.
Definition: refstruct.h:292
vp9_export_enc_params
static int vp9_export_enc_params(VP9Context *s, VP9Frame *frame)
Definition: vp9.c:1529
AVPacket
This structure stores compressed data.
Definition: packet.h:565
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:466
PARTITION_H
@ PARTITION_H
Definition: vp9shared.h:38
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
videodsp.h
BLANK_FRAME
#define BLANK_FRAME
Definition: vp9shared.h:175
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:70
AVCodecContext::properties
attribute_deprecated unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1645
HWACCEL_MAX
#define HWACCEL_MAX
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_video_enc_params_block
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
Get the block at the specified.
Definition: video_enc_params.h:143
AV_PIX_FMT_YUV440P12
#define AV_PIX_FMT_YUV440P12
Definition: pixfmt.h:545
h
h
Definition: vp9dsp_template.c:2070
pkt
static AVPacket * pkt
Definition: demux_decode.c:55
atomic_init
#define atomic_init(obj, value)
Definition: stdatomic.h:33
VP9TileData::nb_block_structure
unsigned int nb_block_structure
Definition: vp9dec.h:243
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
Definition: pixfmt.h:702
VP9TileData::tile_col_start
unsigned tile_col_start
Definition: vp9dec.h:181
AV_FRAME_FLAG_LOSSLESS
#define AV_FRAME_FLAG_LOSSLESS
A decoder can use this flag to mark frames which were originally encoded losslessly.
Definition: frame.h:663
src
#define src
Definition: vp8dsp.c:248
ff_vp9_profiles
const AVProfile ff_vp9_profiles[]
Definition: profiles.c:155
video_enc_params.h
set_tile_offset
static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
Definition: vp9.c:1245
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:3376
ff_vp9_dc_qlookup
const int16_t ff_vp9_dc_qlookup[3][256]
Definition: vp9data.c:231
ff_vp9_default_coef_probs
const uint8_t ff_vp9_default_coef_probs[4][2][2][6][6][3]
Definition: vp9data.c:1540
vp9_decode_flush
static av_cold void vp9_decode_flush(AVCodecContext *avctx)
Definition: vp9.c:1830
VP9TileData::left_partition_ctx
uint8_t left_partition_ctx[8]
Definition: vp9dec.h:220