FFmpeg
vp9.c
Go to the documentation of this file.
1 /*
2  * VP9 compatible video decoder
3  *
4  * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
5  * Copyright (C) 2013 Clément Bœsch <u pkh me>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include "config_components.h"
25 
26 #include "avcodec.h"
27 #include "codec_internal.h"
28 #include "decode.h"
29 #include "get_bits.h"
30 #include "hwaccel_internal.h"
31 #include "hwconfig.h"
32 #include "profiles.h"
33 #include "progressframe.h"
34 #include "libavutil/refstruct.h"
35 #include "thread.h"
36 #include "pthread_internal.h"
37 
38 #include "videodsp.h"
39 #include "vp89_rac.h"
40 #include "vp9.h"
41 #include "vp9data.h"
42 #include "vp9dec.h"
43 #include "vpx_rac.h"
44 #include "libavutil/attributes.h"
45 #include "libavutil/avassert.h"
46 #include "libavutil/mem.h"
47 #include "libavutil/pixdesc.h"
49 
50 #define VP9_SYNCCODE 0x498342
51 
52 #if HAVE_THREADS
53 DEFINE_OFFSET_ARRAY(VP9Context, vp9_context, pthread_init_cnt,
54  (offsetof(VP9Context, progress_mutex)),
55  (offsetof(VP9Context, progress_cond)));
56 
57 static int vp9_alloc_entries(AVCodecContext *avctx, int n) {
58  VP9Context *s = avctx->priv_data;
59 
60  if (avctx->active_thread_type & FF_THREAD_SLICE) {
61  if (s->entries)
62  av_freep(&s->entries);
63 
64  s->entries = av_malloc_array(n, sizeof(atomic_int));
65  if (!s->entries)
66  return AVERROR(ENOMEM);
67  }
68  return 0;
69 }
70 
/* Advance the decode-progress counter selected by 'field' by n finished rows
 * and wake any thread blocked in vp9_await_tile_progress().  The counter is
 * updated while holding progress_mutex so a waiter cannot miss the signal
 * between re-checking the counter and calling pthread_cond_wait(). */
71 static void vp9_report_tile_progress(VP9Context *s, int field, int n) {
72  pthread_mutex_lock(&s->progress_mutex);
 /* release ordering pairs with the acquire load on the lockless fast
  * path of vp9_await_tile_progress() */
73  atomic_fetch_add_explicit(&s->entries[field], n, memory_order_release);
74  pthread_cond_signal(&s->progress_cond);
75  pthread_mutex_unlock(&s->progress_mutex);
76 }
77 
78 static void vp9_await_tile_progress(VP9Context *s, int field, int n) {
79  if (atomic_load_explicit(&s->entries[field], memory_order_acquire) >= n)
80  return;
81 
82  pthread_mutex_lock(&s->progress_mutex);
83  while (atomic_load_explicit(&s->entries[field], memory_order_relaxed) != n)
84  pthread_cond_wait(&s->progress_cond, &s->progress_mutex);
85  pthread_mutex_unlock(&s->progress_mutex);
86 }
87 #else
88 static int vp9_alloc_entries(AVCodecContext *avctx, int n) { return 0; }
89 #endif
90 
/* Release the per-tile scratch buffers owned by a VP9TileData.
 * av_freep() NULLs the pointers, so a later re-allocation pass can safely
 * test them.
 * NOTE(review): the doxygen extraction dropped line 91 (the signature,
 * presumably "static void vp9_tile_data_free(VP9TileData *td)") and
 * line 95 (likely a free of a third buffer such as td->block_structure)
 * — confirm against the upstream vp9.c. */
92 {
93  av_freep(&td->b_base);
94  av_freep(&td->block_base);
96 }
97 
/* Drop every reference held by a VP9Frame and reset its derived pointers.
 * Safe on an already-unreferenced frame: av_refstruct_unref() is a no-op
 * for NULL entries.
 * NOTE(review): line 100 was dropped by the doxygen extraction; it
 * presumably released the frame buffer f->tf — confirm upstream. */
98 static void vp9_frame_unref(VP9Frame *f)
99 {
101  av_refstruct_unref(&f->header_ref);
102  av_refstruct_unref(&f->extradata);
103  av_refstruct_unref(&f->hwaccel_picture_private);
 /* segmentation_map pointed into f->extradata (see vp9_frame_alloc),
  * which is gone now */
104  f->segmentation_map = NULL;
105 }
106 
/* Allocate the per-frame auxiliary data of a VP9Frame: an extradata blob
 * holding the segmentation map plus the motion-vector pair array, and any
 * hwaccel-private picture data.  Returns 0 or a negative AVERROR.
 * NOTE(review): the doxygen extraction dropped several hyperlinked lines:
 * 107 (the signature), 112 (the call whose result is checked in the first
 * 'ret' test), 120 (the continuation of the pool-alloc argument list) and
 * 144 (the cleanup call under 'fail:') — consult upstream vp9.c. */
108 {
109  VP9Context *s = avctx->priv_data;
110  int ret, sz;
111 
113  if (ret < 0)
114  return ret;
115 
 /* one segmentation-map byte and one VP9mvrefPair per unit; 64 units per
  * 64x64 superblock (i.e. one per 8x8 block) */
116  sz = 64 * s->sb_cols * s->sb_rows;
117  if (sz != s->frame_extradata_pool_size) {
 /* frame dimensions changed: rebuild the buffer pool at the new size */
118  av_refstruct_pool_uninit(&s->frame_extradata_pool);
119  s->frame_extradata_pool = av_refstruct_pool_alloc(sz * (1 + sizeof(VP9mvrefPair)),
121  if (!s->frame_extradata_pool) {
122  s->frame_extradata_pool_size = 0;
123  ret = AVERROR(ENOMEM);
124  goto fail;
125  }
126  s->frame_extradata_pool_size = sz;
127  }
128  f->extradata = av_refstruct_pool_get(s->frame_extradata_pool);
129  if (!f->extradata) {
130  ret = AVERROR(ENOMEM);
131  goto fail;
132  }
133 
 /* the segmentation map occupies the first sz bytes of the blob; the
  * motion-vector array follows immediately after */
134  f->segmentation_map = f->extradata;
135  f->mv = (VP9mvrefPair *) ((char*)f->extradata + sz);
136 
137  ret = ff_hwaccel_frame_priv_alloc(avctx, &f->hwaccel_picture_private);
138  if (ret < 0)
139  goto fail;
140 
141  return 0;
142 
143 fail:
145  return ret;
146 }
147 
/* Make 'dst' reference the same frame data as 'src': bump the refcounts of
 * the shared refstruct objects and shallow-copy the borrowed pointers and
 * plain fields.
 * NOTE(review): the signature line (doxygen line 148) was dropped by the
 * extraction; presumably "static void vp9_frame_ref(VP9Frame *dst,
 * const VP9Frame *src)" or similar — confirm upstream. */
149 {
150  av_refstruct_replace(&dst->header_ref, src->header_ref);
151  dst->frame_header = src->frame_header;
152 
153  ff_progress_frame_replace(&dst->tf, &src->tf);
154 
155  av_refstruct_replace(&dst->extradata, src->extradata);
156 
 /* these point into extradata / are plain values, so a shallow copy is
  * enough once extradata itself is referenced */
157  dst->segmentation_map = src->segmentation_map;
158  dst->mv = src->mv;
159  dst->uses_2pass = src->uses_2pass;
160 
161  av_refstruct_replace(&dst->hwaccel_picture_private,
162  src->hwaccel_picture_private);
163 }
164 
165 static int update_size(AVCodecContext *avctx, int w, int h)
166 {
167 #define HWACCEL_MAX (CONFIG_VP9_DXVA2_HWACCEL + \
168  CONFIG_VP9_D3D11VA_HWACCEL * 2 + \
169  CONFIG_VP9_D3D12VA_HWACCEL + \
170  CONFIG_VP9_NVDEC_HWACCEL + \
171  CONFIG_VP9_VAAPI_HWACCEL + \
172  CONFIG_VP9_VDPAU_HWACCEL + \
173  CONFIG_VP9_VIDEOTOOLBOX_HWACCEL + \
174  CONFIG_VP9_VULKAN_HWACCEL)
175  enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
176  VP9Context *s = avctx->priv_data;
177  uint8_t *p;
178  int bytesperpixel = s->bytesperpixel, ret, cols, rows;
179  int lflvl_len, i;
180 
181  av_assert0(w > 0 && h > 0);
182 
183  if (!(s->pix_fmt == s->gf_fmt && w == s->w && h == s->h)) {
184  if ((ret = ff_set_dimensions(avctx, w, h)) < 0)
185  return ret;
186 
187  switch (s->pix_fmt) {
188  case AV_PIX_FMT_YUV420P:
190 #if CONFIG_VP9_DXVA2_HWACCEL
191  *fmtp++ = AV_PIX_FMT_DXVA2_VLD;
192 #endif
193 #if CONFIG_VP9_D3D11VA_HWACCEL
194  *fmtp++ = AV_PIX_FMT_D3D11VA_VLD;
195  *fmtp++ = AV_PIX_FMT_D3D11;
196 #endif
197 #if CONFIG_VP9_D3D12VA_HWACCEL
198  *fmtp++ = AV_PIX_FMT_D3D12;
199 #endif
200 #if CONFIG_VP9_NVDEC_HWACCEL
201  *fmtp++ = AV_PIX_FMT_CUDA;
202 #endif
203 #if CONFIG_VP9_VAAPI_HWACCEL
204  *fmtp++ = AV_PIX_FMT_VAAPI;
205 #endif
206 #if CONFIG_VP9_VDPAU_HWACCEL
207  *fmtp++ = AV_PIX_FMT_VDPAU;
208 #endif
209 #if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
210  *fmtp++ = AV_PIX_FMT_VIDEOTOOLBOX;
211 #endif
212 #if CONFIG_VP9_VULKAN_HWACCEL
213  *fmtp++ = AV_PIX_FMT_VULKAN;
214 #endif
215  break;
217 #if CONFIG_VP9_NVDEC_HWACCEL
218  *fmtp++ = AV_PIX_FMT_CUDA;
219 #endif
220 #if CONFIG_VP9_VAAPI_HWACCEL
221  *fmtp++ = AV_PIX_FMT_VAAPI;
222 #endif
223 #if CONFIG_VP9_VDPAU_HWACCEL
224  *fmtp++ = AV_PIX_FMT_VDPAU;
225 #endif
226 #if CONFIG_VP9_VULKAN_HWACCEL
227  *fmtp++ = AV_PIX_FMT_VULKAN;
228 #endif
229  break;
230  case AV_PIX_FMT_YUV444P:
233 #if CONFIG_VP9_VAAPI_HWACCEL
234  *fmtp++ = AV_PIX_FMT_VAAPI;
235 #endif
236 #if CONFIG_VP9_VULKAN_HWACCEL
237  *fmtp++ = AV_PIX_FMT_VULKAN;
238 #endif
239  break;
240  case AV_PIX_FMT_GBRP:
241  case AV_PIX_FMT_GBRP10:
242  case AV_PIX_FMT_GBRP12:
243 #if CONFIG_VP9_VAAPI_HWACCEL
244  *fmtp++ = AV_PIX_FMT_VAAPI;
245 #endif
246 #if CONFIG_VP9_VULKAN_HWACCEL
247  *fmtp++ = AV_PIX_FMT_VULKAN;
248 #endif
249  break;
250  }
251 
252  *fmtp++ = s->pix_fmt;
253  *fmtp = AV_PIX_FMT_NONE;
254 
255  ret = ff_get_format(avctx, pix_fmts);
256  if (ret < 0)
257  return ret;
258 
259  avctx->pix_fmt = ret;
260  s->gf_fmt = s->pix_fmt;
261  s->w = w;
262  s->h = h;
263  }
264 
265  cols = (w + 7) >> 3;
266  rows = (h + 7) >> 3;
267 
268  if (s->intra_pred_data[0] && cols == s->cols && rows == s->rows && s->pix_fmt == s->last_fmt)
269  return 0;
270 
271  s->last_fmt = s->pix_fmt;
272  s->sb_cols = (w + 63) >> 6;
273  s->sb_rows = (h + 63) >> 6;
274  s->cols = (w + 7) >> 3;
275  s->rows = (h + 7) >> 3;
276  lflvl_len = avctx->active_thread_type == FF_THREAD_SLICE ? s->sb_rows : 1;
277 
278 #define assign(var, type, n) var = (type) p; p += s->sb_cols * (n) * sizeof(*var)
279  av_freep(&s->intra_pred_data[0]);
280  // FIXME we slightly over-allocate here for subsampled chroma, but a little
281  // bit of padding shouldn't affect performance...
282  p = av_malloc(s->sb_cols * (128 + 192 * bytesperpixel +
283  lflvl_len * sizeof(*s->lflvl) + 16 * sizeof(*s->above_mv_ctx)));
284  if (!p)
285  return AVERROR(ENOMEM);
286  assign(s->intra_pred_data[0], uint8_t *, 64 * bytesperpixel);
287  assign(s->intra_pred_data[1], uint8_t *, 64 * bytesperpixel);
288  assign(s->intra_pred_data[2], uint8_t *, 64 * bytesperpixel);
289  assign(s->above_y_nnz_ctx, uint8_t *, 16);
290  assign(s->above_mode_ctx, uint8_t *, 16);
291  assign(s->above_mv_ctx, VP9mv(*)[2], 16);
292  assign(s->above_uv_nnz_ctx[0], uint8_t *, 16);
293  assign(s->above_uv_nnz_ctx[1], uint8_t *, 16);
294  assign(s->above_partition_ctx, uint8_t *, 8);
295  assign(s->above_skip_ctx, uint8_t *, 8);
296  assign(s->above_txfm_ctx, uint8_t *, 8);
297  assign(s->above_segpred_ctx, uint8_t *, 8);
298  assign(s->above_intra_ctx, uint8_t *, 8);
299  assign(s->above_comp_ctx, uint8_t *, 8);
300  assign(s->above_ref_ctx, uint8_t *, 8);
301  assign(s->above_filter_ctx, uint8_t *, 8);
302  assign(s->lflvl, VP9Filter *, lflvl_len);
303 #undef assign
304 
305  if (s->td) {
306  for (i = 0; i < s->active_tile_cols; i++)
307  vp9_tile_data_free(&s->td[i]);
308  }
309 
310  if (s->s.h.bpp != s->last_bpp) {
311  ff_vp9dsp_init(&s->dsp, s->s.h.bpp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
312  ff_videodsp_init(&s->vdsp, s->s.h.bpp);
313  s->last_bpp = s->s.h.bpp;
314  }
315 
316  return 0;
317 }
318 
320 {
321  int i;
322  VP9Context *s = avctx->priv_data;
323  int chroma_blocks, chroma_eobs, bytesperpixel = s->bytesperpixel;
324  VP9TileData *td = &s->td[0];
325 
326  if (td->b_base && td->block_base && s->block_alloc_using_2pass == s->s.frames[CUR_FRAME].uses_2pass)
327  return 0;
328 
329  vp9_tile_data_free(td);
330  chroma_blocks = 64 * 64 >> (s->ss_h + s->ss_v);
331  chroma_eobs = 16 * 16 >> (s->ss_h + s->ss_v);
332  if (s->s.frames[CUR_FRAME].uses_2pass) {
333  int sbs = s->sb_cols * s->sb_rows;
334 
335  td->b_base = av_malloc_array(s->cols * s->rows, sizeof(VP9Block));
336  td->block_base = av_mallocz(((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
337  16 * 16 + 2 * chroma_eobs) * sbs);
338  if (!td->b_base || !td->block_base)
339  return AVERROR(ENOMEM);
340  td->uvblock_base[0] = td->block_base + sbs * 64 * 64 * bytesperpixel;
341  td->uvblock_base[1] = td->uvblock_base[0] + sbs * chroma_blocks * bytesperpixel;
342  td->eob_base = (uint8_t *) (td->uvblock_base[1] + sbs * chroma_blocks * bytesperpixel);
343  td->uveob_base[0] = td->eob_base + 16 * 16 * sbs;
344  td->uveob_base[1] = td->uveob_base[0] + chroma_eobs * sbs;
345 
347  td->block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
348  if (!td->block_structure)
349  return AVERROR(ENOMEM);
350  }
351  } else {
352  for (i = 1; i < s->active_tile_cols; i++)
353  vp9_tile_data_free(&s->td[i]);
354 
355  for (i = 0; i < s->active_tile_cols; i++) {
356  s->td[i].b_base = av_malloc(sizeof(VP9Block));
357  s->td[i].block_base = av_mallocz((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
358  16 * 16 + 2 * chroma_eobs);
359  if (!s->td[i].b_base || !s->td[i].block_base)
360  return AVERROR(ENOMEM);
361  s->td[i].uvblock_base[0] = s->td[i].block_base + 64 * 64 * bytesperpixel;
362  s->td[i].uvblock_base[1] = s->td[i].uvblock_base[0] + chroma_blocks * bytesperpixel;
363  s->td[i].eob_base = (uint8_t *) (s->td[i].uvblock_base[1] + chroma_blocks * bytesperpixel);
364  s->td[i].uveob_base[0] = s->td[i].eob_base + 16 * 16;
365  s->td[i].uveob_base[1] = s->td[i].uveob_base[0] + chroma_eobs;
366 
368  s->td[i].block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
369  if (!s->td[i].block_structure)
370  return AVERROR(ENOMEM);
371  }
372  }
373  }
374  s->block_alloc_using_2pass = s->s.frames[CUR_FRAME].uses_2pass;
375 
376  return 0;
377 }
378 
379 // The sign bit is at the end, not the start, of a bit sequence
// i.e. read an n-bit magnitude first, then one bit that negates it —
// the reverse layout of the usual get_sbits() sign convention.
// NOTE(review): the signature line (doxygen line 380) was dropped by the
// extraction; presumably
// "static av_always_inline int get_sbits_inv(GetBitContext *gb, int n)".
381 {
382  int v = get_bits(gb, n);
383  return get_bits1(gb) ? -v : v;
384 }
385 
386 static av_always_inline int inv_recenter_nonneg(int v, int m)
387 {
388  if (v > 2 * m)
389  return v;
390  if (v & 1)
391  return m - ((v + 1) >> 1);
392  return m + (v >> 1);
393 }
394 
395 // differential forward probability updates
396 static int update_prob(VPXRangeCoder *c, int p)
397 {
398  static const uint8_t inv_map_table[255] = {
399  7, 20, 33, 46, 59, 72, 85, 98, 111, 124, 137, 150, 163, 176,
400  189, 202, 215, 228, 241, 254, 1, 2, 3, 4, 5, 6, 8, 9,
401  10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24,
402  25, 26, 27, 28, 29, 30, 31, 32, 34, 35, 36, 37, 38, 39,
403  40, 41, 42, 43, 44, 45, 47, 48, 49, 50, 51, 52, 53, 54,
404  55, 56, 57, 58, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
405  70, 71, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
406  86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 99, 100,
407  101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115,
408  116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 127, 128, 129, 130,
409  131, 132, 133, 134, 135, 136, 138, 139, 140, 141, 142, 143, 144, 145,
410  146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
411  161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
412  177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 191,
413  192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 206,
414  207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221,
415  222, 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236,
416  237, 238, 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
417  252, 253, 253,
418  };
419  int d;
420 
421  /* This code is trying to do a differential probability update. For a
422  * current probability A in the range [1, 255], the difference to a new
423  * probability of any value can be expressed differentially as 1-A, 255-A
424  * where some part of this (absolute range) exists both in positive as
425  * well as the negative part, whereas another part only exists in one
426  * half. We're trying to code this shared part differentially, i.e.
427  * times two where the value of the lowest bit specifies the sign, and
428  * the single part is then coded on top of this. This absolute difference
429  * then again has a value of [0, 254], but a bigger value in this range
430  * indicates that we're further away from the original value A, so we
431  * can code this as a VLC code, since higher values are increasingly
432  * unlikely. The first 20 values in inv_map_table[] allow 'cheap, rough'
433  * updates vs. the 'fine, exact' updates further down the range, which
434  * adds one extra dimension to this differential update model. */
435 
436  if (!vp89_rac_get(c)) {
437  d = vp89_rac_get_uint(c, 4) + 0;
438  } else if (!vp89_rac_get(c)) {
439  d = vp89_rac_get_uint(c, 4) + 16;
440  } else if (!vp89_rac_get(c)) {
441  d = vp89_rac_get_uint(c, 5) + 32;
442  } else {
443  d = vp89_rac_get_uint(c, 7);
444  if (d >= 65)
445  d = (d << 1) - 65 + vp89_rac_get(c);
446  d += 64;
447  av_assert2(d < FF_ARRAY_ELEMS(inv_map_table));
448  }
449 
450  return p <= 128 ? 1 + inv_recenter_nonneg(inv_map_table[d], p - 1) :
451  255 - inv_recenter_nonneg(inv_map_table[d], 255 - p);
452 }
453 
455 {
456  static const enum AVColorSpace colorspaces[8] = {
459  };
460  VP9Context *s = avctx->priv_data;
461  int bits = avctx->profile <= 1 ? 0 : 1 + get_bits1(&s->gb); // 0:8, 1:10, 2:12
462 
463  s->bpp_index = bits;
464  s->s.h.bpp = 8 + bits * 2;
465  s->bytesperpixel = (7 + s->s.h.bpp) >> 3;
466  avctx->colorspace = colorspaces[get_bits(&s->gb, 3)];
467  if (avctx->colorspace == AVCOL_SPC_RGB) { // RGB = profile 1
468  static const enum AVPixelFormat pix_fmt_rgb[3] = {
470  };
471  s->ss_h = s->ss_v = 0;
472  avctx->color_range = AVCOL_RANGE_JPEG;
473  s->pix_fmt = pix_fmt_rgb[bits];
474  if (avctx->profile & 1) {
475  if (get_bits1(&s->gb)) {
476  av_log(avctx, AV_LOG_ERROR, "Reserved bit set in RGB\n");
477  return AVERROR_INVALIDDATA;
478  }
479  } else {
480  av_log(avctx, AV_LOG_ERROR, "RGB not supported in profile %d\n",
481  avctx->profile);
482  return AVERROR_INVALIDDATA;
483  }
484  } else {
485  static const enum AVPixelFormat pix_fmt_for_ss[3][2 /* v */][2 /* h */] = {
492  };
494  if (avctx->profile & 1) {
495  s->ss_h = get_bits1(&s->gb);
496  s->ss_v = get_bits1(&s->gb);
497  s->pix_fmt = pix_fmt_for_ss[bits][s->ss_v][s->ss_h];
498  if (s->pix_fmt == AV_PIX_FMT_YUV420P) {
499  av_log(avctx, AV_LOG_ERROR, "YUV 4:2:0 not supported in profile %d\n",
500  avctx->profile);
501  return AVERROR_INVALIDDATA;
502  } else if (get_bits1(&s->gb)) {
503  av_log(avctx, AV_LOG_ERROR, "Profile %d color details reserved bit set\n",
504  avctx->profile);
505  return AVERROR_INVALIDDATA;
506  }
507  } else {
508  s->ss_h = s->ss_v = 1;
509  s->pix_fmt = pix_fmt_for_ss[bits][1][1];
510  }
511  }
512 
513  return 0;
514 }
515 
517  const uint8_t *data, int size, int *ref)
518 {
519  VP9Context *s = avctx->priv_data;
520  int c, i, j, k, l, m, n, w, h, max, size2, ret, sharp;
521  int last_invisible;
522  const uint8_t *data2;
523 
524  /* general header */
525  if ((ret = init_get_bits8(&s->gb, data, size)) < 0) {
526  av_log(avctx, AV_LOG_ERROR, "Failed to initialize bitstream reader\n");
527  return ret;
528  }
529  if (get_bits(&s->gb, 2) != 0x2) { // frame marker
530  av_log(avctx, AV_LOG_ERROR, "Invalid frame marker\n");
531  return AVERROR_INVALIDDATA;
532  }
533  avctx->profile = get_bits1(&s->gb);
534  avctx->profile |= get_bits1(&s->gb) << 1;
535  if (avctx->profile == 3) avctx->profile += get_bits1(&s->gb);
536  if (avctx->profile > 3) {
537  av_log(avctx, AV_LOG_ERROR, "Profile %d is not yet supported\n", avctx->profile);
538  return AVERROR_INVALIDDATA;
539  }
540  s->s.h.profile = avctx->profile;
541  if (get_bits1(&s->gb)) {
542  *ref = get_bits(&s->gb, 3);
543  return 0;
544  }
545 
546  s->last_keyframe = s->s.h.keyframe;
547  s->s.h.keyframe = !get_bits1(&s->gb);
548 
549  last_invisible = s->s.h.invisible;
550  s->s.h.invisible = !get_bits1(&s->gb);
551  s->s.h.errorres = get_bits1(&s->gb);
552  s->s.h.use_last_frame_mvs = !s->s.h.errorres && !last_invisible;
553 
554  if (s->s.h.keyframe) {
555  if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
556  av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
557  return AVERROR_INVALIDDATA;
558  }
559  if ((ret = read_colorspace_details(avctx)) < 0)
560  return ret;
561  // for profile 1, here follows the subsampling bits
562  s->s.h.refreshrefmask = 0xff;
563  w = get_bits(&s->gb, 16) + 1;
564  h = get_bits(&s->gb, 16) + 1;
565  if (get_bits1(&s->gb)) // display size
566  skip_bits(&s->gb, 32);
567  } else {
568  s->s.h.intraonly = s->s.h.invisible ? get_bits1(&s->gb) : 0;
569  s->s.h.resetctx = s->s.h.errorres ? 0 : get_bits(&s->gb, 2);
570  if (s->s.h.intraonly) {
571  if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
572  av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
573  return AVERROR_INVALIDDATA;
574  }
575  if (avctx->profile >= 1) {
576  if ((ret = read_colorspace_details(avctx)) < 0)
577  return ret;
578  } else {
579  s->ss_h = s->ss_v = 1;
580  s->s.h.bpp = 8;
581  s->bpp_index = 0;
582  s->bytesperpixel = 1;
583  s->pix_fmt = AV_PIX_FMT_YUV420P;
584  avctx->colorspace = AVCOL_SPC_BT470BG;
585  avctx->color_range = AVCOL_RANGE_MPEG;
586  }
587  s->s.h.refreshrefmask = get_bits(&s->gb, 8);
588  w = get_bits(&s->gb, 16) + 1;
589  h = get_bits(&s->gb, 16) + 1;
590  if (get_bits1(&s->gb)) // display size
591  skip_bits(&s->gb, 32);
592  } else {
593  s->s.h.refreshrefmask = get_bits(&s->gb, 8);
594  s->s.h.refidx[0] = get_bits(&s->gb, 3);
595  s->s.h.signbias[0] = get_bits1(&s->gb) && !s->s.h.errorres;
596  s->s.h.refidx[1] = get_bits(&s->gb, 3);
597  s->s.h.signbias[1] = get_bits1(&s->gb) && !s->s.h.errorres;
598  s->s.h.refidx[2] = get_bits(&s->gb, 3);
599  s->s.h.signbias[2] = get_bits1(&s->gb) && !s->s.h.errorres;
600  if (!s->s.refs[s->s.h.refidx[0]].f ||
601  !s->s.refs[s->s.h.refidx[1]].f ||
602  !s->s.refs[s->s.h.refidx[2]].f) {
603  av_log(avctx, AV_LOG_ERROR, "Not all references are available\n");
604  return AVERROR_INVALIDDATA;
605  }
606  if (get_bits1(&s->gb)) {
607  w = s->s.refs[s->s.h.refidx[0]].f->width;
608  h = s->s.refs[s->s.h.refidx[0]].f->height;
609  } else if (get_bits1(&s->gb)) {
610  w = s->s.refs[s->s.h.refidx[1]].f->width;
611  h = s->s.refs[s->s.h.refidx[1]].f->height;
612  } else if (get_bits1(&s->gb)) {
613  w = s->s.refs[s->s.h.refidx[2]].f->width;
614  h = s->s.refs[s->s.h.refidx[2]].f->height;
615  } else {
616  w = get_bits(&s->gb, 16) + 1;
617  h = get_bits(&s->gb, 16) + 1;
618  }
619  // Note that in this code, "CUR_FRAME" is actually before we
620  // have formally allocated a frame, and thus actually represents
621  // the _last_ frame
622  s->s.h.use_last_frame_mvs &= s->s.frames[CUR_FRAME].tf.f &&
623  s->s.frames[CUR_FRAME].tf.f->width == w &&
624  s->s.frames[CUR_FRAME].tf.f->height == h;
625  if (get_bits1(&s->gb)) // display size
626  skip_bits(&s->gb, 32);
627  s->s.h.highprecisionmvs = get_bits1(&s->gb);
628  s->s.h.filtermode = get_bits1(&s->gb) ? FILTER_SWITCHABLE :
629  get_bits(&s->gb, 2);
630  s->s.h.allowcompinter = s->s.h.signbias[0] != s->s.h.signbias[1] ||
631  s->s.h.signbias[0] != s->s.h.signbias[2];
632  if (s->s.h.allowcompinter) {
633  if (s->s.h.signbias[0] == s->s.h.signbias[1]) {
634  s->s.h.fixcompref = 2;
635  s->s.h.varcompref[0] = 0;
636  s->s.h.varcompref[1] = 1;
637  } else if (s->s.h.signbias[0] == s->s.h.signbias[2]) {
638  s->s.h.fixcompref = 1;
639  s->s.h.varcompref[0] = 0;
640  s->s.h.varcompref[1] = 2;
641  } else {
642  s->s.h.fixcompref = 0;
643  s->s.h.varcompref[0] = 1;
644  s->s.h.varcompref[1] = 2;
645  }
646  }
647  }
648  }
649  s->s.h.refreshctx = s->s.h.errorres ? 0 : get_bits1(&s->gb);
650  s->s.h.parallelmode = s->s.h.errorres ? 1 : get_bits1(&s->gb);
651  s->s.h.framectxid = c = get_bits(&s->gb, 2);
652  if (s->s.h.keyframe || s->s.h.intraonly)
653  s->s.h.framectxid = 0; // BUG: libvpx ignores this field in keyframes
654 
655  /* loopfilter header data */
656  if (s->s.h.keyframe || s->s.h.errorres || s->s.h.intraonly) {
657  // reset loopfilter defaults
658  s->s.h.lf_delta.ref[0] = 1;
659  s->s.h.lf_delta.ref[1] = 0;
660  s->s.h.lf_delta.ref[2] = -1;
661  s->s.h.lf_delta.ref[3] = -1;
662  s->s.h.lf_delta.mode[0] = 0;
663  s->s.h.lf_delta.mode[1] = 0;
664  memset(s->s.h.segmentation.feat, 0, sizeof(s->s.h.segmentation.feat));
665  }
666  s->s.h.filter.level = get_bits(&s->gb, 6);
667  sharp = get_bits(&s->gb, 3);
668  // if sharpness changed, reinit lim/mblim LUTs. if it didn't change, keep
669  // the old cache values since they are still valid
670  if (s->s.h.filter.sharpness != sharp) {
671  for (i = 1; i <= 63; i++) {
672  int limit = i;
673 
674  if (sharp > 0) {
675  limit >>= (sharp + 3) >> 2;
676  limit = FFMIN(limit, 9 - sharp);
677  }
678  limit = FFMAX(limit, 1);
679 
680  s->filter_lut.lim_lut[i] = limit;
681  s->filter_lut.mblim_lut[i] = 2 * (i + 2) + limit;
682  }
683  }
684  s->s.h.filter.sharpness = sharp;
685  if ((s->s.h.lf_delta.enabled = get_bits1(&s->gb))) {
686  if ((s->s.h.lf_delta.updated = get_bits1(&s->gb))) {
687  for (i = 0; i < 4; i++)
688  if (get_bits1(&s->gb))
689  s->s.h.lf_delta.ref[i] = get_sbits_inv(&s->gb, 6);
690  for (i = 0; i < 2; i++)
691  if (get_bits1(&s->gb))
692  s->s.h.lf_delta.mode[i] = get_sbits_inv(&s->gb, 6);
693  }
694  }
695 
696  /* quantization header data */
697  s->s.h.yac_qi = get_bits(&s->gb, 8);
698  s->s.h.ydc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
699  s->s.h.uvdc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
700  s->s.h.uvac_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
701  s->s.h.lossless = s->s.h.yac_qi == 0 && s->s.h.ydc_qdelta == 0 &&
702  s->s.h.uvdc_qdelta == 0 && s->s.h.uvac_qdelta == 0;
703 #if FF_API_CODEC_PROPS
705  if (s->s.h.lossless)
708 #endif
709 
710  /* segmentation header info */
711  if ((s->s.h.segmentation.enabled = get_bits1(&s->gb))) {
712  if ((s->s.h.segmentation.update_map = get_bits1(&s->gb))) {
713  for (i = 0; i < 7; i++)
714  s->s.h.segmentation.prob[i] = get_bits1(&s->gb) ?
715  get_bits(&s->gb, 8) : 255;
716  if ((s->s.h.segmentation.temporal = get_bits1(&s->gb)))
717  for (i = 0; i < 3; i++)
718  s->s.h.segmentation.pred_prob[i] = get_bits1(&s->gb) ?
719  get_bits(&s->gb, 8) : 255;
720  }
721 
722  if (get_bits1(&s->gb)) {
723  s->s.h.segmentation.absolute_vals = get_bits1(&s->gb);
724  for (i = 0; i < 8; i++) {
725  if ((s->s.h.segmentation.feat[i].q_enabled = get_bits1(&s->gb)))
726  s->s.h.segmentation.feat[i].q_val = get_sbits_inv(&s->gb, 8);
727  if ((s->s.h.segmentation.feat[i].lf_enabled = get_bits1(&s->gb)))
728  s->s.h.segmentation.feat[i].lf_val = get_sbits_inv(&s->gb, 6);
729  if ((s->s.h.segmentation.feat[i].ref_enabled = get_bits1(&s->gb)))
730  s->s.h.segmentation.feat[i].ref_val = get_bits(&s->gb, 2);
731  s->s.h.segmentation.feat[i].skip_enabled = get_bits1(&s->gb);
732  }
733  }
734  } else {
735  // Reset fields under segmentation switch if segmentation is disabled.
736  // This is necessary because some hwaccels don't ignore these fields
737  // if segmentation is disabled.
738  s->s.h.segmentation.temporal = 0;
739  s->s.h.segmentation.update_map = 0;
740  }
741 
742  // set qmul[] based on Y/UV, AC/DC and segmentation Q idx deltas
743  for (i = 0; i < (s->s.h.segmentation.enabled ? 8 : 1); i++) {
744  int qyac, qydc, quvac, quvdc, lflvl, sh;
745 
746  if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].q_enabled) {
747  if (s->s.h.segmentation.absolute_vals)
748  qyac = av_clip_uintp2(s->s.h.segmentation.feat[i].q_val, 8);
749  else
750  qyac = av_clip_uintp2(s->s.h.yac_qi + s->s.h.segmentation.feat[i].q_val, 8);
751  } else {
752  qyac = s->s.h.yac_qi;
753  }
754  qydc = av_clip_uintp2(qyac + s->s.h.ydc_qdelta, 8);
755  quvdc = av_clip_uintp2(qyac + s->s.h.uvdc_qdelta, 8);
756  quvac = av_clip_uintp2(qyac + s->s.h.uvac_qdelta, 8);
757  qyac = av_clip_uintp2(qyac, 8);
758 
759  s->s.h.segmentation.feat[i].qmul[0][0] = ff_vp9_dc_qlookup[s->bpp_index][qydc];
760  s->s.h.segmentation.feat[i].qmul[0][1] = ff_vp9_ac_qlookup[s->bpp_index][qyac];
761  s->s.h.segmentation.feat[i].qmul[1][0] = ff_vp9_dc_qlookup[s->bpp_index][quvdc];
762  s->s.h.segmentation.feat[i].qmul[1][1] = ff_vp9_ac_qlookup[s->bpp_index][quvac];
763 
764  sh = s->s.h.filter.level >= 32;
765  if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].lf_enabled) {
766  if (s->s.h.segmentation.absolute_vals)
767  lflvl = av_clip_uintp2(s->s.h.segmentation.feat[i].lf_val, 6);
768  else
769  lflvl = av_clip_uintp2(s->s.h.filter.level + s->s.h.segmentation.feat[i].lf_val, 6);
770  } else {
771  lflvl = s->s.h.filter.level;
772  }
773  if (s->s.h.lf_delta.enabled) {
774  s->s.h.segmentation.feat[i].lflvl[0][0] =
775  s->s.h.segmentation.feat[i].lflvl[0][1] =
776  av_clip_uintp2(lflvl + (s->s.h.lf_delta.ref[0] * (1 << sh)), 6);
777  for (j = 1; j < 4; j++) {
778  s->s.h.segmentation.feat[i].lflvl[j][0] =
779  av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
780  s->s.h.lf_delta.mode[0]) * (1 << sh)), 6);
781  s->s.h.segmentation.feat[i].lflvl[j][1] =
782  av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
783  s->s.h.lf_delta.mode[1]) * (1 << sh)), 6);
784  }
785  } else {
786  memset(s->s.h.segmentation.feat[i].lflvl, lflvl,
787  sizeof(s->s.h.segmentation.feat[i].lflvl));
788  }
789  }
790 
791  /* tiling info */
792  if ((ret = update_size(avctx, w, h)) < 0) {
793  av_log(avctx, AV_LOG_ERROR, "Failed to initialize decoder for %dx%d @ %d\n",
794  w, h, s->pix_fmt);
795  return ret;
796  }
797  for (s->s.h.tiling.log2_tile_cols = 0;
798  s->sb_cols > (64 << s->s.h.tiling.log2_tile_cols);
799  s->s.h.tiling.log2_tile_cols++) ;
800  for (max = 0; (s->sb_cols >> max) >= 4; max++) ;
801  max = FFMAX(0, max - 1);
802  while (max > s->s.h.tiling.log2_tile_cols) {
803  if (get_bits1(&s->gb))
804  s->s.h.tiling.log2_tile_cols++;
805  else
806  break;
807  }
808  s->s.h.tiling.log2_tile_rows = decode012(&s->gb);
809  s->s.h.tiling.tile_rows = 1 << s->s.h.tiling.log2_tile_rows;
810  if (s->s.h.tiling.tile_cols != (1 << s->s.h.tiling.log2_tile_cols)) {
811  int n_range_coders;
812  VPXRangeCoder *rc;
813 
814  if (s->td) {
815  for (i = 0; i < s->active_tile_cols; i++)
816  vp9_tile_data_free(&s->td[i]);
817  av_freep(&s->td);
818  }
819 
820  s->s.h.tiling.tile_cols = 1 << s->s.h.tiling.log2_tile_cols;
821  s->active_tile_cols = avctx->active_thread_type == FF_THREAD_SLICE ?
822  s->s.h.tiling.tile_cols : 1;
823  vp9_alloc_entries(avctx, s->sb_rows);
824  if (avctx->active_thread_type == FF_THREAD_SLICE) {
825  n_range_coders = 4; // max_tile_rows
826  } else {
827  n_range_coders = s->s.h.tiling.tile_cols;
828  }
829  s->td = av_calloc(s->active_tile_cols, sizeof(VP9TileData) +
830  n_range_coders * sizeof(VPXRangeCoder));
831  if (!s->td)
832  return AVERROR(ENOMEM);
833  rc = (VPXRangeCoder *) &s->td[s->active_tile_cols];
834  for (i = 0; i < s->active_tile_cols; i++) {
835  s->td[i].s = s;
836  s->td[i].c_b = rc;
837  rc += n_range_coders;
838  }
839  }
840 
841  /* check reference frames */
842  if (!s->s.h.keyframe && !s->s.h.intraonly) {
843  int valid_ref_frame = 0;
844  for (i = 0; i < 3; i++) {
845  AVFrame *ref = s->s.refs[s->s.h.refidx[i]].f;
846  int refw = ref->width, refh = ref->height;
847 
848  if (ref->format != avctx->pix_fmt) {
849  av_log(avctx, AV_LOG_ERROR,
850  "Ref pixfmt (%s) did not match current frame (%s)",
851  av_get_pix_fmt_name(ref->format),
852  av_get_pix_fmt_name(avctx->pix_fmt));
853  return AVERROR_INVALIDDATA;
854  } else if (refw == w && refh == h) {
855  s->mvscale[i][0] = s->mvscale[i][1] = 0;
856  } else {
857  /* Check to make sure at least one of frames that */
858  /* this frame references has valid dimensions */
859  if (w * 2 < refw || h * 2 < refh || w > 16 * refw || h > 16 * refh) {
860  av_log(avctx, AV_LOG_WARNING,
861  "Invalid ref frame dimensions %dx%d for frame size %dx%d\n",
862  refw, refh, w, h);
863  s->mvscale[i][0] = s->mvscale[i][1] = REF_INVALID_SCALE;
864  continue;
865  }
866  s->mvscale[i][0] = (refw << 14) / w;
867  s->mvscale[i][1] = (refh << 14) / h;
868  s->mvstep[i][0] = 16 * s->mvscale[i][0] >> 14;
869  s->mvstep[i][1] = 16 * s->mvscale[i][1] >> 14;
870  }
871  valid_ref_frame++;
872  }
873  if (!valid_ref_frame) {
874  av_log(avctx, AV_LOG_ERROR, "No valid reference frame is found, bitstream not supported\n");
875  return AVERROR_INVALIDDATA;
876  }
877  }
878 
879  if (s->s.h.keyframe || s->s.h.errorres || (s->s.h.intraonly && s->s.h.resetctx == 3)) {
880  s->prob_ctx[0].p = s->prob_ctx[1].p = s->prob_ctx[2].p =
881  s->prob_ctx[3].p = ff_vp9_default_probs;
882  memcpy(s->prob_ctx[0].coef, ff_vp9_default_coef_probs,
883  sizeof(ff_vp9_default_coef_probs));
884  memcpy(s->prob_ctx[1].coef, ff_vp9_default_coef_probs,
885  sizeof(ff_vp9_default_coef_probs));
886  memcpy(s->prob_ctx[2].coef, ff_vp9_default_coef_probs,
887  sizeof(ff_vp9_default_coef_probs));
888  memcpy(s->prob_ctx[3].coef, ff_vp9_default_coef_probs,
889  sizeof(ff_vp9_default_coef_probs));
890  } else if (s->s.h.intraonly && s->s.h.resetctx == 2) {
891  s->prob_ctx[c].p = ff_vp9_default_probs;
892  memcpy(s->prob_ctx[c].coef, ff_vp9_default_coef_probs,
893  sizeof(ff_vp9_default_coef_probs));
894  }
895 
896  // next 16 bits is size of the rest of the header (arith-coded)
897  s->s.h.compressed_header_size = size2 = get_bits(&s->gb, 16);
898  s->s.h.uncompressed_header_size = (get_bits_count(&s->gb) + 7) / 8;
899 
900  data2 = align_get_bits(&s->gb);
901  if (size2 > size - (data2 - data)) {
902  av_log(avctx, AV_LOG_ERROR, "Invalid compressed header size\n");
903  return AVERROR_INVALIDDATA;
904  }
905  ret = ff_vpx_init_range_decoder(&s->c, data2, size2);
906  if (ret < 0)
907  return ret;
908 
909  if (vpx_rac_get_prob_branchy(&s->c, 128)) { // marker bit
910  av_log(avctx, AV_LOG_ERROR, "Marker bit was set\n");
911  return AVERROR_INVALIDDATA;
912  }
913 
914  for (i = 0; i < s->active_tile_cols; i++) {
915  if (s->s.h.keyframe || s->s.h.intraonly) {
916  memset(s->td[i].counts.coef, 0, sizeof(s->td[0].counts.coef));
917  memset(s->td[i].counts.eob, 0, sizeof(s->td[0].counts.eob));
918  } else {
919  memset(&s->td[i].counts, 0, sizeof(s->td[0].counts));
920  }
921  s->td[i].nb_block_structure = 0;
922  }
923 
924  /* FIXME is it faster to not copy here, but do it down in the fw updates
925  * as explicit copies if the fw update is missing (and skip the copy upon
926  * fw update)? */
927  s->prob.p = s->prob_ctx[c].p;
928 
929  // txfm updates
930  if (s->s.h.lossless) {
931  s->s.h.txfmmode = TX_4X4;
932  } else {
933  s->s.h.txfmmode = vp89_rac_get_uint(&s->c, 2);
934  if (s->s.h.txfmmode == 3)
935  s->s.h.txfmmode += vp89_rac_get(&s->c);
936 
937  if (s->s.h.txfmmode == TX_SWITCHABLE) {
938  for (i = 0; i < 2; i++)
939  if (vpx_rac_get_prob_branchy(&s->c, 252))
940  s->prob.p.tx8p[i] = update_prob(&s->c, s->prob.p.tx8p[i]);
941  for (i = 0; i < 2; i++)
942  for (j = 0; j < 2; j++)
943  if (vpx_rac_get_prob_branchy(&s->c, 252))
944  s->prob.p.tx16p[i][j] =
945  update_prob(&s->c, s->prob.p.tx16p[i][j]);
946  for (i = 0; i < 2; i++)
947  for (j = 0; j < 3; j++)
948  if (vpx_rac_get_prob_branchy(&s->c, 252))
949  s->prob.p.tx32p[i][j] =
950  update_prob(&s->c, s->prob.p.tx32p[i][j]);
951  }
952  }
953 
954  // coef updates
955  for (i = 0; i < 4; i++) {
956  uint8_t (*ref)[2][6][6][3] = s->prob_ctx[c].coef[i];
957  if (vp89_rac_get(&s->c)) {
958  for (j = 0; j < 2; j++)
959  for (k = 0; k < 2; k++)
960  for (l = 0; l < 6; l++)
961  for (m = 0; m < 6; m++) {
962  uint8_t *p = s->prob.coef[i][j][k][l][m];
963  uint8_t *r = ref[j][k][l][m];
964  if (m >= 3 && l == 0) // dc only has 3 pt
965  break;
966  for (n = 0; n < 3; n++) {
967  if (vpx_rac_get_prob_branchy(&s->c, 252))
968  p[n] = update_prob(&s->c, r[n]);
969  else
970  p[n] = r[n];
971  }
972  memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
973  }
974  } else {
975  for (j = 0; j < 2; j++)
976  for (k = 0; k < 2; k++)
977  for (l = 0; l < 6; l++)
978  for (m = 0; m < 6; m++) {
979  uint8_t *p = s->prob.coef[i][j][k][l][m];
980  uint8_t *r = ref[j][k][l][m];
981  if (m > 3 && l == 0) // dc only has 3 pt
982  break;
983  memcpy(p, r, 3);
984  memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
985  }
986  }
987  if (s->s.h.txfmmode == i)
988  break;
989  }
990 
991  // mode updates
992  for (i = 0; i < 3; i++)
993  if (vpx_rac_get_prob_branchy(&s->c, 252))
994  s->prob.p.skip[i] = update_prob(&s->c, s->prob.p.skip[i]);
995  if (!s->s.h.keyframe && !s->s.h.intraonly) {
996  for (i = 0; i < 7; i++)
997  for (j = 0; j < 3; j++)
998  if (vpx_rac_get_prob_branchy(&s->c, 252))
999  s->prob.p.mv_mode[i][j] =
1000  update_prob(&s->c, s->prob.p.mv_mode[i][j]);
1001 
1002  if (s->s.h.filtermode == FILTER_SWITCHABLE)
1003  for (i = 0; i < 4; i++)
1004  for (j = 0; j < 2; j++)
1005  if (vpx_rac_get_prob_branchy(&s->c, 252))
1006  s->prob.p.filter[i][j] =
1007  update_prob(&s->c, s->prob.p.filter[i][j]);
1008 
1009  for (i = 0; i < 4; i++)
1010  if (vpx_rac_get_prob_branchy(&s->c, 252))
1011  s->prob.p.intra[i] = update_prob(&s->c, s->prob.p.intra[i]);
1012 
1013  if (s->s.h.allowcompinter) {
1014  s->s.h.comppredmode = vp89_rac_get(&s->c);
1015  if (s->s.h.comppredmode)
1016  s->s.h.comppredmode += vp89_rac_get(&s->c);
1017  if (s->s.h.comppredmode == PRED_SWITCHABLE)
1018  for (i = 0; i < 5; i++)
1019  if (vpx_rac_get_prob_branchy(&s->c, 252))
1020  s->prob.p.comp[i] =
1021  update_prob(&s->c, s->prob.p.comp[i]);
1022  } else {
1023  s->s.h.comppredmode = PRED_SINGLEREF;
1024  }
1025 
1026  if (s->s.h.comppredmode != PRED_COMPREF) {
1027  for (i = 0; i < 5; i++) {
1028  if (vpx_rac_get_prob_branchy(&s->c, 252))
1029  s->prob.p.single_ref[i][0] =
1030  update_prob(&s->c, s->prob.p.single_ref[i][0]);
1031  if (vpx_rac_get_prob_branchy(&s->c, 252))
1032  s->prob.p.single_ref[i][1] =
1033  update_prob(&s->c, s->prob.p.single_ref[i][1]);
1034  }
1035  }
1036 
1037  if (s->s.h.comppredmode != PRED_SINGLEREF) {
1038  for (i = 0; i < 5; i++)
1039  if (vpx_rac_get_prob_branchy(&s->c, 252))
1040  s->prob.p.comp_ref[i] =
1041  update_prob(&s->c, s->prob.p.comp_ref[i]);
1042  }
1043 
1044  for (i = 0; i < 4; i++)
1045  for (j = 0; j < 9; j++)
1046  if (vpx_rac_get_prob_branchy(&s->c, 252))
1047  s->prob.p.y_mode[i][j] =
1048  update_prob(&s->c, s->prob.p.y_mode[i][j]);
1049 
1050  for (i = 0; i < 4; i++)
1051  for (j = 0; j < 4; j++)
1052  for (k = 0; k < 3; k++)
1053  if (vpx_rac_get_prob_branchy(&s->c, 252))
1054  s->prob.p.partition[3 - i][j][k] =
1055  update_prob(&s->c,
1056  s->prob.p.partition[3 - i][j][k]);
1057 
1058  // mv fields don't use the update_prob subexp model for some reason
1059  for (i = 0; i < 3; i++)
1060  if (vpx_rac_get_prob_branchy(&s->c, 252))
1061  s->prob.p.mv_joint[i] = (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1062 
1063  for (i = 0; i < 2; i++) {
1064  if (vpx_rac_get_prob_branchy(&s->c, 252))
1065  s->prob.p.mv_comp[i].sign =
1066  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1067 
1068  for (j = 0; j < 10; j++)
1069  if (vpx_rac_get_prob_branchy(&s->c, 252))
1070  s->prob.p.mv_comp[i].classes[j] =
1071  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1072 
1073  if (vpx_rac_get_prob_branchy(&s->c, 252))
1074  s->prob.p.mv_comp[i].class0 =
1075  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1076 
1077  for (j = 0; j < 10; j++)
1078  if (vpx_rac_get_prob_branchy(&s->c, 252))
1079  s->prob.p.mv_comp[i].bits[j] =
1080  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1081  }
1082 
1083  for (i = 0; i < 2; i++) {
1084  for (j = 0; j < 2; j++)
1085  for (k = 0; k < 3; k++)
1086  if (vpx_rac_get_prob_branchy(&s->c, 252))
1087  s->prob.p.mv_comp[i].class0_fp[j][k] =
1088  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1089 
1090  for (j = 0; j < 3; j++)
1091  if (vpx_rac_get_prob_branchy(&s->c, 252))
1092  s->prob.p.mv_comp[i].fp[j] =
1093  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1094  }
1095 
1096  if (s->s.h.highprecisionmvs) {
1097  for (i = 0; i < 2; i++) {
1098  if (vpx_rac_get_prob_branchy(&s->c, 252))
1099  s->prob.p.mv_comp[i].class0_hp =
1100  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1101 
1102  if (vpx_rac_get_prob_branchy(&s->c, 252))
1103  s->prob.p.mv_comp[i].hp =
1104  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1105  }
1106  }
1107  }
1108 
1109  return (data2 - data) + size2;
1110 }
1111 
1112 static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl,
1113  ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
1114 {
1115  const VP9Context *s = td->s;
1116  int c = ((s->above_partition_ctx[col] >> (3 - bl)) & 1) |
1117  (((td->left_partition_ctx[row & 0x7] >> (3 - bl)) & 1) << 1);
1118  const uint8_t *p = s->s.h.keyframe || s->s.h.intraonly ? ff_vp9_default_kf_partition_probs[bl][c] :
1119  s->prob.p.partition[bl][c];
1120  enum BlockPartition bp;
1121  ptrdiff_t hbs = 4 >> bl;
1122  AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
1123  ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
1124  int bytesperpixel = s->bytesperpixel;
1125 
1126  if (bl == BL_8X8) {
1128  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1129  } else if (col + hbs < s->cols) { // FIXME why not <=?
1130  if (row + hbs < s->rows) { // FIXME why not <=?
1132  switch (bp) {
1133  case PARTITION_NONE:
1134  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1135  break;
1136  case PARTITION_H:
1137  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1138  yoff += hbs * 8 * y_stride;
1139  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1140  ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, bl, bp);
1141  break;
1142  case PARTITION_V:
1143  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1144  yoff += hbs * 8 * bytesperpixel;
1145  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1146  ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, bl, bp);
1147  break;
1148  case PARTITION_SPLIT:
1149  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1150  decode_sb(td, row, col + hbs, lflvl,
1151  yoff + 8 * hbs * bytesperpixel,
1152  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1153  yoff += hbs * 8 * y_stride;
1154  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1155  decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1156  decode_sb(td, row + hbs, col + hbs, lflvl,
1157  yoff + 8 * hbs * bytesperpixel,
1158  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1159  break;
1160  default:
1161  av_unreachable("ff_vp9_partition_tree only has "
1162  "the four PARTITION_* terminal codes");
1163  }
1164  } else if (vpx_rac_get_prob_branchy(td->c, p[1])) {
1165  bp = PARTITION_SPLIT;
1166  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1167  decode_sb(td, row, col + hbs, lflvl,
1168  yoff + 8 * hbs * bytesperpixel,
1169  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1170  } else {
1171  bp = PARTITION_H;
1172  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1173  }
1174  } else if (row + hbs < s->rows) { // FIXME why not <=?
1175  if (vpx_rac_get_prob_branchy(td->c, p[2])) {
1176  bp = PARTITION_SPLIT;
1177  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1178  yoff += hbs * 8 * y_stride;
1179  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1180  decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1181  } else {
1182  bp = PARTITION_V;
1183  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1184  }
1185  } else {
1186  bp = PARTITION_SPLIT;
1187  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1188  }
1189  td->counts.partition[bl][c][bp]++;
1190 }
1191 
/**
 * Second-pass variant of decode_sb(): instead of reading partition symbols
 * from the bitstream, replays the block structure recorded during pass 1
 * (walking td->b, which pass 1 advanced through the block array).
 * Parameters are as in decode_sb().
 */
static void decode_sb_mem(VP9TileData *td, int row, int col, VP9Filter *lflvl,
                          ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
{
    const VP9Context *s = td->s;
    VP9Block *b = td->b;
    ptrdiff_t hbs = 4 >> bl; // half block size at this level, in 8x8 units
    AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
    ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
    int bytesperpixel = s->bytesperpixel;

    if (bl == BL_8X8) {
        av_assert2(b->bl == BL_8X8);
        ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
    } else if (td->b->bl == bl) {
        // the recorded block terminates at this level; decode it, and for
        // H/V partitions also its second half if that lies inside the frame
        ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
        if (b->bp == PARTITION_H && row + hbs < s->rows) {
            yoff += hbs * 8 * y_stride;
            uvoff += hbs * 8 * uv_stride >> s->ss_v;
            ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, b->bl, b->bp);
        } else if (b->bp == PARTITION_V && col + hbs < s->cols) {
            yoff += hbs * 8 * bytesperpixel;
            uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
            ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, b->bl, b->bp);
        }
    } else {
        // split: recurse into the up-to-four quadrants inside the frame
        decode_sb_mem(td, row, col, lflvl, yoff, uvoff, bl + 1);
        if (col + hbs < s->cols) { // FIXME why not <=?
            if (row + hbs < s->rows) {
                decode_sb_mem(td, row, col + hbs, lflvl, yoff + 8 * hbs * bytesperpixel,
                              uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
                yoff += hbs * 8 * y_stride;
                uvoff += hbs * 8 * uv_stride >> s->ss_v;
                decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
                decode_sb_mem(td, row + hbs, col + hbs, lflvl,
                              yoff + 8 * hbs * bytesperpixel,
                              uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
            } else {
                yoff += hbs * 8 * bytesperpixel;
                uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
                decode_sb_mem(td, row, col + hbs, lflvl, yoff, uvoff, bl + 1);
            }
        } else if (row + hbs < s->rows) {
            yoff += hbs * 8 * y_stride;
            uvoff += hbs * 8 * uv_stride >> s->ss_v;
            decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
        }
    }
}
1240 
/**
 * Compute the pixel extent of tile number idx along one axis.
 * The axis is divided into 2^log2_n tiles over n superblocks; the
 * resulting superblock range is clamped to n and scaled to 8px units.
 */
static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
{
    int first_sb = (idx * n) >> log2_n;
    int last_sb  = ((idx + 1) * n) >> log2_n;

    if (first_sb > n)
        first_sb = n;
    if (last_sb > n)
        last_sb = n;

    *start = first_sb << 3;
    *end   = last_sb << 3;
}
1248 
1250 {
1251  int i;
1252 
1253  av_freep(&s->intra_pred_data[0]);
1254  for (i = 0; i < s->active_tile_cols; i++)
1255  vp9_tile_data_free(&s->td[i]);
1256 }
1257 
1259 {
1260  VP9Context *s = avctx->priv_data;
1261  int i;
1262 
1263  for (int i = 0; i < 3; i++)
1264  vp9_frame_unref(&s->s.frames[i]);
1265  av_refstruct_pool_uninit(&s->frame_extradata_pool);
1266  for (i = 0; i < 8; i++) {
1267  ff_progress_frame_unref(&s->s.refs[i]);
1268  ff_progress_frame_unref(&s->next_refs[i]);
1269  vp9_frame_unref(&s->s.ref_frames[i]);
1270  }
1271 
1272  free_buffers(s);
1273 #if HAVE_THREADS
1274  av_freep(&s->entries);
1275  ff_pthread_free(s, vp9_context_offsets);
1276 #endif
1277 
1278  av_refstruct_unref(&s->header_ref);
1279  ff_cbs_fragment_free(&s->current_frag);
1280  ff_cbs_close(&s->cbc);
1281 
1282  av_freep(&s->td);
1283  return 0;
1284 }
1285 
/**
 * Single-threaded tile decode. For each tile row: parse the per-tile size
 * headers and set up one range decoder per tile column, then decode every
 * superblock, backing up pre-loopfilter pixels for intra prediction,
 * running the loopfilter and reporting progress one sb64 row at a time.
 *
 * @param data  start of the tile data (after the compressed header)
 * @param size  number of bytes available at data
 * @return 0 on success, a negative AVERROR code on corrupt tile data
 */
static int decode_tiles(AVCodecContext *avctx,
                        const uint8_t *data, int size)
{
    VP9Context *s = avctx->priv_data;
    VP9TileData *td = &s->td[0];
    int row, col, tile_row, tile_col, ret;
    int bytesperpixel;
    int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
    AVFrame *f;
    ptrdiff_t yoff, uvoff, ls_y, ls_uv;

    f = s->s.frames[CUR_FRAME].tf.f;
    ls_y = f->linesize[0];
    ls_uv =f->linesize[1];
    bytesperpixel = s->bytesperpixel;

    yoff = uvoff = 0;
    for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
        set_tile_offset(&tile_row_start, &tile_row_end,
                        tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);

        // every tile except the very last is preceded by a 32-bit size field
        for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
            int64_t tile_size;

            if (tile_col == s->s.h.tiling.tile_cols - 1 &&
                tile_row == s->s.h.tiling.tile_rows - 1) {
                tile_size = size;
            } else {
                tile_size = AV_RB32(data);
                data += 4;
                size -= 4;
            }
            if (tile_size > size)
                return AVERROR_INVALIDDATA;
            ret = ff_vpx_init_range_decoder(&td->c_b[tile_col], data, tile_size);
            if (ret < 0)
                return ret;
            if (vpx_rac_get_prob_branchy(&td->c_b[tile_col], 128)) // marker bit
                return AVERROR_INVALIDDATA;
            data += tile_size;
            size -= tile_size;
        }

        for (row = tile_row_start; row < tile_row_end;
             row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
            VP9Filter *lflvl_ptr = s->lflvl;
            ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;

            for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
                set_tile_offset(&tile_col_start, &tile_col_end,
                                tile_col, s->s.h.tiling.log2_tile_cols, s->sb_cols);
                td->tile_col_start = tile_col_start;
                if (s->pass != 2) {
                    // reset left-edge contexts at each tile boundary
                    // (pass 2 replays recorded blocks and needs none of this)
                    memset(td->left_partition_ctx, 0, 8);
                    memset(td->left_skip_ctx, 0, 8);
                    if (s->s.h.keyframe || s->s.h.intraonly) {
                        memset(td->left_mode_ctx, DC_PRED, 16);
                    } else {
                        memset(td->left_mode_ctx, NEARESTMV, 8);
                    }
                    memset(td->left_y_nnz_ctx, 0, 16);
                    memset(td->left_uv_nnz_ctx, 0, 32);
                    memset(td->left_segpred_ctx, 0, 8);

                    td->c = &td->c_b[tile_col];
                }

                for (col = tile_col_start;
                     col < tile_col_end;
                     col += 8, yoff2 += 64 * bytesperpixel,
                     uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                    // FIXME integrate with lf code (i.e. zero after each
                    // use, similar to invtxfm coefficients, or similar)
                    if (s->pass != 1) {
                        memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
                    }

                    if (s->pass == 2) {
                        decode_sb_mem(td, row, col, lflvl_ptr,
                                      yoff2, uvoff2, BL_64X64);
                    } else {
                        // bail out instead of decoding past the end of the
                        // range coder's buffer on truncated input
                        if (vpx_rac_is_end(td->c)) {
                            return AVERROR_INVALIDDATA;
                        }
                        decode_sb(td, row, col, lflvl_ptr,
                                  yoff2, uvoff2, BL_64X64);
                    }
                }
            }

            if (s->pass == 1)
                continue;

            // backup pre-loopfilter reconstruction data for intra
            // prediction of next row of sb64s
            if (row + 8 < s->rows) {
                memcpy(s->intra_pred_data[0],
                       f->data[0] + yoff + 63 * ls_y,
                       8 * s->cols * bytesperpixel);
                memcpy(s->intra_pred_data[1],
                       f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * s->cols * bytesperpixel >> s->ss_h);
                memcpy(s->intra_pred_data[2],
                       f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * s->cols * bytesperpixel >> s->ss_h);
            }

            // loopfilter one row
            if (s->s.h.filter.level) {
                yoff2 = yoff;
                uvoff2 = uvoff;
                lflvl_ptr = s->lflvl;
                for (col = 0; col < s->cols;
                     col += 8, yoff2 += 64 * bytesperpixel,
                     uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                    ff_vp9_loopfilter_sb(avctx, lflvl_ptr, row, col,
                                         yoff2, uvoff2);
                }
            }

            // FIXME maybe we can make this more finegrained by running the
            // loopfilter per-block instead of after each sbrow
            // In fact that would also make intra pred left preparation easier?
            ff_progress_frame_report(&s->s.frames[CUR_FRAME].tf, row >> 3);
        }
    }
    return 0;
}
1414 
1415 #if HAVE_THREADS
/**
 * Slice-thread worker: decodes one tile column (selected by jobnr) across
 * all tile rows. The loopfilter is NOT run here; loopfilter_proc() on the
 * main thread consumes the per-sb-row progress reported via
 * vp9_report_tile_progress().
 */
static av_always_inline
int decode_tiles_mt(AVCodecContext *avctx, void *tdata, int jobnr,
                    int threadnr)
{
    VP9Context *s = avctx->priv_data;
    VP9TileData *td = &s->td[jobnr];
    ptrdiff_t uvoff, yoff, ls_y, ls_uv;
    int bytesperpixel = s->bytesperpixel, row, col, tile_row;
    unsigned tile_cols_len;
    int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
    VP9Filter *lflvl_ptr_base;
    AVFrame *f;

    f = s->s.frames[CUR_FRAME].tf.f;
    ls_y = f->linesize[0];
    ls_uv =f->linesize[1];

    set_tile_offset(&tile_col_start, &tile_col_end,
                    jobnr, s->s.h.tiling.log2_tile_cols, s->sb_cols);
    td->tile_col_start = tile_col_start;
    // initial plane offsets point at this tile column's left edge
    uvoff = (64 * bytesperpixel >> s->ss_h)*(tile_col_start >> 3);
    yoff = (64 * bytesperpixel)*(tile_col_start >> 3);
    lflvl_ptr_base = s->lflvl+(tile_col_start >> 3);

    for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
        set_tile_offset(&tile_row_start, &tile_row_end,
                        tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);

        // range decoders were initialized by the caller, one per tile row
        td->c = &td->c_b[tile_row];
        for (row = tile_row_start; row < tile_row_end;
             row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
            ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
            VP9Filter *lflvl_ptr = lflvl_ptr_base+s->sb_cols*(row >> 3);

            // reset left-edge contexts at the start of each sb64 row
            memset(td->left_partition_ctx, 0, 8);
            memset(td->left_skip_ctx, 0, 8);
            if (s->s.h.keyframe || s->s.h.intraonly) {
                memset(td->left_mode_ctx, DC_PRED, 16);
            } else {
                memset(td->left_mode_ctx, NEARESTMV, 8);
            }
            memset(td->left_y_nnz_ctx, 0, 16);
            memset(td->left_uv_nnz_ctx, 0, 32);
            memset(td->left_segpred_ctx, 0, 8);

            for (col = tile_col_start;
                 col < tile_col_end;
                 col += 8, yoff2 += 64 * bytesperpixel,
                 uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                // FIXME integrate with lf code (i.e. zero after each
                // use, similar to invtxfm coefficients, or similar)
                memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
                decode_sb(td, row, col, lflvl_ptr,
                          yoff2, uvoff2, BL_64X64);
            }

            // backup pre-loopfilter reconstruction data for intra
            // prediction of next row of sb64s
            tile_cols_len = tile_col_end - tile_col_start;
            if (row + 8 < s->rows) {
                memcpy(s->intra_pred_data[0] + (tile_col_start * 8 * bytesperpixel),
                       f->data[0] + yoff + 63 * ls_y,
                       8 * tile_cols_len * bytesperpixel);
                memcpy(s->intra_pred_data[1] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
                       f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * tile_cols_len * bytesperpixel >> s->ss_h);
                memcpy(s->intra_pred_data[2] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
                       f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * tile_cols_len * bytesperpixel >> s->ss_h);
            }

            vp9_report_tile_progress(s, row >> 3, 1);
        }
    }
    return 0;
}
1492 
1493 static av_always_inline
1494 int loopfilter_proc(AVCodecContext *avctx)
1495 {
1496  VP9Context *s = avctx->priv_data;
1497  ptrdiff_t uvoff, yoff, ls_y, ls_uv;
1498  VP9Filter *lflvl_ptr;
1499  int bytesperpixel = s->bytesperpixel, col, i;
1500  AVFrame *f;
1501 
1502  f = s->s.frames[CUR_FRAME].tf.f;
1503  ls_y = f->linesize[0];
1504  ls_uv =f->linesize[1];
1505 
1506  for (i = 0; i < s->sb_rows; i++) {
1507  vp9_await_tile_progress(s, i, s->s.h.tiling.tile_cols);
1508 
1509  if (s->s.h.filter.level) {
1510  yoff = (ls_y * 64)*i;
1511  uvoff = (ls_uv * 64 >> s->ss_v)*i;
1512  lflvl_ptr = s->lflvl+s->sb_cols*i;
1513  for (col = 0; col < s->cols;
1514  col += 8, yoff += 64 * bytesperpixel,
1515  uvoff += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1516  ff_vp9_loopfilter_sb(avctx, lflvl_ptr, i << 3, col,
1517  yoff, uvoff);
1518  }
1519  }
1520  }
1521  return 0;
1522 }
1523 #endif
1524 
1526 {
1527  AVVideoEncParams *par;
1528  unsigned int tile, nb_blocks = 0;
1529 
1530  if (s->s.h.segmentation.enabled) {
1531  for (tile = 0; tile < s->active_tile_cols; tile++)
1532  nb_blocks += s->td[tile].nb_block_structure;
1533  }
1534 
1536  AV_VIDEO_ENC_PARAMS_VP9, nb_blocks);
1537  if (!par)
1538  return AVERROR(ENOMEM);
1539 
1540  par->qp = s->s.h.yac_qi;
1541  par->delta_qp[0][0] = s->s.h.ydc_qdelta;
1542  par->delta_qp[1][0] = s->s.h.uvdc_qdelta;
1543  par->delta_qp[2][0] = s->s.h.uvdc_qdelta;
1544  par->delta_qp[1][1] = s->s.h.uvac_qdelta;
1545  par->delta_qp[2][1] = s->s.h.uvac_qdelta;
1546 
1547  if (nb_blocks) {
1548  unsigned int block = 0;
1549  unsigned int tile, block_tile;
1550 
1551  for (tile = 0; tile < s->active_tile_cols; tile++) {
1552  VP9TileData *td = &s->td[tile];
1553 
1554  for (block_tile = 0; block_tile < td->nb_block_structure; block_tile++) {
1556  unsigned int row = td->block_structure[block_tile].row;
1557  unsigned int col = td->block_structure[block_tile].col;
1558  uint8_t seg_id = frame->segmentation_map[row * 8 * s->sb_cols + col];
1559 
1560  b->src_x = col * 8;
1561  b->src_y = row * 8;
1562  b->w = 1 << (3 + td->block_structure[block_tile].block_size_idx_x);
1563  b->h = 1 << (3 + td->block_structure[block_tile].block_size_idx_y);
1564 
1565  if (s->s.h.segmentation.feat[seg_id].q_enabled) {
1566  b->delta_qp = s->s.h.segmentation.feat[seg_id].q_val;
1567  if (s->s.h.segmentation.absolute_vals)
1568  b->delta_qp -= par->qp;
1569  }
1570  }
1571  }
1572  }
1573 
1574  return 0;
1575 }
1576 
1578  int *got_frame, AVPacket *pkt)
1579 {
1580  const uint8_t *data = pkt->data;
1581  int size = pkt->size;
1582  VP9Context *s = avctx->priv_data;
1583  int ret, i, j, ref;
1584  CodedBitstreamUnit *unit;
1585  VP9RawFrame *rf;
1586 
1587  int retain_segmap_ref = s->s.frames[REF_FRAME_SEGMAP].segmentation_map &&
1588  (!s->s.h.segmentation.enabled || !s->s.h.segmentation.update_map);
1589  const VP9Frame *src;
1590  AVFrame *f;
1591 
1592  ret = ff_cbs_read_packet(s->cbc, &s->current_frag, pkt);
1593  if (ret < 0) {
1594  ff_cbs_fragment_reset(&s->current_frag);
1595  av_log(avctx, AV_LOG_ERROR, "Failed to read frame header.\n");
1596  return ret;
1597  }
1598 
1599  unit = &s->current_frag.units[0];
1600  rf = unit->content;
1601 
1602  av_refstruct_replace(&s->header_ref, unit->content_ref);
1603  s->frame_header = &rf->header;
1604 
1605  if ((ret = decode_frame_header(avctx, data, size, &ref)) < 0) {
1606  return ret;
1607  } else if (ret == 0) {
1608  if (!s->s.refs[ref].f) {
1609  av_log(avctx, AV_LOG_ERROR, "Requested reference %d not available\n", ref);
1610  return AVERROR_INVALIDDATA;
1611  }
1612  for (int i = 0; i < 8; i++)
1613  ff_progress_frame_replace(&s->next_refs[i], &s->s.refs[i]);
1614  ff_thread_finish_setup(avctx);
1615  ff_progress_frame_await(&s->s.refs[ref], INT_MAX);
1616  ff_cbs_fragment_reset(&s->current_frag);
1617 
1618  if ((ret = av_frame_ref(frame, s->s.refs[ref].f)) < 0)
1619  return ret;
1620  frame->pts = pkt->pts;
1621  frame->pkt_dts = pkt->dts;
1622  *got_frame = 1;
1623  return pkt->size;
1624  }
1625  data += ret;
1626  size -= ret;
1627 
1628  src = !s->s.h.keyframe && !s->s.h.intraonly && !s->s.h.errorres ?
1629  &s->s.frames[CUR_FRAME] : &s->s.frames[BLANK_FRAME];
1630  if (!retain_segmap_ref || s->s.h.keyframe || s->s.h.intraonly)
1631  vp9_frame_replace(&s->s.frames[REF_FRAME_SEGMAP], src);
1632  vp9_frame_replace(&s->s.frames[REF_FRAME_MVPAIR], src);
1633  vp9_frame_unref(&s->s.frames[CUR_FRAME]);
1634  if ((ret = vp9_frame_alloc(avctx, &s->s.frames[CUR_FRAME])) < 0)
1635  return ret;
1636 
1637  s->s.frames[CUR_FRAME].header_ref = av_refstruct_ref(s->header_ref);
1638  s->s.frames[CUR_FRAME].frame_header = s->frame_header;
1639 
1640  f = s->s.frames[CUR_FRAME].tf.f;
1641  if (s->s.h.keyframe)
1642  f->flags |= AV_FRAME_FLAG_KEY;
1643  else
1644  f->flags &= ~AV_FRAME_FLAG_KEY;
1645  if (s->s.h.lossless)
1646  f->flags |= AV_FRAME_FLAG_LOSSLESS;
1647  else
1648  f->flags &= ~AV_FRAME_FLAG_LOSSLESS;
1649  f->pict_type = (s->s.h.keyframe || s->s.h.intraonly) ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
1650 
1651  // Non-existent frames have the implicit dimension 0x0 != CUR_FRAME
1652  if (!s->s.frames[REF_FRAME_MVPAIR].tf.f ||
1653  (s->s.frames[REF_FRAME_MVPAIR].tf.f->width != s->s.frames[CUR_FRAME].tf.f->width ||
1654  s->s.frames[REF_FRAME_MVPAIR].tf.f->height != s->s.frames[CUR_FRAME].tf.f->height)) {
1655  vp9_frame_unref(&s->s.frames[REF_FRAME_SEGMAP]);
1656  }
1657 
1658  // ref frame setup
1659  for (i = 0; i < 8; i++) {
1660  ff_progress_frame_replace(&s->next_refs[i],
1661  s->s.h.refreshrefmask & (1 << i) ?
1662  &s->s.frames[CUR_FRAME].tf : &s->s.refs[i]);
1663  }
1664 
1665  if (avctx->hwaccel) {
1666  const FFHWAccel *hwaccel = ffhwaccel(avctx->hwaccel);
1667  ret = hwaccel->start_frame(avctx, pkt->buf, pkt->data, pkt->size);
1668  if (ret < 0)
1669  return ret;
1670  ret = hwaccel->decode_slice(avctx, pkt->data, pkt->size);
1671  if (ret < 0)
1672  return ret;
1673  ret = hwaccel->end_frame(avctx);
1674  if (ret < 0)
1675  return ret;
1676 
1677  for (i = 0; i < 8; i++) {
1678  vp9_frame_replace(&s->s.ref_frames[i],
1679  s->s.h.refreshrefmask & (1 << i) ?
1680  &s->s.frames[CUR_FRAME] : &s->s.ref_frames[i]);
1681  }
1682 
1683  goto finish;
1684  }
1685 
1686  // main tile decode loop
1687  memset(s->above_partition_ctx, 0, s->cols);
1688  memset(s->above_skip_ctx, 0, s->cols);
1689  if (s->s.h.keyframe || s->s.h.intraonly) {
1690  memset(s->above_mode_ctx, DC_PRED, s->cols * 2);
1691  } else {
1692  memset(s->above_mode_ctx, NEARESTMV, s->cols);
1693  }
1694  memset(s->above_y_nnz_ctx, 0, s->sb_cols * 16);
1695  memset(s->above_uv_nnz_ctx[0], 0, s->sb_cols * 16 >> s->ss_h);
1696  memset(s->above_uv_nnz_ctx[1], 0, s->sb_cols * 16 >> s->ss_h);
1697  memset(s->above_segpred_ctx, 0, s->cols);
1698  s->pass = s->s.frames[CUR_FRAME].uses_2pass =
1699  avctx->active_thread_type == FF_THREAD_FRAME && s->s.h.refreshctx && !s->s.h.parallelmode;
1700  if ((ret = update_block_buffers(avctx)) < 0) {
1701  av_log(avctx, AV_LOG_ERROR,
1702  "Failed to allocate block buffers\n");
1703  return ret;
1704  }
1705  if (s->s.h.refreshctx && s->s.h.parallelmode) {
1706  int j, k, l, m;
1707 
1708  for (i = 0; i < 4; i++) {
1709  for (j = 0; j < 2; j++)
1710  for (k = 0; k < 2; k++)
1711  for (l = 0; l < 6; l++)
1712  for (m = 0; m < 6; m++)
1713  memcpy(s->prob_ctx[s->s.h.framectxid].coef[i][j][k][l][m],
1714  s->prob.coef[i][j][k][l][m], 3);
1715  if (s->s.h.txfmmode == i)
1716  break;
1717  }
1718  s->prob_ctx[s->s.h.framectxid].p = s->prob.p;
1719  ff_thread_finish_setup(avctx);
1720  } else if (!s->s.h.refreshctx) {
1721  ff_thread_finish_setup(avctx);
1722  }
1723 
1724 #if HAVE_THREADS
1725  if (avctx->active_thread_type & FF_THREAD_SLICE) {
1726  for (i = 0; i < s->sb_rows; i++)
1727  atomic_init(&s->entries[i], 0);
1728  }
1729 #endif
1730 
1731  do {
1732  for (i = 0; i < s->active_tile_cols; i++) {
1733  s->td[i].b = s->td[i].b_base;
1734  s->td[i].block = s->td[i].block_base;
1735  s->td[i].uvblock[0] = s->td[i].uvblock_base[0];
1736  s->td[i].uvblock[1] = s->td[i].uvblock_base[1];
1737  s->td[i].eob = s->td[i].eob_base;
1738  s->td[i].uveob[0] = s->td[i].uveob_base[0];
1739  s->td[i].uveob[1] = s->td[i].uveob_base[1];
1740  s->td[i].error_info = 0;
1741  }
1742 
1743 #if HAVE_THREADS
1744  if (avctx->active_thread_type == FF_THREAD_SLICE) {
1745  int tile_row, tile_col;
1746 
1747  av_assert1(!s->pass);
1748 
1749  for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
1750  for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
1751  int64_t tile_size;
1752 
1753  if (tile_col == s->s.h.tiling.tile_cols - 1 &&
1754  tile_row == s->s.h.tiling.tile_rows - 1) {
1755  tile_size = size;
1756  } else {
1757  tile_size = AV_RB32(data);
1758  data += 4;
1759  size -= 4;
1760  }
1761  if (tile_size > size)
1762  return AVERROR_INVALIDDATA;
1763  ret = ff_vpx_init_range_decoder(&s->td[tile_col].c_b[tile_row], data, tile_size);
1764  if (ret < 0)
1765  return ret;
1766  if (vpx_rac_get_prob_branchy(&s->td[tile_col].c_b[tile_row], 128)) // marker bit
1767  return AVERROR_INVALIDDATA;
1768  data += tile_size;
1769  size -= tile_size;
1770  }
1771  }
1772 
1773  ff_slice_thread_execute_with_mainfunc(avctx, decode_tiles_mt, loopfilter_proc, s->td, NULL, s->s.h.tiling.tile_cols);
1774  } else
1775 #endif
1776  {
1777  ret = decode_tiles(avctx, data, size);
1778  if (ret < 0)
1779  goto fail;
1780  }
1781 
1782  // Sum all counts fields into td[0].counts for tile threading
1783  if (avctx->active_thread_type == FF_THREAD_SLICE)
1784  for (i = 1; i < s->s.h.tiling.tile_cols; i++)
1785  for (j = 0; j < sizeof(s->td[i].counts) / sizeof(unsigned); j++)
1786  ((unsigned *)&s->td[0].counts)[j] += ((unsigned *)&s->td[i].counts)[j];
1787 
1788  if (s->pass < 2 && s->s.h.refreshctx && !s->s.h.parallelmode) {
1790  ff_thread_finish_setup(avctx);
1791  }
1792  } while (s->pass++ == 1);
1793 
1794  if (s->td->error_info < 0) {
1795  av_log(avctx, AV_LOG_ERROR, "Failed to decode tile data\n");
1796  s->td->error_info = 0;
1798  goto fail;
1799  }
1801  ret = vp9_export_enc_params(s, &s->s.frames[CUR_FRAME]);
1802  if (ret < 0)
1803  goto fail;
1804  }
1805 
1806 finish:
1807  ff_cbs_fragment_reset(&s->current_frag);
1808 
1809  ff_progress_frame_report(&s->s.frames[CUR_FRAME].tf, INT_MAX);
1810  // ref frame setup
1811  for (int i = 0; i < 8; i++)
1812  ff_progress_frame_replace(&s->s.refs[i], &s->next_refs[i]);
1813 
1814  if (!s->s.h.invisible) {
1815  if ((ret = av_frame_ref(frame, s->s.frames[CUR_FRAME].tf.f)) < 0)
1816  return ret;
1817  *got_frame = 1;
1818  }
1819 
1820  return pkt->size;
1821 fail:
1822  ff_progress_frame_report(&s->s.frames[CUR_FRAME].tf, INT_MAX);
1823  return ret;
1824 }
1825 
1827 {
1828  VP9Context *s = avctx->priv_data;
1829  int i;
1830 
1831  for (i = 0; i < 3; i++)
1832  vp9_frame_unref(&s->s.frames[i]);
1833 
1834  for (i = 0; i < 8; i++) {
1835  ff_progress_frame_unref(&s->s.refs[i]);
1836  vp9_frame_unref(&s->s.ref_frames[i]);
1837  }
1838 
1839  ff_cbs_fragment_reset(&s->current_frag);
1840  ff_cbs_flush(s->cbc);
1841 
1842  if (FF_HW_HAS_CB(avctx, flush))
1843  FF_HW_SIMPLE_CALL(avctx, flush);
1844 }
1845 
1847 {
1848  VP9Context *s = avctx->priv_data;
1849  int ret;
1850 
1851  s->last_bpp = 0;
1852  s->s.h.filter.sharpness = -1;
1853 
1854  ret = ff_cbs_init(&s->cbc, AV_CODEC_ID_VP9, avctx);
1855  if (ret < 0)
1856  return ret;
1857 
1858 #if HAVE_THREADS
1859  if (avctx->active_thread_type & FF_THREAD_SLICE) {
1860  ret = ff_pthread_init(s, vp9_context_offsets);
1861  if (ret < 0)
1862  return ret;
1863  }
1864 #endif
1865 
1866  return 0;
1867 }
1868 
1869 #if HAVE_THREADS
/**
 * Frame-threading context transfer: copy from the source thread's context
 * into dst everything later frames depend on — frame slots, reference
 * frames, extradata pool, header state, probability contexts and the
 * derived format fields.
 */
static int vp9_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
    VP9Context *s = dst->priv_data, *ssrc = src->priv_data;

    for (int i = 0; i < 3; i++)
        vp9_frame_replace(&s->s.frames[i], &ssrc->s.frames[i]);
    // the source's *next* refs are this thread's current refs
    for (int i = 0; i < 8; i++)
        ff_progress_frame_replace(&s->s.refs[i], &ssrc->next_refs[i]);
    av_refstruct_replace(&s->frame_extradata_pool, ssrc->frame_extradata_pool);
    s->frame_extradata_pool_size = ssrc->frame_extradata_pool_size;

    av_refstruct_replace(&s->header_ref, ssrc->header_ref);
    for (int i = 0; i < 8; i++)
        vp9_frame_replace(&s->s.ref_frames[i], &ssrc->s.ref_frames[i]);

    s->frame_header = ssrc->frame_header;
    memcpy(s->cbc->priv_data, ssrc->cbc->priv_data, sizeof(CodedBitstreamVP9Context));

    s->s.h.invisible = ssrc->s.h.invisible;
    s->s.h.keyframe = ssrc->s.h.keyframe;
    s->s.h.intraonly = ssrc->s.h.intraonly;
    s->ss_v = ssrc->ss_v;
    s->ss_h = ssrc->ss_h;
    s->s.h.segmentation.enabled = ssrc->s.h.segmentation.enabled;
    s->s.h.segmentation.update_map = ssrc->s.h.segmentation.update_map;
    s->s.h.segmentation.absolute_vals = ssrc->s.h.segmentation.absolute_vals;
    s->bytesperpixel = ssrc->bytesperpixel;
    s->gf_fmt = ssrc->gf_fmt;
    s->w = ssrc->w;
    s->h = ssrc->h;
    s->s.h.bpp = ssrc->s.h.bpp;
    s->bpp_index = ssrc->bpp_index;
    s->pix_fmt = ssrc->pix_fmt;
    memcpy(&s->prob_ctx, &ssrc->prob_ctx, sizeof(s->prob_ctx));
    memcpy(&s->s.h.lf_delta, &ssrc->s.h.lf_delta, sizeof(s->s.h.lf_delta));
    memcpy(&s->s.h.segmentation.feat, &ssrc->s.h.segmentation.feat,
           sizeof(s->s.h.segmentation.feat));

    return 0;
}
1910 #endif
1911 
1913  .p.name = "vp9",
1914  CODEC_LONG_NAME("Google VP9"),
1915  .p.type = AVMEDIA_TYPE_VIDEO,
1916  .p.id = AV_CODEC_ID_VP9,
1917  .priv_data_size = sizeof(VP9Context),
1918  .init = vp9_decode_init,
1922  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
1925  .flush = vp9_decode_flush,
1926  UPDATE_THREAD_CONTEXT(vp9_decode_update_thread_context),
1927  .p.profiles = NULL_IF_CONFIG_SMALL(ff_vp9_profiles),
1928  .bsfs = "vp9_superframe_split",
1929  .hw_configs = (const AVCodecHWConfigInternal *const []) {
1930 #if CONFIG_VP9_DXVA2_HWACCEL
1931  HWACCEL_DXVA2(vp9),
1932 #endif
1933 #if CONFIG_VP9_D3D11VA_HWACCEL
1934  HWACCEL_D3D11VA(vp9),
1935 #endif
1936 #if CONFIG_VP9_D3D11VA2_HWACCEL
1937  HWACCEL_D3D11VA2(vp9),
1938 #endif
1939 #if CONFIG_VP9_D3D12VA_HWACCEL
1940  HWACCEL_D3D12VA(vp9),
1941 #endif
1942 #if CONFIG_VP9_NVDEC_HWACCEL
1943  HWACCEL_NVDEC(vp9),
1944 #endif
1945 #if CONFIG_VP9_VAAPI_HWACCEL
1946  HWACCEL_VAAPI(vp9),
1947 #endif
1948 #if CONFIG_VP9_VDPAU_HWACCEL
1949  HWACCEL_VDPAU(vp9),
1950 #endif
1951 #if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
1952  HWACCEL_VIDEOTOOLBOX(vp9),
1953 #endif
1954 #if CONFIG_VP9_VULKAN_HWACCEL
1955  HWACCEL_VULKAN(vp9),
1956 #endif
1957  NULL
1958  },
1959 };
VP9TileData::left_y_nnz_ctx
uint8_t left_y_nnz_ctx[16]
Definition: vp9dec.h:216
HWACCEL_D3D12VA
#define HWACCEL_D3D12VA(codec)
Definition: hwconfig.h:80
AVVideoEncParams::qp
int32_t qp
Base quantisation parameter for the frame.
Definition: video_enc_params.h:103
hwconfig.h
ff_progress_frame_report
void ff_progress_frame_report(ProgressFrame *f, int n)
Notify later decoding threads when part of their reference frame is ready.
Definition: decode.c:1913
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1405
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:260
FF_CODEC_CAP_SLICE_THREAD_HAS_MF
#define FF_CODEC_CAP_SLICE_THREAD_HAS_MF
Codec initializes slice-based threading with a main function.
Definition: codec_internal.h:64
decode_tiles
static int decode_tiles(AVCodecContext *avctx, const uint8_t *data, int size)
Definition: vp9.c:1286
CodedBitstreamUnit::content_ref
void * content_ref
If content is reference counted, a RefStruct reference backing content.
Definition: cbs.h:119
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
vp9_frame_alloc
static int vp9_frame_alloc(AVCodecContext *avctx, VP9Frame *f)
Definition: vp9.c:107
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
PRED_SWITCHABLE
@ PRED_SWITCHABLE
Definition: vp9shared.h:53
PRED_SINGLEREF
@ PRED_SINGLEREF
Definition: vp9shared.h:51
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:659
VP9TileData::block_structure
struct VP9TileData::@312 * block_structure
VP9TileData::uvblock_base
int16_t * uvblock_base[2]
Definition: vp9dec.h:232
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1203
VP9TileData::partition
unsigned partition[4][4][4]
Definition: vp9dec.h:207
VP9Frame
Definition: vp9shared.h:66
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:124
ff_vp9_decoder
const FFCodec ff_vp9_decoder
Definition: vp9.c:1912
decode_sb
static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
Definition: vp9.c:1112
ff_vp9_adapt_probs
void ff_vp9_adapt_probs(VP9Context *s)
Definition: vp9prob.c:44
CodedBitstreamUnit::content
void * content
Pointer to the decomposed form of this unit.
Definition: cbs.h:114
int64_t
long long int64_t
Definition: coverity.c:34
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:250
VP9TileData::left_skip_ctx
uint8_t left_skip_ctx[8]
Definition: vp9dec.h:221
VP9TileData::row
int row
Definition: vp9dec.h:177
PRED_COMPREF
@ PRED_COMPREF
Definition: vp9shared.h:52
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
pixdesc.h
w
uint8_t w
Definition: llviddspenc.c:38
HWACCEL_DXVA2
#define HWACCEL_DXVA2(codec)
Definition: hwconfig.h:64
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:767
BlockPartition
BlockPartition
Definition: vp9shared.h:36
AVPacket::data
uint8_t * data
Definition: packet.h:558
DC_PRED
@ DC_PRED
Definition: vp9.h:48
pthread_mutex_lock
static av_always_inline int pthread_mutex_lock(pthread_mutex_t *mutex)
Definition: os2threads.h:119
HWACCEL_D3D11VA2
#define HWACCEL_D3D11VA2(codec)
Definition: hwconfig.h:66
b
#define b
Definition: input.c:42
ff_progress_frame_get_buffer
int ff_progress_frame_get_buffer(AVCodecContext *avctx, ProgressFrame *f, int flags)
Wrapper around ff_progress_frame_alloc() and ff_thread_get_buffer().
Definition: decode.c:1873
data
const char data[16]
Definition: mxf.c:149
update_size
static int update_size(AVCodecContext *avctx, int w, int h)
Definition: vp9.c:165
decode_sb_mem
static void decode_sb_mem(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
Definition: vp9.c:1192
REF_FRAME_SEGMAP
#define REF_FRAME_SEGMAP
Definition: vp9shared.h:174
decode_frame_header
static int decode_frame_header(AVCodecContext *avctx, const uint8_t *data, int size, int *ref)
Definition: vp9.c:516
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:539
atomic_int
intptr_t atomic_int
Definition: stdatomic.h:55
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:254
FFCodec
Definition: codec_internal.h:127
VP9TileData::c_b
VPXRangeCoder * c_b
Definition: vp9dec.h:175
AVCOL_SPC_RGB
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1
Definition: pixfmt.h:691
VP9TileData::left_segpred_ctx
uint8_t left_segpred_ctx[8]
Definition: vp9dec.h:223
FF_HW_SIMPLE_CALL
#define FF_HW_SIMPLE_CALL(avctx, function)
Definition: hwaccel_internal.h:176
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:106
max
#define max(a, b)
Definition: cuda_runtime.h:33
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
VP9_SYNCCODE
#define VP9_SYNCCODE
Definition: vp9.c:50
VP9Block::bl
enum BlockLevel bl
Definition: vp9dec.h:91
vp89_rac.h
VP9Filter
Definition: vp9dec.h:79
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
VP9TileData::b
VP9Block * b
Definition: vp9dec.h:180
VPXRangeCoder
Definition: vpx_rac.h:35
thread.h
ff_pthread_free
av_cold void ff_pthread_free(void *obj, const unsigned offsets[])
Definition: pthread.c:92
AV_PIX_FMT_VULKAN
@ AV_PIX_FMT_VULKAN
Vulkan hardware images.
Definition: pixfmt.h:379
FILTER_SWITCHABLE
@ FILTER_SWITCHABLE
Definition: vp9.h:70
CodedBitstreamUnit
Coded bitstream unit structure.
Definition: cbs.h:77
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
VP9Block
Definition: vp9dec.h:85
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:379
close
static av_cold void close(AVCodecParserContext *s)
Definition: apv_parser.c:135
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:696
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:333
AVCOL_SPC_RESERVED
@ AVCOL_SPC_RESERVED
reserved for future use by ITU-T and ISO/IEC just like 15-255 are
Definition: pixfmt.h:694
TX_SWITCHABLE
@ TX_SWITCHABLE
Definition: vp9.h:33
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
finish
static void finish(void)
Definition: movenc.c:374
FFHWAccel
Definition: hwaccel_internal.h:34
ff_vp9_ac_qlookup
const int16_t ff_vp9_ac_qlookup[3][256]
Definition: vp9data.c:334
AVVideoEncParams::delta_qp
int32_t delta_qp[4][2]
Quantisation parameter offset from the base (per-frame) qp for a given plane (first index) and AC/DC ...
Definition: video_enc_params.h:109
fail
#define fail()
Definition: checkasm.h:200
AV_REFSTRUCT_POOL_FLAG_ZERO_EVERY_TIME
#define AV_REFSTRUCT_POOL_FLAG_ZERO_EVERY_TIME
If this flag is set, the entries will be zeroed before being returned to the user (after the init or ...
Definition: refstruct.h:221
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:558
GetBitContext
Definition: get_bits.h:109
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:488
HWACCEL_VDPAU
#define HWACCEL_VDPAU(codec)
Definition: hwconfig.h:72
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:39
PARTITION_NONE
@ PARTITION_NONE
Definition: vp9shared.h:37
vp9_frame_unref
static void vp9_frame_unref(VP9Frame *f)
Definition: vp9.c:98
progressframe.h
refstruct.h
AVVideoEncParams
Video encoding parameters for a given frame.
Definition: video_enc_params.h:73
VP9TileData::col
int col
Definition: vp9dec.h:177
vp9_decode_free
static av_cold int vp9_decode_free(AVCodecContext *avctx)
Definition: vp9.c:1258
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:542
avassert.h
FF_CODEC_CAP_USES_PROGRESSFRAMES
#define FF_CODEC_CAP_USES_PROGRESSFRAMES
The decoder might make use of the ProgressFrame API.
Definition: codec_internal.h:68
ff_vp9_model_pareto8
const uint8_t ff_vp9_model_pareto8[256][8]
Definition: vp9data.c:1176
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:539
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1638
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:642
BL_8X8
@ BL_8X8
Definition: vp9shared.h:83
PARTITION_V
@ PARTITION_V
Definition: vp9shared.h:39
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:346
ff_hwaccel_frame_priv_alloc
int ff_hwaccel_frame_priv_alloc(AVCodecContext *avctx, void **hwaccel_picture_private)
Allocate a hwaccel frame private data if the provided avctx uses a hwaccel method that needs it.
Definition: decode.c:2266
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:134
s
#define s(width, name)
Definition: cbs_vp9.c:198
pthread_mutex_unlock
static av_always_inline int pthread_mutex_unlock(pthread_mutex_t *mutex)
Definition: os2threads.h:126
AVCOL_SPC_SMPTE170M
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
Definition: pixfmt.h:697
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:411
AV_CODEC_ID_VP9
@ AV_CODEC_ID_VP9
Definition: codec_id.h:222
vp9data.h
bits
uint8_t bits
Definition: vp3data.h:128
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:41
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:298
ff_progress_frame_unref
void ff_progress_frame_unref(ProgressFrame *f)
Give up a reference to the underlying frame contained in a ProgressFrame and reset the ProgressFrame,...
Definition: decode.c:1896
ff_progress_frame_await
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_progress_frame_await() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_progress_frame_report() has been called on them. This includes draw_edges(). Porting codecs to frame threading
decode.h
get_bits.h
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
VP9TileData::block_size_idx_x
unsigned int block_size_idx_x
Definition: vp9dec.h:240
ff_vp9dsp_init
av_cold void ff_vp9dsp_init(VP9DSPContext *dsp, int bpp, int bitexact)
Definition: vp9dsp.c:88
ff_vp9_partition_tree
const int8_t ff_vp9_partition_tree[3][2]
Definition: vp9data.c:35
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
vp9_decode_frame
static int vp9_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
Definition: vp9.c:1577
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:331
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:95
AVPacket::buf
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
Definition: packet.h:541
NULL
#define NULL
Definition: coverity.c:32
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:669
hwaccel_internal.h
VP9Context
Definition: vp9dec.h:97
av_unreachable
#define av_unreachable(msg)
Asserts that are used as compiler optimization hints depending upon ASSERT_LEVEL and NBDEBUG.
Definition: avassert.h:108
REF_FRAME_MVPAIR
#define REF_FRAME_MVPAIR
Definition: vp9shared.h:173
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:386
vp89_rac_get_uint
static av_unused int vp89_rac_get_uint(VPXRangeCoder *c, int bits)
Definition: vp89_rac.h:41
profiles.h
AV_PIX_FMT_YUV440P10
#define AV_PIX_FMT_YUV440P10
Definition: pixfmt.h:541
flush
void(* flush)(AVBSFContext *ctx)
Definition: dts2pts.c:370
av_refstruct_pool_get
void * av_refstruct_pool_get(AVRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.
Definition: refstruct.c:297
pthread_internal.h
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:340
AV_PIX_FMT_D3D12
@ AV_PIX_FMT_D3D12
Hardware surfaces for Direct3D 12.
Definition: pixfmt.h:440
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:540
VP9mv
Definition: vp9shared.h:56
PARTITION_SPLIT
@ PARTITION_SPLIT
Definition: vp9shared.h:40
FF_HW_HAS_CB
#define FF_HW_HAS_CB(avctx, function)
Definition: hwaccel_internal.h:179
VP9RawFrame
Definition: cbs_vp9.h:164
atomic_load_explicit
#define atomic_load_explicit(object, order)
Definition: stdatomic.h:96
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
vp9_frame_replace
static void vp9_frame_replace(VP9Frame *dst, const VP9Frame *src)
Definition: vp9.c:148
VP9RawFrame::header
VP9RawFrameHeader header
Definition: cbs_vp9.h:165
av_video_enc_params_create_side_data
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVEncodeInfoFrame plus an array of.
Definition: video_enc_params.c:58
vp9.h
f
f
Definition: af_crystalizer.c:122
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:368
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:559
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
codec_internal.h
VP9TileData::eob_base
uint8_t * eob_base
Definition: vp9dec.h:233
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
pix_fmt_rgb
static enum AVPixelFormat pix_fmt_rgb[3]
Definition: libdav1d.c:68
REF_INVALID_SCALE
#define REF_INVALID_SCALE
Definition: vp9dec.h:43
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
read_colorspace_details
static int read_colorspace_details(AVCodecContext *avctx)
Definition: vp9.c:454
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:544
size
int size
Definition: twinvq_data.h:10344
vp9_alloc_entries
static int vp9_alloc_entries(AVCodecContext *avctx, int n)
Definition: vp9.c:88
atomic_fetch_add_explicit
#define atomic_fetch_add_explicit(object, operand, order)
Definition: stdatomic.h:149
VP9TileData::b_base
VP9Block * b_base
Definition: vp9dec.h:180
free_buffers
static void free_buffers(VP9Context *s)
Definition: vp9.c:1249
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:546
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1573
AVCodecHWConfigInternal
Definition: hwconfig.h:25
TX_4X4
@ TX_4X4
Definition: vp9.h:28
update_block_buffers
static int update_block_buffers(AVCodecContext *avctx)
Definition: vp9.c:319
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:557
av_refstruct_ref
void * av_refstruct_ref(void *obj)
Create a new reference to an object managed via this API, i.e.
Definition: refstruct.c:140
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:99
CodedBitstreamVP9Context
Definition: cbs_vp9.h:192
HWACCEL_D3D11VA
#define HWACCEL_D3D11VA(codec)
Definition: hwconfig.h:78
attributes.h
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:336
VP9TileData::block_base
int16_t * block_base
Definition: vp9dec.h:232
inv_recenter_nonneg
static av_always_inline int inv_recenter_nonneg(int v, int m)
Definition: vp9.c:386
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:68
vpx_rac_is_end
static av_always_inline int vpx_rac_is_end(VPXRangeCoder *c)
returns 1 if the end of the stream has been reached, 0 otherwise.
Definition: vpx_rac.h:51
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:126
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1572
VP9TileData::left_uv_nnz_ctx
uint8_t left_uv_nnz_ctx[2][16]
Definition: vp9dec.h:219
av_refstruct_unref
void av_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are...
Definition: refstruct.c:120
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:194
ff_slice_thread_execute_with_mainfunc
int ff_slice_thread_execute_with_mainfunc(AVCodecContext *avctx, action_func2 *func2, main_func *mainfunc, void *arg, int *ret, int job_count)
Definition: pthread_slice.c:104
AVCOL_SPC_SMPTE240M
@ AVCOL_SPC_SMPTE240M
derived from 170M primaries and D65 white point, 170M is derived from BT470 System M's primaries
Definition: pixfmt.h:698
assign
#define assign(var, type, n)
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:305
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:68
update_prob
static int update_prob(VPXRangeCoder *c, int p)
Definition: vp9.c:396
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:551
DEFINE_OFFSET_ARRAY
#define DEFINE_OFFSET_ARRAY(type, name, cnt_variable, mutexes, conds)
Definition: pthread_internal.h:61
AVCOL_SPC_BT2020_NCL
@ AVCOL_SPC_BT2020_NCL
ITU-R BT2020 non-constant luminance system.
Definition: pixfmt.h:701
vpx_rac.h
decode012
static int BS_FUNC() decode012(BSCTX *bc)
Return decoded truncated unary code for the values 0, 1, 2.
Definition: bitstream_template.h:444
VP9TileData::block_size_idx_y
unsigned int block_size_idx_y
Definition: vp9dec.h:241
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:559
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:690
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:57
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
vpx_rac_get_prob_branchy
static av_always_inline int vpx_rac_get_prob_branchy(VPXRangeCoder *c, int prob)
Definition: vpx_rac.h:99
AVVideoBlockParams
Data structure for storing block-level encoding information.
Definition: video_enc_params.h:120
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
get_sbits_inv
static av_always_inline int get_sbits_inv(GetBitContext *gb, int n)
Definition: vp9.c:380
VP9TileData::left_mode_ctx
uint8_t left_mode_ctx[16]
Definition: vp9dec.h:217
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:693
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:631
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:750
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
VP9TileData::c
VPXRangeCoder * c
Definition: vp9dec.h:176
HWACCEL_VIDEOTOOLBOX
#define HWACCEL_VIDEOTOOLBOX(codec)
Definition: hwconfig.h:74
avcodec.h
limit
static double limit(double x)
Definition: vf_pseudocolor.c:142
vp89_rac_get_tree
static av_always_inline int vp89_rac_get_tree(VPXRangeCoder *c, const int8_t(*tree)[2], const uint8_t *probs)
Definition: vp89_rac.h:54
VP9TileData::s
const VP9Context * s
Definition: vp9dec.h:174
BL_64X64
@ BL_64X64
Definition: vp9shared.h:80
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
vp9_decode_init
static av_cold int vp9_decode_init(AVCodecContext *avctx)
Definition: vp9.c:1846
tile
static int FUNC() tile(CodedBitstreamContext *ctx, RWContext *rw, APVRawTile *current, int tile_idx, uint32_t tile_size)
Definition: cbs_apv_syntax_template.c:224
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:555
hwaccel
static const char * hwaccel
Definition: ffplay.c:353
ff_vpx_init_range_decoder
int ff_vpx_init_range_decoder(VPXRangeCoder *c, const uint8_t *buf, int buf_size)
Definition: vpx_rac.c:42
av_refstruct_pool_alloc
AVRefStructPool * av_refstruct_pool_alloc(size_t size, unsigned flags)
Equivalent to av_refstruct_pool_alloc(size, flags, NULL, NULL, NULL, NULL, NULL)
Definition: refstruct.c:335
vp9_tile_data_free
static void vp9_tile_data_free(VP9TileData *td)
Definition: vp9.c:91
ff_thread_finish_setup
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards. If some code can 't be moved
VP9mvrefPair
Definition: vp9shared.h:61
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:543
pthread_cond_signal
static av_always_inline int pthread_cond_signal(pthread_cond_t *cond)
Definition: os2threads.h:152
AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only.
Definition: avcodec.h:395
ff_progress_frame_replace
void ff_progress_frame_replace(ProgressFrame *dst, const ProgressFrame *src)
Do nothing if dst and src already refer to the same AVFrame; otherwise unreference dst and if src is ...
Definition: decode.c:1903
VP9TileData
Definition: vp9dec.h:173
VP9TileData::uveob_base
uint8_t * uveob_base[2]
Definition: vp9dec.h:233
HWACCEL_VULKAN
#define HWACCEL_VULKAN(codec)
Definition: hwconfig.h:76
vp89_rac_get
static av_always_inline int vp89_rac_get(VPXRangeCoder *c)
Definition: vp89_rac.h:36
AVCodecContext
main external API structure.
Definition: avcodec.h:431
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1580
VP9Filter::mask
uint8_t mask[2][2][8][4]
Definition: vp9dec.h:82
av_refstruct_replace
void av_refstruct_replace(void *dstp, const void *src)
Ensure *dstp refers to the same object as src.
Definition: refstruct.c:160
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1618
ffhwaccel
static const FFHWAccel * ffhwaccel(const AVHWAccel *codec)
Definition: hwaccel_internal.h:168
ff_vp9_decode_block
void ff_vp9_decode_block(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl, enum BlockPartition bp)
Definition: vp9block.c:1264
NEARESTMV
@ NEARESTMV
Definition: vp9shared.h:44
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:117
BlockLevel
BlockLevel
Definition: vp9shared.h:79
AVCodecContext::export_side_data
int export_side_data
Bit set of AV_CODEC_EXPORT_DATA_* flags, which affects the kind of metadata exported in frame,...
Definition: avcodec.h:1774
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
ff_pthread_init
av_cold int ff_pthread_init(void *obj, const unsigned offsets[])
Initialize/destroy a list of mutexes/conditions contained in a structure.
Definition: pthread.c:105
pthread_cond_wait
static av_always_inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
Definition: os2threads.h:192
vp9dec.h
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
VP9TileData::counts
struct VP9TileData::@310 counts
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:279
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
ff_vp9_default_kf_partition_probs
const uint8_t ff_vp9_default_kf_partition_probs[4][4][3]
Definition: vp9data.c:41
AV_VIDEO_ENC_PARAMS_VP9
@ AV_VIDEO_ENC_PARAMS_VP9
VP9 stores:
Definition: video_enc_params.h:44
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:322
ff_vp9_default_probs
const ProbContext ff_vp9_default_probs
Definition: vp9data.c:1435
CUR_FRAME
#define CUR_FRAME
Definition: vp9shared.h:172
ff_vp9_loopfilter_sb
void ff_vp9_loopfilter_sb(struct AVCodecContext *avctx, VP9Filter *lflvl, int row, int col, ptrdiff_t yoff, ptrdiff_t uvoff)
Definition: vp9lpf.c:179
av_refstruct_pool_uninit
static void av_refstruct_pool_uninit(AVRefStructPool **poolp)
Mark the pool as being available for freeing.
Definition: refstruct.h:292
vp9_export_enc_params
static int vp9_export_enc_params(VP9Context *s, VP9Frame *frame)
Definition: vp9.c:1525
AVPacket
This structure stores compressed data.
Definition: packet.h:535
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
PARTITION_H
@ PARTITION_H
Definition: vp9shared.h:38
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
videodsp.h
BLANK_FRAME
#define BLANK_FRAME
Definition: vp9shared.h:175
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:70
AVCodecContext::properties
attribute_deprecated unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1637
HWACCEL_MAX
#define HWACCEL_MAX
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_video_enc_params_block
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
Get the block at the specified.
Definition: video_enc_params.h:143
AV_PIX_FMT_YUV440P12
#define AV_PIX_FMT_YUV440P12
Definition: pixfmt.h:545
h
h
Definition: vp9dsp_template.c:2070
atomic_init
#define atomic_init(obj, value)
Definition: stdatomic.h:33
VP9TileData::nb_block_structure
unsigned int nb_block_structure
Definition: vp9dec.h:243
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
Definition: pixfmt.h:692
VP9TileData::tile_col_start
unsigned tile_col_start
Definition: vp9dec.h:181
AV_FRAME_FLAG_LOSSLESS
#define AV_FRAME_FLAG_LOSSLESS
A decoder can use this flag to mark frames which were originally encoded losslessly.
Definition: frame.h:663
src
#define src
Definition: vp8dsp.c:248
ff_vp9_profiles
const AVProfile ff_vp9_profiles[]
Definition: profiles.c:155
video_enc_params.h
set_tile_offset
static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
Definition: vp9.c:1241
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:3367
ff_vp9_dc_qlookup
const int16_t ff_vp9_dc_qlookup[3][256]
Definition: vp9data.c:231
ff_vp9_default_coef_probs
const uint8_t ff_vp9_default_coef_probs[4][2][2][6][6][3]
Definition: vp9data.c:1540
vp9_decode_flush
static av_cold void vp9_decode_flush(AVCodecContext *avctx)
Definition: vp9.c:1826
VP9TileData::left_partition_ctx
uint8_t left_partition_ctx[8]
Definition: vp9dec.h:220