vp9.c
1 /*
2  * VP9 compatible video decoder
3  *
4  * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
5  * Copyright (C) 2013 Clément Bœsch <u pkh me>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include "config_components.h"
25 
26 #include "avcodec.h"
27 #include "codec_internal.h"
28 #include "decode.h"
29 #include "get_bits.h"
30 #include "hwaccel_internal.h"
31 #include "hwconfig.h"
32 #include "profiles.h"
33 #include "refstruct.h"
34 #include "thread.h"
35 #include "threadframe.h"
36 #include "pthread_internal.h"
37 
38 #include "videodsp.h"
39 #include "vp89_rac.h"
40 #include "vp9.h"
41 #include "vp9data.h"
42 #include "vp9dec.h"
43 #include "vpx_rac.h"
44 #include "libavutil/avassert.h"
45 #include "libavutil/pixdesc.h"
46 #include "libavutil/video_enc_params.h"
47
48 #define VP9_SYNCCODE 0x498342
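/* 0x498342 is the 24-bit sync code that keyframes and intra-only frames
 * carry in their uncompressed header; decode_frame_header() below reads it
 * with get_bits(&s->gb, 24) and rejects the frame if it does not match. */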
49 
50 #if HAVE_THREADS
51 DEFINE_OFFSET_ARRAY(VP9Context, vp9_context, pthread_init_cnt,
52  (offsetof(VP9Context, progress_mutex)),
53  (offsetof(VP9Context, progress_cond)));
54 
55 static int vp9_alloc_entries(AVCodecContext *avctx, int n) {
56  VP9Context *s = avctx->priv_data;
57  int i;
58 
59  if (avctx->active_thread_type & FF_THREAD_SLICE) {
60  if (s->entries)
61  av_freep(&s->entries);
62 
63  s->entries = av_malloc_array(n, sizeof(atomic_int));
64  if (!s->entries)
65  return AVERROR(ENOMEM);
66 
67  for (i = 0; i < n; i++)
68  atomic_init(&s->entries[i], 0);
69  }
70  return 0;
71 }
72 
73 static void vp9_report_tile_progress(VP9Context *s, int field, int n) {
74  pthread_mutex_lock(&s->progress_mutex);
75  atomic_fetch_add_explicit(&s->entries[field], n, memory_order_release);
76  pthread_cond_signal(&s->progress_cond);
77  pthread_mutex_unlock(&s->progress_mutex);
78 }
79 
80 static void vp9_await_tile_progress(VP9Context *s, int field, int n) {
81  if (atomic_load_explicit(&s->entries[field], memory_order_acquire) >= n)
82  return;
83 
84  pthread_mutex_lock(&s->progress_mutex);
85  while (atomic_load_explicit(&s->entries[field], memory_order_relaxed) != n)
86  pthread_cond_wait(&s->progress_cond, &s->progress_mutex);
87  pthread_mutex_unlock(&s->progress_mutex);
88 }
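/* Together, vp9_report_tile_progress() and vp9_await_tile_progress() form a
 * small producer/consumer handshake for slice threading: each tile-column
 * worker in decode_tiles_mt() bumps entries[sb_row] once it finishes a
 * superblock row, and loopfilter_proc() blocks on that counter until every
 * tile column has reported the row it wants to filter. */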
89 #else
90 static int vp9_alloc_entries(AVCodecContext *avctx, int n) { return 0; }
91 #endif
92 
93 static void vp9_tile_data_free(VP9TileData *td)
94 {
95  av_freep(&td->b_base);
96  av_freep(&td->block_base);
97  av_freep(&td->block_structure);
98 }
99 
100 static void vp9_frame_unref(VP9Frame *f)
101 {
102  ff_thread_release_ext_buffer(&f->tf);
103  ff_refstruct_unref(&f->extradata);
104  ff_refstruct_unref(&f->hwaccel_picture_private);
105  f->segmentation_map = NULL;
106 }
107 
108 static int vp9_frame_alloc(AVCodecContext *avctx, VP9Frame *f)
109 {
110  VP9Context *s = avctx->priv_data;
111  int ret, sz;
112 
113  ret = ff_thread_get_ext_buffer(avctx, &f->tf, AV_GET_BUFFER_FLAG_REF);
114  if (ret < 0)
115  return ret;
116 
117  sz = 64 * s->sb_cols * s->sb_rows;
118  if (sz != s->frame_extradata_pool_size) {
119  ff_refstruct_pool_uninit(&s->frame_extradata_pool);
120  s->frame_extradata_pool = ff_refstruct_pool_alloc(sz * (1 + sizeof(VP9mvrefPair)),
121  FF_REFSTRUCT_POOL_FLAG_ZERO_EVERY_TIME);
122  if (!s->frame_extradata_pool) {
123  s->frame_extradata_pool_size = 0;
124  ret = AVERROR(ENOMEM);
125  goto fail;
126  }
127  s->frame_extradata_pool_size = sz;
128  }
129  f->extradata = ff_refstruct_pool_get(s->frame_extradata_pool);
130  if (!f->extradata) {
131  ret = AVERROR(ENOMEM);
132  goto fail;
133  }
134 
135  f->segmentation_map = f->extradata;
136  f->mv = (VP9mvrefPair *) ((char*)f->extradata + sz);
137 
138  ret = ff_hwaccel_frame_priv_alloc(avctx, &f->hwaccel_picture_private);
139  if (ret < 0)
140  goto fail;
141 
142  return 0;
143 
144 fail:
145  vp9_frame_unref(f);
146  return ret;
147 }
148 
149 static int vp9_frame_ref(VP9Frame *dst, VP9Frame *src)
150 {
151  int ret;
152 
153  ret = ff_thread_ref_frame(&dst->tf, &src->tf);
154  if (ret < 0)
155  return ret;
156 
157  dst->extradata = ff_refstruct_ref(src->extradata);
158 
159  dst->segmentation_map = src->segmentation_map;
160  dst->mv = src->mv;
161  dst->uses_2pass = src->uses_2pass;
162 
163  ff_refstruct_replace(&dst->hwaccel_picture_private,
164  src->hwaccel_picture_private);
165 
166  return 0;
167 }
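/* Note that segmentation_map and mv are not reference counted on their own:
 * they are interior pointers into the refstruct-backed extradata buffer
 * allocated in vp9_frame_alloc(), so after taking a new reference on
 * extradata it is enough to copy the raw pointers. */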
168 
169 static int update_size(AVCodecContext *avctx, int w, int h)
170 {
171 #define HWACCEL_MAX (CONFIG_VP9_DXVA2_HWACCEL + \
172  CONFIG_VP9_D3D11VA_HWACCEL * 2 + \
173  CONFIG_VP9_D3D12VA_HWACCEL + \
174  CONFIG_VP9_NVDEC_HWACCEL + \
175  CONFIG_VP9_VAAPI_HWACCEL + \
176  CONFIG_VP9_VDPAU_HWACCEL + \
177  CONFIG_VP9_VIDEOTOOLBOX_HWACCEL)
178  enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
179  VP9Context *s = avctx->priv_data;
180  uint8_t *p;
181  int bytesperpixel = s->bytesperpixel, ret, cols, rows;
182  int lflvl_len, i;
183 
184  av_assert0(w > 0 && h > 0);
185 
186  if (!(s->pix_fmt == s->gf_fmt && w == s->w && h == s->h)) {
187  if ((ret = ff_set_dimensions(avctx, w, h)) < 0)
188  return ret;
189 
190  switch (s->pix_fmt) {
191  case AV_PIX_FMT_YUV420P:
192  case AV_PIX_FMT_YUV420P10:
193 #if CONFIG_VP9_DXVA2_HWACCEL
194  *fmtp++ = AV_PIX_FMT_DXVA2_VLD;
195 #endif
196 #if CONFIG_VP9_D3D11VA_HWACCEL
197  *fmtp++ = AV_PIX_FMT_D3D11VA_VLD;
198  *fmtp++ = AV_PIX_FMT_D3D11;
199 #endif
200 #if CONFIG_VP9_D3D12VA_HWACCEL
201  *fmtp++ = AV_PIX_FMT_D3D12;
202 #endif
203 #if CONFIG_VP9_NVDEC_HWACCEL
204  *fmtp++ = AV_PIX_FMT_CUDA;
205 #endif
206 #if CONFIG_VP9_VAAPI_HWACCEL
207  *fmtp++ = AV_PIX_FMT_VAAPI;
208 #endif
209 #if CONFIG_VP9_VDPAU_HWACCEL
210  *fmtp++ = AV_PIX_FMT_VDPAU;
211 #endif
212 #if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
213  *fmtp++ = AV_PIX_FMT_VIDEOTOOLBOX;
214 #endif
215  break;
216  case AV_PIX_FMT_YUV420P12:
217 #if CONFIG_VP9_NVDEC_HWACCEL
218  *fmtp++ = AV_PIX_FMT_CUDA;
219 #endif
220 #if CONFIG_VP9_VAAPI_HWACCEL
221  *fmtp++ = AV_PIX_FMT_VAAPI;
222 #endif
223 #if CONFIG_VP9_VDPAU_HWACCEL
224  *fmtp++ = AV_PIX_FMT_VDPAU;
225 #endif
226  break;
227  case AV_PIX_FMT_YUV444P:
228  case AV_PIX_FMT_YUV444P10:
229  case AV_PIX_FMT_YUV444P12:
230 #if CONFIG_VP9_VAAPI_HWACCEL
231  *fmtp++ = AV_PIX_FMT_VAAPI;
232 #endif
233  break;
234  case AV_PIX_FMT_GBRP:
235  case AV_PIX_FMT_GBRP10:
236  case AV_PIX_FMT_GBRP12:
237 #if CONFIG_VP9_VAAPI_HWACCEL
238  *fmtp++ = AV_PIX_FMT_VAAPI;
239 #endif
240  break;
241  }
242 
243  *fmtp++ = s->pix_fmt;
244  *fmtp = AV_PIX_FMT_NONE;
245 
246  ret = ff_get_format(avctx, pix_fmts);
247  if (ret < 0)
248  return ret;
249 
250  avctx->pix_fmt = ret;
251  s->gf_fmt = s->pix_fmt;
252  s->w = w;
253  s->h = h;
254  }
255 
256  cols = (w + 7) >> 3;
257  rows = (h + 7) >> 3;
258 
259  if (s->intra_pred_data[0] && cols == s->cols && rows == s->rows && s->pix_fmt == s->last_fmt)
260  return 0;
261 
262  s->last_fmt = s->pix_fmt;
263  s->sb_cols = (w + 63) >> 6;
264  s->sb_rows = (h + 63) >> 6;
265  s->cols = (w + 7) >> 3;
266  s->rows = (h + 7) >> 3;
267  lflvl_len = avctx->active_thread_type == FF_THREAD_SLICE ? s->sb_rows : 1;
268 
269 #define assign(var, type, n) var = (type) p; p += s->sb_cols * (n) * sizeof(*var)
270  av_freep(&s->intra_pred_data[0]);
271  // FIXME we slightly over-allocate here for subsampled chroma, but a little
272  // bit of padding shouldn't affect performance...
273  p = av_malloc(s->sb_cols * (128 + 192 * bytesperpixel +
274  lflvl_len * sizeof(*s->lflvl) + 16 * sizeof(*s->above_mv_ctx)));
275  if (!p)
276  return AVERROR(ENOMEM);
277  assign(s->intra_pred_data[0], uint8_t *, 64 * bytesperpixel);
278  assign(s->intra_pred_data[1], uint8_t *, 64 * bytesperpixel);
279  assign(s->intra_pred_data[2], uint8_t *, 64 * bytesperpixel);
280  assign(s->above_y_nnz_ctx, uint8_t *, 16);
281  assign(s->above_mode_ctx, uint8_t *, 16);
282  assign(s->above_mv_ctx, VP9mv(*)[2], 16);
283  assign(s->above_uv_nnz_ctx[0], uint8_t *, 16);
284  assign(s->above_uv_nnz_ctx[1], uint8_t *, 16);
285  assign(s->above_partition_ctx, uint8_t *, 8);
286  assign(s->above_skip_ctx, uint8_t *, 8);
287  assign(s->above_txfm_ctx, uint8_t *, 8);
288  assign(s->above_segpred_ctx, uint8_t *, 8);
289  assign(s->above_intra_ctx, uint8_t *, 8);
290  assign(s->above_comp_ctx, uint8_t *, 8);
291  assign(s->above_ref_ctx, uint8_t *, 8);
292  assign(s->above_filter_ctx, uint8_t *, 8);
293  assign(s->lflvl, VP9Filter *, lflvl_len);
294 #undef assign
295 
296  if (s->td) {
297  for (i = 0; i < s->active_tile_cols; i++)
298  vp9_tile_data_free(&s->td[i]);
299  }
300 
301  if (s->s.h.bpp != s->last_bpp) {
302  ff_vp9dsp_init(&s->dsp, s->s.h.bpp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
303  ff_videodsp_init(&s->vdsp, s->s.h.bpp);
304  s->last_bpp = s->s.h.bpp;
305  }
306 
307  return 0;
308 }
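/* update_size() only re-negotiates the output format (ff_get_format) when the
 * coded dimensions or pixel format actually change; the per-superblock-column
 * "above" context arrays and the loop-filter level buffer are then carved out
 * of a single allocation via the assign() macro above. */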
309 
310 static int update_block_buffers(AVCodecContext *avctx)
311 {
312  int i;
313  VP9Context *s = avctx->priv_data;
314  int chroma_blocks, chroma_eobs, bytesperpixel = s->bytesperpixel;
315  VP9TileData *td = &s->td[0];
316 
317  if (td->b_base && td->block_base && s->block_alloc_using_2pass == s->s.frames[CUR_FRAME].uses_2pass)
318  return 0;
319 
320  vp9_tile_data_free(td);
321  chroma_blocks = 64 * 64 >> (s->ss_h + s->ss_v);
322  chroma_eobs = 16 * 16 >> (s->ss_h + s->ss_v);
323  if (s->s.frames[CUR_FRAME].uses_2pass) {
324  int sbs = s->sb_cols * s->sb_rows;
325 
326  td->b_base = av_malloc_array(s->cols * s->rows, sizeof(VP9Block));
327  td->block_base = av_mallocz(((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
328  16 * 16 + 2 * chroma_eobs) * sbs);
329  if (!td->b_base || !td->block_base)
330  return AVERROR(ENOMEM);
331  td->uvblock_base[0] = td->block_base + sbs * 64 * 64 * bytesperpixel;
332  td->uvblock_base[1] = td->uvblock_base[0] + sbs * chroma_blocks * bytesperpixel;
333  td->eob_base = (uint8_t *) (td->uvblock_base[1] + sbs * chroma_blocks * bytesperpixel);
334  td->uveob_base[0] = td->eob_base + 16 * 16 * sbs;
335  td->uveob_base[1] = td->uveob_base[0] + chroma_eobs * sbs;
336 
337  if (avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS) {
338  td->block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
339  if (!td->block_structure)
340  return AVERROR(ENOMEM);
341  }
342  } else {
343  for (i = 1; i < s->active_tile_cols; i++)
344  vp9_tile_data_free(&s->td[i]);
345 
346  for (i = 0; i < s->active_tile_cols; i++) {
347  s->td[i].b_base = av_malloc(sizeof(VP9Block));
348  s->td[i].block_base = av_mallocz((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
349  16 * 16 + 2 * chroma_eobs);
350  if (!s->td[i].b_base || !s->td[i].block_base)
351  return AVERROR(ENOMEM);
352  s->td[i].uvblock_base[0] = s->td[i].block_base + 64 * 64 * bytesperpixel;
353  s->td[i].uvblock_base[1] = s->td[i].uvblock_base[0] + chroma_blocks * bytesperpixel;
354  s->td[i].eob_base = (uint8_t *) (s->td[i].uvblock_base[1] + chroma_blocks * bytesperpixel);
355  s->td[i].uveob_base[0] = s->td[i].eob_base + 16 * 16;
356  s->td[i].uveob_base[1] = s->td[i].uveob_base[0] + chroma_eobs;
357 
358  if (avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS) {
359  s->td[i].block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
360  if (!s->td[i].block_structure)
361  return AVERROR(ENOMEM);
362  }
363  }
364  }
365  s->block_alloc_using_2pass = s->s.frames[CUR_FRAME].uses_2pass;
366 
367  return 0;
368 }
369 
370 // The sign bit is at the end, not the start, of a bit sequence
371 static av_always_inline int get_sbits_inv(GetBitContext *gb, int n)
372 {
373  int v = get_bits(gb, n);
374  return get_bits1(gb) ? -v : v;
375 }
376 
377 static av_always_inline int inv_recenter_nonneg(int v, int m)
378 {
379  if (v > 2 * m)
380  return v;
381  if (v & 1)
382  return m - ((v + 1) >> 1);
383  return m + (v >> 1);
384 }
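/* inv_recenter_nonneg() undoes the recentering used by the differential
 * probability update below: small coded values land close to the reference m,
 * alternating around it (v = 0,1,2,3,4 -> m, m-1, m+1, m-2, m+2), while
 * v > 2*m is passed through unchanged. */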
385 
386 // differential forward probability updates
387 static int update_prob(VPXRangeCoder *c, int p)
388 {
389  static const uint8_t inv_map_table[255] = {
390  7, 20, 33, 46, 59, 72, 85, 98, 111, 124, 137, 150, 163, 176,
391  189, 202, 215, 228, 241, 254, 1, 2, 3, 4, 5, 6, 8, 9,
392  10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24,
393  25, 26, 27, 28, 29, 30, 31, 32, 34, 35, 36, 37, 38, 39,
394  40, 41, 42, 43, 44, 45, 47, 48, 49, 50, 51, 52, 53, 54,
395  55, 56, 57, 58, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
396  70, 71, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
397  86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 99, 100,
398  101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115,
399  116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 127, 128, 129, 130,
400  131, 132, 133, 134, 135, 136, 138, 139, 140, 141, 142, 143, 144, 145,
401  146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
402  161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
403  177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 191,
404  192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 206,
405  207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221,
406  222, 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236,
407  237, 238, 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
408  252, 253, 253,
409  };
410  int d;
411 
412  /* This code is trying to do a differential probability update. For a
413  * current probability A in the range [1, 255], the difference to a new
414  * probability of any value can be expressed differentially as 1-A, 255-A
415  * where some part of this (absolute range) exists both in positive as
416  * well as the negative part, whereas another part only exists in one
417  * half. We're trying to code this shared part differentially, i.e.
418  * times two where the value of the lowest bit specifies the sign, and
419  * the single part is then coded on top of this. This absolute difference
420  * then again has a value of [0, 254], but a bigger value in this range
421  * indicates that we're further away from the original value A, so we
422  * can code this as a VLC code, since higher values are increasingly
423  * unlikely. The first 20 values in inv_map_table[] allow 'cheap, rough'
424  * updates vs. the 'fine, exact' updates further down the range, which
425  * adds one extra dimension to this differential update model. */
426 
427  if (!vp89_rac_get(c)) {
428  d = vp89_rac_get_uint(c, 4) + 0;
429  } else if (!vp89_rac_get(c)) {
430  d = vp89_rac_get_uint(c, 4) + 16;
431  } else if (!vp89_rac_get(c)) {
432  d = vp89_rac_get_uint(c, 5) + 32;
433  } else {
434  d = vp89_rac_get_uint(c, 7);
435  if (d >= 65)
436  d = (d << 1) - 65 + vp89_rac_get(c);
437  d += 64;
438  av_assert2(d < FF_ARRAY_ELEMS(inv_map_table));
439  }
440 
441  return p <= 128 ? 1 + inv_recenter_nonneg(inv_map_table[d], p - 1) :
442  255 - inv_recenter_nonneg(inv_map_table[d], 255 - p);
443 }
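/* Rough cost of the subexponential code read above: a delta d in [0,16) takes
 * 1+4 binary decisions, [16,32) takes 2+4, [32,64) takes 3+5, and d >= 64
 * takes 3+7 plus possibly one extra bit, so small (more likely) updates stay
 * cheap while large jumps remain representable. */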
444 
445 static int read_colorspace_details(AVCodecContext *avctx)
446 {
447  static const enum AVColorSpace colorspaces[8] = {
448  AVCOL_SPC_UNSPECIFIED, AVCOL_SPC_BT470BG, AVCOL_SPC_BT709, AVCOL_SPC_SMPTE170M,
449  AVCOL_SPC_SMPTE240M, AVCOL_SPC_BT2020_NCL, AVCOL_SPC_RESERVED, AVCOL_SPC_RGB,
450  };
451  VP9Context *s = avctx->priv_data;
452  int bits = avctx->profile <= 1 ? 0 : 1 + get_bits1(&s->gb); // 0:8, 1:10, 2:12
453 
454  s->bpp_index = bits;
455  s->s.h.bpp = 8 + bits * 2;
456  s->bytesperpixel = (7 + s->s.h.bpp) >> 3;
457  avctx->colorspace = colorspaces[get_bits(&s->gb, 3)];
458  if (avctx->colorspace == AVCOL_SPC_RGB) { // RGB = profile 1
459  static const enum AVPixelFormat pix_fmt_rgb[3] = {
460  AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12
461  };
462  s->ss_h = s->ss_v = 0;
463  avctx->color_range = AVCOL_RANGE_JPEG;
464  s->pix_fmt = pix_fmt_rgb[bits];
465  if (avctx->profile & 1) {
466  if (get_bits1(&s->gb)) {
467  av_log(avctx, AV_LOG_ERROR, "Reserved bit set in RGB\n");
468  return AVERROR_INVALIDDATA;
469  }
470  } else {
471  av_log(avctx, AV_LOG_ERROR, "RGB not supported in profile %d\n",
472  avctx->profile);
473  return AVERROR_INVALIDDATA;
474  }
475  } else {
476  static const enum AVPixelFormat pix_fmt_for_ss[3][2 /* v */][2 /* h */] = {
477  { { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P },
478  { AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV420P } },
479  { { AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV422P10 },
480  { AV_PIX_FMT_YUV440P10, AV_PIX_FMT_YUV420P10 } },
481  { { AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12 },
482  { AV_PIX_FMT_YUV440P12, AV_PIX_FMT_YUV420P12 } }
483  };
484  avctx->color_range = get_bits1(&s->gb) ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
485  if (avctx->profile & 1) {
486  s->ss_h = get_bits1(&s->gb);
487  s->ss_v = get_bits1(&s->gb);
488  s->pix_fmt = pix_fmt_for_ss[bits][s->ss_v][s->ss_h];
489  if (s->pix_fmt == AV_PIX_FMT_YUV420P) {
490  av_log(avctx, AV_LOG_ERROR, "YUV 4:2:0 not supported in profile %d\n",
491  avctx->profile);
492  return AVERROR_INVALIDDATA;
493  } else if (get_bits1(&s->gb)) {
494  av_log(avctx, AV_LOG_ERROR, "Profile %d color details reserved bit set\n",
495  avctx->profile);
496  return AVERROR_INVALIDDATA;
497  }
498  } else {
499  s->ss_h = s->ss_v = 1;
500  s->pix_fmt = pix_fmt_for_ss[bits][1][1];
501  }
502  }
503 
504  return 0;
505 }
506 
507 static int decode_frame_header(AVCodecContext *avctx,
508  const uint8_t *data, int size, int *ref)
509 {
510  VP9Context *s = avctx->priv_data;
511  int c, i, j, k, l, m, n, w, h, max, size2, ret, sharp;
512  int last_invisible;
513  const uint8_t *data2;
514 
515  /* general header */
516  if ((ret = init_get_bits8(&s->gb, data, size)) < 0) {
517  av_log(avctx, AV_LOG_ERROR, "Failed to initialize bitstream reader\n");
518  return ret;
519  }
520  if (get_bits(&s->gb, 2) != 0x2) { // frame marker
521  av_log(avctx, AV_LOG_ERROR, "Invalid frame marker\n");
522  return AVERROR_INVALIDDATA;
523  }
524  avctx->profile = get_bits1(&s->gb);
525  avctx->profile |= get_bits1(&s->gb) << 1;
526  if (avctx->profile == 3) avctx->profile += get_bits1(&s->gb);
527  if (avctx->profile > 3) {
528  av_log(avctx, AV_LOG_ERROR, "Profile %d is not yet supported\n", avctx->profile);
529  return AVERROR_INVALIDDATA;
530  }
531  s->s.h.profile = avctx->profile;
532  if (get_bits1(&s->gb)) {
533  *ref = get_bits(&s->gb, 3);
534  return 0;
535  }
536 
537  s->last_keyframe = s->s.h.keyframe;
538  s->s.h.keyframe = !get_bits1(&s->gb);
539 
540  last_invisible = s->s.h.invisible;
541  s->s.h.invisible = !get_bits1(&s->gb);
542  s->s.h.errorres = get_bits1(&s->gb);
543  s->s.h.use_last_frame_mvs = !s->s.h.errorres && !last_invisible;
544 
545  if (s->s.h.keyframe) {
546  if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
547  av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
548  return AVERROR_INVALIDDATA;
549  }
550  if ((ret = read_colorspace_details(avctx)) < 0)
551  return ret;
552  // for profile 1, here follows the subsampling bits
553  s->s.h.refreshrefmask = 0xff;
554  w = get_bits(&s->gb, 16) + 1;
555  h = get_bits(&s->gb, 16) + 1;
556  if (get_bits1(&s->gb)) // display size
557  skip_bits(&s->gb, 32);
558  } else {
559  s->s.h.intraonly = s->s.h.invisible ? get_bits1(&s->gb) : 0;
560  s->s.h.resetctx = s->s.h.errorres ? 0 : get_bits(&s->gb, 2);
561  if (s->s.h.intraonly) {
562  if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
563  av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
564  return AVERROR_INVALIDDATA;
565  }
566  if (avctx->profile >= 1) {
567  if ((ret = read_colorspace_details(avctx)) < 0)
568  return ret;
569  } else {
570  s->ss_h = s->ss_v = 1;
571  s->s.h.bpp = 8;
572  s->bpp_index = 0;
573  s->bytesperpixel = 1;
574  s->pix_fmt = AV_PIX_FMT_YUV420P;
575  avctx->colorspace = AVCOL_SPC_BT470BG;
576  avctx->color_range = AVCOL_RANGE_MPEG;
577  }
578  s->s.h.refreshrefmask = get_bits(&s->gb, 8);
579  w = get_bits(&s->gb, 16) + 1;
580  h = get_bits(&s->gb, 16) + 1;
581  if (get_bits1(&s->gb)) // display size
582  skip_bits(&s->gb, 32);
583  } else {
584  s->s.h.refreshrefmask = get_bits(&s->gb, 8);
585  s->s.h.refidx[0] = get_bits(&s->gb, 3);
586  s->s.h.signbias[0] = get_bits1(&s->gb) && !s->s.h.errorres;
587  s->s.h.refidx[1] = get_bits(&s->gb, 3);
588  s->s.h.signbias[1] = get_bits1(&s->gb) && !s->s.h.errorres;
589  s->s.h.refidx[2] = get_bits(&s->gb, 3);
590  s->s.h.signbias[2] = get_bits1(&s->gb) && !s->s.h.errorres;
591  if (!s->s.refs[s->s.h.refidx[0]].f->buf[0] ||
592  !s->s.refs[s->s.h.refidx[1]].f->buf[0] ||
593  !s->s.refs[s->s.h.refidx[2]].f->buf[0]) {
594  av_log(avctx, AV_LOG_ERROR, "Not all references are available\n");
595  return AVERROR_INVALIDDATA;
596  }
597  if (get_bits1(&s->gb)) {
598  w = s->s.refs[s->s.h.refidx[0]].f->width;
599  h = s->s.refs[s->s.h.refidx[0]].f->height;
600  } else if (get_bits1(&s->gb)) {
601  w = s->s.refs[s->s.h.refidx[1]].f->width;
602  h = s->s.refs[s->s.h.refidx[1]].f->height;
603  } else if (get_bits1(&s->gb)) {
604  w = s->s.refs[s->s.h.refidx[2]].f->width;
605  h = s->s.refs[s->s.h.refidx[2]].f->height;
606  } else {
607  w = get_bits(&s->gb, 16) + 1;
608  h = get_bits(&s->gb, 16) + 1;
609  }
610  // Note that in this code, "CUR_FRAME" is actually before we
611  // have formally allocated a frame, and thus actually represents
612  // the _last_ frame
613  s->s.h.use_last_frame_mvs &= s->s.frames[CUR_FRAME].tf.f->width == w &&
614  s->s.frames[CUR_FRAME].tf.f->height == h;
615  if (get_bits1(&s->gb)) // display size
616  skip_bits(&s->gb, 32);
617  s->s.h.highprecisionmvs = get_bits1(&s->gb);
618  s->s.h.filtermode = get_bits1(&s->gb) ? FILTER_SWITCHABLE :
619  get_bits(&s->gb, 2);
620  s->s.h.allowcompinter = s->s.h.signbias[0] != s->s.h.signbias[1] ||
621  s->s.h.signbias[0] != s->s.h.signbias[2];
622  if (s->s.h.allowcompinter) {
623  if (s->s.h.signbias[0] == s->s.h.signbias[1]) {
624  s->s.h.fixcompref = 2;
625  s->s.h.varcompref[0] = 0;
626  s->s.h.varcompref[1] = 1;
627  } else if (s->s.h.signbias[0] == s->s.h.signbias[2]) {
628  s->s.h.fixcompref = 1;
629  s->s.h.varcompref[0] = 0;
630  s->s.h.varcompref[1] = 2;
631  } else {
632  s->s.h.fixcompref = 0;
633  s->s.h.varcompref[0] = 1;
634  s->s.h.varcompref[1] = 2;
635  }
636  }
637  }
638  }
639  s->s.h.refreshctx = s->s.h.errorres ? 0 : get_bits1(&s->gb);
640  s->s.h.parallelmode = s->s.h.errorres ? 1 : get_bits1(&s->gb);
641  s->s.h.framectxid = c = get_bits(&s->gb, 2);
642  if (s->s.h.keyframe || s->s.h.intraonly)
643  s->s.h.framectxid = 0; // BUG: libvpx ignores this field in keyframes
644 
645  /* loopfilter header data */
646  if (s->s.h.keyframe || s->s.h.errorres || s->s.h.intraonly) {
647  // reset loopfilter defaults
648  s->s.h.lf_delta.ref[0] = 1;
649  s->s.h.lf_delta.ref[1] = 0;
650  s->s.h.lf_delta.ref[2] = -1;
651  s->s.h.lf_delta.ref[3] = -1;
652  s->s.h.lf_delta.mode[0] = 0;
653  s->s.h.lf_delta.mode[1] = 0;
654  memset(s->s.h.segmentation.feat, 0, sizeof(s->s.h.segmentation.feat));
655  }
656  s->s.h.filter.level = get_bits(&s->gb, 6);
657  sharp = get_bits(&s->gb, 3);
658  // if sharpness changed, reinit lim/mblim LUTs. if it didn't change, keep
659  // the old cache values since they are still valid
660  if (s->s.h.filter.sharpness != sharp) {
661  for (i = 1; i <= 63; i++) {
662  int limit = i;
663 
664  if (sharp > 0) {
665  limit >>= (sharp + 3) >> 2;
666  limit = FFMIN(limit, 9 - sharp);
667  }
668  limit = FFMAX(limit, 1);
669 
670  s->filter_lut.lim_lut[i] = limit;
671  s->filter_lut.mblim_lut[i] = 2 * (i + 2) + limit;
672  }
673  }
674  s->s.h.filter.sharpness = sharp;
675  if ((s->s.h.lf_delta.enabled = get_bits1(&s->gb))) {
676  if ((s->s.h.lf_delta.updated = get_bits1(&s->gb))) {
677  for (i = 0; i < 4; i++)
678  if (get_bits1(&s->gb))
679  s->s.h.lf_delta.ref[i] = get_sbits_inv(&s->gb, 6);
680  for (i = 0; i < 2; i++)
681  if (get_bits1(&s->gb))
682  s->s.h.lf_delta.mode[i] = get_sbits_inv(&s->gb, 6);
683  }
684  }
685 
686  /* quantization header data */
687  s->s.h.yac_qi = get_bits(&s->gb, 8);
688  s->s.h.ydc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
689  s->s.h.uvdc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
690  s->s.h.uvac_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
691  s->s.h.lossless = s->s.h.yac_qi == 0 && s->s.h.ydc_qdelta == 0 &&
692  s->s.h.uvdc_qdelta == 0 && s->s.h.uvac_qdelta == 0;
693  if (s->s.h.lossless)
694  avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
695 
696  /* segmentation header info */
697  if ((s->s.h.segmentation.enabled = get_bits1(&s->gb))) {
698  if ((s->s.h.segmentation.update_map = get_bits1(&s->gb))) {
699  for (i = 0; i < 7; i++)
700  s->s.h.segmentation.prob[i] = get_bits1(&s->gb) ?
701  get_bits(&s->gb, 8) : 255;
702  if ((s->s.h.segmentation.temporal = get_bits1(&s->gb)))
703  for (i = 0; i < 3; i++)
704  s->s.h.segmentation.pred_prob[i] = get_bits1(&s->gb) ?
705  get_bits(&s->gb, 8) : 255;
706  }
707 
708  if (get_bits1(&s->gb)) {
709  s->s.h.segmentation.absolute_vals = get_bits1(&s->gb);
710  for (i = 0; i < 8; i++) {
711  if ((s->s.h.segmentation.feat[i].q_enabled = get_bits1(&s->gb)))
712  s->s.h.segmentation.feat[i].q_val = get_sbits_inv(&s->gb, 8);
713  if ((s->s.h.segmentation.feat[i].lf_enabled = get_bits1(&s->gb)))
714  s->s.h.segmentation.feat[i].lf_val = get_sbits_inv(&s->gb, 6);
715  if ((s->s.h.segmentation.feat[i].ref_enabled = get_bits1(&s->gb)))
716  s->s.h.segmentation.feat[i].ref_val = get_bits(&s->gb, 2);
717  s->s.h.segmentation.feat[i].skip_enabled = get_bits1(&s->gb);
718  }
719  }
720  } else {
721  // Reset fields under segmentation switch if segmentation is disabled.
722  // This is necessary because some hwaccels don't ignore these fields
723  // if segmentation is disabled.
724  s->s.h.segmentation.temporal = 0;
725  s->s.h.segmentation.update_map = 0;
726  }
727 
728  // set qmul[] based on Y/UV, AC/DC and segmentation Q idx deltas
729  for (i = 0; i < (s->s.h.segmentation.enabled ? 8 : 1); i++) {
730  int qyac, qydc, quvac, quvdc, lflvl, sh;
731 
732  if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].q_enabled) {
733  if (s->s.h.segmentation.absolute_vals)
734  qyac = av_clip_uintp2(s->s.h.segmentation.feat[i].q_val, 8);
735  else
736  qyac = av_clip_uintp2(s->s.h.yac_qi + s->s.h.segmentation.feat[i].q_val, 8);
737  } else {
738  qyac = s->s.h.yac_qi;
739  }
740  qydc = av_clip_uintp2(qyac + s->s.h.ydc_qdelta, 8);
741  quvdc = av_clip_uintp2(qyac + s->s.h.uvdc_qdelta, 8);
742  quvac = av_clip_uintp2(qyac + s->s.h.uvac_qdelta, 8);
743  qyac = av_clip_uintp2(qyac, 8);
744 
745  s->s.h.segmentation.feat[i].qmul[0][0] = ff_vp9_dc_qlookup[s->bpp_index][qydc];
746  s->s.h.segmentation.feat[i].qmul[0][1] = ff_vp9_ac_qlookup[s->bpp_index][qyac];
747  s->s.h.segmentation.feat[i].qmul[1][0] = ff_vp9_dc_qlookup[s->bpp_index][quvdc];
748  s->s.h.segmentation.feat[i].qmul[1][1] = ff_vp9_ac_qlookup[s->bpp_index][quvac];
749 
750  sh = s->s.h.filter.level >= 32;
751  if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].lf_enabled) {
752  if (s->s.h.segmentation.absolute_vals)
753  lflvl = av_clip_uintp2(s->s.h.segmentation.feat[i].lf_val, 6);
754  else
755  lflvl = av_clip_uintp2(s->s.h.filter.level + s->s.h.segmentation.feat[i].lf_val, 6);
756  } else {
757  lflvl = s->s.h.filter.level;
758  }
759  if (s->s.h.lf_delta.enabled) {
760  s->s.h.segmentation.feat[i].lflvl[0][0] =
761  s->s.h.segmentation.feat[i].lflvl[0][1] =
762  av_clip_uintp2(lflvl + (s->s.h.lf_delta.ref[0] * (1 << sh)), 6);
763  for (j = 1; j < 4; j++) {
764  s->s.h.segmentation.feat[i].lflvl[j][0] =
765  av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
766  s->s.h.lf_delta.mode[0]) * (1 << sh)), 6);
767  s->s.h.segmentation.feat[i].lflvl[j][1] =
768  av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
769  s->s.h.lf_delta.mode[1]) * (1 << sh)), 6);
770  }
771  } else {
772  memset(s->s.h.segmentation.feat[i].lflvl, lflvl,
773  sizeof(s->s.h.segmentation.feat[i].lflvl));
774  }
775  }
776 
777  /* tiling info */
778  if ((ret = update_size(avctx, w, h)) < 0) {
779  av_log(avctx, AV_LOG_ERROR, "Failed to initialize decoder for %dx%d @ %d\n",
780  w, h, s->pix_fmt);
781  return ret;
782  }
783  for (s->s.h.tiling.log2_tile_cols = 0;
784  s->sb_cols > (64 << s->s.h.tiling.log2_tile_cols);
785  s->s.h.tiling.log2_tile_cols++) ;
786  for (max = 0; (s->sb_cols >> max) >= 4; max++) ;
787  max = FFMAX(0, max - 1);
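/* The two loops above derive the valid range for log2_tile_cols: the first
 * yields the minimum so that no tile column exceeds 64 superblocks (4096
 * pixels) in width, the second the maximum so that every tile column is at
 * least 4 superblocks (256 pixels) wide, per the VP9 tiling constraints. */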
788  while (max > s->s.h.tiling.log2_tile_cols) {
789  if (get_bits1(&s->gb))
790  s->s.h.tiling.log2_tile_cols++;
791  else
792  break;
793  }
794  s->s.h.tiling.log2_tile_rows = decode012(&s->gb);
795  s->s.h.tiling.tile_rows = 1 << s->s.h.tiling.log2_tile_rows;
796  if (s->s.h.tiling.tile_cols != (1 << s->s.h.tiling.log2_tile_cols)) {
797  int n_range_coders;
798  VPXRangeCoder *rc;
799 
800  if (s->td) {
801  for (i = 0; i < s->active_tile_cols; i++)
802  vp9_tile_data_free(&s->td[i]);
803  av_freep(&s->td);
804  }
805 
806  s->s.h.tiling.tile_cols = 1 << s->s.h.tiling.log2_tile_cols;
807  s->active_tile_cols = avctx->active_thread_type == FF_THREAD_SLICE ?
808  s->s.h.tiling.tile_cols : 1;
809  vp9_alloc_entries(avctx, s->sb_rows);
810  if (avctx->active_thread_type == FF_THREAD_SLICE) {
811  n_range_coders = 4; // max_tile_rows
812  } else {
813  n_range_coders = s->s.h.tiling.tile_cols;
814  }
815  s->td = av_calloc(s->active_tile_cols, sizeof(VP9TileData) +
816  n_range_coders * sizeof(VPXRangeCoder));
817  if (!s->td)
818  return AVERROR(ENOMEM);
819  rc = (VPXRangeCoder *) &s->td[s->active_tile_cols];
820  for (i = 0; i < s->active_tile_cols; i++) {
821  s->td[i].s = s;
822  s->td[i].c_b = rc;
823  rc += n_range_coders;
824  }
825  }
826 
827  /* check reference frames */
828  if (!s->s.h.keyframe && !s->s.h.intraonly) {
829  int valid_ref_frame = 0;
830  for (i = 0; i < 3; i++) {
831  AVFrame *ref = s->s.refs[s->s.h.refidx[i]].f;
832  int refw = ref->width, refh = ref->height;
833 
834  if (ref->format != avctx->pix_fmt) {
835  av_log(avctx, AV_LOG_ERROR,
836  "Ref pixfmt (%s) did not match current frame (%s)",
837  av_get_pix_fmt_name(ref->format),
838  av_get_pix_fmt_name(avctx->pix_fmt));
839  return AVERROR_INVALIDDATA;
840  } else if (refw == w && refh == h) {
841  s->mvscale[i][0] = s->mvscale[i][1] = 0;
842  } else {
843  /* Check to make sure at least one of the frames that */
844  /* this frame references has valid dimensions */
845  if (w * 2 < refw || h * 2 < refh || w > 16 * refw || h > 16 * refh) {
846  av_log(avctx, AV_LOG_WARNING,
847  "Invalid ref frame dimensions %dx%d for frame size %dx%d\n",
848  refw, refh, w, h);
849  s->mvscale[i][0] = s->mvscale[i][1] = REF_INVALID_SCALE;
850  continue;
851  }
852  s->mvscale[i][0] = (refw << 14) / w;
853  s->mvscale[i][1] = (refh << 14) / h;
854  s->mvstep[i][0] = 16 * s->mvscale[i][0] >> 14;
855  s->mvstep[i][1] = 16 * s->mvscale[i][1] >> 14;
856  }
857  valid_ref_frame++;
858  }
859  if (!valid_ref_frame) {
860  av_log(avctx, AV_LOG_ERROR, "No valid reference frame is found, bitstream not supported\n");
861  return AVERROR_INVALIDDATA;
862  }
863  }
864 
865  if (s->s.h.keyframe || s->s.h.errorres || (s->s.h.intraonly && s->s.h.resetctx == 3)) {
866  s->prob_ctx[0].p = s->prob_ctx[1].p = s->prob_ctx[2].p =
867  s->prob_ctx[3].p = ff_vp9_default_probs;
868  memcpy(s->prob_ctx[0].coef, ff_vp9_default_coef_probs,
869  sizeof(ff_vp9_default_coef_probs));
870  memcpy(s->prob_ctx[1].coef, ff_vp9_default_coef_probs,
871  sizeof(ff_vp9_default_coef_probs));
872  memcpy(s->prob_ctx[2].coef, ff_vp9_default_coef_probs,
873  sizeof(ff_vp9_default_coef_probs));
874  memcpy(s->prob_ctx[3].coef, ff_vp9_default_coef_probs,
875  sizeof(ff_vp9_default_coef_probs));
876  } else if (s->s.h.intraonly && s->s.h.resetctx == 2) {
877  s->prob_ctx[c].p = ff_vp9_default_probs;
878  memcpy(s->prob_ctx[c].coef, ff_vp9_default_coef_probs,
879  sizeof(ff_vp9_default_coef_probs));
880  }
881 
882  // next 16 bits is size of the rest of the header (arith-coded)
883  s->s.h.compressed_header_size = size2 = get_bits(&s->gb, 16);
884  s->s.h.uncompressed_header_size = (get_bits_count(&s->gb) + 7) / 8;
885 
886  data2 = align_get_bits(&s->gb);
887  if (size2 > size - (data2 - data)) {
888  av_log(avctx, AV_LOG_ERROR, "Invalid compressed header size\n");
889  return AVERROR_INVALIDDATA;
890  }
891  ret = ff_vpx_init_range_decoder(&s->c, data2, size2);
892  if (ret < 0)
893  return ret;
894 
895  if (vpx_rac_get_prob_branchy(&s->c, 128)) { // marker bit
896  av_log(avctx, AV_LOG_ERROR, "Marker bit was set\n");
897  return AVERROR_INVALIDDATA;
898  }
899 
900  for (i = 0; i < s->active_tile_cols; i++) {
901  if (s->s.h.keyframe || s->s.h.intraonly) {
902  memset(s->td[i].counts.coef, 0, sizeof(s->td[0].counts.coef));
903  memset(s->td[i].counts.eob, 0, sizeof(s->td[0].counts.eob));
904  } else {
905  memset(&s->td[i].counts, 0, sizeof(s->td[0].counts));
906  }
907  s->td[i].nb_block_structure = 0;
908  }
909 
910  /* FIXME is it faster to not copy here, but do it down in the fw updates
911  * as explicit copies if the fw update is missing (and skip the copy upon
912  * fw update)? */
913  s->prob.p = s->prob_ctx[c].p;
914 
915  // txfm updates
916  if (s->s.h.lossless) {
917  s->s.h.txfmmode = TX_4X4;
918  } else {
919  s->s.h.txfmmode = vp89_rac_get_uint(&s->c, 2);
920  if (s->s.h.txfmmode == 3)
921  s->s.h.txfmmode += vp89_rac_get(&s->c);
922 
923  if (s->s.h.txfmmode == TX_SWITCHABLE) {
924  for (i = 0; i < 2; i++)
925  if (vpx_rac_get_prob_branchy(&s->c, 252))
926  s->prob.p.tx8p[i] = update_prob(&s->c, s->prob.p.tx8p[i]);
927  for (i = 0; i < 2; i++)
928  for (j = 0; j < 2; j++)
929  if (vpx_rac_get_prob_branchy(&s->c, 252))
930  s->prob.p.tx16p[i][j] =
931  update_prob(&s->c, s->prob.p.tx16p[i][j]);
932  for (i = 0; i < 2; i++)
933  for (j = 0; j < 3; j++)
934  if (vpx_rac_get_prob_branchy(&s->c, 252))
935  s->prob.p.tx32p[i][j] =
936  update_prob(&s->c, s->prob.p.tx32p[i][j]);
937  }
938  }
939 
940  // coef updates
941  for (i = 0; i < 4; i++) {
942  uint8_t (*ref)[2][6][6][3] = s->prob_ctx[c].coef[i];
943  if (vp89_rac_get(&s->c)) {
944  for (j = 0; j < 2; j++)
945  for (k = 0; k < 2; k++)
946  for (l = 0; l < 6; l++)
947  for (m = 0; m < 6; m++) {
948  uint8_t *p = s->prob.coef[i][j][k][l][m];
949  uint8_t *r = ref[j][k][l][m];
950  if (m >= 3 && l == 0) // dc only has 3 pt
951  break;
952  for (n = 0; n < 3; n++) {
953  if (vpx_rac_get_prob_branchy(&s->c, 252))
954  p[n] = update_prob(&s->c, r[n]);
955  else
956  p[n] = r[n];
957  }
958  memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
959  }
960  } else {
961  for (j = 0; j < 2; j++)
962  for (k = 0; k < 2; k++)
963  for (l = 0; l < 6; l++)
964  for (m = 0; m < 6; m++) {
965  uint8_t *p = s->prob.coef[i][j][k][l][m];
966  uint8_t *r = ref[j][k][l][m];
967  if (m > 3 && l == 0) // dc only has 3 pt
968  break;
969  memcpy(p, r, 3);
970  memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
971  }
972  }
973  if (s->s.h.txfmmode == i)
974  break;
975  }
976 
977  // mode updates
978  for (i = 0; i < 3; i++)
979  if (vpx_rac_get_prob_branchy(&s->c, 252))
980  s->prob.p.skip[i] = update_prob(&s->c, s->prob.p.skip[i]);
981  if (!s->s.h.keyframe && !s->s.h.intraonly) {
982  for (i = 0; i < 7; i++)
983  for (j = 0; j < 3; j++)
984  if (vpx_rac_get_prob_branchy(&s->c, 252))
985  s->prob.p.mv_mode[i][j] =
986  update_prob(&s->c, s->prob.p.mv_mode[i][j]);
987 
988  if (s->s.h.filtermode == FILTER_SWITCHABLE)
989  for (i = 0; i < 4; i++)
990  for (j = 0; j < 2; j++)
991  if (vpx_rac_get_prob_branchy(&s->c, 252))
992  s->prob.p.filter[i][j] =
993  update_prob(&s->c, s->prob.p.filter[i][j]);
994 
995  for (i = 0; i < 4; i++)
996  if (vpx_rac_get_prob_branchy(&s->c, 252))
997  s->prob.p.intra[i] = update_prob(&s->c, s->prob.p.intra[i]);
998 
999  if (s->s.h.allowcompinter) {
1000  s->s.h.comppredmode = vp89_rac_get(&s->c);
1001  if (s->s.h.comppredmode)
1002  s->s.h.comppredmode += vp89_rac_get(&s->c);
1003  if (s->s.h.comppredmode == PRED_SWITCHABLE)
1004  for (i = 0; i < 5; i++)
1005  if (vpx_rac_get_prob_branchy(&s->c, 252))
1006  s->prob.p.comp[i] =
1007  update_prob(&s->c, s->prob.p.comp[i]);
1008  } else {
1009  s->s.h.comppredmode = PRED_SINGLEREF;
1010  }
1011 
1012  if (s->s.h.comppredmode != PRED_COMPREF) {
1013  for (i = 0; i < 5; i++) {
1014  if (vpx_rac_get_prob_branchy(&s->c, 252))
1015  s->prob.p.single_ref[i][0] =
1016  update_prob(&s->c, s->prob.p.single_ref[i][0]);
1017  if (vpx_rac_get_prob_branchy(&s->c, 252))
1018  s->prob.p.single_ref[i][1] =
1019  update_prob(&s->c, s->prob.p.single_ref[i][1]);
1020  }
1021  }
1022 
1023  if (s->s.h.comppredmode != PRED_SINGLEREF) {
1024  for (i = 0; i < 5; i++)
1025  if (vpx_rac_get_prob_branchy(&s->c, 252))
1026  s->prob.p.comp_ref[i] =
1027  update_prob(&s->c, s->prob.p.comp_ref[i]);
1028  }
1029 
1030  for (i = 0; i < 4; i++)
1031  for (j = 0; j < 9; j++)
1032  if (vpx_rac_get_prob_branchy(&s->c, 252))
1033  s->prob.p.y_mode[i][j] =
1034  update_prob(&s->c, s->prob.p.y_mode[i][j]);
1035 
1036  for (i = 0; i < 4; i++)
1037  for (j = 0; j < 4; j++)
1038  for (k = 0; k < 3; k++)
1039  if (vpx_rac_get_prob_branchy(&s->c, 252))
1040  s->prob.p.partition[3 - i][j][k] =
1041  update_prob(&s->c,
1042  s->prob.p.partition[3 - i][j][k]);
1043 
1044  // mv fields don't use the update_prob subexp model for some reason
1045  for (i = 0; i < 3; i++)
1046  if (vpx_rac_get_prob_branchy(&s->c, 252))
1047  s->prob.p.mv_joint[i] = (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1048 
1049  for (i = 0; i < 2; i++) {
1050  if (vpx_rac_get_prob_branchy(&s->c, 252))
1051  s->prob.p.mv_comp[i].sign =
1052  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1053 
1054  for (j = 0; j < 10; j++)
1055  if (vpx_rac_get_prob_branchy(&s->c, 252))
1056  s->prob.p.mv_comp[i].classes[j] =
1057  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1058 
1059  if (vpx_rac_get_prob_branchy(&s->c, 252))
1060  s->prob.p.mv_comp[i].class0 =
1061  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1062 
1063  for (j = 0; j < 10; j++)
1064  if (vpx_rac_get_prob_branchy(&s->c, 252))
1065  s->prob.p.mv_comp[i].bits[j] =
1066  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1067  }
1068 
1069  for (i = 0; i < 2; i++) {
1070  for (j = 0; j < 2; j++)
1071  for (k = 0; k < 3; k++)
1072  if (vpx_rac_get_prob_branchy(&s->c, 252))
1073  s->prob.p.mv_comp[i].class0_fp[j][k] =
1074  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1075 
1076  for (j = 0; j < 3; j++)
1077  if (vpx_rac_get_prob_branchy(&s->c, 252))
1078  s->prob.p.mv_comp[i].fp[j] =
1079  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1080  }
1081 
1082  if (s->s.h.highprecisionmvs) {
1083  for (i = 0; i < 2; i++) {
1084  if (vpx_rac_get_prob_branchy(&s->c, 252))
1085  s->prob.p.mv_comp[i].class0_hp =
1086  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1087 
1088  if (vpx_rac_get_prob_branchy(&s->c, 252))
1089  s->prob.p.mv_comp[i].hp =
1090  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1091  }
1092  }
1093  }
1094 
1095  return (data2 - data) + size2;
1096 }
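/* decode_frame_header() returns the total number of bytes consumed by the
 * uncompressed header plus the arithmetic-coded compressed header, i.e. the
 * offset at which the tile data begins; on a "show existing frame" packet it
 * returns 0 after filling *ref instead. */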
1097 
1098 static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl,
1099  ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
1100 {
1101  const VP9Context *s = td->s;
1102  int c = ((s->above_partition_ctx[col] >> (3 - bl)) & 1) |
1103  (((td->left_partition_ctx[row & 0x7] >> (3 - bl)) & 1) << 1);
1104  const uint8_t *p = s->s.h.keyframe || s->s.h.intraonly ? ff_vp9_default_kf_partition_probs[bl][c] :
1105  s->prob.p.partition[bl][c];
1106  enum BlockPartition bp;
1107  ptrdiff_t hbs = 4 >> bl;
1108  AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
1109  ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
1110  int bytesperpixel = s->bytesperpixel;
1111 
1112  if (bl == BL_8X8) {
1113  bp = vp89_rac_get_tree(td->c, ff_vp9_partition_tree, p);
1114  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1115  } else if (col + hbs < s->cols) { // FIXME why not <=?
1116  if (row + hbs < s->rows) { // FIXME why not <=?
1117  bp = vp89_rac_get_tree(td->c, ff_vp9_partition_tree, p);
1118  switch (bp) {
1119  case PARTITION_NONE:
1120  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1121  break;
1122  case PARTITION_H:
1123  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1124  yoff += hbs * 8 * y_stride;
1125  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1126  ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, bl, bp);
1127  break;
1128  case PARTITION_V:
1129  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1130  yoff += hbs * 8 * bytesperpixel;
1131  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1132  ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, bl, bp);
1133  break;
1134  case PARTITION_SPLIT:
1135  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1136  decode_sb(td, row, col + hbs, lflvl,
1137  yoff + 8 * hbs * bytesperpixel,
1138  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1139  yoff += hbs * 8 * y_stride;
1140  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1141  decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1142  decode_sb(td, row + hbs, col + hbs, lflvl,
1143  yoff + 8 * hbs * bytesperpixel,
1144  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1145  break;
1146  default:
1147  av_assert0(0);
1148  }
1149  } else if (vpx_rac_get_prob_branchy(td->c, p[1])) {
1150  bp = PARTITION_SPLIT;
1151  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1152  decode_sb(td, row, col + hbs, lflvl,
1153  yoff + 8 * hbs * bytesperpixel,
1154  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1155  } else {
1156  bp = PARTITION_H;
1157  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1158  }
1159  } else if (row + hbs < s->rows) { // FIXME why not <=?
1160  if (vpx_rac_get_prob_branchy(td->c, p[2])) {
1161  bp = PARTITION_SPLIT;
1162  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1163  yoff += hbs * 8 * y_stride;
1164  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1165  decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1166  } else {
1167  bp = PARTITION_V;
1168  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1169  }
1170  } else {
1171  bp = PARTITION_SPLIT;
1172  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1173  }
1174  td->counts.partition[bl][c][bp]++;
1175 }
1176 
1177 static void decode_sb_mem(VP9TileData *td, int row, int col, VP9Filter *lflvl,
1178  ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
1179 {
1180  const VP9Context *s = td->s;
1181  VP9Block *b = td->b;
1182  ptrdiff_t hbs = 4 >> bl;
1183  AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
1184  ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
1185  int bytesperpixel = s->bytesperpixel;
1186 
1187  if (bl == BL_8X8) {
1188  av_assert2(b->bl == BL_8X8);
1189  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
1190  } else if (td->b->bl == bl) {
1191  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
1192  if (b->bp == PARTITION_H && row + hbs < s->rows) {
1193  yoff += hbs * 8 * y_stride;
1194  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1195  ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, b->bl, b->bp);
1196  } else if (b->bp == PARTITION_V && col + hbs < s->cols) {
1197  yoff += hbs * 8 * bytesperpixel;
1198  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1199  ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, b->bl, b->bp);
1200  }
1201  } else {
1202  decode_sb_mem(td, row, col, lflvl, yoff, uvoff, bl + 1);
1203  if (col + hbs < s->cols) { // FIXME why not <=?
1204  if (row + hbs < s->rows) {
1205  decode_sb_mem(td, row, col + hbs, lflvl, yoff + 8 * hbs * bytesperpixel,
1206  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1207  yoff += hbs * 8 * y_stride;
1208  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1209  decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1210  decode_sb_mem(td, row + hbs, col + hbs, lflvl,
1211  yoff + 8 * hbs * bytesperpixel,
1212  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1213  } else {
1214  yoff += hbs * 8 * bytesperpixel;
1215  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1216  decode_sb_mem(td, row, col + hbs, lflvl, yoff, uvoff, bl + 1);
1217  }
1218  } else if (row + hbs < s->rows) {
1219  yoff += hbs * 8 * y_stride;
1220  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1221  decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1222  }
1223  }
1224 }
1225 
1226 static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
1227 {
1228  int sb_start = ( idx * n) >> log2_n;
1229  int sb_end = ((idx + 1) * n) >> log2_n;
1230  *start = FFMIN(sb_start, n) << 3;
1231  *end = FFMIN(sb_end, n) << 3;
1232 }
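/* set_tile_offset() splits n superblocks evenly across 2^log2_n tiles and
 * returns the [start, end) range of tile 'idx', converted to 8x8-block units
 * (hence the << 3), which is what the row/col loops in the tile decoders
 * iterate over. */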
1233 
1234 static void free_buffers(VP9Context *s)
1235 {
1236  int i;
1237 
1238  av_freep(&s->intra_pred_data[0]);
1239  for (i = 0; i < s->active_tile_cols; i++)
1240  vp9_tile_data_free(&s->td[i]);
1241 }
1242 
1243 static av_cold int vp9_decode_free(AVCodecContext *avctx)
1244 {
1245  VP9Context *s = avctx->priv_data;
1246  int i;
1247 
1248  for (i = 0; i < 3; i++) {
1249  vp9_frame_unref(&s->s.frames[i]);
1250  av_frame_free(&s->s.frames[i].tf.f);
1251  }
1252  ff_refstruct_pool_uninit(&s->frame_extradata_pool);
1253  for (i = 0; i < 8; i++) {
1254  ff_thread_release_ext_buffer(&s->s.refs[i]);
1255  av_frame_free(&s->s.refs[i].f);
1256  ff_thread_release_ext_buffer(&s->next_refs[i]);
1257  av_frame_free(&s->next_refs[i].f);
1258  }
1259 
1260  free_buffers(s);
1261 #if HAVE_THREADS
1262  av_freep(&s->entries);
1263  ff_pthread_free(s, vp9_context_offsets);
1264 #endif
1265  av_freep(&s->td);
1266  return 0;
1267 }
1268 
1269 static int decode_tiles(AVCodecContext *avctx,
1270  const uint8_t *data, int size)
1271 {
1272  VP9Context *s = avctx->priv_data;
1273  VP9TileData *td = &s->td[0];
1274  int row, col, tile_row, tile_col, ret;
1275  int bytesperpixel;
1276  int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
1277  AVFrame *f;
1278  ptrdiff_t yoff, uvoff, ls_y, ls_uv;
1279 
1280  f = s->s.frames[CUR_FRAME].tf.f;
1281  ls_y = f->linesize[0];
1282  ls_uv =f->linesize[1];
1283  bytesperpixel = s->bytesperpixel;
1284 
1285  yoff = uvoff = 0;
1286  for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
1287  set_tile_offset(&tile_row_start, &tile_row_end,
1288  tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);
1289 
1290  for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
1291  int64_t tile_size;
1292 
1293  if (tile_col == s->s.h.tiling.tile_cols - 1 &&
1294  tile_row == s->s.h.tiling.tile_rows - 1) {
1295  tile_size = size;
1296  } else {
1297  tile_size = AV_RB32(data);
1298  data += 4;
1299  size -= 4;
1300  }
1301  if (tile_size > size)
1302  return AVERROR_INVALIDDATA;
1303  ret = ff_vpx_init_range_decoder(&td->c_b[tile_col], data, tile_size);
1304  if (ret < 0)
1305  return ret;
1306  if (vpx_rac_get_prob_branchy(&td->c_b[tile_col], 128)) // marker bit
1307  return AVERROR_INVALIDDATA;
1308  data += tile_size;
1309  size -= tile_size;
1310  }
1311 
1312  for (row = tile_row_start; row < tile_row_end;
1313  row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
1314  VP9Filter *lflvl_ptr = s->lflvl;
1315  ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
1316 
1317  for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
1318  set_tile_offset(&tile_col_start, &tile_col_end,
1319  tile_col, s->s.h.tiling.log2_tile_cols, s->sb_cols);
1320  td->tile_col_start = tile_col_start;
1321  if (s->pass != 2) {
1322  memset(td->left_partition_ctx, 0, 8);
1323  memset(td->left_skip_ctx, 0, 8);
1324  if (s->s.h.keyframe || s->s.h.intraonly) {
1325  memset(td->left_mode_ctx, DC_PRED, 16);
1326  } else {
1327  memset(td->left_mode_ctx, NEARESTMV, 8);
1328  }
1329  memset(td->left_y_nnz_ctx, 0, 16);
1330  memset(td->left_uv_nnz_ctx, 0, 32);
1331  memset(td->left_segpred_ctx, 0, 8);
1332 
1333  td->c = &td->c_b[tile_col];
1334  }
1335 
1336  for (col = tile_col_start;
1337  col < tile_col_end;
1338  col += 8, yoff2 += 64 * bytesperpixel,
1339  uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1340  // FIXME integrate with lf code (i.e. zero after each
1341  // use, similar to invtxfm coefficients, or similar)
1342  if (s->pass != 1) {
1343  memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
1344  }
1345 
1346  if (s->pass == 2) {
1347  decode_sb_mem(td, row, col, lflvl_ptr,
1348  yoff2, uvoff2, BL_64X64);
1349  } else {
1350  if (vpx_rac_is_end(td->c)) {
1351  return AVERROR_INVALIDDATA;
1352  }
1353  decode_sb(td, row, col, lflvl_ptr,
1354  yoff2, uvoff2, BL_64X64);
1355  }
1356  }
1357  }
1358 
1359  if (s->pass == 1)
1360  continue;
1361 
1362  // backup pre-loopfilter reconstruction data for intra
1363  // prediction of next row of sb64s
1364  if (row + 8 < s->rows) {
1365  memcpy(s->intra_pred_data[0],
1366  f->data[0] + yoff + 63 * ls_y,
1367  8 * s->cols * bytesperpixel);
1368  memcpy(s->intra_pred_data[1],
1369  f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
1370  8 * s->cols * bytesperpixel >> s->ss_h);
1371  memcpy(s->intra_pred_data[2],
1372  f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
1373  8 * s->cols * bytesperpixel >> s->ss_h);
1374  }
1375 
1376  // loopfilter one row
1377  if (s->s.h.filter.level) {
1378  yoff2 = yoff;
1379  uvoff2 = uvoff;
1380  lflvl_ptr = s->lflvl;
1381  for (col = 0; col < s->cols;
1382  col += 8, yoff2 += 64 * bytesperpixel,
1383  uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1384  ff_vp9_loopfilter_sb(avctx, lflvl_ptr, row, col,
1385  yoff2, uvoff2);
1386  }
1387  }
1388 
1389  // FIXME maybe we can make this more finegrained by running the
1390  // loopfilter per-block instead of after each sbrow
1391  // In fact that would also make intra pred left preparation easier?
1392  ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, row >> 3, 0);
1393  }
1394  }
1395  return 0;
1396 }
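/* decode_tiles() above is the single-threaded (and frame-threaded) tile loop;
 * with slice threading enabled, decode_tiles_mt() below runs one job per tile
 * column instead, while loopfilter_proc() consumes the per-row progress the
 * workers report and applies the loop filter. */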
1397 
1398 #if HAVE_THREADS
1399 static av_always_inline
1400 int decode_tiles_mt(AVCodecContext *avctx, void *tdata, int jobnr,
1401  int threadnr)
1402 {
1403  VP9Context *s = avctx->priv_data;
1404  VP9TileData *td = &s->td[jobnr];
1405  ptrdiff_t uvoff, yoff, ls_y, ls_uv;
1406  int bytesperpixel = s->bytesperpixel, row, col, tile_row;
1407  unsigned tile_cols_len;
1408  int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
1409  VP9Filter *lflvl_ptr_base;
1410  AVFrame *f;
1411 
1412  f = s->s.frames[CUR_FRAME].tf.f;
1413  ls_y = f->linesize[0];
1414  ls_uv =f->linesize[1];
1415 
1416  set_tile_offset(&tile_col_start, &tile_col_end,
1417  jobnr, s->s.h.tiling.log2_tile_cols, s->sb_cols);
1418  td->tile_col_start = tile_col_start;
1419  uvoff = (64 * bytesperpixel >> s->ss_h)*(tile_col_start >> 3);
1420  yoff = (64 * bytesperpixel)*(tile_col_start >> 3);
1421  lflvl_ptr_base = s->lflvl+(tile_col_start >> 3);
1422 
1423  for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
1424  set_tile_offset(&tile_row_start, &tile_row_end,
1425  tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);
1426 
1427  td->c = &td->c_b[tile_row];
1428  for (row = tile_row_start; row < tile_row_end;
1429  row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
1430  ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
1431  VP9Filter *lflvl_ptr = lflvl_ptr_base+s->sb_cols*(row >> 3);
1432 
1433  memset(td->left_partition_ctx, 0, 8);
1434  memset(td->left_skip_ctx, 0, 8);
1435  if (s->s.h.keyframe || s->s.h.intraonly) {
1436  memset(td->left_mode_ctx, DC_PRED, 16);
1437  } else {
1438  memset(td->left_mode_ctx, NEARESTMV, 8);
1439  }
1440  memset(td->left_y_nnz_ctx, 0, 16);
1441  memset(td->left_uv_nnz_ctx, 0, 32);
1442  memset(td->left_segpred_ctx, 0, 8);
1443 
1444  for (col = tile_col_start;
1445  col < tile_col_end;
1446  col += 8, yoff2 += 64 * bytesperpixel,
1447  uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1448  // FIXME integrate with lf code (i.e. zero after each
1449  // use, similar to invtxfm coefficients, or similar)
1450  memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
1451  decode_sb(td, row, col, lflvl_ptr,
1452  yoff2, uvoff2, BL_64X64);
1453  }
1454 
1455  // backup pre-loopfilter reconstruction data for intra
1456  // prediction of next row of sb64s
1457  tile_cols_len = tile_col_end - tile_col_start;
1458  if (row + 8 < s->rows) {
1459  memcpy(s->intra_pred_data[0] + (tile_col_start * 8 * bytesperpixel),
1460  f->data[0] + yoff + 63 * ls_y,
1461  8 * tile_cols_len * bytesperpixel);
1462  memcpy(s->intra_pred_data[1] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
1463  f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
1464  8 * tile_cols_len * bytesperpixel >> s->ss_h);
1465  memcpy(s->intra_pred_data[2] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
1466  f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
1467  8 * tile_cols_len * bytesperpixel >> s->ss_h);
1468  }
1469 
1470  vp9_report_tile_progress(s, row >> 3, 1);
1471  }
1472  }
1473  return 0;
1474 }
1475 
1476 static av_always_inline
1477 int loopfilter_proc(AVCodecContext *avctx)
1478 {
1479  VP9Context *s = avctx->priv_data;
1480  ptrdiff_t uvoff, yoff, ls_y, ls_uv;
1481  VP9Filter *lflvl_ptr;
1482  int bytesperpixel = s->bytesperpixel, col, i;
1483  AVFrame *f;
1484 
1485  f = s->s.frames[CUR_FRAME].tf.f;
1486  ls_y = f->linesize[0];
1487  ls_uv =f->linesize[1];
1488 
1489  for (i = 0; i < s->sb_rows; i++) {
1490  vp9_await_tile_progress(s, i, s->s.h.tiling.tile_cols);
1491 
1492  if (s->s.h.filter.level) {
1493  yoff = (ls_y * 64)*i;
1494  uvoff = (ls_uv * 64 >> s->ss_v)*i;
1495  lflvl_ptr = s->lflvl+s->sb_cols*i;
1496  for (col = 0; col < s->cols;
1497  col += 8, yoff += 64 * bytesperpixel,
1498  uvoff += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1499  ff_vp9_loopfilter_sb(avctx, lflvl_ptr, i << 3, col,
1500  yoff, uvoff);
1501  }
1502  }
1503  }
1504  return 0;
1505 }
1506 #endif
1507 
1508 static int vp9_export_enc_params(VP9Context *s, VP9Frame *frame)
1509 {
1510  AVVideoEncParams *par;
1511  unsigned int tile, nb_blocks = 0;
1512 
1513  if (s->s.h.segmentation.enabled) {
1514  for (tile = 0; tile < s->active_tile_cols; tile++)
1515  nb_blocks += s->td[tile].nb_block_structure;
1516  }
1517 
1518  par = av_video_enc_params_create_side_data(frame->tf.f,
1519  AV_VIDEO_ENC_PARAMS_VP9, nb_blocks);
1520  if (!par)
1521  return AVERROR(ENOMEM);
1522 
1523  par->qp = s->s.h.yac_qi;
1524  par->delta_qp[0][0] = s->s.h.ydc_qdelta;
1525  par->delta_qp[1][0] = s->s.h.uvdc_qdelta;
1526  par->delta_qp[2][0] = s->s.h.uvdc_qdelta;
1527  par->delta_qp[1][1] = s->s.h.uvac_qdelta;
1528  par->delta_qp[2][1] = s->s.h.uvac_qdelta;
1529 
1530  if (nb_blocks) {
1531  unsigned int block = 0;
1532  unsigned int tile, block_tile;
1533 
1534  for (tile = 0; tile < s->active_tile_cols; tile++) {
1535  VP9TileData *td = &s->td[tile];
1536 
1537  for (block_tile = 0; block_tile < td->nb_block_structure; block_tile++) {
1538  AVVideoBlockParams *b = av_video_enc_params_block(par, block++);
1539  unsigned int row = td->block_structure[block_tile].row;
1540  unsigned int col = td->block_structure[block_tile].col;
1541  uint8_t seg_id = frame->segmentation_map[row * 8 * s->sb_cols + col];
1542 
1543  b->src_x = col * 8;
1544  b->src_y = row * 8;
1545  b->w = 1 << (3 + td->block_structure[block_tile].block_size_idx_x);
1546  b->h = 1 << (3 + td->block_structure[block_tile].block_size_idx_y);
1547 
1548  if (s->s.h.segmentation.feat[seg_id].q_enabled) {
1549  b->delta_qp = s->s.h.segmentation.feat[seg_id].q_val;
1550  if (s->s.h.segmentation.absolute_vals)
1551  b->delta_qp -= par->qp;
1552  }
1553  }
1554  }
1555  }
1556 
1557  return 0;
1558 }
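/* Note on the exported block parameters above: delta_qp is the segment's
 * q_val, converted to a delta against the base QP when the segmentation
 * feature data was signalled as absolute (absolute_vals). */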
1559 
1560 static int vp9_decode_frame(AVCodecContext *avctx, AVFrame *frame,
1561  int *got_frame, AVPacket *pkt)
1562 {
1563  const uint8_t *data = pkt->data;
1564  int size = pkt->size;
1565  VP9Context *s = avctx->priv_data;
1566  int ret, i, j, ref;
1567  int retain_segmap_ref = s->s.frames[REF_FRAME_SEGMAP].segmentation_map &&
1568  (!s->s.h.segmentation.enabled || !s->s.h.segmentation.update_map);
1569  AVFrame *f;
1570 
1571  if ((ret = decode_frame_header(avctx, data, size, &ref)) < 0) {
1572  return ret;
1573  } else if (ret == 0) {
1574  if (!s->s.refs[ref].f->buf[0]) {
1575  av_log(avctx, AV_LOG_ERROR, "Requested reference %d not available\n", ref);
1576  return AVERROR_INVALIDDATA;
1577  }
1578  if ((ret = av_frame_ref(frame, s->s.refs[ref].f)) < 0)
1579  return ret;
1580  frame->pts = pkt->pts;
1581  frame->pkt_dts = pkt->dts;
1582  for (i = 0; i < 8; i++) {
1583  if (s->next_refs[i].f->buf[0])
1584  ff_thread_release_ext_buffer(&s->next_refs[i]);
1585  if (s->s.refs[i].f->buf[0] &&
1586  (ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i])) < 0)
1587  return ret;
1588  }
1589  *got_frame = 1;
1590  return pkt->size;
1591  }
1592  data += ret;
1593  size -= ret;
1594 
1595  if (!retain_segmap_ref || s->s.h.keyframe || s->s.h.intraonly) {
1596  if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0])
1597  vp9_frame_unref(&s->s.frames[REF_FRAME_SEGMAP]);
1598  if (!s->s.h.keyframe && !s->s.h.intraonly && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] &&
1599  (ret = vp9_frame_ref(&s->s.frames[REF_FRAME_SEGMAP], &s->s.frames[CUR_FRAME])) < 0)
1600  return ret;
1601  }
1602  if (s->s.frames[REF_FRAME_MVPAIR].tf.f->buf[0])
1603  vp9_frame_unref(&s->s.frames[REF_FRAME_MVPAIR]);
1604  if (!s->s.h.intraonly && !s->s.h.keyframe && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] &&
1605  (ret = vp9_frame_ref(&s->s.frames[REF_FRAME_MVPAIR], &s->s.frames[CUR_FRAME])) < 0)
1606  return ret;
1607  if (s->s.frames[CUR_FRAME].tf.f->buf[0])
1608  vp9_frame_unref(&s->s.frames[CUR_FRAME]);
1609  if ((ret = vp9_frame_alloc(avctx, &s->s.frames[CUR_FRAME])) < 0)
1610  return ret;
1611  f = s->s.frames[CUR_FRAME].tf.f;
1612  if (s->s.h.keyframe)
1613  f->flags |= AV_FRAME_FLAG_KEY;
1614  else
1615  f->flags &= ~AV_FRAME_FLAG_KEY;
1616  f->pict_type = (s->s.h.keyframe || s->s.h.intraonly) ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
1617 
1618  if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0] &&
1619  (s->s.frames[REF_FRAME_MVPAIR].tf.f->width != s->s.frames[CUR_FRAME].tf.f->width ||
1620  s->s.frames[REF_FRAME_MVPAIR].tf.f->height != s->s.frames[CUR_FRAME].tf.f->height)) {
1621  vp9_frame_unref(&s->s.frames[REF_FRAME_SEGMAP]);
1622  }
1623 
1624  // ref frame setup
1625  for (i = 0; i < 8; i++) {
1626  if (s->next_refs[i].f->buf[0])
1627  ff_thread_release_ext_buffer(&s->next_refs[i]);
1628  if (s->s.h.refreshrefmask & (1 << i)) {
1629  ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.frames[CUR_FRAME].tf);
1630  } else if (s->s.refs[i].f->buf[0]) {
1631  ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i]);
1632  }
1633  if (ret < 0)
1634  return ret;
1635  }
1636 
1637  if (avctx->hwaccel) {
1638  const FFHWAccel *hwaccel = ffhwaccel(avctx->hwaccel);
1639  ret = hwaccel->start_frame(avctx, NULL, 0);
1640  if (ret < 0)
1641  return ret;
1642  ret = hwaccel->decode_slice(avctx, pkt->data, pkt->size);
1643  if (ret < 0)
1644  return ret;
1645  ret = hwaccel->end_frame(avctx);
1646  if (ret < 0)
1647  return ret;
1648  goto finish;
1649  }
1650 
1651  // main tile decode loop
1652  memset(s->above_partition_ctx, 0, s->cols);
1653  memset(s->above_skip_ctx, 0, s->cols);
1654  if (s->s.h.keyframe || s->s.h.intraonly) {
1655  memset(s->above_mode_ctx, DC_PRED, s->cols * 2);
1656  } else {
1657  memset(s->above_mode_ctx, NEARESTMV, s->cols);
1658  }
1659  memset(s->above_y_nnz_ctx, 0, s->sb_cols * 16);
1660  memset(s->above_uv_nnz_ctx[0], 0, s->sb_cols * 16 >> s->ss_h);
1661  memset(s->above_uv_nnz_ctx[1], 0, s->sb_cols * 16 >> s->ss_h);
1662  memset(s->above_segpred_ctx, 0, s->cols);
1663  s->pass = s->s.frames[CUR_FRAME].uses_2pass =
1664  avctx->active_thread_type == FF_THREAD_FRAME && s->s.h.refreshctx && !s->s.h.parallelmode;
1665  if ((ret = update_block_buffers(avctx)) < 0) {
1666  av_log(avctx, AV_LOG_ERROR,
1667  "Failed to allocate block buffers\n");
1668  return ret;
1669  }
1670  if (s->s.h.refreshctx && s->s.h.parallelmode) {
1671  int j, k, l, m;
1672 
1673  for (i = 0; i < 4; i++) {
1674  for (j = 0; j < 2; j++)
1675  for (k = 0; k < 2; k++)
1676  for (l = 0; l < 6; l++)
1677  for (m = 0; m < 6; m++)
1678  memcpy(s->prob_ctx[s->s.h.framectxid].coef[i][j][k][l][m],
1679  s->prob.coef[i][j][k][l][m], 3);
1680  if (s->s.h.txfmmode == i)
1681  break;
1682  }
1683  s->prob_ctx[s->s.h.framectxid].p = s->prob.p;
1684  ff_thread_finish_setup(avctx);
1685  } else if (!s->s.h.refreshctx) {
1686  ff_thread_finish_setup(avctx);
1687  }
1688 
1689 #if HAVE_THREADS
1690  if (avctx->active_thread_type & FF_THREAD_SLICE) {
1691  for (i = 0; i < s->sb_rows; i++)
1692  atomic_store(&s->entries[i], 0);
1693  }
1694 #endif
1695 
1696  do {
1697  for (i = 0; i < s->active_tile_cols; i++) {
1698  s->td[i].b = s->td[i].b_base;
1699  s->td[i].block = s->td[i].block_base;
1700  s->td[i].uvblock[0] = s->td[i].uvblock_base[0];
1701  s->td[i].uvblock[1] = s->td[i].uvblock_base[1];
1702  s->td[i].eob = s->td[i].eob_base;
1703  s->td[i].uveob[0] = s->td[i].uveob_base[0];
1704  s->td[i].uveob[1] = s->td[i].uveob_base[1];
1705  s->td[i].error_info = 0;
1706  }
1707 
1708 #if HAVE_THREADS
1709  if (avctx->active_thread_type == FF_THREAD_SLICE) {
1710  int tile_row, tile_col;
1711 
1712  av_assert1(!s->pass);
1713 
1714  for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
1715  for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
1716  int64_t tile_size;
1717 
1718  if (tile_col == s->s.h.tiling.tile_cols - 1 &&
1719  tile_row == s->s.h.tiling.tile_rows - 1) {
1720  tile_size = size;
1721  } else {
1722  tile_size = AV_RB32(data);
1723  data += 4;
1724  size -= 4;
1725  }
1726  if (tile_size > size)
1727  return AVERROR_INVALIDDATA;
1728  ret = ff_vpx_init_range_decoder(&s->td[tile_col].c_b[tile_row], data, tile_size);
1729  if (ret < 0)
1730  return ret;
1731  if (vpx_rac_get_prob_branchy(&s->td[tile_col].c_b[tile_row], 128)) // marker bit
1732  return AVERROR_INVALIDDATA;
1733  data += tile_size;
1734  size -= tile_size;
1735  }
1736  }
1737 
1738  ff_slice_thread_execute_with_mainfunc(avctx, decode_tiles_mt, loopfilter_proc, s->td, NULL, s->s.h.tiling.tile_cols);
1739  } else
1740 #endif
1741  {
1742  ret = decode_tiles(avctx, data, size);
1743  if (ret < 0) {
1744  ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
1745  return ret;
1746  }
1747  }
1748 
1749  // Sum all counts fields into td[0].counts for tile threading
1750  if (avctx->active_thread_type == FF_THREAD_SLICE)
1751  for (i = 1; i < s->s.h.tiling.tile_cols; i++)
1752  for (j = 0; j < sizeof(s->td[i].counts) / sizeof(unsigned); j++)
1753  ((unsigned *)&s->td[0].counts)[j] += ((unsigned *)&s->td[i].counts)[j];
1754 
1755  if (s->pass < 2 && s->s.h.refreshctx && !s->s.h.parallelmode) {
1756  ff_vp9_adapt_probs(s);
1757  ff_thread_finish_setup(avctx);
1758  }
1759  } while (s->pass++ == 1);
1760  ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
1761 
1762  if (s->td->error_info < 0) {
1763  av_log(avctx, AV_LOG_ERROR, "Failed to decode tile data\n");
1764  s->td->error_info = 0;
1765  return AVERROR_INVALIDDATA;
1766  }
1767  if (avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS) {
1768  ret = vp9_export_enc_params(s, &s->s.frames[CUR_FRAME]);
1769  if (ret < 0)
1770  return ret;
1771  }
1772 
1773 finish:
1774  // ref frame setup
1775  for (i = 0; i < 8; i++) {
1776  if (s->s.refs[i].f->buf[0])
1777  ff_thread_release_ext_buffer(&s->s.refs[i]);
1778  if (s->next_refs[i].f->buf[0] &&
1779  (ret = ff_thread_ref_frame(&s->s.refs[i], &s->next_refs[i])) < 0)
1780  return ret;
1781  }
1782 
1783  if (!s->s.h.invisible) {
1784  if ((ret = av_frame_ref(frame, s->s.frames[CUR_FRAME].tf.f)) < 0)
1785  return ret;
1786  *got_frame = 1;
1787  }
1788 
1789  return pkt->size;
1790 }
1791 
1792 static void vp9_decode_flush(AVCodecContext *avctx)
1793 {
1794  VP9Context *s = avctx->priv_data;
1795  int i;
1796 
1797  for (i = 0; i < 3; i++)
1798  vp9_frame_unref(&s->s.frames[i]);
1799  for (i = 0; i < 8; i++)
1800  ff_thread_release_ext_buffer(&s->s.refs[i]);
1801 
1802  if (FF_HW_HAS_CB(avctx, flush))
1803  FF_HW_SIMPLE_CALL(avctx, flush);
1804 }
1805 
1806 static av_cold int vp9_decode_init(AVCodecContext *avctx)
1807 {
1808  VP9Context *s = avctx->priv_data;
1809  int ret;
1810 
1811  s->last_bpp = 0;
1812  s->s.h.filter.sharpness = -1;
1813 
1814 #if HAVE_THREADS
1815  if (avctx->active_thread_type & FF_THREAD_SLICE) {
1816  ret = ff_pthread_init(s, vp9_context_offsets);
1817  if (ret < 0)
1818  return ret;
1819  }
1820 #endif
1821 
1822  for (int i = 0; i < 3; i++) {
1823  s->s.frames[i].tf.f = av_frame_alloc();
1824  if (!s->s.frames[i].tf.f)
1825  return AVERROR(ENOMEM);
1826  }
1827  for (int i = 0; i < 8; i++) {
1828  s->s.refs[i].f = av_frame_alloc();
1829  s->next_refs[i].f = av_frame_alloc();
1830  if (!s->s.refs[i].f || !s->next_refs[i].f)
1831  return AVERROR(ENOMEM);
1832  }
1833  return 0;
1834 }
1835 
1836 #if HAVE_THREADS
1837 static int vp9_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
1838 {
1839  int i, ret;
1840  VP9Context *s = dst->priv_data, *ssrc = src->priv_data;
1841 
1842  for (i = 0; i < 3; i++) {
1843  if (s->s.frames[i].tf.f->buf[0])
1844  vp9_frame_unref(&s->s.frames[i]);
1845  if (ssrc->s.frames[i].tf.f->buf[0]) {
1846  if ((ret = vp9_frame_ref(&s->s.frames[i], &ssrc->s.frames[i])) < 0)
1847  return ret;
1848  }
1849  }
1850  for (i = 0; i < 8; i++) {
1851  if (s->s.refs[i].f->buf[0])
1852  ff_thread_release_ext_buffer(&s->s.refs[i]);
1853  if (ssrc->next_refs[i].f->buf[0]) {
1854  if ((ret = ff_thread_ref_frame(&s->s.refs[i], &ssrc->next_refs[i])) < 0)
1855  return ret;
1856  }
1857  }
1858  ff_refstruct_replace(&s->frame_extradata_pool, ssrc->frame_extradata_pool);
1859  s->frame_extradata_pool_size = ssrc->frame_extradata_pool_size;
1860 
1861  s->s.h.invisible = ssrc->s.h.invisible;
1862  s->s.h.keyframe = ssrc->s.h.keyframe;
1863  s->s.h.intraonly = ssrc->s.h.intraonly;
1864  s->ss_v = ssrc->ss_v;
1865  s->ss_h = ssrc->ss_h;
1866  s->s.h.segmentation.enabled = ssrc->s.h.segmentation.enabled;
1867  s->s.h.segmentation.update_map = ssrc->s.h.segmentation.update_map;
1868  s->s.h.segmentation.absolute_vals = ssrc->s.h.segmentation.absolute_vals;
1869  s->bytesperpixel = ssrc->bytesperpixel;
1870  s->gf_fmt = ssrc->gf_fmt;
1871  s->w = ssrc->w;
1872  s->h = ssrc->h;
1873  s->s.h.bpp = ssrc->s.h.bpp;
1874  s->bpp_index = ssrc->bpp_index;
1875  s->pix_fmt = ssrc->pix_fmt;
1876  memcpy(&s->prob_ctx, &ssrc->prob_ctx, sizeof(s->prob_ctx));
1877  memcpy(&s->s.h.lf_delta, &ssrc->s.h.lf_delta, sizeof(s->s.h.lf_delta));
1878  memcpy(&s->s.h.segmentation.feat, &ssrc->s.h.segmentation.feat,
1879  sizeof(s->s.h.segmentation.feat));
1880 
1881  return 0;
1882 }
1883 #endif
1884 
1885 const FFCodec ff_vp9_decoder = {
1886  .p.name = "vp9",
1887  CODEC_LONG_NAME("Google VP9"),
1888  .p.type = AVMEDIA_TYPE_VIDEO,
1889  .p.id = AV_CODEC_ID_VP9,
1890  .priv_data_size = sizeof(VP9Context),
1891  .init = vp9_decode_init,
1892  .close = vp9_decode_free,
1893  FF_CODEC_DECODE_CB(vp9_decode_frame),
1894  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS,
1895  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
1896  FF_CODEC_CAP_SLICE_THREAD_HAS_MF |
1897  FF_CODEC_CAP_ALLOCATE_PROGRESS,
1898  .flush = vp9_decode_flush,
1899  UPDATE_THREAD_CONTEXT(vp9_decode_update_thread_context),
1900  .p.profiles = NULL_IF_CONFIG_SMALL(ff_vp9_profiles),
1901  .bsfs = "vp9_superframe_split",
1902  .hw_configs = (const AVCodecHWConfigInternal *const []) {
1903 #if CONFIG_VP9_DXVA2_HWACCEL
1904  HWACCEL_DXVA2(vp9),
1905 #endif
1906 #if CONFIG_VP9_D3D11VA_HWACCEL
1907  HWACCEL_D3D11VA(vp9),
1908 #endif
1909 #if CONFIG_VP9_D3D11VA2_HWACCEL
1910  HWACCEL_D3D11VA2(vp9),
1911 #endif
1912 #if CONFIG_VP9_D3D12VA_HWACCEL
1913  HWACCEL_D3D12VA(vp9),
1914 #endif
1915 #if CONFIG_VP9_NVDEC_HWACCEL
1916  HWACCEL_NVDEC(vp9),
1917 #endif
1918 #if CONFIG_VP9_VAAPI_HWACCEL
1919  HWACCEL_VAAPI(vp9),
1920 #endif
1921 #if CONFIG_VP9_VDPAU_HWACCEL
1922  HWACCEL_VDPAU(vp9),
1923 #endif
1924 #if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
1925  HWACCEL_VIDEOTOOLBOX(vp9),
1926 #endif
1927  NULL
1928  },
1929 };