FFmpeg
vp9.c
Go to the documentation of this file.
1 /*
2  * VP9 compatible video decoder
3  *
4  * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
5  * Copyright (C) 2013 Clément Bœsch <u pkh me>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include "config_components.h"
25 
26 #include "avcodec.h"
27 #include "codec_internal.h"
28 #include "decode.h"
29 #include "get_bits.h"
30 #include "hwaccel_internal.h"
31 #include "hwconfig.h"
32 #include "profiles.h"
33 #include "refstruct.h"
34 #include "thread.h"
35 #include "threadframe.h"
36 #include "pthread_internal.h"
37 
38 #include "videodsp.h"
39 #include "vp89_rac.h"
40 #include "vp9.h"
41 #include "vp9data.h"
42 #include "vp9dec.h"
43 #include "vpx_rac.h"
44 #include "libavutil/avassert.h"
45 #include "libavutil/pixdesc.h"
47 
48 #define VP9_SYNCCODE 0x498342
49 
50 #if HAVE_THREADS
51 DEFINE_OFFSET_ARRAY(VP9Context, vp9_context, pthread_init_cnt,
52  (offsetof(VP9Context, progress_mutex)),
53  (offsetof(VP9Context, progress_cond)));
54 
55 static int vp9_alloc_entries(AVCodecContext *avctx, int n) {
56  VP9Context *s = avctx->priv_data;
57  int i;
58 
59  if (avctx->active_thread_type & FF_THREAD_SLICE) {
60  if (s->entries)
61  av_freep(&s->entries);
62 
63  s->entries = av_malloc_array(n, sizeof(atomic_int));
64  if (!s->entries)
65  return AVERROR(ENOMEM);
66 
67  for (i = 0; i < n; i++)
68  atomic_init(&s->entries[i], 0);
69  }
70  return 0;
71 }
72 
73 static void vp9_report_tile_progress(VP9Context *s, int field, int n) {
74  pthread_mutex_lock(&s->progress_mutex);
75  atomic_fetch_add_explicit(&s->entries[field], n, memory_order_release);
76  pthread_cond_signal(&s->progress_cond);
77  pthread_mutex_unlock(&s->progress_mutex);
78 }
79 
80 static void vp9_await_tile_progress(VP9Context *s, int field, int n) {
81  if (atomic_load_explicit(&s->entries[field], memory_order_acquire) >= n)
82  return;
83 
84  pthread_mutex_lock(&s->progress_mutex);
85  while (atomic_load_explicit(&s->entries[field], memory_order_relaxed) != n)
86  pthread_cond_wait(&s->progress_cond, &s->progress_mutex);
87  pthread_mutex_unlock(&s->progress_mutex);
88 }
89 #else
90 static int vp9_alloc_entries(AVCodecContext *avctx, int n) { return 0; }
91 #endif
92 
{
    /* Release the per-tile scratch buffers (block descriptors, coefficient
     * storage, exported block structure); av_freep() also NULLs the
     * pointers so a later reallocation starts from a clean state. */
    av_freep(&td->b_base);
    av_freep(&td->block_base);
    av_freep(&td->block_structure);
}
99 
{
    /* Drop this frame's auxiliary references.  The segmentation map points
     * into the extradata buffer, so only the pointer is cleared here; the
     * memory itself is owned by f->extradata. */
    av_buffer_unref(&f->extradata);
    ff_refstruct_unref(&f->hwaccel_picture_private);
    f->segmentation_map = NULL;
}
107 
{
    VP9Context *s = avctx->priv_data;
    int ret, sz;

    /* NOTE(review): `ret` is tested before any visible assignment — the
     * frame-buffer allocation call that should set it appears to be
     * missing from this copy; verify against upstream. */
    if (ret < 0)
        return ret;

    /* One segmentation-map byte per 8x8 block: 64 blocks per 64x64 sb. */
    sz = 64 * s->sb_cols * s->sb_rows;
    /* Lazily (re)create the buffer pool when the frame geometry changed.
     * Each pool buffer holds the segmentation map followed by the
     * per-block motion-vector reference pairs. */
    if (sz != s->frame_extradata_pool_size) {
        av_buffer_pool_uninit(&s->frame_extradata_pool);
        s->frame_extradata_pool = av_buffer_pool_init(sz * (1 + sizeof(VP9mvrefPair)), NULL);
        if (!s->frame_extradata_pool) {
            s->frame_extradata_pool_size = 0;
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        s->frame_extradata_pool_size = sz;
    }
    f->extradata = av_buffer_pool_get(s->frame_extradata_pool);
    if (!f->extradata) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    /* Pool buffers are recycled, so clear stale contents. */
    memset(f->extradata->data, 0, f->extradata->size);

    f->segmentation_map = f->extradata->data;
    f->mv = (VP9mvrefPair *) (f->extradata->data + sz);

    ret = ff_hwaccel_frame_priv_alloc(avctx, &f->hwaccel_picture_private);
    if (ret < 0)
        goto fail;

    return 0;

fail:
    /* NOTE(review): the cleanup call on this error path appears to be
     * missing from this copy (only the return remains); verify upstream. */
    return ret;
}
148 
/* Make dst a new reference to the frame held by src: the underlying
 * picture buffer and extradata are shared, not copied.
 * Returns 0 on success or a negative AVERROR code. */
static int vp9_frame_ref(VP9Frame *dst, VP9Frame *src)
{
    int ret;

    ret = ff_thread_ref_frame(&dst->tf, &src->tf);
    if (ret < 0)
        return ret;

    /* Take a new reference on the shared extradata buffer, which holds
     * the segmentation map and the motion-vector pairs. */
    dst->extradata = av_buffer_ref(src->extradata);
    if (!dst->extradata)
        goto fail;

    /* These pointers alias into the (shared) extradata buffer. */
    dst->segmentation_map = src->segmentation_map;
    dst->mv = src->mv;
    dst->uses_2pass = src->uses_2pass;

    /* NOTE(review): the start of the call that rereferences the hwaccel
     * private data (consuming src->hwaccel_picture_private) appears to be
     * truncated in this copy; verify against upstream. */
 src->hwaccel_picture_private);

    return 0;

fail:
    vp9_frame_unref(dst);
    return AVERROR(ENOMEM);
}
174 
/**
 * Adapt the decoder to frame dimensions w x h and the current coded pixel
 * format: negotiate the output format (offering hwaccel formats where
 * compiled in), recompute block/superblock counts, and reallocate the
 * per-frame "above"-row context buffers and the loop-filter level array.
 *
 * @param w frame width in pixels, must be > 0
 * @param h frame height in pixels, must be > 0
 * @return 0 on success (or when nothing changed), negative AVERROR on error
 */
static int update_size(AVCodecContext *avctx, int w, int h)
{
/* Upper bound on the number of hwaccel formats that can be offered at
 * once; D3D11VA contributes two entries. */
#define HWACCEL_MAX (CONFIG_VP9_DXVA2_HWACCEL + \
                     CONFIG_VP9_D3D11VA_HWACCEL * 2 + \
                     CONFIG_VP9_NVDEC_HWACCEL + \
                     CONFIG_VP9_VAAPI_HWACCEL + \
                     CONFIG_VP9_VDPAU_HWACCEL + \
                     CONFIG_VP9_VIDEOTOOLBOX_HWACCEL)
    /* +2: the native software format plus the AV_PIX_FMT_NONE sentinel. */
    enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
    VP9Context *s = avctx->priv_data;
    uint8_t *p;
    int bytesperpixel = s->bytesperpixel, ret, cols, rows;
    int lflvl_len, i;

    av_assert0(w > 0 && h > 0);

    /* Only renegotiate the format when size or pixel format changed. */
    if (!(s->pix_fmt == s->gf_fmt && w == s->w && h == s->h)) {
        if ((ret = ff_set_dimensions(avctx, w, h)) < 0)
            return ret;

        /* Offer the hwaccels compiled in for the coded pixel format.
         * NOTE(review): several `case` labels (higher-bit-depth variants)
         * appear to be missing from this copy — code below follows an
         * unconditional `break;` — verify against upstream. */
        switch (s->pix_fmt) {
        case AV_PIX_FMT_YUV420P:
#if CONFIG_VP9_DXVA2_HWACCEL
            *fmtp++ = AV_PIX_FMT_DXVA2_VLD;
#endif
#if CONFIG_VP9_D3D11VA_HWACCEL
            *fmtp++ = AV_PIX_FMT_D3D11VA_VLD;
            *fmtp++ = AV_PIX_FMT_D3D11;
#endif
#if CONFIG_VP9_NVDEC_HWACCEL
            *fmtp++ = AV_PIX_FMT_CUDA;
#endif
#if CONFIG_VP9_VAAPI_HWACCEL
            *fmtp++ = AV_PIX_FMT_VAAPI;
#endif
#if CONFIG_VP9_VDPAU_HWACCEL
            *fmtp++ = AV_PIX_FMT_VDPAU;
#endif
#if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
            *fmtp++ = AV_PIX_FMT_VIDEOTOOLBOX;
#endif
            break;
#if CONFIG_VP9_NVDEC_HWACCEL
            *fmtp++ = AV_PIX_FMT_CUDA;
#endif
#if CONFIG_VP9_VAAPI_HWACCEL
            *fmtp++ = AV_PIX_FMT_VAAPI;
#endif
#if CONFIG_VP9_VDPAU_HWACCEL
            *fmtp++ = AV_PIX_FMT_VDPAU;
#endif
            break;
        case AV_PIX_FMT_YUV444P:
#if CONFIG_VP9_VAAPI_HWACCEL
            *fmtp++ = AV_PIX_FMT_VAAPI;
#endif
            break;
        case AV_PIX_FMT_GBRP:
        case AV_PIX_FMT_GBRP10:
        case AV_PIX_FMT_GBRP12:
#if CONFIG_VP9_VAAPI_HWACCEL
            *fmtp++ = AV_PIX_FMT_VAAPI;
#endif
            break;
        }

        /* Native software format last, then terminate the list. */
        *fmtp++ = s->pix_fmt;
        *fmtp = AV_PIX_FMT_NONE;

        ret = ff_get_format(avctx, pix_fmts);
        if (ret < 0)
            return ret;

        avctx->pix_fmt = ret;
        s->gf_fmt = s->pix_fmt;
        s->w = w;
        s->h = h;
    }

    /* Frame size in 8x8 blocks. */
    cols = (w + 7) >> 3;
    rows = (h + 7) >> 3;

    /* Context buffers still valid for this geometry and format? */
    if (s->intra_pred_data[0] && cols == s->cols && rows == s->rows && s->pix_fmt == s->last_fmt)
        return 0;

    s->last_fmt = s->pix_fmt;
    s->sb_cols = (w + 63) >> 6; /* size in 64x64 superblocks */
    s->sb_rows = (h + 63) >> 6;
    s->cols = (w + 7) >> 3;
    s->rows = (h + 7) >> 3;
    /* One lflvl per superblock row with slice threading, else a single one. */
    lflvl_len = avctx->active_thread_type == FF_THREAD_SLICE ? s->sb_rows : 1;

/* Carve a sub-buffer for `var` out of the single allocation behind `p`;
 * `n` is the per-superblock-column element count. */
#define assign(var, type, n) var = (type) p; p += s->sb_cols * (n) * sizeof(*var)
    av_freep(&s->intra_pred_data[0]);
    // FIXME we slightly over-allocate here for subsampled chroma, but a little
    // bit of padding shouldn't affect performance...
    p = av_malloc(s->sb_cols * (128 + 192 * bytesperpixel +
                                lflvl_len * sizeof(*s->lflvl) + 16 * sizeof(*s->above_mv_ctx)));
    if (!p)
        return AVERROR(ENOMEM);
    assign(s->intra_pred_data[0], uint8_t *, 64 * bytesperpixel);
    assign(s->intra_pred_data[1], uint8_t *, 64 * bytesperpixel);
    assign(s->intra_pred_data[2], uint8_t *, 64 * bytesperpixel);
    assign(s->above_y_nnz_ctx, uint8_t *, 16);
    assign(s->above_mode_ctx, uint8_t *, 16);
    assign(s->above_mv_ctx, VP9mv(*)[2], 16);
    assign(s->above_uv_nnz_ctx[0], uint8_t *, 16);
    assign(s->above_uv_nnz_ctx[1], uint8_t *, 16);
    assign(s->above_partition_ctx, uint8_t *, 8);
    assign(s->above_skip_ctx, uint8_t *, 8);
    assign(s->above_txfm_ctx, uint8_t *, 8);
    assign(s->above_segpred_ctx, uint8_t *, 8);
    assign(s->above_intra_ctx, uint8_t *, 8);
    assign(s->above_comp_ctx, uint8_t *, 8);
    assign(s->above_ref_ctx, uint8_t *, 8);
    assign(s->above_filter_ctx, uint8_t *, 8);
    assign(s->lflvl, VP9Filter *, lflvl_len);
#undef assign

    /* Geometry changed: the per-tile scratch buffers must be rebuilt. */
    if (s->td) {
        for (i = 0; i < s->active_tile_cols; i++)
            vp9_tile_data_free(&s->td[i]);
    }

    /* (Re)initialize the DSP function tables when the bit depth changed. */
    if (s->s.h.bpp != s->last_bpp) {
        ff_vp9dsp_init(&s->dsp, s->s.h.bpp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
        ff_videodsp_init(&s->vdsp, s->s.h.bpp);
        s->last_bpp = s->s.h.bpp;
    }

    return 0;
}
311 
{
    int i;
    VP9Context *s = avctx->priv_data;
    int chroma_blocks, chroma_eobs, bytesperpixel = s->bytesperpixel;
    VP9TileData *td = &s->td[0];

    /* Fast path: buffers already match the current 2-pass mode. */
    if (td->b_base && td->block_base && s->block_alloc_using_2pass == s->s.frames[CUR_FRAME].uses_2pass)
        return 0;

    /* Chroma block/EOB counts per 64x64 superblock, reduced by the
     * horizontal + vertical subsampling factors. */
    chroma_blocks = 64 * 64 >> (s->ss_h + s->ss_v);
    chroma_eobs = 16 * 16 >> (s->ss_h + s->ss_v);
    if (s->s.frames[CUR_FRAME].uses_2pass) {
        int sbs = s->sb_cols * s->sb_rows;

        /* 2-pass decoding keeps coefficient storage for the whole frame,
         * laid out as [Y coefs][U coefs][V coefs][Y eob][U eob][V eob]. */
        td->b_base = av_malloc_array(s->cols * s->rows, sizeof(VP9Block));
        td->block_base = av_mallocz(((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
                                     16 * 16 + 2 * chroma_eobs) * sbs);
        if (!td->b_base || !td->block_base)
            return AVERROR(ENOMEM);
        td->uvblock_base[0] = td->block_base + sbs * 64 * 64 * bytesperpixel;
        td->uvblock_base[1] = td->uvblock_base[0] + sbs * chroma_blocks * bytesperpixel;
        td->eob_base = (uint8_t *) (td->uvblock_base[1] + sbs * chroma_blocks * bytesperpixel);
        td->uveob_base[0] = td->eob_base + 16 * 16 * sbs;
        td->uveob_base[1] = td->uveob_base[0] + chroma_eobs * sbs;

        /* NOTE(review): an `if (...) {` guard (presumably gating block
         * structure export) appears to be missing before this allocation —
         * the stray closing brace below suggests lost lines; verify
         * against upstream. */
        td->block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
        if (!td->block_structure)
            return AVERROR(ENOMEM);
        }
    } else {
        /* Single-pass: only per-tile-column buffers sized for one
         * superblock are needed; drop the extras first. */
        for (i = 1; i < s->active_tile_cols; i++)
            vp9_tile_data_free(&s->td[i]);

        for (i = 0; i < s->active_tile_cols; i++) {
            s->td[i].b_base = av_malloc(sizeof(VP9Block));
            s->td[i].block_base = av_mallocz((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
                                             16 * 16 + 2 * chroma_eobs);
            if (!s->td[i].b_base || !s->td[i].block_base)
                return AVERROR(ENOMEM);
            s->td[i].uvblock_base[0] = s->td[i].block_base + 64 * 64 * bytesperpixel;
            s->td[i].uvblock_base[1] = s->td[i].uvblock_base[0] + chroma_blocks * bytesperpixel;
            s->td[i].eob_base = (uint8_t *) (s->td[i].uvblock_base[1] + chroma_blocks * bytesperpixel);
            s->td[i].uveob_base[0] = s->td[i].eob_base + 16 * 16;
            s->td[i].uveob_base[1] = s->td[i].uveob_base[0] + chroma_eobs;

            /* NOTE(review): same apparently-missing guard as above;
             * verify against upstream. */
            s->td[i].block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
            if (!s->td[i].block_structure)
                return AVERROR(ENOMEM);
            }
        }
    }
    /* Remember which layout the buffers were sized for. */
    s->block_alloc_using_2pass = s->s.frames[CUR_FRAME].uses_2pass;

    return 0;
}
371 
// The sign bit is at the end, not the start, of a bit sequence
{
    /* Read an n-bit magnitude, then a trailing sign bit (1 = negative). */
    int v = get_bits(gb, n);
    return get_bits1(gb) ? -v : v;
}
378 
379 static av_always_inline int inv_recenter_nonneg(int v, int m)
380 {
381  if (v > 2 * m)
382  return v;
383  if (v & 1)
384  return m - ((v + 1) >> 1);
385  return m + (v >> 1);
386 }
387 
388 // differential forward probability updates
// differential forward probability updates
/* Decode a differential update for an entropy-coder probability p in
 * [1, 255] and return the new probability, guaranteed to stay in range. */
static int update_prob(VPXRangeCoder *c, int p)
{
    /* Maps the decoded VLC index to the absolute probability delta; the
     * first 20 entries are the coarse "cheap" update values mentioned in
     * the comment below. */
    static const uint8_t inv_map_table[255] = {
          7,  20,  33,  46,  59,  72,  85,  98, 111, 124, 137, 150, 163, 176,
        189, 202, 215, 228, 241, 254,   1,   2,   3,   4,   5,   6,   8,   9,
         10,  11,  12,  13,  14,  15,  16,  17,  18,  19,  21,  22,  23,  24,
         25,  26,  27,  28,  29,  30,  31,  32,  34,  35,  36,  37,  38,  39,
         40,  41,  42,  43,  44,  45,  47,  48,  49,  50,  51,  52,  53,  54,
         55,  56,  57,  58,  60,  61,  62,  63,  64,  65,  66,  67,  68,  69,
         70,  71,  73,  74,  75,  76,  77,  78,  79,  80,  81,  82,  83,  84,
         86,  87,  88,  89,  90,  91,  92,  93,  94,  95,  96,  97,  99, 100,
        101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115,
        116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 127, 128, 129, 130,
        131, 132, 133, 134, 135, 136, 138, 139, 140, 141, 142, 143, 144, 145,
        146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
        161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
        177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 191,
        192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 206,
        207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221,
        222, 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236,
        237, 238, 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
        252, 253, 253,
    };
    int d;

    /* This code is trying to do a differential probability update. For a
     * current probability A in the range [1, 255], the difference to a new
     * probability of any value can be expressed differentially as 1-A, 255-A
     * where some part of this (absolute range) exists both in positive as
     * well as the negative part, whereas another part only exists in one
     * half. We're trying to code this shared part differentially, i.e.
     * times two where the value of the lowest bit specifies the sign, and
     * the single part is then coded on top of this. This absolute difference
     * then again has a value of [0, 254], but a bigger value in this range
     * indicates that we're further away from the original value A, so we
     * can code this as a VLC code, since higher values are increasingly
     * unlikely. The first 20 values in inv_map_table[] allow 'cheap, rough'
     * updates vs. the 'fine, exact' updates further down the range, which
     * adds one extra dimension to this differential update model. */

    /* Four VLC classes: 4 bits (+0), 4 bits (+16), 5 bits (+32), then a
     * 7-bit escape with one extension bit for values >= 65 (+64). */
    if (!vp89_rac_get(c)) {
        d = vp89_rac_get_uint(c, 4) + 0; /* '+ 0' kept for symmetry */
    } else if (!vp89_rac_get(c)) {
        d = vp89_rac_get_uint(c, 4) + 16;
    } else if (!vp89_rac_get(c)) {
        d = vp89_rac_get_uint(c, 5) + 32;
    } else {
        d = vp89_rac_get_uint(c, 7);
        if (d >= 65)
            d = (d << 1) - 65 + vp89_rac_get(c);
        d += 64;
        av_assert2(d < FF_ARRAY_ELEMS(inv_map_table));
    }

    /* Mirror the mapped delta around p so the result stays in [1, 255]. */
    return p <= 128 ? 1 + inv_recenter_nonneg(inv_map_table[d], p - 1) :
                    255 - inv_recenter_nonneg(inv_map_table[d], 255 - p);
}
446 
{
    /* NOTE(review): the initializers of this table (and of the pix_fmt
     * tables below) appear to have been lost in this copy; verify
     * against upstream. */
    static const enum AVColorSpace colorspaces[8] = {
    };
    VP9Context *s = avctx->priv_data;
    /* Profiles 0/1 are 8-bit only; profiles 2/3 signal 10 vs 12 bit. */
    int bits = avctx->profile <= 1 ? 0 : 1 + get_bits1(&s->gb); // 0:8, 1:10, 2:12

    s->bpp_index = bits;
    s->s.h.bpp = 8 + bits * 2;
    s->bytesperpixel = (7 + s->s.h.bpp) >> 3;
    avctx->colorspace = colorspaces[get_bits(&s->gb, 3)];
    if (avctx->colorspace == AVCOL_SPC_RGB) { // RGB = profile 1
        static const enum AVPixelFormat pix_fmt_rgb[3] = {
        };
        /* RGB is never chroma-subsampled and is always full range. */
        s->ss_h = s->ss_v = 0;
        avctx->color_range = AVCOL_RANGE_JPEG;
        s->pix_fmt = pix_fmt_rgb[bits];
        if (avctx->profile & 1) {
            if (get_bits1(&s->gb)) {
                av_log(avctx, AV_LOG_ERROR, "Reserved bit set in RGB\n");
                return AVERROR_INVALIDDATA;
            }
        } else {
            av_log(avctx, AV_LOG_ERROR, "RGB not supported in profile %d\n",
                   avctx->profile);
            return AVERROR_INVALIDDATA;
        }
    } else {
        static const enum AVPixelFormat pix_fmt_for_ss[3][2 /* v */][2 /* h */] = {
        };
        if (avctx->profile & 1) {
            /* Odd profiles (1/3) carry explicit subsampling bits. */
            s->ss_h = get_bits1(&s->gb);
            s->ss_v = get_bits1(&s->gb);
            s->pix_fmt = pix_fmt_for_ss[bits][s->ss_v][s->ss_h];
            if (s->pix_fmt == AV_PIX_FMT_YUV420P) {
                av_log(avctx, AV_LOG_ERROR, "YUV 4:2:0 not supported in profile %d\n",
                       avctx->profile);
                return AVERROR_INVALIDDATA;
            } else if (get_bits1(&s->gb)) {
                av_log(avctx, AV_LOG_ERROR, "Profile %d color details reserved bit set\n",
                       avctx->profile);
                return AVERROR_INVALIDDATA;
            }
        } else {
            /* Even profiles (0/2) are implicitly 4:2:0. */
            s->ss_h = s->ss_v = 1;
            s->pix_fmt = pix_fmt_for_ss[bits][1][1];
        }
    }

    return 0;
}
508 
510  const uint8_t *data, int size, int *ref)
511 {
512  VP9Context *s = avctx->priv_data;
513  int c, i, j, k, l, m, n, w, h, max, size2, ret, sharp;
514  int last_invisible;
515  const uint8_t *data2;
516 
517  /* general header */
518  if ((ret = init_get_bits8(&s->gb, data, size)) < 0) {
519  av_log(avctx, AV_LOG_ERROR, "Failed to initialize bitstream reader\n");
520  return ret;
521  }
522  if (get_bits(&s->gb, 2) != 0x2) { // frame marker
523  av_log(avctx, AV_LOG_ERROR, "Invalid frame marker\n");
524  return AVERROR_INVALIDDATA;
525  }
526  avctx->profile = get_bits1(&s->gb);
527  avctx->profile |= get_bits1(&s->gb) << 1;
528  if (avctx->profile == 3) avctx->profile += get_bits1(&s->gb);
529  if (avctx->profile > 3) {
530  av_log(avctx, AV_LOG_ERROR, "Profile %d is not yet supported\n", avctx->profile);
531  return AVERROR_INVALIDDATA;
532  }
533  s->s.h.profile = avctx->profile;
534  if (get_bits1(&s->gb)) {
535  *ref = get_bits(&s->gb, 3);
536  return 0;
537  }
538 
539  s->last_keyframe = s->s.h.keyframe;
540  s->s.h.keyframe = !get_bits1(&s->gb);
541 
542  last_invisible = s->s.h.invisible;
543  s->s.h.invisible = !get_bits1(&s->gb);
544  s->s.h.errorres = get_bits1(&s->gb);
545  s->s.h.use_last_frame_mvs = !s->s.h.errorres && !last_invisible;
546 
547  if (s->s.h.keyframe) {
548  if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
549  av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
550  return AVERROR_INVALIDDATA;
551  }
552  if ((ret = read_colorspace_details(avctx)) < 0)
553  return ret;
554  // for profile 1, here follows the subsampling bits
555  s->s.h.refreshrefmask = 0xff;
556  w = get_bits(&s->gb, 16) + 1;
557  h = get_bits(&s->gb, 16) + 1;
558  if (get_bits1(&s->gb)) // display size
559  skip_bits(&s->gb, 32);
560  } else {
561  s->s.h.intraonly = s->s.h.invisible ? get_bits1(&s->gb) : 0;
562  s->s.h.resetctx = s->s.h.errorres ? 0 : get_bits(&s->gb, 2);
563  if (s->s.h.intraonly) {
564  if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
565  av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
566  return AVERROR_INVALIDDATA;
567  }
568  if (avctx->profile >= 1) {
569  if ((ret = read_colorspace_details(avctx)) < 0)
570  return ret;
571  } else {
572  s->ss_h = s->ss_v = 1;
573  s->s.h.bpp = 8;
574  s->bpp_index = 0;
575  s->bytesperpixel = 1;
576  s->pix_fmt = AV_PIX_FMT_YUV420P;
577  avctx->colorspace = AVCOL_SPC_BT470BG;
578  avctx->color_range = AVCOL_RANGE_MPEG;
579  }
580  s->s.h.refreshrefmask = get_bits(&s->gb, 8);
581  w = get_bits(&s->gb, 16) + 1;
582  h = get_bits(&s->gb, 16) + 1;
583  if (get_bits1(&s->gb)) // display size
584  skip_bits(&s->gb, 32);
585  } else {
586  s->s.h.refreshrefmask = get_bits(&s->gb, 8);
587  s->s.h.refidx[0] = get_bits(&s->gb, 3);
588  s->s.h.signbias[0] = get_bits1(&s->gb) && !s->s.h.errorres;
589  s->s.h.refidx[1] = get_bits(&s->gb, 3);
590  s->s.h.signbias[1] = get_bits1(&s->gb) && !s->s.h.errorres;
591  s->s.h.refidx[2] = get_bits(&s->gb, 3);
592  s->s.h.signbias[2] = get_bits1(&s->gb) && !s->s.h.errorres;
593  if (!s->s.refs[s->s.h.refidx[0]].f->buf[0] ||
594  !s->s.refs[s->s.h.refidx[1]].f->buf[0] ||
595  !s->s.refs[s->s.h.refidx[2]].f->buf[0]) {
596  av_log(avctx, AV_LOG_ERROR, "Not all references are available\n");
597  return AVERROR_INVALIDDATA;
598  }
599  if (get_bits1(&s->gb)) {
600  w = s->s.refs[s->s.h.refidx[0]].f->width;
601  h = s->s.refs[s->s.h.refidx[0]].f->height;
602  } else if (get_bits1(&s->gb)) {
603  w = s->s.refs[s->s.h.refidx[1]].f->width;
604  h = s->s.refs[s->s.h.refidx[1]].f->height;
605  } else if (get_bits1(&s->gb)) {
606  w = s->s.refs[s->s.h.refidx[2]].f->width;
607  h = s->s.refs[s->s.h.refidx[2]].f->height;
608  } else {
609  w = get_bits(&s->gb, 16) + 1;
610  h = get_bits(&s->gb, 16) + 1;
611  }
612  // Note that in this code, "CUR_FRAME" is actually before we
613  // have formally allocated a frame, and thus actually represents
614  // the _last_ frame
615  s->s.h.use_last_frame_mvs &= s->s.frames[CUR_FRAME].tf.f->width == w &&
616  s->s.frames[CUR_FRAME].tf.f->height == h;
617  if (get_bits1(&s->gb)) // display size
618  skip_bits(&s->gb, 32);
619  s->s.h.highprecisionmvs = get_bits1(&s->gb);
620  s->s.h.filtermode = get_bits1(&s->gb) ? FILTER_SWITCHABLE :
621  get_bits(&s->gb, 2);
622  s->s.h.allowcompinter = s->s.h.signbias[0] != s->s.h.signbias[1] ||
623  s->s.h.signbias[0] != s->s.h.signbias[2];
624  if (s->s.h.allowcompinter) {
625  if (s->s.h.signbias[0] == s->s.h.signbias[1]) {
626  s->s.h.fixcompref = 2;
627  s->s.h.varcompref[0] = 0;
628  s->s.h.varcompref[1] = 1;
629  } else if (s->s.h.signbias[0] == s->s.h.signbias[2]) {
630  s->s.h.fixcompref = 1;
631  s->s.h.varcompref[0] = 0;
632  s->s.h.varcompref[1] = 2;
633  } else {
634  s->s.h.fixcompref = 0;
635  s->s.h.varcompref[0] = 1;
636  s->s.h.varcompref[1] = 2;
637  }
638  }
639  }
640  }
641  s->s.h.refreshctx = s->s.h.errorres ? 0 : get_bits1(&s->gb);
642  s->s.h.parallelmode = s->s.h.errorres ? 1 : get_bits1(&s->gb);
643  s->s.h.framectxid = c = get_bits(&s->gb, 2);
644  if (s->s.h.keyframe || s->s.h.intraonly)
645  s->s.h.framectxid = 0; // BUG: libvpx ignores this field in keyframes
646 
647  /* loopfilter header data */
648  if (s->s.h.keyframe || s->s.h.errorres || s->s.h.intraonly) {
649  // reset loopfilter defaults
650  s->s.h.lf_delta.ref[0] = 1;
651  s->s.h.lf_delta.ref[1] = 0;
652  s->s.h.lf_delta.ref[2] = -1;
653  s->s.h.lf_delta.ref[3] = -1;
654  s->s.h.lf_delta.mode[0] = 0;
655  s->s.h.lf_delta.mode[1] = 0;
656  memset(s->s.h.segmentation.feat, 0, sizeof(s->s.h.segmentation.feat));
657  }
658  s->s.h.filter.level = get_bits(&s->gb, 6);
659  sharp = get_bits(&s->gb, 3);
660  // if sharpness changed, reinit lim/mblim LUTs. if it didn't change, keep
661  // the old cache values since they are still valid
662  if (s->s.h.filter.sharpness != sharp) {
663  for (i = 1; i <= 63; i++) {
664  int limit = i;
665 
666  if (sharp > 0) {
667  limit >>= (sharp + 3) >> 2;
668  limit = FFMIN(limit, 9 - sharp);
669  }
670  limit = FFMAX(limit, 1);
671 
672  s->filter_lut.lim_lut[i] = limit;
673  s->filter_lut.mblim_lut[i] = 2 * (i + 2) + limit;
674  }
675  }
676  s->s.h.filter.sharpness = sharp;
677  if ((s->s.h.lf_delta.enabled = get_bits1(&s->gb))) {
678  if ((s->s.h.lf_delta.updated = get_bits1(&s->gb))) {
679  for (i = 0; i < 4; i++)
680  if (get_bits1(&s->gb))
681  s->s.h.lf_delta.ref[i] = get_sbits_inv(&s->gb, 6);
682  for (i = 0; i < 2; i++)
683  if (get_bits1(&s->gb))
684  s->s.h.lf_delta.mode[i] = get_sbits_inv(&s->gb, 6);
685  }
686  }
687 
688  /* quantization header data */
689  s->s.h.yac_qi = get_bits(&s->gb, 8);
690  s->s.h.ydc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
691  s->s.h.uvdc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
692  s->s.h.uvac_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
693  s->s.h.lossless = s->s.h.yac_qi == 0 && s->s.h.ydc_qdelta == 0 &&
694  s->s.h.uvdc_qdelta == 0 && s->s.h.uvac_qdelta == 0;
695  if (s->s.h.lossless)
697 
698  /* segmentation header info */
699  if ((s->s.h.segmentation.enabled = get_bits1(&s->gb))) {
700  if ((s->s.h.segmentation.update_map = get_bits1(&s->gb))) {
701  for (i = 0; i < 7; i++)
702  s->s.h.segmentation.prob[i] = get_bits1(&s->gb) ?
703  get_bits(&s->gb, 8) : 255;
704  if ((s->s.h.segmentation.temporal = get_bits1(&s->gb)))
705  for (i = 0; i < 3; i++)
706  s->s.h.segmentation.pred_prob[i] = get_bits1(&s->gb) ?
707  get_bits(&s->gb, 8) : 255;
708  }
709 
710  if (get_bits1(&s->gb)) {
711  s->s.h.segmentation.absolute_vals = get_bits1(&s->gb);
712  for (i = 0; i < 8; i++) {
713  if ((s->s.h.segmentation.feat[i].q_enabled = get_bits1(&s->gb)))
714  s->s.h.segmentation.feat[i].q_val = get_sbits_inv(&s->gb, 8);
715  if ((s->s.h.segmentation.feat[i].lf_enabled = get_bits1(&s->gb)))
716  s->s.h.segmentation.feat[i].lf_val = get_sbits_inv(&s->gb, 6);
717  if ((s->s.h.segmentation.feat[i].ref_enabled = get_bits1(&s->gb)))
718  s->s.h.segmentation.feat[i].ref_val = get_bits(&s->gb, 2);
719  s->s.h.segmentation.feat[i].skip_enabled = get_bits1(&s->gb);
720  }
721  }
722  }
723 
724  // set qmul[] based on Y/UV, AC/DC and segmentation Q idx deltas
725  for (i = 0; i < (s->s.h.segmentation.enabled ? 8 : 1); i++) {
726  int qyac, qydc, quvac, quvdc, lflvl, sh;
727 
728  if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].q_enabled) {
729  if (s->s.h.segmentation.absolute_vals)
730  qyac = av_clip_uintp2(s->s.h.segmentation.feat[i].q_val, 8);
731  else
732  qyac = av_clip_uintp2(s->s.h.yac_qi + s->s.h.segmentation.feat[i].q_val, 8);
733  } else {
734  qyac = s->s.h.yac_qi;
735  }
736  qydc = av_clip_uintp2(qyac + s->s.h.ydc_qdelta, 8);
737  quvdc = av_clip_uintp2(qyac + s->s.h.uvdc_qdelta, 8);
738  quvac = av_clip_uintp2(qyac + s->s.h.uvac_qdelta, 8);
739  qyac = av_clip_uintp2(qyac, 8);
740 
741  s->s.h.segmentation.feat[i].qmul[0][0] = ff_vp9_dc_qlookup[s->bpp_index][qydc];
742  s->s.h.segmentation.feat[i].qmul[0][1] = ff_vp9_ac_qlookup[s->bpp_index][qyac];
743  s->s.h.segmentation.feat[i].qmul[1][0] = ff_vp9_dc_qlookup[s->bpp_index][quvdc];
744  s->s.h.segmentation.feat[i].qmul[1][1] = ff_vp9_ac_qlookup[s->bpp_index][quvac];
745 
746  sh = s->s.h.filter.level >= 32;
747  if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].lf_enabled) {
748  if (s->s.h.segmentation.absolute_vals)
749  lflvl = av_clip_uintp2(s->s.h.segmentation.feat[i].lf_val, 6);
750  else
751  lflvl = av_clip_uintp2(s->s.h.filter.level + s->s.h.segmentation.feat[i].lf_val, 6);
752  } else {
753  lflvl = s->s.h.filter.level;
754  }
755  if (s->s.h.lf_delta.enabled) {
756  s->s.h.segmentation.feat[i].lflvl[0][0] =
757  s->s.h.segmentation.feat[i].lflvl[0][1] =
758  av_clip_uintp2(lflvl + (s->s.h.lf_delta.ref[0] * (1 << sh)), 6);
759  for (j = 1; j < 4; j++) {
760  s->s.h.segmentation.feat[i].lflvl[j][0] =
761  av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
762  s->s.h.lf_delta.mode[0]) * (1 << sh)), 6);
763  s->s.h.segmentation.feat[i].lflvl[j][1] =
764  av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
765  s->s.h.lf_delta.mode[1]) * (1 << sh)), 6);
766  }
767  } else {
768  memset(s->s.h.segmentation.feat[i].lflvl, lflvl,
769  sizeof(s->s.h.segmentation.feat[i].lflvl));
770  }
771  }
772 
773  /* tiling info */
774  if ((ret = update_size(avctx, w, h)) < 0) {
775  av_log(avctx, AV_LOG_ERROR, "Failed to initialize decoder for %dx%d @ %d\n",
776  w, h, s->pix_fmt);
777  return ret;
778  }
779  for (s->s.h.tiling.log2_tile_cols = 0;
780  s->sb_cols > (64 << s->s.h.tiling.log2_tile_cols);
781  s->s.h.tiling.log2_tile_cols++) ;
782  for (max = 0; (s->sb_cols >> max) >= 4; max++) ;
783  max = FFMAX(0, max - 1);
784  while (max > s->s.h.tiling.log2_tile_cols) {
785  if (get_bits1(&s->gb))
786  s->s.h.tiling.log2_tile_cols++;
787  else
788  break;
789  }
790  s->s.h.tiling.log2_tile_rows = decode012(&s->gb);
791  s->s.h.tiling.tile_rows = 1 << s->s.h.tiling.log2_tile_rows;
792  if (s->s.h.tiling.tile_cols != (1 << s->s.h.tiling.log2_tile_cols)) {
793  int n_range_coders;
794  VPXRangeCoder *rc;
795 
796  if (s->td) {
797  for (i = 0; i < s->active_tile_cols; i++)
798  vp9_tile_data_free(&s->td[i]);
799  av_freep(&s->td);
800  }
801 
802  s->s.h.tiling.tile_cols = 1 << s->s.h.tiling.log2_tile_cols;
803  s->active_tile_cols = avctx->active_thread_type == FF_THREAD_SLICE ?
804  s->s.h.tiling.tile_cols : 1;
805  vp9_alloc_entries(avctx, s->sb_rows);
806  if (avctx->active_thread_type == FF_THREAD_SLICE) {
807  n_range_coders = 4; // max_tile_rows
808  } else {
809  n_range_coders = s->s.h.tiling.tile_cols;
810  }
811  s->td = av_calloc(s->active_tile_cols, sizeof(VP9TileData) +
812  n_range_coders * sizeof(VPXRangeCoder));
813  if (!s->td)
814  return AVERROR(ENOMEM);
815  rc = (VPXRangeCoder *) &s->td[s->active_tile_cols];
816  for (i = 0; i < s->active_tile_cols; i++) {
817  s->td[i].s = s;
818  s->td[i].c_b = rc;
819  rc += n_range_coders;
820  }
821  }
822 
823  /* check reference frames */
824  if (!s->s.h.keyframe && !s->s.h.intraonly) {
825  int valid_ref_frame = 0;
826  for (i = 0; i < 3; i++) {
827  AVFrame *ref = s->s.refs[s->s.h.refidx[i]].f;
828  int refw = ref->width, refh = ref->height;
829 
830  if (ref->format != avctx->pix_fmt) {
831  av_log(avctx, AV_LOG_ERROR,
832  "Ref pixfmt (%s) did not match current frame (%s)",
833  av_get_pix_fmt_name(ref->format),
834  av_get_pix_fmt_name(avctx->pix_fmt));
835  return AVERROR_INVALIDDATA;
836  } else if (refw == w && refh == h) {
837  s->mvscale[i][0] = s->mvscale[i][1] = 0;
838  } else {
839  /* Check to make sure at least one of frames that */
840  /* this frame references has valid dimensions */
841  if (w * 2 < refw || h * 2 < refh || w > 16 * refw || h > 16 * refh) {
842  av_log(avctx, AV_LOG_WARNING,
843  "Invalid ref frame dimensions %dx%d for frame size %dx%d\n",
844  refw, refh, w, h);
845  s->mvscale[i][0] = s->mvscale[i][1] = REF_INVALID_SCALE;
846  continue;
847  }
848  s->mvscale[i][0] = (refw << 14) / w;
849  s->mvscale[i][1] = (refh << 14) / h;
850  s->mvstep[i][0] = 16 * s->mvscale[i][0] >> 14;
851  s->mvstep[i][1] = 16 * s->mvscale[i][1] >> 14;
852  }
853  valid_ref_frame++;
854  }
855  if (!valid_ref_frame) {
856  av_log(avctx, AV_LOG_ERROR, "No valid reference frame is found, bitstream not supported\n");
857  return AVERROR_INVALIDDATA;
858  }
859  }
860 
861  if (s->s.h.keyframe || s->s.h.errorres || (s->s.h.intraonly && s->s.h.resetctx == 3)) {
862  s->prob_ctx[0].p = s->prob_ctx[1].p = s->prob_ctx[2].p =
863  s->prob_ctx[3].p = ff_vp9_default_probs;
864  memcpy(s->prob_ctx[0].coef, ff_vp9_default_coef_probs,
865  sizeof(ff_vp9_default_coef_probs));
866  memcpy(s->prob_ctx[1].coef, ff_vp9_default_coef_probs,
867  sizeof(ff_vp9_default_coef_probs));
868  memcpy(s->prob_ctx[2].coef, ff_vp9_default_coef_probs,
869  sizeof(ff_vp9_default_coef_probs));
870  memcpy(s->prob_ctx[3].coef, ff_vp9_default_coef_probs,
871  sizeof(ff_vp9_default_coef_probs));
872  } else if (s->s.h.intraonly && s->s.h.resetctx == 2) {
873  s->prob_ctx[c].p = ff_vp9_default_probs;
874  memcpy(s->prob_ctx[c].coef, ff_vp9_default_coef_probs,
875  sizeof(ff_vp9_default_coef_probs));
876  }
877 
878  // next 16 bits is size of the rest of the header (arith-coded)
879  s->s.h.compressed_header_size = size2 = get_bits(&s->gb, 16);
880  s->s.h.uncompressed_header_size = (get_bits_count(&s->gb) + 7) / 8;
881 
882  data2 = align_get_bits(&s->gb);
883  if (size2 > size - (data2 - data)) {
884  av_log(avctx, AV_LOG_ERROR, "Invalid compressed header size\n");
885  return AVERROR_INVALIDDATA;
886  }
887  ret = ff_vpx_init_range_decoder(&s->c, data2, size2);
888  if (ret < 0)
889  return ret;
890 
891  if (vpx_rac_get_prob_branchy(&s->c, 128)) { // marker bit
892  av_log(avctx, AV_LOG_ERROR, "Marker bit was set\n");
893  return AVERROR_INVALIDDATA;
894  }
895 
896  for (i = 0; i < s->active_tile_cols; i++) {
897  if (s->s.h.keyframe || s->s.h.intraonly) {
898  memset(s->td[i].counts.coef, 0, sizeof(s->td[0].counts.coef));
899  memset(s->td[i].counts.eob, 0, sizeof(s->td[0].counts.eob));
900  } else {
901  memset(&s->td[i].counts, 0, sizeof(s->td[0].counts));
902  }
903  s->td[i].nb_block_structure = 0;
904  }
905 
906  /* FIXME is it faster to not copy here, but do it down in the fw updates
907  * as explicit copies if the fw update is missing (and skip the copy upon
908  * fw update)? */
909  s->prob.p = s->prob_ctx[c].p;
910 
911  // txfm updates
912  if (s->s.h.lossless) {
913  s->s.h.txfmmode = TX_4X4;
914  } else {
915  s->s.h.txfmmode = vp89_rac_get_uint(&s->c, 2);
916  if (s->s.h.txfmmode == 3)
917  s->s.h.txfmmode += vp89_rac_get(&s->c);
918 
919  if (s->s.h.txfmmode == TX_SWITCHABLE) {
920  for (i = 0; i < 2; i++)
921  if (vpx_rac_get_prob_branchy(&s->c, 252))
922  s->prob.p.tx8p[i] = update_prob(&s->c, s->prob.p.tx8p[i]);
923  for (i = 0; i < 2; i++)
924  for (j = 0; j < 2; j++)
925  if (vpx_rac_get_prob_branchy(&s->c, 252))
926  s->prob.p.tx16p[i][j] =
927  update_prob(&s->c, s->prob.p.tx16p[i][j]);
928  for (i = 0; i < 2; i++)
929  for (j = 0; j < 3; j++)
930  if (vpx_rac_get_prob_branchy(&s->c, 252))
931  s->prob.p.tx32p[i][j] =
932  update_prob(&s->c, s->prob.p.tx32p[i][j]);
933  }
934  }
935 
936  // coef updates
937  for (i = 0; i < 4; i++) {
938  uint8_t (*ref)[2][6][6][3] = s->prob_ctx[c].coef[i];
939  if (vp89_rac_get(&s->c)) {
940  for (j = 0; j < 2; j++)
941  for (k = 0; k < 2; k++)
942  for (l = 0; l < 6; l++)
943  for (m = 0; m < 6; m++) {
944  uint8_t *p = s->prob.coef[i][j][k][l][m];
945  uint8_t *r = ref[j][k][l][m];
946  if (m >= 3 && l == 0) // dc only has 3 pt
947  break;
948  for (n = 0; n < 3; n++) {
949  if (vpx_rac_get_prob_branchy(&s->c, 252))
950  p[n] = update_prob(&s->c, r[n]);
951  else
952  p[n] = r[n];
953  }
954  memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
955  }
956  } else {
957  for (j = 0; j < 2; j++)
958  for (k = 0; k < 2; k++)
959  for (l = 0; l < 6; l++)
960  for (m = 0; m < 6; m++) {
961  uint8_t *p = s->prob.coef[i][j][k][l][m];
962  uint8_t *r = ref[j][k][l][m];
963  if (m > 3 && l == 0) // dc only has 3 pt
964  break;
965  memcpy(p, r, 3);
966  memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
967  }
968  }
969  if (s->s.h.txfmmode == i)
970  break;
971  }
972 
973  // mode updates
974  for (i = 0; i < 3; i++)
975  if (vpx_rac_get_prob_branchy(&s->c, 252))
976  s->prob.p.skip[i] = update_prob(&s->c, s->prob.p.skip[i]);
977  if (!s->s.h.keyframe && !s->s.h.intraonly) {
978  for (i = 0; i < 7; i++)
979  for (j = 0; j < 3; j++)
980  if (vpx_rac_get_prob_branchy(&s->c, 252))
981  s->prob.p.mv_mode[i][j] =
982  update_prob(&s->c, s->prob.p.mv_mode[i][j]);
983 
984  if (s->s.h.filtermode == FILTER_SWITCHABLE)
985  for (i = 0; i < 4; i++)
986  for (j = 0; j < 2; j++)
987  if (vpx_rac_get_prob_branchy(&s->c, 252))
988  s->prob.p.filter[i][j] =
989  update_prob(&s->c, s->prob.p.filter[i][j]);
990 
991  for (i = 0; i < 4; i++)
992  if (vpx_rac_get_prob_branchy(&s->c, 252))
993  s->prob.p.intra[i] = update_prob(&s->c, s->prob.p.intra[i]);
994 
995  if (s->s.h.allowcompinter) {
996  s->s.h.comppredmode = vp89_rac_get(&s->c);
997  if (s->s.h.comppredmode)
998  s->s.h.comppredmode += vp89_rac_get(&s->c);
999  if (s->s.h.comppredmode == PRED_SWITCHABLE)
1000  for (i = 0; i < 5; i++)
1001  if (vpx_rac_get_prob_branchy(&s->c, 252))
1002  s->prob.p.comp[i] =
1003  update_prob(&s->c, s->prob.p.comp[i]);
1004  } else {
1005  s->s.h.comppredmode = PRED_SINGLEREF;
1006  }
1007 
1008  if (s->s.h.comppredmode != PRED_COMPREF) {
1009  for (i = 0; i < 5; i++) {
1010  if (vpx_rac_get_prob_branchy(&s->c, 252))
1011  s->prob.p.single_ref[i][0] =
1012  update_prob(&s->c, s->prob.p.single_ref[i][0]);
1013  if (vpx_rac_get_prob_branchy(&s->c, 252))
1014  s->prob.p.single_ref[i][1] =
1015  update_prob(&s->c, s->prob.p.single_ref[i][1]);
1016  }
1017  }
1018 
1019  if (s->s.h.comppredmode != PRED_SINGLEREF) {
1020  for (i = 0; i < 5; i++)
1021  if (vpx_rac_get_prob_branchy(&s->c, 252))
1022  s->prob.p.comp_ref[i] =
1023  update_prob(&s->c, s->prob.p.comp_ref[i]);
1024  }
1025 
1026  for (i = 0; i < 4; i++)
1027  for (j = 0; j < 9; j++)
1028  if (vpx_rac_get_prob_branchy(&s->c, 252))
1029  s->prob.p.y_mode[i][j] =
1030  update_prob(&s->c, s->prob.p.y_mode[i][j]);
1031 
1032  for (i = 0; i < 4; i++)
1033  for (j = 0; j < 4; j++)
1034  for (k = 0; k < 3; k++)
1035  if (vpx_rac_get_prob_branchy(&s->c, 252))
1036  s->prob.p.partition[3 - i][j][k] =
1037  update_prob(&s->c,
1038  s->prob.p.partition[3 - i][j][k]);
1039 
1040  // mv fields don't use the update_prob subexp model for some reason
1041  for (i = 0; i < 3; i++)
1042  if (vpx_rac_get_prob_branchy(&s->c, 252))
1043  s->prob.p.mv_joint[i] = (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1044 
1045  for (i = 0; i < 2; i++) {
1046  if (vpx_rac_get_prob_branchy(&s->c, 252))
1047  s->prob.p.mv_comp[i].sign =
1048  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1049 
1050  for (j = 0; j < 10; j++)
1051  if (vpx_rac_get_prob_branchy(&s->c, 252))
1052  s->prob.p.mv_comp[i].classes[j] =
1053  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1054 
1055  if (vpx_rac_get_prob_branchy(&s->c, 252))
1056  s->prob.p.mv_comp[i].class0 =
1057  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1058 
1059  for (j = 0; j < 10; j++)
1060  if (vpx_rac_get_prob_branchy(&s->c, 252))
1061  s->prob.p.mv_comp[i].bits[j] =
1062  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1063  }
1064 
1065  for (i = 0; i < 2; i++) {
1066  for (j = 0; j < 2; j++)
1067  for (k = 0; k < 3; k++)
1068  if (vpx_rac_get_prob_branchy(&s->c, 252))
1069  s->prob.p.mv_comp[i].class0_fp[j][k] =
1070  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1071 
1072  for (j = 0; j < 3; j++)
1073  if (vpx_rac_get_prob_branchy(&s->c, 252))
1074  s->prob.p.mv_comp[i].fp[j] =
1075  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1076  }
1077 
1078  if (s->s.h.highprecisionmvs) {
1079  for (i = 0; i < 2; i++) {
1080  if (vpx_rac_get_prob_branchy(&s->c, 252))
1081  s->prob.p.mv_comp[i].class0_hp =
1082  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1083 
1084  if (vpx_rac_get_prob_branchy(&s->c, 252))
1085  s->prob.p.mv_comp[i].hp =
1086  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1087  }
1088  }
1089  }
1090 
1091  return (data2 - data) + size2;
1092 }
1093 
/* Recursively parse and decode one superblock (64x64 when bl == BL_64X64),
 * reading the partition decision for each level from the tile's range coder
 * and descending into sub-blocks.
 *
 * `c` combines the above/left partition contexts into a 2-bit context index;
 * `p` points at the partition probabilities for this level+context (key /
 * intra-only frames use the fixed default table).  `hbs` is half the block
 * size in 8x8-pixel units; yoff/uvoff are byte offsets into the luma/chroma
 * planes of the current frame.
 *
 * NOTE(review): this listing is a doxygen extraction; original source lines
 * 1109 and 1113 — which read `bp` from the range coder via the partition
 * tree — are missing below. Confirm against upstream vp9.c before reusing. */
1094 static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl,
1095  ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
1096 {
1097  const VP9Context *s = td->s;
1098  int c = ((s->above_partition_ctx[col] >> (3 - bl)) & 1) |
1099  (((td->left_partition_ctx[row & 0x7] >> (3 - bl)) & 1) << 1);
1100  const uint8_t *p = s->s.h.keyframe || s->s.h.intraonly ? ff_vp9_default_kf_partition_probs[bl][c] :
1101  s->prob.p.partition[bl][c];
1102  enum BlockPartition bp;
1103  ptrdiff_t hbs = 4 >> bl;
1104  AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
1105  ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
1106  int bytesperpixel = s->bytesperpixel;
1107 
1108  if (bl == BL_8X8) {
// NOTE(review): extraction dropped original line 1109 here (assignment of
// `bp` read from the range coder) — verify against upstream.
1110  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1111  } else if (col + hbs < s->cols) { // FIXME why not <=?
1112  if (row + hbs < s->rows) { // FIXME why not <=?
// NOTE(review): extraction dropped original line 1113 here (assignment of
// `bp` read from the range coder) — verify against upstream.
1114  switch (bp) {
1115  case PARTITION_NONE:
1116  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1117  break;
1118  case PARTITION_H:
// top half, then advance the offsets one half-block down and do the bottom
1119  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1120  yoff += hbs * 8 * y_stride;
1121  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1122  ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, bl, bp);
1123  break;
1124  case PARTITION_V:
// left half, then advance the offsets one half-block right and do the right
1125  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1126  yoff += hbs * 8 * bytesperpixel;
1127  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1128  ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, bl, bp);
1129  break;
1130  case PARTITION_SPLIT:
// recurse into the four quadrants in raster order (TL, TR, BL, BR)
1131  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1132  decode_sb(td, row, col + hbs, lflvl,
1133  yoff + 8 * hbs * bytesperpixel,
1134  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1135  yoff += hbs * 8 * y_stride;
1136  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1137  decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1138  decode_sb(td, row + hbs, col + hbs, lflvl,
1139  yoff + 8 * hbs * bytesperpixel,
1140  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1141  break;
1142  default:
1143  av_assert0(0);
1144  }
// right edge of the frame: only NONE-equivalent H or SPLIT are possible
1145  } else if (vpx_rac_get_prob_branchy(td->c, p[1])) {
1146  bp = PARTITION_SPLIT;
1147  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1148  decode_sb(td, row, col + hbs, lflvl,
1149  yoff + 8 * hbs * bytesperpixel,
1150  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1151  } else {
1152  bp = PARTITION_H;
1153  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1154  }
// bottom edge of the frame: only V or SPLIT are possible
1155  } else if (row + hbs < s->rows) { // FIXME why not <=?
1156  if (vpx_rac_get_prob_branchy(td->c, p[2])) {
1157  bp = PARTITION_SPLIT;
1158  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1159  yoff += hbs * 8 * y_stride;
1160  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1161  decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1162  } else {
1163  bp = PARTITION_V;
1164  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1165  }
1166  } else {
// bottom-right corner: forced split, no bit read
1167  bp = PARTITION_SPLIT;
1168  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1169  }
// update adaptive-probability statistics for this partition decision
1170  td->counts.partition[bl][c][bp]++;
1171 }
1172 
1173 static void decode_sb_mem(VP9TileData *td, int row, int col, VP9Filter *lflvl,
1174  ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
1175 {
1176  const VP9Context *s = td->s;
1177  VP9Block *b = td->b;
1178  ptrdiff_t hbs = 4 >> bl;
1179  AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
1180  ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
1181  int bytesperpixel = s->bytesperpixel;
1182 
1183  if (bl == BL_8X8) {
1184  av_assert2(b->bl == BL_8X8);
1185  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
1186  } else if (td->b->bl == bl) {
1187  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
1188  if (b->bp == PARTITION_H && row + hbs < s->rows) {
1189  yoff += hbs * 8 * y_stride;
1190  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1191  ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, b->bl, b->bp);
1192  } else if (b->bp == PARTITION_V && col + hbs < s->cols) {
1193  yoff += hbs * 8 * bytesperpixel;
1194  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1195  ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, b->bl, b->bp);
1196  }
1197  } else {
1198  decode_sb_mem(td, row, col, lflvl, yoff, uvoff, bl + 1);
1199  if (col + hbs < s->cols) { // FIXME why not <=?
1200  if (row + hbs < s->rows) {
1201  decode_sb_mem(td, row, col + hbs, lflvl, yoff + 8 * hbs * bytesperpixel,
1202  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1203  yoff += hbs * 8 * y_stride;
1204  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1205  decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1206  decode_sb_mem(td, row + hbs, col + hbs, lflvl,
1207  yoff + 8 * hbs * bytesperpixel,
1208  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1209  } else {
1210  yoff += hbs * 8 * bytesperpixel;
1211  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1212  decode_sb_mem(td, row, col + hbs, lflvl, yoff, uvoff, bl + 1);
1213  }
1214  } else if (row + hbs < s->rows) {
1215  yoff += hbs * 8 * y_stride;
1216  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1217  decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1218  }
1219  }
1220 }
1221 
/* Compute the pixel-row/column range [*start, *end) covered by tile `idx`
 * out of 2^log2_n tiles, given `n` superblocks along that dimension.
 * The superblock boundaries are clamped to `n` and converted to pixels
 * (8 pixels per superblock unit). */
static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
{
    int sb_start = (idx * n) >> log2_n;
    int sb_end   = ((idx + 1) * n) >> log2_n;

    if (sb_start > n)
        sb_start = n;
    if (sb_end > n)
        sb_end = n;

    *start = sb_start * 8;
    *end   = sb_end * 8;
}
1229 
/* Free per-frame scratch buffers: the backed-up intra-prediction row
 * (intra_pred_data[0] owns the allocation) and each tile thread's
 * per-tile data.
 * NOTE(review): the function's signature line was lost in extraction;
 * from the call site in vp9_decode_free it is presumably
 * `static void free_buffers(VP9Context *s)` — confirm against upstream. */
1231 {
1232  int i;
1233 
1234  av_freep(&s->intra_pred_data[0]);
1235  for (i = 0; i < s->active_tile_cols; i++)
1236  vp9_tile_data_free(&s->td[i]);
1237 }
1238 
/* Decoder teardown: release the three internal frames, the eight reference
 * slots (current and pending), the extradata buffer pool, the per-tile
 * buffers and, under slice threading, the progress bookkeeping.
 * NOTE(review): the signature line was lost in extraction; from the FFCodec
 * `.close` entry it is presumably
 * `static av_cold int vp9_decode_free(AVCodecContext *avctx)` — confirm. */
1240 {
1241  VP9Context *s = avctx->priv_data;
1242  int i;
1243 
1244  for (i = 0; i < 3; i++) {
1245  vp9_frame_unref(&s->s.frames[i]);
1246  av_frame_free(&s->s.frames[i].tf.f);
1247  }
1248  av_buffer_pool_uninit(&s->frame_extradata_pool);
1249  for (i = 0; i < 8; i++) {
1250  ff_thread_release_ext_buffer(&s->s.refs[i]);
1251  av_frame_free(&s->s.refs[i].f);
1252  ff_thread_release_ext_buffer(&s->next_refs[i]);
1253  av_frame_free(&s->next_refs[i].f);
1254  }
1255 
1256  free_buffers(s);
1257 #if HAVE_THREADS
// slice threading uses atomic row-progress entries and a mutex/cond pair
1258  av_freep(&s->entries);
1259  ff_pthread_free(s, vp9_context_offsets);
1260 #endif
1261  av_freep(&s->td);
1262  return 0;
1263 }
1264 
/* Single-threaded tile decode loop.  For each tile row: initialise one
 * range decoder per tile column from the tile-size headers, then decode
 * superblock rows across all tile columns, back up the pre-loopfilter
 * bottom row for next-row intra prediction, run the loop filter for the
 * row, and report progress for frame threading.  Returns 0 or a negative
 * AVERROR.  Kept byte-identical: the statement order (range-coder reads,
 * two-pass state, offset arithmetic) is load-bearing. */
1265 static int decode_tiles(AVCodecContext *avctx,
1266  const uint8_t *data, int size)
1267 {
1268  VP9Context *s = avctx->priv_data;
1269  VP9TileData *td = &s->td[0];
1270  int row, col, tile_row, tile_col, ret;
1271  int bytesperpixel;
1272  int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
1273  AVFrame *f;
1274  ptrdiff_t yoff, uvoff, ls_y, ls_uv;
1275 
1276  f = s->s.frames[CUR_FRAME].tf.f;
1277  ls_y = f->linesize[0];
1278  ls_uv =f->linesize[1];
1279  bytesperpixel = s->bytesperpixel;
1280 
1281  yoff = uvoff = 0;
1282  for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
1283  set_tile_offset(&tile_row_start, &tile_row_end,
1284  tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);
1285 
// set up one range decoder per tile column; every tile except the very
// last is preceded by a 32-bit big-endian size field
1286  for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
1287  int64_t tile_size;
1288 
1289  if (tile_col == s->s.h.tiling.tile_cols - 1 &&
1290  tile_row == s->s.h.tiling.tile_rows - 1) {
1291  tile_size = size;
1292  } else {
1293  tile_size = AV_RB32(data);
1294  data += 4;
1295  size -= 4;
1296  }
1297  if (tile_size > size)
1298  return AVERROR_INVALIDDATA;
1299  ret = ff_vpx_init_range_decoder(&td->c_b[tile_col], data, tile_size);
1300  if (ret < 0)
1301  return ret;
1302  if (vpx_rac_get_prob_branchy(&td->c_b[tile_col], 128)) // marker bit
1303  return AVERROR_INVALIDDATA;
1304  data += tile_size;
1305  size -= tile_size;
1306  }
1307 
1308  for (row = tile_row_start; row < tile_row_end;
1309  row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
1310  VP9Filter *lflvl_ptr = s->lflvl;
1311  ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
1312 
1313  for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
1314  set_tile_offset(&tile_col_start, &tile_col_end,
1315  tile_col, s->s.h.tiling.log2_tile_cols, s->sb_cols);
1316  td->tile_col_start = tile_col_start;
// pass 2 replays stored blocks, so left-context resets and the range
// coder only apply to parsing passes (0 and 1)
1317  if (s->pass != 2) {
1318  memset(td->left_partition_ctx, 0, 8);
1319  memset(td->left_skip_ctx, 0, 8);
1320  if (s->s.h.keyframe || s->s.h.intraonly) {
1321  memset(td->left_mode_ctx, DC_PRED, 16);
1322  } else {
1323  memset(td->left_mode_ctx, NEARESTMV, 8);
1324  }
1325  memset(td->left_y_nnz_ctx, 0, 16);
1326  memset(td->left_uv_nnz_ctx, 0, 32);
1327  memset(td->left_segpred_ctx, 0, 8);
1328 
1329  td->c = &td->c_b[tile_col];
1330  }
1331 
1332  for (col = tile_col_start;
1333  col < tile_col_end;
1334  col += 8, yoff2 += 64 * bytesperpixel,
1335  uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1336  // FIXME integrate with lf code (i.e. zero after each
1337  // use, similar to invtxfm coefficients, or similar)
1338  if (s->pass != 1) {
1339  memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
1340  }
1341 
1342  if (s->pass == 2) {
1343  decode_sb_mem(td, row, col, lflvl_ptr,
1344  yoff2, uvoff2, BL_64X64);
1345  } else {
// bail out if the range coder already ran past its buffer
1346  if (vpx_rac_is_end(td->c)) {
1347  return AVERROR_INVALIDDATA;
1348  }
1349  decode_sb(td, row, col, lflvl_ptr,
1350  yoff2, uvoff2, BL_64X64);
1351  }
1352  }
1353  }
1354 
// pass 1 only parses; no reconstruction to back up or filter
1355  if (s->pass == 1)
1356  continue;
1357 
1358  // backup pre-loopfilter reconstruction data for intra
1359  // prediction of next row of sb64s
1360  if (row + 8 < s->rows) {
1361  memcpy(s->intra_pred_data[0],
1362  f->data[0] + yoff + 63 * ls_y,
1363  8 * s->cols * bytesperpixel);
1364  memcpy(s->intra_pred_data[1],
1365  f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
1366  8 * s->cols * bytesperpixel >> s->ss_h);
1367  memcpy(s->intra_pred_data[2],
1368  f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
1369  8 * s->cols * bytesperpixel >> s->ss_h);
1370  }
1371 
1372  // loopfilter one row
1373  if (s->s.h.filter.level) {
1374  yoff2 = yoff;
1375  uvoff2 = uvoff;
1376  lflvl_ptr = s->lflvl;
1377  for (col = 0; col < s->cols;
1378  col += 8, yoff2 += 64 * bytesperpixel,
1379  uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1380  ff_vp9_loopfilter_sb(avctx, lflvl_ptr, row, col,
1381  yoff2, uvoff2);
1382  }
1383  }
1384 
1385  // FIXME maybe we can make this more finegrained by running the
1386  // loopfilter per-block instead of after each sbrow
1387  // In fact that would also make intra pred left preparation easier?
1388  ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, row >> 3, 0);
1389  }
1390  }
1391  return 0;
1392 }
1393 
1394 #if HAVE_THREADS
/* Slice-threading worker: decodes one tile column (jobnr selects it) across
 * all tile rows.  Each worker owns a disjoint horizontal strip of the
 * frame, keeps its own left contexts, backs up its strip's pre-loopfilter
 * bottom row for intra prediction, and signals per-sb-row completion so
 * loopfilter_proc (running on the main thread) can follow behind.
 * Kept byte-identical: offset arithmetic and context resets are
 * order-sensitive. */
1395 static av_always_inline
1396 int decode_tiles_mt(AVCodecContext *avctx, void *tdata, int jobnr,
1397  int threadnr)
1398 {
1399  VP9Context *s = avctx->priv_data;
1400  VP9TileData *td = &s->td[jobnr];
1401  ptrdiff_t uvoff, yoff, ls_y, ls_uv;
1402  int bytesperpixel = s->bytesperpixel, row, col, tile_row;
1403  unsigned tile_cols_len;
1404  int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
1405  VP9Filter *lflvl_ptr_base;
1406  AVFrame *f;
1407 
1408  f = s->s.frames[CUR_FRAME].tf.f;
1409  ls_y = f->linesize[0];
1410  ls_uv =f->linesize[1];
1411 
// this worker's horizontal extent, and initial plane offsets for it
1412  set_tile_offset(&tile_col_start, &tile_col_end,
1413  jobnr, s->s.h.tiling.log2_tile_cols, s->sb_cols);
1414  td->tile_col_start = tile_col_start;
1415  uvoff = (64 * bytesperpixel >> s->ss_h)*(tile_col_start >> 3);
1416  yoff = (64 * bytesperpixel)*(tile_col_start >> 3);
1417  lflvl_ptr_base = s->lflvl+(tile_col_start >> 3);
1418 
1419  for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
1420  set_tile_offset(&tile_row_start, &tile_row_end,
1421  tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);
1422 
// the range decoders were initialised by the caller; one per tile row
1423  td->c = &td->c_b[tile_row];
1424  for (row = tile_row_start; row < tile_row_end;
1425  row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
1426  ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
1427  VP9Filter *lflvl_ptr = lflvl_ptr_base+s->sb_cols*(row >> 3);
1428 
// reset left-edge contexts at the start of each superblock row
1429  memset(td->left_partition_ctx, 0, 8);
1430  memset(td->left_skip_ctx, 0, 8);
1431  if (s->s.h.keyframe || s->s.h.intraonly) {
1432  memset(td->left_mode_ctx, DC_PRED, 16);
1433  } else {
1434  memset(td->left_mode_ctx, NEARESTMV, 8);
1435  }
1436  memset(td->left_y_nnz_ctx, 0, 16);
1437  memset(td->left_uv_nnz_ctx, 0, 32);
1438  memset(td->left_segpred_ctx, 0, 8);
1439 
1440  for (col = tile_col_start;
1441  col < tile_col_end;
1442  col += 8, yoff2 += 64 * bytesperpixel,
1443  uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1444  // FIXME integrate with lf code (i.e. zero after each
1445  // use, similar to invtxfm coefficients, or similar)
1446  memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
1447  decode_sb(td, row, col, lflvl_ptr,
1448  yoff2, uvoff2, BL_64X64);
1449  }
1450 
1451  // backup pre-loopfilter reconstruction data for intra
1452  // prediction of next row of sb64s
1453  tile_cols_len = tile_col_end - tile_col_start;
1454  if (row + 8 < s->rows) {
1455  memcpy(s->intra_pred_data[0] + (tile_col_start * 8 * bytesperpixel),
1456  f->data[0] + yoff + 63 * ls_y,
1457  8 * tile_cols_len * bytesperpixel);
1458  memcpy(s->intra_pred_data[1] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
1459  f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
1460  8 * tile_cols_len * bytesperpixel >> s->ss_h);
1461  memcpy(s->intra_pred_data[2] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
1462  f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
1463  8 * tile_cols_len * bytesperpixel >> s->ss_h);
1464  }
1465 
// tell the loopfilter thread this sb row is done for this tile column
1466  vp9_report_tile_progress(s, row >> 3, 1);
1467  }
1468  }
1469  return 0;
1470 }
1471 
1472 static av_always_inline
1473 int loopfilter_proc(AVCodecContext *avctx)
1474 {
1475  VP9Context *s = avctx->priv_data;
1476  ptrdiff_t uvoff, yoff, ls_y, ls_uv;
1477  VP9Filter *lflvl_ptr;
1478  int bytesperpixel = s->bytesperpixel, col, i;
1479  AVFrame *f;
1480 
1481  f = s->s.frames[CUR_FRAME].tf.f;
1482  ls_y = f->linesize[0];
1483  ls_uv =f->linesize[1];
1484 
1485  for (i = 0; i < s->sb_rows; i++) {
1486  vp9_await_tile_progress(s, i, s->s.h.tiling.tile_cols);
1487 
1488  if (s->s.h.filter.level) {
1489  yoff = (ls_y * 64)*i;
1490  uvoff = (ls_uv * 64 >> s->ss_v)*i;
1491  lflvl_ptr = s->lflvl+s->sb_cols*i;
1492  for (col = 0; col < s->cols;
1493  col += 8, yoff += 64 * bytesperpixel,
1494  uvoff += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1495  ff_vp9_loopfilter_sb(avctx, lflvl_ptr, i << 3, col,
1496  yoff, uvoff);
1497  }
1498  }
1499  }
1500  return 0;
1501 }
1502 #endif
1503 
/* Attach AVVideoEncParams side data (frame QP, per-plane QP deltas and,
 * when segmentation is enabled, one per-block entry with segment QP
 * deltas) to the current frame.
 * NOTE(review): extraction lost three original lines here — 1504 (the
 * signature, presumably
 * `static int vp9_export_enc_params(VP9Context *s, VP9Frame *frame)`),
 * 1514 (the call assigning `par`, presumably
 * av_video_enc_params_create_side_data(...)), and 1534 (the per-block
 * pointer init, presumably via av_video_enc_params_block(par, ...)).
 * Confirm against upstream vp9.c. */
1505 {
1506  AVVideoEncParams *par;
1507  unsigned int tile, nb_blocks = 0;
1508 
// count blocks only when segmentation is on; otherwise only frame-level
// parameters are exported
1509  if (s->s.h.segmentation.enabled) {
1510  for (tile = 0; tile < s->active_tile_cols; tile++)
1511  nb_blocks += s->td[tile].nb_block_structure;
1512  }
1513 
// NOTE(review): original line 1514 (assignment of `par`) missing here.
1515  AV_VIDEO_ENC_PARAMS_VP9, nb_blocks);
1516  if (!par)
1517  return AVERROR(ENOMEM);
1518 
1519  par->qp = s->s.h.yac_qi;
1520  par->delta_qp[0][0] = s->s.h.ydc_qdelta;
1521  par->delta_qp[1][0] = s->s.h.uvdc_qdelta;
1522  par->delta_qp[2][0] = s->s.h.uvdc_qdelta;
1523  par->delta_qp[1][1] = s->s.h.uvac_qdelta;
1524  par->delta_qp[2][1] = s->s.h.uvac_qdelta;
1525 
1526  if (nb_blocks) {
1527  unsigned int block = 0;
1528  unsigned int tile, block_tile;
1529 
1530  for (tile = 0; tile < s->active_tile_cols; tile++) {
1531  VP9TileData *td = &s->td[tile];
1532 
1533  for (block_tile = 0; block_tile < td->nb_block_structure; block_tile++) {
// NOTE(review): original line 1534 (declaration/init of `b`) missing here.
1535  unsigned int row = td->block_structure[block_tile].row;
1536  unsigned int col = td->block_structure[block_tile].col;
1537  uint8_t seg_id = frame->segmentation_map[row * 8 * s->sb_cols + col];
1538 
1539  b->src_x = col * 8;
1540  b->src_y = row * 8;
1541  b->w = 1 << (3 + td->block_structure[block_tile].block_size_idx_x);
1542  b->h = 1 << (3 + td->block_structure[block_tile].block_size_idx_y);
1543 
1544  if (s->s.h.segmentation.feat[seg_id].q_enabled) {
1545  b->delta_qp = s->s.h.segmentation.feat[seg_id].q_val;
// with absolute segment values, convert to a delta against the frame QP
1546  if (s->s.h.segmentation.absolute_vals)
1547  b->delta_qp -= par->qp;
1548  }
1549  }
1550  }
1551  }
1552 
1553  return 0;
1554 }
1555 
/* Top-level per-packet decode entry point: parses the frame header,
 * handles show-existing-frame packets, rotates the internal frame slots
 * (current / segmentation-map ref / mv-pair ref), dispatches to hwaccel
 * or to the software tile decoders (single-threaded or slice-threaded),
 * then commits next_refs into the reference slots and outputs the frame
 * unless it is marked invisible.
 * NOTE(review): extraction lost the signature line (1556), presumably
 * `static int vp9_decode_frame(AVCodecContext *avctx, AVFrame *frame,`
 * plus two interior lines noted below — confirm against upstream vp9.c. */
1557  int *got_frame, AVPacket *pkt)
1558 {
1559  const uint8_t *data = pkt->data;
1560  int size = pkt->size;
1561  VP9Context *s = avctx->priv_data;
1562  int ret, i, j, ref;
1563  int retain_segmap_ref = s->s.frames[REF_FRAME_SEGMAP].segmentation_map &&
1564  (!s->s.h.segmentation.enabled || !s->s.h.segmentation.update_map);
1565  AVFrame *f;
1566 
1567  if ((ret = decode_frame_header(avctx, data, size, &ref)) < 0) {
1568  return ret;
1569  } else if (ret == 0) {
// ret == 0: "show existing frame" — re-output reference `ref` directly
1570  if (!s->s.refs[ref].f->buf[0]) {
1571  av_log(avctx, AV_LOG_ERROR, "Requested reference %d not available\n", ref);
1572  return AVERROR_INVALIDDATA;
1573  }
1574  if ((ret = av_frame_ref(frame, s->s.refs[ref].f)) < 0)
1575  return ret;
1576  frame->pts = pkt->pts;
1577  frame->pkt_dts = pkt->dts;
1578  for (i = 0; i < 8; i++) {
1579  if (s->next_refs[i].f->buf[0])
1580  ff_thread_release_ext_buffer(&s->next_refs[i]);
1581  if (s->s.refs[i].f->buf[0] &&
1582  (ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i])) < 0)
1583  return ret;
1584  }
1585  *got_frame = 1;
1586  return pkt->size;
1587  }
1588  data += ret;
1589  size -= ret;
1590 
// rotate frame slots: the previous CUR_FRAME becomes the segmentation-map
// and/or mv-pair reference for this frame, then a fresh CUR_FRAME is allocated
1591  if (!retain_segmap_ref || s->s.h.keyframe || s->s.h.intraonly) {
1592  if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0])
1593  vp9_frame_unref(&s->s.frames[REF_FRAME_SEGMAP]);
1594  if (!s->s.h.keyframe && !s->s.h.intraonly && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] &&
1595  (ret = vp9_frame_ref(&s->s.frames[REF_FRAME_SEGMAP], &s->s.frames[CUR_FRAME])) < 0)
1596  return ret;
1597  }
1598  if (s->s.frames[REF_FRAME_MVPAIR].tf.f->buf[0])
1599  vp9_frame_unref(&s->s.frames[REF_FRAME_MVPAIR]);
1600  if (!s->s.h.intraonly && !s->s.h.keyframe && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] &&
1601  (ret = vp9_frame_ref(&s->s.frames[REF_FRAME_MVPAIR], &s->s.frames[CUR_FRAME])) < 0)
1602  return ret;
1603  if (s->s.frames[CUR_FRAME].tf.f->buf[0])
1604  vp9_frame_unref(&s->s.frames[CUR_FRAME]);
1605  if ((ret = vp9_frame_alloc(avctx, &s->s.frames[CUR_FRAME])) < 0)
1606  return ret;
1607  f = s->s.frames[CUR_FRAME].tf.f;
1608  if (s->s.h.keyframe)
1609  f->flags |= AV_FRAME_FLAG_KEY;
1610  else
1611  f->flags &= ~AV_FRAME_FLAG_KEY;
1612  f->pict_type = (s->s.h.keyframe || s->s.h.intraonly) ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
1613 
// drop the segmentation-map ref if the frame size changed
1614  if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0] &&
1615  (s->s.frames[REF_FRAME_MVPAIR].tf.f->width != s->s.frames[CUR_FRAME].tf.f->width ||
1616  s->s.frames[REF_FRAME_MVPAIR].tf.f->height != s->s.frames[CUR_FRAME].tf.f->height)) {
1617  vp9_frame_unref(&s->s.frames[REF_FRAME_SEGMAP]);
1618  }
1619 
1620  // ref frame setup
1621  for (i = 0; i < 8; i++) {
1622  if (s->next_refs[i].f->buf[0])
1623  ff_thread_release_ext_buffer(&s->next_refs[i]);
1624  if (s->s.h.refreshrefmask & (1 << i)) {
1625  ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.frames[CUR_FRAME].tf);
1626  } else if (s->s.refs[i].f->buf[0]) {
1627  ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i]);
1628  }
1629  if (ret < 0)
1630  return ret;
1631  }
1632 
// hardware decode path: hand the whole packet to the hwaccel and skip the
// software tile loop entirely
1633  if (avctx->hwaccel) {
1634  const FFHWAccel *hwaccel = ffhwaccel(avctx->hwaccel);
1635  ret = hwaccel->start_frame(avctx, NULL, 0);
1636  if (ret < 0)
1637  return ret;
1638  ret = hwaccel->decode_slice(avctx, pkt->data, pkt->size);
1639  if (ret < 0)
1640  return ret;
1641  ret = hwaccel->end_frame(avctx);
1642  if (ret < 0)
1643  return ret;
1644  goto finish;
1645  }
1646 
1647  // main tile decode loop
1648  memset(s->above_partition_ctx, 0, s->cols);
1649  memset(s->above_skip_ctx, 0, s->cols);
1650  if (s->s.h.keyframe || s->s.h.intraonly) {
1651  memset(s->above_mode_ctx, DC_PRED, s->cols * 2);
1652  } else {
1653  memset(s->above_mode_ctx, NEARESTMV, s->cols);
1654  }
1655  memset(s->above_y_nnz_ctx, 0, s->sb_cols * 16);
1656  memset(s->above_uv_nnz_ctx[0], 0, s->sb_cols * 16 >> s->ss_h);
1657  memset(s->above_uv_nnz_ctx[1], 0, s->sb_cols * 16 >> s->ss_h);
1658  memset(s->above_segpred_ctx, 0, s->cols);
// two-pass decoding is used for frame threading when this frame adapts
// its probability context (refreshctx && !parallelmode)
1659  s->pass = s->s.frames[CUR_FRAME].uses_2pass =
1660  avctx->active_thread_type == FF_THREAD_FRAME && s->s.h.refreshctx && !s->s.h.parallelmode;
1661  if ((ret = update_block_buffers(avctx)) < 0) {
1662  av_log(avctx, AV_LOG_ERROR,
1663  "Failed to allocate block buffers\n");
1664  return ret;
1665  }
// parallelmode: commit the updated probabilities immediately so dependent
// frame threads can start before tile decoding finishes
1666  if (s->s.h.refreshctx && s->s.h.parallelmode) {
1667  int j, k, l, m;
1668 
1669  for (i = 0; i < 4; i++) {
1670  for (j = 0; j < 2; j++)
1671  for (k = 0; k < 2; k++)
1672  for (l = 0; l < 6; l++)
1673  for (m = 0; m < 6; m++)
1674  memcpy(s->prob_ctx[s->s.h.framectxid].coef[i][j][k][l][m],
1675  s->prob.coef[i][j][k][l][m], 3);
1676  if (s->s.h.txfmmode == i)
1677  break;
1678  }
1679  s->prob_ctx[s->s.h.framectxid].p = s->prob.p;
1680  ff_thread_finish_setup(avctx);
1681  } else if (!s->s.h.refreshctx) {
1682  ff_thread_finish_setup(avctx);
1683  }
1684 
1685 #if HAVE_THREADS
1686  if (avctx->active_thread_type & FF_THREAD_SLICE) {
1687  for (i = 0; i < s->sb_rows; i++)
1688  atomic_store(&s->entries[i], 0);
1689  }
1690 #endif
1691 
1692  do {
// reset per-tile scratch pointers to their base buffers for this pass
1693  for (i = 0; i < s->active_tile_cols; i++) {
1694  s->td[i].b = s->td[i].b_base;
1695  s->td[i].block = s->td[i].block_base;
1696  s->td[i].uvblock[0] = s->td[i].uvblock_base[0];
1697  s->td[i].uvblock[1] = s->td[i].uvblock_base[1];
1698  s->td[i].eob = s->td[i].eob_base;
1699  s->td[i].uveob[0] = s->td[i].uveob_base[0];
1700  s->td[i].uveob[1] = s->td[i].uveob_base[1];
1701  s->td[i].error_info = 0;
1702  }
1703 
1704 #if HAVE_THREADS
1705  if (avctx->active_thread_type == FF_THREAD_SLICE) {
1706  int tile_row, tile_col;
1707 
1708  av_assert1(!s->pass);
1709 
// slice threading: one range decoder per (tile_col, tile_row), indexed by
// column so each worker thread owns a full tile column
1710  for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
1711  for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
1712  int64_t tile_size;
1713 
1714  if (tile_col == s->s.h.tiling.tile_cols - 1 &&
1715  tile_row == s->s.h.tiling.tile_rows - 1) {
1716  tile_size = size;
1717  } else {
1718  tile_size = AV_RB32(data);
1719  data += 4;
1720  size -= 4;
1721  }
1722  if (tile_size > size)
1723  return AVERROR_INVALIDDATA;
1724  ret = ff_vpx_init_range_decoder(&s->td[tile_col].c_b[tile_row], data, tile_size);
1725  if (ret < 0)
1726  return ret;
1727  if (vpx_rac_get_prob_branchy(&s->td[tile_col].c_b[tile_row], 128)) // marker bit
1728  return AVERROR_INVALIDDATA;
1729  data += tile_size;
1730  size -= tile_size;
1731  }
1732  }
1733 
1734  ff_slice_thread_execute_with_mainfunc(avctx, decode_tiles_mt, loopfilter_proc, s->td, NULL, s->s.h.tiling.tile_cols);
1735  } else
1736 #endif
1737  {
1738  ret = decode_tiles(avctx, data, size);
1739  if (ret < 0) {
1740  ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
1741  return ret;
1742  }
1743  }
1744 
1745  // Sum all counts fields into td[0].counts for tile threading
1746  if (avctx->active_thread_type == FF_THREAD_SLICE)
1747  for (i = 1; i < s->s.h.tiling.tile_cols; i++)
1748  for (j = 0; j < sizeof(s->td[i].counts) / sizeof(unsigned); j++)
1749  ((unsigned *)&s->td[0].counts)[j] += ((unsigned *)&s->td[i].counts)[j];
1750 
1751  if (s->pass < 2 && s->s.h.refreshctx && !s->s.h.parallelmode) {
// NOTE(review): extraction dropped original line 1752 here (the
// probability-adaptation call, presumably ff_vp9_adapt_probs(s)) —
// confirm against upstream.
1753  ff_thread_finish_setup(avctx);
1754  }
1755  } while (s->pass++ == 1);
1756  ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
1757 
1758  if (s->td->error_info < 0) {
1759  av_log(avctx, AV_LOG_ERROR, "Failed to decode tile data\n");
1760  s->td->error_info = 0;
1761  return AVERROR_INVALIDDATA;
1762  }
// NOTE(review): extraction dropped original line 1763 here (the opening
// `if (...) {` guarding the enc-params export, matched by the `}` on
// line 1767 below; presumably testing
// avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS) —
// confirm against upstream.
1764  ret = vp9_export_enc_params(s, &s->s.frames[CUR_FRAME]);
1765  if (ret < 0)
1766  return ret;
1767  }
1768 
1769 finish:
1770  // ref frame setup
1771  for (i = 0; i < 8; i++) {
1772  if (s->s.refs[i].f->buf[0])
1773  ff_thread_release_ext_buffer(&s->s.refs[i]);
1774  if (s->next_refs[i].f->buf[0] &&
1775  (ret = ff_thread_ref_frame(&s->s.refs[i], &s->next_refs[i])) < 0)
1776  return ret;
1777  }
1778 
1779  if (!s->s.h.invisible) {
1780  if ((ret = av_frame_ref(frame, s->s.frames[CUR_FRAME].tf.f)) < 0)
1781  return ret;
1782  *got_frame = 1;
1783  }
1784 
1785  return pkt->size;
1786 }
1787 
/* Flush callback: drop the three internal frames and all eight reference
 * slots, and forward the flush to the hwaccel if it implements one.
 * NOTE(review): the signature line was lost in extraction; from the FFCodec
 * `.flush` entry it is presumably
 * `static void vp9_decode_flush(AVCodecContext *avctx)` — confirm. */
1789 {
1790  VP9Context *s = avctx->priv_data;
1791  int i;
1792 
1793  for (i = 0; i < 3; i++)
1794  vp9_frame_unref(&s->s.frames[i]);
1795  for (i = 0; i < 8; i++)
1796  ff_thread_release_ext_buffer(&s->s.refs[i]);
1797 
1798  if (FF_HW_HAS_CB(avctx, flush))
1799  FF_HW_SIMPLE_CALL(avctx, flush);
1800 }
1801 
/* Decoder init: reset cached bit depth and filter sharpness, set up the
 * slice-threading mutex/cond when slice threads are active, and allocate
 * the AVFrames backing the three internal slots and eight reference slots.
 * Returns 0 or AVERROR(ENOMEM); cleanup on failure is handled by
 * vp9_decode_free via FF_CODEC_CAP_INIT_CLEANUP.
 * NOTE(review): the signature line was lost in extraction; from the FFCodec
 * `.init` entry it is presumably
 * `static av_cold int vp9_decode_init(AVCodecContext *avctx)` — confirm. */
1803 {
1804  VP9Context *s = avctx->priv_data;
1805  int ret;
1806 
// sharpness -1 forces the loop-filter tables to be rebuilt on first frame
1807  s->last_bpp = 0;
1808  s->s.h.filter.sharpness = -1;
1809 
1810 #if HAVE_THREADS
1811  if (avctx->active_thread_type & FF_THREAD_SLICE) {
1812  ret = ff_pthread_init(s, vp9_context_offsets);
1813  if (ret < 0)
1814  return ret;
1815  }
1816 #endif
1817 
1818  for (int i = 0; i < 3; i++) {
1819  s->s.frames[i].tf.f = av_frame_alloc();
1820  if (!s->s.frames[i].tf.f)
1821  return AVERROR(ENOMEM);
1822  }
1823  for (int i = 0; i < 8; i++) {
1824  s->s.refs[i].f = av_frame_alloc();
1825  s->next_refs[i].f = av_frame_alloc();
1826  if (!s->s.refs[i].f || !s->next_refs[i].f)
1827  return AVERROR(ENOMEM);
1828  }
1829  return 0;
1830 }
1831 
1832 #if HAVE_THREADS
1833 static int vp9_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
1834 {
1835  int i, ret;
1836  VP9Context *s = dst->priv_data, *ssrc = src->priv_data;
1837 
1838  for (i = 0; i < 3; i++) {
1839  if (s->s.frames[i].tf.f->buf[0])
1840  vp9_frame_unref(&s->s.frames[i]);
1841  if (ssrc->s.frames[i].tf.f->buf[0]) {
1842  if ((ret = vp9_frame_ref(&s->s.frames[i], &ssrc->s.frames[i])) < 0)
1843  return ret;
1844  }
1845  }
1846  for (i = 0; i < 8; i++) {
1847  if (s->s.refs[i].f->buf[0])
1848  ff_thread_release_ext_buffer(&s->s.refs[i]);
1849  if (ssrc->next_refs[i].f->buf[0]) {
1850  if ((ret = ff_thread_ref_frame(&s->s.refs[i], &ssrc->next_refs[i])) < 0)
1851  return ret;
1852  }
1853  }
1854 
1855  s->s.h.invisible = ssrc->s.h.invisible;
1856  s->s.h.keyframe = ssrc->s.h.keyframe;
1857  s->s.h.intraonly = ssrc->s.h.intraonly;
1858  s->ss_v = ssrc->ss_v;
1859  s->ss_h = ssrc->ss_h;
1860  s->s.h.segmentation.enabled = ssrc->s.h.segmentation.enabled;
1861  s->s.h.segmentation.update_map = ssrc->s.h.segmentation.update_map;
1862  s->s.h.segmentation.absolute_vals = ssrc->s.h.segmentation.absolute_vals;
1863  s->bytesperpixel = ssrc->bytesperpixel;
1864  s->gf_fmt = ssrc->gf_fmt;
1865  s->w = ssrc->w;
1866  s->h = ssrc->h;
1867  s->s.h.bpp = ssrc->s.h.bpp;
1868  s->bpp_index = ssrc->bpp_index;
1869  s->pix_fmt = ssrc->pix_fmt;
1870  memcpy(&s->prob_ctx, &ssrc->prob_ctx, sizeof(s->prob_ctx));
1871  memcpy(&s->s.h.lf_delta, &ssrc->s.h.lf_delta, sizeof(s->s.h.lf_delta));
1872  memcpy(&s->s.h.segmentation.feat, &ssrc->s.h.segmentation.feat,
1873  sizeof(s->s.h.segmentation.feat));
1874 
1875  return 0;
1876 }
1877 #endif
1878 
/* Codec registration entry for the native VP9 decoder.
 * NOTE(review): extraction lost the opening line (1879, presumably
 * `const FFCodec ff_vp9_decoder = {`) as well as the `.decode` /
 * `.p.capabilities` entries (lines 1887-1888) and the continuation of the
 * `.caps_internal` flag list (lines 1890-1891) — confirm against
 * upstream vp9.c. */
1880  .p.name = "vp9",
1881  CODEC_LONG_NAME("Google VP9"),
1882  .p.type = AVMEDIA_TYPE_VIDEO,
1883  .p.id = AV_CODEC_ID_VP9,
1884  .priv_data_size = sizeof(VP9Context),
1885  .init = vp9_decode_init,
1886  .close = vp9_decode_free,
// NOTE(review): original lines 1887-1888 (.decode callback and
// .p.capabilities flags) missing here.
1889  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
// NOTE(review): original lines 1890-1891 (remaining caps_internal flags)
// missing here.
1892  .flush = vp9_decode_flush,
1893  UPDATE_THREAD_CONTEXT(vp9_decode_update_thread_context),
1894  .p.profiles = NULL_IF_CONFIG_SMALL(ff_vp9_profiles),
// split superframes into individual frames before decoding
1895  .bsfs = "vp9_superframe_split",
1896  .hw_configs = (const AVCodecHWConfigInternal *const []) {
1897 #if CONFIG_VP9_DXVA2_HWACCEL
1898  HWACCEL_DXVA2(vp9),
1899 #endif
1900 #if CONFIG_VP9_D3D11VA_HWACCEL
1901  HWACCEL_D3D11VA(vp9),
1902 #endif
1903 #if CONFIG_VP9_D3D11VA2_HWACCEL
1904  HWACCEL_D3D11VA2(vp9),
1905 #endif
1906 #if CONFIG_VP9_NVDEC_HWACCEL
1907  HWACCEL_NVDEC(vp9),
1908 #endif
1909 #if CONFIG_VP9_VAAPI_HWACCEL
1910  HWACCEL_VAAPI(vp9),
1911 #endif
1912 #if CONFIG_VP9_VDPAU_HWACCEL
1913  HWACCEL_VDPAU(vp9),
1914 #endif
1915 #if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
1916  HWACCEL_VIDEOTOOLBOX(vp9),
1917 #endif
1918  NULL
1919  },
1920 };
AVVideoEncParams::qp
int32_t qp
Base quantisation parameter for the frame.
Definition: video_enc_params.h:103
hwconfig.h
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1435
av_buffer_pool_init
AVBufferPool * av_buffer_pool_init(size_t size, AVBufferRef *(*alloc)(size_t size))
Allocate and initialize a buffer pool.
Definition: buffer.c:280
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:253
FF_CODEC_CAP_SLICE_THREAD_HAS_MF
#define FF_CODEC_CAP_SLICE_THREAD_HAS_MF
Codec initializes slice-based threading with a main function.
Definition: codec_internal.h:64
decode_tiles
static int decode_tiles(AVCodecContext *avctx, const uint8_t *data, int size)
Definition: vp9.c:1265
td
#define td
Definition: regdef.h:70
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
vp9_frame_alloc
static int vp9_frame_alloc(AVCodecContext *avctx, VP9Frame *f)
Definition: vp9.c:108
atomic_store
#define atomic_store(object, desired)
Definition: stdatomic.h:85
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
r
const char * r
Definition: vf_curves.c:126
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
PRED_SWITCHABLE
@ PRED_SWITCHABLE
Definition: vp9shared.h:52
PRED_SINGLEREF
@ PRED_SINGLEREF
Definition: vp9shared.h:50
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1029
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1264
VP9Frame::segmentation_map
uint8_t * segmentation_map
Definition: vp9shared.h:68
VP9Frame
Definition: vp9shared.h:65
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:120
ff_vp9_decoder
const FFCodec ff_vp9_decoder
Definition: vp9.c:1879
decode_sb
static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
Definition: vp9.c:1094
ff_vp9_adapt_probs
void ff_vp9_adapt_probs(VP9Context *s)
Definition: vp9prob.c:44
vp9_decode_flush
static void vp9_decode_flush(AVCodecContext *avctx)
Definition: vp9.c:1788
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:266
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:100
PRED_COMPREF
@ PRED_COMPREF
Definition: vp9shared.h:51
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:340
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:452
w
uint8_t w
Definition: llviddspenc.c:38
HWACCEL_DXVA2
#define HWACCEL_DXVA2(codec)
Definition: hwconfig.h:64
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:673
BlockPartition
BlockPartition
Definition: vp9shared.h:35
AVPacket::data
uint8_t * data
Definition: packet.h:491
DC_PRED
@ DC_PRED
Definition: vp9.h:48
HWACCEL_D3D11VA2
#define HWACCEL_D3D11VA2(codec)
Definition: hwconfig.h:66
b
#define b
Definition: input.c:41
data
const char data[16]
Definition: mxf.c:148
update_size
static int update_size(AVCodecContext *avctx, int w, int h)
Definition: vp9.c:175
decode_sb_mem
static void decode_sb_mem(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
Definition: vp9.c:1173
REF_FRAME_SEGMAP
#define REF_FRAME_SEGMAP
Definition: vp9shared.h:170
decode_frame_header
static int decode_frame_header(AVCodecContext *avctx, const uint8_t *data, int size, int *ref)
Definition: vp9.c:509
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:468
atomic_int
intptr_t atomic_int
Definition: stdatomic.h:55
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:247
FFCodec
Definition: codec_internal.h:127
AVCOL_SPC_RGB
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1
Definition: pixfmt.h:600
FF_HW_SIMPLE_CALL
#define FF_HW_SIMPLE_CALL(avctx, function)
Definition: hwaccel_internal.h:174
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
max
#define max(a, b)
Definition: cuda_runtime.h:33
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
VP9_SYNCCODE
#define VP9_SYNCCODE
Definition: vp9.c:48
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
vp89_rac.h
VP9Filter
Definition: vp9dec.h:79
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:94
VPXRangeCoder
Definition: vpx_rac.h:35
thread.h
ff_pthread_free
av_cold void ff_pthread_free(void *obj, const unsigned offsets[])
Definition: pthread.c:91
FILTER_SWITCHABLE
@ FILTER_SWITCHABLE
Definition: vp9.h:70
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
VP9Block
Definition: vp9dec.h:85
VP9Frame::tf
ThreadFrame tf
Definition: vp9shared.h:66
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:381
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:605
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:335
AVCOL_SPC_RESERVED
@ AVCOL_SPC_RESERVED
reserved for future use by ITU-T and ISO/IEC just like 15-255 are
Definition: pixfmt.h:603
VP9Frame::extradata
AVBufferRef * extradata
Definition: vp9shared.h:67
TX_SWITCHABLE
@ TX_SWITCHABLE
Definition: vp9.h:33
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
finish
static void finish(void)
Definition: movenc.c:342
FFHWAccel
Definition: hwaccel_internal.h:34
ff_vp9_ac_qlookup
const int16_t ff_vp9_ac_qlookup[3][256]
Definition: vp9data.c:334
AVVideoEncParams::delta_qp
int32_t delta_qp[4][2]
Quantisation parameter offset from the base (per-frame) qp for a given plane (first index) and AC/DC ...
Definition: video_enc_params.h:109
fail
#define fail()
Definition: checkasm.h:138
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:484
GetBitContext
Definition: get_bits.h:108
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:521
FFHWAccel::end_frame
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
Definition: hwaccel_internal.h:97
HWACCEL_VDPAU
#define HWACCEL_VDPAU(codec)
Definition: hwconfig.h:72
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:39
PARTITION_NONE
@ PARTITION_NONE
Definition: vp9shared.h:36
vp9_frame_unref
static void vp9_frame_unref(VP9Frame *f)
Definition: vp9.c:100
VP9Frame::hwaccel_picture_private
void * hwaccel_picture_private
RefStruct reference.
Definition: vp9shared.h:72
refstruct.h
AVVideoEncParams
Video encoding parameters for a given frame.
Definition: video_enc_params.h:73
vp9_decode_free
static av_cold int vp9_decode_free(AVCodecContext *avctx)
Definition: vp9.c:1239
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:88
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:471
FFHWAccel::start_frame
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: hwaccel_internal.h:59
avassert.h
ff_vp9_model_pareto8
const uint8_t ff_vp9_model_pareto8[256][8]
Definition: vp9data.c:1176
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:545
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1905
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:628
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:589
BL_8X8
@ BL_8X8
Definition: vp9shared.h:79
av_buffer_pool_get
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:384
PARTITION_V
@ PARTITION_V
Definition: vp9shared.h:38
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:306
ff_hwaccel_frame_priv_alloc
int ff_hwaccel_frame_priv_alloc(AVCodecContext *avctx, void **hwaccel_picture_private)
Allocate a hwaccel frame private data if the provided avctx uses a hwaccel method that needs it.
Definition: decode.c:1838
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:127
s
#define s(width, name)
Definition: cbs_vp9.c:198
AVCOL_SPC_SMPTE170M
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
Definition: pixfmt.h:606
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:421
AV_CODEC_ID_VP9
@ AV_CODEC_ID_VP9
Definition: codec_id.h:220
vp9data.h
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts_bsf.c:365
bits
uint8_t bits
Definition: vp3data.h:128
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
decode.h
get_bits.h
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
ff_vp9dsp_init
av_cold void ff_vp9dsp_init(VP9DSPContext *dsp, int bpp, int bitexact)
Definition: vp9dsp.c:88
ff_vp9_partition_tree
const int8_t ff_vp9_partition_tree[3][2]
Definition: vp9data.c:35
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
vp9_decode_frame
static int vp9_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
Definition: vp9.c:1556
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:272
frame
static AVFrame * frame
Definition: demux_decode.c:54
ff_thread_ref_frame
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
Definition: utils.c:871
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:110
threadframe.h
NULL
#define NULL
Definition: coverity.c:32
vp9_frame_ref
static int vp9_frame_ref(VP9Frame *dst, VP9Frame *src)
Definition: vp9.c:149
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1039
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
hwaccel_internal.h
VP9Context
Definition: vp9dec.h:97
REF_FRAME_MVPAIR
#define REF_FRAME_MVPAIR
Definition: vp9shared.h:169
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:388
vp89_rac_get_uint
static av_unused int vp89_rac_get_uint(VPXRangeCoder *c, int bits)
Definition: vp89_rac.h:41
profiles.h
av_buffer_pool_uninit
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:322
AV_PIX_FMT_YUV440P10
#define AV_PIX_FMT_YUV440P10
Definition: pixfmt.h:470
ff_thread_release_ext_buffer
void ff_thread_release_ext_buffer(ThreadFrame *f)
Unref a ThreadFrame.
Definition: pthread_frame.c:1012
pthread_internal.h
AVFrame::pkt_dts
int64_t pkt_dts
DTS copied from the AVPacket that triggered returning this frame.
Definition: frame.h:459
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:281
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:469
VP9mv
Definition: vp9shared.h:55
PARTITION_SPLIT
@ PARTITION_SPLIT
Definition: vp9shared.h:39
FF_HW_HAS_CB
#define FF_HW_HAS_CB(avctx, function)
Definition: hwaccel_internal.h:177
atomic_load_explicit
#define atomic_load_explicit(object, order)
Definition: stdatomic.h:96
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
pthread_mutex_unlock
#define pthread_mutex_unlock(a)
Definition: ffprobe.c:79
av_video_enc_params_create_side_data
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVEncodeInfoFrame plus an array of.
Definition: video_enc_params.c:58
vp9.h
VP9Frame::uses_2pass
int uses_2pass
Definition: vp9shared.h:70
f
f
Definition: af_crystalizer.c:121
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:492
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:106
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:361
codec_internal.h
pix_fmt_rgb
static enum AVPixelFormat pix_fmt_rgb[3]
Definition: libdav1d.c:65
REF_INVALID_SCALE
#define REF_INVALID_SCALE
Definition: vp9dec.h:43
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
read_colorspace_details
static int read_colorspace_details(AVCodecContext *avctx)
Definition: vp9.c:447
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:473
size
int size
Definition: twinvq_data.h:10344
vp9_alloc_entries
static int vp9_alloc_entries(AVCodecContext *avctx, int n)
Definition: vp9.c:90
atomic_fetch_add_explicit
#define atomic_fetch_add_explicit(object, operand, order)
Definition: stdatomic.h:149
FF_CODEC_CAP_ALLOCATE_PROGRESS
#define FF_CODEC_CAP_ALLOCATE_PROGRESS
Definition: codec_internal.h:69
free_buffers
static void free_buffers(VP9Context *s)
Definition: vp9.c:1230
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:475
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1544
AVCodecHWConfigInternal
Definition: hwconfig.h:25
TX_4X4
@ TX_4X4
Definition: vp9.h:28
update_block_buffers
static int update_block_buffers(AVCodecContext *avctx)
Definition: vp9.c:312
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:490
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:114
HWACCEL_D3D11VA
#define HWACCEL_D3D11VA(codec)
Definition: hwconfig.h:78
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:333
inv_recenter_nonneg
static av_always_inline int inv_recenter_nonneg(int v, int m)
Definition: vp9.c:379
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:68
vpx_rac_is_end
static av_always_inline int vpx_rac_is_end(VPXRangeCoder *c)
returns 1 if the end of the stream has been reached, 0 otherwise.
Definition: vpx_rac.h:51
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:119
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1543
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:187
ff_slice_thread_execute_with_mainfunc
int ff_slice_thread_execute_with_mainfunc(AVCodecContext *avctx, action_func2 *func2, main_func *mainfunc, void *arg, int *ret, int job_count)
Definition: pthread_slice.c:126
AVCOL_SPC_SMPTE240M
@ AVCOL_SPC_SMPTE240M
derived from 170M primaries and D65 white point, 170M is derived from BT470 System M's primaries
Definition: pixfmt.h:607
assign
#define assign(var, type, n)
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:302
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:67
update_prob
static int update_prob(VPXRangeCoder *c, int p)
Definition: vp9.c:389
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:484
DEFINE_OFFSET_ARRAY
#define DEFINE_OFFSET_ARRAY(type, name, cnt_variable, mutexes, conds)
Definition: pthread_internal.h:61
AVCodecContext::properties
unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1904
AVCOL_SPC_BT2020_NCL
@ AVCOL_SPC_BT2020_NCL
ITU-R BT2020 non-constant luminance system.
Definition: pixfmt.h:610
vpx_rac.h
decode012
static int BS_FUNC() decode012(BSCTX *bc)
Return decoded truncated unary code for the values 0, 1, 2.
Definition: bitstream_template.h:436
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:485
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:599
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
vpx_rac_get_prob_branchy
static av_always_inline int vpx_rac_get_prob_branchy(VPXRangeCoder *c, int prob)
Definition: vpx_rac.h:99
AVVideoBlockParams
Data structure for storing block-level encoding information.
Definition: video_enc_params.h:120
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:254
get_sbits_inv
static av_always_inline int get_sbits_inv(GetBitContext *gb, int n)
Definition: vp9.c:373
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:602
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:658
ff_thread_get_ext_buffer
int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around ff_get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:984
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:656
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:262
HWACCEL_VIDEOTOOLBOX
#define HWACCEL_VIDEOTOOLBOX(codec)
Definition: hwconfig.h:74
avcodec.h
limit
static double limit(double x)
Definition: vf_pseudocolor.c:142
vp89_rac_get_tree
static av_always_inline int vp89_rac_get_tree(VPXRangeCoder *c, const int8_t(*tree)[2], const uint8_t *probs)
Definition: vp89_rac.h:54
BL_64X64
@ BL_64X64
Definition: vp9shared.h:76
ret
ret
Definition: filter_design.txt:187
vp9_decode_init
static av_cold int vp9_decode_init(AVCodecContext *avctx)
Definition: vp9.c:1802
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:561
ff_vpx_init_range_decoder
int ff_vpx_init_range_decoder(VPXRangeCoder *c, const uint8_t *buf, int buf_size)
Definition: vpx_rac.c:42
ff_refstruct_replace
void ff_refstruct_replace(void *dstp, const void *src)
Ensure *dstp refers to the same object as src.
Definition: refstruct.c:156
vp9_tile_data_free
static void vp9_tile_data_free(VP9TileData *td)
Definition: vp9.c:93
ff_thread_finish_setup
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards. If some code can 't be moved
VP9mvrefPair
Definition: vp9shared.h:60
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:472
pthread_cond_signal
static av_always_inline int pthread_cond_signal(pthread_cond_t *cond)
Definition: os2threads.h:152
AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only.
Definition: avcodec.h:411
VP9TileData
Definition: vp9dec.h:168
vp89_rac_get
static av_always_inline int vp89_rac_get(VPXRangeCoder *c)
Definition: vp89_rac.h:36
AVCodecContext
main external API structure.
Definition: avcodec.h:441
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1551
VP9Filter::mask
uint8_t mask[2][2][8][4]
Definition: vp9dec.h:82
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
VP9Frame::mv
VP9mvrefPair * mv
Definition: vp9shared.h:69
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1596
ffhwaccel
static const FFHWAccel * ffhwaccel(const AVHWAccel *codec)
Definition: hwaccel_internal.h:166
FFHWAccel::decode_slice
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: hwaccel_internal.h:86
ff_vp9_decode_block
void ff_vp9_decode_block(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl, enum BlockPartition bp)
Definition: vp9block.c:1263
NEARESTMV
@ NEARESTMV
Definition: vp9shared.h:43
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
BlockLevel
BlockLevel
Definition: vp9shared.h:75
AVCodecContext::export_side_data
int export_side_data
Bit set of AV_CODEC_EXPORT_DATA_* flags, which affects the kind of metadata exported in frame,...
Definition: avcodec.h:2057
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
ff_pthread_init
av_cold int ff_pthread_init(void *obj, const unsigned offsets[])
Initialize/destroy a list of mutexes/conditions contained in a structure.
Definition: pthread.c:104
pthread_cond_wait
static av_always_inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
Definition: os2threads.h:192
vp9dec.h
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:280
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
ff_vp9_default_kf_partition_probs
const uint8_t ff_vp9_default_kf_partition_probs[4][4][3]
Definition: vp9data.c:41
AV_VIDEO_ENC_PARAMS_VP9
@ AV_VIDEO_ENC_PARAMS_VP9
VP9 stores:
Definition: video_enc_params.h:44
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
flush
void(* flush)(AVBSFContext *ctx)
Definition: dts2pts_bsf.c:367
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:338
ff_vp9_default_probs
const ProbContext ff_vp9_default_probs
Definition: vp9data.c:1435
CUR_FRAME
#define CUR_FRAME
Definition: vp9shared.h:168
vp9_export_enc_params
static int vp9_export_enc_params(VP9Context *s, VP9Frame *frame)
Definition: vp9.c:1504
AVPacket
This structure stores compressed data.
Definition: packet.h:468
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:468
PARTITION_H
@ PARTITION_H
Definition: vp9shared.h:37
ff_vp9_loopfilter_sb
void ff_vp9_loopfilter_sb(AVCodecContext *avctx, VP9Filter *lflvl, int row, int col, ptrdiff_t yoff, ptrdiff_t uvoff)
Definition: vp9lpf.c:178
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
videodsp.h
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:70
d
d
Definition: ffmpeg_filter.c:368
HWACCEL_MAX
#define HWACCEL_MAX
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_video_enc_params_block
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
Get the block at the specified.
Definition: video_enc_params.h:143
AV_PIX_FMT_YUV440P12
#define AV_PIX_FMT_YUV440P12
Definition: pixfmt.h:474
h
h
Definition: vp9dsp_template.c:2038
atomic_init
#define atomic_init(obj, value)
Definition: stdatomic.h:33
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
Definition: pixfmt.h:601
ff_refstruct_unref
void ff_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are...
Definition: refstruct.c:116
ff_vp9_profiles
const AVProfile ff_vp9_profiles[]
Definition: profiles.c:152
video_enc_params.h
set_tile_offset
static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
Definition: vp9.c:1222
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2884
ff_vp9_dc_qlookup
const int16_t ff_vp9_dc_qlookup[3][256]
Definition: vp9data.c:231
pthread_mutex_lock
#define pthread_mutex_lock(a)
Definition: ffprobe.c:75
ff_vp9_default_coef_probs
const uint8_t ff_vp9_default_coef_probs[4][2][2][6][6][3]
Definition: vp9data.c:1540