38 #define VP9_SYNCCODE 0x498342
66 for (i = 0; i < n; i++)
75 static void vp9_report_tile_progress(VP9Context *s, int field, int n) {
82 static void vp9_await_tile_progress(VP9Context *s, int field, int n) {
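Only the signatures of the two tile-progress helpers are visible above (lines 75 and 82); their bodies are not part of this excerpt. A minimal sketch of the usual pattern, pairing an atomic per-row counter with a condition variable (the field names entries, progress_mutex and progress_cond are assumptions, not confirmed by the excerpt), might look like:

    /* Sketch only: entries, progress_mutex and progress_cond are assumed names. */
    static void vp9_report_tile_progress(VP9Context *s, int field, int n) {
        pthread_mutex_lock(&s->progress_mutex);
        atomic_fetch_add_explicit(&s->entries[field], n, memory_order_release);
        pthread_cond_signal(&s->progress_cond);
        pthread_mutex_unlock(&s->progress_mutex);
    }

    static void vp9_await_tile_progress(VP9Context *s, int field, int n) {
        if (atomic_load_explicit(&s->entries[field], memory_order_acquire) >= n)
            return;
        pthread_mutex_lock(&s->progress_mutex);
        while (atomic_load_explicit(&s->entries[field], memory_order_relaxed) != n)
            pthread_cond_wait(&s->progress_cond, &s->progress_mutex);
        pthread_mutex_unlock(&s->progress_mutex);
    }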
173 #define HWACCEL_MAX (CONFIG_VP9_DXVA2_HWACCEL + \
174 CONFIG_VP9_D3D11VA_HWACCEL * 2 + \
175 CONFIG_VP9_NVDEC_HWACCEL + \
176 CONFIG_VP9_VAAPI_HWACCEL)
185 if (!(s->pix_fmt == s->gf_fmt && w == s->w && h == s->h)) {
189 switch (s->pix_fmt) {
192 #if CONFIG_VP9_DXVA2_HWACCEL
195 #if CONFIG_VP9_D3D11VA_HWACCEL
199 #if CONFIG_VP9_NVDEC_HWACCEL
202 #if CONFIG_VP9_VAAPI_HWACCEL
207 #if CONFIG_VP9_NVDEC_HWACCEL
210 #if CONFIG_VP9_VAAPI_HWACCEL
216 *fmtp++ = s->pix_fmt;
235 s->last_fmt = s->pix_fmt;
238 s->cols = (w + 7) >> 3;
239 s->rows = (h + 7) >> 3;
242 #define assign(var, type, n) var = (type) p; p += s->sb_cols * (n) * sizeof(*var)
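The assign() macro on line 242 carves typed sub-arrays out of one flat allocation by casting and then bumping the byte pointer p. A self-contained illustration of the same pointer-bump pattern (the buffer, sizes and variable names here are hypothetical, not taken from vp9.c):

    /* Hypothetical illustration of the pointer-bump pattern behind assign(). */
    #include <stdint.h>
    #include <stdlib.h>

    #define assign(var, type, n) var = (type) p; p += sb_cols * (n) * sizeof(*var)

    int main(void)
    {
        int sb_cols = 8;                   /* example superblock-column count */
        int16_t *block;                    /* coefficient storage */
        uint8_t *eob;                      /* end-of-block flags */
        uint8_t *base, *p;

        base = p = calloc(sb_cols, 64 * 64 * sizeof(int16_t) + 16 * 16);
        if (!p)
            return 1;
        assign(block, int16_t *, 64 * 64); /* block points at the start of the buffer */
        assign(eob,   uint8_t *, 16 * 16); /* eob follows the coefficient area */
        free(base);
        return 0;
    }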
289 int chroma_blocks, chroma_eobs, bytesperpixel = s->bytesperpixel;
297 chroma_blocks = 64 * 64 >> (s->ss_h + s->ss_v);
298 chroma_eobs = 16 * 16 >> (s->ss_h + s->ss_v);
304 16 * 16 + 2 * chroma_eobs) * sbs);
322 16 * 16 + 2 * chroma_eobs);
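Lines 289-322 size the per-superblock coefficient and end-of-block buffers from the chroma subsampling shifts ss_h/ss_v. A worked example of that arithmetic for 4:2:0 content (example values only; the surrounding allocation code is not reproduced):

    /* Worked example of the sizing arithmetic on lines 297-298 for 4:2:0. */
    #include <stdio.h>

    int main(void)
    {
        int ss_h = 1, ss_v = 1;                        /* 4:2:0: subsampled in both directions */
        int chroma_blocks = 64 * 64 >> (ss_h + ss_v);  /* 4096 >> 2 = 1024 */
        int chroma_eobs   = 16 * 16 >> (ss_h + ss_v);  /*  256 >> 2 =   64 */

        printf("chroma_blocks=%d chroma_eobs=%d\n", chroma_blocks, chroma_eobs);
        return 0;
    }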
349 return m - ((v + 1) >> 1);
356 static const int inv_map_table[255] = {
357 7, 20, 33, 46, 59, 72, 85, 98, 111, 124, 137, 150, 163, 176,
358 189, 202, 215, 228, 241, 254, 1, 2, 3, 4, 5, 6, 8, 9,
359 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24,
360 25, 26, 27, 28, 29, 30, 31, 32, 34, 35, 36, 37, 38, 39,
361 40, 41, 42, 43, 44, 45, 47, 48, 49, 50, 51, 52, 53, 54,
362 55, 56, 57, 58, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
363 70, 71, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
364 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 99, 100,
365 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115,
366 116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 127, 128, 129, 130,
367 131, 132, 133, 134, 135, 136, 138, 139, 140, 141, 142, 143, 144, 145,
368 146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
369 161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
370 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 191,
371 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 206,
372 207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221,
373 222, 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236,
374 237, 238, 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
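inv_map_table[] above and inv_recenter_nonneg() (one return path of which is visible at line 349) are the building blocks of update_prob()'s delta-coded probability updates. A minimal sketch of the recentering helper, assuming the standard VP9 inverse-recentering rule for the two branches the excerpt does not show:

    /* Sketch of inverse recentering; only the middle return is visible in the excerpt,
     * the other two branches are assumptions based on the standard VP9 scheme. */
    static inline int inv_recenter_nonneg(int v, int m)
    {
        if (v > 2 * m)                 /* large deltas are passed through unchanged */
            return v;
        if (v & 1)                     /* odd deltas land below the anchor m */
            return m - ((v + 1) >> 1); /* this is the return visible at line 349 */
        return m + (v >> 1);           /* even deltas land above the anchor */
    }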
422 s->s.h.bpp = 8 + bits * 2;
431 s->pix_fmt = pix_fmt_rgb[bits];
443 static const enum AVPixelFormat pix_fmt_for_ss[3][2 /* v */][2 /* h */] = {
455 s->pix_fmt = pix_fmt_for_ss[bits][s->ss_v][s->ss_h];
467 s->pix_fmt = pix_fmt_for_ss[bits][1][1];
478 int c, i, j, k, l, m, n, w, h, max, size2, ret, sharp;
628 for (i = 1; i <= 63; i++) {
632 limit >>= (sharp + 3) >> 2;
633 limit = FFMIN(limit, 9 - sharp);
635 limit = FFMAX(limit, 1);
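Lines 628-635 derive the loop-filter limit for each filter level i from the sharpness value: the level is right-shifted as sharpness grows, capped at 9 - sharp, and never allowed below 1. A runnable sketch of that clamping for a fixed sharpness (the enclosing if (sharp > 0) guard is an assumption; only the shift and the two clamps are visible above):

    /* Worked example of the limit clamping on lines 628-635. */
    #include <stdio.h>
    #define FFMIN(a, b) ((a) < (b) ? (a) : (b))
    #define FFMAX(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
        int sharp = 4;
        for (int i = 1; i <= 63; i += 31) {          /* sample levels 1, 32, 63 */
            int limit = i;
            if (sharp > 0) {
                limit >>= (sharp + 3) >> 2;          /* (4 + 3) >> 2 == 1: halve the level */
                limit = FFMIN(limit, 9 - sharp);     /* cap at 5 when sharp == 4 */
            }
            limit = FFMAX(limit, 1);                 /* never below 1 */
            printf("i=%2d -> limit=%d\n", i, limit); /* prints 1, 5, 5 */
        }
        return 0;
    }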
644 for (i = 0; i < 4; i++)
647 for (i = 0; i < 2; i++)
666 for (i = 0; i < 7; i++)
670 for (i = 0; i < 3; i++)
677 for (i = 0; i < 8; i++) {
691 int qyac, qydc, quvac, quvdc, lflvl, sh;
704 qyac = av_clip_uintp2(qyac, 8);
723 av_clip_uintp2(lflvl + (s->s.h.lf_delta.ref[0] * (1 << sh)), 6);
724 for (j = 1; j < 4; j++) {
747 for (max = 0; (s->sb_cols >> max) >= 4; max++) ;
748 max = FFMAX(0, max - 1);
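Lines 747-748 compute the maximum log2 tile-column count: the loop finds the first shift that would leave fewer than four superblock columns per tile, and the FFMAX backs off by one. A quick worked example:

    /* Worked example of the max log2_tile_cols computation on lines 747-748. */
    #include <stdio.h>
    #define FFMAX(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
        int sb_cols = 17;   /* e.g. a 1088-pixel-wide frame: 136 8-pixel cols -> 17 sb cols */
        int max;

        for (max = 0; (sb_cols >> max) >= 4; max++) ;
        max = FFMAX(0, max - 1);     /* largest shift still leaving >= 4 sb columns per tile */

        printf("max log2_tile_cols = %d\n", max);  /* prints 2 */
        return 0;
    }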
787 rc += n_range_coders;
793 for (i = 0; i < 3; i++) {
799 "Ref pixfmt (%s) did not match current frame (%s)",
803 } else if (refw == w && refh == h) {
806 if (w * 2 < refw || h * 2 < refh || w > 16 * refw || h > 16 * refh) {
808 "Invalid ref frame dimensions %dx%d for frame size %dx%d\n",
812 s->mvscale[i][0] = (refw << 14) / w;
813 s->mvscale[i][1] = (refh << 14) / h;
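Lines 812-813 store per-reference scale factors in Q14 fixed point, (refw << 14) / w, used when a reference frame and the current frame differ in size. For instance, a 640x360 reference against a 1280x720 frame gives 8192, i.e. 0.5 in Q14:

    /* Worked example of the Q14 scale factors from lines 812-813 (example sizes). */
    #include <stdio.h>

    int main(void)
    {
        int refw = 640, refh = 360;      /* reference frame dimensions */
        int w = 1280, h = 720;           /* current frame dimensions */
        int scale_x = (refw << 14) / w;  /* 640 * 16384 / 1280 = 8192 */
        int scale_y = (refh << 14) / h;  /* 360 * 16384 /  720 = 8192 */

        printf("mvscale = {%d, %d} (Q14, 8192 == 0.5)\n", scale_x, scale_y);
        return 0;
    }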
842 if (size2 > size - (data2 - data)) {
878 for (i = 0; i < 2; i++)
881 for (i = 0; i < 2; i++)
882 for (j = 0; j < 2; j++)
886 for (i = 0; i < 2; i++)
887 for (j = 0; j < 3; j++)
895 for (i = 0; i < 4; i++) {
898 for (j = 0; j < 2; j++)
899 for (k = 0; k < 2; k++)
900 for (l = 0; l < 6; l++)
901 for (m = 0; m < 6; m++) {
904 if (m >= 3 && l == 0)
906 for (n = 0; n < 3; n++) {
915 for (j = 0; j < 2; j++)
916 for (k = 0; k < 2; k++)
917 for (l = 0; l < 6; l++)
918 for (m = 0; m < 6; m++) {
932 for (i = 0; i < 3; i++)
936 for (i = 0; i < 7; i++)
937 for (j = 0; j < 3; j++)
943 for (i = 0; i < 4; i++)
944 for (j = 0; j < 2; j++)
949 for (i = 0; i < 4; i++)
958 for (i = 0; i < 5; i++)
967 for (i = 0; i < 5; i++) {
978 for (i = 0; i < 5; i++)
984 for (i = 0; i < 4; i++)
985 for (j = 0; j < 9; j++)
990 for (i = 0; i < 4; i++)
991 for (j = 0; j < 4; j++)
992 for (k = 0; k < 3; k++)
999 for (i = 0; i < 3; i++)
1003 for (i = 0; i < 2; i++) {
1008 for (j = 0; j < 10; j++)
1017 for (j = 0; j < 10; j++)
1023 for (i = 0; i < 2; i++) {
1024 for (j = 0; j < 2; j++)
1025 for (k = 0; k < 3; k++)
1030 for (j = 0; j < 3; j++)
1037 for (i = 0; i < 2; i++) {
1049 return (data2 - data) + size2;
1053 ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
1061 ptrdiff_t hbs = 4 >> bl;
1069 } else if (col + hbs < s->cols) {
1070 if (row + hbs < s->rows) {
1078 yoff += hbs * 8 * y_stride;
1079 uvoff += hbs * 8 * uv_stride >> s->ss_v;
1084 yoff += hbs * 8 * bytesperpixel;
1085 uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1089 decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1091 yoff + 8 * hbs * bytesperpixel,
1092 uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1093 yoff += hbs * 8 * y_stride;
1094 uvoff += hbs * 8 * uv_stride >> s->ss_v;
1095 decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1096 decode_sb(td, row + hbs, col + hbs, lflvl,
1097 yoff + 8 * hbs * bytesperpixel,
1098 uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1105 decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1107 yoff + 8 * hbs * bytesperpixel,
1108 uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1113 } else if (row + hbs < s->rows) {
1116 decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1117 yoff += hbs * 8 * y_stride;
1118 uvoff += hbs * 8 * uv_stride >> s->ss_v;
1119 decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1126 decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1132 ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
1136 ptrdiff_t hbs = 4 >> bl;
1144 } else if (td->b->bl == bl) {
1147 yoff += hbs * 8 * y_stride;
1148 uvoff += hbs * 8 * uv_stride >> s->ss_v;
1151 yoff += hbs * 8 * bytesperpixel;
1152 uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1157 if (col + hbs < s->cols) {
1158 if (row + hbs < s->rows) {
1159 decode_sb_mem(td, row, col + hbs, lflvl, yoff + 8 * hbs * bytesperpixel,
1160 uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1161 yoff += hbs * 8 * y_stride;
1162 uvoff += hbs * 8 * uv_stride >> s->ss_v;
1163 decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1165 yoff + 8 * hbs * bytesperpixel,
1166 uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1168 yoff += hbs * 8 * bytesperpixel;
1169 uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1170 decode_sb_mem(td, row, col + hbs, lflvl, yoff, uvoff, bl + 1);
1172 } else if (row + hbs < s->rows) {
1173 yoff += hbs * 8 * y_stride;
1174 uvoff += hbs * 8 * uv_stride >> s->ss_v;
1175 decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1182 int sb_start = ( idx      * n) >> log2_n;
1183 int sb_end = ((idx + 1) * n) >> log2_n;
1184 *start = FFMIN(sb_start, n) << 3;
1185 *end = FFMIN(sb_end, n) << 3;
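set_tile_offset() (lines 1182-1185) maps a tile index to a half-open range of 8-pixel block columns (or rows): the superblock range is split by the log2_n shift, clamped to n, then scaled by 8. A usage sketch with hypothetical dimensions (the main() wrapper and example values are not from the file):

    /* Usage sketch for set_tile_offset() as reconstructed from lines 1182-1185. */
    #include <stdio.h>
    #define FFMIN(a, b) ((a) < (b) ? (a) : (b))

    static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
    {
        int sb_start = ( idx      * n) >> log2_n;
        int sb_end   = ((idx + 1) * n) >> log2_n;
        *start = FFMIN(sb_start, n) << 3;
        *end   = FFMIN(sb_end,   n) << 3;
    }

    int main(void)
    {
        int sb_cols = 17, log2_tile_cols = 2;   /* 4 tile columns over 17 sb columns */

        for (int tile = 0; tile < 1 << log2_tile_cols; tile++) {
            int start, end;
            set_tile_offset(&start, &end, tile, log2_tile_cols, sb_cols);
            printf("tile %d: block columns [%d, %d)\n", tile, start, end);
        }
        return 0;   /* prints [0,32) [32,64) [64,96) [96,136) */
    }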
1204 for (i = 0; i < 3; i++) {
1209 for (i = 0; i < 8; i++) {
1229 int row, col, tile_row, tile_col, ret;
1231 int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
1233 ptrdiff_t yoff, uvoff, ls_y, ls_uv;
1256 if (tile_size > size) {
1271 for (row = tile_row_start; row < tile_row_end;
1272 row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
1274 ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
1292 td->c = &td->c_b[tile_col];
1295 for (col = tile_col_start;
1297 col += 8, yoff2 += 64 * bytesperpixel,
1298 uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1302 memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
1320 if (row + 8 < s->rows) {
1322 f->data[0] + yoff + 63 * ls_y,
1323 8 * s->cols * bytesperpixel);
1325 f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
1326 8 * s->cols * bytesperpixel >> s->ss_h);
1328 f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
1329 8 * s->cols * bytesperpixel >> s->ss_h);
1336 lflvl_ptr = s->lflvl;
1337 for (col = 0; col < s->cols;
1338 col += 8, yoff2 += 64 * bytesperpixel,
1339 uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1356 int decode_tiles_mt(AVCodecContext *avctx, void *tdata, int jobnr,
1361 ptrdiff_t uvoff, yoff, ls_y, ls_uv;
1363 unsigned tile_cols_len;
1364 int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
1375 uvoff = (64 * bytesperpixel >> s->ss_h)*(tile_col_start >> 3);
1376 yoff = (64 * bytesperpixel)*(tile_col_start >> 3);
1377 lflvl_ptr_base = s->lflvl+(tile_col_start >> 3);
1383 td->c = &td->c_b[tile_row];
1384 for (row = tile_row_start; row < tile_row_end;
1385 row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
1386 ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
1400 for (col = tile_col_start;
1402 col += 8, yoff2 += 64 * bytesperpixel,
1403 uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1406 memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
1413 tile_cols_len = tile_col_end - tile_col_start;
1414 if (row + 8 < s->rows) {
1416 f->data[0] + yoff + 63 * ls_y,
1417 8 * tile_cols_len * bytesperpixel);
1419 f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
1420 8 * tile_cols_len * bytesperpixel >> s->ss_h);
1422 f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
1423 8 * tile_cols_len * bytesperpixel >> s->ss_h);
1426 vp9_report_tile_progress(s, row >> 3, 1);
1436 ptrdiff_t uvoff, yoff, ls_y, ls_uv;
1445 for (i = 0; i < s->sb_rows; i++) {
1449 yoff = (ls_y * 64)*i;
1450 uvoff = (ls_uv * 64 >> s->ss_v)*i;
1452 for (col = 0; col < s->cols;
1453 col += 8, yoff += 64 * bytesperpixel,
1454 uvoff += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1477 } else if (ret == 0) {
1491 for (i = 0; i < 8; i++) {
1531 for (i = 0; i < 8; i++) {
1572 "Failed to allocate block buffers\n");
1578 for (i = 0; i < 4; i++) {
1579 for (j = 0; j < 2; j++)
1580 for (k = 0; k < 2; k++)
1581 for (l = 0; l < 6; l++)
1582 for (m = 0; m < 6; m++)
1596 for (i = 0; i < s->sb_rows; i++)
1614 int tile_row, tile_col;
1630 if (tile_size > size)
1656 for (j = 0; j < sizeof(s->td[i].counts) / sizeof(unsigned); j++)
1663 } while (s->pass++ == 1);
1668 for (i = 0; i < 8; i++) {
1690 for (i = 0; i < 3; i++)
1692 for (i = 0; i < 8; i++)
1701 for (i = 0; i < 3; i++) {
1709 for (i = 0; i < 8; i++) {
1744 for (i = 0; i < 3; i++) {
1747 if (ssrc->s.frames[i].tf.f->buf[0]) {
1752 for (i = 0; i < 8; i++) {
1755 if (ssrc->next_refs[i].f->buf[0]) {
1764 s->ss_v = ssrc->ss_v;
1765 s->ss_h = ssrc->ss_h;
1770 s->gf_fmt = ssrc->gf_fmt;
1773 s->s.h.bpp = ssrc->s.h.bpp;
1775 s->pix_fmt = ssrc->pix_fmt;
1800 .bsfs = "vp9_superframe_split",
1802 #if CONFIG_VP9_DXVA2_HWACCEL
1805 #if CONFIG_VP9_D3D11VA_HWACCEL
1808 #if CONFIG_VP9_D3D11VA2_HWACCEL
1811 #if CONFIG_VP9_NVDEC_HWACCEL
1814 #if CONFIG_VP9_VAAPI_HWACCEL