#define VP9_SYNCCODE 0x498342
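/* Sketch of the intent (assumption from the VP9 spec, not part of this file):
 * 0x498342 is the 24-bit frame sync code (bytes 0x49 0x83 0x42) that the spec
 * places in the uncompressed header of key frames and intra-only frames. */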
for (i = 0; i < n; i++)
static void vp9_report_tile_progress(VP9Context *s, int field, int n) {

static void vp9_await_tile_progress(VP9Context *s, int field, int n) {
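/* Sketch of the report/await pattern these two helpers implement for
 * row-level synchronisation between the tile-decoding threads and the
 * loopfilter thread (an assumption drawn from their usage below, not a
 * verbatim body; the counter/mutex/cond names are illustrative):
 *
 *   report(s, field, n): lock mutex; entries[field] += n; signal cond; unlock;
 *   await(s, field, n):  lock mutex; while (entries[field] < n) wait on cond;
 *                        unlock;
 */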
#define HWACCEL_MAX (CONFIG_VP9_DXVA2_HWACCEL + CONFIG_VP9_D3D11VA_HWACCEL * 2 + CONFIG_VP9_VAAPI_HWACCEL)
if (!(s->pix_fmt == s->gf_fmt && w == s->w && h == s->h)) {
switch (s->pix_fmt) {
#if CONFIG_VP9_DXVA2_HWACCEL
#if CONFIG_VP9_D3D11VA_HWACCEL
#if CONFIG_VP9_VAAPI_HWACCEL
#if CONFIG_VP9_VAAPI_HWACCEL
*fmtp++ = s->pix_fmt;
s->last_fmt = s->pix_fmt;
s->cols = (w + 7) >> 3;
s->rows = (h + 7) >> 3;
#define assign(var, type, n) var = (type) p; p += s->sb_cols * (n) * sizeof(*var)
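/* Usage sketch for assign() (illustrative, assuming the usual carving of one
 * big per-frame allocation walked by the cursor p into consecutive
 * per-superblock-column arrays):
 *
 *   assign(s->intra_pred_data[0],  uint8_t *, 64 * bytesperpixel);
 *   assign(s->above_partition_ctx, uint8_t *, 8);
 *
 * each call advancing p by s->sb_cols * (n) * sizeof(*var). */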
int chroma_blocks, chroma_eobs, bytesperpixel = s->bytesperpixel;
chroma_blocks = 64 * 64 >> (s->ss_h + s->ss_v);
chroma_eobs = 16 * 16 >> (s->ss_h + s->ss_v);
16 * 16 + 2 * chroma_eobs) * sbs);
16 * 16 + 2 * chroma_eobs);
return m - ((v + 1) >> 1);
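/* The return above is the odd-value branch of the spec's inv_recenter_nonneg()
 * helper; a sketch of the full mapping of a nonnegative delta v back around m
 * (following the VP9 spec, not copied from this file):
 *
 *   if (v > 2 * m) return v;
 *   if (v & 1)     return m - ((v + 1) >> 1);
 *   else           return m + (v >> 1);
 */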
static const int inv_map_table[255] = {
7, 20, 33, 46, 59, 72, 85, 98, 111, 124, 137, 150, 163, 176,
189, 202, 215, 228, 241, 254, 1, 2, 3, 4, 5, 6, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24,
25, 26, 27, 28, 29, 30, 31, 32, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 47, 48, 49, 50, 51, 52, 53, 54,
55, 56, 57, 58, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 99, 100,
101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115,
116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 127, 128, 129, 130,
131, 132, 133, 134, 135, 136, 138, 139, 140, 141, 142, 143, 144, 145,
146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 191,
192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 206,
207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221,
222, 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236,
237, 238, 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
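/* Sketch of how the table is consumed (an assumption based on the VP9
 * probability-update rule, not a verbatim excerpt): update_prob() decodes a
 * small delta index d, maps it through inv_map_table[], and re-centers it
 * around the current probability p in [1, 255]:
 *
 *   return p <= 128 ?   1 + inv_recenter_nonneg(inv_map_table[d], p - 1)
 *                   : 255 - inv_recenter_nonneg(inv_map_table[d], 254 - p);
 */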
s->s.h.bpp = 8 + bits * 2;
s->pix_fmt = pix_fmt_rgb[bits];
static const enum AVPixelFormat pix_fmt_for_ss[3][2][2] = {
s->pix_fmt = pix_fmt_for_ss[bits][1][1];
int c, i, j, k, l, m, n, w, h, max, size2, ret, sharp;
for (i = 1; i <= 63; i++) {
limit >>= (sharp + 3) >> 2;
limit = FFMIN(limit, 9 - sharp);
limit = FFMAX(limit, 1);
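/* These statements fill the per-level loopfilter limit LUT: for each filter
 * level i in 1..63 the limit starts at i, is reduced according to the
 * sharpness syntax element (sharp) and clamped to at least 1; the matching
 * "mb" limit is then derived from it (2 * (i + 2) + limit in the VP9
 * loopfilter). */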
for (i = 0; i < 4; i++)
for (i = 0; i < 2; i++)
for (i = 0; i < 7; i++)
for (i = 0; i < 3; i++)
for (i = 0; i < 8; i++) {
int qyac, qydc, quvac, quvdc, lflvl, sh;
qyac = av_clip_uintp2(qyac, 8);
av_clip_uintp2(lflvl + (s->s.h.lf_delta.ref[0] * (1 << sh)), 6);
for (j = 1; j < 4; j++) {
for (max = 0; (s->sb_cols >> max) >= 4; max++) ;
max = FFMAX(0, max - 1);
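/* The loop and clamp above compute the largest log2 tile-column count for
 * which every tile column still spans at least four 64x64 superblocks, i.e.
 * the max_log2_tile_cols bound of the VP9 spec. */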
rc += n_range_coders;
for (i = 0; i < 3; i++) {
"Ref pixfmt (%s) did not match current frame (%s)",
} else if (refw == w && refh == h) {
if (w * 2 < refw || h * 2 < refh || w > 16 * refw || h > 16 * refh) {
"Invalid ref frame dimensions %dx%d for frame size %dx%d\n",
s->mvscale[i][0] = (refw << 14) / w;
s->mvscale[i][1] = (refh << 14) / h;
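/* mvscale[][] holds Q14 fixed-point ratios between reference and current
 * frame dimensions, used to scale motion vectors for differently sized
 * references. Worked example (hypothetical sizes): refw = 1920, w = 960
 * gives (1920 << 14) / 960 = 32768, i.e. 2.0 in Q14. */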
if (size2 > size - (data2 - data)) {
for (i = 0; i < 2; i++)
for (i = 0; i < 2; i++)
for (j = 0; j < 2; j++)
for (i = 0; i < 2; i++)
for (j = 0; j < 3; j++)
for (i = 0; i < 4; i++) {
for (j = 0; j < 2; j++)
for (k = 0; k < 2; k++)
for (l = 0; l < 6; l++)
for (m = 0; m < 6; m++) {
if (m >= 3 && l == 0)
for (n = 0; n < 3; n++) {
for (j = 0; j < 2; j++)
for (k = 0; k < 2; k++)
for (l = 0; l < 6; l++)
for (m = 0; m < 6; m++) {
for (i = 0; i < 3; i++)
for (i = 0; i < 7; i++)
for (j = 0; j < 3; j++)
for (i = 0; i < 4; i++)
for (j = 0; j < 2; j++)
for (i = 0; i < 4; i++)
for (i = 0; i < 5; i++)
for (i = 0; i < 5; i++) {
for (i = 0; i < 5; i++)
for (i = 0; i < 4; i++)
for (j = 0; j < 9; j++)
for (i = 0; i < 4; i++)
for (j = 0; j < 4; j++)
for (k = 0; k < 3; k++)
for (i = 0; i < 3; i++)
for (i = 0; i < 2; i++) {
for (j = 0; j < 10; j++)
for (j = 0; j < 10; j++)
for (i = 0; i < 2; i++) {
for (j = 0; j < 2; j++)
for (k = 0; k < 3; k++)
for (j = 0; j < 3; j++)
for (i = 0; i < 2; i++) {
return (data2 - data) + size2;
ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
ptrdiff_t hbs = 4 >> bl;
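/* hbs is half the current block size in units of 8x8 pixels (4 at the 64x64
 * level, 2 at 32x32, 1 at 16x16), so the row/col + hbs checks below ask
 * whether the second half of a split partition still lies inside the frame;
 * the luma/chroma offsets advance by hbs * 8 pixels, with chroma scaled by
 * ss_h/ss_v. */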
} else if (col + hbs < s->cols) {
if (row + hbs < s->rows) {
yoff += hbs * 8 * y_stride;
uvoff += hbs * 8 * uv_stride >> s->ss_v;
yoff += hbs * 8 * bytesperpixel;
uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
yoff + 8 * hbs * bytesperpixel,
uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
yoff += hbs * 8 * y_stride;
uvoff += hbs * 8 * uv_stride >> s->ss_v;
decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
decode_sb(td, row + hbs, col + hbs, lflvl,
yoff + 8 * hbs * bytesperpixel,
uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
yoff + 8 * hbs * bytesperpixel,
uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
} else if (row + hbs < s->rows) {
decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
yoff += hbs * 8 * y_stride;
uvoff += hbs * 8 * uv_stride >> s->ss_v;
decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
ptrdiff_t hbs = 4 >> bl;
} else if (td->b->bl == bl) {
yoff += hbs * 8 * y_stride;
uvoff += hbs * 8 * uv_stride >> s->ss_v;
yoff += hbs * 8 * bytesperpixel;
uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
if (col + hbs < s->cols) {
if (row + hbs < s->rows) {
decode_sb_mem(td, row, col + hbs, lflvl, yoff + 8 * hbs * bytesperpixel,
uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
yoff += hbs * 8 * y_stride;
uvoff += hbs * 8 * uv_stride >> s->ss_v;
decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
yoff + 8 * hbs * bytesperpixel,
uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
yoff += hbs * 8 * bytesperpixel;
uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
decode_sb_mem(td, row, col + hbs, lflvl, yoff, uvoff, bl + 1);
} else if (row + hbs < s->rows) {
yoff += hbs * 8 * y_stride;
uvoff += hbs * 8 * uv_stride >> s->ss_v;
decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
int sb_start = ( idx      * n) >> log2_n;
int sb_end   = ((idx + 1) * n) >> log2_n;
*start = FFMIN(sb_start, n) << 3;
*end   = FFMIN(sb_end,   n) << 3;
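/* Worked example (hypothetical numbers): with n = 19 superblock columns and
 * log2_n = 2 (four tile columns), tile idx 1 gets sb_start = (1 * 19) >> 2 = 4
 * and sb_end = (2 * 19) >> 2 = 9, so *start = 32 and *end = 72 in 8x8-block
 * units after the << 3. */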
for (i = 0; i < 3; i++) {
for (i = 0; i < 8; i++) {
int row, col, tile_row, tile_col, ret;
int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
ptrdiff_t yoff, uvoff, ls_y, ls_uv;
if (tile_size > size) {
for (row = tile_row_start; row < tile_row_end;
     row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
td->c = &td->c_b[tile_col];
for (col = tile_col_start;
     col < tile_col_end;
     col += 8, yoff2 += 64 * bytesperpixel,
     uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
if (row + 8 < s->rows) {
f->data[0] + yoff + 63 * ls_y,
8 * s->cols * bytesperpixel);
f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
8 * s->cols * bytesperpixel >> s->ss_h);
f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
8 * s->cols * bytesperpixel >> s->ss_h);
lflvl_ptr = s->lflvl;
for (col = 0; col < s->cols;
     col += 8, yoff2 += 64 * bytesperpixel,
     uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
int decode_tiles_mt(AVCodecContext *avctx, void *tdata, int jobnr,
ptrdiff_t uvoff, yoff, ls_y, ls_uv;
unsigned tile_cols_len;
int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
uvoff = (64 * bytesperpixel >> s->ss_h)*(tile_col_start >> 3);
yoff = (64 * bytesperpixel)*(tile_col_start >> 3);
lflvl_ptr_base = s->lflvl+(tile_col_start >> 3);
td->c = &td->c_b[tile_row];
for (row = tile_row_start; row < tile_row_end;
     row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
for (col = tile_col_start;
     col < tile_col_end;
     col += 8, yoff2 += 64 * bytesperpixel,
     uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
tile_cols_len = tile_col_end - tile_col_start;
if (row + 8 < s->rows) {
f->data[0] + yoff + 63 * ls_y,
8 * tile_cols_len * bytesperpixel);
f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
8 * tile_cols_len * bytesperpixel >> s->ss_h);
f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
8 * tile_cols_len * bytesperpixel >> s->ss_h);
vp9_report_tile_progress(s, row >> 3, 1);
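/* Each fully decoded superblock row is reported here so the loopfilter worker
 * (synchronised through the progress helpers near the top of the file) can
 * start filtering it while later rows are still being decoded. */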
ptrdiff_t uvoff, yoff, ls_y, ls_uv;
for (i = 0; i < s->sb_rows; i++) {
yoff = (ls_y * 64)*i;
uvoff = (ls_uv * 64 >> s->ss_v)*i;
for (col = 0; col < s->cols;
     col += 8, yoff += 64 * bytesperpixel,
     uvoff += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
} else if (ret == 0) {
for (i = 0; i < 8; i++) {
for (i = 0; i < 8; i++) {
"Failed to allocate block buffers\n");
for (i = 0; i < 4; i++) {
for (j = 0; j < 2; j++)
for (k = 0; k < 2; k++)
for (l = 0; l < 6; l++)
for (m = 0; m < 6; m++)
for (i = 0; i < s->sb_rows; i++)
int tile_row, tile_col;
if (tile_size > size)
for (j = 0; j < sizeof(s->td[i].counts) / sizeof(unsigned); j++)
} while (s->pass++ == 1);
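/* Two-pass decode loop: as far as can be inferred from the surrounding code,
 * when backward probability adaptation is needed under frame threading the
 * frame is parsed into block structures in pass 1 and reconstructed in
 * pass 2; in the common single-pass case s->pass stays 0 and the body runs
 * once. */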
for (i = 0; i < 8; i++) {
for (i = 0; i < 3; i++)
for (i = 0; i < 8; i++)
for (i = 0; i < 3; i++) {
for (i = 0; i < 8; i++) {
for (i = 0; i < 3; i++) {
if (ssrc->s.frames[i].tf.f->buf[0]) {
for (i = 0; i < 8; i++) {
if (ssrc->next_refs[i].f->buf[0]) {
s->ss_v = ssrc->ss_v;
s->ss_h = ssrc->ss_h;
s->gf_fmt = ssrc->gf_fmt;
s->s.h.bpp = ssrc->s.h.bpp;
s->pix_fmt = ssrc->pix_fmt;