FFmpeg
vulkan_vp9.c
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "vp9shared.h"

#include "vulkan_decode.h"

const FFVulkanDecodeDescriptor ff_vk_dec_vp9_desc = {
    .codec_id         = AV_CODEC_ID_VP9,
    .decode_extension = FF_VK_EXT_VIDEO_DECODE_VP9,
    .queue_flags      = VK_QUEUE_VIDEO_DECODE_BIT_KHR,
    .decode_op        = VK_VIDEO_CODEC_OPERATION_DECODE_VP9_BIT_KHR,
    .ext_props = {
        .extensionName = VK_STD_VULKAN_VIDEO_CODEC_VP9_DECODE_EXTENSION_NAME,
        .specVersion   = VK_STD_VULKAN_VIDEO_CODEC_VP9_DECODE_SPEC_VERSION,
    },
};

typedef struct VP9VulkanDecodePicture {
    FFVulkanDecodePicture vp;

    /* TODO: investigate if this can be removed to make decoding completely
     * independent. */
    FFVulkanDecodeContext *dec;

    /* Current picture */
    StdVideoVP9ColorConfig color_config;
    StdVideoVP9Segmentation segmentation;
    StdVideoVP9LoopFilter loop_filter;
    StdVideoDecodeVP9PictureInfo std_pic_info;
    VkVideoDecodeVP9PictureInfoKHR vp9_pic_info;

    const VP9Frame *ref_src[8];

    uint8_t frame_id_set;
    uint8_t frame_id;
    uint8_t ref_frame_sign_bias_mask;
} VP9VulkanDecodePicture;

static int vk_vp9_fill_pict(AVCodecContext *avctx, const VP9Frame **ref_src,
                            VkVideoReferenceSlotInfoKHR *ref_slot, /* Main structure */
                            VkVideoPictureResourceInfoKHR *ref,    /* Goes in ^ */
                            const VP9Frame *pic, int is_current)
{
    FFVulkanDecodeContext *dec = avctx->internal->hwaccel_priv_data;
    FFVulkanDecodeShared *ctx = dec->shared_ctx;
    VP9VulkanDecodePicture *hp = pic->hwaccel_picture_private;
    FFVulkanDecodePicture *vkpic = &hp->vp;

    int err = ff_vk_decode_prepare_frame(dec, pic->tf.f, vkpic, is_current,
                                         dec->dedicated_dpb);
    if (err < 0)
        return err;

    *ref = (VkVideoPictureResourceInfoKHR) {
        .sType = VK_STRUCTURE_TYPE_VIDEO_PICTURE_RESOURCE_INFO_KHR,
        .codedOffset = (VkOffset2D){ 0, 0 },
        .codedExtent = (VkExtent2D){ pic->tf.f->width, pic->tf.f->height },
        .baseArrayLayer = (dec->dedicated_dpb && ctx->common.layered_dpb) ?
                          hp->frame_id : 0,
        .imageViewBinding = vkpic->view.ref[0],
    };

    *ref_slot = (VkVideoReferenceSlotInfoKHR) {
        .sType = VK_STRUCTURE_TYPE_VIDEO_REFERENCE_SLOT_INFO_KHR,
        .slotIndex = hp->frame_id,
        .pPictureResource = ref,
    };

    if (ref_src)
        *ref_src = pic;

    return 0;
}

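/* The raw interpolation filter value in the frame header does not follow the
 * order of the Vulkan std enum, so remap it here. */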
static enum StdVideoVP9InterpolationFilter remap_interp(uint8_t is_filter_switchable,
                                                        uint8_t raw_interpolation_filter_type)
{
    static const enum StdVideoVP9InterpolationFilter remap[] = {
        STD_VIDEO_VP9_INTERPOLATION_FILTER_EIGHTTAP_SMOOTH,
        STD_VIDEO_VP9_INTERPOLATION_FILTER_EIGHTTAP,
        STD_VIDEO_VP9_INTERPOLATION_FILTER_EIGHTTAP_SHARP,
        STD_VIDEO_VP9_INTERPOLATION_FILTER_BILINEAR,
    };
    if (is_filter_switchable)
        return STD_VIDEO_VP9_INTERPOLATION_FILTER_SWITCHABLE;
    return remap[raw_interpolation_filter_type];
}

static int vk_vp9_start_frame(AVCodecContext *avctx,
                              av_unused const AVBufferRef *buffer_ref,
                              av_unused const uint8_t *buffer,
                              av_unused uint32_t size)
{
    int err;
    int ref_count = 0;
    const VP9SharedContext *s = avctx->priv_data;
    uint32_t frame_id_alloc_mask = 0;

    const VP9Frame *pic = &s->frames[CUR_FRAME];
    VP9VulkanDecodePicture *ap = pic->hwaccel_picture_private;
    uint8_t profile = (pic->frame_header->profile_high_bit << 1) | pic->frame_header->profile_low_bit;

    FFVulkanDecodeContext *dec = avctx->internal->hwaccel_priv_data;
    FFVulkanDecodePicture *vp = &ap->vp;

    /* Use the current frame_ids in ref_frames[] to decide occupied frame_ids */
    for (int i = 0; i < STD_VIDEO_VP9_NUM_REF_FRAMES; i++) {
        const VP9VulkanDecodePicture *rp = s->ref_frames[i].hwaccel_picture_private;
        if (rp)
            frame_id_alloc_mask |= 1 << rp->frame_id;
    }

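    /* Pick the lowest slot index that is not occupied by any reference frame. */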
    if (!ap->frame_id_set) {
        unsigned slot_idx = 0;
        for (unsigned i = 0; i < 32; i++) {
            if (!(frame_id_alloc_mask & (1 << i))) {
                slot_idx = i;
                break;
            }
        }
        ap->frame_id = slot_idx;
        ap->frame_id_set = 1;
        frame_id_alloc_mask |= (1 << slot_idx);
    }

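    /* Fill reference slots for the active references, skipping absent frames
     * and deduplicating references that share the same DPB slot. */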
    for (int i = 0; i < STD_VIDEO_VP9_REFS_PER_FRAME; i++) {
        const int idx = pic->frame_header->ref_frame_idx[i];
        const VP9Frame *ref_frame = &s->ref_frames[idx];
        VP9VulkanDecodePicture *hp = ref_frame->hwaccel_picture_private;
        int found = 0;

        if (!ref_frame->tf.f)
            continue;

        for (int j = 0; j < ref_count; j++) {
            if (vp->ref_slots[j].slotIndex == hp->frame_id) {
                found = 1;
                break;
            }
        }
        if (found)
            continue;

        err = vk_vp9_fill_pict(avctx, &ap->ref_src[ref_count],
                               &vp->ref_slots[ref_count], &vp->refs[ref_count],
                               ref_frame, 0);
        if (err < 0)
            return err;

        ref_count++;
    }

    err = vk_vp9_fill_pict(avctx, NULL, &vp->ref_slot, &vp->ref,
                           pic, 1);
    if (err < 0)
        return err;

    ap->loop_filter = (StdVideoVP9LoopFilter) {
        .flags = (StdVideoVP9LoopFilterFlags) {
            .loop_filter_delta_enabled = pic->frame_header->loop_filter_delta_enabled,
            .loop_filter_delta_update = pic->frame_header->loop_filter_delta_update,
        },
        .loop_filter_level = pic->frame_header->loop_filter_level,
        .loop_filter_sharpness = pic->frame_header->loop_filter_sharpness,
        .update_ref_delta = 0x0,
        .update_mode_delta = 0x0,
    };

    for (int i = 0; i < 2; i++)
        ap->loop_filter.update_mode_delta |= pic->frame_header->update_mode_delta[i];

    for (int i = 0; i < STD_VIDEO_VP9_MAX_REF_FRAMES; i++) {
        ap->loop_filter.loop_filter_ref_deltas[i] = pic->frame_header->loop_filter_ref_deltas[i];
        ap->loop_filter.update_ref_delta |= pic->frame_header->update_ref_delta[i];
    }
    for (int i = 0; i < STD_VIDEO_VP9_LOOP_FILTER_ADJUSTMENTS; i++)
        ap->loop_filter.loop_filter_mode_deltas[i] = pic->frame_header->loop_filter_mode_deltas[i];

    ap->segmentation = (StdVideoVP9Segmentation) {
        .flags = (StdVideoVP9SegmentationFlags) {
            .segmentation_update_map = pic->frame_header->segmentation_update_map,
            .segmentation_temporal_update = pic->frame_header->segmentation_temporal_update,
            .segmentation_update_data = pic->frame_header->segmentation_update_data,
            .segmentation_abs_or_delta_update = pic->frame_header->segmentation_abs_or_delta_update,
        },
    };

    for (int i = 0; i < STD_VIDEO_VP9_MAX_SEGMENTATION_TREE_PROBS; i++)
        ap->segmentation.segmentation_tree_probs[i] = pic->frame_header->segmentation_tree_probs[i];
    for (int i = 0; i < STD_VIDEO_VP9_MAX_SEGMENTATION_PRED_PROB; i++)
        ap->segmentation.segmentation_pred_prob[i] = pic->frame_header->segmentation_pred_prob[i];
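    /* Pack per-segment feature enable bits into a mask and apply the sign
     * to each feature value. */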
    for (int i = 0; i < STD_VIDEO_VP9_MAX_SEGMENTS; i++) {
        ap->segmentation.FeatureEnabled[i] = 0x0;
        for (int j = 0; j < STD_VIDEO_VP9_SEG_LVL_MAX; j++) {
            ap->segmentation.FeatureEnabled[i] |= pic->frame_header->feature_enabled[i][j] << j;
            ap->segmentation.FeatureData[i][j] = pic->frame_header->feature_sign[i][j] ?
                                                 -pic->frame_header->feature_value[i][j] :
                                                 +pic->frame_header->feature_value[i][j];
        }
    }

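    /* Profiles 0 and 1 are always 8-bit; for profiles 2 and 3 the
     * ten_or_twelve_bit flag selects 10- or 12-bit depth. */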
    ap->color_config = (StdVideoVP9ColorConfig) {
        .flags = (StdVideoVP9ColorConfigFlags) {
            .color_range = pic->frame_header->color_range,
        },
        .BitDepth = profile < 2 ? 8 :
                    pic->frame_header->ten_or_twelve_bit ? 12 : 10,
        .subsampling_x = pic->frame_header->subsampling_x,
        .subsampling_y = pic->frame_header->subsampling_y,
        .color_space = pic->frame_header->color_space,
    };

    ap->std_pic_info = (StdVideoDecodeVP9PictureInfo) {
        .flags = (StdVideoDecodeVP9PictureInfoFlags) {
            .error_resilient_mode = pic->frame_header->error_resilient_mode,
            .intra_only = pic->frame_header->intra_only,
            .allow_high_precision_mv = pic->frame_header->allow_high_precision_mv,
            .refresh_frame_context = pic->frame_header->refresh_frame_context,
            .frame_parallel_decoding_mode = pic->frame_header->frame_parallel_decoding_mode,
            .segmentation_enabled = pic->frame_header->segmentation_enabled,
            .show_frame = pic->frame_header->show_frame,
            .UsePrevFrameMvs = s->h.use_last_frame_mvs,
        },
        .profile = profile,
        .frame_type = pic->frame_header->frame_type,
        .frame_context_idx = pic->frame_header->frame_context_idx,
        .reset_frame_context = pic->frame_header->reset_frame_context,
        .refresh_frame_flags = pic->frame_header->refresh_frame_flags,
        .ref_frame_sign_bias_mask = 0x0,
        .interpolation_filter = remap_interp(pic->frame_header->is_filter_switchable,
                                             pic->frame_header->raw_interpolation_filter_type),
        .base_q_idx = pic->frame_header->base_q_idx,
        .delta_q_y_dc = pic->frame_header->delta_q_y_dc,
        .delta_q_uv_dc = pic->frame_header->delta_q_uv_dc,
        .delta_q_uv_ac = pic->frame_header->delta_q_uv_ac,
        .tile_cols_log2 = pic->frame_header->tile_cols_log2,
        .tile_rows_log2 = pic->frame_header->tile_rows_log2,
        /* Reserved */
        .pColorConfig = &ap->color_config,
        .pLoopFilter = &ap->loop_filter,
        .pSegmentation = &ap->segmentation,
    };

    for (int i = VP9_LAST_FRAME; i <= VP9_ALTREF_FRAME; i++)
        ap->std_pic_info.ref_frame_sign_bias_mask |= pic->frame_header->ref_frame_sign_bias[i] << i;

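    /* Offsets into the submitted bitstream data: the compressed header
     * immediately follows the uncompressed header, and tile data follows both. */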
    ap->vp9_pic_info = (VkVideoDecodeVP9PictureInfoKHR) {
        .sType = VK_STRUCTURE_TYPE_VIDEO_DECODE_VP9_PICTURE_INFO_KHR,
        .pStdPictureInfo = &ap->std_pic_info,
        .uncompressedHeaderOffset = 0,
        .compressedHeaderOffset = s->h.uncompressed_header_size,
        .tilesOffset = s->h.uncompressed_header_size +
                       s->h.compressed_header_size,
    };

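    /* Map each VP9 reference name (LAST/GOLDEN/ALTREF) to the DPB slot index
     * of the frame it refers to, or -1 if that reference is absent. */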
    for (int i = 0; i < STD_VIDEO_VP9_REFS_PER_FRAME; i++) {
        const int idx = pic->frame_header->ref_frame_idx[i];
        const VP9Frame *ref_frame = &s->ref_frames[idx];
        VP9VulkanDecodePicture *hp = ref_frame->hwaccel_picture_private;

        if (!ref_frame->tf.f)
            ap->vp9_pic_info.referenceNameSlotIndices[i] = -1;
        else
            ap->vp9_pic_info.referenceNameSlotIndices[i] = hp->frame_id;
    }

    vp->decode_info = (VkVideoDecodeInfoKHR) {
        .sType = VK_STRUCTURE_TYPE_VIDEO_DECODE_INFO_KHR,
        .pNext = &ap->vp9_pic_info,
        .flags = 0x0,
        .pSetupReferenceSlot = &vp->ref_slot,
        .referenceSlotCount = ref_count,
        .pReferenceSlots = vp->ref_slots,
        .dstPictureResource = (VkVideoPictureResourceInfoKHR) {
            .sType = VK_STRUCTURE_TYPE_VIDEO_PICTURE_RESOURCE_INFO_KHR,
            .codedOffset = (VkOffset2D){ 0, 0 },
            .codedExtent = (VkExtent2D){ pic->tf.f->width, pic->tf.f->height },
            .baseArrayLayer = 0,
            .imageViewBinding = vp->view.out[0],
        },
    };

    ap->dec = dec;

    return 0;
}

static int vk_vp9_decode_slice(AVCodecContext *avctx,
                               const uint8_t *data,
                               uint32_t size)
{
    int err;
    const VP9SharedContext *s = avctx->priv_data;
    VP9VulkanDecodePicture *ap = s->frames[CUR_FRAME].hwaccel_picture_private;
    FFVulkanDecodePicture *vp = &ap->vp;

    err = ff_vk_decode_add_slice(avctx, vp, data, size, 0, NULL, NULL);
    if (err < 0)
        return err;

    return 0;
}

static int vk_vp9_end_frame(AVCodecContext *avctx)
{
    const VP9SharedContext *s = avctx->priv_data;

    const VP9Frame *pic = &s->frames[CUR_FRAME];
    VP9VulkanDecodePicture *ap = pic->hwaccel_picture_private;
    FFVulkanDecodePicture *vp = &ap->vp;
    FFVulkanDecodePicture *rvp[STD_VIDEO_VP9_REFS_PER_FRAME] = { 0 };
    AVFrame *rav[STD_VIDEO_VP9_REFS_PER_FRAME] = { 0 };

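    /* Collect the AVFrames and Vulkan picture contexts of every reference slot
     * used by this frame, so the decode submission can depend on them. */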
    for (int i = 0; i < vp->decode_info.referenceSlotCount; i++) {
        const VP9Frame *rp = ap->ref_src[i];
        VP9VulkanDecodePicture *rhp = rp->hwaccel_picture_private;

        rvp[i] = &rhp->vp;
        rav[i] = ap->ref_src[i]->tf.f;
    }

    av_log(avctx, AV_LOG_VERBOSE, "Decoding frame, %"SIZE_SPECIFIER" bytes\n",
           vp->slices_size);

    return ff_vk_decode_frame(avctx, pic->tf.f, vp, rav, rvp);
}

static void vk_vp9_free_frame_priv(AVRefStructOpaque _hwctx, void *data)
{
    AVHWDeviceContext *hwctx = _hwctx.nc;
    VP9VulkanDecodePicture *ap = data;

    /* Free frame resources, this also destroys the session parameters. */
    ff_vk_decode_free_frame(hwctx, &ap->vp);
}

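/* Hardware accelerator callbacks for Vulkan-based VP9 decoding. */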
const FFHWAccel ff_vp9_vulkan_hwaccel = {
    .p.name                = "vp9_vulkan",
    .p.type                = AVMEDIA_TYPE_VIDEO,
    .p.id                  = AV_CODEC_ID_VP9,
    .p.pix_fmt             = AV_PIX_FMT_VULKAN,
    .start_frame           = &vk_vp9_start_frame,
    .decode_slice          = &vk_vp9_decode_slice,
    .end_frame             = &vk_vp9_end_frame,
    .free_frame_priv       = &vk_vp9_free_frame_priv,
    .frame_priv_data_size  = sizeof(VP9VulkanDecodePicture),
    .init                  = &ff_vk_decode_init,
    .update_thread_context = &ff_vk_update_thread_context,
    .flush                 = &ff_vk_decode_flush,
    .uninit                = &ff_vk_decode_uninit,
    .frame_params          = &ff_vk_frame_params,
    .priv_data_size        = sizeof(FFVulkanDecodeContext),
    .caps_internal         = HWACCEL_CAP_ASYNC_SAFE,
};