#include <vdpau/vdpau.h>

/* vdpau_h264_clear_rf(): mark a VdpReferenceFrameH264 slot as unused. */
rf->surface             = VDP_INVALID_HANDLE;
rf->is_long_term        = VDP_FALSE;
rf->top_is_reference    = VDP_FALSE;
rf->bottom_is_reference = VDP_FALSE;
rf->field_order_cnt[0]  = 0;
rf->field_order_cnt[1]  = 0;

/* vdpau_h264_set_rf(): fill a slot from an H264Picture.  When no explicit
 * picture structure is passed in, fall back to the picture's own reference
 * type before filling the entry. */
if (pic_structure == 0)
    pic_structure = pic->reference;

rf->surface = surface;
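Only the slot reset and the surface assignment survive in the excerpt above; as a usage illustration, a whole 16-entry table (H264_RF_COUNT below) can be wiped the same way before it is refilled. A minimal, self-contained sketch, with clear_reference_table() as a hypothetical helper name:

#include <vdpau/vdpau.h>

/* Hypothetical helper: reset every slot of a VdpReferenceFrameH264 table the
 * same way vdpau_h264_clear_rf() resets a single slot above. */
static void clear_reference_table(VdpReferenceFrameH264 *rf, unsigned count)
{
    for (unsigned i = 0; i < count; i++) {
        rf[i].surface             = VDP_INVALID_HANDLE;
        rf[i].is_long_term        = VDP_FALSE;
        rf[i].top_is_reference    = VDP_FALSE;
        rf[i].bottom_is_reference = VDP_FALSE;
        rf[i].field_order_cnt[0]  = 0;
        rf[i].field_order_cnt[1]  = 0;
        rf[i].frame_idx           = 0;
    }
}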
/* vdpau_h264_set_reference_frames(): walk the long-term and short-term
 * reference lists and fill info->referenceFrames, skipping duplicates. */
VdpReferenceFrameH264 *rf = &info->referenceFrames[0];
#define H264_RF_COUNT FF_ARRAY_ELEMS(info->referenceFrames)

int i, ls = list ? 16 : h->short_ref_count;

for (i = 0; i < ls; ++i) {
    VdpReferenceFrameH264 *rf2;
    VdpVideoSurface surface_ref;

    /* Scan the entries filled so far for one describing the same picture. */
    rf2 = &info->referenceFrames[0];
    if ((rf2->surface      == surface_ref) &&
        (rf2->is_long_term == pic->long_ref) &&
        (rf2->frame_idx    == pic_frame_idx))
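The trailing condition above is the duplicate check: a picture that already has an entry in referenceFrames (same surface, same long/short-term type, same frame index) must not be added a second time. A self-contained sketch of that lookup, with find_reference() as a hypothetical name:

#include <stddef.h>
#include <vdpau/vdpau.h>

/* Hypothetical helper: scan the already-filled part of the table, [begin, end),
 * for an entry that describes the same picture. */
static VdpReferenceFrameH264 *find_reference(VdpReferenceFrameH264 *begin,
                                             VdpReferenceFrameH264 *end,
                                             VdpVideoSurface surface,
                                             VdpBool is_long_term,
                                             unsigned frame_idx)
{
    for (VdpReferenceFrameH264 *rf = begin; rf != end; rf++) {
        if (rf->surface == surface &&
            rf->is_long_term == is_long_term &&
            rf->frame_idx == frame_idx)
            return rf;    /* already present */
    }
    return NULL;          /* not found: the caller appends a new entry */
}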
/* vdpau_h264_start_frame(): translate the active SPS/PPS and slice-header
 * state into VDPAU's VdpPictureInfoH264. */
#ifdef VDP_DECODER_PROFILE_H264_HIGH_444_PREDICTIVE
VdpPictureInfoH264Predictive *info2 = &pic_ctx->info.h264_predictive;
#endif

info->slice_count                            = 0;
info->is_reference                           = h->nal_ref_idc != 0;
info->frame_num                              = h->poc.frame_num;
info->num_ref_frames                         = sps->ref_frame_count;
info->mb_adaptive_frame_field_flag           = sps->mb_aff && !info->field_pic_flag;
info->constrained_intra_pred_flag            = pps->constrained_intra_pred;
info->weighted_pred_flag                     = pps->weighted_pred;
info->weighted_bipred_idc                    = pps->weighted_bipred_idc;
info->frame_mbs_only_flag                    = sps->frame_mbs_only_flag;
info->transform_8x8_mode_flag                = pps->transform_8x8_mode;
info->chroma_qp_index_offset                 = pps->chroma_qp_index_offset[0];
info->second_chroma_qp_index_offset          = pps->chroma_qp_index_offset[1];
info->pic_init_qp_minus26                    = pps->init_qp - 26;
info->num_ref_idx_l0_active_minus1           = pps->ref_count[0] - 1;
info->num_ref_idx_l1_active_minus1           = pps->ref_count[1] - 1;
info->log2_max_frame_num_minus4              = sps->log2_max_frame_num - 4;
info->pic_order_cnt_type                     = sps->poc_type;
info->log2_max_pic_order_cnt_lsb_minus4      = sps->poc_type ? 0 : sps->log2_max_poc_lsb - 4;
info->delta_pic_order_always_zero_flag       = sps->delta_pic_order_always_zero_flag;
info->direct_8x8_inference_flag              = sps->direct_8x8_inference_flag;
#ifdef VDP_DECODER_PROFILE_H264_HIGH_444_PREDICTIVE
info2->qpprime_y_zero_transform_bypass_flag  = sps->transform_bypass;
info2->separate_colour_plane_flag            = sps->residual_color_transform_flag;
#endif
info->entropy_coding_mode_flag               = pps->cabac;
info->pic_order_present_flag                 = pps->pic_order_present;
info->deblocking_filter_control_present_flag = pps->deblocking_filter_parameters_present;
info->redundant_pic_cnt_present_flag         = pps->redundant_pic_cnt_present;

/* The 4x4 scaling lists are copied wholesale; of the 8x8 matrices only the
 * intra-Y and inter-Y lists are passed to VDPAU. */
memcpy(info->scaling_lists_4x4,    pps->scaling_matrix4,
       sizeof(info->scaling_lists_4x4));
memcpy(info->scaling_lists_8x8[0], pps->scaling_matrix8[0],
       sizeof(info->scaling_lists_8x8[0]));
memcpy(info->scaling_lists_8x8[1], pps->scaling_matrix8[3],
       sizeof(info->scaling_lists_8x8[1]));
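Most of these fields keep the same bias the H.264 bitstream uses, so the assignments simply restore it from FFmpeg's already-decoded values; log2_max_pic_order_cnt_lsb_minus4 is forced to 0 because that syntax element only exists for pic_order_cnt_type 0. A quick numeric check with made-up parameter-set values (bias_example() is just an illustrative wrapper):

static void bias_example(void)
{
    /* Made-up sample values, only to illustrate the bias arithmetic above. */
    int init_qp            = 26;   /* pic_init_qp decoded from the PPS        */
    int log2_max_frame_num = 8;    /* log2_max_frame_num decoded from the SPS */
    int ref_count_l0       = 3;    /* active list-0 reference count           */

    int pic_init_qp_minus26          = init_qp - 26;             /* 26 - 26 = 0 */
    int log2_max_frame_num_minus4    = log2_max_frame_num - 4;   /*  8 -  4 = 4 */
    int num_ref_idx_l0_active_minus1 = ref_count_l0 - 1;         /*  3 -  1 = 2 */
}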
/* vdpau_h264_init(): each assignment below belongs to one case of a switch on
 * the stream's AV_PROFILE_H264_* value; unsupported profiles make init fail. */
profile = VDP_DECODER_PROFILE_H264_BASELINE;
#ifdef VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE
profile = VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE;
#endif
profile = VDP_DECODER_PROFILE_H264_MAIN;
profile = VDP_DECODER_PROFILE_H264_HIGH;
#ifdef VDP_DECODER_PROFILE_H264_EXTENDED
profile = VDP_DECODER_PROFILE_H264_EXTENDED;
#endif
profile = VDP_DECODER_PROFILE_H264_HIGH;   /* High 10 is decoded with the High profile */
#ifdef VDP_DECODER_PROFILE_H264_HIGH_444_PREDICTIVE
profile = VDP_DECODER_PROFILE_H264_HIGH_444_PREDICTIVE;
#endif

/* Intra-only streams signalled as level 11 really mean level 1b. */
level = VDP_DECODER_LEVEL_H264_1b;
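Once a VdpDecoderProfile and level have been chosen, the common init path still has to confirm that the VDPAU implementation can decode the stream. A minimal sketch of such a check, assuming the VdpDecoderQueryCapabilities pointer has already been fetched via vdp_get_proc_address (the helper name profile_supported() is a placeholder):

#include <vdpau/vdpau.h>

/* Sketch: ask the driver whether `profile` at `level` and the coded size are
 * supported; query_caps is assumed to come from
 * VDP_FUNC_ID_DECODER_QUERY_CAPABILITIES. */
static int profile_supported(VdpDecoderQueryCapabilities *query_caps,
                             VdpDevice device, VdpDecoderProfile profile,
                             uint32_t level, uint32_t width, uint32_t height)
{
    VdpBool  supported = VDP_FALSE;
    uint32_t max_level = 0, max_macroblocks = 0, max_width = 0, max_height = 0;

    VdpStatus status = query_caps(device, profile, &supported, &max_level,
                                  &max_macroblocks, &max_width, &max_height);
    if (status != VDP_STATUS_OK || !supported)
        return 0;
    return level <= max_level && width <= max_width && height <= max_height;
}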
/* ff_h264_vdpau_hwaccel: the FFHWAccel descriptor this file exports. */
.p.name = "h264_vdpau",
int ff_vdpau_common_frame_params(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx)
#define AV_PROFILE_H264_INTRA
#define AV_PROFILE_H264_MAIN
AVHWAccel p
The public AVHWAccel.
const FFHWAccel ff_h264_vdpau_hwaccel
#define PICT_BOTTOM_FIELD
int frame_num
frame_num (raw frame_num from slice header)
#define AV_PROFILE_H264_EXTENDED
static int vdpau_h264_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
int ff_vdpau_add_buffer(struct vdpau_picture_context *pic_ctx, const uint8_t *buf, uint32_t size)
static void vdpau_h264_set_reference_frames(AVCodecContext *avctx)
#define AV_PROFILE_H264_HIGH_10
static void vdpau_h264_clear_rf(VdpReferenceFrameH264 *rf)
int ff_vdpau_common_start_frame(struct vdpau_picture_context *pic_ctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
static uintptr_t ff_vdpau_get_surface_id(AVFrame *pic)
Extract VdpVideoSurface from an AVFrame.
#define AV_PROFILE_H264_HIGH_422
void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height)
#define HWACCEL_CAP_ASYNC_SAFE
Header providing the internals of AVHWAccel.
int ff_vdpau_common_end_frame(AVCodecContext *avctx, AVFrame *frame, struct vdpau_picture_context *pic_ctx)
int level
Encoding level descriptor.
av_cold int ff_vdpau_common_uninit(AVCodecContext *avctx)
static int vdpau_h264_end_frame(AVCodecContext *avctx)
int pic_id
pic_num (short-term refs: the non-wrapping pic_num, i.e. pic_num & max_pic_num; long-term refs: long_pic_num)
#define AV_PROFILE_H264_CAVLC_444
AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
const char * name
Name of the hardware accelerated codec.
static int vdpau_h264_start_frame(AVCodecContext *avctx, const AVBufferRef *buffer_ref, const uint8_t *buffer, uint32_t size)
static const uint8_t start_code_prefix[3]
#define AV_PROFILE_H264_HIGH_444_PREDICTIVE
static int32_t h264_foc(int foc)
#define AV_PROFILE_H264_BASELINE
av_cold int ff_vdpau_common_init(AVCodecContext *avctx, VdpDecoderProfile profile, int level)
main external API structure.
#define AV_PROFILE_H264_HIGH
union VDPAUPictureInfo info
VDPAU picture information.
int field_poc[2]
top/bottom POC
#define AV_PROFILE_H264_CONSTRAINED_BASELINE
A reference to a data buffer.
void * hwaccel_picture_private
RefStruct reference for hardware accelerator private data.
static void vdpau_h264_set_rf(VdpReferenceFrameH264 *rf, H264Picture *pic, int pic_structure)
static av_cold int vdpau_h264_init(AVCodecContext *avctx)
int long_ref
1 = long-term reference, 0 = short-term reference
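The vdpau_h264_decode_slice() and ff_vdpau_add_buffer() entries above hint at how slice data reaches the driver: VDPAU expects Annex-B style NAL units, so every slice is queued behind a start-code prefix. A hedged sketch of that flow, reusing the internal declarations listed above (queue_slice() is a hypothetical name; the real hook lives in this file):

#include <stdint.h>
#include "vdpau_internal.h"   /* struct vdpau_picture_context, ff_vdpau_add_buffer() */

/* The standard Annex-B start code, presumably the same bytes held by the
 * start_code_prefix array declared above. */
static const uint8_t start_code_prefix[3] = { 0x00, 0x00, 0x01 };

static int queue_slice(struct vdpau_picture_context *pic_ctx,
                       const uint8_t *buffer, uint32_t size)
{
    int ret = ff_vdpau_add_buffer(pic_ctx, start_code_prefix,
                                  sizeof(start_code_prefix));
    if (ret)
        return ret;
    return ff_vdpau_add_buffer(pic_ctx, buffer, size);
}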