Go to the documentation of this file.
25 #include <sys/types.h>
27 #include <mfx/mfxvideo.h>
64 }
else if (hw_frames_ref) {
77 q->
iopattern == MFX_IOPATTERN_OUT_OPAQUE_MEMORY);
84 }
else if (hw_device_ref) {
108 MFXVideoDECODE_Close(q->
session);
115 return sizeof(mfxSyncPoint*) +
sizeof(
QSVFrame*);
125 mfxVideoParam param_out = { .mfx.CodecId = param_in->mfx.CodecId };
128 #define CHECK_MATCH(x) \
130 if (param_out.mfx.x != param_in->mfx.x) { \
131 av_log(avctx, AV_LOG_WARNING, "Required "#x" %d is unsupported\n", \
136 ret = MFXVideoDECODE_Query(q->
session, param_in, &param_out);
153 mfxSession session =
NULL;
155 mfxVideoParam param = { 0 };
183 if (frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME)
184 iopattern = MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
185 else if (frames_hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)
186 iopattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;
191 iopattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
204 param.mfx.CodecId =
ret;
208 param.mfx.FrameInfo.BitDepthLuma =
desc->comp[0].depth;
209 param.mfx.FrameInfo.BitDepthChroma =
desc->comp[0].depth;
210 param.mfx.FrameInfo.Shift =
desc->comp[0].depth > 8;
211 param.mfx.FrameInfo.FourCC = q->
fourcc;
212 param.mfx.FrameInfo.Width = frame_width;
213 param.mfx.FrameInfo.Height = frame_height;
214 param.mfx.FrameInfo.ChromaFormat = MFX_CHROMAFORMAT_YUV420;
218 param.mfx.FrameInfo.PicStruct = MFX_PICSTRUCT_PROGRESSIVE;
221 param.mfx.FrameInfo.PicStruct = MFX_PICSTRUCT_FIELD_TFF;
224 param.mfx.FrameInfo.PicStruct = MFX_PICSTRUCT_FIELD_BFF;
227 param.mfx.FrameInfo.PicStruct = MFX_PICSTRUCT_UNKNOWN;
239 "Current input bitstream is not supported by QSV decoder.\n");
242 ret = MFXVideoDECODE_Init(q->
session, &param);
245 "Error initializing the MFX video decoder");
261 frame->surface = *(mfxFrameSurface1*)
frame->frame->data[3];
265 frame->surface.Data.PitchLow =
frame->frame->linesize[0];
266 frame->surface.Data.Y =
frame->frame->data[0];
267 frame->surface.Data.UV =
frame->frame->data[1];
277 frame->surface.Data.ExtParam = &
frame->ext_param;
278 frame->surface.Data.NumExtParam = 1;
279 frame->ext_param = (mfxExtBuffer*)&
frame->dec_info;
280 frame->dec_info.Header.BufferId = MFX_EXTBUFF_DECODED_FRAME_INFO;
281 frame->dec_info.Header.BufferSz =
sizeof(
frame->dec_info);
314 *surf = &
frame->surface;
336 *surf = &
frame->surface;
357 mfxFrameSurface1 *insurf;
358 mfxFrameSurface1 *outsurf;
360 mfxBitstream bs = { { { 0 } } };
364 bs.Data = avpkt->
data;
365 bs.DataLength = avpkt->
size;
366 bs.MaxLength = bs.DataLength;
367 bs.TimeStamp = avpkt->
pts;
369 bs.DataFlag |= MFX_BITSTREAM_COMPLETE_FRAME;
386 insurf, &outsurf, sync);
387 if (
ret == MFX_WRN_DEVICE_BUSY)
390 }
while (
ret == MFX_WRN_DEVICE_BUSY ||
ret == MFX_ERR_MORE_SURFACE);
392 if (
ret != MFX_ERR_NONE &&
393 ret != MFX_ERR_MORE_DATA &&
394 ret != MFX_WRN_VIDEO_PARAM_CHANGED &&
395 ret != MFX_ERR_MORE_SURFACE) {
398 "Error during QSV decoding.");
403 if (!*sync && !bs.DataOffset) {
404 bs.DataOffset = avpkt->
size;
408 }
else if (!*sync && bs.DataOffset) {
419 "The returned surface does not correspond to any frame\n");
441 ret = MFXVideoCORE_SyncOperation(q->
session, *sync, 1000);
442 }
while (
ret == MFX_WRN_IN_EXECUTION);
447 src_frame = out_frame->
frame;
457 frame->pkt_pts = outsurf->Data.TimeStamp;
460 frame->pts = outsurf->Data.TimeStamp;
463 outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_TRIPLING ? 4 :
464 outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_DOUBLING ? 2 :
465 outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_REPEATED ? 1 : 0;
466 frame->top_field_first =
467 outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_TFF;
468 frame->interlaced_frame =
469 !(outsurf->Info.PicStruct & MFX_PICSTRUCT_PROGRESSIVE);
473 frame->key_frame = !!(out_frame->
dec_info.FrameType & MFX_FRAMETYPE_IDR);
477 ((mfxFrameSurface1*)
frame->data[3])->Info = outsurf->Info;
482 return bs.DataOffset;
490 MFXVideoDECODE_Close(q->
session);
553 &dummy_data, &dummy_size,
578 if (qsv_format < 0) {
580 "Decoding pixel format '%s' is not supported\n",
605 if (
desc->comp[0].depth > 8) {
static int qsv_decode(AVCodecContext *avctx, QSVContext *q, AVFrame *frame, int *got_frame, AVPacket *avpkt)
const AVCodecHWConfigInternal * ff_qsv_hw_configs[]
#define FF_ENABLE_DEPRECATION_WARNINGS
void * hwaccel_context
Hardware accelerator context.
#define AV_LOG_WARNING
Something somehow does not look correct.
int ff_qsv_process_data(AVCodecContext *avctx, QSVContext *q, AVFrame *frame, int *got_frame, AVPacket *pkt)
AVPixelFormat
Pixel format.
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int(*func)(void *, void *, int))
Feed data from a user-supplied callback to an AVFifoBuffer.
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
AVBufferRef * hw_frames_ctx
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
uint8_t * data
The data buffer.
void av_fifo_free(AVFifoBuffer *f)
Free an AVFifoBuffer.
static int qsv_decode_init(AVCodecContext *avctx, QSVContext *q)
static unsigned int qsv_fifo_size(const AVFifoBuffer *fifo)
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
This structure describes decoded (raw) audio or video data.
enum AVPictureType ff_qsv_map_pictype(int mfx_pic_type)
enum AVFieldOrder field_order
Field order.
int ff_qsv_find_surface_idx(QSVFramesContext *ctx, QSVFrame *frame)
QSVFrame * work_frames
a linked list of frames currently being used by QSV
AVCodecContext * avctx_internal
int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void(*func)(void *, void *, int))
Feed data from an AVFifoBuffer to a user-supplied callback.
int ff_qsv_decode_close(QSVContext *q)
int iopattern
The IO pattern to use.
int coded_width
Dimensions of the coded video.
@ AV_CODEC_HW_CONFIG_METHOD_AD_HOC
The codec supports this format by some ad-hoc method.
mfxSession internal_session
AVCodecParserContext * av_parser_init(int codec_id)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
static QSVFrame * find_frame(QSVContext *q, mfxFrameSurface1 *surf)
void ff_qsv_decode_flush(AVCodecContext *avctx, QSVContext *q)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
int ff_qsv_print_warning(void *log_ctx, mfxStatus err, const char *warning_string)
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
static unsigned int qsv_fifo_item_size(void)
QSVFramesContext frames_ctx
int ff_qsv_level_to_mfx(enum AVCodecID codec_id, int level)
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
static enum AVPixelFormat pix_fmts[]
int format
The format of the coded data, corresponds to enum AVPixelFormat for video and for enum AVSampleFormat...
enum AVPixelFormat pix_fmt
A hardware pixel format which the codec can use.
int av_usleep(unsigned usec)
Sleep for a period of time.
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
AVFifoBuffer * async_fifo
@ AV_PIX_FMT_QSV
HW acceleration through QSV, data[3] contains a pointer to the mfxFrameSurface1 structure.
int ff_qsv_map_pixfmt(enum AVPixelFormat format, uint32_t *fourcc)
mfxExtBuffer ** ext_buffers
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
mfxSession session
If non-NULL, the session to use for encoding or decoding.
int width
Dimensions of the decoded video intended for presentation.
static int alloc_frame(AVCodecContext *avctx, QSVContext *q, QSVFrame *frame)
mfxExtBuffer ** ext_buffers
Extra buffers to pass to encoder or decoder initialization.
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
static int get_surface(AVCodecContext *avctx, QSVContext *q, mfxFrameSurface1 **surf)
int ff_qsv_profile_to_mfx(enum AVCodecID codec_id, int profile)
int ff_qsv_init_session_frames(AVCodecContext *avctx, mfxSession *psession, QSVFramesContext *qsv_frames_ctx, const char *load_plugins, int opaque)
#define PARSER_FLAG_COMPLETE_FRAMES
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
static int check_dec_param(AVCodecContext *avctx, QSVContext *q, mfxVideoParam *param_in)
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/d...
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames.
static int qsv_init_session(AVCodecContext *avctx, QSVContext *q, mfxSession session, AVBufferRef *hw_frames_ref, AVBufferRef *hw_device_ref)
This struct describes a set or pool of "hardware" frames (i.e.
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
void * hwctx
The format-specific data, allocated and freed automatically along with this context.
int ff_qsv_codec_id_to_mfx(enum AVCodecID codec_id)
main external API structure.
enum AVFieldOrder field_order
enum AVPixelFormat orig_pix_fmt
This struct is used for communicating QSV parameters between libavcodec and the caller.
@ AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX
The codec supports this format via the hw_frames_ctx interface.
AVCodecParserContext * parser
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
#define FF_DISABLE_DEPRECATION_WARNINGS
This struct is allocated as AVHWFramesContext.hwctx.
int coded_width
Bitstream width / height, may be different from width/height e.g.
int av_parser_parse2(AVCodecParserContext *s, AVCodecContext *avctx, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int64_t pts, int64_t dts, int64_t pos)
Parse a packet.
A reference to a data buffer.
mfxExtDecodedFrameInfo dec_info
int av_fifo_size(const AVFifoBuffer *f)
Return the amount of data in bytes in the AVFifoBuffer, that is the amount of data you can read from ...
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
This structure stores compressed data.
int64_t pos
byte position in stream, -1 if unknown
static void qsv_clear_unused_frames(QSVContext *q)
int width
picture width / height.
AVFifoBuffer * av_fifo_alloc(unsigned int size)
Initialize an AVFifoBuffer.
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
int ff_qsv_init_session_device(AVCodecContext *avctx, mfxSession *psession, AVBufferRef *device_ref, const char *load_plugins)
enum AVPixelFormat sw_pix_fmt
Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
AVCodecHWConfig public
This is the structure which will be returned to the user by avcodec_get_hw_config().
int ff_qsv_print_error(void *log_ctx, mfxStatus err, const char *error_string)
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
void av_parser_close(AVCodecParserContext *s)
int ff_qsv_init_internal_session(AVCodecContext *avctx, mfxSession *session, const char *load_plugins)