Go to the documentation of this file.
68 s->outputView->lpVtbl->Release(
s->outputView);
73 s->processor->lpVtbl->Release(
s->processor);
78 s->enumerator->lpVtbl->Release(
s->enumerator);
83 s->videoDevice->lpVtbl->Release(
s->videoDevice);
84 s->videoDevice =
NULL;
93 s->output_format = DXGI_FORMAT_NV12;
96 s->output_format = DXGI_FORMAT_P010;
106 s->device = d3d11_hwctx->device;
107 s->context = d3d11_hwctx->device_context;
110 s->inputWidth,
s->inputHeight,
s->width,
s->height);
113 D3D11_VIDEO_PROCESSOR_CONTENT_DESC contentDesc = {
114 .InputFrameFormat = D3D11_VIDEO_FRAME_FORMAT_PROGRESSIVE,
115 .InputWidth =
s->inputWidth,
116 .InputHeight =
s->inputHeight,
117 .OutputWidth =
s->width,
118 .OutputHeight =
s->height,
119 .Usage = D3D11_VIDEO_USAGE_PLAYBACK_NORMAL,
123 hr =
s->device->lpVtbl->QueryInterface(
s->device, &IID_ID3D11VideoDevice, (
void **)&
s->videoDevice);
130 hr =
s->videoDevice->lpVtbl->CreateVideoProcessorEnumerator(
s->videoDevice, &contentDesc, &
s->enumerator);
137 hr =
s->videoDevice->lpVtbl->CreateVideoProcessor(
s->videoDevice,
s->enumerator, 0, &
s->processor);
152 ID3D11VideoProcessorInputView *inputView =
NULL;
153 ID3D11VideoContext *videoContext =
NULL;
173 if (!
s->hw_device_ctx) {
182 if (input_device_ctx->
type != filter_device_ctx->type) {
205 D3D11_TEXTURE2D_DESC textureDesc;
206 ID3D11Texture2D *input_texture = (ID3D11Texture2D *)in->
data[0];
207 input_texture->lpVtbl->GetDesc(input_texture, &textureDesc);
209 s->inputWidth = textureDesc.Width;
210 s->inputHeight = textureDesc.Height;
211 s->input_format = textureDesc.Format;
221 ID3D11Texture2D *d3d11_texture = (ID3D11Texture2D *)in->
data[0];
222 int subIdx = (
int)(intptr_t)in->
data[1];
224 D3D11_VIDEO_PROCESSOR_INPUT_VIEW_DESC inputViewDesc = {
225 .FourCC =
s->input_format,
226 .ViewDimension = D3D11_VPIV_DIMENSION_TEXTURE2D,
227 .Texture2D.ArraySlice = subIdx
230 hr =
s->videoDevice->lpVtbl->CreateVideoProcessorInputView(
231 s->videoDevice, (ID3D11Resource *)d3d11_texture,
s->enumerator, &inputViewDesc, &inputView);
239 ID3D11Texture2D *output_texture = (ID3D11Texture2D *)
out->data[0];
240 D3D11_VIDEO_PROCESSOR_OUTPUT_VIEW_DESC outputViewDesc = {
241 .ViewDimension = D3D11_VPOV_DIMENSION_TEXTURE2D,
242 .Texture2D = { .MipSlice = 0 },
245 hr =
s->videoDevice->lpVtbl->CreateVideoProcessorOutputView(
246 s->videoDevice, (ID3D11Resource *)output_texture,
s->enumerator, &outputViewDesc, &
s->outputView);
254 D3D11_VIDEO_PROCESSOR_STREAM stream = {
256 .pInputSurface = inputView,
261 hr =
s->context->lpVtbl->QueryInterface(
s->context, &IID_ID3D11VideoContext, (
void **)&videoContext);
269 hr = videoContext->lpVtbl->VideoProcessorBlt(videoContext,
s->processor,
s->outputView, 0, 1, &stream);
283 out->data[0] = (uint8_t *)output_texture;
284 out->data[1] = (uint8_t *)(intptr_t)0;
285 out->width =
s->width;
286 out->height =
s->height;
290 inputView->lpVtbl->Release(inputView);
291 videoContext->lpVtbl->Release(videoContext);
293 s->outputView->lpVtbl->Release(
s->outputView);
294 s->outputView =
NULL;
303 inputView->lpVtbl->Release(inputView);
305 videoContext->lpVtbl->Release(videoContext);
307 s->outputView->lpVtbl->Release(
s->outputView);
308 s->outputView =
NULL;
334 outlink->
w =
s->width;
335 outlink->
h =
s->height;
351 if (!
s->hw_device_ctx) {
354 if (!
s->hw_device_ctx) {
364 s->device = d3d11_hwctx->device;
365 s->context = d3d11_hwctx->device_context;
367 if (!
s->device || !
s->context) {
374 if (!
s->hw_frames_ctx_out)
380 frames_ctx->
width =
s->width;
381 frames_ctx->
height =
s->height;
384 if (
ctx->extra_hw_frames > 0)
389 frames_hwctx->
BindFlags = D3D11_BIND_RENDER_TARGET | D3D11_BIND_VIDEO_ENCODER;
437 #define OFFSET(x) offsetof(ScaleD3D11Context, x)
438 #define FLAGS (AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
450 .
p.
name =
"scale_d3d11",
453 .p.priv_class = &scale_d3d11_class,
void * hwctx
The format-specific data, allocated and freed by libavutil along with this context.
AVPixelFormat
Pixel format.
static const AVOption scale_d3d11_options[]
In a filter, the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, a filter declares the list of supported formats. For video that means pixel formats; for audio that means channel layouts and sample formats. The lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection, and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later; that can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references, ownership and permissions
AVFILTER_DEFINE_CLASS(scale_d3d11)
DXGI_FORMAT output_format
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
uint8_t * data
The data buffer.
enum AVPixelFormat format
The pixel format identifying the underlying HW surface type.
The exact code depends on how similar the blocks are and how related they are to the rest of the filter's structure, and it needs to apply these operations to the correct inlink or outlink if there are several. Macros are available to factor that out when no extra processing is needed on the inlink.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
int av_hwframe_ctx_init(AVBufferRef *ref)
Finalize the context before use.
#define FILTER_INPUTS(array)
This structure describes decoded (raw) audio or video data.
UINT MiscFlags
D3D11_TEXTURE2D_DESC.MiscFlags used for texture creation.
AVBufferRef * av_hwframe_ctx_alloc(AVBufferRef *device_ref_in)
Allocate an AVHWFramesContext tied to a given device context.
static int scale_d3d11_filter_frame(AVFilterLink *inlink, AVFrame *in)
#define AV_LOG_VERBOSE
Detailed information.
int ff_scale_eval_dimensions(void *log_ctx, const char *w_expr, const char *h_expr, AVFilterLink *inlink, AVFilterLink *outlink, int *ret_w, int *ret_h)
Parse and evaluate string expressions for width and height.
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
const char * name
Filter name.
int width
The allocated dimensions of the frames in this pool.
A link between two filters.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static av_cold int scale_d3d11_init(AVFilterContext *ctx)
Link properties exposed to filter code, but not external callers.
UINT BindFlags
D3D11_TEXTURE2D_DESC.BindFlags used for texture creation.
A filter pad used for either input or output.
This struct aggregates all the (hardware/vendor-specific) "high-level" state, i.e.
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
static int scale_d3d11_configure_processor(ScaleD3D11Context *s, AVFilterContext *ctx)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static const AVFilterPad scale_d3d11_outputs[]
ID3D11VideoProcessor * processor
ID3D11DeviceContext * context
enum AVPixelFormat format
D3D11 objects.
#define FILTER_OUTPUTS(array)
Describe the class of an AVClass context structure.
enum AVPixelFormat sw_format
The pixel format identifying the actual data layout of the hardware frames.
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
New swscale design. SwsGraph is what coordinates multiple passes: these can include cascaded scaling, error diffusion, and so on, or separate passes for the vertical and horizontal scaling. In between each SwsPass lies a fully allocated image buffer. Graph passes may have different levels of parallelism, e.g. a single-threaded error diffusion pass may follow a multi-threaded scaling pass. SwsGraph is internally recreated whenever the image format changes.
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
AVBufferRef * device_ref
A reference to the parent AVHWDeviceContext.
ID3D11VideoDevice * videoDevice
const FFFilter ff_vf_scale_d3d11
This struct is allocated as AVHWFramesContext.hwctx.
static FilterLink * ff_filter_link(AVFilterLink *link)
static av_cold void scale_d3d11_uninit(AVFilterContext *ctx)
#define FF_FILTER_FLAG_HWFRAME_AWARE
The filter is aware of hardware frames, and any hardware frame context should not be automatically propagated through it.
static int scale_d3d11_config_props(AVFilterLink *outlink)
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
#define AVFILTER_FLAG_HWDEVICE
The filter can create hardware frames using AVFilterContext.hw_device_ctx.
AVFilterContext * src
source filter
#define AVERROR_EXTERNAL
Generic error in an external library.
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
AVBufferRef * hw_frames_ctx
For hwaccel pixel formats, this should be a reference to the AVHWFramesContext describing the frames.
int w
agreed upon image width
This struct is allocated as AVHWDeviceContext.hwctx.
ID3D11VideoProcessorOutputView * outputView
const char * name
Pad name.
AVBufferRef * hw_frames_ctx_out
Dimensions and formats.
This struct describes a set or pool of "hardware" frames (i.e.
enum AVHWDeviceType type
This field identifies the underlying API used for hardware access.
@ AV_PIX_FMT_NV12
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V).
void * hwctx
The format-specific data, allocated and freed automatically along with this context.
ID3D11VideoProcessorEnumerator * enumerator
AVBufferRef * hw_frames_ctx
For hwaccel-format frames, this should be a reference to the AVHWFramesContext describing the frame.
static void release_d3d11_resources(ScaleD3D11Context *s)
static const AVFilterPad scale_d3d11_inputs[]
int h
agreed upon image height
AVBufferRef * hw_device_ctx
@ AV_OPT_TYPE_PIXEL_FMT
Underlying C type is enum AVPixelFormat.
int initial_pool_size
Initial size of the frame pool.
AVFilter p
The public AVFilter.
A reference to a data buffer.
ID3D11VideoProcessorInputView * inputView
Buffer references.
@ AV_OPT_TYPE_STRING
Underlying C type is a uint8_t* that is either NULL or points to a C string allocated with the av_malloc() family of functions.
int av_hwframe_get_buffer(AVBufferRef *hwframe_ref, AVFrame *frame, int flags)
Allocate a new frame attached to the given AVHWFramesContext.
#define FILTER_SINGLE_PIXFMT(pix_fmt_)