/* FFmpeg DeckLink output (libavdevice) -- selected lines */

#include <DeckLinkAPIVersion.h>
#include <DeckLinkAPI.h>
#if BLACKMAGIC_DECKLINK_API_VERSION >= 0x0e030000
#include <DeckLinkAPI_v14_2_1.h>
#endif

#include "libklvanc/vanc.h"
#include "libklvanc/vanc-lines.h"
#include "libklvanc/pixels.h"
/* decklink_frame (IDeckLinkVideoFrame) accessors */
    /* GetRowBytes(): V210 rows hold 48 pixels per 128-byte group */
        return ((GetWidth() + 47) / 48) * 128;

    /* GetPixelFormat(): 8-bit UYVY for wrapped AVFrames, 10-bit YUV for V210 */
        return bmdFormat8BitYUV;
    /* ... */
        return bmdFormat10BitYUV;

    virtual BMDFrameFlags STDMETHODCALLTYPE GetFlags(void)
    {
        if (_codec_id == AV_CODEC_ID_WRAPPED_AVFRAME)
            return _avframe->linesize[0] < 0 ? bmdFrameFlagFlipVertical : bmdFrameFlagDefault;
        else
            return bmdFrameFlagDefault;
    }
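/* Illustrative sketch (not part of decklink_enc.cpp): the V210 row size used
 * by GetRowBytes() above rounds the width up to a whole number of 48-pixel
 * groups, each occupying 128 bytes (6 pixels of 10-bit 4:2:2 per 16-byte
 * block). The helper name below is hypothetical. */
static long v210_row_bytes(long width)
{
    return ((width + 47) / 48) * 128;
}
/* e.g. v210_row_bytes(1920) == 5120, v210_row_bytes(720) == 1920,
 * v210_row_bytes(1280) == 3456 (width padded up to 1296 pixels). */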
    virtual HRESULT STDMETHODCALLTYPE GetTimecode(BMDTimecodeFormat format, IDeckLinkTimecode **timecode) { return S_FALSE; }
    virtual HRESULT STDMETHODCALLTYPE GetAncillaryData(IDeckLinkVideoFrameAncillary **ancillary)
    /* ... */
    virtual HRESULT STDMETHODCALLTYPE SetAncillaryData(IDeckLinkVideoFrameAncillary *ancillary)
    /* ... */

    /* QueryInterface(): only IUnknown is handed out; anything else is refused */
    /* ... */
        *ppv = static_cast<IUnknown*>(this);
    /* ... */
        return E_NOINTERFACE;
/* decklink_output_callback::ScheduledFrameCompleted(): release the completed
   frame's data and free up a scheduling slot */
    if (frame->_avpacket)
        av_packet_unref(frame->_avpacket);
    /* ... */
    ctx->frames_buffer_available_spots++;

/* IUnknown boilerplate for the callback object */
    /* ... */
        *ppv = static_cast<IUnknown*>(this);
    /* ... */
        return E_NOINTERFACE;

    virtual ULONG STDMETHODCALLTYPE AddRef(void)  { return 1; }
    virtual ULONG STDMETHODCALLTYPE Release(void) { return 1; }
211 " Only AV_PIX_FMT_UYVY422 is supported.\n");
214 ctx->raw_format = bmdFormat8BitYUV;
217 " Only V210 and wrapped frame with AV_PIX_FMT_UYVY422 are supported.\n");
220 ctx->raw_format = bmdFormat10BitYUV;
230 " Check available formats with -list_formats 1.\n");
233 if (
ctx->supports_vanc &&
ctx->dlo->EnableVideoOutput(
ctx->bmd_mode, bmdVideoOutputVANC) != S_OK) {
235 ctx->supports_vanc = 0;
237 if (!
ctx->supports_vanc &&
ctx->dlo->EnableVideoOutput(
ctx->bmd_mode, bmdVideoOutputFlagDefault) != S_OK) {
244 ctx->dlo->SetScheduledFrameCompletionCallback(
ctx->output_callback);
248 ctx->frames_preroll /= 1000;
251 ctx->frames_buffer =
ctx->frames_preroll * 2;
252 ctx->frames_buffer =
FFMIN(
ctx->frames_buffer, 60);
255 ctx->frames_buffer_available_spots =
ctx->frames_buffer;
258 avctx->
url,
ctx->frames_preroll,
ctx->frames_buffer);
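/* Illustrative sketch (not part of decklink_enc.cpp): the preroll/buffer
 * sizing above, restated in one place. Assuming the preroll option is given
 * in seconds and the frame period is tb_num/tb_den, the muxer prerolls about
 * that many frames and allows at most twice as many (capped at 60) to be
 * scheduled at once. compute_sizing() is a hypothetical helper, not the
 * exact FFmpeg arithmetic. */
static void compute_sizing(double preroll_seconds, int tb_num, int tb_den,
                           int *frames_preroll, int *frames_buffer)
{
    *frames_preroll = (int)(preroll_seconds * tb_den / tb_num);  /* frames in the preroll window */
    *frames_buffer  = FFMIN(*frames_preroll * 2, 60);            /* in-flight frame cap */
}
/* e.g. compute_sizing(0.5, 1001, 30000, ...) gives ~14 preroll frames and a
 * 28-frame scheduling buffer at 29.97 fps. */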
/* decklink_setup_audio(): validate the audio stream and enable audio output */
    if (c->sample_rate != 48000) {
        av_log(avctx, AV_LOG_ERROR, /* ... */ " Only 48kHz is supported.\n");
        /* ... */
    }
    if (c->ch_layout.nb_channels != 2 && c->ch_layout.nb_channels != 8 &&
        c->ch_layout.nb_channels != 16) {
        av_log(avctx, AV_LOG_ERROR, /* ... */ " Only 2, 8 or 16 channels are supported.\n");
        /* ... */
    }
    ctx->channels = c->ch_layout.nb_channels;
    /* ... */
    av_log(avctx, AV_LOG_ERROR, /* ... */ " Only PCM_S16LE and AC-3 are supported.\n");
    /* ... */
    if (ctx->dlo->EnableAudioOutput(bmdAudioSampleRate48kHz,
                                    bmdAudioSampleType16bitInteger,
                                    ctx->channels,
                                    bmdAudioOutputStreamTimestamped) != S_OK) {
        /* ... */
    }
    if (ctx->dlo->BeginAudioPreroll() != S_OK) {
        /* ... */
    }
/* create_s337_payload(): wrap an AC-3 frame into a SMPTE ST 337 burst */
    uint16_t bitcount = pkt->size * 8;
    uint8_t *s337_payload;
    /* ... */
    s337_payload = (uint8_t *) av_malloc(payload_size);
    if (s337_payload == NULL)
        return AVERROR(ENOMEM);
    /* ... */
    bytestream2_put_le16u(&pb, 0xf872);   /* Pa: sync word 1 */
    bytestream2_put_le16u(&pb, 0x4e1f);   /* Pb: sync word 2 */
    bytestream2_put_le16u(&pb, 0x0001);   /* Pc: burst info, data type 1 = AC-3 */
    bytestream2_put_le16u(&pb, bitcount); /* Pd: payload length in bits */
    /* ... */
    bytestream2_put_le16u(&pb, 0);        /* zero padding word */
    /* ... */
    *outsize = payload_size;
    *outbuf = s337_payload;
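/* Illustrative sketch (not part of decklink_enc.cpp): the SMPTE ST 337 burst
 * framing written above, shown standalone. Pa/Pb are the sync words, Pc
 * carries the data type (1 = AC-3) and Pd the payload length in bits; the
 * AC-3 bytes then follow as big-endian pairs packed into 16-bit words. This
 * is a simplified sketch and skips the exact padding/allocation handling of
 * create_s337_payload(). */
static int sketch_st337_wrap(const uint8_t *data, int size, uint16_t *out)
{
    int n = 0;
    out[n++] = 0xf872;                  /* Pa: sync word 1 */
    out[n++] = 0x4e1f;                  /* Pb: sync word 2 */
    out[n++] = 0x0001;                  /* Pc: burst info, data type 1 = AC-3 */
    out[n++] = (uint16_t)(size * 8);    /* Pd: payload length in bits */
    for (int i = 0; i + 1 < size; i += 2)
        out[n++] = (uint16_t)((data[i] << 8) | data[i + 1]);
    if (size & 1)                       /* pad an odd trailing byte */
        out[n++] = (uint16_t)(data[size - 1] << 8);
    return n;                           /* number of 16-bit words written */
}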
/* ff_decklink_write_trailer(): stop playback and tear the device down */
    if (ctx->playback_started) {
        /* ... */
        ctx->dlo->StopScheduledPlayback(ctx->last_pts * ctx->bmd_tb_num,
                                        &actual, ctx->bmd_tb_den);
        ctx->dlo->DisableVideoOutput();
        /* ... */
        ctx->dlo->DisableAudioOutput();
    }
    /* ... */
    if (ctx->output_callback)
        delete ctx->output_callback;
    /* ... */
    klvanc_context_destroy(ctx->vanc_ctx);
/* construct_cc(): convert A53 closed-caption side data into an EIA-708B CDP
   and queue it as a VANC line */
static int construct_cc(AVFormatContext *avctx, struct decklink_ctx *ctx,
                        AVPacket *pkt, struct klvanc_line_set_s *vanc_lines)
{
    struct klvanc_packet_eia_708b_s *cdp;
    /* ... */
    ret = klvanc_create_eia708_cdp(&cdp);
    /* ... */
    ret = klvanc_set_framerate_EIA_708B(cdp, ctx->bmd_tb_num, ctx->bmd_tb_den);
    if (ret) {
        av_log(avctx, AV_LOG_ERROR, /* ... */ ctx->bmd_tb_num, ctx->bmd_tb_den);
        klvanc_destroy_eia708_cdp(cdp);
        /* ... */
    }

    if (cc_count > KLVANC_MAX_CC_COUNT) {
        /* ... */
        cc_count = KLVANC_MAX_CC_COUNT;
    }

    cdp->header.ccdata_present = 1;
    cdp->header.caption_service_active = 1;
    cdp->ccdata.cc_count = cc_count;
    for (i = 0; i < cc_count; i++) {
        if (data[3*i] & 0x04)
            cdp->ccdata.cc[i].cc_valid = 1;
        cdp->ccdata.cc[i].cc_type    = data[3*i] & 0x03;
        cdp->ccdata.cc[i].cc_data[0] = data[3*i+1];
        cdp->ccdata.cc[i].cc_data[1] = data[3*i+2];
    }

    klvanc_finalize_EIA_708B(cdp, ctx->cdp_sequence_num++);
    ret = klvanc_convert_EIA_708B_to_words(cdp, &cdp_words, &len);
    klvanc_destroy_eia708_cdp(cdp);
    /* ... */
    ret = klvanc_line_insert(ctx->vanc_ctx, vanc_lines, cdp_words, len, 11, 0);
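/* Illustrative sketch (not part of decklink_enc.cpp): how the loop above
 * interprets AV_PKT_DATA_A53_CC side data. Each 3-byte triplet is one
 * cc_data() entry: bit 2 of the first byte is cc_valid, bits 0-1 are cc_type,
 * and the next two bytes are the caption data pair. The struct and helper
 * names are hypothetical. */
struct cc_triplet_sketch {
    int     cc_valid;
    int     cc_type;
    uint8_t cc_data[2];
};

static void parse_a53_triplet(const uint8_t *data, int i, struct cc_triplet_sketch *out)
{
    out->cc_valid   = (data[3*i] & 0x04) ? 1 : 0;
    out->cc_type    =  data[3*i] & 0x03;
    out->cc_data[0] =  data[3*i + 1];
    out->cc_data[1] =  data[3*i + 2];
}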
/* construct_afd(): emit Active Format Description as VANC, on both fields for
   interlaced modes */
static int construct_afd(AVFormatContext *avctx, struct decklink_ctx *ctx,
                         AVPacket *pkt, struct klvanc_line_set_s *vanc_lines,
                         AVStream *st)
{
    struct klvanc_packet_afd_s *afd = NULL;
    uint16_t *afd_words = NULL;
    /* ... */
    int f1_line = 12, f2_line = 0, ret;
    /* ... */
    ret = klvanc_create_AFD(&afd);
    /* ... */
    ret = klvanc_set_AFD_val(afd, data[0]);
    if (ret) {
        /* ... */
        klvanc_destroy_AFD(afd);
        /* ... */
    }
    /* ... */
        afd->aspectRatio = ASPECT_16x9;
    /* ... */
        afd->aspectRatio = ASPECT_4x3;

    ret = klvanc_convert_AFD_to_words(afd, &afd_words, &len);
    /* ... */
    ret = klvanc_line_insert(ctx->vanc_ctx, vanc_lines, afd_words, len, f1_line, 0);
    /* ... */

    /* For interlaced modes, also insert the AFD on the matching field-2 line */
    switch (ctx->bmd_mode) {
    /* ... */
    case bmdModeNTSC2398:
        f2_line = 273 - 10 + f1_line;
        break;
    /* ... */
        f2_line = 319 - 6 + f1_line;
        break;
    case bmdModeHD1080i50:
    case bmdModeHD1080i5994:
    case bmdModeHD1080i6000:
        f2_line = 569 - 7 + f1_line;
        break;
    /* ... */
    }
    /* ... */
    ret = klvanc_line_insert(ctx->vanc_ctx, vanc_lines, afd_words, len, f2_line, 0);
    /* ... */
    klvanc_destroy_AFD(afd);
/* decklink_construct_vanc(): gather CC, AFD and SMPTE 2038 ancillary data and
   render it into the frame's VANC lines */
    struct klvanc_line_set_s vanc_lines = { 0 };
    /* ... */
    if (!ctx->supports_vanc)
        /* ... */

    parse_608subs(avctx, ctx, pkt);
    construct_cc(avctx, ctx, pkt, &vanc_lines);
    construct_afd(avctx, ctx, pkt, &vanc_lines, st);

    /* Drain queued SMPTE 2038 data packets that belong to this video frame */
    if (pts > ctx->last_pts) {
        /* ... */
    }
    /* ... */
    if (vanc_pkt.pts + 1 < ctx->last_pts) {
        /* ... */
    }
    /* ... */
    struct klvanc_smpte2038_anc_data_packet_s *pkt_2038 = NULL;
    /* ... */
    klvanc_smpte2038_parse_pes_payload(vanc_pkt.data, vanc_pkt.size, &pkt_2038);
    if (pkt_2038 == NULL) {
        /* ... */
    }
    /* ... */
    for (int i = 0; i < pkt_2038->lineCount; i++) {
        struct klvanc_smpte2038_anc_data_line_s *l = &pkt_2038->lines[i];
        uint16_t *vancWords = NULL;
        uint16_t vancWordCount;

        if (klvanc_smpte2038_convert_line_to_words(l, &vancWords,
                                                   &vancWordCount) < 0)
            /* ... */

        ret = klvanc_line_insert(ctx->vanc_ctx, &vanc_lines, vancWords,
                                 vancWordCount, l->line_number, 0);
        /* ... */
    }
    klvanc_smpte2038_anc_data_packet_free(pkt_2038);
    /* Allocate a DeckLink ancillary buffer and render each queued VANC line into it */
    IDeckLinkVideoFrameAncillary *vanc;
    int result = ctx->dlo->CreateAncillaryData(bmdFormat10BitYUV, &vanc);
    /* ... */
    for (i = 0; i < vanc_lines.num_lines; i++) {
        struct klvanc_line_s *line = vanc_lines.lines[i];
        /* ... */
        real_line = line->line_number;
        /* ... */
        result = vanc->GetBufferForVerticalBlankingLine(real_line, &buf);
        /* ... */
        result = klvanc_generate_vanc_line_v210(ctx->vanc_ctx, line, (uint8_t *) buf,
                                                /* ... */);
        /* ... */
    }
    /* ... */
    for (i = 0; i < vanc_lines.num_lines; i++)
        klvanc_line_free(vanc_lines.lines[i]);
/* decklink_write_video_packet(): validate the frame, attach VANC, schedule it,
   and start playback once the preroll is filled */
    if (/* ... */
        tmp->width  != ctx->bmd_width ||
        tmp->height != ctx->bmd_height) {
        /* ... */
    }
    /* ... */
    if (decklink_construct_vanc(avctx, ctx, pkt, frame, st))
        /* ... */

    /* Block until the completion callback frees a scheduling slot */
    while (ctx->frames_buffer_available_spots == 0) {
        /* ... */
    }
    ctx->frames_buffer_available_spots--;
    /* ... */

    hr = ctx->dlo->ScheduleVideoFrame(frame, /* ... */
                                      ctx->bmd_tb_num, ctx->bmd_tb_den);
    if (hr != S_OK) {
        av_log(avctx, AV_LOG_ERROR, /* ... */ " error %08x.\n", (uint32_t) hr);
        /* ... */
    }

    ctx->dlo->GetBufferedVideoFrameCount(&buffered);
    /* ... */
    if (pkt->pts > 2 && buffered <= 2)
        av_log(avctx, AV_LOG_WARNING, /* ... */ " Video may misbehave!\n");

    /* Kick off scheduled playback once enough frames have been prerolled */
    if (!ctx->playback_started && pkt->pts > (ctx->first_pts + ctx->frames_preroll)) {
        /* ... */
        if (ctx->audio && ctx->dlo->EndAudioPreroll() != S_OK) {
            /* ... */
        }
        /* ... */
        if (ctx->dlo->StartScheduledPlayback(ctx->first_pts * ctx->bmd_tb_num,
                                             ctx->bmd_tb_den, 1.0) != S_OK) {
            /* ... */
        }
        /* ... */
        ctx->playback_started = 1;
    }
/* decklink_write_audio_packet(): schedule PCM (or an ST 337 AC-3 burst) */
    uint8_t *outbuf = NULL;
    /* ... */
    ctx->dlo->GetBufferedAudioSampleFrameCount(&buffered);
    if (pkt->pts > 1 && !buffered)
        av_log(avctx, AV_LOG_WARNING, /* ... */ " Audio will misbehave!\n");
    /* ... */
        /* AC-3: the ST 337 burst rides on a 16-bit stereo pair */
        sample_count = outbuf_size / 4;
    /* ... */
        /* PCM: channels * 2 bytes per sample frame */
        sample_count = pkt->size / (ctx->channels << 1);
    /* ... */
    if (ctx->dlo->ScheduleAudioSamples(outbuf, sample_count, pkt->pts,
                                       bmdAudioSampleRate48kHz, NULL) != S_OK) {
        /* ... */
    }
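/* Illustrative sketch (not part of decklink_enc.cpp): the sample_count
 * arithmetic above. PCM sample frames are channels * 2 bytes each (16-bit
 * samples), while an ST 337 AC-3 burst always rides on a 16-bit stereo pair,
 * i.e. 4 bytes per sample frame. Hypothetical helpers: */
static int pcm_s16_sample_count(int packet_size, int channels)
{
    return packet_size / (channels << 1);
}

static int s337_sample_count(int burst_size_bytes)
{
    return burst_size_bytes / 4;
}
/* e.g. a 1920-byte stereo PCM packet is 480 sample frames (10 ms at 48 kHz). */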
/* ff_decklink_write_header(): create the libklvanc context and handle the
   list_devices / list_formats options */
    if (klvanc_context_create(&ctx->vanc_ctx) < 0) {
        /* ... */
    }
    /* ... */
    ctx->supports_vanc = 1;
    /* ... */
    if (ctx->list_devices) {
        /* ... */
    }
    /* ... */
    if (ctx->list_formats) {
        /* ... */
    }