#include <DeckLinkAPI.h>
#define MAX_WIDTH_VANC 1920
    {bmdModeNTSC,        11, 19, 274, 282},
    {bmdModeNTSC2398,    11, 19, 274, 282},
    {bmdModePAL,          7, 22, 320, 335},
    {bmdModeNTSCp,       11, -1,  -1,  39},
    {bmdModePALp,         7, -1,  -1,  45},
    {bmdModeHD1080p2398,  8, -1,  -1,  42},
    {bmdModeHD1080p24,    8, -1,  -1,  42},
    {bmdModeHD1080p25,    8, -1,  -1,  42},
    {bmdModeHD1080p2997,  8, -1,  -1,  42},
    {bmdModeHD1080p30,    8, -1,  -1,  42},
    {bmdModeHD1080i50,    8, 20, 570, 585},
    {bmdModeHD1080i5994,  8, 20, 570, 585},
    {bmdModeHD1080i6000,  8, 20, 570, 585},
    {bmdModeHD1080p50,    8, -1,  -1,  42},
    {bmdModeHD1080p5994,  8, -1,  -1,  42},
    {bmdModeHD1080p6000,  8, -1,  -1,  42},
    {bmdModeHD720p50,     8, -1,  -1,  26},
    {bmdModeHD720p5994,   8, -1,  -1,  26},
    {bmdModeHD720p60,     8, -1,  -1,  26},
    {bmdModeUnknown,      0, -1,  -1,  -1}
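These entries map each supported BMDDisplayMode to the VANC line ranges the capture code scans (first VANC line, end of field 0, start of field 1, overall end). As a hedged sketch of how a lookup such as get_vanc_line_idx (listed in the cross references below) could walk this table, assuming the array ends with the bmdModeUnknown sentinel shown above:

static int get_vanc_line_idx(BMDDisplayMode mode)
{
    unsigned int i;
    for (i = 0; i < FF_ARRAY_ELEMS(vanc_line_numbers); i++) {
        if (mode == vanc_line_numbers[i].mode)
            return i;
    }
    /* Fall back to the trailing bmdModeUnknown entry. */
    return i - 1;
}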
 
        virtual HRESULT STDMETHODCALLTYPE AllocateBuffer(unsigned int bufferSize, void **allocatedBuffer)
                return E_OUTOFMEMORY;
            *allocatedBuffer = buf;
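Only the failure path and the pointer assignment survive in this excerpt. A minimal sketch of the whole allocator method, assuming the elided lines allocate a padded buffer with av_malloc and AV_INPUT_BUFFER_PADDING_SIZE (the padding macro appears in the cross references below):

virtual HRESULT STDMETHODCALLTYPE AllocateBuffer(unsigned int bufferSize, void **allocatedBuffer)
{
    /* Assumption: pad the allocation so libavcodec consumers can over-read safely. */
    void *buf = av_malloc(bufferSize + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!buf)
        return E_OUTOFMEMORY;
    *allocatedBuffer = buf;
    return S_OK;
}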
 
        virtual HRESULT STDMETHODCALLTYPE Commit() { return S_OK; }
        virtual HRESULT STDMETHODCALLTYPE Decommit() { return S_OK; }
        virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }
 
    IUnknown *obj = (class IUnknown *)opaque;
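This cast is the body of the buffer free callback registered through av_buffer_create. A hedged sketch of the complete callback, assuming it does nothing beyond dropping the COM reference held by the packet buffer:

static void decklink_object_free(void *opaque, uint8_t *data)
{
    IUnknown *obj = (class IUnknown *)opaque;
    obj->Release();   /* release the DeckLink frame once the AVBuffer is freed */
}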
 
    uint16_t vanc_sum = 0;
    for (i = 3; i < len - 1; i++) {
        int p = av_parity(v & 0xff);
        if ((!!p ^ !!(v & 0x100)) || (np != 1 && np != 2)) {
    vanc_sum |= ((~vanc_sum & 0x100) << 1);
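These fragments belong to check_vanc_parity_checksum (declared in the cross references): every 10-bit VANC word must carry parity in bit 8 and its complement in bit 9, and the data words are summed into a 9-bit checksum whose bit 8 is complemented into bit 9 before comparison. A sketch of the full check, with the elided pieces filled in as assumptions:

static int check_vanc_parity_checksum(uint16_t *buf, int len, uint16_t checksum)
{
    int i;
    uint16_t vanc_sum = 0;
    for (i = 3; i < len - 1; i++) {
        uint16_t v = buf[i];
        int np = v >> 8;                 /* assumption: top two bits hold parity / not-parity */
        int p  = av_parity(v & 0xff);
        if ((!!p ^ !!(v & 0x100)) || (np != 1 && np != 2))
            return -1;                   /* parity violation */
        vanc_sum += v;
        buf[i] &= 0xff;                  /* keep only the data byte for later parsing */
    }
    vanc_sum &= 0x1ff;
    vanc_sum |= ((~vanc_sum & 0x100) << 1);
    return checksum == vanc_sum ? 0 : -1;
}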
 
        *dst++ = (src[1] >> 2) + ((src[2] & 15) << 6);
        *dst++ =  src[4]       + ((src[5] &  3) << 8);
        *dst++ = (src[6] >> 4) + ((src[7] & 63) << 4);
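These three stores are the loop body of extract_luma_from_v210: in v210, every 32-bit little-endian word packs three 10-bit components, and the luma samples alternate positions, so each pair of words (8 bytes) yields three Y values. A sketch of the whole helper, with the loop bound and the source advance treated as assumptions:

static void extract_luma_from_v210(uint16_t *dst, const uint8_t *src, int width)
{
    int i;
    for (i = 0; i < width / 3; i++) {    /* assumption: 3 luma samples per iteration */
        *dst++ = (src[1] >> 2) + ((src[2] & 15) << 6);   /* Y0: bits 10-19 of word 0 */
        *dst++ =  src[4]       + ((src[5] &  3) << 8);   /* Y1: bits  0-9  of word 1 */
        *dst++ = (src[6] >> 4) + ((src[7] & 63) << 4);   /* Y2: bits 20-29 of word 1 */
        src += 8;                        /* assumption: advance past the two words consumed */
    }
}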
 
    for (i = 0; i < width * 2 / 3; i++) {
        *dst++ =  src[0]       + ((src[1] & 3)  << 8);
        *dst++ = (src[1] >> 2) + ((src[2] & 15) << 6);
        *dst++ = (src[2] >> 4) + ((src[3] & 63) << 4);
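This is the body of unpack_v210, which expands the 10-bit VANC payload into one uint16_t per component; each iteration decodes one 32-bit word, so the only piece missing from the excerpt is the pointer advance, assumed below:

static void unpack_v210(uint16_t *dst, const uint8_t *src, int width)
{
    int i;
    for (i = 0; i < width * 2 / 3; i++) {
        *dst++ =  src[0]       + ((src[1] & 3)  << 8);   /* component 0: bits  0-9  */
        *dst++ = (src[1] >> 2) + ((src[2] & 15) << 6);   /* component 1: bits 10-19 */
        *dst++ = (src[2] >> 4) + ((src[3] & 63) << 4);   /* component 2: bits 20-29 */
        src += 4;   /* assumption: move to the next 32-bit word */
    }
}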
 
    vbi_bit_slicer slicer;
    vbi_bit_slicer_init(&slicer, 720, 13500000, 6937500, 6937500, 0x00aaaae4, 0xffff, 18, 6, 42 * 8, VBI_MODULATION_NRZ_MSB, fmt);
    if (vbi_bit_slice(&slicer, src, tgt + 4) == FALSE)
 
        *py++ = (src[1] >> 4) + ((src[2] & 15) << 4);
        *py++ = (src[4] >> 2) + ((src[5] & 3 ) << 6);
        *py++ = (src[6] >> 6) + ((src[7] & 63) << 2);
    return teletext_data_unit_from_vbi_data(line, y, tgt, VBI_PIXFMT_YUV420);
 
    if (py[0] != 0x255 || py[1] != 0x255 || py[2] != 0x227)
 
    for (i = 0; i < 42; i++)
 
        if (py[0] == 0x151 && py[1] == 0x115 && py[3] == 0x102) {
 
            uint16_t *descriptors = py + 4;
 
            for (i = 0; i < 5 && py < pend - 45; i++, py += 45) {
                int line = (descriptors[i] & 31) + (!(descriptors[i] & 128)) * 313;
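Each OP-47 line descriptor encodes the target VBI line in its low five bits and uses bit 7 as the field flag; when that bit is clear the data unit belongs to the second field, so 313 is added (625-line numbering). As a worked example, a descriptor of 0x95 (bit 7 set, low bits 21) selects line 21, while 0x15 (bit 7 clear) selects line 21 + 313 = 334.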
 
    uint16_t did = py[0];
    uint16_t sdid = py[1];
    uint16_t dc = py[2] & 255;
    if (did == 0x143 && sdid == 0x102) {
    } else if (allow_multipacket && did == 0x143 && sdid == 0x203) {
        while (py < pend - 3) {
            py += 4 + (py[2] & 255);
 
    size_t i, len = (buf[5] & 0xff) + 6 + 1;
    uint16_t *cdp = &buf[6];
    if (cdp[0] != 0x96 || cdp[1] != 0x69) {
    for (i = 0; i < len - 1; i++)
    cdp_sum = cdp_sum ? 256 - cdp_sum : 0;
    if (cdp[len - 1] != cdp_sum) {
    if (!(rate & 0x0f)) {
    if (!(cdp[4] & 0x43)) {
    hdr = (cdp[5] << 8) | cdp[6];
    if (cdp[7] != 0x72) {
    if (!(cc_count & 0xe0)) {
    if ((len - 13) < cc_count * 3) {
    if (cdp[len - 4] != 0x74) {
    ftr = (cdp[len - 3] << 8) | cdp[len - 2];
    for (size_t i = 0; i < cc_count; i++) {
        cc[3*i + 0] = cdp[9 + 3*i + 0];
        cc[3*i + 1] = cdp[9 + 3*i + 1];
        cc[3*i + 2] = cdp[9 + 3*i + 2];
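Taken together, these checks walk a CEA-708 caption distribution packet (CDP, per SMPTE 334-2): cdp[0..1] must be the 0x96 0x69 identifier, the running byte sum must cancel against the final checksum byte, the frame-rate nibble and the flags byte must indicate caption data, byte 7 must be the 0x72 cc_data section id and the fourth byte from the end the 0x74 footer id, and hdr/ftr pick up the sequence counters carried in header and footer for comparison. Each of the cc_count caption triplets then contributes three bytes (a cc_valid/cc_type byte plus two data bytes); this is the payload the capture path attaches as AV_PKT_DATA_A53_CC side data (listed in the cross references below).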
 
    uint16_t *max_buf = buf + width;
    while (buf < max_buf - 6) {
        uint16_t did = buf[3] & 0xFF;
        uint16_t sdid = buf[4] & 0xFF;
        if (buf[0] != 0 || buf[1] != 0x3ff || buf[2] != 0x3ff) {
        len = (buf[5] & 0xff) + 6 + 1;
        if (len > max_buf - buf) {
        if (did == 0x43 && (sdid == 0x02 || sdid == 0x03) && cctx->teletext_lines &&
            width == 1920 && tgt_size >= 1920) {
        } else if (did == 0x61 && sdid == 0x01) {
            unsigned int data_len;
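The 0x000/0x3FF/0x3FF words are the ancillary data flag that starts every 10-bit VANC packet; the DID/SDID pair that follows selects the handler: 0x43/0x02 (or 0x03 for the multi-packet variant) carries OP-47 subtitling distribution data holding teletext, while 0x61/0x01 is the SMPTE 334-1 packet holding the CEA-708 CDP parsed by the closed-caption path above.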
 
    unsigned long long size;
 
        virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }
        virtual ULONG STDMETHODCALLTYPE AddRef(void);
        virtual ULONG STDMETHODCALLTYPE Release(void);
        virtual HRESULT STDMETHODCALLTYPE VideoInputFormatChanged(BMDVideoInputFormatChangedEvents, IDeckLinkDisplayMode*, BMDDetectedVideoInputFormatFlags);
        virtual HRESULT STDMETHODCALLTYPE VideoInputFrameArrived(IDeckLinkVideoInputFrame*, IDeckLinkAudioInputPacket*);
 
                           IDeckLinkAudioInputPacket *audioFrame,
                           int64_t abs_wallclock,
    BMDTimeValue bmd_pts;
    BMDTimeValue bmd_duration;
    HRESULT res = E_INVALIDARG;
                res = audioFrame->GetPacketTime(&bmd_pts, time_base.den);
                res = videoFrame->GetStreamTime(&bmd_pts, &bmd_duration, time_base.den);
                res = videoFrame->GetHardwareReferenceTimestamp(time_base.den, &bmd_pts, &bmd_duration);
        pts = bmd_pts / time_base.num;
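These calls are alternatives inside get_pkt_pts (its full prototype appears in the cross references): depending on the selected DecklinkPtsSource the timestamp comes from the audio packet time, the video stream time, the hardware reference clock, or the wallclock, and a successful BMD query is divided by time_base.num to land in stream time-base units. A condensed sketch of that dispatch; the PTS_SRC_* labels, the wallclock rescaling, and the trailing copyts/initial_pts handling are assumptions here:

static int64_t get_pkt_pts(IDeckLinkVideoInputFrame *videoFrame,
                           IDeckLinkAudioInputPacket *audioFrame,
                           int64_t wallclock, int64_t abs_wallclock,
                           DecklinkPtsSource pts_src, AVRational time_base,
                           int64_t *initial_pts, int copyts)
{
    BMDTimeValue bmd_pts, bmd_duration;
    HRESULT res = E_INVALIDARG;
    int64_t pts = AV_NOPTS_VALUE;

    switch (pts_src) {
    case PTS_SRC_AUDIO:
        if (audioFrame)
            res = audioFrame->GetPacketTime(&bmd_pts, time_base.den);
        break;
    case PTS_SRC_VIDEO:
        if (videoFrame)
            res = videoFrame->GetStreamTime(&bmd_pts, &bmd_duration, time_base.den);
        break;
    case PTS_SRC_REFERENCE:
        if (videoFrame)
            res = videoFrame->GetHardwareReferenceTimestamp(time_base.den, &bmd_pts, &bmd_duration);
        break;
    case PTS_SRC_WALLCLOCK:   /* fall through */
    case PTS_SRC_ABS_WALLCLOCK:
        /* assumption: wallclock microseconds are rescaled into the stream time base */
        pts = av_rescale_q(pts_src == PTS_SRC_WALLCLOCK ? wallclock : abs_wallclock,
                           av_make_q(1, AV_TIME_BASE), time_base);
        break;
    }
    if (res == S_OK)
        pts = bmd_pts / time_base.num;
    /* the real function also applies the copyts / *initial_pts offset logic */
    return pts;
}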
 
    IDeckLinkVideoInputFrame *videoFrame, IDeckLinkAudioInputPacket *audioFrame)
    void *audioFrameBytes;
    BMDTimeValue frameTime;
    BMDTimeValue frameDuration;
    int64_t wallclock = 0, abs_wallclock = 0;
        if (videoFrame && !(videoFrame->GetFlags() & bmdFrameHasNoInputSource) &&
        if (av_cmp_q(remainder, frame_duration) > 0) {
                    "Frame received (#%lu) - Valid (%liB) - QSize %fMB\n",
                    videoFrame->GetRowBytes() * videoFrame->GetHeight(),
                    (double)qsize / 1024 / 1024);
        videoFrame->GetBytes(&frameBytes);
        videoFrame->GetStreamTime(&frameTime, &frameDuration,
        if (videoFrame->GetFlags() & bmdFrameHasNoInputSource) {
            if (ctx->draw_bars && videoFrame->GetPixelFormat() == bmdFormat8BitYUV) {
                    0xEA80EA80, 0xD292D210, 0xA910A9A5, 0x90229035,
                    0x6ADD6ACA, 0x51EF515A, 0x286D28EF, 0x10801080 };
                int width  = videoFrame->GetWidth();
                int height = videoFrame->GetHeight();
                unsigned *p = (unsigned *)frameBytes;
                for (int y = 0; y < height; y++) {
                    for (int x = 0; x < width; x += 2)
                        *p++ = bars[(x * 8) / width];
 
                IDeckLinkTimecode *timecode;
                if (videoFrame->GetTimecode(ctx->tc_format, &timecode) == S_OK) {
                    if (timecode->GetString(&decklink_tc) == S_OK) {
                            if (packed_metadata) {
        pkt.size         = videoFrame->GetRowBytes() *
                           videoFrame->GetHeight();
 
            IDeckLinkVideoFrameAncillary *vanc;
            if (videoFrame->GetAncillaryData(&vanc) == S_OK) {
                int64_t line_mask = 1;
                BMDPixelFormat vanc_format = vanc->GetPixelFormat();
                    (vanc_format == bmdFormat8BitYUV || vanc_format == bmdFormat10BitYUV)) {
                    for (i = 6; i < 336; i++, line_mask <<= 1) {
                        if ((ctx->teletext_lines & line_mask) && vanc->GetBufferForVerticalBlankingLine(i, (void**)&buf) == S_OK) {
                            if (vanc_format == bmdFormat8BitYUV)
                                txt_buf = teletext_data_unit_from_vbi_data(i, buf, txt_buf, VBI_PIXFMT_UYVY);
                                txt_buf = teletext_data_unit_from_vbi_data_10bit(i, buf, txt_buf);
                if (vanc_format == bmdFormat10BitYUV && videoFrame->GetWidth() <= MAX_WIDTH_VANC) {
                        if (vanc->GetBufferForVerticalBlankingLine(i, (void**)&buf) == S_OK) {
                            size_t vanc_size = videoFrame->GetWidth();
                                vanc_size = vanc_size * 2;
                                                   txt_buf, sizeof(txt_buf0) - (txt_buf - txt_buf0), &pkt);
                if (txt_buf - txt_buf0 > 1) {
                    int stuffing_units = (4 - ((45 + txt_buf - txt_buf0) / 46) % 4) % 4;
                    while (stuffing_units--) {
                        memset(txt_buf, 0xff, 46);
                    txt_pkt.data = txt_buf0;
                    txt_pkt.size = txt_buf - txt_buf0;
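The stuffing arithmetic pads the teletext payload so that, together with the 45-byte PES header a DVB teletext muxer prepends, the packet appears to fill a whole number of 184-byte TS payloads: the buffer holds one data_identifier byte plus 46-byte data units, so (45 + txt_buf - txt_buf0) / 46 counts the 46-byte blocks the PES packet occupies so far, and 0xFF stuffing units are appended until that count is a multiple of 4 (4 × 46 = 184). As a worked example, three data units give 1 + 3 × 46 = 139 payload bytes, (45 + 139) / 46 = 4, so no stuffing is needed; two units (93 bytes) yield a count of 3, so one 46-byte stuffing unit is added.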
 
            videoFrame->AddRef();
        BMDTimeValue audio_pts;
        audioFrame->GetBytes(&audioFrameBytes);
 
    BMDVideoInputFormatChangedEvents events, IDeckLinkDisplayMode *mode,
    BMDDetectedVideoInputFormatFlags)
    if (ctx->attr->GetFlag(BMDDeckLinkSupportsInputFormatDetection, &autodetect_supported) != S_OK)
    if (autodetect_supported == false)
    ctx->bmd_mode  = bmdModeUnknown;
                                   bmdVideoInputEnableFormatDetection) != S_OK) {
    if (ctx->dli->StartStreams() != S_OK) {
    for (i = 0; i < 30; i++) {
        if (ctx->bmd_mode != bmdModeUnknown &&
    ctx->dli->PauseStreams();
    ctx->dli->FlushStreams();
    if (ctx->bmd_mode != bmdModeUnknown) {
        ctx->dli->StopStreams();
        ctx->dli->DisableVideoInput();
        ctx->dli->DisableAudioInput();
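The fragments above come from decklink_autodetect (declared in the cross references): detection is only attempted when the card reports BMDDeckLinkSupportsInputFormatDetection, video input is enabled with bmdVideoInputEnableFormatDetection, and the code then polls until VideoInputFormatChanged has replaced bmdModeUnknown with a real mode. A sketch of the polling portion, assuming a 100 ms av_usleep per iteration and the AUTODETECT_DEFAULT_MODE constant listed below:

    if (ctx->dli->EnableVideoInput(AUTODETECT_DEFAULT_MODE, bmdFormat8BitYUV,
                                   bmdVideoInputEnableFormatDetection) != S_OK)
        return -1;
    if (ctx->dli->StartStreams() != S_OK)
        return -1;

    /* roughly a 3 second window: 30 polls, 100 ms apart */
    for (i = 0; i < 30; i++) {
        av_usleep(100000);
        if (ctx->bmd_mode != bmdModeUnknown)   /* the original adds one more condition on the detected mode */
            break;
    }

    ctx->dli->PauseStreams();
    ctx->dli->FlushStreams();
    return ctx->bmd_mode != bmdModeUnknown ? 0 : -1;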
 
            av_log(avctx, AV_LOG_ERROR, "Value for audio bit depth option must be either 16 or 32\n");
    if (ctx->list_devices) {
        av_log(avctx, AV_LOG_WARNING, "The -list_devices option is deprecated and will be removed. Please use ffmpeg -sources decklink instead.\n");
    if (ctx->dl->QueryInterface(IID_IDeckLinkInput, (void **) &ctx->dli) != S_OK) {
    if (ctx->list_formats) {
    allocator->Release();
    if (ctx->teletext_lines && ctx->bmd_mode == bmdModePAL) {
        av_log(avctx, AV_LOG_ERROR, "Libzvbi support is needed for capturing SD PAL teletext, please recompile FFmpeg.\n");
    st->codecpar->sample_rate = bmdAudioSampleRate48kHz;
    st->codecpar->width       = ctx->bmd_width;
    st->codecpar->height      = ctx->bmd_height;
    st->time_base.den      = ctx->bmd_tb_den;
    st->time_base.num      = ctx->bmd_tb_num;
    st->r_frame_rate       = av_make_q(st->time_base.den, st->time_base.num);
 
    case bmdFormat8BitYUV:
        st->codecpar->codec_tag   = MKTAG('U', 'Y', 'V', 'Y');
        st->codecpar->bit_rate    = av_rescale(ctx->bmd_width * ctx->bmd_height * 16, st->time_base.den, st->time_base.num);
    case bmdFormat10BitYUV:
        st->codecpar->codec_tag   = MKTAG('V', '2', '1', '0');
        st->codecpar->bit_rate    = av_rescale(ctx->bmd_width * ctx->bmd_height * 64, st->time_base.den, st->time_base.num * 3);
        st->codecpar->bits_per_coded_sample = 10;
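These bit-rate figures are simply pixels per frame times bits per pixel times frame rate (the stream time base is the inverse of the frame rate, so den/num is frames per second): UYVY uses 16 bits per pixel, while v210 packs 6 pixels into 16 bytes, i.e. 64/3 bits per pixel, which is why the factor of 64 is divided by 3 through the time_base.num * 3 term. As a worked example, 1080p25 in v210 gives 1920 × 1080 × 64 × 25 / 3 = 1,105,920,000 bit/s, roughly 1.1 Gb/s.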
 
    case bmdFormat8BitARGB:
        st->codecpar->bit_rate    = av_rescale(ctx->bmd_width * ctx->bmd_height * 32, st->time_base.den, st->time_base.num);
    case bmdFormat8BitBGRA:
        st->codecpar->bit_rate    = av_rescale(ctx->bmd_width * ctx->bmd_height * 32, st->time_base.den, st->time_base.num);
    case bmdFormat10BitRGB:
        st->codecpar->codec_tag   = MKTAG('R', '2', '1', '0');
        st->codecpar->bit_rate    = av_rescale(ctx->bmd_width * ctx->bmd_height * 30, st->time_base.den, st->time_base.num);
        st->codecpar->bits_per_coded_sample = 10;
 
    switch (ctx->bmd_field_dominance) {
    case bmdUpperFieldFirst:
    case bmdLowerFieldFirst:
    case bmdProgressiveFrame:
    case bmdProgressiveSegmentedFrame:
    if (ctx->teletext_lines) {
        st->time_base.den         = ctx->bmd_tb_den;
        st->time_base.num         = ctx->bmd_tb_num;
        ctx->teletext_st = st;
    result = ctx->dli->EnableAudioInput(bmdAudioSampleRate48kHz, cctx->audio_depth == 32 ? bmdAudioSampleType32bitInteger : bmdAudioSampleType16bitInteger, ctx->audio_st->codecpar->channels);
                                        bmdVideoInputFlagDefault);
    if (ctx->dli->StartStreams() != S_OK) {
 
        if (side_metadata) {
 
  
static uint8_t * get_metadata(AVFormatContext *avctx, uint16_t *buf, size_t width, uint8_t *tgt, size_t tgt_size, AVPacket *pkt)
 
static void error(const char *err)
 
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
 
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
 
#define AV_LOG_WARNING
Something somehow does not look correct.
 
static uint8_t * teletext_data_unit_from_ancillary_packet(uint16_t *py, uint16_t *pend, uint8_t *tgt, int64_t wanted_lines, int allow_multipacket)
 
AVPixelFormat
Pixel format.
 
AVStream * avformat_new_stream(AVFormatContext *s, const AVCodec *c)
Add a new stream to a media file.
 
#define MKTAG(a, b, c, d)
 
static av_always_inline int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
 
const BMDDisplayMode AUTODETECT_DEFAULT_MODE
 
DecklinkPtsSource video_pts_source
 
struct AVPacketList * next
 
av_cold int ff_decklink_read_close(AVFormatContext *avctx)
 
@ AV_CODEC_ID_DVB_TELETEXT
 
const uint8_t ff_reverse[256]
 
virtual HRESULT STDMETHODCALLTYPE Decommit()
 
DecklinkPtsSource audio_pts_source
 
#define AV_LOG_VERBOSE
Detailed information.
 
AVBufferRef * av_buffer_create(uint8_t *data, int size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
 
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
 
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
 
static int get_vanc_line_idx(BMDDisplayMode mode)
 
virtual ~decklink_allocator()
 
unsigned int avcodec_pix_fmt_to_codec_tag(enum AVPixelFormat pix_fmt)
Return a value representing the fourCC code associated to the pixel format pix_fmt,...
 
int ff_decklink_init_device(AVFormatContext *avctx, const char *name)
 
void ff_decklink_list_devices_legacy(AVFormatContext *avctx, int show_inputs, int show_outputs)
 
FF_ENABLE_DEPRECATION_WARNINGS int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
 
uint8_t * av_packet_pack_dictionary(AVDictionary *dict, int *size)
Pack a dictionary for use in side_data.
 
DecklinkPtsSource audio_pts_source
 
static const BMDVideoConnection decklink_video_connection_map[]
 
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that's been allocated with av_malloc() or another memory allocation functio...
 
@ AV_PKT_DATA_STRINGS_METADATA
A list of zero terminated key/value strings.
 
int ff_decklink_read_packet(AVFormatContext *avctx, AVPacket *pkt)
 
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
 
AVDictionary * metadata
Metadata that applies to the whole file.
 
static const uint16_t mask[17]
 
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
 
static uint8_t * teletext_data_unit_from_op47_data(uint16_t *py, uint16_t *pend, uint8_t *tgt, int64_t wanted_lines)
 
static const BMDAudioConnection decklink_audio_connection_map[]
 
static void decklink_object_free(void *opaque, uint8_t *data)
 
#define av_assert0(cond)
assert() equivalent, that is always enabled.
 
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
 
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
 
static int check_vanc_parity_checksum(uint16_t *buf, int len, uint16_t checksum)
 
int av_usleep(unsigned usec)
Sleep for a period of time.
 
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
 
AVCodecParameters * codecpar
Codec parameters associated with this stream.
 
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
 
static int avpacket_queue_get(AVPacketQueue *q, AVPacket *pkt, int block)
 
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
 
AVRational
Rational number (pair of numerator and denominator).
 
@ AV_PIX_FMT_RGB48LE
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as lit...
 
static const BMDTimecodeFormat decklink_timecode_format_map[]
 
virtual HRESULT STDMETHODCALLTYPE Commit()
 
av_cold int ff_decklink_read_header(AVFormatContext *avctx)
 
@ AV_PIX_FMT_BGR0
packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
 
int ff_decklink_set_configs(AVFormatContext *avctx, decklink_direction_t direction)
 
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
 
static int avpacket_queue_put(AVPacketQueue *q, AVPacket *pkt)
 
#define pthread_mutex_unlock(a)
 
static unsigned long long avpacket_queue_size(AVPacketQueue *q)
 
virtual HRESULT STDMETHODCALLTYPE ReleaseBuffer(void *buffer)
 
char * url
input or output URL.
 
static AVRational av_make_q(int num, int den)
Create an AVRational.
 
#define AV_NOPTS_VALUE
Undefined timestamp value.
 
int ff_decklink_list_input_devices(AVFormatContext *avctx, struct AVDeviceInfoList *device_list)
 
static int decklink_autodetect(struct decklink_cctx *cctx)
 
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
 
int ff_decklink_list_formats(AVFormatContext *avctx, decklink_direction_t direction)
 
#define AVERROR_EXTERNAL
Generic error in an external library.
 
int flags
A combination of AV_PKT_FLAG values.
 
int av_packet_make_refcounted(AVPacket *pkt)
Ensure the data described by a given packet is reference counted.
 
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
 
int av_packet_unpack_dictionary(const uint8_t *data, int size, AVDictionary **dict)
Unpack a dictionary from side_data.
 
void ff_decklink_cleanup(AVFormatContext *avctx)
 
static av_always_inline int pthread_cond_destroy(pthread_cond_t *cond)
 
#define AV_LOG_INFO
Standard information.
 
static void fill_data_unit_head(int line, uint8_t *tgt)
 
DecklinkPtsSource video_pts_source
 
static av_always_inline int pthread_mutex_destroy(pthread_mutex_t *mutex)
 
static uint8_t calc_parity_and_line_offset(int line)
 
#define i(width, name, range_min, range_max)
 
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
 
#define AV_TIME_BASE
Internal time base represented as integer.
 
static VANCLineNumber vanc_line_numbers[]
 
@ AV_PKT_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
 
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
 
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
 
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
 
static void avpacket_queue_end(AVPacketQueue *q)
 
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
 
static uint8_t * teletext_data_unit_from_op47_vbi_packet(int line, uint16_t *py, uint8_t *tgt)
 
static int linemask_matches(int line, int64_t mask)
 
static void unpack_v210(uint16_t *dst, const uint8_t *src, int width)
 
static void avpacket_queue_flush(AVPacketQueue *q)
 
static volatile int checksum
 
#define AV_INPUT_BUFFER_PADDING_SIZE
 
static av_always_inline int pthread_cond_signal(pthread_cond_t *cond)
 
@ AV_PIX_FMT_UYVY422
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
 
#define FF_ARRAY_ELEMS(a)
 
int index
stream index in AVFormatContext
 
static void input_callback(MMAL_PORT_T *port, MMAL_BUFFER_HEADER_T *buffer)
 
static int64_t get_pkt_pts(IDeckLinkVideoInputFrame *videoFrame, IDeckLinkAudioInputPacket *audioFrame, int64_t wallclock, int64_t abs_wallclock, DecklinkPtsSource pts_src, AVRational time_base, int64_t *initial_pts, int copyts)
 
AVRational r_frame_rate
Real base framerate of the stream.
 
static av_always_inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
 
int64_t av_gettime(void)
Get the current time in microseconds.
 
static int shift(int a, int b)
 
virtual ULONG STDMETHODCALLTYPE Release(void)
 
int ff_decklink_set_format(AVFormatContext *avctx, int width, int height, int tb_num, int tb_den, enum AVFieldOrder field_order, decklink_direction_t direction)
 
virtual ULONG STDMETHODCALLTYPE AddRef(void)
 
virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv)
 
AVPacket
This structure stores compressed data.
 
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
 
@ AV_PIX_FMT_0RGB
packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined
 
static void extract_luma_from_v210(uint16_t *dst, const uint8_t *src, int width)
 
static void avpacket_queue_init(AVFormatContext *avctx, AVPacketQueue *q)
 
static av_always_inline int pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr)
 
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
 
void * priv_data
Format private data.
 
BMDTimecodeFormat tc_format
 
int ff_decklink_list_devices(AVFormatContext *avctx, struct AVDeviceInfoList *device_list, int show_inputs, int show_outputs)
 
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
 
#define pthread_mutex_lock(a)
 
static void clear_parity_bits(uint16_t *buf, int len)
 
virtual HRESULT STDMETHODCALLTYPE AllocateBuffer(unsigned int bufferSize, void **allocatedBuffer)
 
static uint8_t * vanc_to_cc(AVFormatContext *avctx, uint16_t *buf, size_t words, unsigned &cc_count)